language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ray-project__ray | python/ray/serve/tests/test_model_composition.py | {
"start": 1429,
"end": 2030
} | class ____:
def __init__(self, val):
self.val = val
def get(self):
return self.val
def inc(self, inc):
self.val += inc
@serve.deployment
def fn_hello():
return "hello"
@serve.deployment
def combine(m1_output, m2_output, kwargs_output=0):
return m1_output + m2_output + kwargs_output
def class_factory():
class MyInlineClass:
def __init__(self, val):
self.val = val
def get(self):
return self.val
def __call__(self):
return self.get()
return MyInlineClass
@serve.deployment
| Counter |
python | PrefectHQ__prefect | tests/cli/transfer/test_dag.py | {
"start": 327,
"end": 1706
} | class ____:
"""Mock migratable resource for testing DAG functionality."""
def __init__(
self,
resource_id: uuid.UUID,
name: str,
migrate_success: bool = True,
dependencies: Optional[list[uuid.UUID]] = None,
):
self.id = resource_id
self.source_id = resource_id
self.destination_id = None
self.name = name
self._migrate_success = migrate_success
self._dependencies = dependencies or []
self.migrate_called = False
self.get_dependencies_called = False
async def migrate(self) -> None:
"""Mock migrate method."""
self.migrate_called = True
if not self._migrate_success:
if self.name.endswith("_skip"):
raise TransferSkipped("Test skip")
else:
raise ValueError(f"Mock migration error for {self.name}")
async def get_dependencies(self) -> list[MigratableProtocol]:
"""Mock get_dependencies method."""
self.get_dependencies_called = True
return [
MockMigratableResource(dep_id, f"dep_{dep_id}")
for dep_id in self._dependencies
]
def __str__(self) -> str:
return f"MockResource({self.name})"
def __repr__(self) -> str:
return f"MockResource(id={self.id}, name='{self.name}')"
| MockMigratableResource |
python | spack__spack | lib/spack/spack/util/crypto.py | {
"start": 3541,
"end": 5668
} | class ____:
"""A checker checks files against one particular hex digest.
It will automatically determine what hashing algorithm
to used based on the length of the digest it's initialized
with. e.g., if the digest is 32 hex characters long this will
use md5.
Example: know your tarball should hash to ``abc123``. You want
to check files against this. You would use this class like so::
hexdigest = 'abc123'
checker = Checker(hexdigest)
success = checker.check('downloaded.tar.gz')
After the call to check, the actual checksum is available in
checker.sum, in case it's needed for error output.
You can trade read performance and memory usage by
adjusting the block_size optional arg. By default it's
a 1MB (2**20 bytes) buffer.
"""
def __init__(self, hexdigest: str, **kwargs) -> None:
self.block_size = kwargs.get("block_size", 2**20)
self.hexdigest = hexdigest
self.sum: Optional[str] = None
self.hash_fun = hash_fun_for_digest(hexdigest)
@property
def hash_name(self) -> str:
"""Get the name of the hash function this Checker is using."""
return self.hash_fun().name.lower()
def check(self, filename: str) -> bool:
"""Read the file with the specified name and check its checksum
against self.hexdigest. Return True if they match, False
otherwise. Actual checksum is stored in self.sum.
"""
self.sum = checksum(self.hash_fun, filename, block_size=self.block_size)
return self.sum == self.hexdigest
def prefix_bits(byte_array, bits):
"""Return the first <bits> bits of a byte array as an integer."""
b2i = lambda b: b # In Python 3, indexing byte_array gives int
result = 0
n = 0
for i, b in enumerate(byte_array):
n += 8
result = (result << 8) | b2i(b)
if n >= bits:
break
result >>= n - bits
return result
def bit_length(num):
"""Number of bits required to represent an integer in binary."""
s = bin(num)
s = s.lstrip("-0b")
return len(s)
| Checker |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/lib/metadata_service/models/generated/ConnectorBreakingChanges.py | {
"start": 785,
"end": 1791
} | class ____(BaseModel):
class Config:
extra = Extra.forbid
upgradeDeadline: date = Field(
...,
description="The deadline by which to upgrade before the breaking change takes effect.",
)
message: str = Field(
..., description="Descriptive message detailing the breaking change."
)
deadlineAction: Optional[Literal["auto_upgrade", "disable"]] = Field(
None, description="Action to do when the deadline is reached."
)
migrationDocumentationUrl: Optional[AnyUrl] = Field(
None,
description="URL to documentation on how to migrate to the current version. Defaults to ${documentationUrl}-migrations#${version}",
)
scopedImpact: Optional[List[BreakingChangeScope]] = Field(
None,
description="List of scopes that are impacted by the breaking change. If not specified, the breaking change cannot be scoped to reduce impact via the supported scope types.",
min_items=1,
)
| VersionBreakingChange |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/queues/sqs.py | {
"start": 1476,
"end": 2870
} | class ____(BaseMessageQueueProvider):
"""
Configuration for SQS integration with common-messaging.
[START sqs_message_queue_provider_description]
* It uses ``sqs`` as scheme for identifying SQS queues.
* For parameter definitions take a look at :class:`~airflow.providers.amazon.aws.triggers.sqs.SqsSensorTrigger`.
.. code-block:: python
from airflow.providers.common.messaging.triggers.msg_queue import MessageQueueTrigger
from airflow.sdk import Asset, AssetWatcher
trigger = MessageQueueTrigger(
scheme="sqs",
# Additional AWS SqsSensorTrigger parameters as needed
sqs_queue="https://sqs.us-east-1.amazonaws.com/123456789012/my-queue",
aws_conn_id="aws_default",
)
asset = Asset("sqs_queue_asset", watchers=[AssetWatcher(name="sqs_watcher", trigger=trigger)])
For a complete example, see:
:mod:`tests.system.amazon.aws.example_dag_sqs_message_queue_trigger`
[END sqs_message_queue_provider_description]
"""
scheme = "sqs"
def queue_matches(self, queue: str) -> bool:
return bool(re.match(QUEUE_REGEXP, queue))
def trigger_class(self) -> type[BaseEventTrigger]:
return SqsSensorTrigger
def trigger_kwargs(self, queue: str, **kwargs) -> dict:
return {
"sqs_queue": queue,
}
| SqsMessageQueueProvider |
python | pydantic__pydantic | pydantic/functional_serializers.py | {
"start": 3489,
"end": 18117
} | class ____:
"""Wrap serializers receive the raw inputs along with a handler function that applies the standard serialization
logic, and can modify the resulting value before returning it as the final output of serialization.
For example, here's a scenario in which a wrap serializer transforms timezones to UTC **and** utilizes the existing `datetime` serialization logic.
```python
from datetime import datetime, timezone
from typing import Annotated, Any
from pydantic import BaseModel, WrapSerializer
class EventDatetime(BaseModel):
start: datetime
end: datetime
def convert_to_utc(value: Any, handler, info) -> dict[str, datetime]:
# Note that `handler` can actually help serialize the `value` for
# further custom serialization in case it's a subclass.
partial_result = handler(value, info)
if info.mode == 'json':
return {
k: datetime.fromisoformat(v).astimezone(timezone.utc)
for k, v in partial_result.items()
}
return {k: v.astimezone(timezone.utc) for k, v in partial_result.items()}
UTCEventDatetime = Annotated[EventDatetime, WrapSerializer(convert_to_utc)]
class EventModel(BaseModel):
event_datetime: UTCEventDatetime
dt = EventDatetime(
start='2024-01-01T07:00:00-08:00', end='2024-01-03T20:00:00+06:00'
)
event = EventModel(event_datetime=dt)
print(event.model_dump())
'''
{
'event_datetime': {
'start': datetime.datetime(
2024, 1, 1, 15, 0, tzinfo=datetime.timezone.utc
),
'end': datetime.datetime(
2024, 1, 3, 14, 0, tzinfo=datetime.timezone.utc
),
}
}
'''
print(event.model_dump_json())
'''
{"event_datetime":{"start":"2024-01-01T15:00:00Z","end":"2024-01-03T14:00:00Z"}}
'''
```
Attributes:
func: The serializer function to be wrapped.
return_type: The return type for the function. If omitted it will be inferred from the type annotation.
when_used: Determines when this serializer should be used. Accepts a string with values `'always'`,
`'unless-none'`, `'json'`, and `'json-unless-none'`. Defaults to 'always'.
"""
func: core_schema.WrapSerializerFunction
return_type: Any = PydanticUndefined
when_used: WhenUsed = 'always'
def __get_pydantic_core_schema__(self, source_type: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
"""This method is used to get the Pydantic core schema of the class.
Args:
source_type: Source type.
handler: Core schema handler.
Returns:
The generated core schema of the class.
"""
schema = handler(source_type)
if self.return_type is not PydanticUndefined:
return_type = self.return_type
else:
try:
# Do not pass in globals as the function could be defined in a different module.
# Instead, let `get_callable_return_type` infer the globals to use, but still pass
# in locals that may contain a parent/rebuild namespace:
return_type = _decorators.get_callable_return_type(
self.func,
localns=handler._get_types_namespace().locals,
)
except NameError as e:
raise PydanticUndefinedAnnotation.from_name_error(e) from e
return_schema = None if return_type is PydanticUndefined else handler.generate_schema(return_type)
schema['serialization'] = core_schema.wrap_serializer_function_ser_schema(
function=self.func,
info_arg=_decorators.inspect_annotated_serializer(self.func, 'wrap'),
return_schema=return_schema,
when_used=self.when_used,
)
return schema
if TYPE_CHECKING:
_Partial: TypeAlias = 'partial[Any] | partialmethod[Any]'
FieldPlainSerializer: TypeAlias = 'core_schema.SerializerFunction | _Partial'
"""A field serializer method or function in `plain` mode."""
FieldWrapSerializer: TypeAlias = 'core_schema.WrapSerializerFunction | _Partial'
"""A field serializer method or function in `wrap` mode."""
FieldSerializer: TypeAlias = 'FieldPlainSerializer | FieldWrapSerializer'
"""A field serializer method or function."""
_FieldPlainSerializerT = TypeVar('_FieldPlainSerializerT', bound=FieldPlainSerializer)
_FieldWrapSerializerT = TypeVar('_FieldWrapSerializerT', bound=FieldWrapSerializer)
@overload
def field_serializer(
field: str,
/,
*fields: str,
mode: Literal['wrap'],
return_type: Any = ...,
when_used: WhenUsed = ...,
check_fields: bool | None = ...,
) -> Callable[[_FieldWrapSerializerT], _FieldWrapSerializerT]: ...
@overload
def field_serializer(
field: str,
/,
*fields: str,
mode: Literal['plain'] = ...,
return_type: Any = ...,
when_used: WhenUsed = ...,
check_fields: bool | None = ...,
) -> Callable[[_FieldPlainSerializerT], _FieldPlainSerializerT]: ...
def field_serializer( # noqa: D417
field: str,
/,
*fields: str,
mode: Literal['plain', 'wrap'] = 'plain',
# TODO PEP 747 (grep for 'return_type' on the whole code base):
return_type: Any = PydanticUndefined,
when_used: WhenUsed = 'always',
check_fields: bool | None = None,
) -> (
Callable[[_FieldWrapSerializerT], _FieldWrapSerializerT]
| Callable[[_FieldPlainSerializerT], _FieldPlainSerializerT]
):
"""Decorator that enables custom field serialization.
In the below example, a field of type `set` is used to mitigate duplication. A `field_serializer` is used to serialize the data as a sorted list.
```python
from pydantic import BaseModel, field_serializer
class StudentModel(BaseModel):
name: str = 'Jane'
courses: set[str]
@field_serializer('courses', when_used='json')
def serialize_courses_in_order(self, courses: set[str]):
return sorted(courses)
student = StudentModel(courses={'Math', 'Chemistry', 'English'})
print(student.model_dump_json())
#> {"name":"Jane","courses":["Chemistry","English","Math"]}
```
See [the usage documentation](../concepts/serialization.md#serializers) for more information.
Four signatures are supported for the decorated serializer:
- `(self, value: Any, info: FieldSerializationInfo)`
- `(self, value: Any, nxt: SerializerFunctionWrapHandler, info: FieldSerializationInfo)`
- `(value: Any, info: SerializationInfo)`
- `(value: Any, nxt: SerializerFunctionWrapHandler, info: SerializationInfo)`
Args:
*fields: The field names the serializer should apply to.
mode: The serialization mode.
- `plain` means the function will be called instead of the default serialization logic,
- `wrap` means the function will be called with an argument to optionally call the
default serialization logic.
return_type: Optional return type for the function, if omitted it will be inferred from the type annotation.
when_used: Determines the serializer will be used for serialization.
check_fields: Whether to check that the fields actually exist on the model.
Raises:
PydanticUserError:
- If the decorator is used without any arguments (at least one field name must be provided).
- If the provided field names are not strings.
"""
if callable(field) or isinstance(field, classmethod):
raise PydanticUserError(
'The `@field_serializer` decorator cannot be used without arguments, at least one field must be provided. '
"For example: `@field_serializer('<field_name>', ...)`.",
code='decorator-missing-arguments',
)
fields = field, *fields
if not all(isinstance(field, str) for field in fields):
raise PydanticUserError(
'The provided field names to the `@field_serializer` decorator should be strings. '
"For example: `@field_serializer('<field_name_1>', '<field_name_2>', ...).`",
code='decorator-invalid-fields',
)
def dec(f: FieldSerializer) -> _decorators.PydanticDescriptorProxy[Any]:
dec_info = _decorators.FieldSerializerDecoratorInfo(
fields=fields,
mode=mode,
return_type=return_type,
when_used=when_used,
check_fields=check_fields,
)
return _decorators.PydanticDescriptorProxy(f, dec_info) # pyright: ignore[reportArgumentType]
return dec # pyright: ignore[reportReturnType]
if TYPE_CHECKING:
# The first argument in the following callables represent the `self` type:
ModelPlainSerializerWithInfo: TypeAlias = Callable[[Any, SerializationInfo[Any]], Any]
"""A model serializer method with the `info` argument, in `plain` mode."""
ModelPlainSerializerWithoutInfo: TypeAlias = Callable[[Any], Any]
"""A model serializer method without the `info` argument, in `plain` mode."""
ModelPlainSerializer: TypeAlias = 'ModelPlainSerializerWithInfo | ModelPlainSerializerWithoutInfo'
"""A model serializer method in `plain` mode."""
ModelWrapSerializerWithInfo: TypeAlias = Callable[[Any, SerializerFunctionWrapHandler, SerializationInfo[Any]], Any]
"""A model serializer method with the `info` argument, in `wrap` mode."""
ModelWrapSerializerWithoutInfo: TypeAlias = Callable[[Any, SerializerFunctionWrapHandler], Any]
"""A model serializer method without the `info` argument, in `wrap` mode."""
ModelWrapSerializer: TypeAlias = 'ModelWrapSerializerWithInfo | ModelWrapSerializerWithoutInfo'
"""A model serializer method in `wrap` mode."""
ModelSerializer: TypeAlias = 'ModelPlainSerializer | ModelWrapSerializer'
_ModelPlainSerializerT = TypeVar('_ModelPlainSerializerT', bound=ModelPlainSerializer)
_ModelWrapSerializerT = TypeVar('_ModelWrapSerializerT', bound=ModelWrapSerializer)
@overload
def model_serializer(f: _ModelPlainSerializerT, /) -> _ModelPlainSerializerT: ...
@overload
def model_serializer(
*, mode: Literal['wrap'], when_used: WhenUsed = 'always', return_type: Any = ...
) -> Callable[[_ModelWrapSerializerT], _ModelWrapSerializerT]: ...
@overload
def model_serializer(
*,
mode: Literal['plain'] = ...,
when_used: WhenUsed = 'always',
return_type: Any = ...,
) -> Callable[[_ModelPlainSerializerT], _ModelPlainSerializerT]: ...
def model_serializer(
f: _ModelPlainSerializerT | _ModelWrapSerializerT | None = None,
/,
*,
mode: Literal['plain', 'wrap'] = 'plain',
when_used: WhenUsed = 'always',
return_type: Any = PydanticUndefined,
) -> (
_ModelPlainSerializerT
| Callable[[_ModelWrapSerializerT], _ModelWrapSerializerT]
| Callable[[_ModelPlainSerializerT], _ModelPlainSerializerT]
):
"""Decorator that enables custom model serialization.
This is useful when a model need to be serialized in a customized manner, allowing for flexibility beyond just specific fields.
An example would be to serialize temperature to the same temperature scale, such as degrees Celsius.
```python
from typing import Literal
from pydantic import BaseModel, model_serializer
class TemperatureModel(BaseModel):
unit: Literal['C', 'F']
value: int
@model_serializer()
def serialize_model(self):
if self.unit == 'F':
return {'unit': 'C', 'value': int((self.value - 32) / 1.8)}
return {'unit': self.unit, 'value': self.value}
temperature = TemperatureModel(unit='F', value=212)
print(temperature.model_dump())
#> {'unit': 'C', 'value': 100}
```
Two signatures are supported for `mode='plain'`, which is the default:
- `(self)`
- `(self, info: SerializationInfo)`
And two other signatures for `mode='wrap'`:
- `(self, nxt: SerializerFunctionWrapHandler)`
- `(self, nxt: SerializerFunctionWrapHandler, info: SerializationInfo)`
See [the usage documentation](../concepts/serialization.md#serializers) for more information.
Args:
f: The function to be decorated.
mode: The serialization mode.
- `'plain'` means the function will be called instead of the default serialization logic
- `'wrap'` means the function will be called with an argument to optionally call the default
serialization logic.
when_used: Determines when this serializer should be used.
return_type: The return type for the function. If omitted it will be inferred from the type annotation.
Returns:
The decorator function.
"""
def dec(f: ModelSerializer) -> _decorators.PydanticDescriptorProxy[Any]:
dec_info = _decorators.ModelSerializerDecoratorInfo(mode=mode, return_type=return_type, when_used=when_used)
return _decorators.PydanticDescriptorProxy(f, dec_info)
if f is None:
return dec # pyright: ignore[reportReturnType]
else:
return dec(f) # pyright: ignore[reportReturnType]
AnyType = TypeVar('AnyType')
if TYPE_CHECKING:
SerializeAsAny = Annotated[AnyType, ...] # SerializeAsAny[list[str]] will be treated by type checkers as list[str]
"""Annotation used to mark a type as having duck-typing serialization behavior.
See [usage documentation](../concepts/serialization.md#serializing-with-duck-typing) for more details.
"""
else:
@dataclasses.dataclass(**_internal_dataclass.slots_true)
class SerializeAsAny:
"""Annotation used to mark a type as having duck-typing serialization behavior.
See [usage documentation](../concepts/serialization.md#serializing-with-duck-typing) for more details.
"""
def __class_getitem__(cls, item: Any) -> Any:
return Annotated[item, SerializeAsAny()]
def __get_pydantic_core_schema__(
self, source_type: Any, handler: GetCoreSchemaHandler
) -> core_schema.CoreSchema:
schema = handler(source_type)
schema_to_update = schema
while schema_to_update['type'] == 'definitions':
schema_to_update = schema_to_update.copy()
schema_to_update = schema_to_update['schema']
schema_to_update['serialization'] = core_schema.simple_ser_schema('any')
return schema
__hash__ = object.__hash__
| WrapSerializer |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 565011,
"end": 565745
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for Deployment."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("DeploymentEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("Deployment"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| DeploymentConnection |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 3464,
"end": 3531
} | class ____(PydanticErrorMixin, TypeError):
pass
| PydanticTypeError |
python | streamlit__streamlit | lib/tests/streamlit/elements/heading_test.py | {
"start": 18540,
"end": 19749
} | class ____(DeltaGeneratorTestCase):
"""Test st.subheader text_alignment parameter."""
@parameterized.expand(
[
("left", 1),
("center", 2),
("right", 3),
("justify", 4),
(None, 1), # Default case
]
)
def test_st_subheader_text_alignment(
self, text_alignment: str | None, expected_alignment: int
):
"""Test st.subheader with various text_alignment values."""
if text_alignment is None:
st.subheader("Subheader text")
else:
st.subheader("Subheader text", text_alignment=text_alignment)
el = self.get_delta_from_queue().new_element
assert el.heading.body == "Subheader text"
assert el.heading.tag == "h3"
assert el.text_alignment_config.alignment == expected_alignment
def test_st_subheader_text_alignment_invalid(self):
"""Test st.subheader with invalid text_alignment raises error."""
with pytest.raises(StreamlitAPIException) as exc:
st.subheader("Subheader text", text_alignment="middle")
assert 'Invalid text_alignment value: "middle"' in str(exc.value)
| StSubheaderTextAlignmentTest |
python | gwtw__py-sorting | test/quicksort_test.py | {
"start": 404,
"end": 728
} | class ____(unittest.TestCase,
BaseCustomComparisonSortTest,
BasePositiveIntegerSortTest,
BaseNegativeIntegerSortTest,
BaseStringSortTest):
def setUp(self):
self.sort = quicksort.sort
if __name__ == '__main__':
unittest.main()
| QuicksortTest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/too_many_public_methods.py | {
"start": 44,
"end": 485
} | class ____:
foo = 1
def __init__(self):
pass
def _private(self):
pass
def method1(self):
pass
def method2(self):
pass
def method3(self):
pass
def method4(self):
pass
def method5(self):
pass
def method6(self):
pass
def method7(self):
pass
def method8(self):
pass
def method9(self):
pass
| Everything |
python | keras-team__keras | keras/src/distribution/distribution_lib.py | {
"start": 8490,
"end": 10763
} | class ____:
"""A layout to apply to a tensor.
This API is aligned with `jax.sharding.NamedSharding`
and `tf.dtensor.Layout`.
See more details in [jax.sharding.NamedSharding](
https://jax.readthedocs.io/en/latest/jax.sharding.html#jax.sharding.NamedSharding)
and [tf.dtensor.Layout](
https://www.tensorflow.org/api_docs/python/tf/experimental/dtensor/Layout).
Args:
axes: tuple of strings that should map to the `axis_names` in
a `DeviceMesh`. For any dimensions that doesn't need any sharding,
A `None` can be used a placeholder.
device_mesh: Optional `DeviceMesh` that will be used to create
the layout. The actual mapping of tensor to physical device
is not known until the mesh is specified.
"""
def __init__(self, axes, device_mesh=None):
self._axes = tuple(axes)
self._device_mesh = device_mesh
self._validate_axes()
@property
def axes(self):
return self._axes
@property
def device_mesh(self):
return self._device_mesh
@device_mesh.setter
def device_mesh(self, device_mesh):
if self._device_mesh is not None:
raise ValueError(
"Cannot override device mesh value. Existing "
f"value is {self._device_mesh}"
)
self._device_mesh = device_mesh
self._validate_axes()
@property
def backend_layout(self):
if not hasattr(self, "_backend_layout"):
self._backend_layout = distribution_lib._to_backend_layout(self)
return self._backend_layout
def _validate_axes(self):
if self._device_mesh:
valid_axis_names = set(self._device_mesh.axis_names)
axis_names = set(self._axes) - set([None])
if axis_names - valid_axis_names:
raise ValueError(
"Invalid axis names for Layout. Valid axis "
f"names: {valid_axis_names}, Got {axis_names}"
)
def __repr__(self):
return (
f"<{self.__class__.__name__} "
f"axes={self.axes}, device_mesh={self.device_mesh}>"
)
def __str__(self):
return self.__repr__()
| TensorLayout |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-ragie/unit_tests/test_ragie_client.py | {
"start": 162,
"end": 1144
} | class ____(unittest.TestCase):
def setUp(self):
# Setup mock client
self.mock_client = MagicMock(RagieClient)
def test_find_ids_by_metadata(self):
# Test finding IDs by metadata
self.mock_client.find_ids_by_metadata.return_value = [1, 2, 3]
result = self.mock_client.find_ids_by_metadata({"key": "value"})
self.assertEqual(result, [1, 2, 3])
def test_delete_documents_by_id(self):
# Test document deletion
self.mock_client.delete_documents_by_id([1, 2, 3])
self.mock_client.delete_documents_by_id.assert_called_once_with([1, 2, 3])
def test_find_docs_by_metadata(self):
# Test finding documents by metadata
self.mock_client.find_docs_by_metadata.return_value = [{"id": 1}, {"id": 2}]
result = self.mock_client.find_docs_by_metadata({"key": "value"})
self.assertEqual(result, [{"id": 1}, {"id": 2}])
if __name__ == "__main__":
unittest.main()
| TestRagieClient |
python | huggingface__transformers | src/transformers/models/ernie4_5/modular_ernie4_5.py | {
"start": 1046,
"end": 3826
} | class ____(OlmoRotaryEmbedding):
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
# keeping it in full precision
return cos, sin
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
# glm rope style (with full dim) and full precision
original_dtype = q.dtype
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
# Interleave them instead of usual shape
cos = cos[..., : cos.shape[-1] // 2].repeat_interleave(2, dim=-1)
sin = sin[..., : sin.shape[-1] // 2].repeat_interleave(2, dim=-1)
q_embed = (q.float() * cos) + (rotate_half(q).float() * sin)
k_embed = (k.float() * cos) + (rotate_half(k).float() * sin)
return q_embed.to(original_dtype), k_embed.to(original_dtype)
| Ernie4_5RotaryEmbedding |
python | coleifer__peewee | tests/sql.py | {
"start": 83384,
"end": 84177
} | class ____(BaseTestCase):
database = SqliteDatabase(None)
def test_replace(self):
query = Person.insert(name='huey').on_conflict('replace')
self.assertSQL(query, (
'INSERT OR REPLACE INTO "person" ("name") VALUES (?)'), ['huey'])
def test_ignore(self):
query = Person.insert(name='huey').on_conflict('ignore')
self.assertSQL(query, (
'INSERT OR IGNORE INTO "person" ("name") VALUES (?)'), ['huey'])
def test_update_not_supported(self):
query = Person.insert(name='huey').on_conflict(
preserve=(Person.dob,),
update={Person.name: Person.name.concat(' (updated)')})
with self.assertRaisesCtx(ValueError):
self.database.get_sql_context().parse(query)
| TestOnConflictSqlite |
python | django__django | tests/postgres_tests/test_functions.py | {
"start": 223,
"end": 874
} | class ____(PostgreSQLTestCase):
def test_transaction_now(self):
"""
The test case puts everything under a transaction, so two models
updated with a short gap should have the same time.
"""
m1 = NowTestModel.objects.create()
m2 = NowTestModel.objects.create()
NowTestModel.objects.filter(id=m1.id).update(when=TransactionNow())
sleep(0.1)
NowTestModel.objects.filter(id=m2.id).update(when=TransactionNow())
m1.refresh_from_db()
m2.refresh_from_db()
self.assertIsInstance(m1.when, datetime)
self.assertEqual(m1.when, m2.when)
| TestTransactionNow |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_indexing.py | {
"start": 43284,
"end": 45044
} | class ____(TestCase):
"""
These test that ``TypeError`` is raised when you try to use
non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]``
and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``.
"""
def test_valid_indexing(self):
# These should raise no errors.
a = np.array([[[5]]])
a[np.array([0])]
a[[0, 0]]
a[:, [0, 0]]
a[:, 0, :]
a[:, :, :]
def test_valid_slicing(self):
# These should raise no errors.
a = np.array([[[5]]])
a[::]
a[0:]
a[:2]
a[0:2]
a[::2]
a[1::2]
a[:2:2]
a[1:2:2]
def test_non_integer_argument_errors(self):
a = np.array([[5]])
assert_raises(TypeError, np.reshape, a, (1.0, 1.0, -1))
assert_raises(TypeError, np.reshape, a, (np.array(1.0), -1))
assert_raises(TypeError, np.take, a, [0], 1.0)
assert_raises((TypeError, RuntimeError), np.take, a, [0], np.float64(1.0))
@skip(
reason=("torch doesn't have scalar types with distinct element-wise behaviours")
)
def test_non_integer_sequence_multiplication(self):
# NumPy scalar sequence multiply should not work with non-integers
def mult(a, b):
return a * b
assert_raises(TypeError, mult, [1], np.float64(3))
# following should be OK
mult([1], np.int_(3))
def test_reduce_axis_float_index(self):
d = np.zeros((3, 3, 3))
assert_raises(TypeError, np.min, d, 0.5)
assert_raises(TypeError, np.min, d, (0.5, 1))
assert_raises(TypeError, np.min, d, (1, 2.2))
assert_raises(TypeError, np.min, d, (0.2, 1.2))
| TestFloatNonIntegerArgument |
python | ray-project__ray | rllib/examples/_old_api_stack/models/autoregressive_action_dist.py | {
"start": 338,
"end": 2632
} | class ____(ActionDistribution):
"""Action distribution P(a1, a2) = P(a1) * P(a2 | a1)"""
def deterministic_sample(self):
# First, sample a1.
a1_dist = self._a1_distribution()
a1 = a1_dist.deterministic_sample()
# Sample a2 conditioned on a1.
a2_dist = self._a2_distribution(a1)
a2 = a2_dist.deterministic_sample()
self._action_logp = a1_dist.logp(a1) + a2_dist.logp(a2)
# Return the action tuple.
return (a1, a2)
def sample(self):
# First, sample a1.
a1_dist = self._a1_distribution()
a1 = a1_dist.sample()
# Sample a2 conditioned on a1.
a2_dist = self._a2_distribution(a1)
a2 = a2_dist.sample()
self._action_logp = a1_dist.logp(a1) + a2_dist.logp(a2)
# Return the action tuple.
return (a1, a2)
def logp(self, actions):
a1, a2 = actions[:, 0], actions[:, 1]
a1_vec = tf.expand_dims(tf.cast(a1, tf.float32), 1)
a1_logits, a2_logits = self.model.action_model([self.inputs, a1_vec])
return Categorical(a1_logits).logp(a1) + Categorical(a2_logits).logp(a2)
def sampled_action_logp(self):
return self._action_logp
def entropy(self):
a1_dist = self._a1_distribution()
a2_dist = self._a2_distribution(a1_dist.sample())
return a1_dist.entropy() + a2_dist.entropy()
def kl(self, other):
a1_dist = self._a1_distribution()
a1_terms = a1_dist.kl(other._a1_distribution())
a1 = a1_dist.sample()
a2_terms = self._a2_distribution(a1).kl(other._a2_distribution(a1))
return a1_terms + a2_terms
def _a1_distribution(self):
BATCH = tf.shape(self.inputs)[0]
a1_logits, _ = self.model.action_model([self.inputs, tf.zeros((BATCH, 1))])
a1_dist = Categorical(a1_logits)
return a1_dist
def _a2_distribution(self, a1):
a1_vec = tf.expand_dims(tf.cast(a1, tf.float32), 1)
_, a2_logits = self.model.action_model([self.inputs, a1_vec])
a2_dist = Categorical(a2_logits)
return a2_dist
@staticmethod
def required_model_output_shape(action_space, model_config):
return 16 # controls model output feature vector size
| BinaryAutoregressiveDistribution |
python | airbytehq__airbyte | airbyte-ci/connectors/live-tests/src/live_tests/commons/models.py | {
"start": 4049,
"end": 4218
} | class ____(UserDict):
def __str__(self) -> str:
return f"{self.__class__.__name__}(******)"
def __repr__(self) -> str:
return str(self)
| SecretDict |
python | openai__openai-python | src/openai/types/realtime/realtime_conversation_item_function_call_param.py | {
"start": 241,
"end": 1182
} | class ____(TypedDict, total=False):
arguments: Required[str]
"""The arguments of the function call.
This is a JSON-encoded string representing the arguments passed to the function,
for example `{"arg1": "value1", "arg2": 42}`.
"""
name: Required[str]
"""The name of the function being called."""
type: Required[Literal["function_call"]]
"""The type of the item. Always `function_call`."""
id: str
"""The unique ID of the item.
This may be provided by the client or generated by the server.
"""
call_id: str
"""The ID of the function call."""
object: Literal["realtime.item"]
"""Identifier for the API object being returned - always `realtime.item`.
Optional when creating a new item.
"""
status: Literal["completed", "incomplete", "in_progress"]
"""The status of the item. Has no effect on the conversation."""
| RealtimeConversationItemFunctionCallParam |
python | milvus-io__pymilvus | tests/test_grpc_handler.py | {
"start": 5327,
"end": 8141
} | class ____:
def test_init_with_uri(self) -> None:
with patch('pymilvus.client.grpc_handler.grpc.insecure_channel') as mock_channel:
mock_channel.return_value = MagicMock()
handler = GrpcHandler(uri="http://localhost:19530")
assert handler.server_address == "localhost:19530"
def test_init_with_host_port(self) -> None:
with patch('pymilvus.client.grpc_handler.grpc.insecure_channel') as mock_channel:
mock_channel.return_value = MagicMock()
handler = GrpcHandler(host="localhost", port="19530")
assert handler.server_address == "localhost:19530"
def test_init_with_secure_connection(self) -> None:
with patch('pymilvus.client.grpc_handler.grpc.secure_channel') as mock_channel:
mock_channel.return_value = MagicMock()
handler = GrpcHandler(uri="http://localhost:19530", secure=True)
assert handler.server_address == "localhost:19530"
def test_init_with_invalid_secure_param(self) -> None:
with pytest.raises(ParamError, match="secure must be bool type"):
GrpcHandler(uri="http://localhost:19530", secure="not_bool")
def test_init_with_authorization(self) -> None:
with patch('pymilvus.client.grpc_handler.grpc.insecure_channel') as mock_channel:
mock_channel.return_value = MagicMock()
handler = GrpcHandler(
uri="http://localhost:19530",
user="test_user",
password="test_password"
)
assert handler.server_address == "localhost:19530"
def test_init_with_token(self) -> None:
with patch('pymilvus.client.grpc_handler.grpc.insecure_channel') as mock_channel:
mock_channel.return_value = MagicMock()
handler = GrpcHandler(
uri="http://localhost:19530",
token="test_token"
)
assert handler.server_address == "localhost:19530"
def test_init_with_db_name(self) -> None:
with patch('pymilvus.client.grpc_handler.grpc.insecure_channel') as mock_channel:
mock_channel.return_value = MagicMock()
handler = GrpcHandler(
uri="http://localhost:19530",
db_name="test_db"
)
assert handler.server_address == "localhost:19530"
def test_get_server_type(self) -> None:
with patch('pymilvus.client.grpc_handler.grpc.insecure_channel') as mock_channel:
mock_channel.return_value = MagicMock()
handler = GrpcHandler(uri="http://localhost:19530")
# get_server_type will return 'milvus' for localhost
server_type = handler.get_server_type()
assert server_type == "milvus"
| TestGrpcHandlerInitialization |
python | ansible__ansible | lib/ansible/plugins/filter/encryption.py | {
"start": 2237,
"end": 2450
} | class ____(object):
""" Ansible vault jinja2 filters """
def filters(self):
filters = {
'vault': do_vault,
'unvault': do_unvault,
}
return filters
| FilterModule |
python | joblib__joblib | joblib/test/test_parallel.py | {
"start": 28651,
"end": 31119
} | class ____(SequentialBackend):
"""Pretends to run conncurrently while running sequentially."""
def __init__(self, param=None):
if param is None:
raise ValueError("param should not be None")
self.param = param
@parametrize("context", [parallel_config, parallel_backend])
def test_parameterized_backend_context_manager(monkeypatch, context):
monkeypatch.setitem(BACKENDS, "param_backend", ParameterizedParallelBackend)
assert _active_backend_type() == get_default_backend_instance()
with context("param_backend", param=42, n_jobs=3):
active_backend, active_n_jobs = parallel.get_active_backend()
assert type(active_backend) is ParameterizedParallelBackend
assert active_backend.param == 42
assert active_n_jobs == 3
p = Parallel()
assert p.n_jobs == 3
assert p._backend is active_backend
results = p(delayed(sqrt)(i) for i in range(5))
assert results == [sqrt(i) for i in range(5)]
# The default backend is again restored
assert _active_backend_type() == get_default_backend_instance()
@parametrize("context", [parallel_config, parallel_backend])
def test_directly_parameterized_backend_context_manager(context):
assert _active_backend_type() == get_default_backend_instance()
# Check that it's possible to pass a backend instance directly,
# without registration
with context(ParameterizedParallelBackend(param=43), n_jobs=5):
active_backend, active_n_jobs = parallel.get_active_backend()
assert type(active_backend) is ParameterizedParallelBackend
assert active_backend.param == 43
assert active_n_jobs == 5
p = Parallel()
assert p.n_jobs == 5
assert p._backend is active_backend
results = p(delayed(sqrt)(i) for i in range(5))
assert results == [sqrt(i) for i in range(5)]
# The default backend is again restored
assert _active_backend_type() == get_default_backend_instance()
def sleep_and_return_pid():
sleep(0.1)
return os.getpid()
def get_nested_pids():
assert _active_backend_type() == ThreadingBackend
# Assert that the nested backend does not change the default number of
# jobs used in Parallel
assert Parallel()._effective_n_jobs() == 1
# Assert that the tasks are running only on one process
return Parallel(n_jobs=2)(delayed(sleep_and_return_pid)() for _ in range(2))
| ParameterizedParallelBackend |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 91557,
"end": 95370
} | class ____(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
| BasicTCPTest |
python | PyCQA__pylint | tests/functional/a/arguments_differ.py | {
"start": 2151,
"end": 2217
} | class ____:
@property
def close(self):
pass
| Property |
python | django__django | tests/admin_registration/tests.py | {
"start": 4347,
"end": 6255
} | class ____(SimpleTestCase):
"""
Tests the register decorator in admin.decorators
For clarity:
@register(Person)
class AuthorAdmin(ModelAdmin):
pass
is functionally equal to (the way it is written in these tests):
AuthorAdmin = register(Person)(AuthorAdmin)
"""
def setUp(self):
self.default_site = site
self.custom_site = CustomSite()
def test_basic_registration(self):
register(Person)(NameAdmin)
self.assertIsInstance(
self.default_site.get_model_admin(Person), admin.ModelAdmin
)
self.default_site.unregister(Person)
def test_custom_site_registration(self):
register(Person, site=self.custom_site)(NameAdmin)
self.assertIsInstance(
self.custom_site.get_model_admin(Person), admin.ModelAdmin
)
def test_multiple_registration(self):
register(Traveler, Place)(NameAdmin)
self.assertIsInstance(
self.default_site.get_model_admin(Traveler), admin.ModelAdmin
)
self.default_site.unregister(Traveler)
self.assertIsInstance(
self.default_site.get_model_admin(Place), admin.ModelAdmin
)
self.default_site.unregister(Place)
def test_wrapped_class_not_a_model_admin(self):
with self.assertRaisesMessage(
ValueError, "Wrapped class must subclass ModelAdmin."
):
register(Person)(CustomSite)
def test_custom_site_not_an_admin_site(self):
with self.assertRaisesMessage(ValueError, "site must subclass AdminSite"):
register(Person, site=Traveler)(NameAdmin)
def test_empty_models_list_registration_fails(self):
with self.assertRaisesMessage(
ValueError, "At least one model must be passed to register."
):
register()(NameAdmin)
| TestRegistrationDecorator |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/dag_run.py | {
"start": 2202,
"end": 3031
} | class ____(BaseModel):
"""DAG Run serializer for responses."""
dag_run_id: str = Field(validation_alias="run_id")
dag_id: str
logical_date: datetime | None
queued_at: datetime | None
start_date: datetime | None
end_date: datetime | None
duration: float | None
data_interval_start: datetime | None
data_interval_end: datetime | None
run_after: datetime
last_scheduling_decision: datetime | None
run_type: DagRunType
state: DagRunState
triggered_by: DagRunTriggeredByType | None
triggering_user_name: str | None
conf: dict | None
note: str | None
dag_versions: list[DagVersionResponse]
bundle_version: str | None
dag_display_name: str = Field(validation_alias=AliasPath("dag_model", "dag_display_name"))
partition_key: str | None
| DAGRunResponse |
python | huggingface__transformers | src/transformers/models/llava_onevision/modeling_llava_onevision.py | {
"start": 11808,
"end": 32119
} | class ____(LlavaOnevisionPreTrainedModel):
_checkpoint_conversion_mapping = {
r"^language_model.model": "language_model",
}
base_model_prefix = "model"
def __init__(self, config):
super().__init__(config)
self.vision_tower = AutoModel.from_config(config.vision_config)
self.multi_modal_projector = LlavaOnevisionMultiModalProjector(config)
embed_std = 1 / math.sqrt(config.text_config.hidden_size)
self.image_newline = nn.Parameter(torch.randn(config.text_config.hidden_size, dtype=self.dtype) * embed_std)
self.vocab_size = config.text_config.vocab_size
self.language_model = AutoModel.from_config(config.text_config)
self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
self.post_init()
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
def pack_image_features(self, image_features, image_sizes, image_newline=None, vision_aspect_ratio="anyres_max_9"):
"""
Reshape, unpad and then pack each image_feature into a single image_features tensor containing all visual vectors.
Args:
image_features (`list[torch.Tensor]` of length num_images, each of shape `(num_patches, image_length, embed_dim)`)
List of image feature tensor, each contains all the visual feature of all patches.
image_sizes (`torch.Tensor` of shape `(num_images, 2)`)
Actual image size of each images (H, W).
image_newline (`torch.Tensor` of shape `(embed_dim)`)
New line embedding vector.
vision_aspect_ratio (`str`, *optional*, "anyres_max_9"):
Aspect ratio used when processong image features. The default value is "anyres_max_9".
Returns:
image_features (`torch.Tensor` of shape `(all_feat_len, embed_dim)`)
feature_lens (`list[int]`)
token length of each image in image_features
"""
new_image_features = []
feature_lens = []
for image_idx, image_feature in enumerate(image_features):
if image_feature.shape[0] > 1:
base_image_feature = image_feature[0]
image_feature = image_feature[1:]
height = width = self.config.vision_config.image_size // self.config.vision_config.patch_size
if height * width != base_image_feature.shape[0]:
raise ValueError("The number of patches is not consistent with the image size.")
num_patch_height, num_patch_width = get_anyres_image_grid_shape(
image_sizes[image_idx],
self.config.image_grid_pinpoints,
self.config.vision_config.image_size,
)
image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1)
image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
image_feature = image_feature.flatten(1, 2).flatten(2, 3)
image_feature = unpad_image(image_feature, image_sizes[image_idx])
max_num_patches = int(vision_aspect_ratio.strip("anyres_max_"))
channels, curr_height, curr_width = image_feature.shape
ratio = math.sqrt(curr_height * curr_width / (max_num_patches * height**2))
if ratio > 1.1:
image_feature = image_feature[None]
image_feature = nn.functional.interpolate(
image_feature, [int(curr_height // ratio), int(curr_width // ratio)], mode="bilinear"
)[0]
if image_newline is not None:
image_feature = torch.cat(
(
image_feature,
image_newline[:, None, None]
.expand(*image_feature.shape[:-1], 1)
.to(image_feature.device, image_feature.dtype),
),
dim=-1,
)
image_feature = image_feature.flatten(1, 2).transpose(0, 1)
image_feature = torch.cat((base_image_feature, image_feature), dim=0)
else:
image_feature = image_feature[0]
if image_newline is not None:
image_feature = torch.cat((image_feature, image_newline[None].to(image_feature)), dim=0)
new_image_features.append(image_feature)
feature_lens.append(image_feature.size(0))
feature_lens = torch.tensor(feature_lens, dtype=torch.long, device=image_features[0].device)
return new_image_features, feature_lens
def get_image_features(
self,
pixel_values: torch.FloatTensor,
image_sizes: torch.Tensor,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
vision_aspect_ratio: Optional[str] = None,
batch_num_images: Optional[torch.LongTensor] = None,
):
"""
Obtains image last hidden states from the vision tower and apply multimodal projection.
Args:
pixel_values (`torch.FloatTensor]` of shape `(batch_size, num_patches, channels, height, width)`)
The tensors corresponding to the input images.
image_sizes (`torch.Tensor` of shape `(num_images, 2)`)
Actual image size of each images (H, W).
vision_feature_layer (`Union[int, list[int]]`):
The index of the layer to select the vision feature. If multiple indices are provided,
the vision feature of the corresponding indices will be concatenated to form the
vision features.
vision_feature_select_strategy (`str`):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`
batch_num_images (`torch.LongTensor`, *optional*):
Number of images in each sample.
Returns:
image_features (list[`torch.Tensor`]): List of image feature tensor, each contains all the visual feature of all patches
and are of shape `(num_patches, image_length, embed_dim)`).
"""
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
vision_aspect_ratio = (
vision_aspect_ratio if vision_aspect_ratio is not None else self.config.vision_aspect_ratio
)
# ! infer image_num_patches from image_sizes
if batch_num_images is None:
# treat this as a single-image case for backward compatibility
need_patching = [True] * len(image_sizes)
else:
need_patching = [n == 1 for n in batch_num_images for _ in range(n)]
image_num_patches = [
image_size_to_num_patches(
image_size=imsize,
grid_pinpoints=self.config.image_grid_pinpoints,
patch_size=self.config.vision_config.image_size,
)
if should_patch
else 1
for imsize, should_patch in zip(image_sizes, need_patching)
]
if pixel_values.dim() == 5:
# stacked if input is (batch_size, num_patches, num_channels, height, width)
_pixel_values_list = [pix_val[:num_patch] for pix_val, num_patch in zip(pixel_values, image_num_patches)]
pixel_values = torch.cat(_pixel_values_list, dim=0)
elif pixel_values.dim() != 4:
# otherwise has to be stacked from list of (num_patches, num_channels, height, width)
raise ValueError(f"pixel_values of shape {pixel_values.shape}, expect to be of 4 or 5 dimensions")
image_features = self.vision_tower(pixel_values, output_hidden_states=True)
# If we have one vision feature layer, return the corresponding hidden states,
# otherwise, select the hidden states of each feature layer and concatenate them
if isinstance(vision_feature_layer, int):
selected_image_feature = image_features.hidden_states[vision_feature_layer]
else:
hs_pool = [image_features.hidden_states[layer_idx] for layer_idx in vision_feature_layer]
selected_image_feature = torch.cat(hs_pool, dim=-1)
if vision_feature_select_strategy == "default":
selected_image_feature = selected_image_feature[:, 1:]
image_features = self.multi_modal_projector(selected_image_feature)
image_features = torch.split(image_features, image_num_patches, dim=0)
image_features, feature_lens = self.pack_image_features(
image_features,
image_sizes,
image_newline=self.image_newline,
vision_aspect_ratio=vision_aspect_ratio,
)
return image_features
def get_placeholder_mask(
self,
input_ids: torch.LongTensor,
inputs_embeds: torch.FloatTensor,
image_features: Optional[torch.FloatTensor] = None,
video_features: Optional[torch.FloatTensor] = None,
):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
equal to the length of multimodal features. If the lengths are different, an error is raised.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
special_video_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_video_mask = special_video_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
special_video_mask = input_ids == self.config.video_token_id
n_image_tokens = special_image_mask.sum()
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
if image_features is not None and inputs_embeds[special_image_mask].numel() != image_features.numel():
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.shape[0]}"
)
n_video_tokens = special_video_mask.sum()
special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel():
raise ValueError(
f"Videos features and image tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}"
)
return special_image_mask, special_video_mask
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
image_sizes: Optional[torch.LongTensor] = None,
pixel_values_videos: Optional[torch.FloatTensor] = None,
image_sizes_videos: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
vision_aspect_ratio: Optional[str] = None,
batch_num_images: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Union[tuple, LlavaOnevisionModelOutputWithPast]:
r"""
image_sizes_videos (`torch.LongTensor` of shape `(batch_size, frames, 2)`, *optional*):
The sizes of the videos in the batch, being (height, width) for each frame in the video.
vision_aspect_ratio (`str`, *optional*, defaults to `"anyres_max_9"`):
Aspect ratio used when processong image features. The default value is "anyres_max_9".
batch_num_images (`torch.LongTensor`, *optional*):
Number of images in each sample.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
vision_aspect_ratio = (
vision_aspect_ratio if vision_aspect_ratio is not None else self.config.vision_aspect_ratio
)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
# Images are processed with Anyres
if pixel_values is not None:
image_features = self.get_image_features(
pixel_values,
image_sizes,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
batch_num_images=batch_num_images,
)
image_features = torch.cat(image_features, dim=0)
image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask, _ = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
# Video are simply embedded and further pooled to decrease seq len
if pixel_values_videos is not None:
video_features = self.get_video_features(
pixel_values_videos,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
)
image_newline = (
self.image_newline[None, None, :].repeat(video_features.shape[0], 1, 1).to(video_features.device)
)
video_features = torch.cat((video_features, image_newline), dim=1)
video_features = video_features.flatten(0, 1).to(inputs_embeds.device, inputs_embeds.dtype)
_, special_video_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, video_features=video_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_video_mask, video_features)
outputs = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
**kwargs,
)
return LlavaOnevisionModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
video_hidden_states=video_features if pixel_values_videos is not None else None,
)
def get_video_features(
self,
pixel_values: torch.FloatTensor,
vision_feature_layer: Union[int, list[int]],
vision_feature_select_strategy: str,
):
"""
Obtains video last hidden states from the vision tower, apply multimodal projection and pooling.
Args:
pixel_values (`torch.FloatTensor]` of shape `(batch_size, num_frames, channels, height, width)`)
The tensors corresponding to the input video.
vision_feature_layer (`Union[int, list[int]], *optional*, defaults to -2`):
The index of the layer to select the vision feature. If multiple indices are provided,
the vision feature of the corresponding indices will be concatenated to form the
vision features.
vision_feature_select_strategy (`str`):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`
Returns:
video_features (list[`torch.Tensor`]): List of video feature tensor, each contains all the visual feature of all patches
and are of shape `(num_videos, video_length, embed_dim)`).
"""
batch_size, frames, channels, height, width = pixel_values.shape
pixel_values = pixel_values.view(batch_size * frames, channels, height, width)
video_features = self.vision_tower(pixel_values, output_hidden_states=True)
# If we have one vision feature layer, return the corresponding hidden states,
# otherwise, select the hidden states of each feature layer and concatenate them
if isinstance(vision_feature_layer, int):
selected_video_feature = video_features.hidden_states[vision_feature_layer]
else:
hs_pool = [video_features.hidden_states[layer_idx] for layer_idx in vision_feature_layer]
selected_video_feature = torch.cat(hs_pool, dim=-1)
if vision_feature_select_strategy == "default":
selected_video_feature = selected_video_feature[:, 1:]
video_features = self.multi_modal_projector(selected_video_feature)
video_features = self.apply_pooling(video_features)
video_features = video_features.reshape(batch_size, frames * video_features.shape[1], -1)
return video_features
def apply_pooling(self, image_features):
height = width = self.config.vision_config.image_size // self.config.vision_config.patch_size
batch_frames, seq_len, dim = image_features.shape
image_features = image_features.view(batch_frames, height, width, -1)
image_features = image_features.permute(0, 3, 1, 2).contiguous()
height, width = image_features.shape[2:]
scaled_shape = [math.ceil(height / 2), math.ceil(width / 2)]
image_features = nn.functional.interpolate(image_features, size=scaled_shape, mode="bilinear")
image_features = image_features.permute(0, 2, 3, 1)
image_features = image_features.view(batch_frames, -1, dim)
return image_features
@auto_docstring(
custom_intro="""
The LLAVA-NeXT model which consists of a vision backbone and a language model.
"""
)
| LlavaOnevisionModel |
python | scipy__scipy | scipy/special/tests/test_basic.py | {
"start": 55807,
"end": 58556
} | class ____:
def test_comb(self):
assert_allclose(special.comb([10, 10], [3, 4]), [120., 210.])
assert_allclose(special.comb(10, 3), 120.)
assert_equal(special.comb(10, 3, exact=True), 120)
assert_equal(special.comb(10, 3, exact=True, repetition=True), 220)
assert_allclose([special.comb(20, k, exact=True) for k in range(21)],
special.comb(20, list(range(21))), atol=1e-15)
ii = np.iinfo(int).max + 1
assert_equal(special.comb(ii, ii-1, exact=True), ii)
expected = 100891344545564193334812497256
assert special.comb(100, 50, exact=True) == expected
def test_comb_with_np_int64(self):
n = 70
k = 30
np_n = np.int64(n)
np_k = np.int64(k)
res_np = special.comb(np_n, np_k, exact=True)
res_py = special.comb(n, k, exact=True)
assert res_np == res_py
def test_comb_zeros(self):
assert_equal(special.comb(2, 3, exact=True), 0)
assert_equal(special.comb(-1, 3, exact=True), 0)
assert_equal(special.comb(2, -1, exact=True), 0)
assert_equal(special.comb(2, -1, exact=False), 0)
assert_allclose(special.comb([2, -1, 2, 10], [3, 3, -1, 3]), [0., 0., 0., 120.])
def test_comb_exact_non_int_error(self):
msg = "`exact=True`"
with pytest.raises(ValueError, match=msg):
special.comb(3.4, 4, exact=True)
with pytest.raises(ValueError, match=msg):
special.comb(3, 4.4, exact=True)
def test_perm(self):
assert_allclose(special.perm([10, 10], [3, 4]), [720., 5040.])
assert_allclose(special.perm(10, 3), 720., atol=1.5e-7, rtol=0)
assert_equal(special.perm(10, 3, exact=True), 720)
def test_perm_zeros(self):
assert_equal(special.perm(2, 3, exact=True), 0)
assert_equal(special.perm(-1, 3, exact=True), 0)
assert_equal(special.perm(2, -1, exact=True), 0)
assert_equal(special.perm(2, -1, exact=False), 0)
assert_allclose(special.perm([2, -1, 2, 10], [3, 3, -1, 3]), [0., 0., 0., 720.])
def test_perm_iv(self):
# currently `exact=True` only support scalars
with pytest.raises(ValueError, match="scalar integers"):
special.perm([1, 2], [4, 5], exact=True)
with pytest.raises(ValueError, match="Non-integer"):
special.perm(4.6, 6, exact=True)
with pytest.raises(ValueError, match="Non-integer"):
special.perm(-4.6, 3, exact=True)
with pytest.raises(ValueError, match="Non-integer"):
special.perm(4, -3.9, exact=True)
with pytest.raises(ValueError, match="Non-integer"):
special.perm(6.0, 4.6, exact=True)
| TestCombinatorics |
python | pyodide__pyodide | src/py/_pyodide/_importhook.py | {
"start": 281,
"end": 3812
} | class ____(MetaPathFinder):
def __init__(self) -> None:
self.jsproxies: dict[str, Any] = {}
self.hook: Callable[[JsProxy], None] = lambda _: None
def find_spec(
self,
fullname: str,
path: Sequence[bytes | str] | None,
target: ModuleType | None = None,
) -> ModuleSpec | None:
[parent, _, child] = fullname.rpartition(".")
if parent:
try:
parent_module = sys.modules[parent]
except KeyError:
# Note: This will never happen when we're called from importlib,
# but pytest hits this codepath. See
# `test_importhook_called_from_pytest`.
return None
if not isinstance(parent_module, JsProxy):
# Not one of us.
return None
try:
jsproxy = getattr(parent_module, child)
except AttributeError:
raise ModuleNotFoundError(
f"No module named {fullname!r}", name=fullname
) from None
if not isinstance(jsproxy, JsProxy):
raise ModuleNotFoundError(
f"No module named {fullname!r}", name=fullname
)
else:
try:
jsproxy = self.jsproxies[fullname]
except KeyError:
return None
loader = JsLoader(jsproxy)
return spec_from_loader(fullname, loader, origin="javascript")
def register_js_module(self, name: str, jsproxy: Any) -> None:
"""
Registers ``jsproxy`` as a JavaScript module named ``name``. The module
can then be imported from Python using the standard Python import
system. If another module by the same name has already been imported,
this won't have much effect unless you also delete the imported module
from :py:data:`sys.modules`. This is called by the JavaScript API
:js:func:`pyodide.registerJsModule`.
Parameters
----------
name :
Name of js module
jsproxy :
JavaScript object backing the module
"""
assert JsProxy is not None
if not isinstance(name, str):
raise TypeError(
f"Argument 'name' must be a str, not {type(name).__name__!r}"
)
if not isinstance(jsproxy, JsProxy):
raise TypeError(
f"Argument 'jsproxy' must be a JsProxy, not {type(jsproxy).__name__!r}"
)
self.jsproxies[name] = jsproxy
def unregister_js_module(self, name: str) -> None:
"""
Unregisters a JavaScript module with given name that has been previously
registered with :js:func:`pyodide.registerJsModule` or
:py:func:`pyodide.ffi.register_js_module`. If a JavaScript module with that name
does not already exist, will raise an error. If the module has already
been imported, this won't have much effect unless you also delete the
imported module from :py:data:`sys.modules`. This is called by the JavaScript
API :js:func:`pyodide.unregisterJsModule`.
Parameters
----------
name :
Name of the module to unregister
"""
try:
del self.jsproxies[name]
except KeyError:
raise ValueError(
f"Cannot unregister {name!r}: no Javascript module with that name is registered"
) from None
| JsFinder |
python | charliermarsh__ruff | python/ruff-ecosystem/ruff_ecosystem/types.py | {
"start": 2336,
"end": 2502
} | class ____(Serializable):
"""
The result of a completed ecosystem comparison for a single project.
"""
diff: Diff
repo: ClonedRepository
| Comparison |
python | cython__cython | Cython/Compiler/Symtab.py | {
"start": 14148,
"end": 54683
} | class ____:
# name string Unqualified name
# outer_scope Scope or None Enclosing scope
# entries {string : Entry} Python name to entry, non-types
# const_entries [Entry] Constant entries
# type_entries [Entry] Struct/union/enum/typedef/exttype entries
# sue_entries [Entry] Struct/union/enum entries
# arg_entries [Entry] Function argument entries
# var_entries [Entry] User-defined variable entries
# pyfunc_entries [Entry] Python function entries
# cfunc_entries [Entry] C function entries
# c_class_entries [Entry] All extension type entries
# cname_to_entry {string : Entry} Temp cname to entry mapping
# return_type PyrexType or None Return type of function owning scope
# is_builtin_scope boolean Is the builtin scope of Python/Cython
# is_py_class_scope boolean Is a Python class scope
# is_c_class_scope boolean Is an extension type scope
# is_local_scope boolean Is a local (i.e. function/method/generator) scope
# is_closure_scope boolean Is a closure scope
# is_generator_expression_scope boolean A subset of closure scope used for generator expressions
# is_passthrough boolean Outer scope is passed directly
# is_cpp_class_scope boolean Is a C++ class scope
# is_property_scope boolean Is a extension type property scope
# is_c_dataclass_scope boolean or "frozen" is a cython.dataclasses.dataclass
# scope_prefix string Disambiguator for C names
# in_cinclude boolean Suppress C declaration code
# qualified_name string "modname" or "modname.classname"
# Python strings in this scope
# nogil boolean In a nogil section
# directives dict Helper variable for the recursive
# analysis, contains directive values.
# is_internal boolean Is only used internally (simpler setup)
# scope_predefined_names list of str Class variable containing special names defined by
# this type of scope (e.g. __builtins__, __qualname__)
# node_positions_to_offset {pos: offset} Mapping from node positions to line table offsets
is_builtin_scope = 0
is_py_class_scope = 0
is_c_class_scope = 0
is_closure_scope = 0
is_local_scope = False
is_generator_expression_scope = 0
is_comprehension_scope = 0
is_passthrough = 0
is_cpp_class_scope = 0
is_property_scope = 0
is_module_scope = 0
is_c_dataclass_scope = False
is_internal = 0
scope_prefix = ""
in_cinclude = 0
nogil = 0
fused_to_specific = None
return_type = None
scope_predefined_names = []
# Do ambiguous type names like 'int' and 'float' refer to the C types? (Otherwise, Python types.)
in_c_type_context = True
node_positions_to_offset = {} # read-only fallback dict
def __init__(self, name, outer_scope, parent_scope):
# The outer_scope is the next scope in the lookup chain.
# The parent_scope is used to derive the qualified name of this scope.
self.name = name
self.outer_scope = outer_scope
self.parent_scope = parent_scope
mangled_name = "%d%s_" % (len(name), name.replace('.', '_dot_'))
qual_scope = self.qualifying_scope()
if qual_scope:
self.qualified_name = qual_scope.qualify_name(name)
self.scope_prefix = qual_scope.scope_prefix + mangled_name
else:
self.qualified_name = EncodedString(name)
self.scope_prefix = mangled_name
self.entries = {}
self.subscopes = set()
self.const_entries = []
self.type_entries = []
self.sue_entries = []
self.arg_entries = []
self.var_entries = []
self.pyfunc_entries = []
self.cfunc_entries = []
self.c_class_entries = []
self.defined_c_classes = []
self.imported_c_classes = {}
self.cname_to_entry = {}
self.identifier_to_entry = {}
self.num_to_entry = {}
self.obj_to_entry = {}
self.buffer_entries = []
self.lambda_defs = []
self.id_counters = {}
for var_name in self.scope_predefined_names:
self.declare_var(EncodedString(var_name), py_object_type, pos=None)
def __deepcopy__(self, memo):
return self
def merge_in(self, other, merge_unused=True, allowlist=None):
# Use with care...
entries = []
for name, entry in other.entries.items():
if not allowlist or name in allowlist:
if entry.used or merge_unused:
entries.append((name, entry))
self.entries.update(entries)
for attr in ('const_entries',
'type_entries',
'sue_entries',
'arg_entries',
'var_entries',
'pyfunc_entries',
'cfunc_entries',
'c_class_entries'):
self_entries = getattr(self, attr)
names = {e.name for e in self_entries}
for entry in getattr(other, attr):
if (entry.used or merge_unused) and entry.name not in names:
self_entries.append(entry)
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.qualified_name)
def qualifying_scope(self):
return self.parent_scope
def mangle(self, prefix, name = None):
if name:
return punycodify_name("%s%s%s" % (prefix, self.scope_prefix, name))
else:
return self.parent_scope.mangle(prefix, self.name)
def mangle_internal(self, name):
# Mangle an internal name so as not to clash with any
# user-defined name in this scope.
prefix = "%s%s_" % (Naming.pyrex_prefix, name)
return self.mangle(prefix)
#return self.parent_scope.mangle(prefix, self.name)
def mangle_class_private_name(self, name):
if self.parent_scope:
return self.parent_scope.mangle_class_private_name(name)
return name
def next_id(self, name=None):
# Return a cname fragment that is unique for this module
counters = self.global_scope().id_counters
try:
count = counters[name] + 1
except KeyError:
count = 0
counters[name] = count
if name:
if not count:
# unique names don't need a suffix, reoccurrences will get one
return name
return '%s%d' % (name, count)
else:
return '%d' % count
@property
def context(self):
return self.global_scope().context
def global_scope(self):
""" Return the module-level scope containing this scope. """
return self.outer_scope.global_scope()
def builtin_scope(self):
""" Return the module-level scope containing this scope. """
return self.outer_scope.builtin_scope()
def iter_local_scopes(self):
yield self
if self.subscopes:
yield from sorted(self.subscopes, key=operator.attrgetter('scope_prefix'))
@try_finally_contextmanager
def new_c_type_context(self, in_c_type_context=None):
old_c_type_context = self.in_c_type_context
if in_c_type_context is not None:
self.in_c_type_context = in_c_type_context
yield
self.in_c_type_context = old_c_type_context
def handle_already_declared_name(self, name, cname, type, pos, visibility, copy_entry=False):
"""
Returns an entry or None
If it returns an entry, it makes sense for "declare" to keep using that
entry and not to declare its own.
May be overridden (e.g. for builtin scope,
which always allows redeclarations)
"""
entry = None
entries = self.entries
old_entry = entries[name]
# Reject redeclared C++ functions only if they have a compatible type signature.
cpp_override_allowed = False
if type.is_cfunction and old_entry.type.is_cfunction and self.is_cpp():
# If we redefine a C++ class method which is either inherited
# or automatically generated (base constructor), then it's fine.
# Otherwise, we shout.
for alt_entry in old_entry.all_alternatives():
if type.compatible_signature_with(alt_entry.type):
if name == '<init>' and not type.args:
# Cython pre-declares the no-args constructor - allow later user definitions.
cpp_override_allowed = True
elif alt_entry.is_inherited:
# Note that we can override an inherited method with a compatible but not exactly equal signature, as in C++.
cpp_override_allowed = True
if cpp_override_allowed:
entry = alt_entry
if copy_entry:
entry = copy.copy(alt_entry)
# A compatible signature doesn't mean the exact same signature,
# so we're taking the new signature for the entry.
entry.type = type
entry.is_inherited = False
# Updating the entry attributes which can be modified in the method redefinition.
entry.cname = cname
entry.pos = pos
break
else:
cpp_override_allowed = True
if cpp_override_allowed:
# C++ function/method overrides with different signatures are ok.
pass
elif entries[name].is_inherited:
# Likewise ignore inherited classes.
pass
else:
if visibility == 'extern':
# Silenced outside of "cdef extern" blocks, until we have a safe way to
# prevent pxd-defined cpdef functions from ending up here.
warning(pos, "'%s' redeclared " % name, 1 if self.in_cinclude else 0)
elif visibility != 'ignore':
error(pos, "'%s' redeclared " % name)
self.entries[name].already_declared_here()
return None
return entry
def declare(self, name, cname, type, pos, visibility, shadow = 0, is_type = 0, create_wrapper = 0):
# Create new entry, and add to dictionary if
# name is not None. Reports a warning if already
# declared.
if type.is_buffer and not isinstance(self, LocalScope): # and not is_type:
error(pos, 'Buffer types only allowed as function local variables')
if not self.in_cinclude and cname and re.match("^_[_A-Z]+$", cname):
# See https://www.gnu.org/software/libc/manual/html_node/Reserved-Names.html#Reserved-Names
warning(pos, "'%s' is a reserved name in C." % cname, -1)
entries = self.entries
entry = None
if name and name in entries and not shadow:
entry = self.handle_already_declared_name(name, cname, type, pos, visibility)
if not entry:
entry = Entry(name, cname, type, pos = pos)
entry.in_cinclude = self.in_cinclude
entry.create_wrapper = create_wrapper
if name:
entry.qualified_name = self.qualify_name(name)
if not shadow:
if name in entries and self.is_cpp() and type.is_cfunction and not entries[name].is_cmethod:
# Which means: function or cppclass method is already present
entries[name].overloaded_alternatives.append(entry)
else:
entries[name] = entry
if type.is_memoryviewslice:
entry.init = type.default_value
entry.scope = self
entry.visibility = visibility
return entry
def qualify_name(self, name):
return EncodedString("%s.%s" % (self.qualified_name, name))
def declare_const(self, name, type, value, pos, cname = None, visibility = 'private', api = 0, create_wrapper = 0):
# Add an entry for a named constant.
if not cname:
if self.in_cinclude or (visibility == 'public' or api):
cname = name
else:
cname = self.mangle(Naming.enum_prefix, name)
entry = self.declare(name, cname, type, pos, visibility, create_wrapper = create_wrapper)
entry.is_const = 1
entry.value_node = value
return entry
def declare_type(self, name, type, pos,
cname = None, visibility = 'private', api = 0, defining = 1,
shadow = 0, template = 0):
# Add an entry for a type definition.
if not cname:
cname = name
entry = self.declare(name, cname, type, pos, visibility, shadow,
is_type=True)
entry.is_type = 1
entry.api = api
if defining:
self.type_entries.append(entry)
# don't replace an entry that's already set
if not template and getattr(type, "entry", None) is None:
type.entry = entry
# here we would set as_variable to an object representing this type
return entry
def declare_typedef(self, name, base_type, pos, cname = None,
visibility = 'private', api = 0):
if not cname:
if self.in_cinclude or (visibility != 'private' or api):
cname = name
else:
cname = self.mangle(Naming.type_prefix, name)
try:
if self.is_cpp_class_scope:
namespace = self.outer_scope.lookup(self.name).type
else:
namespace = None
type = PyrexTypes.create_typedef_type(name, base_type, cname,
(visibility == 'extern'),
namespace)
except ValueError as e:
error(pos, e.args[0])
type = PyrexTypes.error_type
entry = self.declare_type(name, type, pos, cname,
visibility = visibility, api = api)
type.qualified_name = entry.qualified_name
return entry
def declare_struct_or_union(self, name, kind, scope,
typedef_flag, pos, cname = None,
visibility = 'private', api = 0,
packed = False):
# Add an entry for a struct or union definition.
if not cname:
if self.in_cinclude or (visibility == 'public' or api):
cname = name
else:
cname = self.mangle(Naming.type_prefix, name)
entry = self.lookup_here(name)
if not entry:
in_cpp = self.is_cpp()
type = PyrexTypes.CStructOrUnionType(
name, kind, scope, typedef_flag, cname, packed,
in_cpp = in_cpp)
entry = self.declare_type(name, type, pos, cname,
visibility = visibility, api = api,
defining = scope is not None)
self.sue_entries.append(entry)
type.entry = entry
else:
if not (entry.is_type and entry.type.is_struct_or_union
and entry.type.kind == kind):
warning(pos, "'%s' redeclared " % name, 0)
elif scope and entry.type.scope:
warning(pos, "'%s' already defined (ignoring second definition)" % name, 0)
else:
self.check_previous_typedef_flag(entry, typedef_flag, pos)
self.check_previous_visibility(entry, visibility, pos)
if scope:
entry.type.scope = scope
self.type_entries.append(entry)
if self.is_cpp_class_scope:
entry.type.namespace = self.outer_scope.lookup(self.name).type
return entry
def declare_cpp_class(self, name, scope,
pos, cname = None, base_classes = (),
visibility = 'extern', templates = None):
if cname is None:
if self.in_cinclude or (visibility != 'private'):
cname = name
else:
cname = self.mangle(Naming.type_prefix, name)
base_classes = list(base_classes)
entry = self.lookup_here(name)
if not entry:
type = PyrexTypes.CppClassType(
name, scope, cname, base_classes, templates = templates)
entry = self.declare_type(name, type, pos, cname,
visibility = visibility, defining = scope is not None)
self.sue_entries.append(entry)
else:
if not (entry.is_type and entry.type.is_cpp_class):
error(pos, "'%s' redeclared " % name)
entry.already_declared_here()
return None
elif scope and entry.type.scope:
warning(pos, "'%s' already defined (ignoring second definition)" % name, 0)
else:
if scope:
entry.type.scope = scope
self.type_entries.append(entry)
if base_classes:
if entry.type.base_classes and entry.type.base_classes != base_classes:
error(pos, "Base type does not match previous declaration")
entry.already_declared_here()
else:
entry.type.base_classes = base_classes
if templates or entry.type.templates:
if templates != entry.type.templates:
error(pos, "Template parameters do not match previous declaration")
entry.already_declared_here()
def declare_inherited_attributes(entry, base_classes):
for base_class in base_classes:
if base_class is PyrexTypes.error_type:
continue
if base_class.scope is None:
error(pos, "Cannot inherit from incomplete type")
else:
declare_inherited_attributes(entry, base_class.base_classes)
entry.type.scope.declare_inherited_cpp_attributes(base_class)
if scope:
declare_inherited_attributes(entry, base_classes)
scope.declare_var(name="this", cname="this", type=PyrexTypes.CPtrType(entry.type), pos=entry.pos)
if self.is_cpp_class_scope:
entry.type.namespace = self.outer_scope.lookup(self.name).type
return entry
def check_previous_typedef_flag(self, entry, typedef_flag, pos):
if typedef_flag != entry.type.typedef_flag:
error(pos, "'%s' previously declared using '%s'" % (
entry.name, ("cdef", "ctypedef")[entry.type.typedef_flag]))
def check_previous_visibility(self, entry, visibility, pos):
if entry.visibility != visibility:
error(pos, "'%s' previously declared as '%s'" % (
entry.name, entry.visibility))
def declare_enum(self, name, pos, cname, scoped, typedef_flag,
visibility='private', api=0, create_wrapper=0, doc=None):
if name:
if not cname:
if (self.in_cinclude or visibility == 'public'
or visibility == 'extern' or api):
cname = name
else:
cname = self.mangle(Naming.type_prefix, name)
if self.is_cpp_class_scope:
namespace = self.outer_scope.lookup(self.name).type
else:
namespace = None
if scoped:
type = PyrexTypes.CppScopedEnumType(name, cname, namespace, doc=doc)
else:
type = PyrexTypes.CEnumType(name, cname, typedef_flag, namespace, doc=doc)
else:
type = PyrexTypes.c_anon_enum_type
entry = self.declare_type(name, type, pos, cname = cname,
visibility = visibility, api = api)
if scoped:
entry.utility_code = Code.UtilityCode.load_cached("EnumClassDecl", "CppSupport.cpp")
self.use_entry_utility_code(entry)
entry.create_wrapper = create_wrapper
entry.enum_values = []
self.sue_entries.append(entry)
return entry
def declare_tuple_type(self, pos, components):
return self.outer_scope.declare_tuple_type(pos, components)
def declare_var(self, name, type, pos,
cname=None, visibility='private',
api=False, in_pxd=False, is_cdef=False, pytyping_modifiers=None):
# Add an entry for a variable.
if not cname:
if visibility != 'private' or api:
cname = name
else:
cname = self.mangle(Naming.var_prefix, name)
entry = self.declare(name, cname, type, pos, visibility)
entry.is_variable = 1
if type.is_cpp_class and visibility != 'extern':
if self.directives['cpp_locals']:
entry.make_cpp_optional()
else:
type.check_nullary_constructor(pos)
if in_pxd and visibility != 'extern':
entry.defined_in_pxd = 1
entry.used = 1
if api:
entry.api = 1
entry.used = 1
if pytyping_modifiers:
entry.pytyping_modifiers = pytyping_modifiers
return entry
def _reject_pytyping_modifiers(self, pos, modifiers, allowed=()):
if not modifiers:
return
for modifier in modifiers:
if modifier not in allowed:
error(pos, "Modifier '%s' is not allowed here." % modifier)
def declare_assignment_expression_target(self, name, type, pos):
# In most cases declares the variable as normal.
# For generator expressions and comprehensions the variable is declared in their parent
return self.declare_var(name, type, pos)
def declare_builtin(self, name, pos):
name = self.mangle_class_private_name(name)
return self.outer_scope.declare_builtin(name, pos)
def _declare_pyfunction(self, name, pos, visibility='extern', entry=None):
if entry and not entry.type.is_cfunction:
error(pos, "'%s' already declared" % name)
error(entry.pos, "Previous declaration is here")
entry = self.declare_var(name, py_object_type, pos, visibility=visibility)
entry.signature = pyfunction_signature
self.pyfunc_entries.append(entry)
return entry
def declare_pyfunction(self, name, pos, allow_redefine=False, visibility='extern'):
# Add an entry for a Python function.
entry = self.lookup_here(name)
if not allow_redefine:
return self._declare_pyfunction(name, pos, visibility=visibility, entry=entry)
if entry:
if entry.type.is_unspecified:
entry.type = py_object_type
elif entry.type is not py_object_type:
return self._declare_pyfunction(name, pos, visibility=visibility, entry=entry)
else: # declare entry stub
self.declare_var(name, py_object_type, pos, visibility=visibility)
entry = self.declare_var(None, py_object_type, pos,
cname=name, visibility='private')
entry.name = EncodedString(name)
entry.qualified_name = self.qualify_name(name)
entry.signature = pyfunction_signature
entry.is_anonymous = True
return entry
def declare_lambda_function(self, lambda_name, pos):
# Add an entry for an anonymous Python function.
func_cname = self.mangle(Naming.lambda_func_prefix + 'funcdef_', lambda_name)
pymethdef_cname = self.mangle(Naming.lambda_func_prefix + 'methdef_', lambda_name)
qualified_name = self.qualify_name(lambda_name)
entry = self.declare(None, func_cname, py_object_type, pos, 'private')
entry.name = EncodedString(lambda_name)
entry.qualified_name = qualified_name
entry.pymethdef_cname = pymethdef_cname
entry.func_cname = func_cname
entry.signature = pyfunction_signature
entry.is_anonymous = True
return entry
def add_lambda_def(self, def_node):
self.lambda_defs.append(def_node)
def register_pyfunction(self, entry):
self.pyfunc_entries.append(entry)
def declare_cfunction(self, name, type, pos,
cname=None, visibility='private', api=0, in_pxd=0,
defining=0, modifiers=(), utility_code=None, overridable=False):
# Add an entry for a C function.
if not cname:
if visibility != 'private' or api:
cname = name
else:
cname = self.mangle(Naming.func_prefix, name)
inline_in_pxd = 'inline' in modifiers and in_pxd and defining
if inline_in_pxd:
# in_pxd does special things that we don't want to apply to inline functions
in_pxd = False
entry = self.lookup_here(name)
if entry:
if not in_pxd and visibility != entry.visibility and visibility == 'extern':
# Previously declared, but now extern => treat this
# as implementing the function, using the new cname
defining = True
visibility = entry.visibility
entry.cname = cname
entry.func_cname = cname
if visibility != 'private' and visibility != entry.visibility:
warning(pos, "Function '%s' previously declared as '%s', now as '%s'" % (
name, entry.visibility, visibility), 1)
if overridable != entry.is_overridable:
warning(pos, "Function '%s' previously declared as '%s'" % (
name, 'cpdef' if overridable else 'cdef'), 1)
if entry.type.same_as(type):
# Fix with_gil vs nogil.
entry.type = entry.type.with_with_gil(type.with_gil)
else:
if visibility == 'extern' and entry.visibility == 'extern':
can_override = self.is_builtin_scope
if self.is_cpp():
can_override = True
elif cname and not can_override:
# if all alternatives have different cnames,
# it's safe to allow signature overrides
for alt_entry in entry.all_alternatives():
if not alt_entry.cname or cname == alt_entry.cname:
break # cname not unique!
else:
can_override = True
if can_override:
temp = self.add_cfunction(name, type, pos, cname, visibility, modifiers)
temp.overloaded_alternatives = entry.all_alternatives()
if entry.specialiser is not None:
temp.specialiser = entry.specialiser
entry = temp
else:
warning(pos, "Function signature does not match previous declaration", 1)
entry.type = type
elif not in_pxd and entry.defined_in_pxd and type.compatible_signature_with(entry.type):
# TODO: check that this was done by a signature optimisation and not a user error.
#warning(pos, "Function signature does not match previous declaration", 1)
# Cython can't assume anything about cimported functions declared without
# an exception value. This is a performance problem mainly for nogil functions.
if entry.type.nogil and entry.type.exception_value is None and type.exception_value:
performance_hint(
entry.pos,
f"No exception value declared for '{entry.name}' in pxd file.\n"
"Users cimporting this function and calling it without the gil "
"will always require an exception check.\n"
"Suggest adding an explicit exception value.",
self)
entry.type = type
else:
error(pos, "Function signature does not match previous declaration")
else:
entry = self.add_cfunction(name, type, pos, cname, visibility, modifiers)
entry.func_cname = cname
entry.is_overridable = overridable
if inline_in_pxd:
entry.inline_func_in_pxd = True
if in_pxd and visibility != 'extern':
entry.defined_in_pxd = 1
if api:
entry.api = 1
if not defining and not in_pxd and visibility != 'extern':
error(pos, "Non-extern C function '%s' declared but not defined" % name)
if defining:
entry.is_implemented = True
if modifiers:
entry.func_modifiers = modifiers
if utility_code:
assert not entry.utility_code, "duplicate utility code definition in entry %s (%s)" % (name, cname)
entry.utility_code = utility_code
if overridable:
# names of cpdef functions can be used as variables and can be assigned to
var_entry = Entry(name, cname, py_object_type) # FIXME: cname?
var_entry.qualified_name = self.qualify_name(name)
var_entry.is_variable = 1
var_entry.is_pyglobal = 1
var_entry.scope = entry.scope
entry.as_variable = var_entry
type.entry = entry
if (type.exception_check and type.exception_value is None and type.nogil and
not pos[0].in_utility_code and
# don't warn about external functions here - the user likely can't do anything
defining and not in_pxd and not inline_in_pxd):
PyrexTypes.write_noexcept_performance_hint(
pos, self, function_name=name, void_return=type.return_type.is_void)
return entry
def declare_cgetter(self, name, return_type, pos=None, cname=None,
visibility="private", modifiers=(), defining=False, **cfunc_type_config):
assert all(
k in ('exception_value', 'exception_check', 'nogil', 'with_gil', 'is_const_method', 'is_static_method')
for k in cfunc_type_config
)
cfunc_type = PyrexTypes.CFuncType(
return_type,
[PyrexTypes.CFuncTypeArg("self", self.parent_type, None)],
**cfunc_type_config)
entry = self.declare_cfunction(
name, cfunc_type, pos, cname=None, visibility=visibility, modifiers=modifiers, defining=defining)
entry.is_cgetter = True
if cname is not None:
entry.func_cname = cname
return entry
def add_cfunction(self, name, type, pos, cname, visibility, modifiers, inherited=False):
# Add a C function entry without giving it a func_cname.
entry = self.declare(name, cname, type, pos, visibility)
entry.is_cfunction = 1
if modifiers:
entry.func_modifiers = modifiers
if inherited or type.is_fused:
self.cfunc_entries.append(entry)
else:
# For backwards compatibility reasons, we must keep all non-fused methods
# before all fused methods, but separately for each type.
i = len(self.cfunc_entries)
for cfunc_entry in reversed(self.cfunc_entries):
if cfunc_entry.is_inherited or not cfunc_entry.type.is_fused:
break
i -= 1
self.cfunc_entries.insert(i, entry)
return entry
def find(self, name, pos):
# Look up name, report error if not found.
entry = self.lookup(name)
if entry:
return entry
else:
error(pos, "'%s' is not declared" % name)
def find_imported_module(self, path, pos):
# Look up qualified name, must be a module, report error if not found.
# Path is a list of names.
scope = self
for name in path:
entry = scope.find(name, pos)
if not entry:
return None
if entry.as_module:
scope = entry.as_module
else:
error(pos, "'%s' is not a cimported module" % '.'.join(path))
return None
return scope
def lookup(self, name):
# Look up name in this scope or an enclosing one.
# Return None if not found.
mangled_name = self.mangle_class_private_name(name)
entry = (self.lookup_here(name) # lookup here also does mangling
or (self.outer_scope and self.outer_scope.lookup(mangled_name))
or None)
if entry:
return entry
# look up the original name in the outer scope
# Not strictly Python behaviour but see https://github.com/cython/cython/issues/3544
entry = (self.outer_scope and self.outer_scope.lookup(name)) or None
if entry and entry.is_pyglobal:
self._emit_class_private_warning(entry.pos, name)
return entry
def lookup_here(self, name):
# Look up in this scope only, return None if not found.
entry = self.entries.get(self.mangle_class_private_name(name), None)
if entry:
return entry
# Also check the unmangled name in the current scope
# (even if mangling should give us something else).
# This is to support things like global __foo which makes a declaration for __foo
return self.entries.get(name, None)
def lookup_here_unmangled(self, name):
return self.entries.get(name, None)
def lookup_assignment_expression_target(self, name):
# For most cases behaves like "lookup_here".
# However, it does look outwards for comprehension and generator expression scopes
return self.lookup_here(name)
def lookup_target(self, name):
# Look up name in this scope only. Declare as Python
# variable if not found.
entry = self.lookup_here(name)
if not entry:
entry = self.lookup_here_unmangled(name)
if entry and entry.is_pyglobal:
self._emit_class_private_warning(entry.pos, name)
if not entry:
entry = self.declare_var(name, py_object_type, None)
return entry
def _type_or_specialized_type_from_entry(self, entry):
if entry and entry.is_type:
if entry.type.is_fused and self.fused_to_specific:
return entry.type.specialize(self.fused_to_specific)
return entry.type
def lookup_type(self, name):
entry = self.lookup(name)
# The logic here is:
# 1. if entry is a type then return it (and maybe specialize it)
# 2. if the entry comes from a known standard library import then follow that
# 3. repeat step 1 with the (possibly) updated entry
tp = self._type_or_specialized_type_from_entry(entry)
if tp:
return tp
# allow us to find types from the "typing" module and similar
if entry and entry.known_standard_library_import:
from .Builtin import get_known_standard_library_entry
entry = get_known_standard_library_entry(entry.known_standard_library_import)
return self._type_or_specialized_type_from_entry(entry)
def lookup_operator(self, operator, operands):
if operands[0].type.is_cpp_class:
obj_type = operands[0].type
method = obj_type.scope.lookup("operator%s" % operator)
if method is not None:
arg_types = [arg.type for arg in operands[1:]]
res = PyrexTypes.best_match(arg_types, method.all_alternatives())
if res is not None:
return res
function = self.lookup("operator%s" % operator)
function_alternatives = []
if function is not None:
function_alternatives = function.all_alternatives()
# look-up nonmember methods listed within a class
method_alternatives = []
if len(operands) == 2: # binary operators only
for n in range(2):
if operands[n].type.is_cpp_class:
obj_type = operands[n].type
method = obj_type.scope.lookup("operator%s" % operator)
if method is not None:
method_alternatives += method.all_alternatives()
if (not method_alternatives) and (not function_alternatives):
return None
# select the unique alternatives
all_alternatives = list(set(method_alternatives + function_alternatives))
return PyrexTypes.best_match([arg.type for arg in operands],
all_alternatives)
def lookup_operator_for_types(self, pos, operator, types):
from .Nodes import Node
class FakeOperand(Node):
pass
operands = [FakeOperand(pos, type=type) for type in types]
return self.lookup_operator(operator, operands)
def _emit_class_private_warning(self, pos, name):
warning(pos, "Global name %s matched from within class scope "
"in contradiction to Python 'class private name' rules. "
"This may change in a future release." % name, 1)
def use_utility_code(self, new_code):
self.global_scope().use_utility_code(new_code)
def use_entry_utility_code(self, entry):
self.global_scope().use_entry_utility_code(entry)
def defines_any(self, names):
# Test whether any of the given names are defined in this scope.
for name in names:
if name in self.entries:
return 1
return 0
def defines_any_special(self, names):
# Test whether any of the given names are defined as special methods in this scope.
for name in names:
if name in self.entries and self.entries[name].is_special:
return 1
return 0
def infer_types(self):
from .TypeInference import get_type_inferer
get_type_inferer().infer_types(self)
def is_cpp(self):
outer = self.outer_scope
if outer is None:
return False
else:
return outer.is_cpp()
def add_include_file(self, filename, verbatim_include=None, late=False):
self.outer_scope.add_include_file(filename, verbatim_include, late)
def name_in_module_state(self, cname):
# TODO - override to give more choices depending on the type of scope
# e.g. slot, function, method
return f"{Naming.modulestateglobal_cname}->{cname}"
def find_shared_usages_of_type(self, type_to_find, _seen_scopes=None):
if _seen_scopes is None:
_seen_scopes = set()
include_all_entries = not self.is_module_scope
for entry in self.entries.values():
if not (include_all_entries or entry.defined_in_pxd or entry.visibility == "public" or entry.api):
continue
entry_subtypes = PyrexTypes.get_all_subtypes(entry.type)
if any(type_to_find == sub_tp for sub_tp in entry_subtypes):
return True
type_scope = getattr(entry.type, "scope", None)
if type_scope is None or type_scope in _seen_scopes:
continue
_seen_scopes.add(type_scope)
if type_scope.find_shared_usages_of_type(type_to_find, _seen_scopes):
return True
return False
| Scope |
python | euske__pdfminer | pdfminer/ccitt.py | {
"start": 1271,
"end": 18771
} | class ____(BitParser):
MODE = [None, None]
BitParser.add(MODE, 0, '1')
BitParser.add(MODE, +1, '011')
BitParser.add(MODE, -1, '010')
BitParser.add(MODE, 'h', '001')
BitParser.add(MODE, 'p', '0001')
BitParser.add(MODE, +2, '000011')
BitParser.add(MODE, -2, '000010')
BitParser.add(MODE, +3, '0000011')
BitParser.add(MODE, -3, '0000010')
BitParser.add(MODE, 'u', '0000001111')
BitParser.add(MODE, 'x1', '0000001000')
BitParser.add(MODE, 'x2', '0000001001')
BitParser.add(MODE, 'x3', '0000001010')
BitParser.add(MODE, 'x4', '0000001011')
BitParser.add(MODE, 'x5', '0000001100')
BitParser.add(MODE, 'x6', '0000001101')
BitParser.add(MODE, 'x7', '0000001110')
BitParser.add(MODE, 'e', '000000000001000000000001')
WHITE = [None, None]
BitParser.add(WHITE, 0 , '00110101')
BitParser.add(WHITE, 1 , '000111')
BitParser.add(WHITE, 2 , '0111')
BitParser.add(WHITE, 3 , '1000')
BitParser.add(WHITE, 4 , '1011')
BitParser.add(WHITE, 5 , '1100')
BitParser.add(WHITE, 6 , '1110')
BitParser.add(WHITE, 7 , '1111')
BitParser.add(WHITE, 8 , '10011')
BitParser.add(WHITE, 9 , '10100')
BitParser.add(WHITE, 10 , '00111')
BitParser.add(WHITE, 11 , '01000')
BitParser.add(WHITE, 12 , '001000')
BitParser.add(WHITE, 13 , '000011')
BitParser.add(WHITE, 14 , '110100')
BitParser.add(WHITE, 15 , '110101')
BitParser.add(WHITE, 16 , '101010')
BitParser.add(WHITE, 17 , '101011')
BitParser.add(WHITE, 18 , '0100111')
BitParser.add(WHITE, 19 , '0001100')
BitParser.add(WHITE, 20 , '0001000')
BitParser.add(WHITE, 21 , '0010111')
BitParser.add(WHITE, 22 , '0000011')
BitParser.add(WHITE, 23 , '0000100')
BitParser.add(WHITE, 24 , '0101000')
BitParser.add(WHITE, 25 , '0101011')
BitParser.add(WHITE, 26 , '0010011')
BitParser.add(WHITE, 27 , '0100100')
BitParser.add(WHITE, 28 , '0011000')
BitParser.add(WHITE, 29 , '00000010')
BitParser.add(WHITE, 30 , '00000011')
BitParser.add(WHITE, 31 , '00011010')
BitParser.add(WHITE, 32 , '00011011')
BitParser.add(WHITE, 33 , '00010010')
BitParser.add(WHITE, 34 , '00010011')
BitParser.add(WHITE, 35 , '00010100')
BitParser.add(WHITE, 36 , '00010101')
BitParser.add(WHITE, 37 , '00010110')
BitParser.add(WHITE, 38 , '00010111')
BitParser.add(WHITE, 39 , '00101000')
BitParser.add(WHITE, 40 , '00101001')
BitParser.add(WHITE, 41 , '00101010')
BitParser.add(WHITE, 42 , '00101011')
BitParser.add(WHITE, 43 , '00101100')
BitParser.add(WHITE, 44 , '00101101')
BitParser.add(WHITE, 45 , '00000100')
BitParser.add(WHITE, 46 , '00000101')
BitParser.add(WHITE, 47 , '00001010')
BitParser.add(WHITE, 48 , '00001011')
BitParser.add(WHITE, 49 , '01010010')
BitParser.add(WHITE, 50 , '01010011')
BitParser.add(WHITE, 51 , '01010100')
BitParser.add(WHITE, 52 , '01010101')
BitParser.add(WHITE, 53 , '00100100')
BitParser.add(WHITE, 54 , '00100101')
BitParser.add(WHITE, 55 , '01011000')
BitParser.add(WHITE, 56 , '01011001')
BitParser.add(WHITE, 57 , '01011010')
BitParser.add(WHITE, 58 , '01011011')
BitParser.add(WHITE, 59 , '01001010')
BitParser.add(WHITE, 60 , '01001011')
BitParser.add(WHITE, 61 , '00110010')
BitParser.add(WHITE, 62 , '00110011')
BitParser.add(WHITE, 63 , '00110100')
BitParser.add(WHITE, 64 , '11011')
BitParser.add(WHITE, 128 , '10010')
BitParser.add(WHITE, 192 , '010111')
BitParser.add(WHITE, 256 , '0110111')
BitParser.add(WHITE, 320 , '00110110')
BitParser.add(WHITE, 384 , '00110111')
BitParser.add(WHITE, 448 , '01100100')
BitParser.add(WHITE, 512 , '01100101')
BitParser.add(WHITE, 576 , '01101000')
BitParser.add(WHITE, 640 , '01100111')
BitParser.add(WHITE, 704 , '011001100')
BitParser.add(WHITE, 768 , '011001101')
BitParser.add(WHITE, 832 , '011010010')
BitParser.add(WHITE, 896 , '011010011')
BitParser.add(WHITE, 960 , '011010100')
BitParser.add(WHITE, 1024, '011010101')
BitParser.add(WHITE, 1088, '011010110')
BitParser.add(WHITE, 1152, '011010111')
BitParser.add(WHITE, 1216, '011011000')
BitParser.add(WHITE, 1280, '011011001')
BitParser.add(WHITE, 1344, '011011010')
BitParser.add(WHITE, 1408, '011011011')
BitParser.add(WHITE, 1472, '010011000')
BitParser.add(WHITE, 1536, '010011001')
BitParser.add(WHITE, 1600, '010011010')
BitParser.add(WHITE, 1664, '011000')
BitParser.add(WHITE, 1728, '010011011')
BitParser.add(WHITE, 1792, '00000001000')
BitParser.add(WHITE, 1856, '00000001100')
BitParser.add(WHITE, 1920, '00000001101')
BitParser.add(WHITE, 1984, '000000010010')
BitParser.add(WHITE, 2048, '000000010011')
BitParser.add(WHITE, 2112, '000000010100')
BitParser.add(WHITE, 2176, '000000010101')
BitParser.add(WHITE, 2240, '000000010110')
BitParser.add(WHITE, 2304, '000000010111')
BitParser.add(WHITE, 2368, '000000011100')
BitParser.add(WHITE, 2432, '000000011101')
BitParser.add(WHITE, 2496, '000000011110')
BitParser.add(WHITE, 2560, '000000011111')
BLACK = [None, None]
BitParser.add(BLACK, 0 , '0000110111')
BitParser.add(BLACK, 1 , '010')
BitParser.add(BLACK, 2 , '11')
BitParser.add(BLACK, 3 , '10')
BitParser.add(BLACK, 4 , '011')
BitParser.add(BLACK, 5 , '0011')
BitParser.add(BLACK, 6 , '0010')
BitParser.add(BLACK, 7 , '00011')
BitParser.add(BLACK, 8 , '000101')
BitParser.add(BLACK, 9 , '000100')
BitParser.add(BLACK, 10 , '0000100')
BitParser.add(BLACK, 11 , '0000101')
BitParser.add(BLACK, 12 , '0000111')
BitParser.add(BLACK, 13 , '00000100')
BitParser.add(BLACK, 14 , '00000111')
BitParser.add(BLACK, 15 , '000011000')
BitParser.add(BLACK, 16 , '0000010111')
BitParser.add(BLACK, 17 , '0000011000')
BitParser.add(BLACK, 18 , '0000001000')
BitParser.add(BLACK, 19 , '00001100111')
BitParser.add(BLACK, 20 , '00001101000')
BitParser.add(BLACK, 21 , '00001101100')
BitParser.add(BLACK, 22 , '00000110111')
BitParser.add(BLACK, 23 , '00000101000')
BitParser.add(BLACK, 24 , '00000010111')
BitParser.add(BLACK, 25 , '00000011000')
BitParser.add(BLACK, 26 , '000011001010')
BitParser.add(BLACK, 27 , '000011001011')
BitParser.add(BLACK, 28 , '000011001100')
BitParser.add(BLACK, 29 , '000011001101')
BitParser.add(BLACK, 30 , '000001101000')
BitParser.add(BLACK, 31 , '000001101001')
BitParser.add(BLACK, 32 , '000001101010')
BitParser.add(BLACK, 33 , '000001101011')
BitParser.add(BLACK, 34 , '000011010010')
BitParser.add(BLACK, 35 , '000011010011')
BitParser.add(BLACK, 36 , '000011010100')
BitParser.add(BLACK, 37 , '000011010101')
BitParser.add(BLACK, 38 , '000011010110')
BitParser.add(BLACK, 39 , '000011010111')
BitParser.add(BLACK, 40 , '000001101100')
BitParser.add(BLACK, 41 , '000001101101')
BitParser.add(BLACK, 42 , '000011011010')
BitParser.add(BLACK, 43 , '000011011011')
BitParser.add(BLACK, 44 , '000001010100')
BitParser.add(BLACK, 45 , '000001010101')
BitParser.add(BLACK, 46 , '000001010110')
BitParser.add(BLACK, 47 , '000001010111')
BitParser.add(BLACK, 48 , '000001100100')
BitParser.add(BLACK, 49 , '000001100101')
BitParser.add(BLACK, 50 , '000001010010')
BitParser.add(BLACK, 51 , '000001010011')
BitParser.add(BLACK, 52 , '000000100100')
BitParser.add(BLACK, 53 , '000000110111')
BitParser.add(BLACK, 54 , '000000111000')
BitParser.add(BLACK, 55 , '000000100111')
BitParser.add(BLACK, 56 , '000000101000')
BitParser.add(BLACK, 57 , '000001011000')
BitParser.add(BLACK, 58 , '000001011001')
BitParser.add(BLACK, 59 , '000000101011')
BitParser.add(BLACK, 60 , '000000101100')
BitParser.add(BLACK, 61 , '000001011010')
BitParser.add(BLACK, 62 , '000001100110')
BitParser.add(BLACK, 63 , '000001100111')
BitParser.add(BLACK, 64 , '0000001111')
BitParser.add(BLACK, 128 , '000011001000')
BitParser.add(BLACK, 192 , '000011001001')
BitParser.add(BLACK, 256 , '000001011011')
BitParser.add(BLACK, 320 , '000000110011')
BitParser.add(BLACK, 384 , '000000110100')
BitParser.add(BLACK, 448 , '000000110101')
BitParser.add(BLACK, 512 , '0000001101100')
BitParser.add(BLACK, 576 , '0000001101101')
BitParser.add(BLACK, 640 , '0000001001010')
BitParser.add(BLACK, 704 , '0000001001011')
BitParser.add(BLACK, 768 , '0000001001100')
BitParser.add(BLACK, 832 , '0000001001101')
BitParser.add(BLACK, 896 , '0000001110010')
BitParser.add(BLACK, 960 , '0000001110011')
BitParser.add(BLACK, 1024, '0000001110100')
BitParser.add(BLACK, 1088, '0000001110101')
BitParser.add(BLACK, 1152, '0000001110110')
BitParser.add(BLACK, 1216, '0000001110111')
BitParser.add(BLACK, 1280, '0000001010010')
BitParser.add(BLACK, 1344, '0000001010011')
BitParser.add(BLACK, 1408, '0000001010100')
BitParser.add(BLACK, 1472, '0000001010101')
BitParser.add(BLACK, 1536, '0000001011010')
BitParser.add(BLACK, 1600, '0000001011011')
BitParser.add(BLACK, 1664, '0000001100100')
BitParser.add(BLACK, 1728, '0000001100101')
BitParser.add(BLACK, 1792, '00000001000')
BitParser.add(BLACK, 1856, '00000001100')
BitParser.add(BLACK, 1920, '00000001101')
BitParser.add(BLACK, 1984, '000000010010')
BitParser.add(BLACK, 2048, '000000010011')
BitParser.add(BLACK, 2112, '000000010100')
BitParser.add(BLACK, 2176, '000000010101')
BitParser.add(BLACK, 2240, '000000010110')
BitParser.add(BLACK, 2304, '000000010111')
BitParser.add(BLACK, 2368, '000000011100')
BitParser.add(BLACK, 2432, '000000011101')
BitParser.add(BLACK, 2496, '000000011110')
BitParser.add(BLACK, 2560, '000000011111')
UNCOMPRESSED = [None, None]
BitParser.add(UNCOMPRESSED, '1', '1')
BitParser.add(UNCOMPRESSED, '01', '01')
BitParser.add(UNCOMPRESSED, '001', '001')
BitParser.add(UNCOMPRESSED, '0001', '0001')
BitParser.add(UNCOMPRESSED, '00001', '00001')
BitParser.add(UNCOMPRESSED, '00000', '000001')
BitParser.add(UNCOMPRESSED, 'T00', '00000011')
BitParser.add(UNCOMPRESSED, 'T10', '00000010')
BitParser.add(UNCOMPRESSED, 'T000', '000000011')
BitParser.add(UNCOMPRESSED, 'T100', '000000010')
BitParser.add(UNCOMPRESSED, 'T0000', '0000000011')
BitParser.add(UNCOMPRESSED, 'T1000', '0000000010')
BitParser.add(UNCOMPRESSED, 'T00000', '00000000011')
BitParser.add(UNCOMPRESSED, 'T10000', '00000000010')
class EOFB(Exception):
pass
class InvalidData(Exception):
pass
class ByteSkip(Exception):
pass
def __init__(self, width, bytealign=False):
BitParser.__init__(self)
self.width = width
self.bytealign = bytealign
self.reset()
return
def feedbytes(self, data):
for b in data:
try:
for m in (128, 64, 32, 16, 8, 4, 2, 1):
self._parse_bit(b & m)
except self.ByteSkip:
self._accept = self._parse_mode
self._state = self.MODE
except self.EOFB:
break
return
def _parse_mode(self, mode):
if mode == 'p':
self._do_pass()
self._flush_line()
return self.MODE
elif mode == 'h':
self._n1 = 0
self._accept = self._parse_horiz1
if self._color:
return self.WHITE
else:
return self.BLACK
elif mode == 'u':
self._accept = self._parse_uncompressed
return self.UNCOMPRESSED
elif mode == 'e':
raise self.EOFB
elif isinstance(mode, int):
self._do_vertical(mode)
self._flush_line()
return self.MODE
else:
raise self.InvalidData(mode)
def _parse_horiz1(self, n):
if n is None:
raise self.InvalidData
self._n1 += n
if n < 64:
self._n2 = 0
self._color = 1-self._color
self._accept = self._parse_horiz2
if self._color:
return self.WHITE
else:
return self.BLACK
def _parse_horiz2(self, n):
if n is None:
raise self.InvalidData
self._n2 += n
if n < 64:
self._color = 1-self._color
self._accept = self._parse_mode
self._do_horizontal(self._n1, self._n2)
self._flush_line()
return self.MODE
elif self._color:
return self.WHITE
else:
return self.BLACK
def _parse_uncompressed(self, bits):
if not bits:
raise self.InvalidData
if bits.startswith('T'):
self._accept = self._parse_mode
self._color = int(bits[1])
self._do_uncompressed(bits[2:])
return self.MODE
else:
self._do_uncompressed(bits)
return self.UNCOMPRESSED
def _get_bits(self):
return ''.join(str(b) for b in self._curline[:self._curpos])
def _get_refline(self, i):
if i < 0:
return '[]'+''.join(str(b) for b in self._refline)
elif len(self._refline) <= i:
return ''.join(str(b) for b in self._refline)+'[]'
else:
return (''.join(str(b) for b in self._refline[:i]) +
'['+str(self._refline[i])+']' +
''.join(str(b) for b in self._refline[i+1:]))
def reset(self):
self._y = 0
self._curline = array.array('b', [1]*self.width)
self._reset_line()
self._accept = self._parse_mode
self._state = self.MODE
return
def output_line(self, y, bits):
print(y, ''.join(str(b) for b in bits))
return
def _reset_line(self):
self._refline = self._curline
self._curline = array.array('b', [1]*self.width)
self._curpos = -1
self._color = 1
return
def _flush_line(self):
if self.width <= self._curpos:
self.output_line(self._y, self._curline)
self._y += 1
self._reset_line()
if self.bytealign:
raise self.ByteSkip
return
def _do_vertical(self, dx):
#print('* vertical(%d): curpos=%r, color=%r' % (dx, self._curpos, self._color))
#print(' refline:', self._get_refline(self._curpos+1))
x1 = self._curpos+1
while 1:
if x1 == 0:
if (self._color == 1 and self._refline[x1] != self._color):
break
elif x1 == len(self._refline):
break
elif (self._refline[x1-1] == self._color and
self._refline[x1] != self._color):
break
x1 += 1
x1 += dx
x0 = max(0, self._curpos)
x1 = max(0, min(self.width, x1))
if x1 < x0:
for x in range(x1, x0):
self._curline[x] = self._color
elif x0 < x1:
for x in range(x0, x1):
self._curline[x] = self._color
self._curpos = x1
self._color = 1-self._color
return
def _do_pass(self):
#print('* pass: curpos=%r, color=%r' % (self._curpos, self._color))
#print(' refline:', self._get_refline(self._curpos+1))
x1 = self._curpos+1
while 1:
if x1 == 0:
if (self._color == 1 and self._refline[x1] != self._color):
break
elif x1 == len(self._refline):
break
elif (self._refline[x1-1] == self._color and
self._refline[x1] != self._color):
break
x1 += 1
while 1:
if x1 == 0:
if (self._color == 0 and self._refline[x1] == self._color):
break
elif x1 == len(self._refline):
break
elif (self._refline[x1-1] != self._color and
self._refline[x1] == self._color):
break
x1 += 1
for x in range(self._curpos, x1):
self._curline[x] = self._color
self._curpos = x1
return
def _do_horizontal(self, n1, n2):
#print('* horizontal(%d,%d): curpos=%r, color=%r' % (n1, n2, self._curpos, self._color))
if self._curpos < 0:
self._curpos = 0
x = self._curpos
for _ in range(n1):
if len(self._curline) <= x:
break
self._curline[x] = self._color
x += 1
for _ in range(n2):
if len(self._curline) <= x:
break
self._curline[x] = 1-self._color
x += 1
self._curpos = x
return
def _do_uncompressed(self, bits):
#print('* uncompressed(%r): curpos=%r' % (bits, self._curpos))
for c in bits:
self._curline[self._curpos] = int(c)
self._curpos += 1
self._flush_line()
return
import unittest
## Test cases
##
| CCITTG4Parser |
python | walkccc__LeetCode | solutions/215. Kth Largest Element in an Array/215-2.py | {
"start": 0,
"end": 672
} | class ____:
def findKthLargest(self, nums: list[int], k: int) -> int:
def quickSelect(l: int, r: int, k: int) -> int:
pivot = nums[r]
nextSwapped = l
for i in range(l, r):
if nums[i] >= pivot:
nums[nextSwapped], nums[i] = nums[i], nums[nextSwapped]
nextSwapped += 1
nums[nextSwapped], nums[r] = nums[r], nums[nextSwapped]
count = nextSwapped - l + 1 # Number of nums >= pivot
if count == k:
return nums[nextSwapped]
if count > k:
return quickSelect(l, nextSwapped - 1, k)
return quickSelect(nextSwapped + 1, r, k - count)
return quickSelect(0, len(nums) - 1, k)
| Solution |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_automation_rule_views.py | {
"start": 12111,
"end": 12450
} | class ____(TestAutomationRulesViews):
@pytest.fixture(autouse=True)
def setup_organization(self, settings):
settings.RTD_ALLOW_ORGANIZATIONS = True
self.organization = get(
Organization,
owners=[self.user],
projects=[self.project],
)
| TestAutomationRulesViewsWithOrganizations |
python | keras-team__keras | keras/src/ops/core.py | {
"start": 16457,
"end": 18298
} | class ____(Operation):
def call(self, inputs, start_indices, updates):
return backend.core.slice_update(inputs, start_indices, updates)
def compute_output_spec(self, inputs, start_indices, updates):
return KerasTensor(inputs.shape, dtype=inputs.dtype)
@keras_export("keras.ops.slice_update")
def slice_update(inputs, start_indices, updates):
"""Update an input by slicing in a tensor of updated values.
At a high level, this operation does
`inputs[start_indices: start_indices + updates.shape] = updates`.
Assume inputs is a tensor of shape `(D0, D1, ..., Dn)`,
`start_indices` must be a list/tuple of n integers, specifying the starting
indices. `updates` must have the same rank as `inputs`, and the size of each
dim must not exceed `Di - start_indices[i]`. For example, if we have 2D
inputs `inputs = np.zeros((5, 5))`, and we want to update the intersection
of last 2 rows and last 2 columns as 1, i.e.,
`inputs[3:, 3:] = np.ones((2, 2))`, then we can use the code below:
```python
inputs = np.zeros((5, 5))
start_indices = [3, 3]
updates = np.ones((2, 2))
inputs = keras.ops.slice_update(inputs, start_indices, updates)
```
Args:
inputs: A tensor, the tensor to be updated.
start_indices: A list/tuple of shape `(inputs.ndim,)`, specifying
the starting indices for updating.
updates: A tensor, the new values to be put to `inputs` at `indices`.
`updates` must have the same rank as `inputs`.
Returns:
A tensor, has the same shape and dtype as `inputs`.
"""
if any_symbolic_tensors((inputs, start_indices, updates)):
return SliceUpdate().symbolic_call(inputs, start_indices, updates)
return backend.core.slice_update(inputs, start_indices, updates)
| SliceUpdate |
python | getsentry__sentry | src/sentry/db/postgres/schema.py | {
"start": 4781,
"end": 6227
} | class ____:
"""
Wrapper that allows us to use either the `SafePostgresDatabaseSchemaEditor` or
`PostgresDatabaseSchemaEditor`. Can be configured by setting the `safe` property
before using to edit the schema. If already in use, attempts to modify `safe` will
fail.
"""
class AlreadyInUse(Exception):
pass
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self._safe = False
self._schema_editor = None
@property
def safe(self):
return self._safe
@safe.setter
def safe(self, safe):
if self._schema_editor is not None:
raise self.AlreadyInUse("Schema editor already in use, can't set `safe`")
self._safe = safe
@property
def schema_editor(self):
if self._schema_editor is None:
schema_editor_cls = (
SafePostgresDatabaseSchemaEditor if self.safe else MakeBtreeGistSchemaEditor
)
schema_editor = schema_editor_cls(*self.args, **self.kwargs)
schema_editor.__enter__()
self._schema_editor = schema_editor
return self._schema_editor
def __getattr__(self, name):
return getattr(self.schema_editor, name)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.schema_editor.__exit__(exc_type, exc_val, exc_tb)
| DatabaseSchemaEditorProxy |
python | conda__conda | conda/core/prefix_data.py | {
"start": 2112,
"end": 3063
} | class ____(type):
"""Basic caching of PrefixData instance objects."""
@deprecated.argument(
"25.9", "26.3", "pip_interop_enabled", rename="interoperability"
)
def __call__(
cls,
prefix_path: PathType,
interoperability: bool | None = None,
) -> PrefixData:
if isinstance(prefix_path, PrefixData):
return prefix_path
prefix_path = Path(prefix_path)
interoperability = (
interoperability
if interoperability is not None
else context.prefix_data_interoperability
)
cache_key = prefix_path, interoperability
if cache_key in PrefixData._cache_:
return PrefixData._cache_[cache_key]
else:
prefix_data_instance = super().__call__(prefix_path, interoperability)
PrefixData._cache_[cache_key] = prefix_data_instance
return prefix_data_instance
| PrefixDataType |
python | getsentry__sentry | src/sentry/integrations/github/integration.py | {
"start": 41063,
"end": 45943
} | class ____:
client: GithubSetupApiClient
def dispatch(self, request: HttpRequest, pipeline: IntegrationPipeline) -> HttpResponseBase:
with record_event(IntegrationPipelineViewType.OAUTH_LOGIN).capture() as lifecycle:
self.active_user_organization = determine_active_organization(request)
lifecycle.add_extra(
"organization_id",
(
self.active_user_organization.organization.id
if self.active_user_organization
else None
),
)
ghip = GitHubIdentityProvider()
github_client_id = ghip.get_oauth_client_id()
github_client_secret = ghip.get_oauth_client_secret()
installation_id = request.GET.get("installation_id")
if installation_id:
pipeline.bind_state("installation_id", installation_id)
if not request.GET.get("state"):
state = pipeline.signature
redirect_uri = absolute_uri(
reverse(
"sentry-extension-setup",
kwargs={"provider_id": IntegrationProviderSlug.GITHUB.value},
)
)
return HttpResponseRedirect(
f"{ghip.get_oauth_authorize_url()}?client_id={github_client_id}&state={state}&redirect_uri={redirect_uri}"
)
# At this point, we are past the GitHub "authorize" step
if request.GET.get("state") != pipeline.signature:
lifecycle.record_failure(GitHubInstallationError.INVALID_STATE)
return error(
request,
self.active_user_organization,
error_short=GitHubInstallationError.INVALID_STATE,
)
# similar to OAuth2CallbackView.get_token_params
data = {
"code": request.GET.get("code"),
"client_id": github_client_id,
"client_secret": github_client_secret,
}
# similar to OAuth2CallbackView.exchange_token
req = safe_urlopen(url=ghip.get_oauth_access_token_url(), data=data)
try:
body = safe_urlread(req).decode("utf-8")
payload = dict(parse_qsl(body))
except Exception:
payload = {}
if "access_token" not in payload:
lifecycle.record_failure(GitHubInstallationError.MISSING_TOKEN)
return error(
request,
self.active_user_organization,
error_short=GitHubInstallationError.MISSING_TOKEN,
)
self.client = GithubSetupApiClient(access_token=payload["access_token"])
authenticated_user_info = self.client.get_user_info()
if self.active_user_organization is not None:
owner_orgs = self._get_owner_github_organizations()
installation_info = self._get_eligible_multi_org_installations(
owner_orgs=owner_orgs
)
pipeline.bind_state("existing_installation_info", installation_info)
if "login" not in authenticated_user_info:
lifecycle.record_failure(GitHubInstallationError.MISSING_LOGIN)
return error(
request,
self.active_user_organization,
error_short=GitHubInstallationError.MISSING_LOGIN,
)
pipeline.bind_state("github_authenticated_user", authenticated_user_info["login"])
return pipeline.next_step()
def _get_owner_github_organizations(self) -> list[str]:
user_org_membership_details = self.client.get_organization_memberships_for_user()
return [
gh_org.get("organization", {}).get("login")
for gh_org in user_org_membership_details
if (
gh_org.get("role", "").lower() == "admin"
and gh_org.get("state", "").lower() == "active"
)
]
def _get_eligible_multi_org_installations(
self, owner_orgs: list[str]
) -> list[GithubInstallationInfo]:
installed_orgs = self.client.get_user_info_installations()
return [
{
"installation_id": str(installation.get("id")),
"github_account": installation.get("account").get("login"),
"avatar_url": installation.get("account").get("avatar_url"),
}
for installation in installed_orgs["installations"]
if (
installation.get("account").get("login") in owner_orgs
or installation.get("target_type") == "User"
)
]
| OAuthLoginView |
python | apache__airflow | providers/apache/hive/tests/unit/apache/hive/macros/test_hive.py | {
"start": 910,
"end": 1901
} | class ____:
def test_closest_ds_partition(self):
date1 = datetime.strptime("2017-04-24", "%Y-%m-%d")
date2 = datetime.strptime("2017-04-25", "%Y-%m-%d")
date3 = datetime.strptime("2017-04-26", "%Y-%m-%d")
date4 = datetime.strptime("2017-04-28", "%Y-%m-%d")
date5 = datetime.strptime("2017-04-29", "%Y-%m-%d")
target_dt = datetime.strptime("2017-04-27", "%Y-%m-%d")
date_list = [date1, date2, date3, date4, date5]
assert str(hive._closest_date(target_dt, date_list, True)) == "2017-04-26"
assert str(hive._closest_date(target_dt, date_list, False)) == "2017-04-28"
# when before is not set, the closest date should be returned
assert str(hive._closest_date(target_dt, [date1, date2, date3, date5], None)) == "2017-04-26"
assert str(hive._closest_date(target_dt, [date1, date2, date4, date5])) == "2017-04-28"
assert str(hive._closest_date(target_dt, date_list)) == "2017-04-26"
| TestHive |
python | simonw__sqlite-utils | sqlite_utils/db.py | {
"start": 158595,
"end": 160696
} | class ____(Queryable):
def exists(self):
return True
def __repr__(self) -> str:
return "<View {} ({})>".format(
self.name, ", ".join(c.name for c in self.columns)
)
def drop(self, ignore=False):
"""
Drop this view.
:param ignore: Set to ``True`` to ignore the error if the view does not exist
"""
try:
self.db.execute("DROP VIEW {}".format(quote_identifier(self.name)))
except sqlite3.OperationalError:
if not ignore:
raise
def enable_fts(self, *args, **kwargs):
"``enable_fts()`` is supported on tables but not on views."
raise NotImplementedError(
"enable_fts() is supported on tables but not on views"
)
def jsonify_if_needed(value):
if isinstance(value, decimal.Decimal):
return float(value)
if isinstance(value, (dict, list, tuple)):
return json.dumps(value, default=repr, ensure_ascii=False)
elif isinstance(value, (datetime.time, datetime.date, datetime.datetime)):
return value.isoformat()
elif isinstance(value, datetime.timedelta):
return str(value)
elif isinstance(value, uuid.UUID):
return str(value)
else:
return value
def resolve_extracts(
extracts: Optional[Union[Dict[str, str], List[str], Tuple[str]]],
) -> dict:
if extracts is None:
extracts = {}
if isinstance(extracts, (list, tuple)):
extracts = {item: item for item in extracts}
return extracts
def _decode_default_value(value):
if value.startswith("'") and value.endswith("'"):
# It's a string
return value[1:-1]
if value.isdigit():
# It's an integer
return int(value)
if value.startswith("X'") and value.endswith("'"):
# It's a binary string, stored as hex
to_decode = value[2:-1]
return binascii.unhexlify(to_decode)
# If it is a string containing a floating point number:
try:
return float(value)
except ValueError:
pass
return value
| View |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/assets/graph/asset_graph_differ.py | {
"start": 1223,
"end": 1832
} | class ____:
"""Represents the diff information for changes between assets.
Change types in change_types should have diff info for their corresponding fields
if diff info is requested.
"NEW" and "REMOVED" change types do not have diff info.
"""
change_types: Set[AssetDefinitionChangeType]
code_version: Optional[ValueDiff[Optional[str]]] = None
dependencies: Optional[DictDiff[AssetKey]] = None
partitions_definition: Optional[ValueDiff[Optional[str]]] = None
tags: Optional[DictDiff[str]] = None
metadata: Optional[DictDiff[str]] = None
| AssetDefinitionDiffDetails |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/vertex_ai/test_batch_prediction_job.py | {
"start": 6725,
"end": 10643
} | class ____:
def setup_method(self):
with mock.patch(
BASE_STRING.format("GoogleBaseHook.__init__"), new=mock_base_gcp_hook_no_default_project_id
):
self.hook = BatchPredictionJobHook(gcp_conn_id=TEST_GCP_CONN_ID)
@mock.patch(BATCH_PREDICTION_JOB_STRING.format("BatchPredictionJob.create"))
@mock.patch(BATCH_PREDICTION_JOB_STRING.format("BatchPredictionJobHook.get_credentials"))
def test_create_create_batch_prediction_job(self, mock_get_credentials, mock_create):
expected_job = mock_create.return_value
invoke_params = deepcopy(TEST_CREATE_BATCH_PREDICTION_JOB_PARAMETERS)
invoke_params["sync"] = True
expected_params = deepcopy(invoke_params)
expected_params["credentials"] = mock_get_credentials.return_value
expected_params["project"] = expected_params.pop("project_id")
expected_params["location"] = expected_params.pop("region")
actual_job = self.hook.create_batch_prediction_job(**invoke_params)
mock_get_credentials.assert_called_once()
mock_create.assert_called_once_with(**expected_params)
assert actual_job == expected_job
@mock.patch(BATCH_PREDICTION_JOB_STRING.format("BatchPredictionJobHook.get_job_service_client"))
def test_delete_batch_prediction_job(self, mock_client) -> None:
self.hook.delete_batch_prediction_job(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
batch_prediction_job=TEST_BATCH_PREDICTION_JOB,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.delete_batch_prediction_job.assert_called_once_with(
request=dict(
name=mock_client.return_value.batch_prediction_job_path.return_value,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.batch_prediction_job_path.assert_called_once_with(
TEST_PROJECT_ID,
TEST_REGION,
TEST_BATCH_PREDICTION_JOB,
)
@mock.patch(BATCH_PREDICTION_JOB_STRING.format("BatchPredictionJobHook.get_job_service_client"))
def test_get_batch_prediction_job(self, mock_client) -> None:
self.hook.get_batch_prediction_job(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
batch_prediction_job=TEST_BATCH_PREDICTION_JOB,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.get_batch_prediction_job.assert_called_once_with(
request=dict(
name=mock_client.return_value.batch_prediction_job_path.return_value,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.batch_prediction_job_path.assert_called_once_with(
TEST_PROJECT_ID,
TEST_REGION,
TEST_BATCH_PREDICTION_JOB,
)
@mock.patch(BATCH_PREDICTION_JOB_STRING.format("BatchPredictionJobHook.get_job_service_client"))
def test_list_batch_prediction_jobs(self, mock_client) -> None:
self.hook.list_batch_prediction_jobs(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.list_batch_prediction_jobs.assert_called_once_with(
request=dict(
parent=mock_client.return_value.common_location_path.return_value,
filter=None,
page_size=None,
page_token=None,
read_mask=None,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.common_location_path.assert_called_once_with(TEST_PROJECT_ID, TEST_REGION)
| TestBatchPredictionJobWithoutDefaultProjectIdHook |
python | mkdocstrings__mkdocstrings | src/mkdocstrings/_internal/handlers/base.py | {
"start": 2099,
"end": 2729
} | class ____(Exception): # noqa: N818
"""An exception raised to tell a theme is not supported."""
def do_any(seq: Sequence, attribute: str | None = None) -> bool:
"""Check if at least one of the item in the sequence evaluates to true.
The `any` builtin as a filter for Jinja templates.
Arguments:
seq: An iterable object.
attribute: The attribute name to use on each object of the iterable.
Returns:
A boolean telling if any object of the iterable evaluated to True.
"""
if attribute is None:
return any(seq)
return any(_[attribute] for _ in seq)
| ThemeNotSupported |
python | huggingface__transformers | src/transformers/models/mllama/modeling_mllama.py | {
"start": 70975,
"end": 79957
} | class ____(MllamaPreTrainedModel, GenerationMixin):
_checkpoint_conversion_mapping = {
r"^language_model.model": "model.language_model",
r"^vision_model": "model.vision_model",
r"^multi_modal_projector": "model.multi_modal_projector",
r"^language_model.lm_head": "lm_head",
}
# _tied_weights_keys = {"lm_head.weight": "model.language_moddel.embed_tokens.weight"}
def __init__(self, config: MllamaConfig):
super().__init__(config)
self.model = MllamaModel(config)
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
aspect_ratio_mask: Optional[torch.Tensor] = None,
aspect_ratio_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
cross_attention_mask: Optional[torch.Tensor] = None,
cross_attention_states: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, CausalLMOutputWithPast]:
r"""
aspect_ratio_mask (`torch.Tensor` of shape `(batch_size, max_num_images, max_num_tiles)`, *optional*):
Mask to avoid performing attention on padding tiles. Mask values selected in `[0, 1]`:
- 1 for tiles that are **not masked**,
- 0 for tiles that are **masked**.
aspect_ratio_ids (`torch.Tensor` of shape `(batch_size, max_num_images)`, *optional*):
Aspect ratio ids used to select the appropriate precomputed tile embeddings based on the aspect ratio of each input image.
These ids correspond to indices in the model's list of supported aspect ratios, offset by 1.
For example, if the model supports aspect ratios [[1, 1], [1, 2], [2, 1]]:
- An image with aspect ratio [1, 1] would have ID 1
- An image with aspect ratio [1, 2] would have ID 2
- An image with aspect ratio [2, 1] would have ID 3
The id 0 is reserved for padding (i.e., no image).
If an image has aspect ratio [1, 2], that means it was split into 2 tiles horizontally, and its `aspect_ratio_id` would be 2.
cross_attention_mask (`torch.Tensor` of shape `(batch_size, seq_length, max_num_images, max_num_tiles)`, *optional*):
Cross-attention mask to control the interaction between text tokens and image tiles.
This 4D tensor defines which image tiles each text token should attend to.
For each text token (in seq_length):
- 1 indicates the token **should attend** to the corresponding image tile
- 0 indicates the token **should not attend** to the corresponding image tile
cross_attention_states (`torch.FloatTensor`, *optional*):
Output of the vision model, used for cross-attention. This tensor contains the processed image features that
the language model will attend to.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, MllamaForConditionalGeneration
>>> checkpoint = "meta-llama/Llama-3.2-11B-Vision"
>>> model = MllamaForConditionalGeneration.from_pretrained(checkpoint)
>>> processor = AutoProcessor.from_pretrained(checkpoint)
>>> prompt = "<|image|>If I had to write a haiku for this one"
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(text=prompt, images=image, return_tensors="pt")
>>> # Generate
>>> output = model.generate(**inputs, max_new_tokens=15)
>>> prompt_len = inputs.input_ids.shape[-1]
>>> generated_ids = output[:, prompt_len:]
>>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
>>> print(generated_text)
[', it would be:.\\nA stop sign in Chinatown.\\n']
```
"""
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
aspect_ratio_mask=aspect_ratio_mask,
aspect_ratio_ids=aspect_ratio_ids,
cross_attention_mask=cross_attention_mask,
cross_attention_states=cross_attention_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.config.text_config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(
self,
input_ids=None,
inputs_embeds=None,
attention_mask=None,
position_ids=None,
pixel_values=None,
aspect_ratio_ids=None,
aspect_ratio_mask=None,
cross_attention_mask=None,
past_key_values=None,
use_cache=False,
cache_position=None,
logits_to_keep=None,
**kwargs,
):
# Overwritten -- in specific circumstances we don't want to forward image inputs to the model
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
use_cache=use_cache,
inputs_embeds=inputs_embeds,
position_ids=position_ids,
attention_mask=attention_mask,
pixel_values=pixel_values,
aspect_ratio_ids=aspect_ratio_ids,
aspect_ratio_mask=aspect_ratio_mask,
cross_attention_mask=cross_attention_mask,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
**kwargs,
)
# If we're in pre-fill or cacheless decoding step, then we need pixel_values and aspect ratios
# to compute image hidden states, otherwise they are cached within each cross attn layer
if cache_position[0] != 0:
model_inputs["pixel_values"] = None
model_inputs["aspect_ratio_ids"] = None
model_inputs["aspect_ratio_mask"] = None
return model_inputs
def _update_model_kwargs_for_generation(self, outputs, model_kwargs, is_encoder_decoder, **kwargs):
cross_attention_mask_prev = model_kwargs.get("cross_attention_mask", None)
model_kwargs = super()._update_model_kwargs_for_generation(
outputs=outputs,
model_kwargs=model_kwargs,
is_encoder_decoder=is_encoder_decoder,
**kwargs,
)
# add cross-attn mask for new token
if cross_attention_mask_prev is not None:
model_kwargs["cross_attention_mask"] = torch.cat(
[cross_attention_mask_prev, cross_attention_mask_prev[:, -1:, ...]], dim=1
)
return model_kwargs
__all__ = [
"MllamaForConditionalGeneration",
"MllamaForCausalLM",
"MllamaTextModel",
"MllamaVisionModel",
"MllamaPreTrainedModel",
"MllamaModel",
]
| MllamaForConditionalGeneration |
python | faif__python-patterns | patterns/creational/builder.py | {
"start": 1279,
"end": 1622
} | class ____:
def __init__(self) -> None:
self.build_floor()
self.build_size()
def build_floor(self):
raise NotImplementedError
def build_size(self):
raise NotImplementedError
def __repr__(self) -> str:
return "Floor: {0.floor} | Size: {0.size}".format(self)
# Concrete Buildings
| Building |
python | apache__airflow | providers/microsoft/azure/tests/unit/microsoft/azure/operators/test_asb.py | {
"start": 5208,
"end": 8804
} | class ____:
@pytest.mark.parametrize(
("mock_message", "mock_batch_flag", "mock_message_id", "mock_reply_to", "mock_headers"),
[
(MESSAGE, True, None, None, None),
(MESSAGE, False, "test_message_id", "test_reply_to", {"test_header": "test_value"}),
(MESSAGE_LIST, True, None, None, None),
(MESSAGE_LIST, False, None, None, None),
],
)
def test_init(self, mock_message, mock_batch_flag, mock_message_id, mock_reply_to, mock_headers):
"""
Test init by creating AzureServiceBusSendMessageOperator with task id, queue_name, message,
batch, message_id, reply_to, and message headers and asserting with values
"""
asb_send_message_queue_operator = AzureServiceBusSendMessageOperator(
task_id="asb_send_message_queue_without_batch",
queue_name=QUEUE_NAME,
message=mock_message,
batch=mock_batch_flag,
message_id=mock_message_id,
reply_to=mock_reply_to,
message_headers=mock_headers,
)
assert asb_send_message_queue_operator.task_id == "asb_send_message_queue_without_batch"
assert asb_send_message_queue_operator.queue_name == QUEUE_NAME
assert asb_send_message_queue_operator.message == mock_message
assert asb_send_message_queue_operator.batch is mock_batch_flag
assert asb_send_message_queue_operator.message_id == mock_message_id
assert asb_send_message_queue_operator.reply_to == mock_reply_to
assert asb_send_message_queue_operator.message_headers == mock_headers
@mock.patch("airflow.providers.microsoft.azure.hooks.asb.MessageHook.send_message")
def test_send_message_queue(self, mock_send_message):
"""
Test AzureServiceBusSendMessageOperator with queue name, batch boolean flag, mock
the send_messages of azure service bus function
"""
TASK_ID = "task-id"
MSG_BODY = "test message body"
MSG_ID = None
REPLY_TO = None
HDRS = None
asb_send_message_queue_operator = AzureServiceBusSendMessageOperator(
task_id=TASK_ID,
queue_name=QUEUE_NAME,
message=MSG_BODY,
batch=False,
)
asb_send_message_queue_operator.execute(None)
expected_calls = [mock.call(QUEUE_NAME, MSG_BODY, False, MSG_ID, REPLY_TO, HDRS)]
mock_send_message.assert_has_calls(expected_calls, any_order=False)
@mock.patch("airflow.providers.microsoft.azure.hooks.asb.MessageHook.send_message")
def test_send_message_queue_with_id_hdrs_and_reply_to(self, mock_send_message):
"""
Test AzureServiceBusSendMessageOperator with queue name, batch boolean flag, mock
the send_messages of azure service bus function
"""
TASK_ID = "task-id"
MSG_ID = "test_message_id"
MSG_BODY = "test message body"
REPLY_TO = "test_reply_to"
HDRS = {"test_header": "test_value"}
asb_send_message_queue_operator = AzureServiceBusSendMessageOperator(
task_id=TASK_ID,
queue_name=QUEUE_NAME,
message=MSG_BODY,
batch=False,
message_id=MSG_ID,
reply_to=REPLY_TO,
message_headers=HDRS,
)
asb_send_message_queue_operator.execute(None)
expected_calls = [mock.call(QUEUE_NAME, MSG_BODY, False, MSG_ID, REPLY_TO, HDRS)]
mock_send_message.assert_has_calls(expected_calls, any_order=False)
| TestAzureServiceBusSendMessageOperator |
python | coleifer__peewee | peewee.py | {
"start": 176039,
"end": 179666
} | class ____(BigIntegerField):
# Support second -> microsecond resolution.
valid_resolutions = [10**i for i in range(7)]
def __init__(self, *args, **kwargs):
self.resolution = kwargs.pop('resolution', None)
if not self.resolution:
self.resolution = 1
elif self.resolution in range(2, 7):
self.resolution = 10 ** self.resolution
elif self.resolution not in self.valid_resolutions:
raise ValueError('TimestampField resolution must be one of: %s' %
', '.join(str(i) for i in self.valid_resolutions))
self.ticks_to_microsecond = 1000000 // self.resolution
self.utc = kwargs.pop('utc', False) or False
dflt = utcnow if self.utc else datetime.datetime.now
kwargs.setdefault('default', dflt)
super(TimestampField, self).__init__(*args, **kwargs)
def local_to_utc(self, dt):
# Convert naive local datetime into naive UTC, e.g.:
# 2019-03-01T12:00:00 (local=US/Central) -> 2019-03-01T18:00:00.
# 2019-05-01T12:00:00 (local=US/Central) -> 2019-05-01T17:00:00.
# 2019-03-01T12:00:00 (local=UTC) -> 2019-03-01T12:00:00.
return datetime.datetime(*time.gmtime(time.mktime(dt.timetuple()))[:6])
def utc_to_local(self, dt):
# Convert a naive UTC datetime into local time, e.g.:
# 2019-03-01T18:00:00 (local=US/Central) -> 2019-03-01T12:00:00.
# 2019-05-01T17:00:00 (local=US/Central) -> 2019-05-01T12:00:00.
# 2019-03-01T12:00:00 (local=UTC) -> 2019-03-01T12:00:00.
ts = calendar.timegm(dt.utctimetuple())
return datetime.datetime.fromtimestamp(ts)
def get_timestamp(self, value):
if self.utc:
# If utc-mode is on, then we assume all naive datetimes are in UTC.
return calendar.timegm(value.utctimetuple())
else:
return time.mktime(value.timetuple())
def db_value(self, value):
if value is None:
return
if isinstance(value, datetime.datetime):
pass
elif isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
else:
return int(round(value * self.resolution))
timestamp = self.get_timestamp(value)
if self.resolution > 1:
timestamp += (value.microsecond * .000001)
timestamp *= self.resolution
return int(round(timestamp))
def python_value(self, value):
if value is not None and isinstance(value, (int, float, long)):
if self.resolution > 1:
value, ticks = divmod(value, self.resolution)
microseconds = int(ticks * self.ticks_to_microsecond)
else:
microseconds = 0
if self.utc:
value = utcfromtimestamp(value)
else:
value = datetime.datetime.fromtimestamp(value)
if microseconds:
value = value.replace(microsecond=microseconds)
return value
def from_timestamp(self):
expr = ((self / Value(self.resolution, converter=False))
if self.resolution > 1 else self)
return self.model._meta.database.from_timestamp(expr)
year = property(_timestamp_date_part('year'))
month = property(_timestamp_date_part('month'))
day = property(_timestamp_date_part('day'))
hour = property(_timestamp_date_part('hour'))
minute = property(_timestamp_date_part('minute'))
second = property(_timestamp_date_part('second'))
| TimestampField |
python | django__django | django/contrib/admin/exceptions.py | {
"start": 426,
"end": 507
} | class ____(Exception):
"""The model is not registered."""
pass
| NotRegistered |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/unit_tests/integration/config.py | {
"start": 125,
"end": 1186
} | class ____:
def __init__(self) -> None:
self._config: Dict[str, Any] = {
"credentials": {"option_title": "PAT Credentials", "personal_access_token": "GITHUB_TEST_TOKEN"},
"start_date": "2020-05-01T00:00:00Z",
}
def with_repositories(self, repositories: List[str]) -> "ConfigBuilder":
self._config["repositories"] = repositories
return self
def with_client_secret(self, client_secret: str) -> "ConfigBuilder":
self._config["client_secret"] = client_secret
return self
def with_start_date(self, start_datetime: datetime) -> "ConfigBuilder":
self._config["start_date"] = start_datetime.isoformat()[:-13] + "Z"
return self
def with_branches(self, branches: List[str]) -> "ConfigBuilder":
self._config["branches"] = branches
return self
def with_api_url(self, api_url: str) -> "ConfigBuilder":
self._config["api_url"] = api_url
return self
def build(self) -> Dict[str, Any]:
return self._config
| ConfigBuilder |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 112646,
"end": 116073
} | class ____(GeneratedAirbyteSource):
class GoogleCredentials:
@public
def __init__(
self,
developer_token: str,
client_id: str,
client_secret: str,
refresh_token: str,
access_token: Optional[str] = None,
):
self.developer_token = check.str_param(developer_token, "developer_token")
self.client_id = check.str_param(client_id, "client_id")
self.client_secret = check.str_param(client_secret, "client_secret")
self.refresh_token = check.str_param(refresh_token, "refresh_token")
self.access_token = check.opt_str_param(access_token, "access_token")
class CustomGAQLQueriesEntry:
@public
def __init__(self, query: str, table_name: str):
self.query = check.str_param(query, "query")
self.table_name = check.str_param(table_name, "table_name")
@public
def __init__(
self,
name: str,
credentials: "GoogleAdsSource.GoogleCredentials",
customer_id: str,
start_date: str,
end_date: Optional[str] = None,
custom_queries: Optional[list[CustomGAQLQueriesEntry]] = None,
login_customer_id: Optional[str] = None,
conversion_window_days: Optional[int] = None,
):
"""Airbyte Source for Google Ads.
Documentation can be found at https://docs.airbyte.com/integrations/sources/google-ads
Args:
name (str): The name of the destination.
customer_id (str): Comma separated list of (client) customer IDs. Each customer ID must be specified as a 10-digit number without dashes. More instruction on how to find this value in our docs. Metrics streams like AdGroupAdReport cannot be requested for a manager account.
start_date (str): UTC date and time in the format 2017-01-25. Any data before this date will not be replicated.
end_date (Optional[str]): UTC date and time in the format 2017-01-25. Any data after this date will not be replicated.
login_customer_id (Optional[str]): If your access to the customer account is through a manager account, this field is required and must be set to the customer ID of the manager account (10-digit number without dashes). More information about this field you can see here
conversion_window_days (Optional[int]): A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads. For more information, see Google's documentation.
"""
self.credentials = check.inst_param(
credentials, "credentials", GoogleAdsSource.GoogleCredentials
)
self.customer_id = check.str_param(customer_id, "customer_id")
self.start_date = check.str_param(start_date, "start_date")
self.end_date = check.opt_str_param(end_date, "end_date")
self.custom_queries = check.opt_nullable_list_param(
custom_queries, "custom_queries", GoogleAdsSource.CustomGAQLQueriesEntry
)
self.login_customer_id = check.opt_str_param(login_customer_id, "login_customer_id")
self.conversion_window_days = check.opt_int_param(
conversion_window_days, "conversion_window_days"
)
super().__init__("Google Ads", name)
| GoogleAdsSource |
python | yaml__pyyaml | lib/yaml/tokens.py | {
"start": 865,
"end": 1109
} | class ____(Token):
id = '<stream start>'
def __init__(self, start_mark=None, end_mark=None,
encoding=None):
self.start_mark = start_mark
self.end_mark = end_mark
self.encoding = encoding
| StreamStartToken |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 18101,
"end": 18612
} | class ____(sgqlc.types.Enum):
"""The possible values for an enabled/disabled enterprise setting.
Enumeration Choices:
* `DISABLED`: The setting is disabled for organizations in the
enterprise.
* `ENABLED`: The setting is enabled for organizations in the
enterprise.
* `NO_POLICY`: There is no policy set for organizations in the
enterprise.
"""
__schema__ = github_schema
__choices__ = ("DISABLED", "ENABLED", "NO_POLICY")
| EnterpriseEnabledDisabledSettingValue |
python | google__jax | jax/_src/errors.py | {
"start": 717,
"end": 1197
} | class ____:
"""Mixin for JAX-specific errors"""
_error_page = 'https://docs.jax.dev/en/latest/errors.html'
_module_name = "jax.errors"
def __init__(self, message: str):
error_page = self._error_page
module_name = self._module_name
class_name = self.__class__.__name__
error_msg = f'{message}\nSee {error_page}#{module_name}.{class_name}'
# https://github.com/python/mypy/issues/5887
super().__init__(error_msg) # type: ignore
@export
| _JAXErrorMixin |
python | python-excel__xlwt | tests/test_unicode1.py | {
"start": 1190,
"end": 1502
} | class ____(unittest.TestCase):
def test_example_xls(self):
create_example_xls(in_tst_output_dir(EXAMPLE_XLS))
self.assertTrue(filecmp.cmp(in_tst_dir(EXAMPLE_XLS),
in_tst_output_dir(EXAMPLE_XLS),
shallow=False))
| TestUnicode1 |
python | pypa__pip | tests/lib/__init__.py | {
"start": 5952,
"end": 6084
} | class ____(AssertionError):
"""
An "assertion" failed during testing.
"""
StrPath = Union[str, pathlib.Path]
| TestFailure |
python | numpy__numpy | numpy/distutils/tests/test_system_info.py | {
"start": 2614,
"end": 3582
} | class ____(system_info):
def __init__(self,
default_lib_dirs=default_lib_dirs,
default_include_dirs=default_include_dirs,
verbosity=1,
):
self.__class__.info = {}
self.local_prefixes = []
defaults = {'library_dirs': '',
'include_dirs': '',
'runtime_library_dirs': '',
'rpath': '',
'src_dirs': '',
'search_static_first': "0",
'extra_compile_args': '',
'extra_link_args': ''}
self.cp = ConfigParser(defaults)
# We have to parse the config files afterwards
# to have a consistent temporary filepath
def _check_libs(self, lib_dirs, libs, opt_libs, exts):
"""Override _check_libs to return with all dirs """
info = {'libraries': libs, 'library_dirs': lib_dirs}
return info
| _system_info |
python | astropy__astropy | astropy/modeling/polynomial.py | {
"start": 715,
"end": 1769
} | class ____(FittableModel):
"""
Base class for all polynomial-like models with an arbitrary number of
parameters in the form of coefficients.
In this case Parameter instances are returned through the class's
``__getattr__`` rather than through class descriptors.
"""
# Default _param_names list; this will be filled in by the implementation's
# __init__
_param_names = ()
linear = True
col_fit_deriv = False
@property
def param_names(self):
"""Coefficient names generated based on the model's polynomial degree
and number of dimensions.
Subclasses should implement this to return parameter names in the
desired format.
On most `Model` classes this is a class attribute, but for polynomial
models it is an instance attribute since each polynomial model instance
can have different parameters depending on the degree of the polynomial
and the number of dimensions, for example.
"""
return self._param_names
| PolynomialBase |
python | walkccc__LeetCode | solutions/3351. Sum of Good Subsequences/3351.py | {
"start": 0,
"end": 544
} | class ____:
def sumOfGoodSubsequences(self, nums: list[int]) -> int:
MOD = 10**9 + 7
maxNum = max(nums)
# endsIn[i] := the number of good subsequences ending in i
endsIn = [0] * (maxNum + 2)
# dp[i] := the sum of good subsequences ending in i
dp = [0] * (maxNum + 2)
for num in nums:
seqsToAppend = 1 + endsIn[num - 1] + endsIn[num + 1]
dp[num] = (seqsToAppend * num +
(dp[num] + dp[num - 1] + dp[num + 1])) % MOD
endsIn[num] += seqsToAppend % MOD
return sum(dp) % MOD
| Solution |
python | scipy__scipy | scipy/odr/_odrpack.py | {
"start": 22508,
"end": 42707
} | class ____:
"""
The ODR class gathers all information and coordinates the running of the
main fitting routine.
Members of instances of the ODR class have the same names as the arguments
to the initialization routine.
Parameters
----------
data : Data class instance
instance of the Data class
model : Model class instance
instance of the Model class
Other Parameters
----------------
beta0 : array_like of rank-1
a rank-1 sequence of initial parameter values. Optional if
model provides an "estimate" function to estimate these values.
delta0 : array_like of floats of rank-1, optional
a (double-precision) float array to hold the initial values of
the errors in the input variables. Must be same shape as data.x
ifixb : array_like of ints of rank-1, optional
sequence of integers with the same length as beta0 that determines
which parameters are held fixed. A value of 0 fixes the parameter,
a value > 0 makes the parameter free.
ifixx : array_like of ints with same shape as data.x, optional
an array of integers with the same shape as data.x that determines
which input observations are treated as fixed. One can use a sequence
of length m (the dimensionality of the input observations) to fix some
dimensions for all observations. A value of 0 fixes the observation,
a value > 0 makes it free.
job : int, optional
an integer telling ODRPACK what tasks to perform. See p. 31 of the
ODRPACK User's Guide if you absolutely must set the value here. Use the
method set_job post-initialization for a more readable interface.
iprint : int, optional
an integer telling ODRPACK what to print. See pp. 33-34 of the
ODRPACK User's Guide if you absolutely must set the value here. Use the
method set_iprint post-initialization for a more readable interface.
errfile : str, optional
string with the filename to print ODRPACK errors to. If the file already
exists, an error will be thrown. The `overwrite` argument can be used to
prevent this. *Do Not Open This File Yourself!*
rptfile : str, optional
string with the filename to print ODRPACK summaries to. If the file
already exists, an error will be thrown. The `overwrite` argument can be
used to prevent this. *Do Not Open This File Yourself!*
ndigit : int, optional
integer specifying the number of reliable digits in the computation
of the function.
taufac : float, optional
float specifying the initial trust region. The default value is 1.
The initial trust region is equal to taufac times the length of the
first computed Gauss-Newton step. taufac must be less than 1.
sstol : float, optional
float specifying the tolerance for convergence based on the relative
change in the sum-of-squares. The default value is eps**(1/2) where eps
is the smallest value such that 1 + eps > 1 for double precision
computation on the machine. sstol must be less than 1.
partol : float, optional
float specifying the tolerance for convergence based on the relative
change in the estimated parameters. The default value is eps**(2/3) for
explicit models and ``eps**(1/3)`` for implicit models. partol must be less
than 1.
maxit : int, optional
integer specifying the maximum number of iterations to perform. For
first runs, maxit is the total number of iterations performed and
defaults to 50. For restarts, maxit is the number of additional
iterations to perform and defaults to 10.
stpb : array_like, optional
sequence (``len(stpb) == len(beta0)``) of relative step sizes to compute
finite difference derivatives wrt the parameters.
stpd : optional
array (``stpd.shape == data.x.shape`` or ``stpd.shape == (m,)``) of relative
step sizes to compute finite difference derivatives wrt the input
variable errors. If stpd is a rank-1 array with length m (the
dimensionality of the input variable), then the values are broadcast to
all observations.
sclb : array_like, optional
sequence (``len(stpb) == len(beta0)``) of scaling factors for the
parameters. The purpose of these scaling factors are to scale all of
the parameters to around unity. Normally appropriate scaling factors
are computed if this argument is not specified. Specify them yourself
if the automatic procedure goes awry.
scld : array_like, optional
array (scld.shape == data.x.shape or scld.shape == (m,)) of scaling
factors for the *errors* in the input variables. Again, these factors
are automatically computed if you do not provide them. If scld.shape ==
(m,), then the scaling factors are broadcast to all observations.
work : ndarray, optional
array to hold the double-valued working data for ODRPACK. When
restarting, takes the value of self.output.work.
iwork : ndarray, optional
array to hold the integer-valued working data for ODRPACK. When
restarting, takes the value of self.output.iwork.
overwrite : bool, optional
If it is True, output files defined by `errfile` and `rptfile` are
overwritten. The default is False.
Attributes
----------
data : Data
The data for this fit
model : Model
The model used in fit
output : Output
An instance if the Output class containing all of the returned
data from an invocation of ODR.run() or ODR.restart()
"""
def __init__(self, data, model, beta0=None, delta0=None, ifixb=None,
ifixx=None, job=None, iprint=None, errfile=None, rptfile=None,
ndigit=None, taufac=None, sstol=None, partol=None, maxit=None,
stpb=None, stpd=None, sclb=None, scld=None, work=None, iwork=None,
overwrite=False):
self.data = data
self.model = model
if beta0 is None:
if self.model.estimate is not None:
self.beta0 = _conv(self.model.estimate(self.data))
else:
raise ValueError(
"must specify beta0 or provide an estimator with the model"
)
else:
self.beta0 = _conv(beta0)
if ifixx is None and data.fix is not None:
ifixx = data.fix
if overwrite:
# remove output files for overwriting.
if rptfile is not None and os.path.exists(rptfile):
os.remove(rptfile)
if errfile is not None and os.path.exists(errfile):
os.remove(errfile)
self.delta0 = _conv(delta0)
# These really are 32-bit integers in FORTRAN (gfortran), even on 64-bit
# platforms.
# XXX: some other FORTRAN compilers may not agree.
self.ifixx = _conv(ifixx, dtype=np.int32)
self.ifixb = _conv(ifixb, dtype=np.int32)
self.job = job
self.iprint = iprint
self.errfile = errfile
self.rptfile = rptfile
self.ndigit = ndigit
self.taufac = taufac
self.sstol = sstol
self.partol = partol
self.maxit = maxit
self.stpb = _conv(stpb)
self.stpd = _conv(stpd)
self.sclb = _conv(sclb)
self.scld = _conv(scld)
self.work = _conv(work)
self.iwork = _conv(iwork)
self.output = None
self._check()
def _check(self):
""" Check the inputs for consistency, but don't bother checking things
that the builtin function odr will check.
"""
x_s = list(self.data.x.shape)
if isinstance(self.data.y, np.ndarray):
y_s = list(self.data.y.shape)
if self.model.implicit:
raise OdrError("an implicit model cannot use response data")
if self.job is not None and (self.job % 10) == 1:
raise OdrError("job parameter requests an implicit model,"
" but an explicit model was passed")
else:
# implicit model with q == self.data.y
y_s = [self.data.y, x_s[-1]]
if not self.model.implicit:
raise OdrError("an explicit model needs response data")
self.set_job(fit_type=1)
if x_s[-1] != y_s[-1]:
raise OdrError("number of observations do not match")
n = x_s[-1]
if len(x_s) == 2:
m = x_s[0]
else:
m = 1
if len(y_s) == 2:
q = y_s[0]
else:
q = 1
p = len(self.beta0)
# permissible output array shapes
fcn_perms = [(q, n)]
fjacd_perms = [(q, m, n)]
fjacb_perms = [(q, p, n)]
if q == 1:
fcn_perms.append((n,))
fjacd_perms.append((m, n))
fjacb_perms.append((p, n))
if m == 1:
fjacd_perms.append((q, n))
if p == 1:
fjacb_perms.append((q, n))
if m == q == 1:
fjacd_perms.append((n,))
if p == q == 1:
fjacb_perms.append((n,))
# try evaluating the supplied functions to make sure they provide
# sensible outputs
arglist = (self.beta0, self.data.x)
if self.model.extra_args is not None:
arglist = arglist + self.model.extra_args
res = self.model.fcn(*arglist)
if res.shape not in fcn_perms:
print(res.shape)
print(fcn_perms)
raise OdrError(f"fcn does not output {y_s}-shaped array")
if self.model.fjacd is not None:
res = self.model.fjacd(*arglist)
if res.shape not in fjacd_perms:
raise OdrError(
f"fjacd does not output {repr((q, m, n))}-shaped array")
if self.model.fjacb is not None:
res = self.model.fjacb(*arglist)
if res.shape not in fjacb_perms:
raise OdrError(
f"fjacb does not output {repr((q, p, n))}-shaped array")
# check shape of delta0
if self.delta0 is not None and self.delta0.shape != self.data.x.shape:
raise OdrError(
f"delta0 is not a {repr(self.data.x.shape)}-shaped array")
if self.data.x.size == 0:
warn("Empty data detected for ODR instance. "
"Do not expect any fitting to occur",
OdrWarning, stacklevel=3)
def _gen_work(self):
""" Generate a suitable work array if one does not already exist.
"""
n = self.data.x.shape[-1]
p = self.beta0.shape[0]
if len(self.data.x.shape) == 2:
m = self.data.x.shape[0]
else:
m = 1
if self.model.implicit:
q = self.data.y
elif len(self.data.y.shape) == 2:
q = self.data.y.shape[0]
else:
q = 1
if self.data.we is None:
ldwe = ld2we = 1
elif len(self.data.we.shape) == 3:
ld2we, ldwe = self.data.we.shape[1:]
else:
we = self.data.we
ldwe = 1
ld2we = 1
if we.ndim == 1 and q == 1:
ldwe = n
elif we.ndim == 2:
if we.shape == (q, q):
ld2we = q
elif we.shape == (q, n):
ldwe = n
if self.job % 10 < 2:
# ODR not OLS
lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 6*n*m + 2*n*q*p +
2*n*q*m + q*q + 5*q + q*(p+m) + ldwe*ld2we*q)
else:
# OLS not ODR
lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 2*n*m + 2*n*q*p +
5*q + q*(p+m) + ldwe*ld2we*q)
if isinstance(self.work, np.ndarray) and self.work.shape == (lwork,)\
and self.work.dtype.str.endswith('f8'):
# the existing array is fine
return
else:
self.work = np.zeros((lwork,), float)
def set_job(self, fit_type=None, deriv=None, var_calc=None,
del_init=None, restart=None):
"""
Sets the "job" parameter is a hopefully comprehensible way.
If an argument is not specified, then the value is left as is. The
default value from class initialization is for all of these options set
to 0.
Parameters
----------
fit_type : {0, 1, 2} int
0 -> explicit ODR
1 -> implicit ODR
2 -> ordinary least-squares
deriv : {0, 1, 2, 3} int
0 -> forward finite differences
1 -> central finite differences
2 -> user-supplied derivatives (Jacobians) with results
checked by ODRPACK
3 -> user-supplied derivatives, no checking
var_calc : {0, 1, 2} int
0 -> calculate asymptotic covariance matrix and fit
parameter uncertainties (V_B, s_B) using derivatives
recomputed at the final solution
1 -> calculate V_B and s_B using derivatives from last iteration
2 -> do not calculate V_B and s_B
del_init : {0, 1} int
0 -> initial input variable offsets set to 0
1 -> initial offsets provided by user in variable "work"
restart : {0, 1} int
0 -> fit is not a restart
1 -> fit is a restart
Notes
-----
The permissible values are different from those given on pg. 31 of the
ODRPACK User's Guide only in that one cannot specify numbers greater than
the last value for each variable.
If one does not supply functions to compute the Jacobians, the fitting
procedure will change deriv to 0, finite differences, as a default. To
initialize the input variable offsets by yourself, set del_init to 1 and
put the offsets into the "work" variable correctly.
"""
if self.job is None:
job_l = [0, 0, 0, 0, 0]
else:
job_l = [self.job // 10000 % 10,
self.job // 1000 % 10,
self.job // 100 % 10,
self.job // 10 % 10,
self.job % 10]
if fit_type in (0, 1, 2):
job_l[4] = fit_type
if deriv in (0, 1, 2, 3):
job_l[3] = deriv
if var_calc in (0, 1, 2):
job_l[2] = var_calc
if del_init in (0, 1):
job_l[1] = del_init
if restart in (0, 1):
job_l[0] = restart
self.job = (job_l[0]*10000 + job_l[1]*1000 +
job_l[2]*100 + job_l[3]*10 + job_l[4])
def set_iprint(self, init=None, so_init=None,
iter=None, so_iter=None, iter_step=None, final=None, so_final=None):
""" Set the iprint parameter for the printing of computation reports.
If any of the arguments are specified here, then they are set in the
iprint member. If iprint is not set manually or with this method, then
ODRPACK defaults to no printing. If no filename is specified with the
member rptfile, then ODRPACK prints to stdout. One can tell ODRPACK to
print to stdout in addition to the specified filename by setting the
so_* arguments to this function, but one cannot specify to print to
stdout but not a file since one can do that by not specifying a rptfile
filename.
There are three reports: initialization, iteration, and final reports.
They are represented by the arguments init, iter, and final
respectively. The permissible values are 0, 1, and 2 representing "no
report", "short report", and "long report" respectively.
The argument iter_step (0 <= iter_step <= 9) specifies how often to make
the iteration report; the report will be made for every iter_step'th
iteration starting with iteration one. If iter_step == 0, then no
iteration report is made, regardless of the other arguments.
If the rptfile is None, then any so_* arguments supplied will raise an
exception.
"""
if self.iprint is None:
self.iprint = 0
ip = [self.iprint // 1000 % 10,
self.iprint // 100 % 10,
self.iprint // 10 % 10,
self.iprint % 10]
# make a list to convert iprint digits to/from argument inputs
# rptfile, stdout
ip2arg = [[0, 0], # none, none
[1, 0], # short, none
[2, 0], # long, none
[1, 1], # short, short
[2, 1], # long, short
[1, 2], # short, long
[2, 2]] # long, long
if (self.rptfile is None and
(so_init is not None or
so_iter is not None or
so_final is not None)):
raise OdrError(
"no rptfile specified, cannot output to stdout twice")
iprint_l = ip2arg[ip[0]] + ip2arg[ip[1]] + ip2arg[ip[3]]
if init is not None:
iprint_l[0] = init
if so_init is not None:
iprint_l[1] = so_init
if iter is not None:
iprint_l[2] = iter
if so_iter is not None:
iprint_l[3] = so_iter
if final is not None:
iprint_l[4] = final
if so_final is not None:
iprint_l[5] = so_final
if iter_step in range(10):
# 0..9
ip[2] = iter_step
ip[0] = ip2arg.index(iprint_l[0:2])
ip[1] = ip2arg.index(iprint_l[2:4])
ip[3] = ip2arg.index(iprint_l[4:6])
self.iprint = ip[0]*1000 + ip[1]*100 + ip[2]*10 + ip[3]
def run(self):
""" Run the fitting routine with all of the information given and with ``full_output=1``.
Returns
-------
output : Output instance
This object is also assigned to the attribute .output .
""" # noqa: E501
args = (self.model.fcn, self.beta0, self.data.y, self.data.x)
kwds = {'full_output': 1}
kwd_l = ['ifixx', 'ifixb', 'job', 'iprint', 'errfile', 'rptfile',
'ndigit', 'taufac', 'sstol', 'partol', 'maxit', 'stpb',
'stpd', 'sclb', 'scld', 'work', 'iwork']
if self.delta0 is not None and (self.job // 10000) % 10 == 0:
# delta0 provided and fit is not a restart
self._gen_work()
d0 = np.ravel(self.delta0)
self.work[:len(d0)] = d0
# set the kwds from other objects explicitly
if self.model.fjacb is not None:
kwds['fjacb'] = self.model.fjacb
if self.model.fjacd is not None:
kwds['fjacd'] = self.model.fjacd
if self.data.we is not None:
kwds['we'] = self.data.we
if self.data.wd is not None:
kwds['wd'] = self.data.wd
if self.model.extra_args is not None:
kwds['extra_args'] = self.model.extra_args
# implicitly set kwds from self's members
for attr in kwd_l:
obj = getattr(self, attr)
if obj is not None:
kwds[attr] = obj
with ODR_LOCK:
self.output = Output(odr(*args, **kwds))
return self.output
def restart(self, iter=None):
""" Restarts the run with iter more iterations.
Parameters
----------
iter : int, optional
ODRPACK's default for the number of new iterations is 10.
Returns
-------
output : Output instance
This object is also assigned to the attribute .output .
"""
if self.output is None:
raise OdrError("cannot restart: run() has not been called before")
self.set_job(restart=1)
self.work = self.output.work
self.iwork = self.output.iwork
self.maxit = iter
return self.run()
| ODR |
python | apache__airflow | airflow-core/tests/unit/models/test_taskinstance.py | {
"start": 114641,
"end": 117716
} | class ____:
"""Test TI.xcom_push() correctly records return values for task-mapping."""
def setup_class(self):
"""Ensure we start fresh."""
with create_session() as session:
session.query(TaskMap).delete()
@pytest.mark.parametrize("xcom_value", [[1, 2, 3], {"a": 1, "b": 2}, "abc"])
def test_not_recorded_if_leaf(self, dag_maker, xcom_value):
"""Return value should not be recorded if there are no downstreams."""
with dag_maker(dag_id="test_not_recorded_for_unused") as dag:
@dag.task()
def push_something():
return xcom_value
push_something()
ti = next(ti for ti in dag_maker.create_dagrun().task_instances if ti.task_id == "push_something")
ti.run()
assert dag_maker.session.query(TaskMap).count() == 0
@pytest.mark.parametrize("xcom_value", [[1, 2, 3], {"a": 1, "b": 2}, "abc"])
def test_not_recorded_if_not_used(self, dag_maker, xcom_value):
"""Return value should not be recorded if no downstreams are mapped."""
with dag_maker(dag_id="test_not_recorded_for_unused") as dag:
@dag.task()
def push_something():
return xcom_value
@dag.task()
def completely_different():
pass
push_something() >> completely_different()
ti = next(ti for ti in dag_maker.create_dagrun().task_instances if ti.task_id == "push_something")
ti.run()
assert dag_maker.session.query(TaskMap).count() == 0
@pytest.mark.parametrize("xcom_1", [[1, 2, 3], {"a": 1, "b": 2}, "abc"])
@pytest.mark.parametrize("xcom_4", [[1, 2, 3], {"a": 1, "b": 2}])
def test_not_recorded_if_irrelevant(self, dag_maker, xcom_1, xcom_4):
"""Return value should only be recorded if a mapped downstream uses the it."""
with dag_maker(dag_id="test_not_recorded_for_unused") as dag:
@dag.task()
def push_1():
return xcom_1
@dag.task()
def push_2():
return [-1, -2]
@dag.task()
def push_3():
return ["x", "y"]
@dag.task()
def push_4():
return xcom_4
@dag.task()
def show(arg1, arg2):
print(arg1, arg2)
@task_group()
def tg(arg):
show(arg1=task_3, arg2=arg)
task_3 = push_3()
show.partial(arg1=push_1()).expand(arg2=push_2())
tg.expand(arg=push_4())
tis = {ti.task_id: ti for ti in dag_maker.create_dagrun().task_instances}
tis["push_1"].run()
assert dag_maker.session.query(TaskMap).count() == 0
tis["push_2"].run()
assert dag_maker.session.query(TaskMap).count() == 1
tis["push_3"].run()
assert dag_maker.session.query(TaskMap).count() == 1
tis["push_4"].run()
assert dag_maker.session.query(TaskMap).count() == 2
| TestTaskInstanceRecordTaskMapXComPush |
python | google__flatbuffers | tests/monster_test_generated.py | {
"start": 2252,
"end": 3170
} | class ____(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = InParentNamespace()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsInParentNamespace(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def InParentNamespaceBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4D\x4F\x4E\x53", size_prefixed=size_prefixed)
# InParentNamespace
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
def InParentNamespaceStart(builder):
builder.StartObject(0)
def InParentNamespaceEnd(builder):
return builder.EndObject()
| InParentNamespace |
python | zarr-developers__zarr-python | src/zarr/errors.py | {
"start": 3502,
"end": 3555
} | class ____(IndexError): ...
| VindexInvalidSelectionError |
python | crytic__slither | slither/solc_parsing/declarations/modifier.py | {
"start": 614,
"end": 3897
} | class ____(FunctionSolc):
def __init__(
self,
modifier: Modifier,
function_data: Dict,
contract_parser: "ContractSolc",
slither_parser: "SlitherCompilationUnitSolc",
) -> None:
super().__init__(modifier, function_data, contract_parser, slither_parser)
# _modifier is equal to _function, but keep it here to prevent
# confusion for mypy in underlying_function
self._modifier = modifier
@property
def underlying_function(self) -> Modifier:
return self._modifier
def analyze_params(self) -> None:
# Can be re-analyzed due to inheritance
if self._params_was_analyzed:
return
self._params_was_analyzed = True
self._analyze_attributes()
if self.is_compact_ast:
params = self._functionNotParsed["parameters"]
else:
children = self._functionNotParsed["children"]
# It uses to be
# params = children[0]
# But from Solidity 0.6.3 to 0.6.10 (included)
# Comment above a function might be added in the children
params = next(child for child in children if child[self.get_key()] == "ParameterList")
if params:
self._parse_params(params)
def analyze_content(self) -> None:
if self._content_was_analyzed:
return
self._content_was_analyzed = True
if self.is_compact_ast:
body = self._functionNotParsed.get("body", None)
if body and body[self.get_key()] == "Block":
self._function.is_implemented = True
self._parse_cfg(body)
else:
children = self._functionNotParsed["children"]
self._function.is_implemented = False
if len(children) > 1:
# It uses to be
# params = children[1]
# But from Solidity 0.6.3 to 0.6.10 (included)
# Comment above a function might be added in the children
block = next(child for child in children if child[self.get_key()] == "Block")
self._function.is_implemented = True
self._parse_cfg(block)
for local_var_parser in self._local_variables_parser:
local_var_parser.analyze(self)
for node in self._node_to_nodesolc.values():
node.analyze_expressions(self)
for yul_parser in self._node_to_yulobject.values():
yul_parser.analyze_expressions()
self._rewrite_ternary_as_if_else()
self._remove_alone_endif()
if self._function.entry_point:
self._update_reachability(self._function.entry_point)
# self._analyze_read_write()
# self._analyze_calls()
def _parse_statement(
self, statement: Dict, node: NodeSolc, scope: Union[Scope, "Function"]
) -> NodeSolc:
name = statement[self.get_key()]
if name == "PlaceholderStatement":
placeholder_node = self._new_node(NodeType.PLACEHOLDER, statement["src"], scope)
link_nodes(node.underlying_node, placeholder_node.underlying_node)
return placeholder_node
return super()._parse_statement(statement, node, scope)
| ModifierSolc |
python | pytorch__pytorch | torch/_inductor/codecache.py | {
"start": 36942,
"end": 41956
} | class ____(Generic[T]):
"""
Mixin for caches that have guards associated with their entries.
"""
@classmethod
def _get_tmp_dir_for_key(cls: type[GuardedCache[T]], _key: str) -> str:
raise NotImplementedError("Implement _get_tmp_dir_for_key on parent class")
@classmethod
def iterate_over_candidates(
cls: type[GuardedCache[T]],
local: bool,
remote_cache: RemoteCache[JsonDataTy] | None,
key: str,
) -> Generator[tuple[T, bytes], None, None]:
if local:
subdir = cls._get_tmp_dir_for_key(key)
if os.path.exists(subdir):
for path in sorted(os.listdir(subdir)):
try:
with open(os.path.join(subdir, path), "rb") as f:
content = f.read()
yield pickle.loads(content), content
except Exception:
log.warning(
"fx graph cache unable to load compiled graph",
exc_info=True,
)
if remote_cache:
try:
if (cache_data := remote_cache.get(key)) is not None:
assert isinstance(cache_data, dict)
data = cache_data["data"]
assert isinstance(data, (str, bytes))
content = base64.b64decode(data)
yield pickle.loads(content), content
except Exception:
log.warning(
"%s unable to load compiled graph", cls.__name__, exc_info=True
)
@classmethod
def find_guarded_entry(
cls: type[GuardedCache[T]],
key: str,
local: bool,
remote_cache: RemoteCache[JsonDataTy] | None,
evaluate_guards: Callable[[str, list[int] | list[torch.SymInt]], bool],
hints: list[int],
) -> tuple[T | None, bytes | None, dict[str, str]]:
"""
Find the first cache entry in iterate_over_candidates that passes `evaluate_guards`.
Args:
key: The cache key to look up
local: Whether to check the local cache
remote_cache: The remote cache to check, if any
evaluate_guards: Function that evaluates whether a guard passes the check,
given a list of hint values and the guard expression.
hints: List of symint hints paired with evaluate_guards
Returns:
A tuple of (graph, pickled_content) if found, or (None, None) if not found
"""
graph = None
pickled_content = None
result_status = "full_miss"
sample_guards_expr = None
# Iterate over any entries in the subdir for this key and evaluate
# guards to determine whether there's a hit.
for candidate, content in cls.iterate_over_candidates(local, remote_cache, key):
assert hasattr(candidate, "guards_expr")
if not candidate.guards_expr: # type: ignore[attr-defined]
# No guards to evaluate, so this is a hit.
graph = candidate
pickled_content = content
result_status = "hit"
break
# Evaluate the guard expression in the current context.
# If there's not a cache hit, we don't want the evaluation to
# affect the current env, e.g., cause the creation of new guards,
# so we evaluate with the hints instead of the symbols.
hit = bool(evaluate_guards(candidate.guards_expr, hints)) # type: ignore[attr-defined]
if hit:
graph = candidate
pickled_content = content
result_status = "hit"
sample_guards_expr = candidate.guards_expr
break
else:
# At least one guard missed, log this
result_status = "guard_miss"
sample_guards_expr = candidate.guards_expr
info = {"cache_status_detailed": result_status}
if sample_guards_expr is not None:
info["cache_status_guard_expr"] = sample_guards_expr
return graph, pickled_content, info
@classmethod
def _filter_backed_symints(
cls: type[GuardedCache[T]], inputs: Sequence[InputType]
) -> list[torch.SymInt]:
"""
Get the backed SymInt objects from the input list. Note that we can never
have guards that depend on unbacked symint.
"""
return [s for s in inputs if isinstance(s, torch.SymInt) and has_hint(s)]
@classmethod
def _get_shape_env(cls: type[GuardedCache[T]]) -> ShapeEnv | None:
"""
Helper to get the shape env from the tracing context.
"""
ctx = torch._guards.TracingContext.try_get()
if not ctx or not ctx.fake_mode:
return None
return ctx.fake_mode.shape_env
@CacheArtifactFactory.register
| GuardedCache |
python | encode__httpx | httpx/_content.py | {
"start": 1681,
"end": 2543
} | class ____(AsyncByteStream):
CHUNK_SIZE = 65_536
def __init__(self, stream: AsyncIterable[bytes]) -> None:
self._stream = stream
self._is_stream_consumed = False
self._is_generator = inspect.isasyncgen(stream)
async def __aiter__(self) -> AsyncIterator[bytes]:
if self._is_stream_consumed and self._is_generator:
raise StreamConsumed()
self._is_stream_consumed = True
if hasattr(self._stream, "aread"):
# File-like interfaces should use 'aread' directly.
chunk = await self._stream.aread(self.CHUNK_SIZE)
while chunk:
yield chunk
chunk = await self._stream.aread(self.CHUNK_SIZE)
else:
# Otherwise iterate.
async for part in self._stream:
yield part
| AsyncIteratorByteStream |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/hooks/cosmos.py | {
"start": 1901,
"end": 17486
} | class ____(BaseHook):
"""
Interact with Azure CosmosDB.
login should be the endpoint uri, password should be the master key
optionally, you can use the following extras to default these values
{"database_name": "<DATABASE_NAME>", "collection_name": "COLLECTION_NAME"}.
:param azure_cosmos_conn_id: Reference to the
:ref:`Azure CosmosDB connection<howto/connection:azure_cosmos>`.
"""
conn_name_attr = "azure_cosmos_conn_id"
default_conn_name = "azure_cosmos_default"
conn_type = "azure_cosmos"
hook_name = "Azure CosmosDB"
@classmethod
@add_managed_identity_connection_widgets
def get_connection_form_widgets(cls) -> dict[str, Any]:
"""Return connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import StringField
return {
"database_name": StringField(
lazy_gettext("Cosmos Database Name (optional)"), widget=BS3TextFieldWidget()
),
"collection_name": StringField(
lazy_gettext("Cosmos Collection Name (optional)"), widget=BS3TextFieldWidget()
),
"subscription_id": StringField(
lazy_gettext("Subscription ID (optional)"),
widget=BS3TextFieldWidget(),
),
"resource_group_name": StringField(
lazy_gettext("Resource Group Name (optional)"),
widget=BS3TextFieldWidget(),
),
}
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Return custom field behaviour."""
return {
"hidden_fields": ["schema", "port", "host", "extra"],
"relabeling": {
"login": "Cosmos Endpoint URI",
"password": "Cosmos Master Key Token",
},
"placeholders": {
"login": "endpoint uri",
"password": "master key (not needed for Azure AD authentication)",
"database_name": "database name",
"collection_name": "collection name",
"subscription_id": "Subscription ID (required for Azure AD authentication)",
"resource_group_name": "Resource Group Name (required for Azure AD authentication)",
},
}
def __init__(self, azure_cosmos_conn_id: str = default_conn_name) -> None:
super().__init__()
self.conn_id = azure_cosmos_conn_id
self._conn: CosmosClient | None = None
self.default_database_name = None
self.default_collection_name = None
self.default_partition_key = None
def _get_field(self, extras, name):
return get_field(
conn_id=self.conn_id,
conn_type=self.conn_type,
extras=extras,
field_name=name,
)
def get_conn(self) -> CosmosClient:
"""Return a cosmos db client."""
if not self._conn:
conn = self.get_connection(self.conn_id)
extras = conn.extra_dejson
endpoint_uri = conn.login
endpoint_uri = cast("str", endpoint_uri)
resource_group_name = self._get_field(extras, "resource_group_name")
if conn.password:
master_key = conn.password
elif resource_group_name:
managed_identity_client_id = self._get_field(extras, "managed_identity_client_id")
workload_identity_tenant_id = self._get_field(extras, "workload_identity_tenant_id")
subscritption_id = self._get_field(extras, "subscription_id")
credential = get_sync_default_azure_credential(
managed_identity_client_id=managed_identity_client_id,
workload_identity_tenant_id=workload_identity_tenant_id,
)
management_client = CosmosDBManagementClient(
credential=credential,
subscription_id=subscritption_id,
)
conn.login = cast("str", conn.login)
database_account = urlparse(conn.login).netloc.split(".")[0]
database_account_keys = management_client.database_accounts.list_keys(
resource_group_name, database_account
)
master_key = cast("str", database_account_keys.primary_master_key)
else:
raise AirflowException("Either password or resource_group_name is required")
self.default_database_name = self._get_field(extras, "database_name")
self.default_collection_name = self._get_field(extras, "collection_name")
self.default_partition_key = self._get_field(extras, "partition_key")
# Initialize the Python Azure Cosmos DB client
self._conn = CosmosClient(endpoint_uri, {"masterKey": master_key})
return self._conn
def __get_database_name(self, database_name: str | None = None) -> str:
self.get_conn()
db_name = database_name
if db_name is None:
db_name = self.default_database_name
if db_name is None:
raise AirflowBadRequest("Database name must be specified")
return db_name
def __get_collection_name(self, collection_name: str | None = None) -> str:
self.get_conn()
coll_name = collection_name
if coll_name is None:
coll_name = self.default_collection_name
if coll_name is None:
raise AirflowBadRequest("Collection name must be specified")
return coll_name
def __get_partition_key(self, partition_key: PartitionKeyType | None = None) -> PartitionKeyType:
self.get_conn()
if partition_key is None:
part_key = self.default_partition_key
else:
part_key = partition_key
if part_key is None:
raise AirflowBadRequest("Partition key must be specified")
return part_key
def does_collection_exist(self, collection_name: str, database_name: str) -> bool:
"""Check if a collection exists in CosmosDB."""
if collection_name is None:
raise AirflowBadRequest("Collection name cannot be None.")
# The ignores below is due to typing bug in azure-cosmos 9.2.0
# https://github.com/Azure/azure-sdk-for-python/issues/31811
existing_container = list(
self.get_conn()
.get_database_client(self.__get_database_name(database_name))
.query_containers(
"SELECT * FROM r WHERE r.id=@id",
parameters=[{"name": "@id", "value": collection_name}],
)
)
if not existing_container:
return False
return True
def create_collection(
self,
collection_name: str,
database_name: str | None = None,
partition_key: PartitionKeyType | None = None,
) -> None:
"""Create a new collection in the CosmosDB database."""
if collection_name is None:
raise AirflowBadRequest("Collection name cannot be None.")
# We need to check to see if this container already exists so we don't try
# to create it twice
# The ignores below is due to typing bug in azure-cosmos 9.2.0
# https://github.com/Azure/azure-sdk-for-python/issues/31811
existing_container = list(
self.get_conn()
.get_database_client(self.__get_database_name(database_name))
.query_containers(
"SELECT * FROM r WHERE r.id=@id",
parameters=[{"name": "@id", "value": collection_name}],
)
)
# Only create if we did not find it already existing
if not existing_container:
self.get_conn().get_database_client(self.__get_database_name(database_name)).create_container(
collection_name,
partition_key=PartitionKey(path=self.__get_partition_key(partition_key)),
)
def does_database_exist(self, database_name: str) -> bool:
"""Check if a database exists in CosmosDB."""
if database_name is None:
raise AirflowBadRequest("Database name cannot be None.")
# The ignores below is due to typing bug in azure-cosmos 9.2.0
# https://github.com/Azure/azure-sdk-for-python/issues/31811
existing_database = list(
self.get_conn().query_databases(
"SELECT * FROM r WHERE r.id=@id",
parameters=[{"name": "@id", "value": database_name}],
)
)
if not existing_database:
return False
return True
def create_database(self, database_name: str) -> None:
"""Create a new database in CosmosDB."""
if database_name is None:
raise AirflowBadRequest("Database name cannot be None.")
# We need to check to see if this database already exists so we don't try
# to create it twice
# The ignores below is due to typing bug in azure-cosmos 9.2.0
# https://github.com/Azure/azure-sdk-for-python/issues/31811
existing_database = list(
self.get_conn().query_databases(
"SELECT * FROM r WHERE r.id=@id",
parameters=[{"name": "@id", "value": database_name}],
)
)
# Only create if we did not find it already existing
if not existing_database:
self.get_conn().create_database(database_name)
def delete_database(self, database_name: str) -> None:
"""Delete an existing database in CosmosDB."""
if database_name is None:
raise AirflowBadRequest("Database name cannot be None.")
self.get_conn().delete_database(database_name)
def delete_collection(self, collection_name: str, database_name: str | None = None) -> None:
"""Delete an existing collection in the CosmosDB database."""
if collection_name is None:
raise AirflowBadRequest("Collection name cannot be None.")
self.get_conn().get_database_client(self.__get_database_name(database_name)).delete_container(
collection_name
)
def upsert_document(self, document, database_name=None, collection_name=None, document_id=None):
"""Insert or update a document into an existing collection in the CosmosDB database."""
# Assign unique ID if one isn't provided
if document_id is None:
document_id = str(uuid.uuid4())
if document is None:
raise AirflowBadRequest("You cannot insert a None document")
# Add document id if isn't found
if document.get("id") is None:
document["id"] = document_id
created_document = (
self.get_conn()
.get_database_client(self.__get_database_name(database_name))
.get_container_client(self.__get_collection_name(collection_name))
.upsert_item(document)
)
return created_document
def insert_documents(
self, documents, database_name: str | None = None, collection_name: str | None = None
) -> list:
"""Insert a list of new documents into an existing collection in the CosmosDB database."""
if documents is None:
raise AirflowBadRequest("You cannot insert empty documents")
created_documents = []
for single_document in documents:
created_documents.append(
self.get_conn()
.get_database_client(self.__get_database_name(database_name))
.get_container_client(self.__get_collection_name(collection_name))
.create_item(single_document)
)
return created_documents
def delete_document(
self,
document_id: str,
database_name: str | None = None,
collection_name: str | None = None,
partition_key: PartitionKeyType | None = None,
) -> None:
"""Delete an existing document out of a collection in the CosmosDB database."""
if document_id is None:
raise AirflowBadRequest("Cannot delete a document without an id")
(
self.get_conn()
.get_database_client(self.__get_database_name(database_name))
.get_container_client(self.__get_collection_name(collection_name))
.delete_item(document_id, partition_key=self.__get_partition_key(partition_key))
)
def get_document(
self,
document_id: str,
database_name: str | None = None,
collection_name: str | None = None,
partition_key: PartitionKeyType | None = None,
):
"""Get a document from an existing collection in the CosmosDB database."""
if document_id is None:
raise AirflowBadRequest("Cannot get a document without an id")
try:
return (
self.get_conn()
.get_database_client(self.__get_database_name(database_name))
.get_container_client(self.__get_collection_name(collection_name))
.read_item(document_id, partition_key=self.__get_partition_key(partition_key))
)
except CosmosHttpResponseError:
return None
def get_documents(
self,
sql_string: str,
database_name: str | None = None,
collection_name: str | None = None,
partition_key: PartitionKeyType | None = None,
) -> list | None:
"""Get a list of documents from an existing collection in the CosmosDB database via SQL query."""
if sql_string is None:
raise AirflowBadRequest("SQL query string cannot be None")
try:
result_iterable = (
self.get_conn()
.get_database_client(self.__get_database_name(database_name))
.get_container_client(self.__get_collection_name(collection_name))
.query_items(sql_string, partition_key=self.__get_partition_key(partition_key))
)
return list(result_iterable)
except CosmosHttpResponseError:
return None
def test_connection(self):
"""Test a configured Azure Cosmos connection."""
try:
# Attempt to list existing databases under the configured subscription and retrieve the first in
# the returned iterator. The Azure Cosmos API does allow for creation of a
# CosmosClient with incorrect values but then will fail properly once items are
# retrieved using the client. We need to _actually_ try to retrieve an object to properly test the
# connection.
next(iter(self.get_conn().list_databases()), None)
except Exception as e:
return False, str(e)
return True, "Successfully connected to Azure Cosmos."
def get_database_link(database_id: str) -> str:
"""Get Azure CosmosDB database link."""
return "dbs/" + database_id
def get_collection_link(database_id: str, collection_id: str) -> str:
"""Get Azure CosmosDB collection link."""
return get_database_link(database_id) + "/colls/" + collection_id
def get_document_link(database_id: str, collection_id: str, document_id: str) -> str:
"""Get Azure CosmosDB document link."""
return get_collection_link(database_id, collection_id) + "/docs/" + document_id
| AzureCosmosDBHook |
python | google__flatbuffers | python/flatbuffers/number_types.py | {
"start": 1259,
"end": 1406
} | class ____(object):
bytewidth = 2
min_val = 0
max_val = (2**16) - 1
py_type = int
name = "uint16"
packer_type = packer.uint16
| Uint16Flags |
python | bokeh__bokeh | src/bokeh/models/renderers/glyph_renderer.py | {
"start": 2075,
"end": 8942
} | class ____(DataRenderer):
'''
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
@error(CDSVIEW_FILTERS_WITH_CONNECTED)
def _check_cdsview_filters_with_connected(self):
if isinstance(self.glyph, ConnectedXYGlyph) and not isinstance(self.view.filter, AllIndices):
return str(self)
@error(BAD_COLUMN_NAME)
def _check_bad_column_name(self):
source = self.data_source
if not isinstance(source, ColumnDataSource) or isinstance(source, WebDataSource):
return
colnames = source.column_names
props = self.glyph.properties_with_values(include_defaults=False)
specs = self.glyph.dataspecs().keys() & props.keys()
missing = []
for spec in sorted(specs):
if isinstance(props[spec], Field) and (field := props[spec].field) not in colnames:
if close := get_close_matches(field, colnames, n=1):
missing.append(f"{spec}={field!r} [closest match: {close[0]!r}]")
else:
missing.append(f"{spec}={field!r} [no close matches]")
if missing:
return f"{', '.join(missing)} {{renderer: {self}}}"
data_source = Required(Instance(DataSource), help="""
Local data source to use when rendering glyphs on the plot.
""")
view = Instance(CDSView, default=InstanceDefault(CDSView), help="""
A view into the data source to use when rendering glyphs. A default view
of the entire data source is created when a view is not passed in during
initialization.
.. note:
Only the default (filterless) CDSView is compatible with glyphs that
have connected topology, such as Line and Patch. Setting filters on
views for these glyphs will result in a warning and undefined behavior.
""")
glyph = Required(Instance(Glyph), help="""
The glyph to render, in conjunction with the supplied data source
and ranges.
""")
selection_glyph = Nullable(Either(Auto, Instance(Glyph)), default="auto", help="""
An optional glyph used for selected points.
If set to "auto" then the standard glyph will be used for selected
points.
""")
nonselection_glyph = Nullable(Either(Auto, Instance(Glyph)), default="auto", help="""
An optional glyph used for explicitly non-selected points
(i.e., non-selected when there are other points that are selected,
but not when no points at all are selected.)
If set to "auto" then a glyph with a low alpha value (0.1) will
be used for non-selected points.
""")
hover_glyph = Nullable(Instance(Glyph), help="""
An optional glyph used for inspected points, e.g., those that are
being hovered over by a ``HoverTool``.
""")
muted_glyph = Nullable(Either(Auto, Instance(Glyph)), default="auto", help="""
An optional glyph that replaces the primary glyph when ``muted`` is set. If
set to ``"auto"``, it will create a new glyph based off the primary glyph
with predefined visual properties.
""")
muted = Bool(default=False, help="""
Defines whether this glyph renderer is muted or not. Muted renderer will use
the muted glyph instead of the primary glyph for rendering. Usually renderers
are muted by the user through an UI action, e.g. by clicking a legend item, if
a legend was configured with ``click_policy = "mute"``.
""")
def add_decoration(self, marking: Marking, node: Literal["start", "middle", "end"]) -> Decoration:
glyphs = [self.glyph, self.selection_glyph, self.nonselection_glyph, self.hover_glyph, self.muted_glyph]
decoration = Decoration(marking=marking, node=node)
for glyph in glyphs:
if isinstance(glyph, Glyph):
glyph.decorations.append(decoration)
return decoration
def construct_color_bar(self, **kwargs: Any) -> ColorBar:
''' Construct and return a new ``ColorBar`` for this ``GlyphRenderer``.
The function will check for a color mapper on an appropriate property
of the GlyphRenderer's main glyph, in this order:
* ``fill_color.transform`` for FillGlyph
* ``line_color.transform`` for LineGlyph
* ``text_color.transform`` for TextGlyph
* ``color_mapper`` for Image
In general, the function will "do the right thing" based on glyph type.
If different behavior is needed, ColorBars can be constructed by hand.
Extra keyword arguments may be passed in to control ``ColorBar``
properties such as `title`.
Returns:
ColorBar
'''
from ...core.property.vectorization import Field
from ..annotations import ColorBar
from ..glyph import FillGlyph, LineGlyph, TextGlyph
from ..glyphs import Image, ImageStack
from ..mappers import ColorMapper
if isinstance(self.glyph, FillGlyph):
fill_color = self.glyph.fill_color
if not (isinstance(fill_color, Field) and isinstance(fill_color.transform, ColorMapper)):
raise ValueError("expected fill_color to be a field with a ColorMapper transform")
return ColorBar(color_mapper=fill_color.transform, **kwargs)
elif isinstance(self.glyph, LineGlyph):
line_color = self.glyph.line_color
if not (isinstance(line_color, Field) and isinstance(line_color.transform, ColorMapper)):
raise ValueError("expected line_color to be a field with a ColorMapper transform")
return ColorBar(color_mapper=line_color.transform, **kwargs)
elif isinstance(self.glyph, TextGlyph):
text_color = self.glyph.text_color
if not (isinstance(text_color, Field) and isinstance(text_color.transform, ColorMapper)):
raise ValueError("expected text_color to be a field with a ColorMapper transform")
return ColorBar(color_mapper=text_color.transform, **kwargs)
elif isinstance(self.glyph, (Image, ImageStack)):
return ColorBar(color_mapper=self.glyph.color_mapper, **kwargs)
else:
raise ValueError(f"construct_color_bar does not handle glyph type {type(self.glyph).__name__}")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| GlyphRenderer |
python | scipy__scipy | scipy/_build_utils/tempita/_looper.py | {
"start": 1338,
"end": 4011
} | class ____:
def __init__(self, seq, pos):
self.seq = seq
self.pos = pos
def __repr__(self):
return '<loop pos=%r at %r>' % (
self.seq[self.pos], self.pos)
def index(self):
return self.pos
index = property(index)
def number(self):
return self.pos + 1
number = property(number)
def item(self):
return self.seq[self.pos]
item = property(item)
def __next__(self):
try:
return self.seq[self.pos + 1]
except IndexError:
return None
__next__ = property(__next__)
def previous(self):
if self.pos == 0:
return None
return self.seq[self.pos - 1]
previous = property(previous)
def odd(self):
return not self.pos % 2
odd = property(odd)
def even(self):
return self.pos % 2
even = property(even)
def first(self):
return self.pos == 0
first = property(first)
def last(self):
return self.pos == len(self.seq) - 1
last = property(last)
def length(self):
return len(self.seq)
length = property(length)
def first_group(self, getter=None):
"""
Returns true if this item is the start of a new group,
where groups mean that some attribute has changed. The getter
can be None (the item itself changes), an attribute name like
``'.attr'``, a function, or a dict key or list index.
"""
if self.first:
return True
return self._compare_group(self.item, self.previous, getter)
def last_group(self, getter=None):
"""
Returns true if this item is the end of a new group,
where groups mean that some attribute has changed. The getter
can be None (the item itself changes), an attribute name like
``'.attr'``, a function, or a dict key or list index.
"""
if self.last:
return True
return self._compare_group(self.item, self.__next__, getter)
def _compare_group(self, item, other, getter):
if getter is None:
return item != other
elif (isinstance(getter, basestring_)
and getter.startswith('.')):
getter = getter[1:]
if getter.endswith('()'):
getter = getter[:-2]
return getattr(item, getter)() != getattr(other, getter)()
else:
return getattr(item, getter) != getattr(other, getter)
elif hasattr(getter, '__call__'):
return getter(item) != getter(other)
else:
return item[getter] != other[getter]
| loop_pos |
python | tensorflow__tensorflow | tensorflow/python/tpu/tpu_hardware_feature.py | {
"start": 892,
"end": 3041
} | class ____(object):
"""class holds all the feature info about the TPU."""
def __init__(self, tpu_hardware_feature_proto):
"""Store TPU hardware feature info.
Args:
tpu_hardware_feature_proto: protobuf which describe the tpu hardware
feature.
"""
self.tpu_hardware_feature_proto = tpu_hardware_feature_proto
class EmbeddingFeature(enum.Enum):
"""Embedding feature flag strings.
UNSUPPORTED: No embedding lookup accelerator available on the tpu.
V1: Embedding lookup accelerator V1. The embedding lookup operation can only
be placed at the beginning of computation. Only one instance of
embedding
lookup layer is allowed.
V2: Embedding lookup accelerator V2. The embedding lookup operation can be
placed anywhere of the computation. Multiple instances of embedding
lookup layer is allowed.
"""
UNSUPPORTED = "UNSUPPORTED"
V1 = "V1"
V2 = "V2"
@classmethod
def _embedding_feature_proto_to_string(cls, embedding_feature_proto):
"""Convert the embedding feature proto to enum string."""
embedding_feature_proto_to_string_map = {
topology_pb2.TPUHardwareFeature.EmbeddingFeature.UNSUPPORTED:
HardwareFeature.EmbeddingFeature.UNSUPPORTED,
topology_pb2.TPUHardwareFeature.EmbeddingFeature.V1:
HardwareFeature.EmbeddingFeature.V1,
topology_pb2.TPUHardwareFeature.EmbeddingFeature.V2:
HardwareFeature.EmbeddingFeature.V2
}
return embedding_feature_proto_to_string_map.get(
embedding_feature_proto, HardwareFeature.EmbeddingFeature.UNSUPPORTED)
@property
def embedding_feature(self):
"""TPU embedding feature.
Returns:
An EmbeddingFeature enum.
"""
return HardwareFeature._embedding_feature_proto_to_string(
self.tpu_hardware_feature_proto.embedding_feature)
@property
def num_embedding_devices_per_chip(self):
"""Number of embedding accelerator devices per chip.
Returns:
Number of embedding devices per chip.
"""
return self.tpu_hardware_feature_proto.num_embedding_devices_per_chip
| HardwareFeature |
python | kubernetes-client__python | kubernetes/client/models/v1_pod_ip.py | {
"start": 383,
"end": 3466
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'ip': 'str'
}
attribute_map = {
'ip': 'ip'
}
def __init__(self, ip=None, local_vars_configuration=None): # noqa: E501
"""V1PodIP - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._ip = None
self.discriminator = None
self.ip = ip
@property
def ip(self):
"""Gets the ip of this V1PodIP. # noqa: E501
IP is the IP address assigned to the pod # noqa: E501
:return: The ip of this V1PodIP. # noqa: E501
:rtype: str
"""
return self._ip
@ip.setter
def ip(self, ip):
"""Sets the ip of this V1PodIP.
IP is the IP address assigned to the pod # noqa: E501
:param ip: The ip of this V1PodIP. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and ip is None: # noqa: E501
raise ValueError("Invalid value for `ip`, must not be `None`") # noqa: E501
self._ip = ip
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PodIP):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PodIP):
return True
return self.to_dict() != other.to_dict()
| V1PodIP |
python | sympy__sympy | sympy/matrices/repmatrix.py | {
"start": 1100,
"end": 18762
} | class ____(MatrixBase):
"""Matrix implementation based on DomainMatrix as an internal representation.
The RepMatrix class is a superclass for Matrix, ImmutableMatrix,
SparseMatrix and ImmutableSparseMatrix which are the main usable matrix
classes in SymPy. Most methods on this class are simply forwarded to
DomainMatrix.
"""
#
# MatrixBase is the common superclass for all of the usable explicit matrix
# classes in SymPy. The idea is that MatrixBase is an abstract class though
# and that subclasses will implement the lower-level methods.
#
# RepMatrix is a subclass of MatrixBase that uses DomainMatrix as an
# internal representation and delegates lower-level methods to
# DomainMatrix. All of SymPy's standard explicit matrix classes subclass
# RepMatrix and so use DomainMatrix internally.
#
# A RepMatrix uses an internal DomainMatrix with the domain set to ZZ, QQ
# or EXRAW. The EXRAW domain is equivalent to the previous implementation
# of Matrix that used Expr for the elements. The ZZ and QQ domains are used
# when applicable just because they are compatible with the previous
# implementation but are much more efficient. Other domains such as QQ[x]
# are not used because they differ from Expr in some way (e.g. automatic
# expansion of powers and products).
#
_rep: DomainMatrix
@classmethod
@abstractmethod
def _fromrep(cls, rep):
raise NotImplementedError("Subclasses must implement this method")
def __eq__(self, other):
# Skip sympify for mutable matrices...
if not isinstance(other, RepMatrix):
try:
other = _sympify(other)
except SympifyError:
return NotImplemented
if not isinstance(other, RepMatrix):
return NotImplemented
return self._rep.unify_eq(other._rep)
def to_DM(self, domain=None, **kwargs):
"""Convert to a :class:`~.DomainMatrix`.
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 2], [3, 4]])
>>> M.to_DM()
DomainMatrix({0: {0: 1, 1: 2}, 1: {0: 3, 1: 4}}, (2, 2), ZZ)
The :meth:`DomainMatrix.to_Matrix` method can be used to convert back:
>>> M.to_DM().to_Matrix() == M
True
The domain can be given explicitly or otherwise it will be chosen by
:func:`construct_domain`. Any keyword arguments (besides ``domain``)
are passed to :func:`construct_domain`:
>>> from sympy import QQ, symbols
>>> x = symbols('x')
>>> M = Matrix([[x, 1], [1, x]])
>>> M
Matrix([
[x, 1],
[1, x]])
>>> M.to_DM().domain
ZZ[x]
>>> M.to_DM(field=True).domain
ZZ(x)
>>> M.to_DM(domain=QQ[x]).domain
QQ[x]
See Also
========
DomainMatrix
DomainMatrix.to_Matrix
DomainMatrix.convert_to
DomainMatrix.choose_domain
construct_domain
"""
if domain is not None:
if kwargs:
raise TypeError("Options cannot be used with domain parameter")
return self._rep.convert_to(domain)
rep = self._rep
dom = rep.domain
# If the internal DomainMatrix is already ZZ or QQ then we can maybe
# bypass calling construct_domain or performing any conversions. Some
# kwargs might affect this though e.g. field=True (not sure if there
# are others).
if not kwargs:
if dom.is_ZZ:
return rep.copy()
elif dom.is_QQ:
# All elements might be integers
try:
return rep.convert_to(ZZ)
except CoercionFailed:
pass
return rep.copy()
# Let construct_domain choose a domain
rep_dom = rep.choose_domain(**kwargs)
# XXX: There should be an option to construct_domain to choose EXRAW
# instead of EX. At least converting to EX does not initially trigger
# EX.simplify which is what we want here but should probably be
# considered a bug in EX. Perhaps also this could be handled in
# DomainMatrix.choose_domain rather than here...
if rep_dom.domain.is_EX:
rep_dom = rep_dom.convert_to(EXRAW)
return rep_dom
@classmethod
def _unify_element_sympy(cls, rep, element):
domain = rep.domain
element = _sympify(element)
if domain != EXRAW:
# The domain can only be ZZ, QQ or EXRAW
if element.is_Integer:
new_domain = domain
elif element.is_Rational:
new_domain = QQ
else:
new_domain = EXRAW
# XXX: This converts the domain for all elements in the matrix
# which can be slow. This happens e.g. if __setitem__ changes one
# element to something that does not fit in the domain
if new_domain != domain:
rep = rep.convert_to(new_domain)
domain = new_domain
if domain != EXRAW:
element = new_domain.from_sympy(element)
if domain == EXRAW and not isinstance(element, Expr):
sympy_deprecation_warning(
"""
non-Expr objects in a Matrix is deprecated. Matrix represents
a mathematical matrix. To represent a container of non-numeric
entities, Use a list of lists, TableForm, NumPy array, or some
other data structure instead.
""",
deprecated_since_version="1.9",
active_deprecations_target="deprecated-non-expr-in-matrix",
stacklevel=4,
)
return rep, element
@classmethod
def _dod_to_DomainMatrix(cls, rows, cols, dod, types):
if not all(issubclass(typ, Expr) for typ in types):
sympy_deprecation_warning(
"""
non-Expr objects in a Matrix is deprecated. Matrix represents
a mathematical matrix. To represent a container of non-numeric
entities, Use a list of lists, TableForm, NumPy array, or some
other data structure instead.
""",
deprecated_since_version="1.9",
active_deprecations_target="deprecated-non-expr-in-matrix",
stacklevel=6,
)
rep = DomainMatrix(dod, (rows, cols), EXRAW)
if all(issubclass(typ, Rational) for typ in types):
if all(issubclass(typ, Integer) for typ in types):
rep = rep.convert_to(ZZ)
else:
rep = rep.convert_to(QQ)
return rep
@classmethod
def _flat_list_to_DomainMatrix(cls, rows, cols, flat_list):
elements_dod = defaultdict(dict)
for n, element in enumerate(flat_list):
if element != 0:
i, j = divmod(n, cols)
elements_dod[i][j] = element
types = set(map(type, flat_list))
rep = cls._dod_to_DomainMatrix(rows, cols, elements_dod, types)
return rep
@classmethod
def _smat_to_DomainMatrix(cls, rows, cols, smat):
elements_dod = defaultdict(dict)
for (i, j), element in smat.items():
if element != 0:
elements_dod[i][j] = element
types = set(map(type, smat.values()))
rep = cls._dod_to_DomainMatrix(rows, cols, elements_dod, types)
return rep
def flat(self):
return self._rep.to_sympy().to_list_flat()
def _eval_tolist(self):
return self._rep.to_sympy().to_list()
def _eval_todok(self):
return self._rep.to_sympy().to_dok()
@classmethod
def _eval_from_dok(cls, rows, cols, dok):
return cls._fromrep(cls._smat_to_DomainMatrix(rows, cols, dok))
def _eval_values(self):
return list(self._eval_iter_values())
def _eval_iter_values(self):
rep = self._rep
K = rep.domain
values = rep.iter_values()
if not K.is_EXRAW:
values = map(K.to_sympy, values)
return values
def _eval_iter_items(self):
rep = self._rep
K = rep.domain
to_sympy = K.to_sympy
items = rep.iter_items()
if not K.is_EXRAW:
items = ((i, to_sympy(v)) for i, v in items)
return items
def copy(self):
return self._fromrep(self._rep.copy())
@property
def kind(self) -> MatrixKind:
domain = self._rep.domain
element_kind: Kind
if domain in (ZZ, QQ):
element_kind = NumberKind
elif domain == EXRAW:
kinds = {e.kind for e in self.values()}
if len(kinds) == 1:
[element_kind] = kinds
else:
element_kind = UndefinedKind
else: # pragma: no cover
raise RuntimeError("Domain should only be ZZ, QQ or EXRAW")
return MatrixKind(element_kind)
def _eval_has(self, *patterns):
# if the matrix has any zeros, see if S.Zero
# has the pattern. If _smat is full length,
# the matrix has no zeros.
zhas = False
dok = self.todok()
if len(dok) != self.rows*self.cols:
zhas = S.Zero.has(*patterns)
return zhas or any(value.has(*patterns) for value in dok.values())
def _eval_is_Identity(self):
if not all(self[i, i] == 1 for i in range(self.rows)):
return False
return len(self.todok()) == self.rows
def _eval_is_symmetric(self, simpfunc):
diff = (self - self.T).applyfunc(simpfunc)
return len(diff.values()) == 0
def _eval_transpose(self):
"""Returns the transposed SparseMatrix of this SparseMatrix.
Examples
========
>>> from sympy import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.T
Matrix([
[1, 3],
[2, 4]])
"""
return self._fromrep(self._rep.transpose())
def _eval_col_join(self, other):
return self._fromrep(self._rep.vstack(other._rep))
def _eval_row_join(self, other):
return self._fromrep(self._rep.hstack(other._rep))
def _eval_extract(self, rowsList, colsList):
return self._fromrep(self._rep.extract(rowsList, colsList))
@overload
def __getitem__(self, key: tuple[int, int], /) -> Expr: ...
@overload
def __getitem__(self, key: tuple[int, Slice], /) -> Self: ...
@overload
def __getitem__(self, key: tuple[Slice, int], /) -> Self: ...
@overload
def __getitem__(self, key: tuple[Slice, Slice], /) -> Self: ...
@overload
def __getitem__(self, key: int, /) -> Expr: ...
@overload
def __getitem__(self, key: slice, /) -> list[Expr]: ...
def __getitem__(self, key: tuple[int | Slice, int | Slice] | int | slice, /
) -> Expr | Self | list[Expr]:
return _getitem_RepMatrix(self, key)
@classmethod
def _eval_zeros(cls, rows, cols):
rep = DomainMatrix.zeros((rows, cols), ZZ)
return cls._fromrep(rep)
@classmethod
def _eval_eye(cls, rows, cols):
rep = DomainMatrix.eye((rows, cols), ZZ)
return cls._fromrep(rep)
def _eval_add(self, other):
return classof(self, other)._fromrep(self._rep + other._rep)
def _eval_matrix_mul(self, other: RepMatrix): # type: ignore
return classof(self, other)._fromrep(self._rep * other._rep)
def _eval_matrix_mul_elementwise(self, other: RepMatrix): # type: ignore
selfrep, otherrep = self._rep.unify(other._rep)
newrep = selfrep.mul_elementwise(otherrep)
return classof(self, other)._fromrep(newrep)
def _eval_scalar_mul(self, other):
rep, other = self._unify_element_sympy(self._rep, other)
return self._fromrep(rep.scalarmul(other))
def _eval_scalar_rmul(self, other):
rep, other = self._unify_element_sympy(self._rep, other)
return self._fromrep(rep.rscalarmul(other))
def _eval_Abs(self):
return self._fromrep(self._rep.applyfunc(abs))
def _eval_conjugate(self):
rep = self._rep
domain = rep.domain
if domain in (ZZ, QQ):
return self.copy()
else:
return self._fromrep(rep.applyfunc(lambda e: e.conjugate()))
def equals(self, other, failing_expression=False):
"""Applies ``equals`` to corresponding elements of the matrices,
trying to prove that the elements are equivalent, returning True
if they are, False if any pair is not, and None (or the first
failing expression if failing_expression is True) if it cannot
be decided if the expressions are equivalent or not. This is, in
general, an expensive operation.
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x
>>> A = Matrix([x*(x - 1), 0])
>>> B = Matrix([x**2 - x, 0])
>>> A == B
False
>>> A.simplify() == B.simplify()
True
>>> A.equals(B)
True
>>> A.equals(2)
False
See Also
========
sympy.core.expr.Expr.equals
"""
if self.shape != getattr(other, 'shape', None):
return False
rv = True
for i in range(self.rows):
for j in range(self.cols):
ans = self[i, j].equals(other[i, j], failing_expression)
if ans is False:
return False
elif ans is not True and rv is True:
rv = ans
return rv
def inv_mod(M, m):
r"""
Returns the inverse of the integer matrix ``M`` modulo ``m``.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix(2, 2, [1, 2, 3, 4])
>>> A.inv_mod(5)
Matrix([
[3, 1],
[4, 2]])
>>> A.inv_mod(3)
Matrix([
[1, 1],
[0, 1]])
"""
if not M.is_square:
raise NonSquareMatrixError()
try:
m = as_int(m)
except ValueError:
raise TypeError("inv_mod: modulus m must be an integer")
K = GF(m, symmetric=False)
try:
dM = M.to_DM(K)
except CoercionFailed:
raise ValueError("inv_mod: matrix entries must be integers")
if K.is_Field:
try:
dMi = dM.inv()
except DMNonInvertibleMatrixError as exc:
msg = f'Matrix is not invertible (mod {m})'
raise NonInvertibleMatrixError(msg) from exc
else:
dMadj, det = dM.adj_det()
try:
detinv = 1 / det
except NotInvertible:
msg = f'Matrix is not invertible (mod {m})'
raise NonInvertibleMatrixError(msg)
dMi = dMadj * detinv
return dMi.to_Matrix()
def lll(self, delta=0.75):
"""LLL-reduced basis for the rowspace of a matrix of integers.
Performs the Lenstra–Lenstra–Lovász (LLL) basis reduction algorithm.
The implementation is provided by :class:`~DomainMatrix`. See
:meth:`~DomainMatrix.lll` for more details.
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 0, 0, 0, -20160],
... [0, 1, 0, 0, 33768],
... [0, 0, 1, 0, 39578],
... [0, 0, 0, 1, 47757]])
>>> M.lll()
Matrix([
[ 10, -3, -2, 8, -4],
[ 3, -9, 8, 1, -11],
[ -3, 13, -9, -3, -9],
[-12, -7, -11, 9, -1]])
See Also
========
lll_transform
sympy.polys.matrices.domainmatrix.DomainMatrix.lll
"""
delta = QQ.from_sympy(_sympify(delta))
dM = self._rep.convert_to(ZZ)
basis = dM.lll(delta=delta)
return self._fromrep(basis)
def lll_transform(self, delta=0.75):
"""LLL-reduced basis and transformation matrix.
Performs the Lenstra–Lenstra–Lovász (LLL) basis reduction algorithm.
The implementation is provided by :class:`~DomainMatrix`. See
:meth:`~DomainMatrix.lll_transform` for more details.
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 0, 0, 0, -20160],
... [0, 1, 0, 0, 33768],
... [0, 0, 1, 0, 39578],
... [0, 0, 0, 1, 47757]])
>>> B, T = M.lll_transform()
>>> B
Matrix([
[ 10, -3, -2, 8, -4],
[ 3, -9, 8, 1, -11],
[ -3, 13, -9, -3, -9],
[-12, -7, -11, 9, -1]])
>>> T
Matrix([
[ 10, -3, -2, 8],
[ 3, -9, 8, 1],
[ -3, 13, -9, -3],
[-12, -7, -11, 9]])
The transformation matrix maps the original basis to the LLL-reduced
basis:
>>> T * M == B
True
See Also
========
lll
sympy.polys.matrices.domainmatrix.DomainMatrix.lll_transform
"""
delta = QQ.from_sympy(_sympify(delta))
dM = self._rep.convert_to(ZZ)
basis, transform = dM.lll_transform(delta=delta)
B = self._fromrep(basis)
T = self._fromrep(transform)
return B, T
| RepMatrix |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1400409,
"end": 1402259
} | class ____(VegaLiteSchema):
"""
TimeLocale schema wrapper.
Locale definition for formatting dates and times.
Parameters
----------
date : str
The date (%x) format specifier (e.g., "%m/%d/%Y").
dateTime : str
The date and time (%c) format specifier (e.g., "%a %b %e %X %Y").
days : Sequence[str], :class:`Vector7string`
The full names of the weekdays, starting with Sunday.
months : Sequence[str], :class:`Vector12string`
The full names of the months (starting with January).
periods : Sequence[str], :class:`Vector2string`
The A.M. and P.M. equivalents (e.g., ["AM", "PM"]).
shortDays : Sequence[str], :class:`Vector7string`
The abbreviated names of the weekdays, starting with Sunday.
shortMonths : Sequence[str], :class:`Vector12string`
The abbreviated names of the months (starting with January).
time : str
The time (%X) format specifier (e.g., "%H:%M:%S").
"""
_schema = {"$ref": "#/definitions/TimeLocale"}
def __init__(
self,
date: Optional[str] = Undefined,
dateTime: Optional[str] = Undefined,
days: Optional[SchemaBase | Sequence[str]] = Undefined,
months: Optional[SchemaBase | Sequence[str]] = Undefined,
periods: Optional[SchemaBase | Sequence[str]] = Undefined,
shortDays: Optional[SchemaBase | Sequence[str]] = Undefined,
shortMonths: Optional[SchemaBase | Sequence[str]] = Undefined,
time: Optional[str] = Undefined,
**kwds,
):
super().__init__(
date=date,
dateTime=dateTime,
days=days,
months=months,
periods=periods,
shortDays=shortDays,
shortMonths=shortMonths,
time=time,
**kwds,
)
| TimeLocale |
python | ansible__ansible | lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/test/tagged_with.py | {
"start": 330,
"end": 453
} | class ____:
@staticmethod
def tests() -> dict[str, t.Callable]:
return dict(tagged_with=tagged_with)
| TestModule |
python | cookiecutter__cookiecutter | cookiecutter/exceptions.py | {
"start": 3899,
"end": 4064
} | class ____(CookiecutterException):
"""
Exception for un-cloneable repo.
Raised when a cookiecutter template can't be cloned.
"""
| RepositoryCloneFailed |
python | pytest-dev__pytest | testing/test_config.py | {
"start": 63080,
"end": 71855
} | class ____:
def test_simple_noini(self, tmp_path: Path, monkeypatch: MonkeyPatch) -> None:
assert get_common_ancestor(Path.cwd(), [tmp_path]) == tmp_path
a = tmp_path / "a"
a.mkdir()
assert get_common_ancestor(Path.cwd(), [a, tmp_path]) == tmp_path
assert get_common_ancestor(Path.cwd(), [tmp_path, a]) == tmp_path
monkeypatch.chdir(tmp_path)
assert get_common_ancestor(Path.cwd(), []) == tmp_path
no_path = tmp_path / "does-not-exist"
assert get_common_ancestor(Path.cwd(), [no_path]) == tmp_path
assert get_common_ancestor(Path.cwd(), [no_path / "a"]) == tmp_path
@pytest.mark.parametrize(
"name, contents",
[
pytest.param("pytest.ini", "[pytest]\nx=10", id="pytest.ini"),
pytest.param(
"pyproject.toml", "[tool.pytest.ini_options]\nx=10", id="pyproject.toml"
),
pytest.param("tox.ini", "[pytest]\nx=10", id="tox.ini"),
pytest.param("setup.cfg", "[tool:pytest]\nx=10", id="setup.cfg"),
],
)
def test_with_ini(self, tmp_path: Path, name: str, contents: str) -> None:
inipath = tmp_path / name
inipath.write_text(contents, encoding="utf-8")
a = tmp_path / "a"
a.mkdir()
b = a / "b"
b.mkdir()
for args in ([str(tmp_path)], [str(a)], [str(b)]):
rootpath, parsed_inipath, *_ = determine_setup(
inifile=None,
override_ini=None,
args=args,
rootdir_cmd_arg=None,
invocation_dir=Path.cwd(),
)
assert rootpath == tmp_path
assert parsed_inipath == inipath
rootpath, parsed_inipath, ini_config, _ = determine_setup(
inifile=None,
override_ini=None,
args=[str(b), str(a)],
rootdir_cmd_arg=None,
invocation_dir=Path.cwd(),
)
assert rootpath == tmp_path
assert parsed_inipath == inipath
assert ini_config["x"] == ConfigValue("10", origin="file", mode="ini")
@pytest.mark.parametrize("pytest_ini", ["pytest.ini", ".pytest.ini"])
@pytest.mark.parametrize("other", ["setup.cfg", "tox.ini"])
def test_pytestini_overrides_empty_other(
self, tmp_path: Path, pytest_ini: str, other: str
) -> None:
inipath = tmp_path / pytest_ini
inipath.touch()
a = tmp_path / "a"
a.mkdir()
(a / other).touch()
rootpath, parsed_inipath, *_ = determine_setup(
inifile=None,
override_ini=None,
args=[str(a)],
rootdir_cmd_arg=None,
invocation_dir=Path.cwd(),
)
assert rootpath == tmp_path
assert parsed_inipath == inipath
def test_setuppy_fallback(self, tmp_path: Path) -> None:
a = tmp_path / "a"
a.mkdir()
(a / "setup.cfg").touch()
(tmp_path / "setup.py").touch()
rootpath, inipath, inicfg, _ = determine_setup(
inifile=None,
override_ini=None,
args=[str(a)],
rootdir_cmd_arg=None,
invocation_dir=Path.cwd(),
)
assert rootpath == tmp_path
assert inipath is None
assert inicfg == {}
def test_nothing(self, tmp_path: Path, monkeypatch: MonkeyPatch) -> None:
monkeypatch.chdir(tmp_path)
rootpath, inipath, inicfg, _ = determine_setup(
inifile=None,
override_ini=None,
args=[str(tmp_path)],
rootdir_cmd_arg=None,
invocation_dir=Path.cwd(),
)
assert rootpath == tmp_path
assert inipath is None
assert inicfg == {}
@pytest.mark.parametrize(
"name, contents",
[
# pytest.param("pytest.ini", "[pytest]\nx=10", id="pytest.ini"),
pytest.param(
"pyproject.toml", "[tool.pytest.ini_options]\nx=10", id="pyproject.toml"
),
# pytest.param("tox.ini", "[pytest]\nx=10", id="tox.ini"),
# pytest.param("setup.cfg", "[tool:pytest]\nx=10", id="setup.cfg"),
],
)
def test_with_specific_inifile(
self, tmp_path: Path, name: str, contents: str
) -> None:
p = tmp_path / name
p.touch()
p.write_text(contents, encoding="utf-8")
rootpath, inipath, ini_config, _ = determine_setup(
inifile=str(p),
override_ini=None,
args=[str(tmp_path)],
rootdir_cmd_arg=None,
invocation_dir=Path.cwd(),
)
assert rootpath == tmp_path
assert inipath == p
assert ini_config["x"] == ConfigValue("10", origin="file", mode="ini")
def test_explicit_config_file_sets_rootdir(
self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
) -> None:
tests_dir = tmp_path / "tests"
tests_dir.mkdir()
monkeypatch.chdir(tmp_path)
# No config file is explicitly given: rootdir is determined to be cwd.
rootpath, found_inipath, *_ = determine_setup(
inifile=None,
override_ini=None,
args=[str(tests_dir)],
rootdir_cmd_arg=None,
invocation_dir=Path.cwd(),
)
assert rootpath == tmp_path
assert found_inipath is None
# Config file is explicitly given: rootdir is determined to be inifile's directory.
inipath = tmp_path / "pytest.ini"
inipath.touch()
rootpath, found_inipath, *_ = determine_setup(
inifile=str(inipath),
override_ini=None,
args=[str(tests_dir)],
rootdir_cmd_arg=None,
invocation_dir=Path.cwd(),
)
assert rootpath == tmp_path
assert found_inipath == inipath
def test_with_arg_outside_cwd_without_inifile(
self, tmp_path: Path, monkeypatch: MonkeyPatch
) -> None:
monkeypatch.chdir(tmp_path)
a = tmp_path / "a"
a.mkdir()
b = tmp_path / "b"
b.mkdir()
rootpath, inifile, *_ = determine_setup(
inifile=None,
override_ini=None,
args=[str(a), str(b)],
rootdir_cmd_arg=None,
invocation_dir=Path.cwd(),
)
assert rootpath == tmp_path
assert inifile is None
def test_with_arg_outside_cwd_with_inifile(self, tmp_path: Path) -> None:
a = tmp_path / "a"
a.mkdir()
b = tmp_path / "b"
b.mkdir()
inipath = a / "pytest.ini"
inipath.touch()
rootpath, parsed_inipath, *_ = determine_setup(
inifile=None,
override_ini=None,
args=[str(a), str(b)],
rootdir_cmd_arg=None,
invocation_dir=Path.cwd(),
)
assert rootpath == a
assert inipath == parsed_inipath
@pytest.mark.parametrize("dirs", ([], ["does-not-exist"], ["a/does-not-exist"]))
def test_with_non_dir_arg(
self, dirs: Sequence[str], tmp_path: Path, monkeypatch: MonkeyPatch
) -> None:
monkeypatch.chdir(tmp_path)
rootpath, inipath, *_ = determine_setup(
inifile=None,
override_ini=None,
args=dirs,
rootdir_cmd_arg=None,
invocation_dir=Path.cwd(),
)
assert rootpath == tmp_path
assert inipath is None
def test_with_existing_file_in_subdir(
self, tmp_path: Path, monkeypatch: MonkeyPatch
) -> None:
a = tmp_path / "a"
a.mkdir()
(a / "exists").touch()
monkeypatch.chdir(tmp_path)
rootpath, inipath, *_ = determine_setup(
inifile=None,
override_ini=None,
args=["a/exist"],
rootdir_cmd_arg=None,
invocation_dir=Path.cwd(),
)
assert rootpath == tmp_path
assert inipath is None
def test_with_config_also_in_parent_directory(
self, tmp_path: Path, monkeypatch: MonkeyPatch
) -> None:
"""Regression test for #7807."""
(tmp_path / "setup.cfg").write_text("[tool:pytest]\n", "utf-8")
(tmp_path / "myproject").mkdir()
(tmp_path / "myproject" / "setup.cfg").write_text("[tool:pytest]\n", "utf-8")
(tmp_path / "myproject" / "tests").mkdir()
monkeypatch.chdir(tmp_path / "myproject")
rootpath, inipath, *_ = determine_setup(
inifile=None,
override_ini=None,
args=["tests/"],
rootdir_cmd_arg=None,
invocation_dir=Path.cwd(),
)
assert rootpath == tmp_path / "myproject"
assert inipath == tmp_path / "myproject" / "setup.cfg"
| TestRootdir |
python | doocs__leetcode | solution/1000-1099/1071.Greatest Common Divisor of Strings/Solution2.py | {
"start": 0,
"end": 196
} | class ____:
def gcdOfStrings(self, str1: str, str2: str) -> str:
if str1 + str2 != str2 + str1:
return ''
n = gcd(len(str1), len(str2))
return str1[:n]
| Solution |
python | walkccc__LeetCode | solutions/2215. Find the Difference of Two Arrays/2215.py | {
"start": 0,
"end": 202
} | class ____:
def findDifference(self, nums1: list[int],
nums2: list[int]) -> list[list[int]]:
set1 = set(nums1)
set2 = set(nums2)
return [set1 - set2, set2 - set1]
| Solution |
python | openai__openai-python | tests/test_transform.py | {
"start": 4323,
"end": 4755
} | class ____(TypedDict, total=False):
foo: Annotated[datetime, PropertyInfo(format="iso8601")]
bar: Annotated[Optional[datetime], PropertyInfo(format="iso8601")]
required: Required[Annotated[Optional[datetime], PropertyInfo(format="iso8601")]]
list_: Required[Annotated[Optional[List[datetime]], PropertyInfo(format="iso8601")]]
union: Annotated[Union[int, datetime], PropertyInfo(format="iso8601")]
| DatetimeDict |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/orchestrator/orchestrator/models/metadata.py | {
"start": 1136,
"end": 1319
} | class ____:
def __getitem__(self, key: str):
return self.__dict__[key]
def __setitem__(self, key: str, value: Any):
self.__dict__[key] = value
| PydanticDictMixin |
python | scipy__scipy | scipy/signal/tests/test_filter_design.py | {
"start": 44831,
"end": 55165
} | class ____:
def test_freqz_sos_basic(self, xp):
# Compare the results of freqz and freqz_sos for a low order
# Butterworth filter.
N = 500
b, a = butter(4, 0.2)
sos = butter(4, 0.2, output='sos')
w, h = freqz(b, a, worN=N)
w, h, sos = map(xp.asarray, (w, h, sos))
w2, h2 = freqz_sos(sos, worN=N)
xp_assert_close(w2, w, rtol=1e-15)
xp_assert_close(h2, h, rtol=1e-10, atol=1e-14)
b, a = ellip(3, 1, 30, (0.2, 0.3), btype='bandpass')
sos = ellip(3, 1, 30, (0.2, 0.3), btype='bandpass', output='sos')
w, h = freqz(b, a, worN=N)
w, h, sos = map(xp.asarray, (w, h, sos))
w2, h2 = freqz_sos(sos, worN=N)
xp_assert_close(w2, w, rtol=1e-15)
xp_assert_close(h2, h, rtol=1e-10, atol=1e-14)
# must have at least one section
with assert_raises(ValueError):
freqz_sos(sos[:0, ...])
def test_backward_compat(self, xp):
# For backward compatibility, test if None act as a wrapper for default
N = 500
sos = butter(4, 0.2, output='sos')
w2, h2 = sosfreqz(sos, worN=N)
sos, w2, h2 = map(xp.asarray, (sos, w2, h2))
w1, h1 = freqz_sos(sos, worN=N)
assert_array_almost_equal(w1, w2)
assert_array_almost_equal(h1, h2)
@skip_xp_backends("dask.array", reason="float cannot be interpreted as an integer")
def test_freqz_sos_design(self, xp):
# Compare freqz_sos output against expected values for different
# filter types
# TODO: split into multiple tests, or parameterize across filter types
# in some way.
# from cheb2ord
N, Wn = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60)
sos = cheby2(N, 60, Wn, 'stop', output='sos')
sos = xp.asarray(sos) # XXX
zero = xp.asarray(0., dtype=xp.float64)
w, h = freqz_sos(sos)
h = xp.abs(h)
w = w / xp.pi
xp_assert_close(20 * xp.log10(h[w <= 0.1]),
zero, atol=3.01,
check_shape=False)
xp_assert_close(20 * xp.log10(h[w >= 0.6]),
zero, atol=3.01,
check_shape=False)
xp_assert_close(h[(w >= 0.2) & (w <= 0.5)],
zero, atol=1e-3,
check_shape=False) # <= -60 dB
N, Wn = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 150)
sos = cheby2(N, 150, Wn, 'stop', output='sos')
sos = xp.asarray(sos)
w, h = freqz_sos(sos)
dB = 20*xp.log10(xp.abs(h))
w = w / xp.pi
xp_assert_close(dB[w <= 0.1], zero, atol=3.01, check_shape=False)
xp_assert_close(dB[w >= 0.6], zero, atol=3.01, check_shape=False)
assert xp.all(dB[(w >= 0.2) & (w <= 0.5)] < -149.9)
# from cheb1ord
N, Wn = cheb1ord(0.2, 0.3, 3, 40)
sos = cheby1(N, 3, Wn, 'low', output='sos')
sos = xp.asarray(sos)
w, h = freqz_sos(sos)
h = xp.abs(h)
w = w / xp.pi
xp_assert_close(20 * xp.log10(h[w <= 0.2]), zero, atol=3.01,
check_shape=False)
xp_assert_close(h[w >= 0.3], zero, atol=1e-2,
check_shape=False) # <= -40 dB
N, Wn = cheb1ord(0.2, 0.3, 1, 150)
sos = cheby1(N, 1, Wn, 'low', output='sos')
sos = xp.asarray(sos)
w, h = freqz_sos(sos)
dB = 20*xp.log10(xp.abs(h))
w /= np.pi
xp_assert_close(dB[w <= 0.2], zero, atol=1.01, check_shape=False)
assert xp.all(dB[w >= 0.3] < -149.9)
# adapted from ellipord
N, Wn = ellipord(0.3, 0.2, 3, 60)
sos = ellip(N, 0.3, 60, Wn, 'high', output='sos')
sos = xp.asarray(sos)
w, h = freqz_sos(sos)
h = xp.abs(h)
w = w / xp.pi
xp_assert_close(20 * xp.log10(h[w >= 0.3]), zero, atol=3.01,
check_shape=False)
xp_assert_close(h[w <= 0.1], zero, atol=1.5e-3,
check_shape=False) # <= -60 dB (approx)
# adapted from buttord
N, Wn = buttord([0.2, 0.5], [0.14, 0.6], 3, 40)
sos = butter(N, Wn, 'band', output='sos')
sos = xp.asarray(sos)
w, h = freqz_sos(sos)
h = xp.abs(h)
w = w / xp.pi
h014 = h[w <= 0.14]
xp_assert_close(h014, xp.zeros_like(h014), atol=1e-2) # <= -40 dB
h06 = h[w >= 0.6]
xp_assert_close(h06, xp.zeros_like(h06), atol=1e-2) # <= -40 dB
h0205 = 20 * xp.log10(h[(w >= 0.2) & (w <= 0.5)])
xp_assert_close(h0205, xp.zeros_like(h0205), atol=3.01)
N, Wn = buttord([0.2, 0.5], [0.14, 0.6], 3, 100)
sos = butter(N, Wn, 'band', output='sos')
sos = xp.asarray(sos)
w, h = freqz_sos(sos)
dB = 20*xp.log10(xp.maximum(xp.abs(h), xp.asarray(1e-10)))
w = w / xp.pi
assert xp.all(dB[(w > 0) & (w <= 0.14)] < -99.9)
assert xp.all(dB[w >= 0.6] < -99.9)
db0205 = dB[(w >= 0.2) & (w <= 0.5)]
xp_assert_close(db0205, xp.zeros_like(db0205), atol=3.01)
def test_freqz_sos_design_ellip(self, xp):
N, Wn = ellipord(0.3, 0.1, 3, 60)
sos = ellip(N, 0.3, 60, Wn, 'high', output='sos')
sos = xp.asarray(sos)
w, h = freqz_sos(sos)
h = xp.abs(h)
w = w / xp.pi
h03 = 20 * xp.log10(h[w >= 0.3])
xp_assert_close(h03, xp.zeros_like(h03), atol=3.01)
h01 = h[w <= 0.1]
xp_assert_close(h01, xp.zeros_like(h01), atol=1.5e-3) # <= -60 dB (approx)
N, Wn = ellipord(0.3, 0.2, .5, 150)
sos = ellip(N, .5, 150, Wn, 'high', output='sos')
sos = xp.asarray(sos)
w, h = freqz_sos(sos)
dB = 20*xp.log10(xp.maximum(xp.abs(h), xp.asarray(1e-10)))
w = w / xp.pi
db03 = dB[w >= 0.3]
xp_assert_close(db03, xp.zeros_like(db03), atol=.55)
# Allow some numerical slop in the upper bound -150, so this is
# a check that dB[w <= 0.2] is less than or almost equal to -150.
assert xp.max(dB[w <= 0.2]) < -150*(1 - 1e-12)
@pytest.mark.thread_unsafe(
reason=("mpmath gmpy2 backend is not thread-safe, "
"see https://github.com/mpmath/mpmath/issues/974"))
@mpmath_check("0.10")
def test_freqz_sos_against_mp(self, xp):
# Compare the result of freqz_sos applied to a high order Butterworth
# filter against the result computed using mpmath. (signal.freqz fails
# miserably with such high order filters.)
from . import mpsig
N = 500
order = 25
Wn = 0.15
with mpmath.workdps(80):
z_mp, p_mp, k_mp = mpsig.butter_lp(order, Wn)
w_mp, h_mp = mpsig.zpkfreqz(z_mp, p_mp, k_mp, N)
w_mp = xp.asarray([float(x) for x in w_mp], dtype=xp.float64)
h_mp = xp.asarray([complex(x) for x in h_mp], dtype=xp.complex128)
sos = butter(order, Wn, output='sos')
sos = xp.asarray(sos, dtype=xp.float64)
w, h = freqz_sos(sos, worN=N)
xp_assert_close(w, w_mp, rtol=1e-12, atol=1e-14)
xp_assert_close(h, h_mp, rtol=1e-12, atol=1e-14)
def test_fs_param(self, xp):
fs = 900
sos = xp.asarray(
[[0.03934683014103762, 0.07869366028207524, 0.03934683014103762,
1.0, -0.37256600288916636, 0.0],
[1.0, 1.0, 0.0, 1.0, -0.9495739996946778, 0.45125966317124144]]
)
# N = None, whole=False
w1, h1 = freqz_sos(sos, fs=fs)
w2, h2 = freqz_sos(sos)
xp_assert_close(h1, h2)
xp_assert_close(w1, xp.linspace(0, fs/2, 512, endpoint=False))
# N = None, whole=True
w1, h1 = freqz_sos(sos, whole=True, fs=fs)
w2, h2 = freqz_sos(sos, whole=True)
xp_assert_close(h1, h2, atol=1e-27)
xp_assert_close(w1, xp.linspace(0, fs, 512, endpoint=False))
# N = 5, whole=False
w1, h1 = freqz_sos(sos, 5, fs=fs)
w2, h2 = freqz_sos(sos, 5)
xp_assert_close(h1, h2)
xp_assert_close(w1, xp.linspace(0, fs/2, 5, endpoint=False))
# N = 5, whole=True
w1, h1 = freqz_sos(sos, 5, whole=True, fs=fs)
w2, h2 = freqz_sos(sos, 5, whole=True)
xp_assert_close(h1, h2)
xp_assert_close(w1, xp.linspace(0, fs, 5, endpoint=False))
@skip_xp_backends(np_only=True, reason="array-likes")
def test_fs_param2(self, xp):
fs = 900
sos = xp.asarray(
[[0.03934683014103762, 0.07869366028207524, 0.03934683014103762,
1.0, -0.37256600288916636, 0.0],
[1.0, 1.0, 0.0, 1.0, -0.9495739996946778, 0.45125966317124144]]
)
# w is an array_like
for w in ([123], (123,), xp.asarray([123]), (50, 123, 230),
xp.asarray([50, 123, 230])):
w1, h1 = freqz_sos(sos, w, fs=fs)
w1, h1 = map(xp.asarray, (w1, h1))
w2, h2 = freqz_sos(sos, 2*pi*xp.asarray(w, dtype=sos.dtype)/fs)
xp_assert_close(h1, h2)
xp_assert_close(w, w1, check_dtype=False)
def test_w_or_N_types(self):
# Measure at 7 (polyval) or 8 (fft) equally-spaced points
for N in (7, np.int8(7), np.int16(7), np.int32(7), np.int64(7),
np.array(7),
8, np.int8(8), np.int16(8), np.int32(8), np.int64(8),
np.array(8)):
w, h = freqz_sos([1, 0, 0, 1, 0, 0], worN=N)
assert_array_almost_equal(w, np.pi * np.arange(N) / N)
assert_array_almost_equal(h, np.ones(N))
w, h = freqz_sos([1, 0, 0, 1, 0, 0], worN=N, fs=100)
assert_array_almost_equal(w, np.linspace(0, 50, N, endpoint=False))
assert_array_almost_equal(h, np.ones(N))
# Measure at frequency 8 Hz
for w in (8.0, 8.0+0j):
# Only makes sense when fs is specified
w_out, h = freqz_sos([1, 0, 0, 1, 0, 0], worN=w, fs=100)
assert_array_almost_equal(w_out, [8])
assert_array_almost_equal(h, [1])
def test_fs_validation(self):
sos = butter(4, 0.2, output='sos')
with pytest.raises(ValueError, match="Sampling.*single scalar"):
freqz_sos(sos, fs=np.array([10, 20]))
@make_xp_test_case(freqz_zpk)
| TestFreqz_sos |
python | doocs__leetcode | solution/0300-0399/0355.Design Twitter/Solution.py | {
"start": 0,
"end": 1833
} | class ____:
def __init__(self):
"""
Initialize your data structure here.
"""
self.user_tweets = defaultdict(list)
self.user_following = defaultdict(set)
self.tweets = defaultdict()
self.time = 0
def postTweet(self, userId: int, tweetId: int) -> None:
"""
Compose a new tweet.
"""
self.time += 1
self.user_tweets[userId].append(tweetId)
self.tweets[tweetId] = self.time
def getNewsFeed(self, userId: int) -> List[int]:
"""
Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.
"""
following = self.user_following[userId]
users = set(following)
users.add(userId)
tweets = [self.user_tweets[u][::-1][:10] for u in users]
tweets = sum(tweets, [])
return nlargest(10, tweets, key=lambda tweet: self.tweets[tweet])
def follow(self, followerId: int, followeeId: int) -> None:
"""
Follower follows a followee. If the operation is invalid, it should be a no-op.
"""
self.user_following[followerId].add(followeeId)
def unfollow(self, followerId: int, followeeId: int) -> None:
"""
Follower unfollows a followee. If the operation is invalid, it should be a no-op.
"""
following = self.user_following[followerId]
if followeeId in following:
following.remove(followeeId)
# Your Twitter object will be instantiated and called as such:
# obj = Twitter()
# obj.postTweet(userId,tweetId)
# param_2 = obj.getNewsFeed(userId)
# obj.follow(followerId,followeeId)
# obj.unfollow(followerId,followeeId)
| Twitter |
python | spack__spack | lib/spack/spack/package_base.py | {
"start": 107139,
"end": 107251
} | class ____(PackageError):
"""Superclass for all errors having to do with extension packages."""
| ExtensionError |
python | pyparsing__pyparsing | examples/simpleBool.py | {
"start": 983,
"end": 1225
} | class ____:
def __init__(self, t):
self.arg = t[0][1]
def __bool__(self) -> bool:
v = bool(self.arg)
return not v
def __str__(self) -> str:
return "~" + str(self.arg)
__repr__ = __str__
| BoolNot |
python | django__django | django/forms/fields.py | {
"start": 19459,
"end": 20476
} | class ____(Field):
default_error_messages = {
"invalid": _("Enter a valid duration."),
"overflow": _("The number of days must be between {min_days} and {max_days}."),
}
def prepare_value(self, value):
if isinstance(value, datetime.timedelta):
return duration_string(value)
return value
def to_python(self, value):
if value in self.empty_values:
return None
if isinstance(value, datetime.timedelta):
return value
try:
value = parse_duration(str(value))
except OverflowError:
raise ValidationError(
self.error_messages["overflow"].format(
min_days=datetime.timedelta.min.days,
max_days=datetime.timedelta.max.days,
),
code="overflow",
)
if value is None:
raise ValidationError(self.error_messages["invalid"], code="invalid")
return value
| DurationField |
python | PyCQA__bandit | bandit/core/utils.py | {
"start": 2644,
"end": 2909
} | class ____(Exception):
"""Raised when the config file fails validation."""
def __init__(self, message, config_file):
self.config_file = config_file
self.message = f"{config_file} : {message}"
super().__init__(self.message)
| ConfigError |
python | Textualize__textual | src/textual/widgets/_key_panel.py | {
"start": 3771,
"end": 5569
} | class ____(VerticalScroll, can_focus=False):
"""
Shows bindings for currently focused widget.
"""
DEFAULT_CSS = """
KeyPanel {
split: right;
width: 33%;
min-width: 30;
max-width: 60;
border-left: vkey $foreground 30%;
padding: 0 1;
height: 1fr;
padding-right: 1;
align: center top;
&> BindingsTable > .bindings-table--key {
color: $text-accent;
text-style: bold;
padding: 0 1;
}
&> BindingsTable > .bindings-table--description {
color: $foreground;
}
&> BindingsTable > .bindings-table--divider {
color: transparent;
}
&> BindingsTable > .bindings-table--header {
color: $text-primary;
text-style: underline;
}
#bindings-table {
width: auto;
height: auto;
}
}
"""
DEFAULT_CLASSES = "-textual-system"
def compose(self) -> ComposeResult:
yield BindingsTable(shrink=True, expand=False)
async def on_mount(self) -> None:
mount_screen = self.screen
async def bindings_changed(screen: Screen) -> None:
"""Update bindings."""
if not screen.app.app_focus:
return
if self.is_attached and screen is mount_screen:
await self.recompose()
def _bindings_changed(screen: Screen) -> None:
self.call_after_refresh(bindings_changed, screen)
self.set_class(self.app.ansi_color, "-ansi-scrollbar")
self.screen.bindings_updated_signal.subscribe(self, _bindings_changed)
def on_unmount(self) -> None:
self.screen.bindings_updated_signal.unsubscribe(self)
| KeyPanel |
python | kamyu104__LeetCode-Solutions | Python/check-if-two-expression-trees-are-equivalent.py | {
"start": 214,
"end": 1560
} | class ____(object):
def checkEquivalence(self, root1, root2):
"""
:type root1: Node
:type root2: Node
:rtype: bool
"""
def add_counter(counter, prev, d, val):
if val.isalpha():
counter[ord(val)-ord('a')] += d if prev[0] == '+' else -d
prev[0] = val
def morris_inorder_traversal(root, cb):
curr = root
while curr:
if curr.left is None:
cb(curr.val)
curr = curr.right
else:
node = curr.left
while node.right and node.right != curr:
node = node.right
if node.right is None:
node.right = curr
curr = curr.left
else:
cb(curr.val)
node.right = None
curr = curr.right
counter = collections.defaultdict(int)
morris_inorder_traversal(root1, functools.partial(add_counter, counter, ['+'], 1))
morris_inorder_traversal(root2, functools.partial(add_counter, counter, ['+'], -1))
return all(v == 0 for v in counter.itervalues())
# Time: O(n)
# Space: O(h)
import collections
import functools
| Solution |
python | pytorch__pytorch | torch/fx/graph_module.py | {
"start": 5385,
"end": 13310
} | class ____(torch.nn.Module):
def __init__(self, body):
super().__init__()
self.__dict__ = body
def _deserialize_graph_module(
forward, body: dict[Any, Any], graph_module_cls=None
) -> torch.nn.Module:
"""
Deserialize a GraphModule given the dictionary of the original module,
using the code to reconstruct the graph. We delete the actual graph before
saving the dictionary so that changes to the in-memory graph format do not
get serialized.
"""
# Try to retrieve the forward source in a backward-compatible way
_CodeOnlyModule.forward = forward
tracer_cls = body.get("_tracer_cls")
if tracer_cls is None:
from ._symbolic_trace import Tracer
tracer_cls = Tracer
graphmodule_cls_name = body.get("_graphmodule_cls_name", "GraphModule")
# This is a workaround for a mypy linter issue related to
# passing base class as an argument - https://github.com/python/mypy/issues/5865.
cls_tracer: Any = tracer_cls
class KeepModules(cls_tracer):
# we shouldn't trace into any of the submodules,
# because they were not traced in the original GraphModule
def is_leaf_module(self, _: torch.nn.Module, __: str) -> bool:
return True
com = _CodeOnlyModule(body)
tracer_extras = body.get("_tracer_extras", {})
graph = KeepModules().trace(com, **tracer_extras)
# Recover node.meta["stack_trace"] after re-tracing
node_meta_stack_trace = body.get("_graphmodule_graph_node_meta_stack_trace")
if node_meta_stack_trace is not None:
del body["_graphmodule_graph_node_meta_stack_trace"]
for node in graph.nodes:
if node_meta_stack_trace.get(node.name, None) is not None:
node.meta["stack_trace"] = node_meta_stack_trace[node.name]
# Manually set Tracer class on the reconstructed Graph, to avoid
# referencing the private local subclass KeepModules.
graph._tracer_cls = tracer_cls
from ._lazy_graph_module import _make_graph_module
gm = _make_graph_module(
com, graph, class_name=graphmodule_cls_name, graph_module_cls=graph_module_cls
)
# The GraphModule constructor only retains attributes referenced by the graph.
# In this case, our goal is return a GraphModule as close to identical as the one
# put into the package. If any additional attributes were present in body,
# we should keep them.
for k, v in body.items():
if not hasattr(gm, k):
setattr(gm, k, v)
return gm
# copy an attribute value with qualified name 'target' from 'from_module' to 'to_module'
# This installs empty Modules where none exist yet if they are subpaths of target
def _copy_attr(from_module: torch.nn.Module, to_module: torch.nn.Module, target: str):
*prefix, field = target.split(".")
for item in prefix:
f = getattr(from_module, item)
t = getattr(to_module, item, None)
if f is t:
# we have already installed one of its parents
# (e.g. target = root.linear.weight, but we have already installed root.linear)
# once we install a parent, we no longer need to copy the children
# since all the needed properties will already be present
return
if t is None:
t = torch.nn.Module()
setattr(to_module, item, t)
from_module, to_module = f, t
orig = getattr(from_module, field)
# If it is a tensor and not a parameter attribute of a module, it should be a named buffer.
# So, we register it as a named buffer in the target module.
if isinstance(orig, torch.Tensor) and not isinstance(orig, torch.nn.Parameter):
to_module.register_buffer(field, orig)
else:
setattr(to_module, field, orig)
# Assign attribute 'from_obj' to the qualified name 'target' on 'to_module
# This installs empty Modules where none exist yet if they are subpaths of target
def _assign_attr(from_obj: Any, to_module: torch.nn.Module, target: str):
*prefix, field = target.split(".")
for item in prefix:
t = getattr(to_module, item, None)
if t is None:
t = torch.nn.Module()
setattr(to_module, item, t)
to_module = t
# If it is a tensor and not a parameter attribute of a module, it should be a named buffer.
# So, we register it as a named buffer in the target module.
if isinstance(from_obj, torch.Tensor) and not isinstance(
from_obj, torch.nn.Parameter
):
to_module.register_buffer(field, from_obj)
else:
setattr(to_module, field, from_obj)
# Recursively look up target from a graph module.
def _get_attr(model: torch.nn.Module, attr_name: str):
return _get_attr_via_attr_list(model, attr_name.split("."))
def _del_attr(model: torch.nn.Module, attr_name: str):
attr_names = attr_name.split(".")
t = _get_attr_via_attr_list(model, attr_names[:-1])
return delattr(t, attr_names[-1])
def _get_attr_via_attr_list(model: torch.nn.Module, attr_list: list[str]):
if len(attr_list) == 0:
return model
*prefix, field = attr_list
t = model
for item in prefix:
t = getattr(t, item, None) # type: ignore[assignment]
assert t is not None
return getattr(t, field)
def _has_attr(model: torch.nn.Module, attr_name: str):
*prefix, field = attr_name.split(".")
t = model
for item in prefix:
t = hasattr(t, item) # type: ignore[assignment]
if t is False:
return False
return hasattr(t, field)
def _print_readable(
module,
module_name,
print_output=True,
include_stride=False,
include_device=False,
colored=False,
expanded_def=False,
):
graph = module.graph
assert graph is not None and isinstance(graph, torch.fx.Graph), (
"print_readable must be used on a module with a graph"
)
verbose_python_code = graph.python_code(
root_module="self",
verbose=True,
include_stride=include_stride,
include_device=include_device,
colored=colored,
expanded_def=expanded_def,
)
module_code = verbose_python_code.src
module_code = module_code.lstrip("\n")
module_code = f"class {module_name}(torch.nn.Module):\n" + module_code
module_code = _addindent(module_code, 4)
submodule_code_list = [""]
for submodule_name, submodule in module.named_children():
if hasattr(submodule, "graph"):
submodule_code_list.append(
_print_readable(
submodule,
submodule_name,
print_output=False,
include_stride=include_stride,
include_device=include_device,
colored=colored,
)
)
submodule_code = "\n".join(submodule_code_list)
submodule_code = _addindent(submodule_code, 4)
output = module_code + submodule_code
if print_output:
print(module_code + submodule_code)
return output
def _metadata_hash(code: str, node_metadata: dict) -> str:
"""
Create a content-addressed hash from code and metadata.
Args:
code: The source code string
lineno_map: Mapping from line numbers to node indices
node_metadata: Metadata for each node
Returns:
A 51-character base32-encoded hash
"""
import json
# Create a deterministic string representation of all components
# We use JSON to ensure consistent serialization
hash_data = {
"code": code,
"node_metadata": node_metadata,
}
hashing_str = json.dumps(hash_data).encode("utf-8")
# [:51] to strip off the "Q====" suffix common to every hash value.
return (
base64.b32encode(hashlib.sha256(hashing_str).digest())[:51]
.decode("utf-8")
.lower()
)
| _CodeOnlyModule |
python | Pylons__pyramid | docs/quick_tutorial/forms/tutorial/tests.py | {
"start": 431,
"end": 1800
} | class ____(unittest.TestCase):
def setUp(self):
from tutorial import main
app = main({})
from webtest import TestApp
self.testapp = TestApp(app)
def tearDown(self):
testing.tearDown()
def test_home(self):
res = self.testapp.get('/', status=200)
self.assertIn(b'<title>Wiki: View</title>', res.body)
def test_add_page(self):
res = self.testapp.get('/add', status=200)
self.assertIn(b'<h1>Wiki</h1>', res.body)
def test_edit_page(self):
res = self.testapp.get('/101/edit', status=200)
self.assertIn(b'<h1>Wiki</h1>', res.body)
def test_post_wiki(self):
self.testapp.post('/add', {
"title": "New Title",
"body": "<p>New Body</p>",
"submit": "submit"
}, status=302)
res = self.testapp.get('/103', status=200)
self.assertIn(b'<h1>New Title</h1>', res.body)
self.assertIn(b'<p>New Body</p>', res.body)
def test_edit_wiki(self):
self.testapp.post('/102/edit', {
"title": "New Title",
"body": "<p>New Body</p>",
"submit": "submit"
}, status=302)
res = self.testapp.get('/102', status=200)
self.assertIn(b'<h1>New Title</h1>', res.body)
self.assertIn(b'<p>New Body</p>', res.body)
| TutorialFunctionalTests |
python | django__django | django/template/exceptions.py | {
"start": 1208,
"end": 1342
} | class ____(Exception):
"""
The exception used for syntax errors during parsing or rendering.
"""
pass
| TemplateSyntaxError |
python | numpy__numpy | numpy/lib/tests/test_io.py | {
"start": 1002,
"end": 1948
} | class ____(BytesIO):
"""Helper IO class.
Writes encode strings to bytes if needed, reads return bytes.
This makes it easier to emulate files opened in binary mode
without needing to explicitly convert strings to bytes in
setting up the test data.
"""
def __init__(self, s=""):
BytesIO.__init__(self, asbytes(s))
def write(self, s):
BytesIO.write(self, asbytes(s))
def writelines(self, lines):
BytesIO.writelines(self, [asbytes(s) for s in lines])
IS_64BIT = sys.maxsize > 2**32
try:
import bz2
HAS_BZ2 = True
except ImportError:
HAS_BZ2 = False
try:
import lzma
HAS_LZMA = True
except ImportError:
HAS_LZMA = False
def strptime(s, fmt=None):
"""
This function is available in the datetime module only from Python >=
2.5.
"""
if isinstance(s, bytes):
s = s.decode("latin1")
return datetime(*time.strptime(s, fmt)[:3])
| TextIO |
python | getsentry__sentry | src/sentry/api/endpoints/organization_onboarding_continuation_email.py | {
"start": 794,
"end": 1762
} | class ____(CamelSnakeSerializer):
platforms = serializers.ListField(
child=serializers.CharField(max_length=255),
)
def get_request_builder_args(user: User, organization: Organization, platforms: list[str]):
num_platforms = len(platforms)
context = {
"recipient_name": user.get_display_name(),
"onboarding_link": organization.absolute_url(
f"/onboarding/{organization.slug}/", query="referrer=onboarding_continuation-email"
),
"organization_name": organization.name,
"num_platforms": num_platforms,
"platforms": oxfordize_list(platforms),
}
return {
"subject": "Finish Onboarding",
"type": "organization.onboarding-continuation-email",
"context": context,
"template": "sentry/emails/onboarding-continuation.txt",
"html_template": "sentry/emails/onboarding-continuation.html",
}
@region_silo_endpoint
| OnboardingContinuationSerializer |
python | automl__auto-sklearn | test/test_pipeline/components/regression/test_k_nearest_neighbors.py | {
"start": 191,
"end": 793
} | class ____(BaseRegressionComponentTest):
__test__ = True
res = dict()
res["default_boston"] = 0.18393287980040374
res["default_boston_iterative"] = None
res["default_boston_sparse"] = -0.23029229186279609
res["default_boston_iterative_sparse"] = None
res["default_diabetes"] = 0.068600456340847438
res["default_diabetes_iterative"] = None
res["default_diabetes_sparse"] = -0.16321841460809972
res["default_diabetes_iterative_sparse"] = None
sk_mod = sklearn.neighbors.KNeighborsRegressor
module = KNearestNeighborsRegressor
| KNearestNeighborsComponentTest |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/compute.py | {
"start": 1954,
"end": 3228
} | class ____(GoogleCloudBaseOperator):
"""Abstract base operator for Google Compute Engine operators to inherit from."""
def __init__(
self,
*,
zone: str,
resource_id: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.project_id = project_id
self.zone = zone
self.resource_id = resource_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.impersonation_chain = impersonation_chain
self._validate_inputs()
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if self.project_id == "":
raise AirflowException("The required parameter 'project_id' is missing")
if not self.zone:
raise AirflowException("The required parameter 'zone' is missing")
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"location_id": self.zone,
"resource_id": self.resource_id,
}
def execute(self, context: Context):
pass
| ComputeEngineBaseOperator |
python | psf__black | tests/data/cases/preview_long_strings__regression.py | {
"start": 20,
"end": 691
} | class ____:
def foo():
result = type(message)("")
# Don't merge multiline (e.g. triple-quoted) strings.
def foo():
query = (
"""SELECT xxxxxxxxxxxxxxxxxxxx(xxx)"""
""" FROM xxxxxxxxxxxxxxxx WHERE xxxxxxxxxx AND xxx <> xxxxxxxxxxxxxx()""")
# There was a bug where tuples were being identified as long strings.
long_tuple = ('Apple', 'Berry', 'Cherry', 'Dill', 'Evergreen', 'Fig',
'Grape', 'Harry', 'Iglu', 'Jaguar')
stupid_format_method_bug = "Some really long string that just so happens to be the {} {} to force the 'format' method to hang over the line length boundary. This is pretty annoying.".format("perfect", "length")
| A |
python | rapidsai__cudf | python/cudf/cudf/pandas/module_accelerator.py | {
"start": 11089,
"end": 23140
} | class ____(ModuleAcceleratorBase):
"""
A finder and loader that produces "accelerated" modules.
When someone attempts to import the specified slow library with
this finder enabled, we intercept the import and deliver an
equivalent, accelerated, version of the module. This provides
attributes and modules that check if they are being used from
"within" the slow (or fast) library themselves. If this is the
case, the implementation is forwarded to the actual slow library
implementation, otherwise a proxy implementation is used (which
attempts to call the fast version first).
"""
_denylist: tuple[str]
_disable_count: defaultdict[int, int]
_module_cache_prefix: str = "_slow_lib_"
# TODO: Add possibility for either an explicit allow-list of
# libraries where the slow_lib should be wrapped, or, more likely
# a block-list that adds to the set of libraries where no proxying occurs.
def __new__(
cls,
fast_lib,
slow_lib,
):
self = super().__new__(
cls,
slow_lib,
fast_lib,
slow_lib,
)
# Import the real versions of the modules so that we can
# rewrite the sys.modules cache.
slow_module = importlib.import_module(slow_lib)
fast_module = importlib.import_module(fast_lib)
# Note, this is not thread safe, but install() below grabs the
# lock for the whole initialisation and modification of
# sys.meta_path.
for mod in sys.modules.copy():
if mod.startswith(self.slow_lib):
if mod == "pandas._config.config":
# Since it is possible for state to diverge between the proxy and real
# module, we skip wrapping this one entirely.
# For example, running tests in pandas/tests/config/test_config.py
# mutates internal globals like _registered_options on the proxy,
# while register_option() reads them from the real module.
# Therefore the two get out of sync and raise duplicate registration errors.
# Keeping the real module here avoids that split state.
continue
sys.modules[self._module_cache_prefix + mod] = sys.modules[mod]
del sys.modules[mod]
self._denylist = (*slow_module.__path__, *fast_module.__path__)
# This initialization does not need to be protected since a given instance is
# always being created on a given thread.
self._disable_count = defaultdict(int)
return self
def _populate_module(self, mod: ModuleType):
mod_name = mod.__name__
# Here we attempt to import "_fsproxy_slow_lib.x.y.z", but
# "_fsproxy_slow_lib" does not exist anywhere as a real file, so
# how does this work?
# The importer attempts to import ".z" by first importing
# "_fsproxy_slow_lib.x.y", this recurses until we find
# "_fsproxy_slow_lib.x" (say), which does exist because we set that up
# in __init__. Now the importer looks at the __path__
# attribute of "x" and uses that to find the relative location
# to look for "y". This __path__ points to the real location
# of "slow_lib.x". So, as long as we rewire the _already imported_
# slow_lib modules in sys.modules to _fsproxy_slow_lib, when we
# get here this will find the right thing.
# The above exposition is for lazily imported submodules (e.g.
# avoiding circular imports by putting an import at function
# level). For everything that is eagerly imported when we do
# "import slow_lib" this import line is trivial because we
# immediately pull the correct result out of sys.modules.
slow_mod = importlib.import_module(
rename_root_module(
mod_name,
self.slow_lib,
self._module_cache_prefix + self.slow_lib,
)
)
try:
fast_mod = importlib.import_module(
rename_root_module(mod_name, self.slow_lib, self.fast_lib)
)
except Exception:
fast_mod = None
# The version that will be used if called within a denylist
# package
real_attributes = {}
# The version that will be used outside denylist packages
for key in slow_mod.__dir__():
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
slow_attr = getattr(slow_mod, key)
fast_attr = getattr(fast_mod, key, _Unusable())
real_attributes[key] = slow_attr
try:
wrapped_attr = self._wrap_attribute(slow_attr, fast_attr, key)
self._wrapped_objs[slow_attr] = wrapped_attr
except TypeError:
# slow_attr is not hashable
pass
# Our module has (basically) no static attributes and instead
# always delivers them dynamically where the behaviour is
# dependent on the calling module.
setattr(
mod,
"__getattr__",
functools.partial(
self.getattr_real_or_wrapped,
real=real_attributes,
wrapped_objs=self._wrapped_objs,
loader=self,
),
)
# ...but, we want to pretend like we expose the same attributes
# as the equivalent slow module
setattr(mod, "__dir__", slow_mod.__dir__)
# We set __path__ to the real path so that importers like
# jinja2.PackageLoader("slow_mod") work correctly.
if getattr(slow_mod, "__path__", False):
assert mod.__spec__
mod.__path__ = slow_mod.__path__
mod.__spec__.submodule_search_locations = [*slow_mod.__path__]
return self._postprocess_module(mod, slow_mod, fast_mod)
@contextlib.contextmanager
def disabled(self):
    """Context manager that switches the accelerator off while active.

    Inside the block, wrapped objects hand back attributes from their
    real (slow) counterparts, exactly as though the calling code were
    on the denylist.

    Returns
    -------
    Context manager for disabling things
    """
    counts = self._disable_count
    counts[threading.get_ident()] += 1
    try:
        yield
    finally:
        # Re-read the thread id on exit: an ExitStack holding this
        # manager may be closed from a different thread than the one
        # that entered it.
        counts[threading.get_ident()] -= 1
@staticmethod
def getattr_real_or_wrapped(
    name: str,
    *,
    real: dict[str, Any],
    wrapped_objs,
    loader: ModuleAccelerator,
) -> Any:
    """
    Obtain an attribute from a module from either the real or
    wrapped namespace.

    Parameters
    ----------
    name
        Attribute to return
    real
        Unwrapped "original" attributes
    wrapped_objs
        Mapping keyed by the real attribute objects, yielding their
        wrapped versions
    loader
        Loader object that manages denylist and other skipping

    Returns
    -------
    The requested attribute (either real or wrapped)

    Raises
    ------
    AttributeError
        If ``name`` is not present in ``real`` (or has no wrapped
        counterpart when one is required).
    """
    # Acceleration is off when this thread — or the main thread — is
    # currently inside a `disabled()` block.
    use_real = (
        loader._disable_count[threading.get_ident()] > 0
        # If acceleration was disabled on the main thread, we should respect that.
        # This only works because we currently have no way to re-enable other than
        # exiting the disable context, so disabling on the parent thread means that
        # the inner threads will also typically be disabled. This logic breaks if
        # the parent thread queues work on a thread and only then disables
        # acceleration because in that case there is a potential race condition by
        # which the child thread may wind up disabled even though the parent was not
        # disabled when the child was launched. That is a fairly rare pattern though
        # and we can document the limitations.
        # The main thread is always started, so the ident is always an int
        or loader._disable_count[threading.main_thread().ident] > 0  # type: ignore[index]
    )
    if not use_real:
        # Only need to check the denylist if we're not turned off.
        # Inspect the immediate caller's frame to learn which source
        # file the attribute access originated from.
        frame = sys._getframe()
        # We cannot possibly be at the top level.
        assert frame.f_back
        calling_module = pathlib.PurePath(frame.f_back.f_code.co_filename)
        use_real = _caller_in_denylist(
            calling_module, tuple(loader._denylist)
        )
    try:
        if use_real:
            return real[name]
        else:
            # Wrapped objects are looked up by their real counterpart.
            return wrapped_objs[real[name]]
    except KeyError:
        raise AttributeError(f"No attribute '{name}'")
    except TypeError:
        # real[name] is an unhashable type
        return real[name]
@classmethod
def install(
    cls,
    destination_module: str,
    fast_lib: str,
    slow_lib: str,
) -> Self | None:
    """Install (or reuse) an accelerator finder on ``sys.meta_path``.

    Parameters
    ----------
    destination_module
        Must equal ``slow_lib``: the accelerated module is exposed
        under the slow library's own name.
    fast_lib
        Name of the fast (accelerated) library.
    slow_lib
        Name of the slow (reference) library being proxied.

    Returns
    -------
    The installed (or already-present) finder instance.
    NOTE(review): annotated ``Self | None``, but every path below
    returns a bound finder — confirm whether ``None`` is still a
    possible result.

    Raises
    ------
    RuntimeError
        If ``destination_module`` differs from ``slow_lib``.
    """
    # This grabs the global _import_ lock to avoid concurrent
    # threads modifying sys.modules.
    # We also make sure that we finish installing ourselves in
    # sys.meta_path before releasing the lock so that there isn't
    # a race between our modification of sys.modules and someone
    # else importing the slow_lib before we have added ourselves
    # to the meta_path
    with ImportLock():
        if destination_module != slow_lib:
            # BUG FIX: the two f-string fragments previously joined
            # without a space, rendering as "...must match'pandas'...".
            raise RuntimeError(
                f"Destination module '{destination_module}' must match "
                f"'{slow_lib}' for this to work."
            )
        mode = deduce_cudf_pandas_mode(slow_lib, fast_lib)
        if mode.use_fast_lib:
            # Only set up the fast-library wrapper module when
            # acceleration is actually enabled.
            lib_wrappers = importlib.import_module(
                f".._wrappers.{mode.slow_lib}", __name__
            )
            lib_wrappers.initial_setup()
        try:
            # Reuse an already-installed finder for this
            # (slow_lib, fast_lib) pair if one exists.
            (self,) = (
                p
                for p in sys.meta_path
                if isinstance(p, cls)
                and p.slow_lib == mode.slow_lib
                and p.fast_lib == mode.fast_lib
            )
        except ValueError:
            self = cls(mode.fast_lib, mode.slow_lib)
            sys.meta_path.insert(0, self)
        return self
def disable_module_accelerator() -> contextlib.ExitStack:
    """Temporarily disable every installed module accelerator.

    Thread-safety guarantees are limited. These patterns work:

    - several threads each independently disable acceleration
    - one thread disables acceleration and only then launches workers
      inside the context manager

    These patterns can race:

    - a thread launches workers and enters the context while they are
      still running
    - nested launch/disable: a thread that launches a thread which
      disables acceleration and then launches another thread leaves the
      innermost thread accelerated

    Returns
    -------
    contextlib.ExitStack
        Exit (or close) it to re-enable acceleration.
    """
    with ImportLock(), contextlib.ExitStack() as pending:
        accelerators = (
            entry
            for entry in sys.meta_path
            if isinstance(entry, ModuleAcceleratorBase)
        )
        for accelerator in accelerators:
            pending.enter_context(accelerator.disabled())
        # Hand ownership of the entered contexts back to the caller.
        return pending.pop_all()
    assert False  # pacify type checker
# because this function gets called so often and is quite
# expensive to run, we cache the results:
@functools.lru_cache(maxsize=1024)
def _caller_in_denylist(calling_module, denylist):
    """Return True when ``calling_module`` falls under a denylist path.

    Calls that originate from inside the cudf.pandas package itself are
    never considered denylisted.
    """
    package_root = __file__.rsplit("/", 1)[0]
    if calling_module.is_relative_to(package_root):
        return False
    return any(calling_module.is_relative_to(entry) for entry in denylist)
| ModuleAccelerator |
python | huggingface__transformers | tests/models/clipseg/test_modeling_clipseg.py | {
"start": 12092,
"end": 15171
} | class ____:
def __init__(
    self,
    parent,
    text_kwargs=None,
    vision_kwargs=None,
    is_training=True,
    # This should respect the `num_hidden_layers` in `CLIPSegVisionModelTester`
    extract_layers=(1,),
):
    """Set up the text and vision sub-testers and shared settings."""
    self.parent = parent
    self.text_model_tester = CLIPSegTextModelTester(
        parent, **({} if text_kwargs is None else text_kwargs)
    )
    self.vision_model_tester = CLIPSegVisionModelTester(
        parent, **({} if vision_kwargs is None else vision_kwargs)
    )
    # need bs for batching_equivalence test
    self.batch_size = self.text_model_tester.batch_size
    self.is_training = is_training
    self.extract_layers = extract_layers
def prepare_config_and_inputs(self):
    """Return ``(config, input_ids, attention_mask, pixel_values)``.

    The sub-testers' per-tower configs are discarded; the combined
    config comes from :meth:`get_config`.
    """
    _, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
    _, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
    return self.get_config(), input_ids, attention_mask, pixel_values
def get_config(self):
    """Assemble a combined CLIPSegConfig from the two sub-testers."""
    text_cfg = self.text_model_tester.get_config().to_dict()
    vision_cfg = self.vision_model_tester.get_config().to_dict()
    return CLIPSegConfig(
        text_config=text_cfg,
        vision_config=vision_cfg,
        projection_dim=64,
        reduce_dim=32,
        extract_layers=self.extract_layers,
    )
def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):
    """Forward pass through CLIPSegModel; checks the contrastive
    logit shapes in both directions (image x text and text x image)."""
    model = CLIPSegModel(config).to(torch_device).eval()
    with torch.no_grad():
        result = model(input_ids, pixel_values, attention_mask)
    # One row per image, one column per text prompt.
    self.parent.assertEqual(
        result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)
    )
    # logits_per_text is the transpose-shaped counterpart.
    self.parent.assertEqual(
        result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)
    )
def create_and_check_model_for_image_segmentation(self, config, input_ids, attention_mask, pixel_values):
    """Forward pass through CLIPSegForImageSegmentation; checks that the
    segmentation logits are (batch, image_size, image_size) and that the
    conditional text embeddings are (batch, projection_dim).

    NOTE: ``attention_mask`` is accepted but not forwarded to the model.
    """
    model = CLIPSegForImageSegmentation(config).to(torch_device).eval()
    with torch.no_grad():
        result = model(input_ids, pixel_values)
    self.parent.assertEqual(
        result.logits.shape,
        (
            self.vision_model_tester.batch_size,
            self.vision_model_tester.image_size,
            self.vision_model_tester.image_size,
        ),
    )
    self.parent.assertEqual(
        result.conditional_embeddings.shape, (self.text_model_tester.batch_size, config.projection_dim)
    )
def prepare_config_and_inputs_for_common(self):
    """Return ``(config, inputs_dict)`` in the shape the common model
    tests expect."""
    config, input_ids, attention_mask, pixel_values = self.prepare_config_and_inputs()
    inputs = dict(
        input_ids=input_ids,
        attention_mask=attention_mask,
        pixel_values=pixel_values,
    )
    return config, inputs
@require_torch
| CLIPSegModelTester |
python | huggingface__transformers | src/transformers/models/fnet/modeling_fnet.py | {
"start": 7303,
"end": 7634
} | class ____(nn.Module):
def __init__(self, config):
    # This output block holds only a LayerNorm: it applies a residual
    # add followed by normalization, with no dense projection.
    super().__init__()
    self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states, input_tensor):
    """Add the residual ``input_tensor`` to ``hidden_states`` and apply
    LayerNorm; there is no dense projection in this block."""
    return self.LayerNorm(input_tensor + hidden_states)
| FNetBasicOutput |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.