language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | src/sentry/monitors/endpoints/base.py | {
"start": 901,
"end": 3204
} | class ____(Endpoint):
"""
Base endpoint class for monitors which will look up the monitor and
convert it to a Monitor object.
"""
permission_classes: tuple[type[BasePermission], ...] = (ProjectAlertRulePermission,)
def convert_args(
self,
request: Request,
organization_id_or_slug: int | str,
monitor_id_or_slug: str,
environment: str | None = None,
checkin_id: str | None = None,
*args,
**kwargs,
):
try:
if str(organization_id_or_slug).isdigit():
organization = Organization.objects.get_from_cache(id=organization_id_or_slug)
else:
organization = Organization.objects.get_from_cache(slug=organization_id_or_slug)
except Organization.DoesNotExist:
raise ResourceDoesNotExist
try:
monitor = get_monitor_by_org_id_or_slug(organization, monitor_id_or_slug)
except Monitor.DoesNotExist:
raise ResourceDoesNotExist
project = Project.objects.get_from_cache(id=monitor.project_id)
if project.status != ObjectStatus.ACTIVE:
raise ResourceDoesNotExist
if environment:
try:
environment_object = Environment.objects.get(
organization_id=organization.id, name=environment
)
monitor_environment = MonitorEnvironment.objects.get(
monitor_id=monitor.id, environment_id=environment_object.id
)
kwargs["monitor_environment"] = monitor_environment
except (Environment.DoesNotExist, MonitorEnvironment.DoesNotExist):
raise ResourceDoesNotExist
self.check_object_permissions(request, project)
Scope.get_isolation_scope().set_tag("project", project.id)
bind_organization_context(project.organization)
request._request.organization = project.organization # type: ignore[attr-defined]
kwargs["organization"] = organization
kwargs["project"] = project
kwargs["monitor"] = monitor
if checkin_id:
checkin = try_checkin_lookup(monitor, checkin_id)
kwargs["checkin"] = checkin
return args, kwargs
| MonitorEndpoint |
python | django__django | tests/logging_tests/tests.py | {
"start": 1961,
"end": 3016
} | class ____(
SetupDefaultLoggingMixin, LoggingCaptureMixin, SimpleTestCase
):
def test_django_logger(self):
"""
The 'django' base logger only output anything when DEBUG=True.
"""
self.logger.error("Hey, this is an error.")
self.assertEqual(self.logger_output.getvalue(), "")
with self.settings(DEBUG=True):
self.logger.error("Hey, this is an error.")
self.assertEqual(self.logger_output.getvalue(), "Hey, this is an error.\n")
@override_settings(DEBUG=True)
def test_django_logger_warning(self):
self.logger.warning("warning")
self.assertEqual(self.logger_output.getvalue(), "warning\n")
@override_settings(DEBUG=True)
def test_django_logger_info(self):
self.logger.info("info")
self.assertEqual(self.logger_output.getvalue(), "info\n")
@override_settings(DEBUG=True)
def test_django_logger_debug(self):
self.logger.debug("debug")
self.assertEqual(self.logger_output.getvalue(), "")
| DefaultLoggingTests |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 573873,
"end": 574273
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("DeploymentReviewer", graphql_name="node")
"""The item at the end of the edge."""
| DeploymentReviewerEdge |
python | plotly__plotly.py | plotly/graph_objs/scattergeo/marker/colorbar/_title.py | {
"start": 233,
"end": 4042
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattergeo.marker.colorbar"
_path_str = "scattergeo.marker.colorbar.title"
_valid_props = {"font", "side", "text"}
@property
def font(self):
"""
Sets this color bar's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergeo.marker.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.scattergeo.marker.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def side(self):
"""
Determines the location of color bar's title with respect to
the color bar. Defaults to "top" when `orientation` if "v" and
defaults to "right" when `orientation` if "h".
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
@property
def text(self):
"""
Sets the title of the color bar.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scattergeo.mar
ker.colorbar.Title`
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattergeo.marker.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergeo.marker.colorbar.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("side", arg, side)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Title |
python | google__pytype | pytype/tests/test_fiddle_overlay.py | {
"start": 13691,
"end": 15211
} | class ____(TestDataclassConfig):
"""Test fiddle.Partial over dataclasses."""
@property
def buildable_type_name(self) -> str:
return "Partial"
def test_nested_partial_assignment(self):
with self.DepTree([("fiddle.pyi", _FIDDLE_PYI)]):
self.Check("""
import dataclasses
import fiddle
from typing import Callable
@dataclasses.dataclass
class DataClass:
x: int
y: str
class RegularClass:
def __init__(self, a, b):
self.a = a
self.b = b
@dataclasses.dataclass
class Parent:
data_factory: Callable[..., DataClass]
regular_factory: Callable[..., RegularClass]
def data_builder(x: int = 1) -> DataClass:
return DataClass(x=x, y='y')
def regular_builder() -> RegularClass:
return RegularClass(1, 2)
c = fiddle.Partial(Parent)
c.child_data = data_builder
c.child_data = fiddle.Partial(DataClass)
c.regular_factory = regular_builder
c.regular_factory = fiddle.Partial(RegularClass)
""")
def test_config_partial_mismatch(self):
with self.DepTree([("fiddle.pyi", _FIDDLE_PYI)]):
self.CheckWithErrors("""
import dataclasses
import fiddle
@dataclasses.dataclass
class DataClass:
x: int
y: str
def f() -> fiddle.Config:
return fiddle.Partial(DataClass) # bad-return-type
""")
| TestDataclassPartial |
python | streamlit__streamlit | lib/streamlit/components/types/base_component_registry.py | {
"start": 833,
"end": 3137
} | class ____(Protocol):
"""Interface for ComponentRegistries."""
@abstractmethod
def register_component(self, component: BaseCustomComponent) -> None:
"""Register a CustomComponent.
Parameters
----------
component : CustomComponent
The component to register.
"""
raise NotImplementedError
@abstractmethod
def get_component_path(self, name: str) -> str | None:
"""Return the filesystem path for the component with the given name.
If no such component is registered, or if the component exists but is
being served from a URL, return None instead.
Parameters
----------
name: name of the component
Returns
-------
str or None
The name of the specified component or None if no component with the given name has been registered.
"""
raise NotImplementedError
@abstractmethod
def get_module_name(self, name: str) -> str | None:
"""Return the module name for the component with the given name.
If no such component is registered, return None instead.
Parameters
----------
name: name of the component
Returns
-------
str or None
The module_name of the specified component or None if no component with the given name has been registered.
"""
raise NotImplementedError
@abstractmethod
def get_component(self, name: str) -> BaseCustomComponent | None:
"""Return the registered component with the given name.
If no such component is registered, return None instead.
Parameters
----------
name: name of the component
Returns
-------
component or None
The component with the provided name or None if component with the given name has been registered.
"""
raise NotImplementedError
@abstractmethod
def get_components(self) -> list[BaseCustomComponent]:
"""Returns a list of custom components that are registered in this registry.
Returns
-------
list[CustomComponents]
A list of registered custom components.
"""
raise NotImplementedError
| BaseComponentRegistry |
python | explosion__spaCy | spacy/lang/la/__init__.py | {
"start": 219,
"end": 405
} | class ____(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
stop_words = STOP_WORDS
lex_attr_getters = LEX_ATTRS
syntax_iterators = SYNTAX_ITERATORS
| LatinDefaults |
python | apache__airflow | providers/teradata/src/airflow/providers/teradata/operators/tpt.py | {
"start": 1368,
"end": 8852
} | class ____(BaseOperator):
"""
Operator to execute one or more DDL (Data Definition Language) statements on a Teradata Database.
This operator is designed to facilitate DDL operations such as creating, altering, or dropping tables, indexes, views, or other database objects in a scalable and efficient manner.
It leverages the TPT (Teradata Parallel Transporter) utility to perform the operations and supports templating for SQL statements, allowing dynamic generation of SQL at runtime.
Key Features:
- Executes one or more DDL statements sequentially on Teradata using TPT
- Supports error handling with customizable error code list
- Supports XCom push to share execution results with downstream tasks
- Integrates with Airflow's templating engine for dynamic SQL generation
- Can execute statements via SSH connection if needed
:param ddl: A list of DDL statements to be executed. Each item should be a valid SQL
DDL command supported by Teradata.
:param error_list: Optional integer or list of error codes to ignore during execution.
If provided, the operator will not fail when these specific error codes occur.
Example: error_list=3803 or error_list=[3803, 3807]
:param teradata_conn_id: The connection ID for the Teradata database.
Defaults to TeradataHook.default_conn_name.
:param ssh_conn_id: Optional SSH connection ID if the commands need to be executed through SSH.
:param remote_working_dir: Directory on the remote server where temporary files will be stored.
:param ddl_job_name: Optional name for the DDL job.
:raises ValueError: If the ddl parameter or error_list is invalid.
:raises RuntimeError: If underlying TPT execution (tbuild) fails with non-zero exit status.
:raises ConnectionError: If remote SSH connection cannot be established.
:raises TimeoutError: If SSH connection attempt times out.
:raises FileNotFoundError: If required TPT utility (tbuild) is missing locally or on remote host.
Example usage::
# Example of creating tables using DdlOperator
create_tables = DdlOperator(
task_id="create_tables_task",
ddl=[
"CREATE TABLE my_database.my_table1 (id INT, name VARCHAR(100))",
"CREATE TABLE my_database.my_table2 (id INT, value FLOAT)",
],
teradata_conn_id="my_teradata_conn",
error_list=[3803], # Ignore "Table already exists" errors
ddl_job_name="create_tables_job",
)
# Example of dropping tables using DdlOperator
drop_tables = DdlOperator(
task_id="drop_tables_task",
ddl=["DROP TABLE my_database.my_table1", "DROP TABLE my_database.my_table2"],
teradata_conn_id="my_teradata_conn",
error_list=3807, # Ignore "Object does not exist" errors
ddl_job_name="drop_tables_job",
)
# Example using templated SQL file
alter_table = DdlOperator(
task_id="alter_table_task",
ddl="{{ var.value.get('ddl_directory') }}/alter_table.sql",
teradata_conn_id="my_teradata_conn",
ssh_conn_id="my_ssh_conn",
ddl_job_name="alter_table_job",
)
"""
template_fields = ("ddl", "ddl_job_name")
template_ext = (".sql",)
ui_color = "#a8e4b1"
def __init__(
self,
*,
ddl: list[str],
error_list: int | list[int] | None = None,
teradata_conn_id: str = TeradataHook.default_conn_name,
ssh_conn_id: str | None = None,
remote_working_dir: str | None = None,
ddl_job_name: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.ddl = ddl
self.error_list = error_list
self.teradata_conn_id = teradata_conn_id
self.ssh_conn_id = ssh_conn_id
self.remote_working_dir = remote_working_dir
self.ddl_job_name = ddl_job_name
self._hook: TptHook | None = None
self._ssh_hook: SSHHook | None = None
def execute(self, context: Context) -> int | None:
"""Execute the DDL operations using the TptHook."""
# Validate the ddl parameter
if (
not self.ddl
or not isinstance(self.ddl, list)
or not all(isinstance(stmt, str) and stmt.strip() for stmt in self.ddl)
):
raise ValueError(
"ddl parameter must be a non-empty list of non-empty strings representing DDL statements."
)
# Normalize error_list to a list of ints
normalized_error_list = self._normalize_error_list(self.error_list)
self.log.info("Initializing Teradata connection using teradata_conn_id: %s", self.teradata_conn_id)
self._hook = TptHook(teradata_conn_id=self.teradata_conn_id, ssh_conn_id=self.ssh_conn_id)
self._ssh_hook = SSHHook(ssh_conn_id=self.ssh_conn_id) if self.ssh_conn_id else None
try:
# Prepare TPT script for DDL execution
tpt_ddl_script = prepare_tpt_ddl_script(
sql=self.ddl,
error_list=normalized_error_list,
source_conn=self._hook.get_conn(),
job_name=self.ddl_job_name,
)
# Set remote working directory if SSH is used
if self._ssh_hook and not self.remote_working_dir:
self.remote_working_dir = get_remote_temp_directory(
self._ssh_hook.get_conn(), logging.getLogger(__name__)
)
# Ensure remote_working_dir has a value even for local execution
if not self.remote_working_dir:
self.remote_working_dir = "/tmp"
return self._hook.execute_ddl(
tpt_ddl_script,
self.remote_working_dir,
)
except Exception as e:
self.log.error("Failed to execute DDL operations: %s", str(e))
raise
def _normalize_error_list(self, error_list: int | list[int] | None) -> list[int]:
"""
Normalize error_list parameter to a list of integers.
Args:
error_list: An integer, list of integers, or None
Returns:
A list of integers representing error codes to ignore
Raises:
ValueError: If error_list is not of the expected type
"""
if error_list is None:
return []
if isinstance(error_list, int):
return [error_list]
if isinstance(error_list, list) and all(isinstance(err, int) for err in error_list):
return error_list
raise ValueError(
f"error_list must be an int or a list of ints, got {type(error_list).__name__}. "
"Example: error_list=3803 or error_list=[3803, 3807]"
)
def on_kill(self):
"""Handle termination signals and ensure the hook is properly cleaned up."""
self.log.info("Cleaning up TPT DDL connections on task kill")
if self._hook:
try:
self._hook.on_kill()
self.log.info("TPT DDL hook cleaned up successfully")
except Exception as e:
self.log.error("Error cleaning up TPT DDL hook: %s", str(e))
else:
self.log.warning("No TptHook initialized to clean up on task kill")
| DdlOperator |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_monitor.py | {
"start": 1434,
"end": 5861
} | class ____(TestMonitorEndpoint):
@provide_session
def test_healthy_scheduler_status(self, test_client, session):
last_scheduler_heartbeat_for_testing_1 = timezone.utcnow()
job = Job(state=State.RUNNING, latest_heartbeat=last_scheduler_heartbeat_for_testing_1)
SchedulerJobRunner(job=job)
session.add(job)
session.commit()
response = test_client.get("/monitor/health")
assert response.status_code == 200
body = response.json()
assert body["metadatabase"]["status"] == "healthy"
assert body["scheduler"]["status"] == "healthy"
assert (
last_scheduler_heartbeat_for_testing_1.isoformat()
== body["scheduler"]["latest_scheduler_heartbeat"]
)
@provide_session
def test_unhealthy_scheduler_is_slow(self, test_client, session):
last_scheduler_heartbeat_for_testing_2 = timezone.utcnow() - timedelta(minutes=1)
job = Job(state=State.RUNNING, latest_heartbeat=last_scheduler_heartbeat_for_testing_2)
SchedulerJobRunner(job=job)
session.add(job)
session.commit()
response = test_client.get("/monitor/health")
assert response.status_code == 200
body = response.json()
assert body["metadatabase"]["status"] == "healthy"
assert body["scheduler"]["status"] == "unhealthy"
assert (
last_scheduler_heartbeat_for_testing_2.isoformat()
== body["scheduler"]["latest_scheduler_heartbeat"]
)
def test_unhealthy_scheduler_no_job(self, test_client):
response = test_client.get("/monitor/health")
assert response.status_code == 200
body = response.json()
assert body["metadatabase"]["status"] == "healthy"
assert body["scheduler"]["status"] == "unhealthy"
assert body["scheduler"]["latest_scheduler_heartbeat"] is None
@mock.patch.object(SchedulerJobRunner, "most_recent_job")
def test_unhealthy_metadatabase_status(self, most_recent_job_mock, test_client):
most_recent_job_mock.side_effect = Exception
response = test_client.get("/monitor/health")
assert response.status_code == 200
body = response.json()
assert body["metadatabase"]["status"] == "unhealthy"
assert body["scheduler"]["latest_scheduler_heartbeat"] is None
@mock.patch("airflow.api_fastapi.core_api.routes.public.monitor.get_airflow_health")
def test_health_with_dag_processor(self, mock_get_airflow_health, test_client):
mock_get_airflow_health.return_value = {
"metadatabase": {"status": HEALTHY},
"scheduler": {
"status": HEALTHY,
"latest_scheduler_heartbeat": "2024-11-23T11:09:16.663124+00:00",
},
"triggerer": {
"status": HEALTHY,
"latest_triggerer_heartbeat": "2024-11-23T11:09:15.815483+00:00",
},
"dag_processor": {
"status": HEALTHY,
"latest_dag_processor_heartbeat": "2024-11-23T11:09:15.815483+00:00",
},
}
response = test_client.get("/monitor/health")
assert response.status_code == 200
body = response.json()
assert "dag_processor" in body
assert body["metadatabase"]["status"] == HEALTHY
assert body["scheduler"]["status"] == HEALTHY
assert body["triggerer"]["status"] == HEALTHY
@mock.patch("airflow.api_fastapi.core_api.routes.public.monitor.get_airflow_health")
def test_health_without_dag_processor(self, mock_get_airflow_health, test_client):
mock_get_airflow_health.return_value = {
"metadatabase": {"status": HEALTHY},
"scheduler": {
"status": HEALTHY,
"latest_scheduler_heartbeat": "2024-11-23T11:09:16.663124+00:00",
},
"triggerer": {
"status": HEALTHY,
"latest_triggerer_heartbeat": "2024-11-23T11:09:15.815483+00:00",
},
}
response = test_client.get("/monitor/health")
assert response.status_code == 200
body = response.json()
assert "dag_processor" not in body
assert body["metadatabase"]["status"] == HEALTHY
assert body["scheduler"]["status"] == HEALTHY
assert body["triggerer"]["status"] == HEALTHY
| TestGetHealth |
python | tensorflow__tensorflow | tensorflow/python/distribute/experimental/rpc/rpc_ops.py | {
"start": 10185,
"end": 12319
} | class ____(Server):
"""GrpcServer object encapsulates a resource with GRPC server.
Functions can be registered locally and are exposed via RPCs.
Example:
```
server = rpc_ops.GrpcServer("host:port")
@tf.function
def add(a, b):
return a + b
server.register("add", add)
server.start()
```
"""
def __init__(self, address: str):
self._server_handle = gen_rpc_ops.rpc_server(address)
if context.executing_eagerly():
self._handle_deleter = resource_variable_ops.EagerResourceDeleter(
handle=self._server_handle, handle_device=self._server_handle.device)
else:
raise NotImplementedError("Please create the server outside tf.function.")
def register(self, method_name: str,
func: Union[def_function.Function,
tf_function.ConcreteFunction]):
"""Method for registering functions."""
if isinstance(func, def_function.Function):
if func.function_spec.arg_names:
if func.input_signature is None:
raise ValueError("Input signature not specified for the function.")
concrete_fn = func.get_concrete_function()
gen_rpc_ops.rpc_server_register(
self._server_handle,
method_name=method_name,
captured_inputs=concrete_fn.captured_inputs,
input_specs=get_input_specs_from_function(concrete_fn),
output_specs=get_output_specs_from_function(concrete_fn),
f=concrete_fn)
elif isinstance(func, tf_function.ConcreteFunction):
gen_rpc_ops.rpc_server_register(
self._server_handle,
method_name=method_name,
captured_inputs=func.captured_inputs,
input_specs=get_input_specs_from_function(func),
output_specs=get_output_specs_from_function(func),
f=func)
else:
# Python functions
# TODO(b/186762191): Add an implementation to support python functions.
raise ValueError("Only TF functions are supported with Register method")
def start(self):
"""Starts GRPC server."""
gen_rpc_ops.rpc_server_start(self._server_handle)
| GrpcServer |
python | huggingface__transformers | src/transformers/models/aria/modular_aria.py | {
"start": 54918,
"end": 55504
} | class ____(AriaTextPreTrainedModel, LlamaForCausalLM):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
def __init__(self, config: AriaTextConfig):
super().__init__(config)
self.model = AriaTextModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(self, **super_kwargs):
super().forward(self, **super_kwargs)
| AriaTextForCausalLM |
python | ethereum__web3.py | tests/core/middleware/test_filter_middleware.py | {
"start": 6343,
"end": 10016
} | class ____(AsyncBaseProvider):
async def make_request(self, method, params):
raise NotImplementedError(f"Cannot make request for {method}:{params}")
@pytest_asyncio.fixture(scope="function")
async def async_w3(request_mocker, iter_block_number):
async_w3_base = AsyncWeb3(provider=AsyncDummyProvider(), middleware=[])
async_w3_base.middleware_onion.add(AttributeDictMiddleware)
async_w3_base.middleware_onion.add(LocalFilterMiddleware)
async with request_mocker(
async_w3_base,
mock_results={
"eth_getLogs": lambda *_: FILTER_LOG,
"eth_getBlockByNumber": lambda *_: {"hash": BLOCK_HASH},
"net_version": lambda *_: 1,
"eth_blockNumber": lambda *_: next(iter_block_number),
},
):
yield async_w3_base
@pytest.mark.parametrize(
"from_block,to_block,current_block,expected",
[
(
0,
10,
[10],
[
(0, 10),
],
),
(
0,
55,
[0, 19, 55],
[
(0, 0),
(1, 19),
(20, 55),
],
),
(
0,
None,
[10],
[
(0, 10),
],
),
(
0,
10,
[12],
[
(None, None),
],
),
(
12,
10,
[12],
[
(None, None),
],
),
(
12,
10,
[None],
[
(None, None),
],
),
(
10,
10,
[10, 10],
[
(10, 10),
(None, None),
],
),
],
)
@pytest.mark.asyncio
async def test_async_iter_latest_block_ranges(
async_w3, iter_block_number, from_block, to_block, current_block, expected
):
latest_block_ranges = async_iter_latest_block_ranges(async_w3, from_block, to_block)
for index, block in enumerate(current_block):
iter_block_number.send(block)
expected_tuple = expected[index]
actual_tuple = await latest_block_ranges.__anext__()
assert actual_tuple == expected_tuple
@pytest.mark.asyncio
async def test_async_LocalFilterMiddleware(async_w3, iter_block_number):
block_filter = await async_w3.eth.filter("latest")
await block_filter.get_new_entries()
iter_block_number.send(1)
block_changes = await async_w3.eth.get_filter_changes(block_filter.filter_id)
assert block_changes == [HexBytes(BLOCK_HASH)]
log_filter = await async_w3.eth.filter(filter_params={"fromBlock": "latest"})
iter_block_number.send(2)
log_changes = await async_w3.eth.get_filter_changes(log_filter.filter_id)
assert log_changes == FILTER_LOG
logs = await async_w3.eth.get_filter_logs(log_filter.filter_id)
assert logs == FILTER_LOG
log_filter_from_hex_string = await async_w3.eth.filter(
filter_params={"fromBlock": "0x0", "toBlock": "0x2"}
)
log_filter_from_int = await async_w3.eth.filter(
filter_params={"fromBlock": 1, "toBlock": 3}
)
filter_ids = (
block_filter.filter_id,
log_filter.filter_id,
log_filter_from_hex_string.filter_id,
log_filter_from_int.filter_id,
)
# Test that all ids are str types
assert all(isinstance(_filter_id, (str,)) for _filter_id in filter_ids)
# Test that all ids are unique
assert len(filter_ids) == len(set(filter_ids))
| AsyncDummyProvider |
python | numba__numba | numba/parfors/array_analysis.py | {
"start": 6233,
"end": 11220
} | class ____(object):
"""EquivSet keeps track of equivalence relations between
a set of objects.
"""
def __init__(self, obj_to_ind=None, ind_to_obj=None, next_ind=0):
"""Create a new EquivSet object. Optional keyword arguments are for
internal use only.
"""
# obj_to_ind maps object to equivalence index (sometimes also called
# equivalence class) is a non-negative number that uniquely identifies
# a set of objects that are equivalent.
self.obj_to_ind = obj_to_ind if obj_to_ind else {}
# ind_to_obj maps equivalence index to a list of objects.
self.ind_to_obj = ind_to_obj if ind_to_obj else {}
# next index number that is incremented each time a new equivalence
# relation is created.
self.next_ind = next_ind
def empty(self):
"""Return an empty EquivSet object.
"""
return EquivSet()
def clone(self):
"""Return a new copy.
"""
return EquivSet(
obj_to_ind=copy.deepcopy(self.obj_to_ind),
ind_to_obj=copy.deepcopy(self.ind_to_obj),
next_id=self.next_ind,
)
def __repr__(self):
return "EquivSet({})".format(self.ind_to_obj)
def is_empty(self):
"""Return true if the set is empty, or false otherwise.
"""
return self.obj_to_ind == {}
def _get_ind(self, x):
"""Return the internal index (greater or equal to 0) of the given
object, or -1 if not found.
"""
return self.obj_to_ind.get(x, -1)
def _get_or_add_ind(self, x):
"""Return the internal index (greater or equal to 0) of the given
object, or create a new one if not found.
"""
if x in self.obj_to_ind:
i = self.obj_to_ind[x]
else:
i = self.next_ind
self.next_ind += 1
return i
def _insert(self, objs):
"""Base method that inserts a set of equivalent objects by modifying
self.
"""
assert len(objs) > 1
inds = tuple(self._get_or_add_ind(x) for x in objs)
ind = min(inds)
if config.DEBUG_ARRAY_OPT >= 2:
print("_insert:", objs, inds)
if not (ind in self.ind_to_obj):
self.ind_to_obj[ind] = []
for i, obj in zip(inds, objs):
if i == ind:
if not (obj in self.ind_to_obj[ind]):
self.ind_to_obj[ind].append(obj)
self.obj_to_ind[obj] = ind
else:
if i in self.ind_to_obj:
# those already existing are reassigned
for x in self.ind_to_obj[i]:
self.obj_to_ind[x] = ind
self.ind_to_obj[ind].append(x)
del self.ind_to_obj[i]
else:
# those that are new are assigned.
self.obj_to_ind[obj] = ind
self.ind_to_obj[ind].append(obj)
def is_equiv(self, *objs):
"""Try to derive if given objects are equivalent, return true
if so, or false otherwise.
"""
inds = [self._get_ind(x) for x in objs]
ind = max(inds)
if ind != -1:
return all(i == ind for i in inds)
else:
return all([x == objs[0] for x in objs])
def get_equiv_const(self, obj):
"""Check if obj is equivalent to some int constant, and return
the constant if found, or None otherwise.
"""
ind = self._get_ind(obj)
if ind >= 0:
objs = self.ind_to_obj[ind]
for x in objs:
if isinstance(x, int):
return x
return None
def get_equiv_set(self, obj):
"""Return the set of equivalent objects.
"""
ind = self._get_ind(obj)
if ind >= 0:
return set(self.ind_to_obj[ind])
return set()
def insert_equiv(self, *objs):
"""Insert a set of equivalent objects by modifying self. This
method can be overloaded to transform object type before insertion.
"""
return self._insert(objs)
def intersect(self, equiv_set):
""" Return the intersection of self and the given equiv_set,
without modifying either of them. The result will also keep
old equivalence indices unchanged.
"""
new_set = self.empty()
new_set.next_ind = self.next_ind
for objs in equiv_set.ind_to_obj.values():
inds = tuple(self._get_ind(x) for x in objs)
ind_to_obj = {}
for i, x in zip(inds, objs):
if i in ind_to_obj:
ind_to_obj[i].append(x)
elif i >= 0:
ind_to_obj[i] = [x]
for v in ind_to_obj.values():
if len(v) > 1:
new_set._insert(v)
return new_set
| EquivSet |
python | coleifer__peewee | peewee.py | {
"start": 260098,
"end": 260307
} | class ____(ModelSelect):
def __sql__(self, ctx):
return self.model._meta.database.get_noop_select(ctx)
def _get_cursor_wrapper(self, cursor):
return CursorWrapper(cursor)
| NoopModelSelect |
python | python-jsonschema__jsonschema | jsonschema/_utils.py | {
"start": 923,
"end": 10659
} | class ____:
"""
An as-of-yet unset attribute or unprovided default parameter.
"""
def __repr__(self): # pragma: no cover
return "<unset>"
def format_as_index(container, indices):
"""
Construct a single string containing indexing operations for the indices.
For example for a container ``bar``, [1, 2, "foo"] -> bar[1][2]["foo"]
Arguments:
container (str):
A word to use for the thing being indexed
indices (sequence):
The indices to format.
"""
if not indices:
return container
return f"{container}[{']['.join(repr(index) for index in indices)}]"
def find_additional_properties(instance, schema):
"""
Return the set of additional properties for the given ``instance``.
Weeds out properties that should have been validated by ``properties`` and
/ or ``patternProperties``.
Assumes ``instance`` is dict-like already.
"""
properties = schema.get("properties", {})
patterns = "|".join(schema.get("patternProperties", {}))
for property in instance:
if property not in properties:
if patterns and re.search(patterns, property):
continue
yield property
def extras_msg(extras):
"""
Create an error message for extra items or properties.
"""
verb = "was" if len(extras) == 1 else "were"
return ", ".join(repr(extra) for extra in extras), verb
def ensure_list(thing):
"""
Wrap ``thing`` in a list if it's a single str.
Otherwise, return it unchanged.
"""
if isinstance(thing, str):
return [thing]
return thing
def _mapping_equal(one, two):
"""
Check if two mappings are equal using the semantics of `equal`.
"""
if len(one) != len(two):
return False
return all(
key in two and equal(value, two[key])
for key, value in one.items()
)
def _sequence_equal(one, two):
"""
Check if two sequences are equal using the semantics of `equal`.
"""
if len(one) != len(two):
return False
return all(equal(i, j) for i, j in zip(one, two))
def equal(one, two):
"""
Check if two things are equal evading some Python type hierarchy semantics.
Specifically in JSON Schema, evade `bool` inheriting from `int`,
recursing into sequences to do the same.
"""
if one is two:
return True
if isinstance(one, str) or isinstance(two, str):
return one == two
if isinstance(one, Sequence) and isinstance(two, Sequence):
return _sequence_equal(one, two)
if isinstance(one, Mapping) and isinstance(two, Mapping):
return _mapping_equal(one, two)
return unbool(one) == unbool(two)
def unbool(element, true=object(), false=object()):
    """
    A hack to make True and 1 and False and 0 unique for ``uniq``.

    ``true`` and ``false`` are deliberate sentinel default arguments:
    created once at definition time, so every call maps ``True`` (and
    ``False``) to the same module-lifetime stand-in object.
    """
    # Identity checks, not equality: 1 == True but 1 is not True.
    if element is False:
        return false
    if element is True:
        return true
    return element
def uniq(container):
    """
    Check if all of a container's elements are unique.

    Tries to rely on the container being recursively sortable, or otherwise
    falls back on (slow) brute force.
    """
    try:
        # Sorting brings duplicates adjacent; comparing each element with
        # its successor then suffices.  The whole fast path stays inside
        # the ``try`` so a TypeError anywhere in it triggers the fallback.
        ordered = sorted(unbool(item) for item in container)
        shifted = itertools.islice(ordered, 1, None)
        for left, right in zip(ordered, shifted):
            if equal(left, right):
                return False
    except (NotImplementedError, TypeError):
        # Unsortable elements: O(n^2) pairwise comparison.
        seen = []
        for candidate in container:
            candidate = unbool(candidate)
            if any(equal(prior, candidate) for prior in seen):
                return False
            seen.append(candidate)
    return True
def find_evaluated_item_indexes_by_schema(validator, instance, schema):
    """
    Get all indexes of items that get evaluated under the current schema.

    Covers all keywords related to unevaluatedItems: items, prefixItems, if,
    then, else, contains, unevaluatedItems, allOf, oneOf, anyOf

    Arguments:

        validator:

            the validator whose ``is_type``, ``evolve``, ``descend`` and
            ``_resolver`` are used to evaluate subschemas

        instance (sequence):

            the array instance whose evaluated item indexes are collected

        schema:

            the schema (possibly a boolean schema) currently being applied

    Returns a list of integer indexes; it may contain duplicates.
    """
    # Boolean schemas carry no keywords, so they evaluate no specific items.
    if validator.is_type(schema, "boolean"):
        return []
    evaluated_indexes = []

    # ``items`` applies to every element of the array, so every index is
    # evaluated and no further keywords need inspecting.
    if "items" in schema:
        return list(range(len(instance)))

    # ``$ref``: recurse into the resolved schema, evolving the validator so
    # that further lookups are relative to the resolved schema's resolver.
    ref = schema.get("$ref")
    if ref is not None:
        resolved = validator._resolver.lookup(ref)
        evaluated_indexes.extend(
            find_evaluated_item_indexes_by_schema(
                validator.evolve(
                    schema=resolved.contents,
                    _resolver=resolved.resolver,
                ),
                instance,
                resolved.contents,
            ),
        )

    # ``$dynamicRef`` is handled identically to ``$ref`` here.
    # NOTE(review): this is a plain lookup rather than the dynamic-scope
    # resolution ``$dynamicRef`` implies -- presumably sufficient for
    # annotation collection; confirm against the resolver's behavior.
    dynamicRef = schema.get("$dynamicRef")
    if dynamicRef is not None:
        resolved = validator._resolver.lookup(dynamicRef)
        evaluated_indexes.extend(
            find_evaluated_item_indexes_by_schema(
                validator.evolve(
                    schema=resolved.contents,
                    _resolver=resolved.resolver,
                ),
                instance,
                resolved.contents,
            ),
        )

    # ``prefixItems`` evaluates the first ``len(prefixItems)`` indexes
    # positionally (regardless of how many items the instance has).
    if "prefixItems" in schema:
        evaluated_indexes += list(range(len(schema["prefixItems"])))

    # ``if``/``then``/``else``: when the ``if`` subschema matches, both it
    # and ``then`` contribute; otherwise only ``else`` does.
    if "if" in schema:
        if validator.evolve(schema=schema["if"]).is_valid(instance):
            evaluated_indexes += find_evaluated_item_indexes_by_schema(
                validator, instance, schema["if"],
            )
            if "then" in schema:
                evaluated_indexes += find_evaluated_item_indexes_by_schema(
                    validator, instance, schema["then"],
                )
        elif "else" in schema:
            evaluated_indexes += find_evaluated_item_indexes_by_schema(
                validator, instance, schema["else"],
            )

    # ``contains``/``unevaluatedItems``: an item counts as evaluated when it
    # individually validates against the keyword's subschema.
    for keyword in ["contains", "unevaluatedItems"]:
        if keyword in schema:
            for k, v in enumerate(instance):
                if validator.evolve(schema=schema[keyword]).is_valid(v):
                    evaluated_indexes.append(k)

    # ``allOf``/``oneOf``/``anyOf``: only subschemas the instance satisfies
    # (``descend`` yields no error) contribute their evaluated indexes.
    for keyword in ["allOf", "oneOf", "anyOf"]:
        if keyword in schema:
            for subschema in schema[keyword]:
                errs = next(validator.descend(instance, subschema), None)
                if errs is None:
                    evaluated_indexes += find_evaluated_item_indexes_by_schema(
                        validator, instance, subschema,
                    )

    return evaluated_indexes
def find_evaluated_property_keys_by_schema(validator, instance, schema):
    """
    Get all keys of items that get evaluated under the current schema.

    Covers all keywords related to unevaluatedProperties: properties,
    additionalProperties, unevaluatedProperties, patternProperties,
    dependentSchemas, allOf, oneOf, anyOf, if, then, else

    Arguments:

        validator:

            the validator whose ``is_type``, ``evolve``, ``descend`` and
            ``_resolver`` are used to evaluate subschemas

        instance (mapping):

            the object instance whose evaluated property keys are collected

        schema:

            the schema (possibly a boolean schema) currently being applied

    Returns a list of property names; it may contain duplicates.
    """
    # Boolean schemas carry no keywords, so they evaluate no properties.
    if validator.is_type(schema, "boolean"):
        return []
    evaluated_keys = []

    # ``$ref``: recurse into the resolved schema, evolving the validator so
    # that further lookups are relative to the resolved schema's resolver.
    ref = schema.get("$ref")
    if ref is not None:
        resolved = validator._resolver.lookup(ref)
        evaluated_keys.extend(
            find_evaluated_property_keys_by_schema(
                validator.evolve(
                    schema=resolved.contents,
                    _resolver=resolved.resolver,
                ),
                instance,
                resolved.contents,
            ),
        )

    # ``$dynamicRef`` is handled identically to ``$ref`` here.
    # NOTE(review): plain lookup rather than dynamic-scope resolution --
    # presumably sufficient for annotation collection; confirm.
    dynamicRef = schema.get("$dynamicRef")
    if dynamicRef is not None:
        resolved = validator._resolver.lookup(dynamicRef)
        evaluated_keys.extend(
            find_evaluated_property_keys_by_schema(
                validator.evolve(
                    schema=resolved.contents,
                    _resolver=resolved.resolver,
                ),
                instance,
                resolved.contents,
            ),
        )

    # ``properties``: only names present both in the schema's ``properties``
    # and in the instance are evaluated.
    properties = schema.get("properties")
    if validator.is_type(properties, "object"):
        evaluated_keys += properties.keys() & instance.keys()

    # ``additionalProperties``/``unevaluatedProperties``: any instance
    # property whose value validates against the subschema is evaluated.
    for keyword in ["additionalProperties", "unevaluatedProperties"]:
        if (subschema := schema.get(keyword)) is None:
            continue
        evaluated_keys += (
            key
            for key, value in instance.items()
            if is_valid(validator.descend(value, subschema))
        )

    # ``patternProperties``: a property is evaluated if its name matches any
    # pattern (a name matching several patterns is appended once per match).
    if "patternProperties" in schema:
        for property in instance:
            for pattern in schema["patternProperties"]:
                if re.search(pattern, property):
                    evaluated_keys.append(property)

    # ``dependentSchemas``: a dependent subschema applies only when its
    # trigger property is present in the instance.
    if "dependentSchemas" in schema:
        for property, subschema in schema["dependentSchemas"].items():
            if property not in instance:
                continue
            evaluated_keys += find_evaluated_property_keys_by_schema(
                validator, instance, subschema,
            )

    # ``allOf``/``oneOf``/``anyOf``: only subschemas the instance satisfies
    # contribute their evaluated keys.
    for keyword in ["allOf", "oneOf", "anyOf"]:
        for subschema in schema.get(keyword, []):
            if not is_valid(validator.descend(instance, subschema)):
                continue
            evaluated_keys += find_evaluated_property_keys_by_schema(
                validator, instance, subschema,
            )

    # ``if``/``then``/``else``: when ``if`` matches, both it and ``then``
    # contribute; otherwise only ``else`` does.
    if "if" in schema:
        if validator.evolve(schema=schema["if"]).is_valid(instance):
            evaluated_keys += find_evaluated_property_keys_by_schema(
                validator, instance, schema["if"],
            )
            if "then" in schema:
                evaluated_keys += find_evaluated_property_keys_by_schema(
                    validator, instance, schema["then"],
                )
        elif "else" in schema:
            evaluated_keys += find_evaluated_property_keys_by_schema(
                validator, instance, schema["else"],
            )

    return evaluated_keys
def is_valid(errs_it):
    """Whether there are no errors in the given iterator."""
    # Mirrors ``next(errs_it, None) is None``: an exhausted iterator is
    # valid, and so is one whose first yielded value is ``None``.
    try:
        first = next(errs_it)
    except StopIteration:
        return True
    return first is None
| Unset |
python | apache__airflow | dev/breeze/src/airflow_breeze/commands/ci_commands.py | {
"start": 8897,
"end": 28837
} | class ____(NamedTuple):
event_name: str
pull_request_labels: list[str]
target_repo: str
head_repo: str
ref: str | None
ref_name: str | None
pr_number: int | None
head_ref: str | None = None
def get_all_ga_outputs(self) -> Iterable[str]:
from airflow_breeze.utils.github import get_ga_output
yield get_ga_output(name="pr_labels", value=str(self.pull_request_labels))
yield get_ga_output(name="target_repo", value=self.target_repo)
yield get_ga_output(name="head_repo", value=self.head_repo)
yield get_ga_output(name="pr_number", value=str(self.pr_number) if self.pr_number else "")
yield get_ga_output(name="event_name", value=str(self.event_name))
yield get_ga_output(name="runs-on", value=self.get_runs_on())
yield get_ga_output(name="canary-run", value=self.is_canary_run())
yield get_ga_output(name="run-coverage", value=self.run_coverage())
yield get_ga_output(name="head-ref", value=self.head_ref)
def print_all_ga_outputs(self):
for output in self.get_all_ga_outputs():
print(output, file=sys.stderr)
def get_runs_on(self) -> str:
for label in self.pull_request_labels:
if "use public runners" in label:
get_console().print("[info]Force running on public runners")
return PUBLIC_AMD_RUNNERS
return PUBLIC_AMD_RUNNERS
def is_canary_run(self) -> str:
if (
self.event_name
in [
GithubEvents.PUSH.value,
GithubEvents.WORKFLOW_DISPATCH.value,
GithubEvents.SCHEDULE.value,
]
and self.head_repo == "apache/airflow"
and self.ref_name
and (self.ref_name == "main" or TEST_BRANCH_MATCHER.match(self.ref_name))
):
return "true"
if "canary" in self.pull_request_labels:
return "true"
return "false"
def run_coverage(self) -> str:
if (
self.event_name == GithubEvents.PUSH.value
and self.head_repo == "apache/airflow"
and self.ref == "refs/heads/main"
):
return "true"
return "false"
def workflow_info(context: str) -> WorkflowInfo:
ctx: dict[Any, Any] = json.loads(context)
event_name = ctx.get("event_name")
if not event_name:
get_console().print(f"[error]Missing event_name in: {ctx}")
sys.exit(1)
pull_request_labels = []
head_repo = ""
target_repo = ""
pr_number: int | None = None
ref_name = ctx.get("ref_name")
ref = ctx.get("ref")
head_ref = ctx.get("head_ref")
if event_name == GithubEvents.PULL_REQUEST.value:
event = ctx.get("event")
if event:
pr = event.get(GithubEvents.PULL_REQUEST.value)
if pr:
labels = pr.get("labels")
if labels:
for label in labels:
pull_request_labels.append(label["name"])
target_repo = pr["base"]["repo"]["full_name"]
head_repo = pr["head"]["repo"]["full_name"]
pr_number = pr["number"]
elif event_name == GithubEvents.PUSH.value:
target_repo = ctx["repository"]
head_repo = ctx["repository"]
event_name = ctx["event_name"]
elif event_name == GithubEvents.SCHEDULE.value:
target_repo = ctx["repository"]
head_repo = ctx["repository"]
event_name = ctx["event_name"]
elif event_name == GithubEvents.WORKFLOW_DISPATCH.value:
target_repo = ctx["repository"]
head_repo = ctx["repository"]
event_name = ctx["event_name"]
elif event_name == GithubEvents.PULL_REQUEST_TARGET.value:
target_repo = ctx["repository"]
head_repo = ctx["repository"]
event_name = ctx["event_name"]
else:
get_console().print(f"[error]Wrong event name: {event_name}")
sys.exit(1)
return WorkflowInfo(
event_name=event_name,
pull_request_labels=pull_request_labels,
target_repo=target_repo,
head_repo=head_repo,
pr_number=pr_number,
ref=ref,
ref_name=ref_name,
head_ref=head_ref,
)
@ci_group.command(
name="get-workflow-info",
help="Retrieve information about current workflow in the CI"
"and produce github actions output extracted from it.",
)
@click.option("--github-context", help="JSON-formatted github context", envvar="GITHUB_CONTEXT")
@click.option(
"--github-context-input",
help="file input (might be `-`) with JSON-formatted github context",
type=click.File("rt"),
envvar="GITHUB_CONTEXT_INPUT",
)
def get_workflow_info(github_context: str, github_context_input: StringIO):
if github_context and github_context_input:
get_console().print(
"[error]You can only specify one of the two --github-context or --github-context-file"
)
sys.exit(1)
if github_context:
context = github_context
elif github_context_input:
context = github_context_input.read()
else:
get_console().print(
"[error]You must specify one of the two --github-context or --github-context-file"
)
sys.exit(1)
wi = workflow_info(context=context)
wi.print_all_ga_outputs()
@ci_group.command(
name="upgrade",
help="Perform important upgrade steps of the CI environment. And create a PR",
)
@click.option(
"--target-branch",
default=AIRFLOW_BRANCH,
help="Branch to work on and make PR against (e.g., 'main' or 'vX-Y-test')",
show_default=True,
)
@click.option(
"--create-pr/--no-create-pr",
default=None,
help="Automatically create a PR with the upgrade changes (if not specified, will ask)",
is_flag=True,
)
@click.option(
"--switch-to-base/--no-switch-to-base",
default=None,
help="Automatically switch to the base branch if not already on it (if not specified, will ask)",
is_flag=True,
)
@option_answer
@option_verbose
@option_dry_run
def upgrade(target_branch: str, create_pr: bool | None, switch_to_base: bool | None):
# Validate target_branch pattern
target_branch_pattern = re.compile(r"^(main|v\d+-\d+-test)$")
if not target_branch_pattern.match(target_branch):
get_console().print(
f"[error]Invalid target branch: '{target_branch}'. "
"Must be 'main' or follow pattern 'vX-Y-test' where X and Y are numbers (e.g., 'v2-10-test').[/]"
)
sys.exit(1)
# Check if we're on the main branch
branch_result = run_command(
["git", "rev-parse", "--abbrev-ref", "HEAD"], capture_output=True, text=True, check=False
)
current_branch = branch_result.stdout.strip() if branch_result.returncode == 0 else ""
# Store the original branch/commit to restore later if needed
original_branch = current_branch
original_commit_result = run_command(
["git", "rev-parse", "HEAD"], capture_output=True, text=True, check=False
)
original_commit = original_commit_result.stdout.strip() if original_commit_result.returncode == 0 else ""
# Check if the working directory is clean
status_result = run_command(["git", "status", "--porcelain"], capture_output=True, text=True, check=False)
is_clean = status_result.returncode == 0 and not status_result.stdout.strip()
# Check if we have the apache remote and get its name
remote_result = run_command(["git", "remote", "-v"], capture_output=True, text=True, check=False)
apache_remote_name = None
origin_remote_name = None
origin_repo = None # Store the user's fork repo (e.g., "username/airflow")
if remote_result.returncode == 0:
# Parse remote output to find apache/airflow remote and origin remote
# Format: remote_name\turl (fetch|push)
for line in remote_result.stdout.splitlines():
parts = line.split()
if len(parts) >= 2:
remote_name = parts[0]
remote_url = parts[1]
if "apache/airflow" in remote_url and apache_remote_name is None:
apache_remote_name = remote_name
# Also track origin remote for pushing
if remote_name == "origin" and origin_remote_name is None:
origin_remote_name = remote_name
# Extract repo from origin URL (supports both HTTPS and SSH formats)
# HTTPS: https://github.com/username/airflow.git
# SSH: git@github.com:username/airflow.git
if "github.com" in remote_url:
if "git@github.com:" in remote_url:
# SSH format
repo_part = remote_url.split("git@github.com:")[1]
elif "github.com/" in remote_url:
# HTTPS format
repo_part = remote_url.split("github.com/")[1]
else:
repo_part = None
if repo_part:
# Remove .git suffix if present
origin_repo = repo_part.replace(".git", "").strip()
has_apache_remote = apache_remote_name is not None
# Check if we're up to date with apache/airflow on the specified branch
if has_apache_remote:
# Fetch apache remote to get latest info
run_command(["git", "fetch", apache_remote_name], check=False)
# Check if the target branch exists in the apache remote
branch_exists = run_command(
["git", "rev-parse", "--verify", f"{apache_remote_name}/{target_branch}"],
capture_output=True,
check=False,
)
if branch_exists.returncode != 0:
get_console().print(
f"[error]Target branch '{target_branch}' does not exist in remote '{apache_remote_name}'.[/]"
)
sys.exit(1)
# Check if current HEAD matches apache_remote/<branch>
local_head = run_command(["git", "rev-parse", "HEAD"], capture_output=True, text=True, check=False)
remote_head = run_command(
["git", "rev-parse", f"{apache_remote_name}/{target_branch}"],
capture_output=True,
text=True,
check=False,
)
at_apache_branch = (
current_branch == target_branch
and local_head.returncode == 0
and remote_head.returncode == 0
and local_head.stdout.strip() == remote_head.stdout.strip()
)
else:
at_apache_branch = False
get_console().print(
"[warning]No apache remote found. The command expects remote pointing to apache/airflow[/]"
)
# Track whether user chose to reset to target branch
user_switched_to_target = False
if not at_apache_branch or not is_clean:
get_console().print()
if not at_apache_branch:
get_console().print(
f"[warning]You are not at the top of apache/airflow {target_branch} branch.[/]"
)
get_console().print(f"[info]Current branch: {current_branch}[/]")
if not is_clean:
get_console().print("[warning]Your repository has uncommitted changes.[/]")
get_console().print()
# Determine whether to switch to base branch
should_switch = switch_to_base
if should_switch is None:
# Not specified, ask the user
get_console().print(
f"[warning]Attempting to switch to switch to {target_branch}. "
f"This will lose not committed code.[/]\n\n"
"NO will continue to get changes on top of current branch, QUIT will exit."
)
response = user_confirm("Do you want to switch")
if response == Answer.YES:
should_switch = True
elif response == Answer.QUIT:
get_console().print(
f"[error]Upgrade cancelled. Please ensure you are on apache/airflow {target_branch} with a clean repository.[/]"
)
sys.exit(1)
else:
should_switch = False
if should_switch:
user_switched_to_target = True
get_console().print(f"[info]Resetting to apache/airflow {target_branch}...[/]")
if current_branch != target_branch:
run_command(["git", "checkout", target_branch])
run_command(["git", "fetch", apache_remote_name])
run_command(["git", "reset", "--hard", f"{apache_remote_name}/{target_branch}"])
run_command(["git", "clean", "-fdx"])
get_console().print(
f"[success]Successfully reset to apache/airflow {target_branch} and cleaned repository.[/]"
)
else:
get_console().print(
f"[info]Continuing with current branch {current_branch}. Changes will be on top of it.[/]"
)
get_console().print("[info]Running upgrade of important CI environment.[/]")
# Get GitHub token from gh CLI and set it in environment copy
gh_token_result = run_command(
["gh", "auth", "token"],
capture_output=True,
text=True,
check=False,
)
# Create a copy of the environment to pass to commands
command_env = os.environ.copy()
if gh_token_result.returncode == 0 and gh_token_result.stdout.strip():
github_token = gh_token_result.stdout.strip()
command_env["GITHUB_TOKEN"] = github_token
get_console().print("[success]GitHub token retrieved from gh CLI and set in environment.[/]")
else:
get_console().print(
"[warning]Could not retrieve GitHub token from gh CLI. "
"Commands may fail if they require authentication.[/]"
)
# Define all upgrade commands to run (all run with check=False to continue on errors)
upgrade_commands = [
"prek autoupdate --freeze",
"prek autoupdate --bleeding-edge --freeze --repo https://github.com/Lucas-C/pre-commit-hooks",
"prek autoupdate --bleeding-edge --freeze --repo https://github.com/eclipse-csi/octopin",
"prek --all-files --verbose --hook-stage manual pin-versions",
"prek --all-files --show-diff-on-failure --color always --verbose --hook-stage manual update-chart-dependencies",
"prek --all-files --show-diff-on-failure --color always --verbose --hook-stage manual upgrade-important-versions",
]
# Execute all upgrade commands with the environment containing GitHub token
for command in upgrade_commands:
run_command(command.split(), check=False, env=command_env)
res = run_command(["git", "diff", "--exit-code"], check=False)
if res.returncode == 0:
get_console().print("[success]No changes were made during the upgrade. Exiting[/]")
sys.exit(0)
# Determine whether to create a PR
should_create_pr = create_pr
if should_create_pr is None:
# Not specified, ask the user
should_create_pr = user_confirm("Do you want to create a PR with the upgrade changes?") == Answer.YES
if should_create_pr:
# Get current HEAD commit hash for unique branch name
head_result = run_command(
["git", "rev-parse", "--short", "HEAD"], capture_output=True, text=True, check=False
)
commit_hash = head_result.stdout.strip() if head_result.returncode == 0 else "unknown"
branch_name = f"ci-upgrade-{commit_hash}"
# Check if branch already exists and delete it
branch_check = run_command(
["git", "rev-parse", "--verify", branch_name], capture_output=True, check=False
)
if branch_check.returncode == 0:
get_console().print(f"[info]Branch {branch_name} already exists, deleting it...[/]")
run_command(["git", "branch", "-D", branch_name])
run_command(["git", "checkout", "-b", branch_name])
run_command(["git", "add", "."])
run_command(["git", "commit", "-m", "CI: Upgrade important CI environment"])
# Push the branch to origin (use detected origin or fallback to 'origin')
push_remote = origin_remote_name if origin_remote_name else "origin"
get_console().print(f"[info]Pushing branch {branch_name} to {push_remote}...[/]")
push_result = run_command(
["git", "push", "-u", push_remote, branch_name, "--force"],
capture_output=True,
text=True,
check=False,
)
if push_result.returncode != 0:
get_console().print(
f"[error]Failed to push branch:\n{push_result.stdout}\n{push_result.stderr}[/]"
)
sys.exit(1)
get_console().print(f"[success]Branch {branch_name} pushed to {push_remote}.[/]")
# Create PR from the pushed branch
# gh pr create needs --head in format "username:branch" when creating a PR from a fork
# Extract username from origin_repo (e.g., "username/airflow" -> "username")
if origin_repo:
owner = origin_repo.split("/")[0]
head_ref = f"{owner}:{branch_name}"
get_console().print(
f"[info]Creating PR from {origin_repo} branch {branch_name} to apache/airflow {target_branch}...[/]"
)
else:
# Fallback to just branch name if we couldn't determine the fork
head_ref = branch_name
get_console().print("[warning]Could not determine fork repository. Using branch name only.[/]")
pr_result = run_command(
[
"gh",
"pr",
"create",
"-w",
"--repo",
"apache/airflow",
"--head",
head_ref,
"--base",
target_branch,
"--title",
f"[{target_branch}] Upgrade important CI environment",
"--body",
"This PR upgrades important dependencies of the CI environment.",
],
capture_output=True,
text=True,
check=False,
env=command_env,
)
if pr_result.returncode != 0:
get_console().print(f"[error]Failed to create PR:\n{pr_result.stdout}\n{pr_result.stderr}[/]")
sys.exit(1)
pr_url = pr_result.stdout.strip() if pr_result.returncode == 0 else ""
get_console().print(f"[success]PR created successfully: {pr_url}.[/]")
# Switch back to appropriate branch and delete the temporary branch
get_console().print(f"[info]Cleaning up temporary branch {branch_name}...[/]")
if user_switched_to_target:
# User explicitly chose to switch to target branch, so stay there
run_command(["git", "checkout", target_branch])
else:
# User didn't switch initially, restore to original branch/commit
if original_branch == "HEAD":
# Detached HEAD state, restore to original commit
get_console().print(f"[info]Restoring to original commit {original_commit[:8]}...[/]")
run_command(["git", "checkout", original_commit])
else:
# Named branch, restore to it
get_console().print(f"[info]Restoring to original branch {original_branch}...[/]")
run_command(["git", "checkout", original_branch])
# Delete local branch
run_command(["git", "branch", "-D", branch_name])
get_console().print(f"[success]Local branch {branch_name} deleted.[/]")
else:
get_console().print("[info]PR creation skipped. Changes are committed locally.[/]")
| WorkflowInfo |
python | sympy__sympy | sympy/functions/combinatorial/numbers.py | {
"start": 21905,
"end": 26707
} | class ____(DefinedFunction):
r"""
Bell numbers / Bell polynomials
The Bell numbers satisfy `B_0 = 1` and
.. math:: B_n = \sum_{k=0}^{n-1} \binom{n-1}{k} B_k.
They are also given by:
.. math:: B_n = \frac{1}{e} \sum_{k=0}^{\infty} \frac{k^n}{k!}.
The Bell polynomials are given by `B_0(x) = 1` and
.. math:: B_n(x) = x \sum_{k=1}^{n-1} \binom{n-1}{k-1} B_{k-1}(x).
The second kind of Bell polynomials (are sometimes called "partial" Bell
polynomials or incomplete Bell polynomials) are defined as
.. math:: B_{n,k}(x_1, x_2,\dotsc x_{n-k+1}) =
\sum_{j_1+j_2+j_2+\dotsb=k \atop j_1+2j_2+3j_2+\dotsb=n}
\frac{n!}{j_1!j_2!\dotsb j_{n-k+1}!}
\left(\frac{x_1}{1!} \right)^{j_1}
\left(\frac{x_2}{2!} \right)^{j_2} \dotsb
\left(\frac{x_{n-k+1}}{(n-k+1)!} \right) ^{j_{n-k+1}}.
* ``bell(n)`` gives the `n^{th}` Bell number, `B_n`.
* ``bell(n, x)`` gives the `n^{th}` Bell polynomial, `B_n(x)`.
* ``bell(n, k, (x1, x2, ...))`` gives Bell polynomials of the second kind,
`B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1})`.
Notes
=====
Not to be confused with Bernoulli numbers and Bernoulli polynomials,
which use the same notation.
Examples
========
>>> from sympy import bell, Symbol, symbols
>>> [bell(n) for n in range(11)]
[1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147, 115975]
>>> bell(30)
846749014511809332450147
>>> bell(4, Symbol('t'))
t**4 + 6*t**3 + 7*t**2 + t
>>> bell(6, 2, symbols('x:6')[1:])
6*x1*x5 + 15*x2*x4 + 10*x3**2
See Also
========
bernoulli, catalan, euler, fibonacci, harmonic, lucas, genocchi, partition, tribonacci
References
==========
.. [1] https://en.wikipedia.org/wiki/Bell_number
.. [2] https://mathworld.wolfram.com/BellNumber.html
.. [3] https://mathworld.wolfram.com/BellPolynomial.html
"""
@staticmethod
@recurrence_memo([1, 1])
def _bell(n, prev):
s = 1
a = 1
for k in range(1, n):
a = a * (n - k) // k
s += a * prev[k]
return s
@staticmethod
@recurrence_memo([S.One, _sym])
def _bell_poly(n, prev):
s = 1
a = 1
for k in range(2, n + 1):
a = a * (n - k + 1) // (k - 1)
s += a * prev[k - 1]
return expand_mul(_sym * s)
@staticmethod
def _bell_incomplete_poly(n, k, symbols):
r"""
The second kind of Bell polynomials (incomplete Bell polynomials).
Calculated by recurrence formula:
.. math:: B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1}) =
\sum_{m=1}^{n-k+1}
\x_m \binom{n-1}{m-1} B_{n-m,k-1}(x_1, x_2, \dotsc, x_{n-m-k})
where
`B_{0,0} = 1;`
`B_{n,0} = 0; for n \ge 1`
`B_{0,k} = 0; for k \ge 1`
"""
if (n == 0) and (k == 0):
return S.One
elif (n == 0) or (k == 0):
return S.Zero
s = S.Zero
a = S.One
for m in range(1, n - k + 2):
s += a * bell._bell_incomplete_poly(
n - m, k - 1, symbols) * symbols[m - 1]
a = a * (n - m) / m
return expand_mul(s)
@classmethod
def eval(cls, n, k_sym=None, symbols=None):
if n is S.Infinity:
if k_sym is None:
return S.Infinity
else:
raise ValueError("Bell polynomial is not defined")
if n.is_negative or n.is_integer is False:
raise ValueError("a non-negative integer expected")
if n.is_Integer and n.is_nonnegative:
if k_sym is None:
return Integer(cls._bell(int(n)))
elif symbols is None:
return cls._bell_poly(int(n)).subs(_sym, k_sym)
else:
r = cls._bell_incomplete_poly(int(n), int(k_sym), symbols)
return r
def _eval_rewrite_as_Sum(self, n, k_sym=None, symbols=None, **kwargs):
from sympy.concrete.summations import Sum
if (k_sym is not None) or (symbols is not None):
return self
# Dobinski's formula
if not n.is_nonnegative:
return self
k = Dummy('k', integer=True, nonnegative=True)
return 1 / E * Sum(k**n / factorial(k), (k, 0, S.Infinity))
#----------------------------------------------------------------------------#
# #
# Harmonic numbers #
# #
#----------------------------------------------------------------------------#
| bell |
python | allegroai__clearml | clearml/automation/job.py | {
"start": 32560,
"end": 32900
} | class ____(ClearmlJob):
"""
Deprecated, use ClearmlJob
"""
def __init__(self, **kwargs: Any) -> None:
super(TrainsJob, self).__init__(**kwargs)
warnings.warn(
"Use clearml.automation.ClearmlJob",
DeprecationWarning,
)
# noinspection PyMethodMayBeStatic, PyUnusedLocal
| TrainsJob |
python | kamyu104__LeetCode-Solutions | Python/find-the-string-with-lcp.py | {
"start": 69,
"end": 856
} | class ____(object):
def findTheString(self, lcp):
"""
:type lcp: List[List[int]]
:rtype: str
"""
result = [-1]*len(lcp)
curr = 0
for i in xrange(len(lcp)):
if result[i] != -1:
continue
if curr == 26:
return ""
for j in xrange(i, len(lcp[0])):
if lcp[i][j]:
result[j] = curr
curr += 1
for i in reversed(xrange(len(lcp))):
for j in reversed(xrange(len(lcp[0]))):
if lcp[i][j] != ((lcp[i+1][j+1]+1 if i+1 < len(lcp) and j+1 < len(lcp[0]) else 1) if result[i] == result[j] else 0):
return ''
return "".join(map(lambda x: chr(ord('a')+x), result))
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/sqltypes.py | {
"start": 15478,
"end": 20991
} | class ____(NumericCommon[_N], TypeEngine[_N]):
"""Base for non-integer numeric types, such as
``NUMERIC``, ``FLOAT``, ``DECIMAL``, and other variants.
The :class:`.Numeric` datatype when used directly will render DDL
corresponding to precision numerics if available, such as
``NUMERIC(precision, scale)``. The :class:`.Float` subclass will
attempt to render a floating-point datatype such as ``FLOAT(precision)``.
:class:`.Numeric` returns Python ``decimal.Decimal`` objects by default,
based on the default value of ``True`` for the
:paramref:`.Numeric.asdecimal` parameter. If this parameter is set to
False, returned values are coerced to Python ``float`` objects.
The :class:`.Float` subtype, being more specific to floating point,
defaults the :paramref:`.Float.asdecimal` flag to False so that the
default Python datatype is ``float``.
.. note::
When using a :class:`.Numeric` datatype against a database type that
returns Python floating point values to the driver, the accuracy of the
decimal conversion indicated by :paramref:`.Numeric.asdecimal` may be
limited. The behavior of specific numeric/floating point datatypes
is a product of the SQL datatype in use, the Python :term:`DBAPI`
in use, as well as strategies that may be present within
the SQLAlchemy dialect in use. Users requiring specific precision/
scale are encouraged to experiment with the available datatypes
in order to determine the best results.
"""
__visit_name__ = "numeric"
@overload
def __init__(
self: Numeric[decimal.Decimal],
precision: Optional[int] = ...,
scale: Optional[int] = ...,
decimal_return_scale: Optional[int] = ...,
asdecimal: Literal[True] = ...,
): ...
@overload
def __init__(
self: Numeric[float],
precision: Optional[int] = ...,
scale: Optional[int] = ...,
decimal_return_scale: Optional[int] = ...,
asdecimal: Literal[False] = ...,
): ...
def __init__(
self,
precision: Optional[int] = None,
scale: Optional[int] = None,
decimal_return_scale: Optional[int] = None,
asdecimal: bool = True,
):
"""
Construct a Numeric.
:param precision: the numeric precision for use in DDL ``CREATE
TABLE``.
:param scale: the numeric scale for use in DDL ``CREATE TABLE``.
:param asdecimal: default True. Return whether or not
values should be sent as Python Decimal objects, or
as floats. Different DBAPIs send one or the other based on
datatypes - the Numeric type will ensure that return values
are one or the other across DBAPIs consistently.
:param decimal_return_scale: Default scale to use when converting
from floats to Python decimals. Floating point values will typically
be much longer due to decimal inaccuracy, and most floating point
database types don't have a notion of "scale", so by default the
float type looks for the first ten decimal places when converting.
Specifying this value will override that length. Types which
do include an explicit ".scale" value, such as the base
:class:`.Numeric` as well as the MySQL float types, will use the
value of ".scale" as the default for decimal_return_scale, if not
otherwise specified.
When using the ``Numeric`` type, care should be taken to ensure
that the asdecimal setting is appropriate for the DBAPI in use -
when Numeric applies a conversion from Decimal->float or float->
Decimal, this conversion incurs an additional performance overhead
for all result columns received.
DBAPIs that return Decimal natively (e.g. psycopg2) will have
better accuracy and higher performance with a setting of ``True``,
as the native translation to Decimal reduces the amount of floating-
point issues at play, and the Numeric type itself doesn't need
to apply any further conversions. However, another DBAPI which
returns floats natively *will* incur an additional conversion
overhead, and is still subject to floating point data loss - in
which case ``asdecimal=False`` will at least remove the extra
conversion overhead.
"""
super().__init__(
precision=precision,
scale=scale,
decimal_return_scale=decimal_return_scale,
asdecimal=asdecimal,
)
@property
def _type_affinity(self):
return Numeric
def result_processor(self, dialect, coltype):
if self.asdecimal:
if dialect.supports_native_decimal:
# we're a "numeric", DBAPI will give us Decimal directly
return None
else:
# we're a "numeric", DBAPI returns floats, convert.
return processors.to_decimal_processor_factory(
decimal.Decimal,
(
self.scale
if self.scale is not None
else self._default_decimal_return_scale
),
)
else:
if dialect.supports_native_decimal:
return processors.to_float
else:
return None
| Numeric |
python | encode__django-rest-framework | rest_framework/fields.py | {
"start": 7671,
"end": 8461
} | class ____:
"""
This class may be used to provide default values that are only used
for create operations, but that do not return any value for update
operations.
"""
requires_context = True
def __init__(self, default):
self.default = default
def __call__(self, serializer_field):
is_update = serializer_field.parent.instance is not None
if is_update:
raise SkipField()
if callable(self.default):
if getattr(self.default, 'requires_context', False):
return self.default(serializer_field)
else:
return self.default()
return self.default
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, repr(self.default))
| CreateOnlyDefault |
python | huggingface__transformers | tests/models/glpn/test_modeling_glpn.py | {
"start": 13135,
"end": 14061
} | class ____(unittest.TestCase):
@slow
def test_inference_depth_estimation(self):
image_processor = GLPNImageProcessor.from_pretrained("vinvino02/glpn-kitti")
model = GLPNForDepthEstimation.from_pretrained("vinvino02/glpn-kitti").to(torch_device)
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the predicted depth
expected_shape = torch.Size([1, 480, 640])
self.assertEqual(outputs.predicted_depth.shape, expected_shape)
expected_slice = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
).to(torch_device)
torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
| GLPNModelIntegrationTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/ext/asyncio/base.py | {
"start": 2963,
"end": 3720
} | class ____(Awaitable[_T_co], abc.ABC):
__slots__ = ()
@abc.abstractmethod
async def start(self, is_ctxmanager: bool = False) -> _T_co:
raise NotImplementedError()
def __await__(self) -> Generator[Any, Any, _T_co]:
return self.start().__await__()
async def __aenter__(self) -> _T_co:
return await self.start(is_ctxmanager=True)
@abc.abstractmethod
async def __aexit__(
self, type_: Any, value: Any, traceback: Any
) -> Optional[bool]:
pass
def _raise_for_not_started(self) -> NoReturn:
raise async_exc.AsyncContextNotStarted(
"%s context has not been started and object has not been awaited."
% (self.__class__.__name__)
)
| StartableContext |
python | spack__spack | lib/spack/spack/modules/lmod.py | {
"start": 9507,
"end": 15823
} | class ____(BaseFileLayout):
"""File layout for lmod module files."""
#: file extension of lua module files
extension = "lua"
@property
def arch_dirname(self):
"""Returns the root folder for THIS architecture"""
# Architecture sub-folder
arch_folder_conf = spack.config.get("modules:%s:arch_folder" % self.conf.name, True)
if arch_folder_conf:
# include an arch specific folder between root and filename
arch_folder = "-".join(
[str(self.spec.platform), str(self.spec.os), str(self.spec.target.family)]
)
return os.path.join(self.dirname(), arch_folder)
return self.dirname()
@property
def filename(self):
"""Returns the filename for the current module file"""
# Get the list of requirements and build an **ordered**
# list of the path parts
requires = self.conf.requires
hierarchy = self.conf.hierarchy_tokens
path_parts = lambda x: self.token_to_path(x, requires[x])
parts = [path_parts(x) for x in hierarchy if x in requires]
# My relative path if just a join of all the parts
hierarchy_name = os.path.join(*parts)
# Compute the absolute path
return os.path.join(
self.arch_dirname, # root for lmod files on this architecture
hierarchy_name, # relative path
f"{self.use_name}.{self.extension}", # file name
)
@property
def modulerc(self):
"""Returns the modulerc file associated with current module file"""
return os.path.join(os.path.dirname(self.filename), f".modulerc.{self.extension}")
def token_to_path(self, name, value):
"""Transforms a hierarchy token into the corresponding path part.
Args:
name (str): name of the service in the hierarchy
value: actual provider of the service
Returns:
str: part of the path associated with the service
"""
# General format for the path part
def path_part_fmt(token):
return fs.polite_path([f"{token.name}", f"{token.version}"])
# If we are dealing with a core compiler, return 'Core'
core_compilers = self.conf.core_compilers
if name == "compiler" and any(spack.spec.Spec(value).satisfies(c) for c in core_compilers):
return "Core"
# Spec does not have a hash, as we are not allowed to
# use different flavors of the same compiler
if name == "compiler":
return path_part_fmt(token=value)
# In case the hierarchy token refers to a virtual provider
# we need to append a hash to the version to distinguish
# among flavors of the same library (e.g. openblas~openmp vs.
# openblas+openmp)
return f"{path_part_fmt(token=value)}-{value.dag_hash(length=7)}"
@property
def available_path_parts(self):
"""List of path parts that are currently available. Needed to
construct the file name.
"""
# List of available services
available = self.conf.available
# List of services that are part of the hierarchy
hierarchy = self.conf.hierarchy_tokens
# Tokenize each part that is both in the hierarchy and available
return [self.token_to_path(x, available[x]) for x in hierarchy if x in available]
@property
@lang.memoized
def unlocked_paths(self):
"""Returns a dictionary mapping conditions to a list of unlocked
paths.
The paths that are unconditionally unlocked are under the
key 'None'. The other keys represent the list of services you need
loaded to unlock the corresponding paths.
"""
unlocked = collections.defaultdict(list)
# Get the list of services we require and we provide
requires_key = list(self.conf.requires)
provides_key = list(self.conf.provides)
# A compiler is always required. To avoid duplication pop the
# 'compiler' item from required if we also **provide** one
if "compiler" in provides_key:
requires_key.remove("compiler")
# Compute the unique combinations of the services we provide
combinations = []
for ii in range(len(provides_key)):
combinations += itertools.combinations(provides_key, ii + 1)
# Attach the services required to each combination
to_be_processed = [x + tuple(requires_key) for x in combinations]
# Compute the paths that are unconditionally added
# and append them to the dictionary (key = None)
available_combination = []
for item in to_be_processed:
hierarchy = self.conf.hierarchy_tokens
available = self.conf.available
ac = [x for x in hierarchy if x in item]
available_combination.append(tuple(ac))
parts = [self.token_to_path(x, available[x]) for x in ac]
unlocked[None].append(tuple([self.arch_dirname] + parts))
# Deduplicate the list
unlocked[None] = list(lang.dedupe(unlocked[None]))
# Compute the combination of missing requirements: this will lead to
# paths that are unlocked conditionally
missing = self.conf.missing
missing_combinations = []
for ii in range(len(missing)):
missing_combinations += itertools.combinations(missing, ii + 1)
# Attach the services required to each combination
for m in missing_combinations:
to_be_processed = [m + x for x in available_combination]
for item in to_be_processed:
hierarchy = self.conf.hierarchy_tokens
available = self.conf.available
token2path = lambda x: self.token_to_path(x, available[x])
parts = []
for x in hierarchy:
if x not in item:
continue
value = token2path(x) if x in available else x
parts.append(value)
unlocked[m].append(tuple([self.arch_dirname] + parts))
# Deduplicate the list
unlocked[m] = list(lang.dedupe(unlocked[m]))
return unlocked
| LmodFileLayout |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 86782,
"end": 86882
} | class ____(BaseModel, extra="forbid"):
pow: "PowParams" = Field(..., description="")
| PowExpression |
python | spyder-ide__spyder | spyder/plugins/completion/providers/languageserver/client.py | {
"start": 1732,
"end": 24111
} | class ____(QObject, LSPMethodProviderMixIn, SpyderConfigurationAccessor):
"""Language Server Protocol v3.0 client implementation."""
#: Signal to inform the editor plugin that the client has
# started properly and it's ready to be used.
sig_initialize = Signal(dict, str)
#: Signal to report internal server errors through Spyder's
# facilities.
sig_server_error = Signal(str)
#: Signal to warn the user when either the transport layer or the
# server went down
sig_went_down = Signal(str)
def __init__(self, parent,
server_settings={},
folder=getcwd_or_home(),
language='python'):
QObject.__init__(self)
self.manager = parent
self.zmq_in_socket = None
self.zmq_out_socket = None
self.zmq_in_port = None
self.zmq_out_port = None
self.transport = None
self.server = None
self.stdio_pid = None
self.notifier = None
self.language = language
self.initialized = False
self.ready_to_close = False
self.request_seq = 1
self.req_status = {}
self.watched_files = {}
self.watched_folders = {}
self.req_reply = {}
self.server_unresponsive = False
self.transport_unresponsive = False
# Select a free port to start the server.
# NOTE: Don't use the new value to set server_setttings['port']!!
# That's not required because this doesn't really correspond to a
# change in the config settings of the server. Else a server
# restart would be generated when doing a
# workspace/didChangeConfiguration request.
if not server_settings['external']:
self.server_port = select_port(
default_port=server_settings['port'])
else:
self.server_port = server_settings['port']
self.server_host = server_settings['host']
self.external_server = server_settings.get('external', False)
self.stdio = server_settings.get('stdio', False)
# Setting stdio on implies that external_server is off
if self.stdio and self.external_server:
error = ('If server is set to use stdio communication, '
'then it cannot be an external server')
logger.error(error)
raise AssertionError(error)
self.folder = folder
self.configurations = server_settings.get('configurations', {})
self.client_capabilites = CLIENT_CAPABILITES
self.server_capabilites = SERVER_CAPABILITES
self.context = zmq.Context()
# To set server args
self._server_args = server_settings.get('args', '')
self._server_cmd = server_settings['cmd']
# Save requests name and id. This is only necessary for testing.
self._requests = []
def _get_log_filename(self, kind):
"""
Get filename to redirect server or transport logs to in
debugging mode.
Parameters
----------
kind: str
It can be "server" or "transport".
"""
if get_debug_level() == 0:
return None
fname = '{0}_{1}_{2}.log'.format(kind, self.language, os.getpid())
location = get_conf_path(osp.join('lsp_logs', fname))
# Create directory that contains the file, in case it doesn't
# exist
if not osp.exists(osp.dirname(location)):
os.makedirs(osp.dirname(location))
return location
@property
def server_log_file(self):
"""
Filename to redirect the server process stdout/stderr output.
"""
return self._get_log_filename('server')
@property
def transport_log_file(self):
"""
Filename to redirect the transport process stdout/stderr
output.
"""
return self._get_log_filename('transport')
@property
def server_args(self):
"""Arguments for the server process."""
args = []
if self.language == 'python':
args += [sys.executable, '-m']
args += [self._server_cmd]
# Replace host and port placeholders
host_and_port = self._server_args.format(
host=self.server_host,
port=self.server_port)
if len(host_and_port) > 0:
args += host_and_port.split(' ')
if self.language == 'python' and get_debug_level() > 0:
args += ['--log-file', self.server_log_file]
if get_debug_level() == 2:
args.append('-v')
elif get_debug_level() == 3:
args.append('-vv')
return args
@property
def transport_args(self):
"""Arguments for the transport process."""
args = [
sys.executable,
'-u',
osp.join(LOCATION, 'transport', 'main.py'),
'--folder', self.folder,
'--transport-debug', str(get_debug_level())
]
# Replace host and port placeholders
host_and_port = '--server-host {host} --server-port {port} '.format(
host=self.server_host,
port=self.server_port)
args += host_and_port.split(' ')
# Add socket ports
args += ['--zmq-in-port', str(self.zmq_out_port),
'--zmq-out-port', str(self.zmq_in_port)]
# Adjustments for stdio/tcp
if self.stdio:
args += ['--stdio-server']
if get_debug_level() > 0:
args += ['--server-log-file', self.server_log_file]
args += self.server_args
else:
args += ['--external-server']
return args
def create_transport_sockets(self):
"""Create PyZMQ sockets for transport."""
self.zmq_out_socket = self.context.socket(zmq.PAIR)
self.zmq_out_port = self.zmq_out_socket.bind_to_random_port(
'tcp://{}'.format(LOCALHOST))
self.zmq_in_socket = self.context.socket(zmq.PAIR)
self.zmq_in_socket.set_hwm(0)
self.zmq_in_port = self.zmq_in_socket.bind_to_random_port(
'tcp://{}'.format(LOCALHOST))
@Slot(QProcess.ProcessError)
def handle_process_errors(self, error):
"""Handle errors with the transport layer or server processes."""
self.sig_went_down.emit(self.language)
def start_server(self):
"""Start server."""
# This is not necessary if we're trying to connect to an
# external server
if self.external_server or self.stdio:
return
logger.info('Starting server: {0}'.format(' '.join(self.server_args)))
# Create server process
self.server = QProcess(self)
env = self.server.processEnvironment()
# Adjustments for the Python language server.
if self.language == 'python':
# Set the PyLS current working to an empty dir inside
# our config one. This avoids the server to pick up user
# files such as random.py or string.py instead of the
# standard library modules named the same.
cwd = osp.join(get_conf_path(), 'lsp_paths', 'cwd')
if not osp.exists(cwd):
os.makedirs(cwd)
if os.name == "nt":
# On Windows, some modules (notably Matplotlib)
# cause exceptions if they cannot get the user home.
# So, we need to pass the USERPROFILE env variable to
# the PyLSP.
if "USERPROFILE" in os.environ:
env.insert("USERPROFILE", os.environ["USERPROFILE"])
# The PyLSP can't start on pip installations if APPDATA
# is missing and the user has installed their packages on
# that directory.
# Fixes spyder-ide/spyder#17661
if not is_conda_env(sys.prefix) and "APPDATA" in os.environ:
env.insert("APPDATA", os.environ["APPDATA"])
else:
# There's no need to define a cwd for other servers.
cwd = None
# Most LSP servers spawn other processes, which may require
# some environment variables.
for var in os.environ:
env.insert(var, os.environ[var])
logger.info('Server process env variables: {0}'.format(env.keys()))
# Setup server
self.server.setProcessEnvironment(env)
self.server.errorOccurred.connect(self.handle_process_errors)
self.server.setWorkingDirectory(cwd)
self.server.setProcessChannelMode(QProcess.MergedChannels)
if self.server_log_file is not None:
self.server.setStandardOutputFile(self.server_log_file)
# Start server
self.server.start(self.server_args[0], self.server_args[1:])
def start_transport(self):
"""Start transport layer."""
logger.info('Starting transport for {1}: {0}'
.format(' '.join(self.transport_args), self.language))
# Create transport process
self.transport = QProcess(self)
env = self.transport.processEnvironment()
# Most LSP servers spawn other processes other than Python, which may
# require some environment variables
if self.language != 'python' and self.stdio:
for var in os.environ:
env.insert(var, os.environ[var])
logger.info('Transport process env variables: {0}'.format(
env.keys()))
self.transport.setProcessEnvironment(env)
# Set up transport
self.transport.errorOccurred.connect(self.handle_process_errors)
if self.stdio:
self.transport.setProcessChannelMode(QProcess.SeparateChannels)
if self.transport_log_file is not None:
self.transport.setStandardErrorFile(self.transport_log_file)
else:
self.transport.setProcessChannelMode(QProcess.MergedChannels)
if self.transport_log_file is not None:
self.transport.setStandardOutputFile(self.transport_log_file)
# Start transport
self.transport.start(self.transport_args[0], self.transport_args[1:])
def start(self):
"""Start client."""
# NOTE: DO NOT change the order in which these methods are called.
self.create_transport_sockets()
self.start_server()
self.start_transport()
# Create notifier
fid = self.zmq_in_socket.getsockopt(zmq.FD)
self.notifier = QSocketNotifier(fid, QSocketNotifier.Read, self)
self.notifier.activated.connect(self.on_msg_received)
# This is necessary for tests to pass locally!
logger.debug('LSP {} client started!'.format(self.language))
def stop(self):
"""Stop transport and server."""
logger.info('Stopping {} client...'.format(self.language))
if self.notifier is not None:
self.notifier.activated.disconnect(self.on_msg_received)
self.notifier.setEnabled(False)
self.notifier = None
# waitForFinished(): Wait some time for process to exit. This fixes an
# error message by Qt (“QProcess: Destroyed while process (…) is still
# running.”). No further error handling because we are out of luck
# anyway if the process doesn’t finish.
if self.transport is not None:
self.transport.close()
self.transport.waitForFinished(1000)
self.context.destroy()
if self.server is not None:
self.server.close()
self.server.waitForFinished(1000)
def is_transport_alive(self):
"""Detect if transport layer is alive."""
state = self.transport.state()
return state != QProcess.NotRunning
def is_stdio_alive(self):
"""Check if an stdio server is alive."""
alive = True
if not psutil.pid_exists(self.stdio_pid):
alive = False
else:
try:
pid_status = psutil.Process(self.stdio_pid).status()
except psutil.NoSuchProcess:
pid_status = ''
if pid_status == psutil.STATUS_ZOMBIE:
alive = False
return alive
def is_server_alive(self):
"""Detect if a tcp server is alive."""
state = self.server.state()
return state != QProcess.NotRunning
def is_down(self):
"""
Detect if the transport layer or server are down to inform our
users about it.
"""
is_down = False
if self.transport and not self.is_transport_alive():
logger.debug(
"Transport layer for {} is down!!".format(self.language))
if not self.transport_unresponsive:
self.transport_unresponsive = True
self.sig_went_down.emit(self.language)
is_down = True
if self.server and not self.is_server_alive():
logger.debug("LSP server for {} is down!!".format(self.language))
if not self.server_unresponsive:
self.server_unresponsive = True
self.sig_went_down.emit(self.language)
is_down = True
if self.stdio_pid and not self.is_stdio_alive():
logger.debug("LSP server for {} is down!!".format(self.language))
if not self.server_unresponsive:
self.server_unresponsive = True
self.sig_went_down.emit(self.language)
is_down = True
return is_down
def send(self, method, params, kind):
"""Send message to transport."""
if self.is_down():
return
# Don't send requests to the server before it's been initialized.
if not self.initialized and method != 'initialize':
return
if ClientConstants.CANCEL in params:
return
_id = self.request_seq
if kind == MessageKind.REQUEST:
msg = {
'id': self.request_seq,
'method': method,
'params': params
}
self.req_status[self.request_seq] = method
elif kind == MessageKind.RESPONSE:
msg = {
'id': self.request_seq,
'result': params
}
elif kind == MessageKind.NOTIFICATION:
msg = {
'method': method,
'params': params
}
logger.debug('Perform request {0} with id {1}'.format(method, _id))
# Save requests to check their ordering.
if running_under_pytest():
self._requests.append((_id, method))
# Try sending a message. If the send queue is full, keep trying for a
# a second before giving up.
timeout = 1
start_time = time.time()
timeout_time = start_time + timeout
while True:
try:
self.zmq_out_socket.send_pyobj(msg, flags=zmq.NOBLOCK)
self.request_seq += 1
return int(_id)
except zmq.error.Again:
if time.time() > timeout_time:
self.sig_went_down.emit(self.language)
return
# The send queue is full! wait 0.1 seconds before retrying.
if self.initialized:
logger.warning("The send queue is full! Retrying...")
time.sleep(.1)
@Slot()
def on_msg_received(self):
"""Process received messages."""
self.notifier.setEnabled(False)
while True:
try:
# events = self.zmq_in_socket.poll(1500)
resp = self.zmq_in_socket.recv_pyobj(flags=zmq.NOBLOCK)
try:
method = resp['method']
logger.debug(
'{} response: {}'.format(self.language, method))
except KeyError:
pass
if 'error' in resp:
logger.debug('{} Response error: {}'
.format(self.language, repr(resp['error'])))
if self.language == 'python':
# Show PyLS errors in our error report dialog only in
# debug or development modes
if get_debug_level() > 0 or DEV:
message = resp['error'].get('message', '')
traceback = (resp['error'].get('data', {}).
get('traceback'))
if traceback is not None:
traceback = ''.join(traceback)
traceback = traceback + '\n' + message
self.sig_server_error.emit(traceback)
req_id = resp['id']
if req_id in self.req_reply:
self.req_reply[req_id](None, {'params': []})
elif 'method' in resp:
if resp['method'][0] != '$':
if 'id' in resp:
self.request_seq = int(resp['id'])
if resp['method'] in self.handler_registry:
handler_name = (
self.handler_registry[resp['method']])
handler = getattr(self, handler_name)
handler(resp['params'])
elif 'result' in resp:
if resp['result'] is not None:
req_id = resp['id']
if req_id in self.req_status:
req_type = self.req_status[req_id]
if req_type in self.handler_registry:
handler_name = self.handler_registry[req_type]
handler = getattr(self, handler_name)
handler(resp['result'], req_id)
self.req_status.pop(req_id)
if req_id in self.req_reply:
self.req_reply.pop(req_id)
except RuntimeError:
# This is triggered when a codeeditor instance has been
# removed before the response can be processed.
pass
except zmq.ZMQError:
self.notifier.setEnabled(True)
return
def perform_request(self, method, params):
if method in self.sender_registry:
handler_name = self.sender_registry[method]
handler = getattr(self, handler_name)
_id = handler(params)
if 'response_callback' in params:
if params['requires_response']:
self.req_reply[_id] = params['response_callback']
return _id
# ------ LSP initialization methods --------------------------------
@handles(SERVER_READY)
@send_request(method=CompletionRequestTypes.INITIALIZE)
def initialize(self, params, *args, **kwargs):
self.stdio_pid = params['pid']
pid = self.transport.processId() if not self.external_server else None
params = {
'processId': pid,
'rootUri': pathlib.Path(osp.abspath(self.folder)).as_uri(),
'capabilities': self.client_capabilites,
'trace': TRACE
}
return params
@send_request(method=CompletionRequestTypes.SHUTDOWN)
def shutdown(self):
params = {}
return params
@handles(CompletionRequestTypes.SHUTDOWN)
def handle_shutdown(self, response, *args):
self.ready_to_close = True
@send_notification(method=CompletionRequestTypes.EXIT)
def exit(self):
params = {}
return params
@handles(CompletionRequestTypes.INITIALIZE)
def process_server_capabilities(self, server_capabilites, *args):
"""
Register server capabilities and inform other plugins that it's
available.
"""
# Update server capabilities with the info sent by the server.
server_capabilites = server_capabilites['capabilities']
if isinstance(server_capabilites['textDocumentSync'], int):
kind = server_capabilites['textDocumentSync']
server_capabilites['textDocumentSync'] = TEXT_DOCUMENT_SYNC_OPTIONS
server_capabilites['textDocumentSync']['change'] = kind
if server_capabilites['textDocumentSync'] is None:
server_capabilites.pop('textDocumentSync')
self.server_capabilites.update(server_capabilites)
# The initialized notification needs to be the first request sent by
# the client according to the protocol.
self.initialized = True
self.initialized_call()
# This sends a DidChangeConfiguration request to pass to the server
# the configurations set by the user in our config system.
self.send_configurations(self.configurations)
# Inform other plugins that the server is up.
self.sig_initialize.emit(self.server_capabilites, self.language)
@send_notification(method=CompletionRequestTypes.INITIALIZED)
def initialized_call(self):
params = {}
return params
# ------ Settings queries --------------------------------
@property
def support_multiple_workspaces(self):
workspace_settings = self.server_capabilites['workspace']
return workspace_settings['workspaceFolders']['supported']
@property
def support_workspace_update(self):
workspace_settings = self.server_capabilites['workspace']
return workspace_settings['workspaceFolders']['changeNotifications']
def test():
"""Test LSP client."""
from spyder.utils.qthelpers import qapplication
app = qapplication(test_time=8)
server_args_fmt = '--host %(host)s --port %(port)s --tcp'
server_settings = {'host': '127.0.0.1', 'port': 2087, 'cmd': 'pyls'}
lsp = LSPClient(app, server_args_fmt, server_settings)
lsp.start()
app.aboutToQuit.connect(lsp.stop)
signal.signal(signal.SIGINT, signal.SIG_DFL)
sys.exit(app.exec_())
if __name__ == "__main__":
test()
| LSPClient |
python | Textualize__textual | tests/css/test_screen_css.py | {
"start": 203,
"end": 418
} | class ____(Screen):
def compose(self):
yield Label("Hello, world!", id="app-css")
yield Label("Hello, world!", id="screen-css-path")
yield Label("Hello, world!", id="screen-css")
| BaseScreen |
python | kamyu104__LeetCode-Solutions | Python/minimum-length-of-anagram-concatenation.py | {
"start": 206,
"end": 960
} | class ____(object):
def minAnagramLength(self, s):
"""
:type s: str
:rtype: int
"""
def factors(n):
for i in xrange(1, n+1):
if i*i > n:
break
if n%i:
continue
yield i
if n//i != i:
yield n//i
def check(l):
def count(i):
cnt = [0]*26
for j in xrange(i, i+l):
cnt[ord(s[j])-ord('a')] += 1
return cnt
cnt = count(0)
return all(count(i) == cnt for i in xrange(l, len(s), l))
return min(l for l in factors(len(s)) if check(l))
| Solution |
python | google__jax | jax/_src/checkify.py | {
"start": 17480,
"end": 58194
} | class ____(ValueError):
pass
@check_p.def_impl
def check_impl(*args, err_tree, debug):
if debug:
# NOOP (check will only trigger when discharged)
return []
error = tree_unflatten(err_tree, args)
exc = error.get_exception()
if exc:
filtered_tb = traceback_util.filter_traceback(
exc.traceback_info.as_python_traceback())
exc.with_traceback(filtered_tb)
raise JaxRuntimeError(str(exc)) from exc
return []
@check_p.def_effectful_abstract_eval
def check_abstract_eval(*args, err_tree, debug):
del debug
return [], set(tree_unflatten(err_tree, args)._pred.keys())
# TODO(lenamartens) add in-depth error explanation to link to in module docs.
functionalization_error = ValueError(
'Cannot abstractly evaluate a checkify.check which was not'
' functionalized. This probably means you tried to stage'
' (jit/scan/pmap/...) a `check` without functionalizing it'
' through `checkify.checkify`.'
)
def check_lowering_rule(ctx, *args, err_tree, debug):
if debug:
# NOOP (check will only trigger when discharged)
return []
if not config.xla_runtime_errors.value:
raise functionalization_error
out_op, _, _ = callback.emit_python_callback(
ctx, callback=functools.partial(python_err, err_tree),
token=None,
operands=args,
operand_avals=list(ctx.avals_in),
result_avals=list(ctx.avals_out),
has_side_effect=True,
returns_token=False)
return out_op
def check_lowering_rule_unsupported(*a, debug, **k):
if debug:
return []
raise functionalization_error
def python_err(err_tree, *args):
error = tree_unflatten(err_tree, args)
_check_error(error)
return []
mlir.register_lowering(check_p, check_lowering_rule_unsupported,
platform='tpu')
mlir.register_lowering(check_p, check_lowering_rule,
platform='cpu')
mlir.register_lowering(check_p, check_lowering_rule,
platform='gpu')
def check_batching_rule(batched_args, batch_dims, *, err_tree, debug):
size = next(x.shape[dim] for x, dim in zip(batched_args, batch_dims)
if dim is not batching.not_mapped)
batched_args = (batching.bdim_at_front(a, d, size)
for a, d in zip(batched_args, batch_dims))
err = tree_unflatten(err_tree, batched_args)
_check_error(err, debug=debug)
return [], []
batching.primitive_batchers[check_p] = check_batching_rule
def check_jvp_rule(primals, _, *, err_tree, debug):
# Check primals, discard tangents.
check_p.bind(*primals, err_tree=err_tree, debug=debug)
return [], []
ad.primitive_jvps[check_p] = check_jvp_rule
## checkify rules
ErrorCheckRule = Callable # (Error, FrozenSet[ErrorCategory], *in_vals, **params) -> (Any, Error)
error_checks: dict[core.Primitive, ErrorCheckRule] = {}
def get_traceback():
return source_info_util.current().traceback
def nan_error_check(prim, error, enabled_errors, *in_vals, **params):
out = prim.bind(*in_vals, **params)
err = check_nans(prim, error, enabled_errors, out)
return err, out
def check_nans(prim, error, enabled_errors, out):
if NaNError not in enabled_errors:
return error
def isnan(x):
if dtypes.issubdtype(x.dtype, dtypes.prng_key):
return False
return jnp.any(jnp.isnan(x))
any_nans = (jnp.any(jnp.array([isnan(x) for x in out]))
if prim.multiple_results else isnan(out))
return assert_func(error, any_nans, NaNError(get_traceback(), prim.name))
# All primitives which can generate a NaN.
nan_primitives = [lax.acos_p, lax.acosh_p, lax.add_p, lax.asin_p, lax.asinh_p,
lax.atan2_p, lax.atan_p, lax.atanh_p, lax.bessel_i0e_p,
lax.bessel_i1e_p, lax.cbrt_p, lax.conv_general_dilated_p,
lax.cos_p, lax.cosh_p, lax.cumlogsumexp_p, lax.cummax_p,
lax.cummin_p, lax.cumprod_p, lax.cumsum_p, lax.digamma_p,
lax.dot_general_p, lax.erf_inv_p, lax.erf_p, lax.erfc_p,
lax.exp_p, lax.expm1_p, lax.fft_p, lax.igamma_grad_a_p,
lax.igamma_p, lax.igammac_p, lax.integer_pow_p, lax.lgamma_p,
lax.linear_solve_p, lax.log1p_p, lax.log_p, lax.logistic_p,
lax.mul_p, lax.pad_p, lax.pow_p, lax.psum_p,
lax.reduce_p, lax.reduce_prod_p,
lax.reduce_sum_p, lax.reduce_window_p,
lax.reduce_window_sum_p, lax.regularized_incomplete_beta_p,
lax.rem_p, lax.rng_uniform_p, lax.rsqrt_p, lax.sin_p,
lax.sinh_p, lax.sqrt_p, lax.sub_p, lax.tan_p, lax.tanh_p]
for _prim in nan_primitives:
error_checks[_prim] = functools.partial(nan_error_check, _prim)
def dynamic_slice_error_check(error, enabled_errors, operand, *start_indices, slice_sizes):
out = lax.dynamic_slice_p.bind(operand, *start_indices, slice_sizes=slice_sizes)
if OOBError not in enabled_errors:
return error, out
start_indices = jnp.array(start_indices)
operand_dims = np.array(operand.shape, dtype=start_indices.dtype)
slice_sizes = np.array(slice_sizes, dtype=start_indices.dtype)
oob_mask = (start_indices < 0) | (start_indices + slice_sizes > operand_dims)
payload = oob_payload(oob_mask, start_indices, range(operand.ndim), operand.shape)
error = assert_func(error, jnp.any(oob_mask), OOBError(get_traceback(), "dynamic_slice", operand.shape, payload))
return error, out
error_checks[lax.dynamic_slice_p] = dynamic_slice_error_check
def dynamic_update_slice_error_check(error, enabled_errors, operand, update, *start_indices):
out = lax.dynamic_update_slice_p.bind(operand, update, *start_indices)
if OOBError not in enabled_errors:
return error, out
operand_dims = np.array(operand.shape)
update_dims = np.array(update.shape)
start_indices = jnp.array(start_indices)
oob_mask = (start_indices < 0) | (start_indices + update_dims > operand_dims)
payload = oob_payload(oob_mask, start_indices, range(operand.ndim), operand.shape)
error = assert_func(error, jnp.any(oob_mask), OOBError(get_traceback(), "dynamic_update_slice", operand.shape, payload))
return error, out
error_checks[lax.dynamic_update_slice_p] = dynamic_update_slice_error_check
def gather_error_check(error, enabled_errors, operand, start_indices, *,
dimension_numbers, slice_sizes, unique_indices,
indices_are_sorted, mode, fill_value):
out = lax.gather_p.bind(
operand, start_indices, dimension_numbers=dimension_numbers,
slice_sizes=slice_sizes, unique_indices=unique_indices,
indices_are_sorted=indices_are_sorted, mode=mode, fill_value=fill_value)
if OOBError not in enabled_errors:
return error, out
# compare to OOB masking logic in lax._gather_translation_rule
dnums = dimension_numbers
operand_dims = np.array(operand.shape)
num_batch_dims = len(start_indices.shape) - 1
upper_bound = operand_dims[np.array(dnums.start_index_map)]
upper_bound -= np.array(slice_sizes)[np.array(dnums.start_index_map)]
upper_bound = jnp.expand_dims(upper_bound, axis=tuple(range(num_batch_dims)))
oob_mask = (start_indices < 0) | (start_indices > upper_bound.astype(start_indices.dtype))
payload = oob_payload(oob_mask, start_indices, dnums.start_index_map, operand.shape)
error = assert_func(error, jnp.any(oob_mask), OOBError(get_traceback(), "gather", operand.shape, payload))
return error, out
error_checks[lax.gather_p] = gather_error_check
def div_error_check(error, enabled_errors, x, y):
"""Checks for division by zero and NaN."""
if DivisionByZeroError in enabled_errors:
any_zero = jnp.any(jnp.equal(y, 0))
error = assert_func(error, any_zero, DivisionByZeroError(get_traceback()))
return nan_error_check(lax.div_p, error, enabled_errors, x, y)
error_checks[lax.div_p] = div_error_check
def oob_payload(oob_mask, indices, dims_map, operand_shape):
  """Build the (index, axis, axis_size) payload for the first OOB access.

  The payload is later formatted into the OOBError message.
  """
  # argmin of the negated mask locates the first True entry of oob_mask.
  first_oob = jnp.argmin(jnp.logical_not(oob_mask))
  # Recover the multi-dimensional position; its last component selects which
  # entry of dims_map (i.e. which mapped operand dimension) was indexed.
  position = jnp.unravel_index(first_oob, indices.shape)
  axis = jnp.array(dims_map)[position[-1]]
  axis_size = jnp.array(operand_shape)[axis]
  bad_index = jnp.ravel(indices)[first_oob]
  return jnp.array([bad_index, axis, axis_size], dtype=np.int32)
def scatter_oob(operand, indices, updates, dnums):
  """Compute (any_oob_predicate, payload) for scatter index bounds checking."""
  # Ref: see clamping code used in scatter_translation_rule
  # Reconstruct the per-operand-dimension slice sizes implied by `updates`:
  # inserted window dims contribute size 1, the remaining dims take their
  # extent from the corresponding update window dimension.
  slice_sizes = []
  pos = 0
  for i in range(len(operand.shape)):
    if i in dnums.inserted_window_dims:
      slice_sizes.append(1)
    else:
      slice_sizes.append(updates.shape[dnums.update_window_dims[pos]])
      pos += 1
  upper_bound = np.array([operand.shape[i] - slice_sizes[i]
                          for i in dnums.scatter_dims_to_operand_dims],
                         np.int64)
  # Clamp the bound so it stays representable in the index dtype before the
  # comparison below.
  upper_bound = np.minimum(upper_bound, np.iinfo(indices.dtype).max)
  upper_bound = lax.broadcast_in_dim(upper_bound, indices.shape,
                                     (len(indices.shape) - 1,))
  lower_oob = jnp.less(indices, 0)
  upper_oob = jnp.greater(indices, upper_bound.astype(indices.dtype))
  oob_mask = jnp.logical_or(lower_oob, upper_oob)
  payload = oob_payload(oob_mask, indices,
                        dnums.scatter_dims_to_operand_dims, operand.shape)
  return jnp.any(oob_mask), payload
def scatter_error_check(prim, error, enabled_errors, operand, indices, updates,
                        *, update_jaxpr, update_consts, dimension_numbers,
                        indices_are_sorted, unique_indices, mode):
  """Checks if indices are within bounds and update does not generate NaN."""
  out = prim.bind(
      operand, indices, updates, update_jaxpr=update_jaxpr,
      update_consts=update_consts, dimension_numbers=dimension_numbers,
      indices_are_sorted=indices_are_sorted, unique_indices=unique_indices,
      mode=mode)
  if OOBError not in enabled_errors:
    return error, out
  out_of_bounds, payload = scatter_oob(operand, indices, updates, dimension_numbers)
  oob_error = OOBError(get_traceback(), prim.name, operand.shape, payload)
  error = assert_func(error, out_of_bounds, oob_error)
  # Scatter outputs can also contain NaNs (e.g. produced by the update
  # computation), so run the shared NaN check as well.
  error = check_nans(prim, error, enabled_errors, out)
  return error, out
# One shared rule, specialized per scatter primitive via partial application.
error_checks[lax.scatter_p] = functools.partial(scatter_error_check, lax.scatter_p)
error_checks[lax.scatter_add_p] = functools.partial(scatter_error_check,
                                                    lax.scatter_add_p)
error_checks[lax.scatter_mul_p] = functools.partial(scatter_error_check,
                                                    lax.scatter_mul_p)
error_checks[lax.scatter_min_p] = functools.partial(scatter_error_check,
                                                    lax.scatter_min_p)
error_checks[lax.scatter_max_p] = functools.partial(scatter_error_check,
                                                    lax.scatter_max_p)
# HOP error check rules
@weakref_lru_cache
def jaxpr_to_checkify_jaxpr(
    jaxpr: core.ClosedJaxpr, enabled_errors, err_tree: PyTreeDef,
    *flat_err_and_in_vals) -> tuple[core.ClosedJaxpr, PyTreeDef, set[ErrorEffect]]:
  """Trace `jaxpr` into its checkified form.

  Returns the checked jaxpr (whose inputs are the flattened error values
  followed by the original inputs), the output pytree structure, and the set
  of error effects the body may raise. Results are cached, keyed weakly on
  the input jaxpr.
  """
  checkify_jaxpr_partial = functools.partial(checkify_jaxpr_flat, jaxpr.jaxpr,
                                             jaxpr.consts, enabled_errors,
                                             err_tree)
  fun = lu.wrap_init(checkify_jaxpr_partial,
                     debug_info=jaxpr.jaxpr.debug_info.with_unknown_names())
  fun, metadata = _flatten_and_get_error_metadata_thunk(fun)
  new_jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(fun, flat_err_and_in_vals)
  checked_jaxpr = core.ClosedJaxpr(new_jaxpr, consts)
  # metadata() is only valid after tracing has run.
  out_tree, error_effects = metadata()
  return checked_jaxpr, out_tree, error_effects
def cond_error_check(error: Error, enabled_errors, index, *ops,
                     branches, **params):
  """Error-check rule for lax.cond_p: checkify every branch."""
  # Get the error-effects out of all branches so the cond can be called with
  # a merged error with all these effects.
  err_vals, err_tree = jtu.tree_flatten(error)
  # NOTE: in this module `map` is JAX's list-returning safe_map, so in_avals
  # can be re-iterated once per branch below.
  in_avals = map(core.get_aval, [*err_vals, *ops])
  def get_error_effects_from_jaxpr(jxpr):
    _, _, effects = jaxpr_to_checkify_jaxpr(jxpr, enabled_errors, err_tree,
                                            *in_avals)
    return effects
  effects = [get_error_effects_from_jaxpr(jxpr) for jxpr in branches]
  merged_error = error._add_placeholder_effects(set().union(*effects))
  # Re-flatten: the merged error may have more leaves than the incoming one.
  err_vals, err_tree = jtu.tree_flatten(merged_error)
  # Update branch jaxprs to be checkified jaxprs.
  in_avals = map(core.get_aval, [*err_vals, *ops])
  new_branches, out_trees, _ = unzip3(
      jaxpr_to_checkify_jaxpr(
          jxpr, enabled_errors, err_tree, *in_avals) for jxpr in branches)
  err_and_outs = lax.cond_p.bind(
      index, *err_vals, *ops,
      branches=tuple(new_branches), **params)
  # we need to merge metadata across out_trees (a tuple)
  err0, out = tree_unflatten(out_trees[0], err_and_outs)
  merged_metadata = err0._metadata
  for tr in out_trees[1:]:
    err, _ = tree_unflatten(tr, err_and_outs)
    merged_metadata = {**merged_metadata, **err._metadata}
  return err0._replace(_metadata=merged_metadata), out
error_checks[lax.cond_p] = cond_error_check
def scan_error_check(error, enabled_errors, *in_flat, reverse, length, jaxpr,
                     num_consts, num_carry, linear, unroll, _split_transpose):
  """Error-check rule for lax.scan_p: thread the error through the carry."""
  consts, carry, xs = split_list(in_flat, [num_consts, num_carry])
  xs_mapped = [core.mapped_aval(length, 0, core.get_aval(val)) for val in xs]
  # Query body effects to create a merged error containing all effects (such
  # that in and out carried error are of the same type).
  err_vals, err_tree = jtu.tree_flatten(error)
  new_in_aval = map(core.get_aval, [*err_vals, *consts, *carry]) + xs_mapped
  _, _, effects = jaxpr_to_checkify_jaxpr(jaxpr, enabled_errors,
                                          err_tree, *new_in_aval)
  merged_error = error._add_placeholder_effects(effects)
  err_vals, err_tree = jtu.tree_flatten(merged_error)
  # Create checked-jaxpr, with the needed pre-processing on the inputs.
  new_in_aval = map(core.get_aval, [*err_vals, *consts, *carry]) + xs_mapped
  checked_jaxpr_, out_tree, _ = jaxpr_to_checkify_jaxpr(jaxpr, enabled_errors,
                                                        err_tree, *new_in_aval)
  # The checked jaxpr takes (errs, consts, carry, xs); scan_p expects consts
  # first, so move the const binders ahead of the error binders.
  tomove = ([False] * len(err_vals) + [True] * len(consts)
            + [False] * (len(carry) + len(xs)))
  checked_jaxpr = pe.move_binders_to_front(checked_jaxpr_, tomove)
  new_in_flat = [*consts, *err_vals, *carry, *xs]
  # Error values become part of the carry and are never linear.
  new_linear = (*[False] * len(err_vals), *linear)
  err_and_out = lax.scan_p.bind(
      *new_in_flat, reverse=reverse, length=length, jaxpr=checked_jaxpr,
      num_consts=len(consts), num_carry=len(carry)+len(err_vals),
      linear=new_linear, unroll=unroll, _split_transpose=_split_transpose)
  err, out = tree_unflatten(out_tree, err_and_out)
  return err, out
error_checks[lax.scan_p] = scan_error_check
def checkify_while_body_jaxpr(
    cond_jaxpr: core.ClosedJaxpr, body_jaxpr: core.ClosedJaxpr,
    enabled_errors, error: Error,
    c_consts_num: int) -> tuple[core.ClosedJaxpr, PyTreeDef, set[ErrorEffect]]:
  """Checkify a while-loop body, also checking the *next* cond application.

  The returned checked jaxpr takes the cond consts in addition to the body
  inputs, so errors raised by evaluating the condition on the next iteration's
  carry are caught inside the body.
  """
  cond_f = core.jaxpr_as_fun(cond_jaxpr)
  body_f = core.jaxpr_as_fun(body_jaxpr)
  def new_body_f(*c_consts_and_vals):
    c_consts, vals = split_list(c_consts_and_vals, [c_consts_num])
    out = body_f(*vals)
    # This checks if the next cond application will error
    lax.dce_sink(cond_f(*c_consts, *out))
    return out
  new_body_f_ = lu.wrap_init(
      new_body_f,
      debug_info=body_jaxpr.jaxpr.debug_info.with_unknown_names())
  c_consts_avals = cond_jaxpr.in_avals[:c_consts_num]
  jaxpr, _, () = pe.trace_to_jaxpr_dynamic(
      new_body_f_, [*c_consts_avals, *body_jaxpr.in_avals])
  closed_jaxpr = pe.close_jaxpr(jaxpr)
  err_vals, err_tree = jtu.tree_flatten(error)
  err_vals = map(core.get_aval, err_vals)
  flat_err_and_in_vals = [*err_vals, *c_consts_avals, *body_jaxpr.in_avals]
  jaxpr, out_tree, error_effects = jaxpr_to_checkify_jaxpr(
      closed_jaxpr, enabled_errors, err_tree, *flat_err_and_in_vals)
  return jaxpr, out_tree, error_effects
@weakref_lru_cache
def ignore_error_output_jaxpr(jaxpr, num_error_vals: int):
"""Constructs a checked jaxpr which does not output its error value."""
consts = jaxpr.consts
jaxpr = jaxpr.jaxpr
new_jaxpr = jaxpr.replace(outvars=jaxpr.outvars[num_error_vals:])
return core.ClosedJaxpr(new_jaxpr, consts)
def while_loop_error_check(error, enabled_errors, *in_flat, cond_nconsts,
                           cond_jaxpr, body_nconsts, body_jaxpr):
  """Error-check rule for lax.while_p: checkify cond and body."""
  if cond_jaxpr.out_avals[0].shape:
    # TODO(lenamartens, sharadmv): support batched while.
    raise ValueError('Checkify does not support batched while-loops '
                     '(checkify-of-vmap-of-while). \nHint: if possible, move '
                     'the vmap to the outer level to get '
                     'vmap-of-checkify-of-while.')
  c_consts, b_consts, carry = split_list(in_flat, [cond_nconsts, body_nconsts])
  # Check if the first cond application will error.
  error, _ = checkify_jaxpr(cond_jaxpr, enabled_errors, error, *c_consts, *carry)
  _, _, error_effects = checkify_while_body_jaxpr(cond_jaxpr, body_jaxpr,
                                                  enabled_errors, error,
                                                  cond_nconsts)
  # merged error!
  error = error._add_placeholder_effects(error_effects)
  err_vals, err_tree = jtu.tree_flatten(error)
  checked_body_jaxpr_, body_out_tree, _ = checkify_while_body_jaxpr(
      cond_jaxpr, body_jaxpr, enabled_errors, error, cond_nconsts)
  num_error_vals = len(err_vals)
  # The checked body takes (errs, c_consts, b_consts, carry); while_p expects
  # consts first, so move both const groups ahead of the error binders.
  to_move = ([False] * num_error_vals + [True] * cond_nconsts
             + [True] * body_nconsts + [False] * len(carry))
  checked_body_jaxpr = pe.move_binders_to_front(checked_body_jaxpr_, to_move)
  cond_in_flat = [*err_vals, *c_consts, *carry]
  cond_in_flat = map(core.get_aval, cond_in_flat)
  checked_cond_jaxpr, _, _ = jaxpr_to_checkify_jaxpr(cond_jaxpr, enabled_errors,
                                                     err_tree, *cond_in_flat)
  # The cond must keep its single boolean output, so drop the error outputs
  # from the checked cond jaxpr.
  compat_cond_jaxpr_ = ignore_error_output_jaxpr(checked_cond_jaxpr, num_error_vals)
  to_move = [False] * num_error_vals + [True] * cond_nconsts + [False] * len(carry)
  compat_cond_jaxpr = pe.move_binders_to_front(compat_cond_jaxpr_, to_move)
  # c_consts appear twice on purpose: once for the cond jaxpr, and once as
  # body consts, since the checked body re-applies cond to catch errors in the
  # next iteration (hence body_nconsts=cond_nconsts+body_nconsts below).
  new_in_flat = [*c_consts, *c_consts, *b_consts, *err_vals, *carry]
  all_out_vals = lax.while_p.bind(
      *new_in_flat, cond_nconsts=cond_nconsts, cond_jaxpr=compat_cond_jaxpr,
      body_nconsts=cond_nconsts+body_nconsts, body_jaxpr=checked_body_jaxpr)
  # body_out_tree will have all the metadata of cond because it executes a cond!
  error, out = tree_unflatten(body_out_tree, all_out_vals)
  return error, out
error_checks[lax.while_p] = while_loop_error_check
def pjit_error_check(error, enabled_errors, *vals_in, jaxpr,
                     in_shardings, out_shardings,
                     in_layouts, out_layouts,
                     donated_invars, ctx_mesh, name, inline, keep_unused,
                     compiler_options_kvs):
  """Error-check rule for jit_p: thread error values through the jitted call."""
  # jaxpr to checked_jaxpr
  err_vals, err_tree = jtu.tree_flatten(error)
  new_vals_in = [*err_vals, *vals_in]
  in_avals = tuple(map(core.get_aval, new_vals_in))
  checked_jaxpr, out_tree, _ = jaxpr_to_checkify_jaxpr(jaxpr, enabled_errors,
                                                       err_tree, *in_avals)
  # Update pjit params to account for extra error values.
  num_error_vals = len(err_vals)
  num_out_error_vals = out_tree.num_leaves - len(out_shardings)
  # Error inputs/outputs get unspecified shardings and layouts, and error
  # inputs are never donated.
  sharding = sharding_impls.UNSPECIFIED
  new_in_shardings = (*[sharding] * num_error_vals, *in_shardings)
  new_in_layouts = (*[None] * num_error_vals, *in_layouts)
  new_donated_invars = (*[False] * num_error_vals, *donated_invars)
  new_out_shardings = (*[sharding] * num_out_error_vals, *out_shardings)
  new_out_layouts = (*[None] * num_out_error_vals, *out_layouts)
  err_and_out = pjit.jit_p.bind(
      *new_vals_in,
      jaxpr=checked_jaxpr,
      in_shardings=new_in_shardings,
      out_shardings=new_out_shardings,
      in_layouts=new_in_layouts,
      out_layouts=new_out_layouts,
      donated_invars=new_donated_invars,
      ctx_mesh=ctx_mesh,
      name=name,
      inline=inline,
      keep_unused=keep_unused,
      compiler_options_kvs=compiler_options_kvs,
  )
  return tree_unflatten(out_tree, err_and_out)
error_checks[pjit.jit_p] = pjit_error_check
def remat_error_check(error, enabled_errors, *vals_in, jaxpr, **params):
  """Error-check rule for remat_p: checkify the body and rebind the primitive."""
  flat_err, err_tree = jtu.tree_flatten(error)
  all_vals = [*flat_err, *vals_in]
  avals = tuple(core.get_aval(v) for v in all_vals)
  closed_checked, out_tree, _ = jaxpr_to_checkify_jaxpr(
      pe.close_jaxpr(jaxpr), enabled_errors, err_tree, *avals)
  # remat_p takes an open jaxpr; the destructuring also asserts no consts.
  checked_jaxpr, () = closed_checked.jaxpr, closed_checked.consts
  outs = ad_checkpoint.remat_p.bind(*all_vals, jaxpr=checked_jaxpr, **params)
  return tree_unflatten(out_tree, outs)
error_checks[ad_checkpoint.remat_p] = remat_error_check
def shard_map_error_check(
    error: Error, enabled_errors, *vals_in,
    jaxpr: core.Jaxpr, in_specs, out_specs, **kwargs
):
  """Error-check rule for shard_map_p: checkify the mapped body per shard."""
  if (mesh := kwargs.get('mesh')) is None:
    raise ValueError('Mesh must be provided for shard_map with checkify.')
  err_vals, err_tree = jtu.tree_flatten(error)
  num_error_vals = len(err_vals)
  # Replicated sharding for in errors.
  new_in_specs = (*([P()] * num_error_vals), *in_specs)
  new_vals_in = [*err_vals, *vals_in]
  in_avals = list(map(core.get_aval, new_vals_in))
  manual_axes = kwargs.get('manual_axes')
  check_vma = kwargs.get('check_vma')
  # Shard each input aval so the body is traced with per-shard shapes.
  for i, v in enumerate(in_avals):
    if not (sharder := core.shard_aval_handlers.get(type(v))):
      raise ValueError(f'Unsupported aval type: {type(v)}')
    in_avals[i] = sharder(mesh, manual_axes, check_vma, new_in_specs[i], v)
  with (jshmap._extend_axis_env(mesh, manual_axes),
        mesh_lib.use_abstract_mesh(jshmap._as_manual_mesh(mesh, manual_axes)),  # type: ignore[arg-type]
        config._check_vma(check_vma)):
    # jaxpr to checked_jaxpr
    checked_jaxpr, out_tree, _ = jaxpr_to_checkify_jaxpr(
        pe.close_jaxpr(jaxpr), enabled_errors, err_tree, *in_avals
    )
  num_out_error_vals = out_tree.num_leaves - len(out_specs)
  def expand_errors_leading_dim(*xs):
    # Give each error output a leading axis of size 1 so the per-shard errors
    # can be concatenated across the mesh (see new_out_specs below).
    outs = core.eval_jaxpr(checked_jaxpr.jaxpr, checked_jaxpr.consts, *xs)
    errs, outs = split_list(outs, [num_out_error_vals])
    errs = [lax.expand_dims(e, [0]) for e in errs]
    return *errs, *outs
  with core.extend_axis_env_nd(mesh.shape.items()), config._check_vma(check_vma):
    jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(
        lu.wrap_init(expand_errors_leading_dim,
                     debug_info=checked_jaxpr.jaxpr.debug_info),
        checked_jaxpr.in_avals
    )
  checked_jaxpr = core.ClosedJaxpr(jaxpr, consts)
  # Update shard_map params to account for extra error values.
  # Use fully sharded partitioning for out errors.
  new_out_specs = (*([P(mesh.axis_names)] * num_out_error_vals), *out_specs)
  subfun = lu.hashable_partial(
      lu.wrap_init(core.eval_jaxpr, debug_info=checked_jaxpr.jaxpr.debug_info),
      checked_jaxpr.jaxpr, checked_jaxpr.consts
  )
  new_params = dict(
      jaxpr=checked_jaxpr.jaxpr,
      in_specs=new_in_specs,
      out_specs=new_out_specs,
      **kwargs,
  )
  _, new_params = jshmap.shard_map_p.get_bind_params(new_params)
  err_and_out = jshmap.shard_map_p.bind(subfun, *new_vals_in, **new_params)
  return tree_unflatten(out_tree, err_and_out)
error_checks[jshmap.shard_map_p] = shard_map_error_check
def custom_jvp_call_rule(in_err: Error,
                         enabled_errors: set, *in_vals, num_consts,
                         jvp_jaxpr_fun: lu.WrappedFun,
                         call_jaxpr: core.ClosedJaxpr, **params):
  """Error-check rule for custom_jvp_call_p: checkify the primal function."""
  # The types to have in mind are:
  #  jvp : (a -> b) -> (a, T a) -> (b, T b)
  #  checkify : (a -> b) -> a -> Err b
  #  jvp-of-checkify : (a -> b) -> (a, T a) -> (Err b, T (Err b))
  # where because Err is a pytree, we necessarily have T (Err b) = Err' (T b)
  # where the other Err' components are trivial (of float0 dtype).
  # Semantically, we don't add checks to the JVP rule. To check the result of a
  # JVP rule, one must instead use checkify-of-jvp. Thus this implementation
  # just forwards the input error and code (and trivial tangents) to the output.
  err_vals, err_tree = jtu.tree_flatten(in_err)
  partial_checkify = lu.wrap_init(
      functools.partial(checkify_jaxpr_flat, call_jaxpr.jaxpr,
                        call_jaxpr.consts, enabled_errors, err_tree),
      debug_info=call_jaxpr.jaxpr.debug_info)
  partial_checkify, f_metadata = _flatten_and_get_error_metadata_thunk(
      partial_checkify)
  jvp = lift_jvp(err_tree.num_leaves, num_consts, jvp_jaxpr_fun)
  jvp, jvp_out_tree = flatten_fun_output(jvp)
  all_outs = custom_derivatives.custom_jvp_call_p.bind(
      partial_checkify, jvp, *err_vals, *in_vals, **params)
  # Only one of the two auxes is populated, depending on whether the primal
  # function or the jvp rule was traced.
  fst, out_metadata = lu.merge_linear_aux(f_metadata, jvp_out_tree)
  if fst:
    err_and_out_tree, _ = out_metadata
    out_err, out_vals = tree_unflatten(err_and_out_tree, all_outs)
  else:
    err_vals, out_vals = split_list(all_outs, [len(err_vals)])
    # forward input error to output
    out_err = jtu.tree_unflatten(err_tree, err_vals)
  return out_err, out_vals
error_checks[custom_derivatives.custom_jvp_call_p] = custom_jvp_call_rule
# Compared to custom_derivatives.lift_jvp, we're handling the extra inputs and
# outputs that checkify adds (just forwarding the error data's primal and
# tangent components). The jaxpr in jvp_jaxpr_fun doesn't expect those.
# TODO(mattjj): can we simplify this, or dedup with custom_derivatives.lift_jvp?
# Adding another layer of lu.transformation was tricky, though maybe doable.
def lift_jvp(num_errs: int, num_consts: int,
             jvp_jaxpr_fun: lu.WrappedFun) -> lu.WrappedFun:
  """Wrap a custom-jvp rule to pass error values through unchanged."""
  def jvp(*xs):
    # The first half of xs are primal inputs laid out as
    # (consts, error values, primals); the second half are their tangents.
    n, ragged = divmod(len(xs), 2)
    assert not ragged
    primals, tangents = xs[num_consts+num_errs:n], xs[n+num_consts+num_errs:]
    zeros = [type(t) is SymbolicZero for t in tangents]
    jvp_jaxpr, jvp_consts, out_zeros = jvp_jaxpr_fun.call_wrapped(*zeros)
    nonzero_tangents = [t for t in tangents if type(t) is not SymbolicZero]
    out = core.eval_jaxpr(jvp_jaxpr, jvp_consts, *primals, *nonzero_tangents)
    out_primals, nz_out_tangents = split_list(out, [len(out_zeros)])
    # Re-insert SymbolicZero placeholders where the rule reported zero
    # tangents, consuming the nonzero outputs in order.
    nz_out_tangents_ = iter(nz_out_tangents)
    out_tangents = [SymbolicZero(core.get_aval(p).to_tangent_aval())
                    if z else next(nz_out_tangents_)
                    for p, z in zip(out_primals, out_zeros)]
    assert next(nz_out_tangents_, None) is None
    # Forward the error components (primal and tangent parts) untouched.
    primal_errs = xs[num_consts:num_consts+num_errs]
    tangent_errs = xs[n+num_consts:n+num_consts+num_errs]
    return [*primal_errs, *out_primals, *tangent_errs, *out_tangents]
  return lu.wrap_init(jvp, debug_info=jvp_jaxpr_fun.debug_info)
def custom_vjp_call_rule(in_err, enabled_errors, *in_vals,
                         call_jaxpr: core.ClosedJaxpr,
                         fwd_jaxpr_thunk, num_consts,
                         bwd: lu.WrappedFun, out_trees,
                         symbolic_zeros: bool):
  """Error-check rule for custom_vjp_call_p: checkify the primal function.

  Like the custom_jvp rule, fwd/bwd are not checked; the input error is
  forwarded through them unchanged.
  """
  err_vals, err_tree = jtu.tree_flatten(in_err)
  num_errs = err_tree.num_leaves
  checkified_fun = lu.wrap_init(
      functools.partial(checkify_jaxpr_flat, call_jaxpr.jaxpr,
                        call_jaxpr.consts, enabled_errors, err_tree),
      debug_info=call_jaxpr.jaxpr.debug_info)
  checkified_fun, fun_metadata = _flatten_and_get_error_metadata_thunk(
      checkified_fun)
  def checkified_fwd(*args):
    # TODO(lenamartens, sharadmv): why not checkify here?
    # args interleave (value, is_symbolic_zero) pairs; strip the error entries
    # before calling the user's fwd jaxpr.
    xs, zeros = args[::2], args[1::2]
    xs, zeros = xs[num_errs:], zeros[num_errs:]
    fwd_jaxpr, fwd_consts = fwd_jaxpr_thunk.call_wrapped(*zeros)
    xs_without_consts = xs[num_consts:]
    return core.eval_jaxpr(fwd_jaxpr, fwd_consts, *xs_without_consts)
  # TODO(necula): the fwd result_paths are not quite the same as fun_jaxpr
  checkified_fwd_wrapped = lu.wrap_init(checkified_fwd,
                                        debug_info=fwd_jaxpr_thunk.debug_info)
  # bwd produces no cotangents for the error inputs.
  bwd_ = lu.wrap_init(lambda *args: (*(None,)*num_errs, *bwd.call_wrapped(*args)),
                      debug_info=bwd.debug_info)
  checkified_fwd_wrapped, fwd_out_tree = flatten_fun_output(checkified_fwd_wrapped)
  all_outs = custom_derivatives.custom_vjp_call_p.bind(
      checkified_fun, checkified_fwd_wrapped,
      bwd_, *err_vals, *in_vals, out_trees=out_trees,
      symbolic_zeros=symbolic_zeros)
  fst, out_metadata = lu.merge_linear_aux(fun_metadata, fwd_out_tree)
  if fst:
    err_and_out_tree, _ = out_metadata
    out_err, out_vals = tree_unflatten(err_and_out_tree, all_outs)
  else:
    out_err, out_vals = in_err, all_outs
  return out_err, out_vals
error_checks[custom_derivatives.custom_vjp_call_p] = custom_vjp_call_rule
def check_discharge_rule(error, enabled_errors, *args, err_tree, debug):
  """Discharge rule for check_p: fold enabled errors into the carried error.

  Effects whose error type is in `enabled_errors` are functionalized into the
  incoming error; the rest are collected separately (to be re-raised in a
  follow-up, see TODO below).
  """
  del debug
  incoming = tree_unflatten(err_tree, args)
  discharged_error = error
  recharged_error = init_error
  for effect, pred in incoming._pred.items():
    code = incoming._code[effect]
    payload = incoming._payload[effect]
    if effect.error_type in enabled_errors:
      discharged_error = update_error(
          discharged_error, pred, code, {}, payload, effect)
    else:
      recharged_error = update_error(
          recharged_error, pred, code, {}, payload, effect)
  discharged_error = discharged_error._replace(
      _metadata={**incoming._metadata, **discharged_error._metadata})
  recharged_error = recharged_error._replace(_metadata=incoming._metadata)
  # TODO(lenamartens): we actually need to recharge, but this would be a
  # breaking API change so leaving for a follow-up.
  # check_error(recharged_error)
  return discharged_error, []
error_checks[check_p] = check_discharge_rule
## checkify public api
# Predefined error-category sets users can pass as `errors=` to `checkify`.
# They can be combined with set operations, e.g. `float_checks | user_checks`.
user_checks = frozenset({FailedCheckError})
nan_checks = frozenset({NaNError})
index_checks = frozenset({OOBError})
div_checks = frozenset({DivisionByZeroError})
float_checks = nan_checks | div_checks
automatic_checks = float_checks | index_checks
all_checks = automatic_checks | user_checks
def checkify(f: Callable[..., Out],
             errors: frozenset[ErrorCategory] = user_checks
             ) -> Callable[..., tuple[Error, Out]]:
  """Functionalize `check` calls in `f`, and optionally add run-time error checks.
  Run-time errors are either user-added :func:`~check` assertions, or
  automatically added checks like NaN checks, depending on the ``errors``
  argument.
  The returned function will return an Error object `err` along with the output
  of the original function. ``err.get()`` will either return ``None`` (if no
  error occurred) or a string containing an error message. This error message
  will correspond to the first error which occurred. ``err.throw()`` will raise
  a ValueError with the error message if an error occurred.
  By default only user-added :func:`~check` assertions are enabled. You can
  enable automatic checks through the ``errors`` argument.
  The automatic check sets which can be enabled, and when an error is generated:
  - ``user_checks``: a :func:`~check` evaluated to False.
  - ``nan_checks``: a floating-point operation generated a NaN value
  as output.
  - ``div_checks``: a division by zero.
  - ``index_checks``: an index was out-of-bounds.
  Multiple categories can be enabled together by passing in an error `Set` (eg.
  ``errors=nan_checks``). Multiple sets can be re-combined (eg.
  ``errors=float_checks|user_checks``)
  Args:
    f: Callable which can contain user checks (see :func:`~check`).
    errors: A set of ErrorCategory values which defines the set of enabled
      checks. By default only explicit ``checks`` are enabled
      (``user_checks``). You can also for example enable NAN and
      DIV errors by passing the ``float_checks`` set, or for
      example combine multiple sets through set operations
      (``float_checks | user_checks``)
  Returns:
    A function which accepts the same arguments as ``f`` and returns as output
    a pair where the first element is an ``Error`` value, representing the first
    failed :func:`~check`, and the second element is the original output of
    ``f``.
  For example:
    >>> import jax
    >>> import jax.numpy as jnp
    >>> from jax.experimental import checkify
    >>>
    >>> @jax.jit
    ... def f(x):
    ...   y = jnp.sin(x)
    ...   return x+y
    >>> err, out = checkify.checkify(f, errors=checkify.float_checks)(jnp.inf)
    >>> err.throw()  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
      ...
    jax._src.checkify.JaxRuntimeError: nan generated by primitive: sin
  """
  @traceback_util.api_boundary
  def checked_fun(*args, **kwargs):
    # close over all arguments so they're not turned into abstract values.
    in_tree = jtu.tree_structure(((), {}))
    closed_f = lambda: f(*args, **kwargs)
    # stage:
    debug = api_util.debug_info("checkify", f, args, kwargs)
    fun_, out_tree = api_util.flatten_fun(
        lu.wrap_init(closed_f, debug_info=debug.with_unknown_names()), in_tree)
    jaxpr_, _, consts = pe.trace_to_jaxpr_dynamic(fun_, ())
    jaxpr = pe.close_jaxpr(pe.convert_constvars_jaxpr(jaxpr_))
    # checkify:
    error, out_flat = checkify_jaxpr(jaxpr, errors, init_error, *consts)
    return error, jtu.tree_unflatten(out_tree(), out_flat)
  return checked_fun
def check(pred: Bool, msg: str,
          *fmt_args,
          debug: bool = False,
          **fmt_kwargs,
          ) -> None:
  """Check a predicate, add an error with msg if predicate is False.
  This is an effectful operation, and can't be staged (jitted/scanned/...).
  Before staging a function with checks, :func:`~checkify` it!
  Args:
    pred: if False, a FailedCheckError error is added.
    msg: error message if error is added. Can be a format string.
    debug: Whether to turn on debugging mode. If True, check will be removed
      during execution. If False, the check must be functionalized using
      checkify.checkify.
    fmt_args, fmt_kwargs: Positional and keyword formatting arguments for
      `msg`, eg.:
      ``check(.., "check failed on values {} and {named_arg}", x, named_arg=y)``
      Note that these arguments can be traced values allowing you to add
      run-time values to the error message.
      Note that tracking these run-time arrays will increase your memory usage,
      even if no error happens.
  For example:
    >>> import jax
    >>> import jax.numpy as jnp
    >>> from jax.experimental import checkify
    >>> def f(x):
    ...   checkify.check(x>0, "{x} needs to be positive!", x=x)
    ...   return 1/x
    >>> checked_f = checkify.checkify(f)
    >>> err, out = jax.jit(checked_f)(-3.)
    >>> err.throw()  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
      ...
    jax._src.checkify.JaxRuntimeError: -3. needs to be positive!
  """
  _check(pred, msg, debug, *fmt_args, **fmt_kwargs)
def _check(pred, msg, debug, *fmt_args, **fmt_kwargs):
  """Shared implementation of `check` and `debug_check`."""
  if not is_scalar_pred(pred):
    prim_name = 'debug_check' if debug else 'check'
    raise TypeError(f'{prim_name} takes a scalar pred as argument, got {pred}')
  # Every formatting argument must be an array leaf so it can be traced.
  for leaf in jtu.tree_leaves((fmt_args, fmt_kwargs)):
    if not isinstance(leaf, (Array, np.ndarray)):
      raise TypeError('Formatting arguments to checkify.check need to be '
                      'PyTrees of arrays, but got '
                      f'{leaf!r} of type {type(leaf)}.')
  failure = FailedCheckError(get_traceback(), msg, *fmt_args, **fmt_kwargs)
  error = assert_func(init_error, jnp.logical_not(pred), failure)
  _check_error(error, debug=debug)
def _check_error(error, *, debug=False):
  """Bind check_p on an Error value, reducing batched errors first."""
  # A non-() shape on any predicate means the error is batched; collapse it
  # to a single scalar error before binding.
  if any(np.shape(p) for p in error._pred.values()):
    error = _reduce_any_error(error)
  leaves, treedef = tree_flatten(error)
  return check_p.bind(*leaves, err_tree=treedef, debug=debug)
def is_scalar_pred(pred) -> bool:
  """Return True if `pred` is a Python bool or a scalar boolean Array."""
  if isinstance(pred, bool):
    return True
  return (isinstance(pred, Array)
          and pred.shape == ()
          and pred.dtype == np.dtype('bool'))
def debug_check(pred: Bool, msg: str, *fmt_args, **fmt_kwargs) -> None:
  """Check a predicate when running under checkify, otherwise is a no-op.
  A `debug_check` will only be run if it is transformed by :func:`~checkify`,
  otherwise the check will be dropped.
  Args:
    pred: if False, a FailedCheckError error is added.
    msg: error message if error is added.
    fmt_args, fmt_kwargs: Positional and keyword formatting arguments for
      `msg`, eg.:
      ``debug_check(.., "check failed on values {} and {named}", x, named=y)``
      Note that these arguments can be traced values allowing you to add
      run-time values to the error message.
      Note that tracking these run-time arrays will increase your memory usage,
      even if no error happens.
  For example:
    >>> import jax
    >>> import jax.numpy as jnp
    >>> from jax.experimental import checkify
    >>> def f(x):
    ...   checkify.debug_check(x!=0, "cannot be zero!")
    ...   return x
    >>> _ = f(0)  # running without checkify means no debug_check is run.
    >>> checked_f = checkify.checkify(f)
    >>> err, out = jax.jit(checked_f)(0)  # running with checkify runs debug_check.
    >>> err.throw()  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
      ...
    jax._src.checkify.JaxRuntimeError: cannot be zero!
  """
  # debug=True marks the check as droppable when not functionalized.
  _check(pred, msg, True, *fmt_args, **fmt_kwargs)
def check_error(error: Error) -> None:
  """Raise an Exception if ``error`` represents a failure. Functionalized by :func:`~checkify`.
  The semantics of this function are equivalent to:
  >>> def check_error(err: Error) -> None:
  ...   err.throw()  # can raise ValueError
  But unlike that implementation, ``check_error`` can be functionalized using
  the :func:`~checkify` transformation.
  This function is similar to :func:`~check` but with a different signature: whereas
  :func:`~check` takes as arguments a boolean predicate and a new error message
  string, this function takes an ``Error`` value as argument. Both :func:`~check`
  and this function raise a Python Exception on failure (a side-effect), and
  thus cannot be staged out by :func:`~jax.jit`, :func:`~jax.pmap`,
  :func:`~jax.lax.scan`, etc. Both also can
  be functionalized by using :func:`~checkify`.
  But unlike :func:`~check`, this function is like a direct inverse of
  :func:`~checkify`:
  whereas :func:`~checkify` takes as input a function which
  can raise a Python
  Exception and produces a new function without that effect but which produces
  an ``Error`` value as output, this ``check_error`` function can accept an
  ``Error`` value as input and can produce the side-effect of raising an
  Exception. That is, while :func:`~checkify` goes from
  functionalizable Exception
  effect to error value, this ``check_error`` goes from error value to
  functionalizable Exception effect.
  ``check_error`` is useful when you want to turn checks represented by an
  ``Error`` value (produced by functionalizing ``checks`` via
  :func:`~checkify`) back into Python Exceptions.
  Args:
    error: Error to check.
  For example, you might want to functionalize part of your program through
  checkify, stage out your functionalized code through :func:`~jax.jit`, then
  re-inject your error value outside of the :func:`~jax.jit`:
  >>> import jax
  >>> from jax.experimental import checkify
  >>> def f(x):
  ...   checkify.check(x>0, "must be positive!")
  ...   return x
  >>> def with_inner_jit(x):
  ...   checked_f = checkify.checkify(f)
  ...   # a checkified function can be jitted
  ...   error, out = jax.jit(checked_f)(x)
  ...   checkify.check_error(error)
  ...   return out
  >>> _ = with_inner_jit(1)  # no failed check
  >>> with_inner_jit(-1)  # doctest: +IGNORE_EXCEPTION_DETAIL
  Traceback (most recent call last):
    ...
  jax._src.JaxRuntimeError: must be positive!
  >>> # can re-checkify
  >>> error, _ = checkify.checkify(with_inner_jit)(-1)
  """
  if not isinstance(error, Error):
    raise TypeError('check_error takes an Error as argument, '
                    f'got type {type(error)} instead.')
  # Re-bind the error through check_p so an enclosing checkify can absorb it;
  # outside of checkify, the check_p impl throws.
  _check_error(error, debug=False)
| JaxRuntimeError |
python | huggingface__transformers | src/transformers/models/whisper/modeling_whisper.py | {
"start": 55981,
"end": 56803
} | class ____(WhisperPreTrainedModel):
"""
This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
used in combination with the [`EncoderDecoderModel`] framework.
"""
def __init__(self, config):
super().__init__(config)
config.is_encoder_decoder = False
self.decoder = WhisperDecoder(config)
def get_input_embeddings(self):
return self.decoder.embed_tokens
def set_input_embeddings(self, value):
self.decoder.embed_tokens = value
def forward(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
@auto_docstring(
custom_intro="""
Whisper decoder with a language modeling head on top (linear layer with weights tied to the input embeddings).
"""
)
| WhisperDecoderWrapper |
python | geekcomputers__Python | Test-Case-Generator/test_case.py | {
"start": 19995,
"end": 20965
} | class ____(Case):
def __init__(self, master):
super(Type1, self).__init__(master) # Type 1
self.forget_home()
self.take_input()
def take_input(self):
try:
self.try_forget() # Type 1
except AttributeError:
pass
self.get_t(0)
self.get_n(1)
self.get_a(2)
self.show_button(3)
def generate(self): # Type 1
self.forget_testcase_take_input_screen()
self.output.delete("1.0", END)
self.output.insert(END, self.t)
self.output.insert(END, "\n")
for i in range(self.t):
self.n = randint(self.n_min, self.n_max)
self.output.insert(END, self.n)
self.output.insert(END, "\n")
self.a = [0] * self.n
for j in range(self.n):
self.a[j] = randint(self.a_min, self.a_max)
self.output.insert(END, self.a)
self.output.insert(END, "\n")
| Type1 |
python | django__django | django/utils/timezone.py | {
"start": 2763,
"end": 7291
} | class ____(ContextDecorator):
"""
Temporarily set the time zone for the current thread.
This is a context manager that uses django.utils.timezone.activate()
to set the timezone on entry and restores the previously active timezone
on exit.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
time zone name, or ``None``. If it is ``None``, Django enables the default
time zone.
"""
def __init__(self, timezone):
self.timezone = timezone
def __enter__(self):
self.old_timezone = getattr(_active, "value", None)
if self.timezone is None:
deactivate()
else:
activate(self.timezone)
def __exit__(self, exc_type, exc_value, traceback):
if self.old_timezone is None:
deactivate()
else:
_active.value = self.old_timezone
# Templates
def template_localtime(value, use_tz=None):
"""
Check if value is a datetime and converts it to local time if necessary.
If use_tz is provided and is not None, that will force the value to
be converted (or not), overriding the value of settings.USE_TZ.
This function is designed for use by the template engine.
"""
should_convert = (
isinstance(value, datetime)
and (settings.USE_TZ if use_tz is None else use_tz)
and not is_naive(value)
and getattr(value, "convert_to_local_time", True)
)
return localtime(value) if should_convert else value
# Utilities
def localtime(value=None, timezone=None):
"""
Convert an aware datetime.datetime to local time.
Only aware datetimes are allowed. When value is omitted, it defaults to
now().
Local time is defined by the current time zone, unless another time zone
is specified.
"""
if value is None:
value = now()
if timezone is None:
timezone = get_current_timezone()
# Emulate the behavior of astimezone() on Python < 3.6.
if is_naive(value):
raise ValueError("localtime() cannot be applied to a naive datetime")
return value.astimezone(timezone)
def localdate(value=None, timezone=None):
"""
Convert an aware datetime to local time and return the value's date.
Only aware datetimes are allowed. When value is omitted, it defaults to
now().
Local time is defined by the current time zone, unless another time zone is
specified.
"""
return localtime(value, timezone).date()
def now():
"""
Return an aware or naive datetime.datetime, depending on settings.USE_TZ.
"""
return datetime.now(tz=UTC if settings.USE_TZ else None)
# By design, these four functions don't perform any checks on their arguments.
# The caller should ensure that they don't receive an invalid value like None.
def is_aware(value):
"""
Determine if a given datetime.datetime is aware.
The concept is defined in Python's docs:
https://docs.python.org/library/datetime.html#datetime.tzinfo
Assuming value.tzinfo is either None or a proper datetime.tzinfo,
value.utcoffset() implements the appropriate logic.
"""
return value.utcoffset() is not None
def is_naive(value):
"""
Determine if a given datetime.datetime is naive.
The concept is defined in Python's docs:
https://docs.python.org/library/datetime.html#datetime.tzinfo
Assuming value.tzinfo is either None or a proper datetime.tzinfo,
value.utcoffset() implements the appropriate logic.
"""
return value.utcoffset() is None
def make_aware(value, timezone=None):
"""Make a naive datetime.datetime in a given time zone aware."""
if timezone is None:
timezone = get_current_timezone()
# Check that we won't overwrite the timezone of an aware datetime.
if is_aware(value):
raise ValueError("make_aware expects a naive datetime, got %s" % value)
# This may be wrong around DST changes!
return value.replace(tzinfo=timezone)
def make_naive(value, timezone=None):
"""Make an aware datetime.datetime naive in a given time zone."""
if timezone is None:
timezone = get_current_timezone()
# Emulate the behavior of astimezone() on Python < 3.6.
if is_naive(value):
raise ValueError("make_naive() cannot be applied to a naive datetime")
return value.astimezone(timezone).replace(tzinfo=None)
def _datetime_ambiguous_or_imaginary(dt, tz):
return tz.utcoffset(dt.replace(fold=not dt.fold)) != tz.utcoffset(dt)
| override |
python | django__django | tests/template_tests/test_library.py | {
"start": 3273,
"end": 4511
} | class ____(SimpleTestCase):
def setUp(self):
self.library = Library()
def test_simple_block_tag(self):
@self.library.simple_block_tag
def func(content):
return content
self.assertIn("func", self.library.tags)
def test_simple_block_tag_parens(self):
@self.library.simple_tag()
def func(content):
return content
self.assertIn("func", self.library.tags)
def test_simple_block_tag_name_kwarg(self):
@self.library.simple_block_tag(name="name")
def func(content):
return content
self.assertIn("name", self.library.tags)
def test_simple_block_tag_invalid(self):
msg = "Invalid arguments provided to simple_block_tag"
with self.assertRaisesMessage(ValueError, msg):
self.library.simple_block_tag("invalid")
def test_simple_tag_wrapped(self):
@self.library.simple_block_tag
@functools.lru_cache(maxsize=32)
def func(content):
return content
func_wrapped = self.library.tags["func"].__wrapped__
self.assertIs(func_wrapped, func)
self.assertTrue(hasattr(func_wrapped, "cache_info"))
| SimpleBlockTagRegistrationTests |
python | coleifer__peewee | playhouse/postgres_ext.py | {
"start": 9270,
"end": 10326
} | class ____(IndexedFieldMixin, JSONField):
field_type = 'JSONB'
_json_datatype = 'jsonb'
__hash__ = Field.__hash__
def contains(self, other):
if isinstance(other, JSONField):
return Expression(self, JSONB_CONTAINS, other)
return Expression(self, JSONB_CONTAINS, Json(other))
def contained_by(self, other):
return Expression(cast_jsonb(self), JSONB_CONTAINED_BY, Json(other))
def contains_any(self, *items):
return Expression(
cast_jsonb(self),
JSONB_CONTAINS_ANY_KEY,
Value(list(items), unpack=False))
def contains_all(self, *items):
return Expression(
cast_jsonb(self),
JSONB_CONTAINS_ALL_KEYS,
Value(list(items), unpack=False))
def has_key(self, key):
return Expression(cast_jsonb(self), JSONB_CONTAINS_KEY, key)
def remove(self, *items):
return Expression(
cast_jsonb(self),
JSONB_REMOVE,
Value(list(items), unpack=False))
| BinaryJSONField |
python | prompt-toolkit__python-prompt-toolkit | examples/prompts/auto-completion/colored-completions-with-formatted-text.py | {
"start": 2314,
"end": 3759
} | class ____(Completer):
def get_completions(self, document, complete_event):
word = document.get_word_before_cursor()
for animal in animals:
if animal.startswith(word):
if animal in animal_family:
family = animal_family[animal]
family_color = family_colors.get(family, "default")
display = HTML(
"%s<b>:</b> <ansired>(<"
+ family_color
+ ">%s</"
+ family_color
+ ">)</ansired>"
) % (animal, family)
else:
display = animal
yield Completion(
animal,
start_position=-len(word),
display=display,
display_meta=meta.get(animal),
)
def main():
# Simple completion menu.
print("(The completion menu displays colors.)")
prompt("Type an animal: ", completer=AnimalCompleter())
# Multi-column menu.
prompt(
"Type an animal: ",
completer=AnimalCompleter(),
complete_style=CompleteStyle.MULTI_COLUMN,
)
# Readline-like
prompt(
"Type an animal: ",
completer=AnimalCompleter(),
complete_style=CompleteStyle.READLINE_LIKE,
)
if __name__ == "__main__":
main()
| AnimalCompleter |
python | MongoEngine__mongoengine | tests/test_connection_mongomock.py | {
"start": 427,
"end": 6589
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
disconnect_all()
@classmethod
def tearDownClass(cls):
disconnect_all()
def tearDown(self):
mongoengine.connection._connection_settings = {}
mongoengine.connection._connections = {}
mongoengine.connection._dbs = {}
@require_mongomock
def test_connect_raise_if_mongomock_uri_provided(self):
with pytest.raises(
Exception, match="Use of mongomock:// URI or 'is_mock' were removed"
):
connect("test", host="mongomock://localhost")
@require_mongomock
def test_connect_raise_if_is_mock_provided(self):
with pytest.raises(
Exception, match="Use of mongomock:// URI or 'is_mock' were removed"
):
connect("test", host="mongodb://localhost", is_mock=True)
@require_mongomock
def test_connect_in_mocking(self):
"""Ensure that the connect() method works properly in mocking."""
connect(
"mongoenginetest",
host="mongodb://localhost",
mongo_client_class=mongomock.MongoClient,
)
conn = get_connection()
assert isinstance(conn, mongomock.MongoClient)
connect(
"mongoenginetest2",
host="mongodb://localhost",
mongo_client_class=mongomock.MongoClient,
alias="testdb2",
)
conn = get_connection("testdb2")
assert isinstance(conn, mongomock.MongoClient)
connect(
"mongoenginetest3",
host="mongodb://localhost",
mongo_client_class=mongomock.MongoClient,
alias="testdb3",
)
conn = get_connection("testdb3")
assert isinstance(conn, mongomock.MongoClient)
connect(
"mongoenginetest4",
mongo_client_class=mongomock.MongoClient,
alias="testdb4",
)
conn = get_connection("testdb4")
assert isinstance(conn, mongomock.MongoClient)
connect(
host="mongodb://localhost:27017/mongoenginetest5",
mongo_client_class=mongomock.MongoClient,
alias="testdb5",
)
conn = get_connection("testdb5")
assert isinstance(conn, mongomock.MongoClient)
connect(
host="mongodb://localhost:27017/mongoenginetest6",
mongo_client_class=mongomock.MongoClient,
alias="testdb6",
)
conn = get_connection("testdb6")
assert isinstance(conn, mongomock.MongoClient)
connect(
host="mongodb://localhost:27017/mongoenginetest7",
mongo_client_class=mongomock.MongoClient,
alias="testdb7",
)
conn = get_connection("testdb7")
assert isinstance(conn, mongomock.MongoClient)
@require_mongomock
def test_default_database_with_mocking(self):
"""Ensure that the default database is correctly set when using mongomock."""
disconnect_all()
class SomeDocument(Document):
pass
conn = connect(
host="mongodb://localhost:27017/mongoenginetest",
mongo_client_class=mongomock.MongoClient,
)
some_document = SomeDocument()
# database won't exist until we save a document
some_document.save()
assert SomeDocument.objects.count() == 1
assert conn.get_default_database().name == "mongoenginetest"
assert conn.list_database_names()[0] == "mongoenginetest"
@require_mongomock
def test_basic_queries_against_mongomock(self):
disconnect_all()
connect(
host="mongodb://localhost:27017/mongoenginetest",
mongo_client_class=mongomock.MongoClient,
)
class Person(Document):
name = StringField()
Person.drop_collection()
assert Person.objects.limit(0).count(with_limit_and_skip=True) == 0
bob = Person(name="Bob").save()
john = Person(name="John").save()
assert Person.objects.count() == 2
qs = Person.objects(name="Bob")
assert qs.count() == 1
assert qs.first() == bob
assert list(qs.as_pymongo()) == [{"_id": bob.id, "name": "Bob"}]
pipeline = [{"$project": {"name": {"$toUpper": "$name"}}}]
data = Person.objects.order_by("name").aggregate(pipeline)
assert list(data) == [
{"_id": bob.id, "name": "BOB"},
{"_id": john.id, "name": "JOHN"},
]
Person.drop_collection()
assert Person.objects.count() == 0
@require_mongomock
def test_connect_with_host_list(self):
"""Ensure that the connect() method works when host is a list
Uses mongomock to test w/o needing multiple mongod/mongos processes
"""
connect(host=["mongodb://localhost"], mongo_client_class=mongomock.MongoClient)
conn = get_connection()
assert isinstance(conn, mongomock.MongoClient)
connect(
host=["localhost"],
mongo_client_class=mongomock.MongoClient,
alias="testdb3",
)
conn = get_connection("testdb3")
assert isinstance(conn, mongomock.MongoClient)
connect(
host=["mongodb://localhost:27017", "mongodb://localhost:27018"],
alias="testdb4",
mongo_client_class=mongomock.MongoClient,
)
conn = get_connection("testdb4")
assert isinstance(conn, mongomock.MongoClient)
connect(
host=["mongodb://localhost:27017", "mongodb://localhost:27018"],
mongo_client_class=mongomock.MongoClient,
alias="testdb5",
)
conn = get_connection("testdb5")
assert isinstance(conn, mongomock.MongoClient)
connect(
host=["localhost:27017", "localhost:27018"],
mongo_client_class=mongomock.MongoClient,
alias="testdb6",
)
conn = get_connection("testdb6")
assert isinstance(conn, mongomock.MongoClient)
if __name__ == "__main__":
unittest.main()
| MongoMockConnectionTest |
python | ray-project__ray | python/ray/data/_internal/util.py | {
"start": 31036,
"end": 42547
} | class ____(Queue):
"""Extension of Python's `queue.Queue` providing ability to get interrupt its
method callers in other threads"""
INTERRUPTION_CHECK_FREQUENCY_SEC = 0.5
def __init__(
self, max_size: int, interrupted_event: Optional[threading.Event] = None
):
super().__init__(maxsize=max_size)
self._interrupted_event = interrupted_event or threading.Event()
def get(self, block=True, timeout=None):
if not block or timeout is not None:
return super().get(block, timeout)
# In case when the call is blocking and no timeout is specified (ie blocking
# indefinitely) we apply the following protocol to make it interruptible:
#
# 1. `Queue.get` is invoked w/ 500ms timeout
# 2. `Empty` exception is intercepted (will be raised upon timeout elapsing)
# 3. If interrupted flag is set `InterruptedError` is raised
# 4. Otherwise, protocol retried (until interrupted or queue
# becoming non-empty)
while True:
if self._interrupted_event.is_set():
raise InterruptedError()
try:
return super().get(
block=True, timeout=self.INTERRUPTION_CHECK_FREQUENCY_SEC
)
except Empty:
pass
def put(self, item, block=True, timeout=None):
if not block or timeout is not None:
super().put(item, block, timeout)
return
# In case when the call is blocking and no timeout is specified (ie blocking
# indefinitely) we apply the following protocol to make it interruptible:
#
# 1. `Queue.pet` is invoked w/ 500ms timeout
# 2. `Full` exception is intercepted (will be raised upon timeout elapsing)
# 3. If interrupted flag is set `InterruptedError` is raised
# 4. Otherwise, protocol retried (until interrupted or queue
# becomes non-full)
while True:
if self._interrupted_event.is_set():
raise InterruptedError()
try:
super().put(
item, block=True, timeout=self.INTERRUPTION_CHECK_FREQUENCY_SEC
)
return
except Full:
pass
def make_async_gen(
base_iterator: Iterator[T],
fn: Callable[[Iterator[T]], Iterator[U]],
preserve_ordering: bool,
num_workers: int = 1,
buffer_size: int = 1,
) -> Generator[U, None, None]:
"""Returns a generator (iterator) mapping items from the
provided iterator applying provided transformation in parallel (using a
thread-pool).
NOTE: There are some important constraints that needs to be carefully
understood before using this method
1. If `preserve_ordering` is True
a. This method would unroll input iterator eagerly (irrespective
of the speed of resulting generator being consumed). This is necessary
as we can not guarantee liveness of the algorithm AND preserving of the
original ordering at the same time.
b. Resulting ordering of the output will "match" ordering of the input, ie
that:
iterator = [A1, A2, ... An]
output iterator = [map(A1), map(A2), ..., map(An)]
2. If `preserve_ordering` is False
a. No more than `num_workers * (queue_buffer_size + 1)` elements will be
fetched from the iterator
b. Resulting ordering of the output is unspecified (and is
non-deterministic)
Args:
base_iterator: Iterator yielding elements to map
fn: Transformation to apply to each element
preserve_ordering: Whether ordering has to be preserved
num_workers: The number of threads to use in the threadpool (defaults to 1)
buffer_size: Number of objects to be buffered in its input/output
queues (per queue; defaults to 2). Total number of objects held
in memory could be calculated as:
num_workers * buffer_size * 2 (input and output)
Returns:
An generator (iterator) of the elements corresponding to the source
elements mapped by provided transformation (while *preserving the ordering*)
"""
gen_id = random.randint(0, 2**31 - 1)
if num_workers < 1:
raise ValueError("Size of threadpool must be at least 1.")
# Signal handler used to interrupt workers when terminating
interrupted_event = threading.Event()
# To apply transformations to elements in parallel *and* preserve the ordering
# following invariants are established:
# - Every worker is handled by standalone thread
# - Every worker is assigned an input and an output queue
#
# And following protocol is implemented:
# - Filling worker traverses input iterator round-robin'ing elements across
# the input queues (in order!)
# - Transforming workers traverse respective input queue in-order: de-queueing
# element, applying transformation and enqueuing the result into the output
# queue
# - Generator (returned from this method) traverses output queues (in the same
# order as input queues) dequeues 1 mapped element at a time from each output
# queue and yields it
#
# However, in case when we're preserving the ordering we can not enforce the input
# queue size as this could result in deadlocks since transformations could be
# producing sequences of arbitrary length.
#
# Check `test_make_async_gen_varying_seq_length_stress_test` for more context on
# this problem.
if preserve_ordering:
input_queue_buf_size = -1
num_input_queues = num_workers
else:
input_queue_buf_size = (buffer_size + 1) * num_workers
num_input_queues = 1
input_queues = [
_InterruptibleQueue(input_queue_buf_size, interrupted_event)
for _ in range(num_input_queues)
]
output_queues = [
_InterruptibleQueue(buffer_size, interrupted_event) for _ in range(num_workers)
]
# Filling worker
def _run_filling_worker():
try:
# First, round-robin elements from the iterator into
# corresponding input queues (one by one)
for idx, item in enumerate(base_iterator):
input_queues[idx % num_input_queues].put(item)
# NOTE: We have to Enqueue sentinel objects for every transforming
# worker:
# - In case of preserving order of ``num_queues`` == ``num_workers``
# we will enqueue 1 sentinel per queue
# - In case of NOT preserving order all ``num_workers`` sentinels
# will be enqueued into a single queue
for idx in range(num_workers):
input_queues[idx % num_input_queues].put(SENTINEL)
except InterruptedError:
pass
except Exception as e:
logger.warning("Caught exception in filling worker!", exc_info=e)
# In case of filling worker encountering an exception we have to propagate
# it back to the (main) iterating thread. To achieve that we're traversing
# output queues *backwards* relative to the order of iterator-thread such
# that they are more likely to meet w/in a single iteration.
for output_queue in reversed(output_queues):
output_queue.put(e)
# Transforming worker
def _run_transforming_worker(input_queue, output_queue):
try:
# Create iterator draining the queue, until it receives sentinel
#
# NOTE: `queue.get` is blocking!
input_queue_iter = iter(input_queue.get, SENTINEL)
for result in fn(input_queue_iter):
# Enqueue result of the transformation
output_queue.put(result)
# Enqueue sentinel (to signal that transformations are completed)
output_queue.put(SENTINEL)
except InterruptedError:
pass
except Exception as e:
logger.warning("Caught exception in transforming worker!", exc_info=e)
# NOTE: In this case we simply enqueue the exception rather than
# interrupting
output_queue.put(e)
# Start workers threads
filling_worker_thread = threading.Thread(
target=_run_filling_worker,
name=f"map_tp_filling_worker-{gen_id}",
daemon=True,
)
filling_worker_thread.start()
transforming_worker_threads = [
threading.Thread(
target=_run_transforming_worker,
name=f"map_tp_transforming_worker-{gen_id}-{idx}",
args=(input_queues[idx % num_input_queues], output_queues[idx]),
daemon=True,
)
for idx in range(num_workers)
]
for t in transforming_worker_threads:
t.start()
# Use main thread to yield output batches
try:
# Keep track of remaining non-empty output queues
remaining_output_queues = output_queues
while len(remaining_output_queues) > 0:
# To provide deterministic ordering of the produced iterator we rely
# on the following invariants:
#
# - Elements from the original iterator are round-robin'd into
# input queues (in order)
# - Individual workers drain their respective input queues populating
# output queues with the results of applying transformation to the
# original item (and hence preserving original ordering of the input
# queue)
# - To yield from the generator output queues are traversed in the same
# order and one single element is dequeued (in a blocking way!) at a
# time from every individual output queue
#
empty_queues = []
# At every iteration only remaining non-empty queues
# are traversed (to prevent blocking on exhausted queue)
for output_queue in remaining_output_queues:
# NOTE: This is blocking!
item = output_queue.get()
if isinstance(item, Exception):
raise item
if item is SENTINEL:
empty_queues.append(output_queue)
else:
yield item
if empty_queues:
remaining_output_queues = [
q for q in remaining_output_queues if q not in empty_queues
]
finally:
# Set flag to interrupt workers (to make sure no dangling
# threads holding the objects are left behind)
#
# NOTE: Interrupted event is set to interrupt the running threads
# that might be blocked otherwise waiting on inputs from respective
# queues. However, even though we're interrupting the threads we can't
# guarantee that threads will be interrupted in time (as this is
# dependent on Python's GC finalizer to close the generator by raising
# `GeneratorExit`) and hence we can't join on either filling or
# transforming workers.
interrupted_event.set()
| _InterruptibleQueue |
python | pytorch__pytorch | tools/testing/test_selections.py | {
"start": 2102,
"end": 10019
} | class ____:
def __init__(self) -> None:
self.serial: list[ShardedTest] = []
self.parallel: list[ShardedTest] = []
def get_total_time(self) -> float:
"""Default is the value for which to substitute if a test has no time"""
procs = [0.0 for _ in range(NUM_PROCS_FOR_SHARDING_CALC)]
for test in self.parallel:
min_index = procs.index(min(procs))
procs[min_index] += test.get_time()
time = max(procs) + sum(test.get_time() for test in self.serial)
return time
def convert_to_tuple(self) -> tuple[float, list[ShardedTest]]:
return (self.get_total_time(), self.serial + self.parallel)
def get_with_pytest_shard(
tests: Sequence[TestRun],
test_file_times: dict[str, float],
test_class_times: dict[str, dict[str, float]] | None,
) -> list[ShardedTest]:
sharded_tests: list[ShardedTest] = []
for test in tests:
duration = get_duration(test, test_file_times, test_class_times or {})
if duration and duration > THRESHOLD:
num_shards = math.ceil(duration / THRESHOLD)
for i in range(num_shards):
sharded_tests.append(
ShardedTest(test, i + 1, num_shards, duration / num_shards)
)
else:
sharded_tests.append(ShardedTest(test, 1, 1, duration))
return sharded_tests
def get_duration(
test: TestRun,
test_file_times: dict[str, float],
test_class_times: dict[str, dict[str, float]],
) -> float | None:
"""Calculate the time for a TestRun based on the given test_file_times and
test_class_times. Returns None if the time is unknown."""
file_duration = test_file_times.get(test.test_file, None)
if test.is_full_file():
return file_duration
def get_duration_for_classes(
test_file: str, test_classes: frozenset[str]
) -> float | None:
duration: float = 0
for test_class in test_classes:
class_duration = test_class_times.get(test_file, {}).get(test_class, None)
if class_duration is None:
return None
duration += class_duration
return duration
included = test.included()
excluded = test.excluded()
included_classes_duration = get_duration_for_classes(test.test_file, included)
excluded_classes_duration = get_duration_for_classes(test.test_file, excluded)
if included_classes_duration is None or excluded_classes_duration is None:
# Didn't get the time for all classes, so time is unknown
return None
if included:
return included_classes_duration
assert excluded, (
f"TestRun {test} is not full file but doesn't have included or excluded classes"
)
if file_duration is None:
return None
return file_duration - excluded_classes_duration
def shard(
sharded_jobs: list[ShardJob],
pytest_sharded_tests: Sequence[ShardedTest],
estimated_time_limit: float | None = None,
serial: bool = False,
) -> None:
# Modifies sharded_jobs in place
if len(sharded_jobs) == 0:
assert len(pytest_sharded_tests) == 0, (
"No shards provided but there are tests to shard"
)
return
round_robin_index = 0
def _get_min_sharded_job(
sharded_jobs: list[ShardJob], test: ShardedTest
) -> ShardJob:
if test.time is None:
nonlocal round_robin_index
job = sharded_jobs[round_robin_index % len(sharded_jobs)]
round_robin_index += 1
return job
return min(sharded_jobs, key=lambda j: j.get_total_time())
def _shard_serial(
tests: Sequence[ShardedTest], sharded_jobs: list[ShardJob]
) -> None:
assert estimated_time_limit is not None, "Estimated time limit must be provided"
new_sharded_jobs = sharded_jobs
for test in tests:
if (
len(sharded_jobs) > 1
and sharded_jobs[-1].get_total_time() > estimated_time_limit
):
new_sharded_jobs = sharded_jobs[:-1]
min_sharded_job = _get_min_sharded_job(new_sharded_jobs, test)
min_sharded_job.serial.append(test)
def _shard_parallel(
tests: Sequence[ShardedTest], sharded_jobs: list[ShardJob]
) -> None:
for test in tests:
min_sharded_job = _get_min_sharded_job(sharded_jobs, test)
min_sharded_job.parallel.append(test)
if serial:
_shard_serial(pytest_sharded_tests, sharded_jobs)
else:
_shard_parallel(pytest_sharded_tests, sharded_jobs)
return
def calculate_shards(
num_shards: int,
tests: Sequence[TestRun],
test_file_times: dict[str, float],
test_class_times: dict[str, dict[str, float]] | None,
must_serial: Callable[[str], bool] | None = None,
sort_by_time: bool = True,
) -> list[tuple[float, list[ShardedTest]]]:
must_serial = must_serial or (lambda x: True)
test_class_times = test_class_times or {}
# Divide tests into pytest shards
if sort_by_time:
known_tests = [
x
for x in tests
if get_duration(x, test_file_times, test_class_times) is not None
]
unknown_tests = [x for x in tests if x not in known_tests]
pytest_sharded_tests = sorted(
get_with_pytest_shard(known_tests, test_file_times, test_class_times),
key=lambda j: j.get_time(),
reverse=True,
) + get_with_pytest_shard(unknown_tests, test_file_times, test_class_times)
else:
pytest_sharded_tests = get_with_pytest_shard(
tests, test_file_times, test_class_times
)
del tests
serial_tests = [test for test in pytest_sharded_tests if must_serial(test.name)]
parallel_tests = [test for test in pytest_sharded_tests if test not in serial_tests]
serial_time = sum(test.get_time() for test in serial_tests)
parallel_time = sum(test.get_time() for test in parallel_tests)
total_time = serial_time + parallel_time / NUM_PROCS_FOR_SHARDING_CALC
estimated_time_per_shard = total_time / num_shards
# Separate serial tests from parallel tests as much as possible to maximize
# parallelism by putting all the serial tests on the first num_serial_shards
# shards. The estimated_time_limit is the estimated time it should take for
# the least filled serial shard. Ex if we have 8 min of serial tests, 20 min
# of parallel tests, 6 shards, and 2 procs per machine, we would expect each
# machine to take 3 min and should aim for 3 serial shards, with shards 1
# and 2 taking 3 min and shard 3 taking 2 min. The estimated time limit
# would be 2 min. This ensures that the first few shard contains as many
# serial tests as possible and as few parallel tests as possible. The least
# filled/last (in the example, the 3rd) shard may contain a lot of both
# serial and parallel tests.
estimated_time_limit = 0.0
if estimated_time_per_shard != 0:
estimated_time_limit = serial_time % estimated_time_per_shard
if estimated_time_limit <= 0.01:
estimated_time_limit = estimated_time_per_shard
if total_time == 0:
num_serial_shards = num_shards
else:
num_serial_shards = max(math.ceil(serial_time / total_time * num_shards), 1)
sharded_jobs = [ShardJob() for _ in range(num_shards)]
shard(
sharded_jobs=sharded_jobs[:num_serial_shards],
pytest_sharded_tests=serial_tests,
estimated_time_limit=estimated_time_limit,
serial=True,
)
shard(
sharded_jobs=sharded_jobs,
pytest_sharded_tests=parallel_tests,
serial=False,
)
return [job.convert_to_tuple() for job in sharded_jobs]
def get_test_case_configs(dirpath: str) -> None:
get_disabled_tests(dirpath=dirpath)
| ShardJob |
python | numpy__numpy | numpy/linalg/tests/test_linalg.py | {
"start": 58057,
"end": 58133
} | class ____(_TestNormBase):
dt = np.float32
dec = 6
| _TestNormSingleBase |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/skip_test.py | {
"start": 2251,
"end": 3273
} | class ____(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def _build_skip_dataset(self, count, options=None):
dataset = dataset_ops.Dataset.range(100).skip(count)
if options:
dataset = dataset.with_options(options)
return dataset
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(symbolic_checkpoint=[False, True]),
combinations.combine(count=[50], num_outputs=[50]) +
combinations.combine(count=[200, 100, -1], num_outputs=[0]) +
combinations.combine(count=[0], num_outputs=[100])))
def test(self, verify_fn, count, num_outputs, symbolic_checkpoint):
options = options_lib.Options()
options.experimental_symbolic_checkpoint = symbolic_checkpoint
verify_fn(self, lambda: self._build_skip_dataset(count, options),
num_outputs)
| SkipDatasetCheckpointTest |
python | getsentry__sentry | src/sentry/discover/endpoints/serializers.py | {
"start": 12315,
"end": 13907
} | class ____(serializers.Serializer):
transaction = serializers.CharField(required=True, max_length=200)
team = serializers.ListField(child=serializers.IntegerField())
def validate_team(self, team_ids: Sequence[int]) -> QuerySet[Team]:
request = self.context["request"]
organization = self.context["organization"]
verified_teams = {team.id for team in Team.objects.get_for_user(organization, request.user)}
teams = Team.objects.filter(id__in=team_ids)
for team in teams:
if team.id in verified_teams:
continue
if not request.access.has_team_access(team):
raise serializers.ValidationError(
f"You do not have permission to access {team.name}"
)
return teams
def validate(self, data):
data = super().validate(data)
if self.context.get("mode") == "create":
team = data["team"]
count = (
TeamKeyTransaction.objects.values("project_team")
.filter(project_team__team_id__in=[item.id for item in team])
.annotate(total=Count("project_team"))
.aggregate(max=Max("total"))
)
# Limit the number of key transactions for a team
if count["max"] and count["max"] >= MAX_TEAM_KEY_TRANSACTIONS:
raise serializers.ValidationError(
f"At most {MAX_TEAM_KEY_TRANSACTIONS} Key Transactions can be added for a team"
)
return data
| TeamKeyTransactionSerializer |
python | neetcode-gh__leetcode | python/0056-merge-intervals.py | {
"start": 0,
"end": 431
} | class ____:
def merge(self, intervals: List[List[int]]) -> List[List[int]]:
intervals.sort(key=lambda pair: pair[0])
output = [intervals[0]]
for start, end in intervals:
lastEnd = output[-1][1]
if start <= lastEnd:
# merge
output[-1][1] = max(lastEnd, end)
else:
output.append([start, end])
return output
| Solution |
python | donnemartin__interactive-coding-challenges | recursion_dynamic/power_set/test_power_set.py | {
"start": 18,
"end": 1069
} | class ____(unittest.TestCase):
def test_power_set(self):
input_set = ''
expected = ['']
self.run_test(input_set, expected)
input_set = 'a'
expected = ['a', '']
self.run_test(input_set, expected)
input_set = 'ab'
expected = ['a', 'ab', 'b', '']
self.run_test(input_set, expected)
input_set = 'abc'
expected = ['a', 'ab', 'abc', 'ac',
'b', 'bc', 'c', '']
self.run_test(input_set, expected)
input_set = 'aabc'
expected = ['a', 'aa', 'aab', 'aabc',
'aac', 'ab', 'abc', 'ac',
'b', 'bc', 'c', '']
self.run_test(input_set, expected)
print('Success: test_power_set')
def run_test(self, input_set, expected):
combinatoric = Combinatoric()
result = combinatoric.find_power_set(input_set)
self.assertEqual(result, expected)
def main():
test = TestPowerSet()
test.test_power_set()
if __name__ == '__main__':
main()
| TestPowerSet |
python | RaRe-Technologies__gensim | gensim/models/word2vec.py | {
"start": 94392,
"end": 95570
} | class ____:
def __init__(self, dirname):
"""Iterate over sentences from the `Brown corpus <https://en.wikipedia.org/wiki/Brown_Corpus>`_
(part of `NLTK data <https://www.nltk.org/data.html>`_).
"""
self.dirname = dirname
def __iter__(self):
for fname in os.listdir(self.dirname):
fname = os.path.join(self.dirname, fname)
if not os.path.isfile(fname):
continue
with utils.open(fname, 'rb') as fin:
for line in fin:
line = utils.to_unicode(line)
# each file line is a single sentence in the Brown corpus
# each token is WORD/POS_TAG
token_tags = [t.split('/') for t in line.split() if len(t.split('/')) == 2]
# ignore words with non-alphabetic tags like ",", "!" etc (punctuation, weird stuff)
words = ["%s/%s" % (token.lower(), tag[:2]) for token, tag in token_tags if tag[:2].isalpha()]
if not words: # don't bother sending out empty sentences
continue
yield words
| BrownCorpus |
python | wandb__wandb | wandb/vendor/pygments/filters/__init__.py | {
"start": 9314,
"end": 10539
} | class ____(Filter):
"""Gobbles source code lines (eats initial characters).
This filter drops the first ``n`` characters off every line of code. This
may be useful when the source code fed to the lexer is indented by a fixed
amount of space that isn't desired in the output.
Options accepted:
`n` : int
The number of characters to gobble.
.. versionadded:: 1.2
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.n = get_int_opt(options, 'n', 0)
def gobble(self, value, left):
if left < len(value):
return value[left:], 0
else:
return u'', left - len(value)
def filter(self, lexer, stream):
n = self.n
left = n # How many characters left to gobble.
for ttype, value in stream:
# Remove ``left`` tokens from first line, ``n`` from all others.
parts = value.split('\n')
(parts[0], left) = self.gobble(parts[0], left)
for i in range(1, len(parts)):
(parts[i], left) = self.gobble(parts[i], n)
value = u'\n'.join(parts)
if value != '':
yield ttype, value
| GobbleFilter |
python | huggingface__transformers | src/transformers/models/resnet/modeling_resnet.py | {
"start": 9036,
"end": 10081
} | class ____(PreTrainedModel):
config: ResNetConfig
base_model_prefix = "resnet"
main_input_name = "pixel_values"
input_modalities = ("image",)
_no_split_modules = ["ResNetConvLayer", "ResNetShortCut"]
@torch.no_grad()
def _init_weights(self, module):
if isinstance(module, nn.Conv2d):
init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
# copied from the `reset_parameters` method of `class Linear(Module)` in `torch`.
elif isinstance(module, nn.Linear):
init.kaiming_uniform_(module.weight, a=math.sqrt(5))
if module.bias is not None:
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(module.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(module.bias, -bound, bound)
elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
init.constant_(module.weight, 1)
init.constant_(module.bias, 0)
@auto_docstring
| ResNetPreTrainedModel |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-macrometa-gdn/llama_index/readers/macrometa_gdn/base.py | {
"start": 182,
"end": 2784
} | class ____(BaseReader):
"""
Macrometa GDN Reader.
Reads vectors from Macrometa GDN
"""
def __init__(self, url: str, apikey: str):
self.url = url
self.apikey = apikey
def load_data(self, collection_list: List[str]) -> List[Document]:
"""
Loads data from the input directory.
Args:
api: Macrometa GDN API key
collection_name: Name of the collection to read from
"""
if collection_list is None:
raise ValueError("Must specify collection name(s)")
results = []
for collection_name in collection_list:
collection = self._load_collection(collection_name)
results.append(
Document(
text=collection, extra_info={"collection_name": collection_name}
)
)
return results
def _load_collection(self, collection_name: str) -> str:
all_documents = []
"""Loads a collection from the database.
Args:
collection_name: Name of the collection to read from
"""
url = self.url + "/_fabric/_system/_api/cursor"
headers = {
"accept": "application/json",
"content-type": "application/json",
"Authorization": "apikey " + self.apikey,
}
data = {
"batchSize": 1000,
"ttl": 60,
"query": "FOR doc IN " + collection_name + " RETURN doc",
}
response = requests.post(url, headers=headers, data=json.dumps(data))
response_json = response.json()
if response.status_code == 201:
all_documents.extend(response_json.get("result", []))
while response_json.get("hasMore"):
cursor_id = response_json.get("id")
next_url = self.url + "/_fabric/_system/_api/cursor/" + cursor_id
response = requests.put(next_url, headers=headers)
if response.status_code == 200:
response_json = response.json()
all_documents.extend(response_json.get("result", []))
else:
print(f"Request failed with status code {response.status_code}")
break
else:
print(f"Initial request failed with status code {response.status_code}")
return str(all_documents)
if __name__ == "__main__":
reader = MacrometaGDNReader("https://api-anurag.eng.macrometa.io", "test")
print(reader.load_data(collection_list=["test"]))
| MacrometaGDNReader |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/encoders.py | {
"start": 4392,
"end": 5120
} | class ____(nn.Module):
def __init__(
self, height: int, width: int, initial_channels: int, output_size: int
):
super().__init__()
self.output_size = output_size
self.input_size = height * width * initial_channels
self.dense = nn.Sequential(
linear_layer(
self.input_size,
self.output_size,
kernel_init=Initialization.KaimingHeNormal,
kernel_gain=1.41, # Use ReLU gain
),
nn.LeakyReLU(),
)
def forward(self, visual_obs: torch.Tensor) -> torch.Tensor:
hidden = visual_obs.reshape(-1, self.input_size)
return self.dense(hidden)
| FullyConnectedVisualEncoder |
python | python-openxml__python-docx | src/docx/opc/pkgreader.py | {
"start": 332,
"end": 3741
} | class ____:
"""Provides access to the contents of a zip-format OPC package via its
:attr:`serialized_parts` and :attr:`pkg_srels` attributes."""
def __init__(self, content_types, pkg_srels, sparts):
super(PackageReader, self).__init__()
self._pkg_srels = pkg_srels
self._sparts = sparts
@staticmethod
def from_file(pkg_file):
"""Return a |PackageReader| instance loaded with contents of `pkg_file`."""
phys_reader = PhysPkgReader(pkg_file)
content_types = _ContentTypeMap.from_xml(phys_reader.content_types_xml)
pkg_srels = PackageReader._srels_for(phys_reader, PACKAGE_URI)
sparts = PackageReader._load_serialized_parts(phys_reader, pkg_srels, content_types)
phys_reader.close()
return PackageReader(content_types, pkg_srels, sparts)
def iter_sparts(self):
"""Generate a 4-tuple `(partname, content_type, reltype, blob)` for each of the
serialized parts in the package."""
for s in self._sparts:
yield (s.partname, s.content_type, s.reltype, s.blob)
def iter_srels(self):
"""Generate a 2-tuple `(source_uri, srel)` for each of the relationships in the
package."""
for srel in self._pkg_srels:
yield (PACKAGE_URI, srel)
for spart in self._sparts:
for srel in spart.srels:
yield (spart.partname, srel)
@staticmethod
def _load_serialized_parts(phys_reader, pkg_srels, content_types):
"""Return a list of |_SerializedPart| instances corresponding to the parts in
`phys_reader` accessible by walking the relationship graph starting with
`pkg_srels`."""
sparts = []
part_walker = PackageReader._walk_phys_parts(phys_reader, pkg_srels)
for partname, blob, reltype, srels in part_walker:
content_type = content_types[partname]
spart = _SerializedPart(partname, content_type, reltype, blob, srels)
sparts.append(spart)
return tuple(sparts)
@staticmethod
def _srels_for(phys_reader, source_uri):
"""Return |_SerializedRelationships| instance populated with relationships for
source identified by `source_uri`."""
rels_xml = phys_reader.rels_xml_for(source_uri)
return _SerializedRelationships.load_from_xml(source_uri.baseURI, rels_xml)
@staticmethod
def _walk_phys_parts(phys_reader, srels, visited_partnames=None):
"""Generate a 4-tuple `(partname, blob, reltype, srels)` for each of the parts
in `phys_reader` by walking the relationship graph rooted at srels."""
if visited_partnames is None:
visited_partnames = []
for srel in srels:
if srel.is_external:
continue
partname = srel.target_partname
if partname in visited_partnames:
continue
visited_partnames.append(partname)
reltype = srel.reltype
part_srels = PackageReader._srels_for(phys_reader, partname)
blob = phys_reader.blob_for(partname)
yield (partname, blob, reltype, part_srels)
next_walker = PackageReader._walk_phys_parts(phys_reader, part_srels, visited_partnames)
for partname, blob, reltype, srels in next_walker:
yield (partname, blob, reltype, srels)
| PackageReader |
python | getsentry__sentry | src/sentry/migrations/0922_dashboard_starred_add_position_column_and_constraint.py | {
"start": 230,
"end": 2450
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0921_convert_org_saved_searches_to_views_rerevised"),
]
operations = [
migrations.AlterUniqueTogether(
name="dashboardfavoriteuser",
unique_together=set(),
),
migrations.AddField(
model_name="dashboardfavoriteuser",
name="organization",
field=sentry.db.models.fields.foreignkey.FlexibleForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to="sentry.organization"
),
),
migrations.AddField(
model_name="dashboardfavoriteuser",
name="position",
field=models.PositiveSmallIntegerField(null=True),
),
migrations.SeparateDatabaseAndState(
state_operations=[
migrations.AddConstraint(
model_name="dashboardfavoriteuser",
constraint=models.UniqueConstraint(
fields=("user_id", "dashboard"),
name="sentry_dashboardfavoriteuser_user_id_dashboard_id_2c7267a5_uniq",
),
),
],
),
]
| Migration |
python | marshmallow-code__apispec | tests/schemas.py | {
"start": 1785,
"end": 1827
} | class ____(fields.List):
pass
| CustomList |
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_color__property.py | {
"start": 1359,
"end": 4169
} | class ____:
def test_valid(self) -> None:
prop = bcpc.Color()
assert prop.is_valid((0, 127, 255))
assert prop.is_valid((0, 127, 255, 1.0))
assert prop.is_valid("#00aaff")
assert prop.is_valid("#00AAFF")
assert prop.is_valid("#00AaFf")
assert prop.is_valid("blue")
assert prop.is_valid("BLUE")
assert prop.is_valid('rgb(10, 20, 30)')
assert prop.is_valid('rgba(10, 20, 30, 1)')
assert prop.is_valid('rgba(10, 20, 30, 0.5)')
assert prop.is_valid(RGB(10, 20, 30))
assert prop.is_valid(0xFFFF0088)
def test_invalid(self) -> None:
prop = bcpc.Color()
assert not prop.is_valid(None)
assert not prop.is_valid(False)
assert not prop.is_valid(True)
assert not prop.is_valid(0.0)
assert not prop.is_valid(1.0)
assert not prop.is_valid(1.0 + 1.0j)
assert not prop.is_valid("")
assert not prop.is_valid(())
assert not prop.is_valid([])
assert not prop.is_valid({})
assert not prop.is_valid(_TestHasProps())
assert not prop.is_valid(_TestModel())
assert not prop.is_valid((0, -127, 255))
assert not prop.is_valid((0, 127))
assert not prop.is_valid((0, 127, 1.0))
assert not prop.is_valid((0, 127, 255, 255))
assert not prop.is_valid('(0, 127, 255)')
assert not prop.is_valid('rgb(0, -127, 255)')
assert not prop.is_valid('rgb(0, 127)')
assert not prop.is_valid('rgb(0, 127, 1.0)')
assert not prop.is_valid('rgb(256, 1, 1)')
assert not prop.is_valid('rgb(256, 1, 1, 1.0)')
assert not prop.is_valid('(10, 20, 30')
assert not prop.is_valid('rgba(10, 20, 30')
assert not prop.is_valid('rgba(10, 20, 30)')
assert not prop.is_valid('rgba(10, 20, 30,)')
assert not prop.is_valid('rgba(10, 20)')
assert not prop.is_valid('rgba(10, 20, 256, 1)')
assert not prop.is_valid('rgba(10, 20, 256, 10)')
assert not prop.is_valid('rgba(10, 20, 30, 50)')
assert not prop.is_valid("00aaff")
assert not prop.is_valid("00AAFF")
assert not prop.is_valid("00AaFf")
assert not prop.is_valid("#00AaFg")
assert not prop.is_valid("#00AaFff")
assert not prop.is_valid("foobar")
assert not prop.is_valid(3.14)
def test_transform(self) -> None:
prop = bcpc.Color()
assert prop.transform((0, 127, 255)) == "rgb(0, 127, 255)"
assert prop.transform((0, 127, 255, 0.1)) == "rgba(0, 127, 255, 0.1)"
def test_has_ref(self) -> None:
prop = bcpc.Color()
assert not prop.has_ref
def test_str(self) -> None:
prop = bcpc.Color()
assert str(prop) == "Color"
| Test_Color |
python | kamyu104__LeetCode-Solutions | Python/find-and-replace-in-string.py | {
"start": 61,
"end": 907
} | class ____(object):
def findReplaceString(self, S, indexes, sources, targets):
"""
:type S: str
:type indexes: List[int]
:type sources: List[str]
:type targets: List[str]
:rtype: str
"""
bucket = [None] * len(S)
for i in xrange(len(indexes)):
if all(indexes[i]+k < len(S) and S[indexes[i]+k] == sources[i][k]
for k in xrange(len(sources[i]))):
bucket[indexes[i]] = (len(sources[i]), list(targets[i]))
result = []
i = 0
while i < len(S):
if bucket[i]:
result.extend(bucket[i][1])
i += bucket[i][0]
else:
result.append(S[i])
i += 1
return "".join(result)
# Time: O(mlogm + m * n)
# Space: O(n + m)
| Solution |
python | numba__numba | numba/cuda/cudadrv/driver.py | {
"start": 70847,
"end": 72250
} | class ____(AutoFreePointer):
"""A memory pointer that refers to a managed memory buffer (can be accessed
on both host and device).
:param context: The context in which the pointer was mapped.
:type context: Context
:param pointer: The address of the buffer.
:type pointer: ctypes.c_void_p
:param size: The size of the buffer in bytes.
:type size: int
:param owner: The owner is sometimes set by the internals of this class, or
used for Numba's internal memory management. It should not be
provided by an external user of the ``ManagedMemory`` class
(e.g. from within an EMM Plugin); the default of `None`
should always suffice.
:type owner: NoneType
:param finalizer: A function that is called when the buffer is to be freed.
:type finalizer: function
"""
__cuda_memory__ = True
def __init__(self, context, pointer, size, owner=None, finalizer=None):
self.owned = owner
devptr = pointer
super().__init__(context, devptr, size, finalizer=finalizer)
# For buffer interface
self._buflen_ = self.size
if USE_NV_BINDING:
self._bufptr_ = self.device_pointer
else:
self._bufptr_ = self.device_pointer.value
def own(self):
return ManagedOwnedPointer(weakref.proxy(self))
| ManagedMemory |
python | django__django | django/contrib/gis/db/models/functions.py | {
"start": 19072,
"end": 19502
} | class ____(SQLiteDecimalToFloatMixin, GeomOutputGeoFunc):
def __init__(self, expression, x, y, z=0.0, **extra):
expressions = [
expression,
self._handle_param(x, "x", NUMERIC_TYPES),
self._handle_param(y, "y", NUMERIC_TYPES),
]
if z != 0.0:
expressions.append(self._handle_param(z, "z", NUMERIC_TYPES))
super().__init__(*expressions, **extra)
| Scale |
python | GoogleCloudPlatform__python-docs-samples | service_extensions/callouts/add_header/service_pb2_grpc.py | {
"start": 4570,
"end": 6798
} | class ____(object):
"""[#protodoc-title: External processing service]
A service that can access and modify HTTP requests and responses
as part of a filter chain.
The overall external processing protocol works like this:
1. Envoy sends to the service information about the HTTP request.
2. The service sends back a ProcessingResponse message that directs Envoy
to either stop processing, continue without it, or send it the
next chunk of the message body.
3. If so requested, Envoy sends the server chunks of the message body,
or the entire body at once. In either case, the server sends back
a ProcessingResponse after each message it receives.
4. If so requested, Envoy sends the server the HTTP trailers,
and the server sends back a ProcessingResponse.
5. At this point, request processing is done, and we pick up again
at step 1 when Envoy receives a response from the upstream server.
6. At any point above, if the server closes the gRPC stream cleanly,
then Envoy proceeds without consulting the server.
7. At any point above, if the server closes the gRPC stream with an error,
then Envoy returns a 500 error to the client, unless the filter
was configured to ignore errors.
In other words, the process is a request/response conversation, but
using a gRPC stream to make it easier for the server to
maintain state.
"""
@staticmethod
def Process(
request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.stream_stream(
request_iterator,
target,
"/envoy.service.ext_proc.v3.ExternalProcessor/Process",
service__pb2.ProcessingRequest.SerializeToString,
service__pb2.ProcessingResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
| ExternalProcessor |
python | jazzband__django-redis | django_redis/serializers/pickle.py | {
"start": 154,
"end": 1254
} | class ____(BaseSerializer):
def __init__(self, options) -> None:
self._pickle_version = pickle.DEFAULT_PROTOCOL
self.setup_pickle_version(options)
super().__init__(options=options)
def setup_pickle_version(self, options) -> None:
if "PICKLE_VERSION" in options:
try:
self._pickle_version = int(options["PICKLE_VERSION"])
if self._pickle_version > pickle.HIGHEST_PROTOCOL:
error_message = (
f"PICKLE_VERSION can't be higher than pickle.HIGHEST_PROTOCOL:"
f" {pickle.HIGHEST_PROTOCOL}"
)
raise ImproperlyConfigured(error_message)
except (ValueError, TypeError) as e:
error_message = "PICKLE_VERSION value must be an integer"
raise ImproperlyConfigured(error_message) from e
def dumps(self, value: Any) -> bytes:
return pickle.dumps(value, self._pickle_version)
def loads(self, value: bytes) -> Any:
return pickle.loads(value)
| PickleSerializer |
python | realpython__materials | python-class/animals.py | {
"start": 306,
"end": 383
} | class ____(Mammal):
def walk(self):
print("The dog is walking")
| Dog |
python | kamyu104__LeetCode-Solutions | Python/set-mismatch.py | {
"start": 594,
"end": 1103
} | class ____(object):
def findErrorNums(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
result = [0] * 2
for i in nums:
if nums[abs(i)-1] < 0:
result[0] = abs(i)
else:
nums[abs(i)-1] *= -1
for i in xrange(len(nums)):
if nums[i] > 0:
result[1] = i+1
else:
nums[i] *= -1
return result
# Time: O(n)
# Space: O(1)
| Solution2 |
python | keras-team__keras | keras/src/random/seed_generator.py | {
"start": 307,
"end": 5590
} | class ____:
"""Generates variable seeds upon each call to a function generating
random numbers.
In Keras, all random number generators (such as
`keras.random.normal()`) are stateless, meaning that if you pass an
integer seed to them (such as `seed=42`), they will return the same
values for repeated calls. To get different values for each
call, a `SeedGenerator` providing the state of the random generator
has to be used.
Note that all the random number generators have a default seed of None,
which implies that an internal global SeedGenerator is used.
If you need to decouple the RNG from the global state you can provide
a local `StateGenerator` with either a deterministic or random initial
state.
Remark concerning the JAX backen: Note that the use of a local
`StateGenerator` as seed argument is required for JIT compilation of
RNG with the JAX backend, because the use of global state is not
supported.
Example:
```python
seed_gen = keras.random.SeedGenerator(seed=42)
values = keras.random.normal(shape=(2, 3), seed=seed_gen)
new_values = keras.random.normal(shape=(2, 3), seed=seed_gen)
```
Usage in a layer:
```python
class Dropout(keras.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.seed_generator = keras.random.SeedGenerator(1337)
def call(self, x, training=False):
if training:
return keras.random.dropout(
x, rate=0.5, seed=self.seed_generator
)
return x
```
"""
def __init__(self, seed=None, name=None, **kwargs):
if name is None:
name = auto_name(self.__class__.__name__)
self.name = name
custom_backend = kwargs.pop("backend", None)
if kwargs:
raise ValueError(f"Unrecognized keyword arguments: {kwargs}")
if custom_backend is not None:
self.backend = custom_backend
else:
self.backend = backend
self._initial_seed = seed
if seed is None:
seed = make_default_seed()
if not isinstance(seed, int):
raise ValueError(
f"Argument `seed` must be an integer. Received: seed={seed}"
)
def seed_initializer(*args, **kwargs):
dtype = kwargs.get("dtype", None)
return self.backend.convert_to_tensor([seed, 0], dtype=dtype)
with self.backend.name_scope(self.name, caller=self):
self.state = self.backend.Variable(
seed_initializer,
shape=(2,),
dtype=self.backend.random_seed_dtype(),
trainable=False,
aggregation="none",
name="seed_generator_state",
)
def next(self, ordered=True):
seed_state = self.state
# Use * 1 to create a copy
new_seed_value = seed_state.value * 1
if ordered:
increment = self.backend.convert_to_tensor(
np.array([0, 1]), dtype=seed_state.dtype
)
self.state.assign(self.backend.numpy.add(seed_state, increment))
else:
# This produces a sequence of near-unique numbers
# between 0 and 1M
self.state.assign((seed_state + 1) * 5387 % 933199)
return new_seed_value
def get_config(self):
return {"seed": self._initial_seed}
@classmethod
def from_config(cls, config):
return cls(**config)
def global_seed_generator():
if jax_utils.is_in_jax_tracing_scope():
raise ValueError(
"[JAX RNG] When tracing a JAX function, "
"you should only use seeded random ops, e.g. "
"you should create a `SeedGenerator` instance, attach it "
"to your layer/model, and pass the instance as the `seed` "
"argument when calling random ops. Unseeded random ops "
"would get incorrectly traced by JAX and would become constant "
"after tracing. Example:\n\n"
"```\n"
"# Make sure to set the seed generator as a layer attribute\n"
"self.seed_generator = keras.random.SeedGenerator(seed=1337)\n"
"...\n"
"out = keras.random.normal(shape=(1,), seed=self.seed_generator)\n"
"```"
)
gen = global_state.get_global_attribute("global_seed_generator")
if gen is None:
gen = SeedGenerator()
global_state.set_global_attribute("global_seed_generator", gen)
return gen
def make_default_seed():
return python_random.randint(1, int(1e9))
def draw_seed(seed):
from keras.src.backend import convert_to_tensor
from keras.src.backend import random_seed_dtype
if isinstance(seed, SeedGenerator):
return seed.next()
elif isinstance(seed, int):
return convert_to_tensor([seed, 0], dtype=random_seed_dtype())
elif seed is None:
return global_seed_generator().next(ordered=False)
raise ValueError(
"Argument `seed` must be either an integer "
"or an instance of `SeedGenerator`. "
f"Received: seed={seed} (of type {type(seed)})"
)
| SeedGenerator |
python | tensorflow__tensorflow | tensorflow/lite/python/lite_v2_test.py | {
"start": 171399,
"end": 186865
} | class ____(lite_v2_test_util.ModelTest):
@test_util.run_v2_only
def testVariants(self):
@tf.function(input_signature=[tf.TensorSpec(shape=[1], dtype=tf.float32)])
def model(v):
m = map_ops.empty_tensor_map()
k = tf.constant(1.0)
p = tf.add(k, v)
with ops.control_dependencies([m]):
m2 = map_ops.tensor_map_insert(m, p, v)
with ops.control_dependencies([m2]):
return map_ops.tensor_map_size(m2)
concrete_func = model.get_concrete_function()
converter = lite.TFLiteConverterV2.from_concrete_functions(
[concrete_func], model
)
converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS,
lite.OpsSet.SELECT_TF_OPS,
]
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interp = interpreter.Interpreter(model_content=tflite_model)
input_details = interp.get_input_details()
output_details = interp.get_output_details()
interp.allocate_tensors()
input_data = np.array([1.0], dtype=np.float32)
interp.set_tensor(input_details[0]['index'], input_data)
interp.invoke()
actual_value = interp.get_tensor(output_details[0]['index'])
self.assertEqual(1, actual_value)
interp.invoke()
actual_value = interp.get_tensor(output_details[0]['index'])
self.assertEqual(1, actual_value)
interp.invoke()
actual_value = interp.get_tensor(output_details[0]['index'])
self.assertEqual(1, actual_value)
@test_util.run_v2_only
def testVariantsWithCond(self):
def create_v1_saved_model():
saved_model_dir = os.path.join(self.get_temp_dir(), 'variants_with_cond')
with tf.Graph().as_default():
with tf.compat.v1.Session() as sess:
m = map_ops.empty_tensor_map()
def body(i, m):
m = map_ops.tensor_map_insert(m, i, i)
return i + 1, m
in_tensor = tf.compat.v1.placeholder(
shape=[1], dtype=tf.int32, name='input'
)
_, result_m = tf.cond(
in_tensor < 10,
lambda: body(in_tensor, m),
lambda: body(in_tensor + 1, m),
)
out_tensor = in_tensor + map_ops.tensor_map_size(result_m)
inputs = {'x': in_tensor}
outputs = {'z': out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
saved_model_dir = create_v1_saved_model()
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS,
lite.OpsSet.SELECT_TF_OPS,
]
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interp = interpreter.Interpreter(model_content=tflite_model)
input_details = interp.get_input_details()
output_details = interp.get_output_details()
interp.allocate_tensors()
input_data = np.array([0], dtype=np.int32)
interp.set_tensor(input_details[0]['index'], input_data)
interp.invoke()
expected_value = np.array([1], dtype=np.int32)
actual_value = interp.get_tensor(output_details[0]['index'])
self.assertEqual(expected_value, actual_value)
interp.invoke()
actual_value = interp.get_tensor(output_details[0]['index'])
self.assertEqual(expected_value, actual_value)
interp.invoke()
actual_value = interp.get_tensor(output_details[0]['index'])
self.assertEqual(expected_value, actual_value)
@test_util.run_v2_only
def testVariantsWithWhile(self):
def create_v1_saved_model():
saved_model_dir = os.path.join(self.get_temp_dir(), 'variants_with_while')
with tf.Graph().as_default():
with tf.compat.v1.Session() as sess:
m = map_ops.empty_tensor_map()
def cond(i, m):
del m
return i < 10
def body(i, m):
m = map_ops.tensor_map_insert(m, i, i)
return i + 1, m
_, result_m = tf.while_loop(cond, body, [0, m])
in_tensor = tf.compat.v1.placeholder(
shape=[1], dtype=tf.int32, name='input'
)
out_tensor = in_tensor + map_ops.tensor_map_size(result_m)
inputs = {'x': in_tensor}
outputs = {'z': out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
saved_model_dir = create_v1_saved_model()
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS,
lite.OpsSet.SELECT_TF_OPS,
]
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interp = interpreter.Interpreter(model_content=tflite_model)
input_details = interp.get_input_details()
output_details = interp.get_output_details()
interp.allocate_tensors()
input_data = np.array([0], dtype=np.int32)
interp.set_tensor(input_details[0]['index'], input_data)
interp.invoke()
actual_value = interp.get_tensor(output_details[0]['index'])
self.assertEqual(10, actual_value)
interp.invoke()
actual_value = interp.get_tensor(output_details[0]['index'])
self.assertEqual(10, actual_value)
interp.invoke()
actual_value = interp.get_tensor(output_details[0]['index'])
self.assertEqual(10, actual_value)
@test_util.run_v2_only
def testResources(self):
def create_v1_saved_model():
saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_resources')
with tf.Graph().as_default():
with tf.compat.v1.Session() as sess:
in_tensor = tf.compat.v1.placeholder(
shape=[1], dtype=tf.float32, name='input'
)
stack = tf.raw_ops.StackV2(max_size=10, elem_type=tf.float32)
w = tf.raw_ops.StackPushV2(handle=stack, elem=in_tensor)
with ops.control_dependencies([w]):
a = in_tensor + in_tensor
with ops.control_dependencies([a]):
out_tensor = a + tf.raw_ops.StackPopV2(
handle=stack, elem_type=tf.float32
)
inputs = {'x': in_tensor}
outputs = {'z': out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
saved_model_dir = create_v1_saved_model()
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS,
lite.OpsSet.SELECT_TF_OPS,
]
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interp = interpreter.Interpreter(model_content=tflite_model)
input_details = interp.get_input_details()
output_details = interp.get_output_details()
interp.allocate_tensors()
input_data = np.array([1.0], dtype=np.float32)
interp.set_tensor(input_details[0]['index'], input_data)
interp.invoke()
actual_value = interp.get_tensor(output_details[0]['index'])
self.assertEqual(3.0, actual_value)
interp.invoke()
actual_value = interp.get_tensor(output_details[0]['index'])
self.assertEqual(3.0, actual_value)
interp.invoke()
actual_value = interp.get_tensor(output_details[0]['index'])
self.assertEqual(3.0, actual_value)
@test_util.run_v2_only
def testResourcesWithCond(self):
def create_v1_saved_model():
saved_model_dir = os.path.join(self.get_temp_dir(), 'resources_with_cond')
with tf.Graph().as_default():
with tf.compat.v1.Session() as sess:
in_tensor = tf.compat.v1.placeholder(
shape=[1], dtype=tf.float32, name='input'
)
def body(i, arr):
n = tf.raw_ops.StackPushV2(
handle=arr, elem=tf.cast(i, dtype=tf.float32)
)
return n, arr
arr = tf.raw_ops.StackV2(max_size=10, elem_type=tf.float32)
n, result_arr = tf.cond(
in_tensor < 10, lambda: body(0, arr), lambda: body(1, arr)
)
with ops.control_dependencies([result_arr, n]):
out_tensor = tf.raw_ops.StackPopV2(
handle=result_arr, elem_type=tf.float32
)
inputs = {'x': in_tensor}
outputs = {'a': out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
saved_model_dir = create_v1_saved_model()
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS,
lite.OpsSet.SELECT_TF_OPS,
]
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interp = interpreter.Interpreter(model_content=tflite_model)
input_details = interp.get_input_details()
output_details = interp.get_output_details()
interp.allocate_tensors()
input_data = np.array([1.0], dtype=np.float32)
interp.set_tensor(input_details[0]['index'], input_data)
interp.invoke()
actual_value = interp.get_tensor(output_details[0]['index'])
self.assertEqual(0.0, actual_value)
@test_util.run_v2_only
def testResourcesWithWhile(self):
  """Converts a v1 model that pushes onto a stack inside a while loop."""

  def build_saved_model():
    model_dir = os.path.join(self.get_temp_dir(), 'resources_with_while')
    with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
      placeholder = tf.compat.v1.placeholder(
          shape=[1], dtype=tf.float32, name='input'
      )

      def keep_going(i, stack, last_pushed):
        del stack
        del last_pushed
        return i < 10

      def push(i, stack, last_pushed):
        del last_pushed
        pushed = tf.raw_ops.StackPushV2(
            handle=stack, elem=tf.cast(i, dtype=tf.float32)
        )
        return i + 1, stack, pushed

      stack = tf.raw_ops.StackV2(max_size=10, elem_type=tf.float32)
      _, result_stack, pushed = tf.while_loop(
          keep_going, push, [0, stack, 0.0]
      )
      # Ensure all pushes complete before popping the top of the stack.
      with ops.control_dependencies([result_stack, pushed]):
        popped = tf.raw_ops.StackPopV2(
            handle=result_stack, elem_type=tf.float32
        )
      saved_model.simple_save(
          sess, model_dir, {'x': placeholder}, {'a': popped}
      )
    return model_dir

  converter = lite.TFLiteConverterV2.from_saved_model(build_saved_model())
  # Stack ops require Select TF ops support in the converted model.
  converter.target_spec.supported_ops = [
      lite.OpsSet.TFLITE_BUILTINS,
      lite.OpsSet.SELECT_TF_OPS,
  ]
  tflite_model = converter.convert()
  self.assertIsNotNone(tflite_model)

  # The loop pushes 0..9; the final pop yields the last pushed value, 9.0.
  interp = interpreter.Interpreter(model_content=tflite_model)
  input_details = interp.get_input_details()
  output_details = interp.get_output_details()
  interp.allocate_tensors()
  interp.set_tensor(
      input_details[0]['index'], np.array([1.0], dtype=np.float32)
  )
  interp.invoke()
  self.assertEqual(9.0, interp.get_tensor(output_details[0]['index']))
@parameterized.named_parameters(
    ('EnableLoweringTensorListOps', True),
    ('DisableLoweringTensorListOps', False),
)
@test_util.run_v2_only
def testTensorListWithStaticSize(self, lower_tensor_list_ops):
  """Converts a v1 model using a statically sized TensorArray."""

  def build_saved_model():
    model_dir = os.path.join(
        self.get_temp_dir(), 'simple_mutable_variable'
    )
    with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
      placeholder = tf.compat.v1.placeholder(
          shape=[1], dtype=tf.float32, name='input'
      )
      tensor_array = tf.TensorArray(
          tf.float32, size=3, dynamic_size=False, clear_after_read=False
      )
      tensor_array = tensor_array.write(0, 10.0)
      tensor_array = tensor_array.write(1, 20.0)
      tensor_array = tensor_array.write(2, 30.0)
      result = tensor_array.read(0) + tensor_array.read(2)
      saved_model.simple_save(
          sess, model_dir, {'x': placeholder}, {'z': result}
      )
    return model_dir

  converter = lite.TFLiteConverterV2.from_saved_model(build_saved_model())
  if not lower_tensor_list_ops:
    # Without lowering, tensor list ops must run through Select TF ops.
    converter.target_spec.supported_ops = [
        lite.OpsSet.TFLITE_BUILTINS,
        lite.OpsSet.SELECT_TF_OPS,
    ]
  converter._experimental_lower_tensor_list_ops = lower_tensor_list_ops
  tflite_model = converter.convert()
  self.assertIsNotNone(tflite_model)

  # 10.0 (index 0) + 30.0 (index 2) == 40.0, regardless of lowering mode.
  interp = interpreter.Interpreter(model_content=tflite_model)
  input_details = interp.get_input_details()
  output_details = interp.get_output_details()
  interp.allocate_tensors()
  interp.set_tensor(
      input_details[0]['index'], np.array([1.0], dtype=np.float32)
  )
  interp.invoke()
  self.assertEqual(40.0, interp.get_tensor(output_details[0]['index']))
@parameterized.named_parameters(
    ('EnableLoweringTensorListOps', True),
    ('DisableLoweringTensorListOps', False),
)
@test_util.run_v2_only
def testTensorListWithDynamicSize(self, lower_tensor_list_ops):
  """Converts a v1 model using a dynamically sized TensorArray."""

  def build_saved_model():
    model_dir = os.path.join(
        self.get_temp_dir(), 'simple_mutable_variable'
    )
    with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
      placeholder = tf.compat.v1.placeholder(
          shape=[1], dtype=tf.float32, name='input'
      )
      tensor_array = tf.TensorArray(
          tf.float32, size=0, dynamic_size=True, clear_after_read=False
      )
      tensor_array = tensor_array.write(0, 10.0)
      tensor_array = tensor_array.write(1, 20.0)
      tensor_array = tensor_array.write(2, 30.0)
      result = tensor_array.read(0) + tensor_array.read(2)
      saved_model.simple_save(
          sess, model_dir, {'x': placeholder}, {'z': result}
      )
    return model_dir

  converter = lite.TFLiteConverterV2.from_saved_model(build_saved_model())
  if lower_tensor_list_ops:
    # Dynamically sized tensor lists cannot be lowered; the converter is
    # expected to fail with an actionable error message first.
    with self.assertRaises(convert.ConverterError) as error:
      converter.convert()
    self.assertIn(
        'Lowering tensor list ops is failed. Please consider using Select '
        'TF ops and disabling `_experimental_lower_tensor_list_ops` flag in '
        'the TFLite converter object.',
        str(error.exception),
    )
  # With Select TF ops enabled, conversion succeeds in both modes.
  converter.target_spec.supported_ops = [
      lite.OpsSet.TFLITE_BUILTINS,
      lite.OpsSet.SELECT_TF_OPS,
  ]
  tflite_model = converter.convert()
  self.assertIsNotNone(tflite_model)

  # 10.0 (index 0) + 30.0 (index 2) == 40.0.
  interp = interpreter.Interpreter(model_content=tflite_model)
  input_details = interp.get_input_details()
  output_details = interp.get_output_details()
  interp.allocate_tensors()
  interp.set_tensor(
      input_details[0]['index'], np.array([1.0], dtype=np.float32)
  )
  interp.invoke()
  self.assertEqual(40.0, interp.get_tensor(output_details[0]['index']))
| ResourceAndVariantTypes |
python | scrapy__scrapy | tests/test_spidermiddleware_referer.py | {
"start": 31740,
"end": 31972
} | class ____(MixinUnsafeUrl, TestRefererMiddleware):
settings = {"REFERRER_POLICY": "scrapy.spidermiddlewares.referer.SameOriginPolicy"}
resp_headers = {"Referrer-Policy": POLICY_UNSAFE_URL.upper()}
| TestPolicyHeaderPrecedence001 |
python | numba__numba | numba/cuda/simulator/cudadrv/driver.py | {
"start": 557,
"end": 658
} | class ____(object):
def get_device_count(self):
return 1
driver = FakeDriver()
| FakeDriver |
python | pytorch__pytorch | torch/_dynamo/variables/script_object.py | {
"start": 2086,
"end": 5957
} | class ____(UserDefinedObjectVariable):
_fake_script_object_cache: dict[int, "TorchScriptObjectVariable"] = {}
@classmethod
def is_matching_cls(cls, user_cls: type) -> bool:
return issubclass(user_cls, torch.ScriptObject) or is_opaque_type(user_cls)
@staticmethod
def create(proxy: Proxy, value: Any, **options: Any) -> "TorchScriptObjectVariable":
return TorchScriptObjectVariable(proxy, value, **options)
def __init__(self, proxy: Proxy, value: Any, source: Source, **kwargs: Any) -> None:
super().__init__(value, **kwargs)
self.proxy = proxy
self.proxy.node.meta["example_value"] = value
self.source = source
def as_proxy(self) -> Proxy:
return self.proxy
@_raise_hard_error_if_graph_break(
"Dynamo cannot safely trace script object due to graph break."
)
def var_getattr(self, tx: "InstructionTranslator", name: str) -> VariableTracker:
if getattr(self.value, "script_class_name", "") == OpaqueTypeStr:
unimplemented(
gb_type="Attempted to access attributes/methods on an OpaqueObject",
context=f"value={self.value}, attr={name}",
explanation="Attribute/method access of OpaqueObjects is not supported.",
hints=[
"Use custom operators instead of direct attribute/method access.",
],
)
from torch._higher_order_ops.torchbind import call_torchbind
from ..source import AttrSource
from .higher_order_ops import TorchHigherOrderOperatorVariable
method = getattr(self.value, name, None)
if method is None:
unimplemented(
gb_type="FakeScriptObject missing method implementation",
context=f"value={self.value}, method={name}",
explanation=f"TorchScript object {self.value} doesn't define the method {name}.",
hints=[
f"Ensure the method {name} is implemented in {self.value}.",
*graph_break_hints.USER_ERROR,
],
)
if not callable(method):
unimplemented(
gb_type="Attempted to access non-callable attribute of TorchScript object",
context=f"value={self.value}, method={name}",
explanation="Attribute accesses of TorchScript objects to non-callable attributes are not supported.",
hints=[
"Use method calls instead of attribute access.",
],
)
assert self.source is not None
return TorchHigherOrderOperatorVariable.make(
call_torchbind,
source=AttrSource(self.source, name),
script_obj_var=self,
method_name=name,
)
# We only support method calls on script objects. Interpreting the bytecodes
# should go through var_getattr then call_function instead of call_method.
#
# However, it's possible for call_method to be used directly e.g. for __setattr__.
@_raise_hard_error_if_graph_break(
"Dynamo cannot safely trace script object due to graph break."
)
def call_method(
self,
tx: "InstructionTranslator",
name: str,
args: Iterable[Any],
kwargs: dict[str, Any],
) -> VariableTracker:
unimplemented(
gb_type="Weird method call on TorchScript object",
context=f"value={self.value}, method={name}",
explanation=(
f"This particular method call ({name}) is not supported (e.g. calling `__setattr__`). "
"Most method calls to TorchScript objects should be supported."
),
hints=[
"Avoid calling this method.",
],
)
| TorchScriptObjectVariable |
python | marshmallow-code__apispec | tests/test_ext_marshmallow.py | {
"start": 48348,
"end": 48693
} | class ____:
def test_circular_referencing_schemas(self, spec):
spec.components.schema("Analysis", schema=AnalysisSchema)
definitions = get_schemas(spec)
ref = definitions["Analysis"]["properties"]["sample"]
assert ref == build_ref(spec, "schema", "Sample")
# Regression tests for issue #55
| TestCircularReference |
python | networkx__networkx | benchmarks/benchmarks/benchmark_to_networkx_graph.py | {
"start": 24,
"end": 1294
} | class ____:
params = [nx.Graph, nx.DiGraph]
param_names = ["graph_type"]
def setup(self, graph_type):
self.edges = [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6)]
def time_to_networkx_graph_direct(self, graph_type):
_ = nx.to_networkx_graph(self.edges, create_using=graph_type)
def time_to_networkx_graph_via_constructor(self, graph_type):
_ = graph_type(self.edges)
### NOTE: Multi-instance checks are explicitly included to cover the case
# where many graph instances are created, which is not uncommon in graph
# analysis. The reason why multi-instance is explicitly probed (rather than
# relying solely on the number of repeats/runs from `timeit` in the benchmark
# suite) is to capture/amplify any distinctions from potential import
# caching of the try-excepts in the *same* run
def time_to_networkx_graph_direct_multi_instance(self, graph_type):
for _ in range(500): # Creating many graph instances
_ = nx.to_networkx_graph(self.edges, create_using=graph_type)
def time_to_networkx_graph_via_constructor_multi_instance(self, graph_type):
for _ in range(500): # Creating many graph instances
_ = graph_type(self.edges)
| ToNetworkXGraphBenchmark |
python | google__jax | tests/string_array_test.py | {
"start": 841,
"end": 6894
} | class ____(jtu.JaxTestCase):
def make_test_string_array(self, device=None):
"""Makes and returns a simple 2x1 string array on the first CPU device."""
if device is None:
cpu_devices = jax.devices("cpu")
if len(cpu_devices) < 1:
self.skipTest(
"Skipping this test because no CPU devices are available."
)
device = cpu_devices[0]
numpy_string_array = np.array(
["abcd", "efgh"], dtype=np.dtypes.StringDType() # type: ignore
)
jax_string_array = jax.device_put(numpy_string_array, device=device)
jax_string_array.block_until_ready()
return jax_string_array
@parameterized.named_parameters(
("asarray", True),
("device_put", False),
)
@jtu.run_on_devices("cpu")
def test_single_device_array(self, asarray):
cpu_devices = jax.devices("cpu")
if len(cpu_devices) < 1:
self.skipTest("Skipping this test because no CPU devices are available.")
numpy_string_array = np.array(
["abcdefghijklmnopqrstuvwxyz", "cba"], dtype=np.dtypes.StringDType() # type: ignore
)
if asarray:
jax_string_array = jnp.asarray(numpy_string_array, device=cpu_devices[0])
else:
jax_string_array = jax.device_put(
numpy_string_array, device=cpu_devices[0]
)
jax_string_array.block_until_ready()
array_read_back = jax.device_get(jax_string_array)
self.assertEqual(array_read_back.dtype, np.dtypes.StringDType()) # type: ignore
np.testing.assert_array_equal(array_read_back, numpy_string_array)
@parameterized.named_parameters(
("asarray", True),
("device_put", False),
)
@jtu.run_on_devices("cpu")
def test_multi_device_array(self, asarray):
cpu_devices = jax.devices("cpu")
if len(cpu_devices) < 2:
self.skipTest(
f"Skipping this test because only {len(cpu_devices)} host"
" devices are available. Need at least 2."
)
numpy_string_array = np.array(
[["abcd", "efgh"], ["ijkl", "mnop"]], dtype=np.dtypes.StringDType() # type: ignore
)
mesh = jax.sharding.Mesh(np.array(cpu_devices)[:2].reshape((2, 1)), ("x", "y"))
sharding = jax.sharding.NamedSharding(
mesh, jax.sharding.PartitionSpec("x", "y")
)
if asarray:
jax_string_array = jnp.asarray(numpy_string_array, device=sharding)
else:
jax_string_array = jax.device_put(numpy_string_array, device=sharding)
jax_string_array.block_until_ready()
array_read_back = jax.device_get(jax_string_array)
self.assertEqual(array_read_back.dtype, np.dtypes.StringDType()) # type: ignore
np.testing.assert_array_equal(array_read_back, numpy_string_array)
@jtu.run_on_devices("cpu")
def test_dtype_conversions(self):
cpu_devices = jax.devices("cpu")
if len(cpu_devices) < 1:
self.skipTest("Skipping this test because no CPU devices are available.")
# Explicitly specifying the dtype should work with StringDType numpy arrays.
numpy_string_array = np.array(
["abcd", "efgh"], dtype=np.dtypes.StringDType() # type: ignore
)
jax_string_array = jnp.asarray(
numpy_string_array,
device=cpu_devices[0],
dtype=np.dtypes.StringDType(),
) # type: ignore
jax_string_array.block_until_ready()
# Cannot make a non-StringDType array from a StringDType numpy array.
with self.assertRaisesRegex(
TypeError,
r"Cannot make an array with dtype bfloat16 from an object with dtype"
r" StringDType.*",
):
jnp.asarray(
numpy_string_array,
device=cpu_devices[0],
dtype=jnp.bfloat16,
)
# Cannot make a StringDType array from a numeric numpy array.
numpy_int_array = np.arange(2, dtype=np.int32)
with self.assertRaisesRegex(
TypeError,
r"Cannot make an array with dtype StringDType.*from an object with"
r" dtype int32.",
):
jnp.asarray(
numpy_int_array,
device=cpu_devices[0],
dtype=np.dtypes.StringDType(), # type: ignore
)
@parameterized.named_parameters(
("asarray", True),
("device_put", False),
)
@jtu.skip_on_devices("cpu")
def test_string_array_cannot_be_non_cpu_devices(self, asarray):
devices = jax.devices()
if len(devices) < 1:
self.skipTest("Skipping this test because no devices are available.")
numpy_string_array = np.array(
["abcdefghijklmnopqrstuvwxyz", "cba"], dtype=np.dtypes.StringDType() # type: ignore
)
with self.assertRaisesRegex(
TypeError, "String arrays can only be sharded to CPU devices"
):
if asarray:
jax_string_array = jnp.asarray(numpy_string_array, device=devices[0])
else:
jax_string_array = jax.device_put(numpy_string_array, device=devices[0])
jax_string_array.block_until_ready()
def test_jit_fails_with_string_arrays(self):
f = jax.jit(lambda x: x)
input_array = self.make_test_string_array()
self.assertRaisesRegex(
TypeError,
r"Argument.*is not a valid JAX type.",
lambda: f(input_array),
)
def test_grad_fails_with_string_arrays(self):
f = jax.grad(lambda x: x)
input_array = self.make_test_string_array()
self.assertRaisesRegex(
TypeError,
r"Argument.*is not a valid JAX type.",
lambda: f(input_array),
)
def test_vmap_without_jit_works_with_string_arrays(self):
f = jax.vmap(lambda x: x)
input_array = self.make_test_string_array()
output_array = f(input_array)
self.assertEqual(output_array.dtype, input_array.dtype)
np.testing.assert_array_equal(output_array, input_array)
def test_vmap_with_jit_fails_with_string_arrays(self):
f = jax.vmap(lambda x: x + jnp.arange(2))
input_array = self.make_test_string_array()
self.assertRaisesRegex(
ValueError,
r".*StringDType.*is not a valid dtype",
lambda: f(input_array),
)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| StringArrayTest |
python | openai__openai-python | src/openai/types/responses/response_computer_tool_call_param.py | {
"start": 574,
"end": 1105
} | class ____(TypedDict, total=False):
button: Required[Literal["left", "right", "wheel", "back", "forward"]]
"""Indicates which mouse button was pressed during the click.
One of `left`, `right`, `wheel`, `back`, or `forward`.
"""
type: Required[Literal["click"]]
"""Specifies the event type. For a click action, this property is always `click`."""
x: Required[int]
"""The x-coordinate where the click occurred."""
y: Required[int]
"""The y-coordinate where the click occurred."""
| ActionClick |
python | sqlalchemy__sqlalchemy | test/orm/test_of_type.py | {
"start": 21786,
"end": 35982
} | class ____(
testing.AssertsCompiledSQL, fixtures.DeclarativeMappedTest
):
"""There's overlap here vs. the ones above."""
run_setup_classes = "once"
run_setup_mappers = "once"
run_inserts = "once"
run_deletes = None
__dialect__ = "default"
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Job(ComparableEntity, Base):
__tablename__ = "job"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
type = Column(String(10))
widget_id = Column(ForeignKey("widget.id"))
widget = relationship("Widget")
container_id = Column(Integer, ForeignKey("data_container.id"))
__mapper_args__ = {"polymorphic_on": type}
class SubJob(Job):
__tablename__ = "subjob"
id = Column(Integer, ForeignKey("job.id"), primary_key=True)
attr = Column(String(10))
__mapper_args__ = {"polymorphic_identity": "sub"}
class ParentThing(ComparableEntity, Base):
__tablename__ = "parent"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
container_id = Column(Integer, ForeignKey("data_container.id"))
container = relationship("DataContainer")
class DataContainer(ComparableEntity, Base):
__tablename__ = "data_container"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(10))
jobs = relationship(Job, order_by=Job.id)
class Widget(ComparableEntity, Base):
__tablename__ = "widget"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(10))
@classmethod
def insert_data(cls, connection):
s = Session(connection)
s.add_all(cls._fixture())
s.commit()
@classmethod
def _fixture(cls):
ParentThing, DataContainer, SubJob, Widget = (
cls.classes.ParentThing,
cls.classes.DataContainer,
cls.classes.SubJob,
cls.classes.Widget,
)
return [
ParentThing(
container=DataContainer(
name="d1",
jobs=[
SubJob(attr="s1", widget=Widget(name="w1")),
SubJob(attr="s2", widget=Widget(name="w2")),
],
)
),
ParentThing(
container=DataContainer(
name="d2",
jobs=[
SubJob(attr="s3", widget=Widget(name="w3")),
SubJob(attr="s4", widget=Widget(name="w4")),
],
)
),
]
@classmethod
def _dc_fixture(cls):
return [p.container for p in cls._fixture()]
def test_contains_eager_wpoly(self):
DataContainer, Job, SubJob = (
self.classes.DataContainer,
self.classes.Job,
self.classes.SubJob,
)
Job_P = with_polymorphic(Job, SubJob, aliased=True)
s = Session(testing.db)
q = (
s.query(DataContainer)
.join(DataContainer.jobs.of_type(Job_P))
.options(contains_eager(DataContainer.jobs.of_type(Job_P)))
)
def go():
eq_(q.all(), self._dc_fixture())
self.assert_sql_count(testing.db, go, 5)
def test_joinedload_wpoly(self):
DataContainer, Job, SubJob = (
self.classes.DataContainer,
self.classes.Job,
self.classes.SubJob,
)
Job_P = with_polymorphic(Job, SubJob, aliased=True)
s = Session(testing.db)
q = s.query(DataContainer).options(
joinedload(DataContainer.jobs.of_type(Job_P))
)
def go():
eq_(q.all(), self._dc_fixture())
self.assert_sql_count(testing.db, go, 5)
def test_joinedload_wsubclass(self):
DataContainer, SubJob = (
self.classes.DataContainer,
self.classes.SubJob,
)
s = Session(testing.db)
q = s.query(DataContainer).options(
joinedload(DataContainer.jobs.of_type(SubJob))
)
def go():
eq_(q.all(), self._dc_fixture())
self.assert_sql_count(testing.db, go, 5)
def test_lazyload(self):
DataContainer = self.classes.DataContainer
s = Session(testing.db)
q = s.query(DataContainer)
def go():
eq_(q.all(), self._dc_fixture())
# SELECT data container
# SELECT job * 2 container rows
# SELECT subjob * 4 rows
# SELECT widget * 4 rows
self.assert_sql_count(testing.db, go, 11)
def test_subquery_wsubclass(self):
DataContainer, SubJob = (
self.classes.DataContainer,
self.classes.SubJob,
)
s = Session(testing.db)
q = s.query(DataContainer).options(
subqueryload(DataContainer.jobs.of_type(SubJob))
)
def go():
eq_(q.all(), self._dc_fixture())
self.assert_sql_count(testing.db, go, 6)
def test_twolevel_subqueryload_wsubclass(self):
ParentThing, DataContainer, SubJob = (
self.classes.ParentThing,
self.classes.DataContainer,
self.classes.SubJob,
)
s = Session(testing.db)
q = s.query(ParentThing).options(
subqueryload(ParentThing.container).subqueryload(
DataContainer.jobs.of_type(SubJob)
)
)
def go():
eq_(q.all(), self._fixture())
self.assert_sql_count(testing.db, go, 7)
def test_twolevel_subqueryload_wsubclass_mapper_term(self):
DataContainer, SubJob = self.classes.DataContainer, self.classes.SubJob
s = Session(testing.db)
sj_alias = aliased(SubJob)
q = s.query(DataContainer).options(
subqueryload(DataContainer.jobs.of_type(sj_alias)).subqueryload(
sj_alias.widget
)
)
def go():
eq_(q.all(), self._dc_fixture())
self.assert_sql_count(testing.db, go, 3)
def test_twolevel_joinedload_wsubclass(self):
ParentThing, DataContainer, SubJob = (
self.classes.ParentThing,
self.classes.DataContainer,
self.classes.SubJob,
)
s = Session(testing.db)
q = s.query(ParentThing).options(
joinedload(ParentThing.container).joinedload(
DataContainer.jobs.of_type(SubJob)
)
)
def go():
eq_(q.all(), self._fixture())
self.assert_sql_count(testing.db, go, 5)
def test_any_wpoly(self):
DataContainer, Job, SubJob = (
self.classes.DataContainer,
self.classes.Job,
self.classes.SubJob,
)
Job_P = with_polymorphic(Job, SubJob, aliased=True, flat=True)
s = fixture_session()
q = (
s.query(Job)
.join(DataContainer.jobs)
.filter(DataContainer.jobs.of_type(Job_P).any(Job_P.id < Job.id))
)
self.assert_compile(
q,
"SELECT job.id AS job_id, job.type AS job_type, "
"job.widget_id AS job_widget_id, "
"job.container_id "
"AS job_container_id "
"FROM data_container "
"JOIN job ON data_container.id = job.container_id "
"WHERE EXISTS (SELECT 1 "
"FROM job AS job_1 LEFT OUTER JOIN subjob AS subjob_1 "
"ON job_1.id = subjob_1.id "
"WHERE data_container.id = job_1.container_id "
"AND job_1.id < job.id)",
)
def test_any_walias(self):
(
DataContainer,
Job,
) = (self.classes.DataContainer, self.classes.Job)
Job_A = aliased(Job)
s = fixture_session()
q = (
s.query(Job)
.join(DataContainer.jobs)
.filter(
DataContainer.jobs.of_type(Job_A).any(
and_(Job_A.id < Job.id, Job_A.type == "fred")
)
)
)
self.assert_compile(
q,
"SELECT job.id AS job_id, job.type AS job_type, "
"job.widget_id AS job_widget_id, "
"job.container_id AS job_container_id "
"FROM data_container JOIN job "
"ON data_container.id = job.container_id "
"WHERE EXISTS (SELECT 1 "
"FROM job AS job_1 "
"WHERE data_container.id = job_1.container_id "
"AND job_1.id < job.id AND job_1.type = :type_1)",
)
def test_join_wpoly(self):
DataContainer, Job, SubJob = (
self.classes.DataContainer,
self.classes.Job,
self.classes.SubJob,
)
Job_P = with_polymorphic(Job, SubJob)
s = fixture_session()
q = s.query(DataContainer).join(DataContainer.jobs.of_type(Job_P))
self.assert_compile(
q,
"SELECT data_container.id AS data_container_id, "
"data_container.name AS data_container_name "
"FROM data_container JOIN "
"(job LEFT OUTER JOIN subjob "
"ON job.id = subjob.id) "
"ON data_container.id = job.container_id",
)
def test_join_wsubclass(self):
DataContainer, SubJob = (
self.classes.DataContainer,
self.classes.SubJob,
)
s = fixture_session()
q = s.query(DataContainer).join(DataContainer.jobs.of_type(SubJob))
# note the of_type() here renders JOIN for the Job->SubJob.
# this is because it's using the SubJob mapper directly within
# query.join(). When we do joinedload() etc., we're instead
# doing a with_polymorphic(), and there we need the join to be
# outer by default.
self.assert_compile(
q,
"SELECT data_container.id AS data_container_id, "
"data_container.name AS data_container_name "
"FROM data_container JOIN (job JOIN subjob ON job.id = subjob.id) "
"ON data_container.id = job.container_id",
)
def test_join_wpoly_innerjoin(self):
DataContainer, Job, SubJob = (
self.classes.DataContainer,
self.classes.Job,
self.classes.SubJob,
)
Job_P = with_polymorphic(Job, SubJob, innerjoin=True)
s = fixture_session()
q = s.query(DataContainer).join(DataContainer.jobs.of_type(Job_P))
self.assert_compile(
q,
"SELECT data_container.id AS data_container_id, "
"data_container.name AS data_container_name "
"FROM data_container JOIN "
"(job JOIN subjob ON job.id = subjob.id) "
"ON data_container.id = job.container_id",
)
def test_join_walias(self):
(
DataContainer,
Job,
) = (self.classes.DataContainer, self.classes.Job)
Job_A = aliased(Job)
s = fixture_session()
q = s.query(DataContainer).join(DataContainer.jobs.of_type(Job_A))
self.assert_compile(
q,
"SELECT data_container.id AS data_container_id, "
"data_container.name AS data_container_name "
"FROM data_container JOIN job AS job_1 "
"ON data_container.id = job_1.container_id",
)
def test_join_explicit_wpoly_noalias(self):
DataContainer, Job, SubJob = (
self.classes.DataContainer,
self.classes.Job,
self.classes.SubJob,
)
Job_P = with_polymorphic(Job, SubJob)
s = fixture_session()
q = s.query(DataContainer).join(Job_P, DataContainer.jobs)
self.assert_compile(
q,
"SELECT data_container.id AS data_container_id, "
"data_container.name AS data_container_name "
"FROM data_container JOIN "
"(job LEFT OUTER JOIN subjob "
"ON job.id = subjob.id) "
"ON data_container.id = job.container_id",
)
def test_join_explicit_wpoly_flat(self):
DataContainer, Job, SubJob = (
self.classes.DataContainer,
self.classes.Job,
self.classes.SubJob,
)
Job_P = with_polymorphic(Job, SubJob, flat=True)
s = fixture_session()
q = s.query(DataContainer).join(Job_P, DataContainer.jobs)
self.assert_compile(
q,
"SELECT data_container.id AS data_container_id, "
"data_container.name AS data_container_name "
"FROM data_container JOIN "
"(job AS job_1 LEFT OUTER JOIN subjob AS subjob_1 "
"ON job_1.id = subjob_1.id) "
"ON data_container.id = job_1.container_id",
)
def test_join_explicit_wpoly_full_alias(self):
DataContainer, Job, SubJob = (
self.classes.DataContainer,
self.classes.Job,
self.classes.SubJob,
)
Job_P = with_polymorphic(Job, SubJob, aliased=True)
s = fixture_session()
q = s.query(DataContainer).join(Job_P, DataContainer.jobs)
self.assert_compile(
q,
"SELECT data_container.id AS data_container_id, "
"data_container.name AS data_container_name "
"FROM data_container JOIN "
"(SELECT job.id AS job_id, job.type AS job_type, "
"job.widget_id AS job_widget_id, "
"job.container_id AS job_container_id, "
"subjob.id AS subjob_id, subjob.attr AS subjob_attr "
"FROM job LEFT OUTER JOIN subjob ON job.id = subjob.id) "
"AS anon_1 ON data_container.id = anon_1.job_container_id",
)
| SubclassRelationshipTest |
python | sqlalchemy__sqlalchemy | test/sql/test_resultset.py | {
"start": 2335,
"end": 75459
} | class ____(fixtures.TablesTest):
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column(
"user_id", INT, primary_key=True, test_needs_autoincrement=True
),
Column("user_name", VARCHAR(20)),
test_needs_acid=True,
)
Table(
"addresses",
metadata,
Column(
"address_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("user_id", Integer, ForeignKey("users.user_id")),
Column("address", String(30)),
test_needs_acid=True,
)
Table(
"users2",
metadata,
Column("user_id", INT, primary_key=True),
Column("user_name", VARCHAR(20)),
test_needs_acid=True,
)
Table(
"test",
metadata,
Column(
"x", Integer, primary_key=True, test_needs_autoincrement=False
),
Column("y", String(50)),
)
@testing.variation(
"type_", ["text", "driversql", "core", "textstar", "driverstar"]
)
def test_freeze(self, type_, connection):
"""test #8963"""
users = self.tables.users
connection.execute(
users.insert(),
[
dict(user_id=1, user_name="john"),
dict(user_id=2, user_name="jack"),
],
)
if type_.core:
stmt = select(users).order_by(users.c.user_id)
else:
if "star" in type_.name:
stmt = "select * from users order by user_id"
else:
stmt = "select user_id, user_name from users order by user_id"
if "text" in type_.name:
stmt = text(stmt)
if "driver" in type_.name:
result = connection.exec_driver_sql(stmt)
else:
result = connection.execute(stmt)
frozen = result.freeze()
unfrozen = frozen()
eq_(unfrozen.keys(), ["user_id", "user_name"])
eq_(unfrozen.all(), [(1, "john"), (2, "jack")])
unfrozen = frozen()
eq_(
unfrozen.mappings().all(),
[
{"user_id": 1, "user_name": "john"},
{"user_id": 2, "user_name": "jack"},
],
)
@testing.requires.insert_executemany_returning
@testing.variation("filters", ["unique", "sliced", "plain"])
def test_splice_horizontally(self, connection, filters):
users = self.tables.users
addresses = self.tables.addresses
if filters.unique:
r1 = connection.execute(
users.insert().returning(users.c.user_name),
[
dict(user_id=1, user_name="john"),
dict(user_id=2, user_name="john"),
],
)
r2 = connection.execute(
addresses.insert().returning(
addresses.c.address,
),
[
dict(address_id=1, user_id=1, address="foo@bar.com"),
dict(address_id=2, user_id=2, address="foo@bar.com"),
],
)
else:
r1 = connection.execute(
users.insert().returning(users.c.user_name, users.c.user_id),
[
dict(user_id=1, user_name="john"),
dict(user_id=2, user_name="jack"),
],
)
r2 = connection.execute(
addresses.insert().returning(
addresses.c.address_id,
addresses.c.address,
addresses.c.user_id,
),
[
dict(address_id=1, user_id=1, address="foo@bar.com"),
dict(address_id=2, user_id=2, address="bar@bat.com"),
],
)
if filters.sliced:
r1 = r1.columns(users.c.user_name)
r2 = r2.columns(addresses.c.address, addresses.c.user_id)
elif filters.unique:
r1 = r1.unique()
r2 = r2.unique()
rows = r1.splice_horizontally(r2).all()
if filters.sliced:
eq_(
rows,
[
("john", "foo@bar.com", 1),
("jack", "bar@bat.com", 2),
],
)
eq_(rows[0]._mapping[users.c.user_name], "john")
eq_(rows[0].address, "foo@bar.com")
elif filters.unique:
eq_(
rows,
[
("john", "foo@bar.com"),
],
)
eq_(rows[0]._mapping[users.c.user_name], "john")
eq_(rows[0].address, "foo@bar.com")
elif filters.plain:
eq_(
rows,
[
("john", 1, 1, "foo@bar.com", 1),
("jack", 2, 2, "bar@bat.com", 2),
],
)
eq_(rows[0]._mapping[users.c.user_id], 1)
eq_(rows[0]._mapping[addresses.c.user_id], 1)
eq_(rows[1].address, "bar@bat.com")
with expect_raises_message(
exc.InvalidRequestError, "Ambiguous column name 'user_id'"
):
rows[0].user_id
else:
filters.fail()
def test_keys_no_rows(self, connection):
for i in range(2):
r = connection.execute(
text("update users set user_name='new' where user_id=10")
)
with expect_raises_message(
exc.ResourceClosedError,
"This result object does not return rows",
):
r.keys()
def test_row_keys_removed(self, connection):
r = connection.execute(
text("select * from users where user_id=2")
).first()
with expect_raises(AttributeError):
r.keys()
def test_row_contains_key_no_strings(self, connection):
users = self.tables.users
connection.execute(
users.insert(),
[
dict(user_id=1, user_name="john"),
dict(user_id=2, user_name="jack"),
],
)
r = connection.execute(
text("select * from users where user_id=2")
).first()
not_in("user_name", r)
in_("user_name", r._mapping)
not_in("foobar", r)
not_in("foobar", r._mapping)
def test_row_iteration(self, connection):
users = self.tables.users
connection.execute(
users.insert(),
[
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
{"user_id": 9, "user_name": "fred"},
],
)
r = connection.execute(users.select())
rows = []
for row in r:
rows.append(row)
eq_(len(rows), 3)
def test_scalars(self, connection):
users = self.tables.users
connection.execute(
users.insert(),
[
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
{"user_id": 9, "user_name": "fred"},
],
)
r = connection.scalars(users.select().order_by(users.c.user_id))
eq_(r.all(), [7, 8, 9])
@expect_deprecated(".*is deprecated, Row now behaves like a tuple.*")
def test_result_tuples(self, connection):
users = self.tables.users
connection.execute(
users.insert(),
[
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
{"user_id": 9, "user_name": "fred"},
],
)
r = connection.execute(
users.select().order_by(users.c.user_id)
).tuples()
eq_(r.all(), [(7, "jack"), (8, "ed"), (9, "fred")])
@expect_deprecated(".*is deprecated, Row now behaves like a tuple.*")
def test_row_tuple(self, connection):
    """Row._t / Row._tuple() return the plain tuple form; the public
    ``.t`` / ``.tuple()`` spellings each raise their own deprecation
    warning, asserted individually below."""
    users = self.tables.users
    connection.execute(
        users.insert(),
        [
            {"user_id": 7, "user_name": "jack"},
            {"user_id": 8, "user_name": "ed"},
            {"user_id": 9, "user_name": "fred"},
        ],
    )
    r = connection.execute(users.select().order_by(users.c.user_id)).all()
    exp = [(7, "jack"), (8, "ed"), (9, "fred")]
    eq_([row._t for row in r], exp)
    eq_([row._tuple() for row in r], exp)
    with assertions.expect_deprecated(
        r"The Row.t attribute is deprecated in favor of Row._t"
    ):
        eq_([row.t for row in r], exp)
    with assertions.expect_deprecated(
        r"The Row.tuple\(\) method is deprecated in "
        r"favor of Row._tuple\(\)"
    ):
        eq_([row.tuple() for row in r], exp)
def test_row_next(self, connection):
    """Results support the iterator protocol directly: ``next()``
    with a default sentinel returns the sentinel on exhaustion."""
    users = self.tables.users
    connection.execute(
        users.insert(),
        [
            {"user_id": 7, "user_name": "jack"},
            {"user_id": 8, "user_name": "ed"},
            {"user_id": 9, "user_name": "fred"},
        ],
    )
    r = connection.execute(users.select())
    rows = []
    while True:
        # "foo" is the exhaustion sentinel passed as next()'s default
        row = next(r, "foo")
        if row == "foo":
            break
        rows.append(row)
    eq_(len(rows), 3)
@testing.requires.subqueries
def test_anonymous_rows(self, connection):
    """Unlabeled expressions are keyed ``anon_1``, ``anon_2``... in
    ``row._mapping``."""
    users = self.tables.users
    connection.execute(
        users.insert(),
        [
            {"user_id": 7, "user_name": "jack"},
            {"user_id": 8, "user_name": "ed"},
            {"user_id": 9, "user_name": "fred"},
        ],
    )
    sel = (
        select(users.c.user_id)
        .where(users.c.user_name == "jack")
        .scalar_subquery()
    )
    for row in connection.execute(select(sel + 1, sel + 3)):
        eq_(row._mapping["anon_1"], 8)
        eq_(row._mapping["anon_2"], 10)
def test_row_comparison(self, connection):
    """Rows compare exactly like tuples: against themselves, against
    plain tuples, and against arbitrary objects for every rich
    comparison operator."""
    users = self.tables.users
    connection.execute(users.insert(), dict(user_id=7, user_name="jack"))
    rp = connection.execute(users.select()).first()
    eq_(rp, rp)
    is_(not (rp != rp), True)
    equal = (7, "jack")
    eq_(rp, equal)
    eq_(equal, rp)
    is_((not (rp != equal)), True)
    is_(not (equal != equal), True)

    # an infinite generator must compare unequal without hanging
    def endless():
        while True:
            yield 1

    ne_(rp, endless())
    ne_(endless(), rp)
    # test that everything compares the same
    # as it would against a tuple
    for compare in [False, 8, endless(), "xyz", (7, "jack")]:
        for op in [
            operator.eq,
            operator.ne,
            operator.gt,
            operator.lt,
            operator.ge,
            operator.le,
        ]:
            try:
                control = op(equal, compare)
            except TypeError:
                # Py3K raises TypeError for some invalid comparisons
                assert_raises(TypeError, op, rp, compare)
            else:
                eq_(control, op(rp, compare))
            try:
                control = op(compare, equal)
            except TypeError:
                # Py3K raises TypeError for some invalid comparisons
                assert_raises(TypeError, op, compare, rp)
            else:
                eq_(control, op(compare, rp))
@testing.provide_metadata
def test_column_label_overlap_fallback(self, connection):
    """Column objects whose names merely overlap a result label are
    not members of ``row._mapping`` — no string-name fallback."""
    content = Table("content", self.metadata, Column("type", String(30)))
    bar = Table("bar", self.metadata, Column("content_type", String(30)))
    self.metadata.create_all(connection)
    connection.execute(content.insert().values(type="t1"))

    row = connection.execute(
        content.select().set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
    ).first()
    in_(content.c.type, row._mapping)
    not_in(bar.c.content_type, row._mapping)

    # in 1.x, would warn for string match, but return a result
    not_in(sql.column("content_type"), row)

    not_in(bar.c.content_type, row._mapping)

    row = connection.execute(
        select(func.now().label("content_type"))
    ).first()

    not_in(content.c.type, row._mapping)

    not_in(bar.c.content_type, row._mapping)

    # in 1.x, would warn for string match, but return a result
    not_in(sql.column("content_type"), row._mapping)
def _pickle_row_data(self, connection, use_labels):
    """Insert three fixture users and return them as a list of Rows,
    selected with table-prefixed labels when *use_labels* is true."""
    users = self.tables.users

    connection.execute(
        users.insert(),
        [
            {"user_id": 7, "user_name": "jack"},
            {"user_id": 8, "user_name": "ed"},
            {"user_id": 9, "user_name": "fred"},
        ],
    )

    result = connection.execute(
        users.select()
        .order_by(users.c.user_id)
        .set_label_style(
            LABEL_STYLE_TABLENAME_PLUS_COL
            if use_labels
            else LABEL_STYLE_NONE
        )
    ).all()
    return result
@testing.variation("use_pickle", [True, False])
@testing.variation("use_labels", [True, False])
def test_pickled_rows(self, connection, use_pickle, use_labels):
    """Rows survive a pickle round trip: string-key lookup still
    works, but lookup by ColumnElement raises after unpickling."""
    users = self.tables.users
    addresses = self.tables.addresses

    result = self._pickle_row_data(connection, use_labels)

    if use_pickle:
        result = pickle.loads(pickle.dumps(result))

    eq_(result, [(7, "jack"), (8, "ed"), (9, "fred")])
    if use_labels:
        eq_(result[0]._mapping["users_user_id"], 7)
        eq_(
            list(result[0]._fields),
            ["users_user_id", "users_user_name"],
        )
    else:
        eq_(result[0]._mapping["user_id"], 7)
        eq_(list(result[0]._fields), ["user_id", "user_name"])

    eq_(result[0][0], 7)

    assert_raises(
        exc.NoSuchColumnError,
        lambda: result[0]._mapping["fake key"],
    )

    # previously would warn

    if use_pickle:
        with expect_raises_message(
            exc.NoSuchColumnError,
            "Row was unpickled; lookup by ColumnElement is unsupported",
        ):
            result[0]._mapping[users.c.user_id]
    else:
        eq_(result[0]._mapping[users.c.user_id], 7)

    if use_pickle:
        with expect_raises_message(
            exc.NoSuchColumnError,
            "Row was unpickled; lookup by ColumnElement is unsupported",
        ):
            result[0]._mapping[users.c.user_name]
    else:
        eq_(result[0]._mapping[users.c.user_name], "jack")

    # columns from an unrelated table never resolve
    assert_raises(
        exc.NoSuchColumnError,
        lambda: result[0]._mapping[addresses.c.user_id],
    )

    assert_raises(
        exc.NoSuchColumnError,
        lambda: result[0]._mapping[addresses.c.address_id],
    )
@testing.variation("use_labels", [True, False])
def test_pickle_rows_other_process(self, connection, use_labels):
    """Rows pickled here can be unpickled in a separate Python process.

    Dumps the rows to a temp file, spawns a subprocess that unpickles
    them and prints the first column of each row, then compares the
    child's stdout with the locally computed values.
    """
    result = self._pickle_row_data(connection, use_labels)

    fd, name = mkstemp("pkl")
    try:
        # don't shadow the fd with the file object (original code
        # reused the name "f" for both)
        with os.fdopen(fd, "wb") as fh:
            pickle.dump(result, fh)
        # forward slashes so the path survives embedding in source code
        name = name.replace(os.sep, "/")
        code = (
            "import sqlalchemy; import pickle; print(["
            f"r[0] for r in pickle.load(open('''{name}''', 'rb'))])"
        )
        # propagate sys.path (plus any PYTHONPATH) so the child can
        # import sqlalchemy from the same location as this process
        parts = list(sys.path)
        if os.environ.get("PYTHONPATH"):
            parts.append(os.environ["PYTHONPATH"])
        pythonpath = os.pathsep.join(parts)
        proc = subprocess.run(
            [sys.executable, "-c", code],
            stdout=subprocess.PIPE,
            env={**os.environ, "PYTHONPATH": pythonpath},
        )
        exp = str([r[0] for r in result]).encode()
        eq_(proc.returncode, 0)
        eq_(proc.stdout.strip(), exp)
    finally:
        # original leaked the temp file if any assertion above failed
        os.unlink(name)
def test_column_error_printing(self, connection):
    """NoSuchColumnError messages render the missing key — string,
    Column, expression — with a fallback for objects whose __str__
    raises."""
    result = connection.execute(select(1))
    row = result.first()

    # object whose string form cannot be rendered in the error message
    class unprintable:
        def __str__(self):
            raise ValueError("nope")

    msg = r"Could not locate column in row for column '%s'"

    for accessor, repl in [
        ("x", "x"),
        (Column("q", Integer), "q"),
        (Column("q", Integer) + 12, r"q \+ :q_1"),
        (unprintable(), "unprintable element.*"),
    ]:
        assert_raises_message(
            exc.NoSuchColumnError, msg % repl, result._getter, accessor
        )

        # raiseerr=False returns None instead of raising
        is_(result._getter(accessor, False), None)

        assert_raises_message(
            exc.NoSuchColumnError,
            msg % repl,
            lambda: row._mapping[accessor],
        )
def test_fetchmany(self, connection):
    """fetchmany(size=N) returns at most N rows."""
    users = self.tables.users
    connection.execute(
        users.insert(),
        [{"user_id": i, "user_name": "n%d" % i} for i in range(7, 15)],
    )
    result = connection.execute(users.select())
    batch = [row for row in result.fetchmany(size=2)]
    eq_(len(batch), 2)
@testing.requires.arraysize
def test_fetchmany_arraysize_default(self, connection):
    """fetchmany() with no size uses the DBAPI cursor's arraysize."""
    users = self.tables.users
    connection.execute(
        users.insert(),
        [{"user_id": i, "user_name": "n%d" % i} for i in range(1, 150)],
    )
    r = connection.execute(users.select())
    arraysize = r.cursor.arraysize
    rows = list(r.fetchmany())
    # can't exceed the 149 rows actually inserted
    eq_(len(rows), min(arraysize, 150))
@testing.requires.arraysize
def test_fetchmany_arraysize_set(self, connection):
    """Setting cursor.arraysize directly controls the default
    fetchmany() batch size."""
    users = self.tables.users
    connection.execute(
        users.insert(),
        [{"user_id": i, "user_name": "n%d" % i} for i in range(7, 15)],
    )
    r = connection.execute(users.select())
    r.cursor.arraysize = 4
    rows = list(r.fetchmany())
    eq_(len(rows), 4)
def test_column_slices(self, connection):
    """Rows support tuple-style slicing, including negative indexes."""
    users = self.tables.users
    addresses = self.tables.addresses
    connection.execute(users.insert(), dict(user_id=1, user_name="john"))
    connection.execute(users.insert(), dict(user_id=2, user_name="jack"))
    connection.execute(
        addresses.insert(),
        dict(address_id=1, user_id=2, address="foo@bar.com"),
    )

    r = connection.execute(text("select * from addresses")).first()
    eq_(r[0:1], (1,))
    eq_(r[1:], (2, "foo@bar.com"))
    eq_(r[:-1], (1, 2))
def test_mappings(self, connection):
    """result.mappings() exposes each row as a dict-like mapping."""
    users = self.tables.users
    addresses = self.tables.addresses
    connection.execute(users.insert(), dict(user_id=1, user_name="john"))
    connection.execute(users.insert(), dict(user_id=2, user_name="jack"))
    connection.execute(
        addresses.insert(),
        dict(address_id=1, user_id=2, address="foo@bar.com"),
    )

    result = connection.execute(text("select * from addresses"))
    expected = [{"address_id": 1, "user_id": 2, "address": "foo@bar.com"}]
    eq_(result.mappings().all(), expected)
def test_column_accessor_basic_compiled_mapping(self, connection):
    """For a compiled select, row values are reachable by attribute,
    string key, and Column object."""
    users = self.tables.users
    connection.execute(
        users.insert(),
        [
            dict(user_id=1, user_name="john"),
            dict(user_id=2, user_name="jack"),
        ],
    )
    r = connection.execute(
        users.select().where(users.c.user_id == 2)
    ).first()
    eq_(r.user_id, 2)
    eq_(r._mapping["user_id"], 2)
    eq_(r._mapping[users.c.user_id], 2)
    eq_(r.user_name, "jack")
    eq_(r._mapping["user_name"], "jack")
    eq_(r._mapping[users.c.user_name], "jack")
def test_column_accessor_basic_compiled_traditional(self, connection):
    """Same accessor checks as the _mapping variant, exercising the
    traditional execution path."""
    users = self.tables.users
    connection.execute(
        users.insert(),
        [
            dict(user_id=1, user_name="john"),
            dict(user_id=2, user_name="jack"),
        ],
    )

    r = connection.execute(
        users.select().where(users.c.user_id == 2)
    ).first()

    eq_(r.user_id, 2)
    eq_(r._mapping["user_id"], 2)
    eq_(r._mapping[users.c.user_id], 2)
    eq_(r.user_name, "jack")
    eq_(r._mapping["user_name"], "jack")
    eq_(r._mapping[users.c.user_name], "jack")
@testing.combinations(
    (select(literal_column("1").label("col1")), ("col1",)),
    (
        select(
            literal_column("1").label("col1"),
            literal_column("2").label("col2"),
        ),
        ("col1", "col2"),
    ),
    argnames="sql,cols",
)
def test_compiled_star_doesnt_interfere_w_description(
    self, connection, sql, cols
):
    """test #6665 — SELECT * wrapped around a compiled subquery does
    not disturb the result's field names."""
    row = connection.execute(
        select("*").select_from(sql.subquery())
    ).first()
    eq_(row._fields, cols)
    eq_(row._mapping["col1"], 1)
def test_row_getitem_string(self, connection):
    """Indexing a Row with a string is a TypeError (tuple semantics);
    key lookup goes through ``row._mapping``."""
    users = self.tables.users
    connection.execute(
        users.insert(),
        [
            dict(user_id=1, user_name="john"),
            dict(user_id=2, user_name="jack"),
        ],
    )
    r = connection.execute(
        text("select * from users where user_id=2")
    ).first()

    with expect_raises_message(TypeError, "tuple indices must be"):
        r["foo"]

    eq_(r._mapping["user_name"], "jack")
def test_row_getitem_column(self, connection):
    """Indexing a Row with a Column object is a TypeError; Column
    lookup goes through ``row._mapping``."""
    labeled = literal_column("1").label("foo")
    row = connection.execute(select(labeled)).first()

    with expect_raises_message(TypeError, "tuple indices must be"):
        row[labeled]

    eq_(row._mapping[labeled], 1)
def test_column_accessor_basic_text(self, connection):
    """For a plain text() select, string keys resolve but Column
    objects do not — there is no compiled column map to match."""
    users = self.tables.users
    connection.execute(
        users.insert(),
        [
            dict(user_id=1, user_name="john"),
            dict(user_id=2, user_name="jack"),
        ],
    )
    r = connection.execute(
        text("select * from users where user_id=2")
    ).first()

    eq_(r.user_id, 2)
    eq_(r.user_name, "jack")
    eq_(r._mapping["user_id"], 2)

    eq_(r.user_name, "jack")
    eq_(r._mapping["user_name"], "jack")

    # cases which used to succeed w warning
    with expect_raises_message(
        exc.NoSuchColumnError, "Could not locate column in row"
    ):
        r._mapping[users.c.user_id]
    with expect_raises_message(
        exc.NoSuchColumnError, "Could not locate column in row"
    ):
        r._mapping[users.c.user_name]
def test_column_accessor_text_colexplicit(self, connection):
    """text().columns(...) declares the result columns explicitly, so
    Column-object lookup works even for a textual statement."""
    users = self.tables.users
    connection.execute(
        users.insert(),
        [
            dict(user_id=1, user_name="john"),
            dict(user_id=2, user_name="jack"),
        ],
    )
    r = connection.execute(
        text("select * from users where user_id=2").columns(
            users.c.user_id, users.c.user_name
        )
    ).first()

    eq_(r.user_id, 2)
    eq_(r._mapping["user_id"], 2)
    eq_(r._mapping[users.c.user_id], 2)

    eq_(r.user_name, "jack")
    eq_(r._mapping["user_name"], "jack")
    eq_(r._mapping[users.c.user_name], "jack")
def test_column_accessor_textual_select(self, connection):
    """A select() built from column()/table() matches result columns
    by name; real Table columns still don't resolve against it."""
    users = self.tables.users
    connection.execute(
        users.insert(),
        [
            dict(user_id=1, user_name="john"),
            dict(user_id=2, user_name="jack"),
        ],
    )
    # this will create column() objects inside
    # the select(), these need to match on name anyway
    r = connection.execute(
        select(column("user_id"), column("user_name"))
        .select_from(table("users"))
        .where(text("user_id=2"))
    ).first()

    # keyed access works in many ways
    eq_(r.user_id, 2)
    eq_(r.user_name, "jack")
    eq_(r._mapping["user_id"], 2)

    eq_(r.user_name, "jack")
    eq_(r._mapping["user_name"], "jack")

    # error cases that previously would warn
    with expect_raises_message(
        exc.NoSuchColumnError, "Could not locate column in row"
    ):
        r._mapping[users.c.user_id]
    with expect_raises_message(
        exc.NoSuchColumnError, "Could not locate column in row"
    ):
        r._mapping[users.c.user_name]
def test_column_accessor_dotted_union(self, connection):
    """Dotted "users.user_id" names coming back from a UNION are
    normalized to plain column names."""
    users = self.tables.users

    connection.execute(users.insert(), dict(user_id=1, user_name="john"))

    # test a little sqlite < 3.10.0 weirdness - with the UNION,
    # cols come back as "users.user_id" in cursor.description
    r = connection.execute(
        text(
            "select users.user_id, users.user_name "
            "from users "
            "UNION select users.user_id, "
            "users.user_name from users"
        )
    ).first()
    eq_(r._mapping["user_id"], 1)
    eq_(r._mapping["user_name"], "john")
    eq_(list(r._fields), ["user_id", "user_name"])
def test_column_accessor_sqlite_raw(self, connection):
    """With sqlite_raw_colnames enabled, cursor.description names are
    used verbatim; result keys differ by SQLite version."""
    users = self.tables.users

    connection.execute(users.insert(), dict(user_id=1, user_name="john"))

    r = connection.execute(
        text(
            "select users.user_id, users.user_name "
            "from users "
            "UNION select users.user_id, "
            "users.user_name from users",
        ).execution_options(sqlite_raw_colnames=True)
    ).first()

    if testing.against("sqlite < 3.10.0"):
        # older SQLite reports "users.user_id" for UNION columns
        not_in("user_id", r)
        not_in("user_name", r)
        eq_(r["users.user_id"], 1)
        eq_(r["users.user_name"], "john")

        eq_(list(r._fields), ["users.user_id", "users.user_name"])
    else:
        not_in("users.user_id", r._mapping)
        not_in("users.user_name", r._mapping)
        eq_(r._mapping["user_id"], 1)
        eq_(r._mapping["user_name"], "john")

        eq_(list(r._fields), ["user_id", "user_name"])
def test_column_accessor_sqlite_translated(self, connection):
    """Without sqlite_raw_colnames, dotted UNION names are translated
    so plain keys always work; dotted keys only on old SQLite."""
    users = self.tables.users

    connection.execute(users.insert(), dict(user_id=1, user_name="john"))

    r = connection.execute(
        text(
            "select users.user_id, users.user_name "
            "from users "
            "UNION select users.user_id, "
            "users.user_name from users",
        )
    ).first()
    eq_(r._mapping["user_id"], 1)
    eq_(r._mapping["user_name"], "john")

    if testing.against("sqlite < 3.10.0"):
        eq_(r._mapping["users.user_id"], 1)
        eq_(r._mapping["users.user_name"], "john")
    else:
        not_in("users.user_id", r._mapping)
        not_in("users.user_name", r._mapping)

    eq_(list(r._fields), ["user_id", "user_name"])
def test_column_accessor_labels_w_dots(self, connection):
    """Explicit labels containing dots are kept verbatim as result
    keys when raw column names are enabled."""
    users = self.tables.users

    connection.execute(users.insert(), dict(user_id=1, user_name="john"))

    # test using literal tablename.colname
    r = connection.execute(
        text(
            'select users.user_id AS "users.user_id", '
            'users.user_name AS "users.user_name" '
            "from users",
        ).execution_options(sqlite_raw_colnames=True)
    ).first()
    eq_(r._mapping["users.user_id"], 1)

    eq_(r._mapping["users.user_name"], "john")

    not_in("user_name", r._mapping)

    eq_(list(r._fields), ["users.user_id", "users.user_name"])
def test_column_accessor_unary(self, connection):
    """A unary expression (DISTINCT) still resolves by the underlying
    column and by attribute name."""
    users = self.tables.users

    connection.execute(users.insert(), dict(user_id=1, user_name="john"))

    # unary expressions
    r = connection.execute(
        select(users.c.user_name.distinct()).order_by(users.c.user_name)
    ).first()
    eq_(r._mapping[users.c.user_name], "john")
    eq_(r.user_name, "john")
@testing.fixture
def _ab_row_fixture(self, connection):
    """Fixture: a single Row with columns labeled ``a`` = 1 and
    ``b`` = 2."""
    r = connection.execute(
        select(literal(1).label("a"), literal(2).label("b"))
    ).first()
    return r
def test_named_tuple_access(self, _ab_row_fixture):
    """Row columns are reachable as named-tuple-style attributes."""
    row = _ab_row_fixture
    eq_(row.a, 1)
    eq_(row.b, 2)
def test_named_tuple_missing_attr(self, _ab_row_fixture):
    """Accessing a nonexistent column attribute raises AttributeError
    with the "Could not locate column" message."""
    r = _ab_row_fixture
    with expect_raises_message(
        AttributeError, "Could not locate column in row for column 'c'"
    ):
        r.c
def test_named_tuple_no_delete_present(self, _ab_row_fixture):
    """Rows are immutable: deleting an existing attribute fails."""
    r = _ab_row_fixture
    with expect_raises_message(AttributeError, "can't delete attribute"):
        del r.a
def test_named_tuple_no_delete_missing(self, _ab_row_fixture):
    """Rows are immutable: deleting even a nonexistent attribute
    raises the same "can't delete" error."""
    r = _ab_row_fixture
    # including for non-existent attributes
    with expect_raises_message(AttributeError, "can't delete attribute"):
        del r.c
def test_named_tuple_no_assign_present(self, _ab_row_fixture):
    """Rows are immutable: assignment and augmented assignment to an
    existing attribute both fail."""
    r = _ab_row_fixture
    with expect_raises_message(AttributeError, "can't set attribute"):
        r.a = 5

    with expect_raises_message(AttributeError, "can't set attribute"):
        r.a += 5
def test_named_tuple_no_assign_missing(self, _ab_row_fixture):
    """Rows are immutable: assigning a nonexistent attribute raises
    "can't set attribute" as well."""
    r = _ab_row_fixture
    # including for non-existent attributes
    with expect_raises_message(AttributeError, "can't set attribute"):
        r.c = 5
def test_named_tuple_no_self_assign_missing(self, _ab_row_fixture):
    """Augmented assignment to a missing attribute fails on the read
    side first, with the "Could not locate column" message."""
    r = _ab_row_fixture
    with expect_raises_message(
        AttributeError, "Could not locate column in row for column 'c'"
    ):
        r.c += 5
def test_mapping_tuple_readonly_errors(self, connection):
    """RowMapping is read-only: missing keys raise KeyError and any
    item assignment raises TypeError."""
    r = connection.execute(
        select(literal(1).label("a"), literal(2).label("b"))
    ).first()
    r = r._mapping
    eq_(r["a"], 1)
    eq_(r["b"], 2)

    with expect_raises_message(
        KeyError, "Could not locate column in row for column 'c'"
    ):
        r["c"]

    with expect_raises_message(
        TypeError, "'RowMapping' object does not support item assignment"
    ):
        r["a"] = 5

    with expect_raises_message(
        TypeError, "'RowMapping' object does not support item assignment"
    ):
        r["a"] += 5
def test_column_accessor_err(self, connection):
    """Missing columns raise AttributeError via attribute access and
    KeyError via ``_mapping``, both with the same message."""
    r = connection.execute(select(1)).first()
    with expect_raises_message(
        AttributeError, "Could not locate column in row for column 'foo'"
    ):
        r.foo
    with expect_raises_message(
        KeyError, "Could not locate column in row for column 'foo'"
    ):
        # fixed: a stray trailing comma here previously made this a
        # one-element tuple expression (harmless, but a typo)
        r._mapping["foo"]
def test_graceful_fetch_on_non_rows(self):
    """test that calling fetchone() etc. on a result that doesn't
    return rows fails gracefully.

    Each fetch-style method on the result of an INSERT raises
    ResourceClosedError with an explanatory message.
    """

    # these proxies don't work with no cursor.description present.
    # so they don't apply to this test at the moment.
    # result.FullyBufferedCursorResult,
    # result.BufferedRowCursorResult,
    # result.BufferedColumnCursorResult

    users = self.tables.users

    with testing.db.connect() as conn:
        keys_lambda = lambda r: r.keys()  # noqa: E731

        for meth in [
            lambda r: r.fetchone(),
            lambda r: r.fetchall(),
            lambda r: r.first(),
            lambda r: r.scalar(),
            lambda r: r.fetchmany(),
            lambda r: r._getter("user"),
            keys_lambda,
            lambda r: r.columns("user"),
            lambda r: r.cursor_strategy.fetchone(r, r.cursor),
        ]:
            trans = conn.begin()
            result = conn.execute(users.insert(), dict(user_id=1))

            assert_raises_message(
                exc.ResourceClosedError,
                "This result object does not return rows. "
                "It has been closed automatically.",
                meth,
                result,
            )
            # roll back so the next iteration can re-insert user_id=1
            trans.rollback()
def test_fetchone_til_end(self, connection):
    """fetchone() keeps returning None after exhaustion; once the
    result is closed it raises ResourceClosedError instead."""
    result = connection.exec_driver_sql("select * from users")
    eq_(result.fetchone(), None)
    eq_(result.fetchone(), None)
    eq_(result.fetchone(), None)
    result.close()
    assert_raises_message(
        exc.ResourceClosedError,
        "This result object is closed.",
        result.fetchone,
    )
def test_row_case_sensitive(self, connection):
    """Result keys are case sensitive: exact labels resolve, other
    casings raise KeyError."""
    row = connection.execute(
        select(
            literal_column("1").label("case_insensitive"),
            literal_column("2").label("CaseSensitive"),
        )
    ).first()

    eq_(list(row._fields), ["case_insensitive", "CaseSensitive"])

    in_("case_insensitive", row._parent._keymap)
    in_("CaseSensitive", row._parent._keymap)
    not_in("casesensitive", row._parent._keymap)

    eq_(row._mapping["case_insensitive"], 1)
    eq_(row._mapping["CaseSensitive"], 2)

    assert_raises(KeyError, lambda: row._mapping["Case_insensitive"])
    assert_raises(KeyError, lambda: row._mapping["casesensitive"])
def test_row_case_sensitive_unoptimized(self, testing_engine):
    """Case-sensitive key resolution also holds when a raw text()
    column forces the unoptimized keymap path."""
    with testing_engine().connect() as ins_conn:
        row = ins_conn.execute(
            select(
                literal_column("1").label("case_insensitive"),
                literal_column("2").label("CaseSensitive"),
                text("3 AS screw_up_the_cols"),
            )
        ).first()

        eq_(
            list(row._fields),
            ["case_insensitive", "CaseSensitive", "screw_up_the_cols"],
        )

        in_("case_insensitive", row._parent._keymap)
        in_("CaseSensitive", row._parent._keymap)
        not_in("casesensitive", row._parent._keymap)

        eq_(row._mapping["case_insensitive"], 1)
        eq_(row._mapping["CaseSensitive"], 2)
        eq_(row._mapping["screw_up_the_cols"], 3)

        assert_raises(KeyError, lambda: row._mapping["Case_insensitive"])
        assert_raises(KeyError, lambda: row._mapping["casesensitive"])
        assert_raises(KeyError, lambda: row._mapping["screw_UP_the_cols"])
def test_row_as_args(self, connection):
    """A Row's ``_mapping`` can be passed directly as the parameter
    source for an INSERT."""
    users = self.tables.users
    connection.execute(users.insert(), dict(user_id=1, user_name="john"))
    r = connection.execute(
        users.select().where(users.c.user_id == 1)
    ).first()
    connection.execute(users.delete())
    # re-insert the row using its own mapping as parameters
    connection.execute(users.insert(), r._mapping)
    eq_(connection.execute(users.select()).fetchall(), [(1, "john")])
@testing.requires.tuple_in
def test_row_tuple_interpretation(self, connection):
    """test #7292 — Row objects can be passed directly to
    ``tuple_(...).in_(...)`` without converting to plain tuples."""
    users = self.tables.users

    connection.execute(
        users.insert(),
        [
            dict(user_id=1, user_name="u1"),
            dict(user_id=2, user_name="u2"),
            dict(user_id=3, user_name="u3"),
        ],
    )
    rows = connection.execute(
        select(users.c.user_id, users.c.user_name)
    ).all()

    # was previously needed
    # rows = [(x, y) for x, y in rows]

    new_stmt = (
        select(users)
        .where(tuple_(users.c.user_id, users.c.user_name).in_(rows))
        .order_by(users.c.user_id)
    )

    eq_(
        connection.execute(new_stmt).all(),
        [(1, "u1"), (2, "u2"), (3, "u3")],
    )
def test_result_as_args(self, connection):
    """A list of row mappings from one table can drive a bulk INSERT
    into another table with the same columns."""
    users = self.tables.users
    users2 = self.tables.users2

    connection.execute(
        users.insert(),
        [
            dict(user_id=1, user_name="john"),
            dict(user_id=2, user_name="ed"),
        ],
    )
    r = connection.execute(users.select())
    connection.execute(users2.insert(), [row._mapping for row in r])
    eq_(
        connection.execute(
            users2.select().order_by(users2.c.user_id)
        ).fetchall(),
        [(1, "john"), (2, "ed")],
    )

    # repeat with a fresh result to show it works more than once
    connection.execute(users2.delete())
    r = connection.execute(users.select())
    connection.execute(users2.insert(), [row._mapping for row in r])
    eq_(
        connection.execute(
            users2.select().order_by(users2.c.user_id)
        ).fetchall(),
        [(1, "john"), (2, "ed")],
    )
@testing.requires.duplicate_names_in_cursor_description
def test_ambiguous_column(self, connection):
    """A duplicated result name raises "Ambiguous column name" for
    string lookup, while Column objects resolve positionally; the
    ambiguity error survives pickling."""
    users = self.tables.users
    addresses = self.tables.addresses

    connection.execute(users.insert(), dict(user_id=1, user_name="john"))
    result = connection.execute(
        users.outerjoin(addresses)
        .select()
        .set_label_style(LABEL_STYLE_NONE)
    )
    r = result.first()

    assert_raises_message(
        exc.InvalidRequestError,
        "Ambiguous column name",
        lambda: r._mapping["user_id"],
    )

    assert_raises_message(
        exc.InvalidRequestError,
        "Ambiguous column name",
        result._getter,
        "user_id",
    )

    # pure positional targeting; users.c.user_id
    # and addresses.c.user_id are known!
    # works as of 1.1 issue #3501
    eq_(r._mapping[users.c.user_id], 1)
    eq_(r._mapping[addresses.c.user_id], None)

    # try to trick it - fake_table isn't in the result!
    # we get the correct error
    fake_table = Table("fake", MetaData(), Column("user_id", Integer))
    assert_raises_message(
        exc.InvalidRequestError,
        "Could not locate column in row for column 'fake.user_id'",
        lambda: r._mapping[fake_table.c.user_id],
    )

    r = pickle.loads(pickle.dumps(r))
    assert_raises_message(
        exc.InvalidRequestError,
        "Ambiguous column name",
        lambda: r._mapping["user_id"],
    )
@testing.requires.duplicate_names_in_cursor_description
def test_ambiguous_column_by_col(self, connection):
    """Column objects from the statement resolve positionally even
    with duplicate names; a column from an uninvolved alias raises."""
    users = self.tables.users
    connection.execute(users.insert(), dict(user_id=1, user_name="john"))
    ua = users.alias()
    u2 = users.alias()
    result = connection.execute(
        select(users.c.user_id, ua.c.user_id)
        .select_from(users.join(ua, true()))
        .set_label_style(LABEL_STYLE_NONE)
    )
    row = result.first()

    # as of 1.1 issue #3501, we use pure positional
    # targeting for the column objects here
    eq_(row._mapping[users.c.user_id], 1)
    eq_(row._mapping[ua.c.user_id], 1)

    # this now works as of 1.1 issue #3501;
    # previously this was stuck on "ambiguous column name"
    assert_raises_message(
        exc.InvalidRequestError,
        "Could not locate column in row",
        lambda: row._mapping[u2.c.user_id],
    )
@testing.requires.duplicate_names_in_cursor_description
def test_ambiguous_column_contains(self, connection):
    """``in`` membership for both duplicate-named columns is True;
    only fetching the value raises the ambiguity error."""
    users = self.tables.users
    addresses = self.tables.addresses

    # ticket 2702.  in 0.7 we'd get True, False.
    # in 0.8, both columns are present so it's True;
    # but when they're fetched you'll get the ambiguous error.
    connection.execute(users.insert(), dict(user_id=1, user_name="john"))
    result = connection.execute(
        select(users.c.user_id, addresses.c.user_id).select_from(
            users.outerjoin(addresses)
        )
    )
    row = result.first()

    eq_(
        {
            users.c.user_id in row._mapping,
            addresses.c.user_id in row._mapping,
        },
        {True},
    )
@testing.combinations(
    (("name_label", "*"), False),
    (("*", "name_label"), False),
    (("user_id", "name_label", "user_name"), False),
    (("user_id", "name_label", "*", "user_name"), True),
    argnames="cols,other_cols_are_ambiguous",
)
@testing.requires.select_star_mixed
def test_label_against_star(
    self, connection, cols, other_cols_are_ambiguous
):
    """test #8536 — a label mixed with ``*`` always resolves; the
    plain columns become ambiguous only when they appear both
    explicitly and via the star expansion."""
    users = self.tables.users

    connection.execute(users.insert(), dict(user_id=1, user_name="john"))

    # build the SELECT list from the parametrized column spec
    stmt = select(
        *[
            (
                text("*")
                if colname == "*"
                else (
                    users.c.user_name.label("name_label")
                    if colname == "name_label"
                    else users.c[colname]
                )
            )
            for colname in cols
        ]
    )

    row = connection.execute(stmt).first()

    eq_(row._mapping["name_label"], "john")

    if other_cols_are_ambiguous:
        with expect_raises_message(
            exc.InvalidRequestError, "Ambiguous column name"
        ):
            row._mapping["user_id"]
        with expect_raises_message(
            exc.InvalidRequestError, "Ambiguous column name"
        ):
            row._mapping["user_name"]
    else:
        eq_(row._mapping["user_id"], 1)
        eq_(row._mapping["user_name"], "john")
def test_loose_matching_one(self, connection):
    """TextualSelect with positional=False matches declared columns
    to result labels loosely by name."""
    users = self.tables.users
    addresses = self.tables.addresses

    connection.execute(users.insert(), {"user_id": 1, "user_name": "john"})
    connection.execute(
        addresses.insert(),
        {"address_id": 1, "user_id": 1, "address": "email"},
    )

    # use some column labels in the SELECT
    result = connection.execute(
        TextualSelect(
            text(
                "select users.user_name AS users_user_name, "
                "users.user_id AS user_id, "
                "addresses.address_id AS address_id "
                "FROM users JOIN addresses "
                "ON users.user_id = addresses.user_id "
                "WHERE users.user_id=1 "
            ),
            [users.c.user_id, users.c.user_name, addresses.c.address_id],
            positional=False,
        )
    )
    row = result.first()
    eq_(row._mapping[users.c.user_id], 1)
    eq_(row._mapping[users.c.user_name], "john")
def test_loose_matching_two(self, connection):
    """With loose matching, two result columns both named "user_id"
    make Column-object lookup ambiguous for either side."""
    users = self.tables.users
    addresses = self.tables.addresses

    connection.execute(users.insert(), {"user_id": 1, "user_name": "john"})
    connection.execute(
        addresses.insert(),
        {"address_id": 1, "user_id": 1, "address": "email"},
    )

    # use some column labels in the SELECT
    result = connection.execute(
        TextualSelect(
            text(
                "select users.user_name AS users_user_name, "
                "users.user_id AS user_id, "
                "addresses.user_id "
                "FROM users JOIN addresses "
                "ON users.user_id = addresses.user_id "
                "WHERE users.user_id=1 "
            ),
            [users.c.user_id, users.c.user_name, addresses.c.user_id],
            positional=False,
        )
    )
    row = result.first()

    assert_raises_message(
        exc.InvalidRequestError,
        "Ambiguous column name",
        lambda: row._mapping[users.c.user_id],
    )
    assert_raises_message(
        exc.InvalidRequestError,
        "Ambiguous column name",
        lambda: row._mapping[addresses.c.user_id],
    )
    eq_(row._mapping[users.c.user_name], "john")
def test_ambiguous_column_by_col_plus_label(self, connection):
    """A column selected both directly and via a type_coerce label is
    not ambiguous: the Column object and the index both resolve."""
    users = self.tables.users
    connection.execute(users.insert(), dict(user_id=1, user_name="john"))
    result = connection.execute(
        select(
            users.c.user_id,
            type_coerce(users.c.user_id, Integer).label("foo"),
        )
    )
    row = result.first()
    eq_(row._mapping[users.c.user_id], 1)
    eq_(row[1], 1)
def test_fetch_partial_result_map(self, connection):
    """A text() statement may declare types for only a subset of its
    result columns and still fetch complete rows."""
    users = self.tables.users
    connection.execute(users.insert(), dict(user_id=7, user_name="ed"))

    stmt = text("select * from users").columns(user_name=String())
    eq_(connection.execute(stmt).fetchall(), [(7, "ed")])
def test_fetch_unordered_result_map(self, connection):
    """Keyword-declared column types on text() apply by name, each
    distinct TypeDecorator tagging its own column's value."""
    users = self.tables.users
    connection.execute(users.insert(), dict(user_id=7, user_name="ed"))

    # each decorator appends a distinct suffix so we can tell which
    # type processed which column
    class Goofy1(TypeDecorator):
        impl = String
        cache_ok = True

        def process_result_value(self, value, dialect):
            return value + "a"

    class Goofy2(TypeDecorator):
        impl = String
        cache_ok = True

        def process_result_value(self, value, dialect):
            return value + "b"

    class Goofy3(TypeDecorator):
        impl = String
        cache_ok = True

        def process_result_value(self, value, dialect):
            return value + "c"

    t = text(
        "select user_name as a, user_name as b, "
        "user_name as c from users"
    ).columns(a=Goofy1(), b=Goofy2(), c=Goofy3())
    eq_(connection.execute(t).fetchall(), [("eda", "edb", "edc")])
@testing.requires.subqueries
def test_column_label_targeting(self, connection):
    """Columns of an aliased subquery resolve against rows from a
    labeled select over that alias."""
    users = self.tables.users
    connection.execute(users.insert(), dict(user_id=7, user_name="ed"))

    # alias name identical to the table name is the tricky case
    for s in (
        users.select().alias("foo"),
        users.select().alias(users.name),
    ):
        row = connection.execute(
            s.select().set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
        ).first()
        eq_(row._mapping[s.c.user_id], 7)
        eq_(row._mapping[s.c.user_name], "ed")
def test_ro_mapping_py3k(self, connection):
    """row._asdict() and row._mapping behave like a plain dict:
    equal keys(), values(), items() views."""
    users = self.tables.users

    connection.execute(users.insert(), dict(user_id=1, user_name="foo"))

    result = connection.execute(users.select())
    row = result.first()
    dict_row = row._asdict()

    odict_row = dict([("user_id", 1), ("user_name", "foo")])
    eq_(dict_row, odict_row)

    mapping_row = row._mapping

    eq_(list(mapping_row), list(mapping_row.keys()))
    eq_(odict_row.keys(), mapping_row.keys())
    eq_(odict_row.values(), mapping_row.values())
    eq_(odict_row.items(), mapping_row.items())
@testing.combinations(
    (lambda result: result),
    (lambda result: result.first(),),
    (lambda result: result.first()._mapping),
    argnames="get_object",
)
def test_keys(self, connection, get_object):
    """The keys() view — on the result, a Row's mapping, alike —
    compares to lists, supports ``in``, and accepts Column objects."""
    users = self.tables.users
    addresses = self.tables.addresses
    connection.execute(users.insert(), dict(user_id=1, user_name="foo"))
    result = connection.execute(users.select())

    obj = get_object(result)

    if isinstance(obj, Row):
        keys = obj._mapping.keys()
    else:
        keys = obj.keys()

    # in 1.4, keys() is now a view that includes support for testing
    # of columns and other objects
    eq_(len(keys), 2)
    eq_(list(keys), ["user_id", "user_name"])
    eq_(keys, ["user_id", "user_name"])
    ne_(keys, ["user_name", "user_id"])
    in_("user_id", keys)
    not_in("foo", keys)
    in_(users.c.user_id, keys)
    not_in(0, keys)
    not_in(addresses.c.user_id, keys)
    not_in(addresses.c.address, keys)

    if isinstance(obj, Row):
        eq_(obj._fields, ("user_id", "user_name"))
def test_row_mapping_keys(self, connection):
    """result.keys(), row._mapping.keys() and row._fields all expose
    the selected column names; the mapping view accepts Columns."""
    users = self.tables.users

    connection.execute(users.insert(), dict(user_id=1, user_name="foo"))
    result = connection.execute(users.select())
    eq_(result.keys(), ["user_id", "user_name"])
    row = result.first()
    eq_(list(row._mapping.keys()), ["user_id", "user_name"])
    eq_(row._fields, ("user_id", "user_name"))

    in_("user_id", row._fields)
    not_in("foo", row._fields)
    in_(users.c.user_id, row._mapping.keys())
def test_row_keys_legacy_dont_warn(self, connection):
    """dict(row._mapping) and row._fields work without deprecation
    warnings."""
    users = self.tables.users
    connection.execute(users.insert(), dict(user_id=1, user_name="foo"))
    row = connection.execute(users.select()).first()
    eq_(dict(row._mapping), {"user_id": 1, "user_name": "foo"})
    eq_(row._fields, ("user_id", "user_name"))
def test_row_namedtuple_legacy_ok(self, connection):
    """Plain attribute access on a Row behaves like a named tuple."""
    users = self.tables.users
    connection.execute(users.insert(), dict(user_id=1, user_name="foo"))
    row = connection.execute(users.select()).first()
    eq_(row.user_id, 1)
    eq_(row.user_name, "foo")
def test_keys_anon_labels(self, connection):
    """test [ticket:3483] — anonymously labeled and function columns
    get deterministic generated key names."""
    users = self.tables.users

    connection.execute(users.insert(), dict(user_id=1, user_name="foo"))
    result = connection.execute(
        select(
            users.c.user_id,
            users.c.user_name.label(None),
            func.count(literal_column("1")),
        ).group_by(users.c.user_id, users.c.user_name)
    )

    eq_(result.keys(), ["user_id", "user_name_1", "count_1"])
    row = result.first()
    eq_(row._fields, ("user_id", "user_name_1", "count_1"))
    eq_(list(row._mapping.keys()), ["user_id", "user_name_1", "count_1"])
def test_items(self, connection):
    """_mapping.items() yields (key, value) pairs; keys lowercased
    here to normalize across backends."""
    users = self.tables.users
    connection.execute(users.insert(), dict(user_id=1, user_name="foo"))
    row = connection.execute(users.select()).first()
    items = [(key.lower(), value) for key, value in row._mapping.items()]
    eq_(items, [("user_id", 1), ("user_name", "foo")])
def test_len(self, connection):
    """len(row) equals the number of selected columns."""
    users = self.tables.users
    connection.execute(users.insert(), dict(user_id=1, user_name="foo"))
    row = connection.execute(users.select()).first()
    eq_(len(row), 2)
    # same for textual SQL, with both two and one columns
    row = connection.exec_driver_sql(
        "select user_name, user_id from users"
    ).first()
    eq_(len(row), 2)
    row = connection.exec_driver_sql("select user_name from users").first()
    eq_(len(row), 1)
def test_row_mapping_get(self, connection):
    """_mapping.get accepts both string keys and Column objects."""
    users = self.tables.users
    connection.execute(users.insert(), dict(user_id=1, user_name="foo"))
    row = connection.execute(users.select()).first()
    for key in ("user_id", users.c.user_id):
        eq_(row._mapping.get(key), 1)
def test_sorting_in_python(self, connection):
    """Rows sort like plain tuples in Python-side sorted()."""
    users = self.tables.users
    connection.execute(
        users.insert(),
        [
            dict(user_id=1, user_name="foo"),
            dict(user_id=2, user_name="bar"),
            dict(user_id=3, user_name="def"),
        ],
    )
    # ordered by name on the SQL side...
    rows = connection.execute(
        users.select().order_by(users.c.user_name)
    ).fetchall()
    eq_(rows, [(2, "bar"), (3, "def"), (1, "foo")])
    # ...and tuple-sorted (id first) on the Python side
    eq_(sorted(rows), [(1, "foo"), (2, "bar"), (3, "def")])
def test_column_order_with_simple_query(self, connection):
    # should return values in column definition order
    users = self.tables.users
    connection.execute(users.insert(), dict(user_id=1, user_name="foo"))
    row = connection.execute(
        users.select().where(users.c.user_id == 1)
    ).first()
    eq_(row[0], 1)
    eq_(row[1], "foo")
    eq_([name.lower() for name in row._fields], ["user_id", "user_name"])
    eq_(list(row._mapping.values()), [1, "foo"])
def test_column_order_with_text_query(self, connection):
    # should return values in query order
    users = self.tables.users
    connection.execute(users.insert(), dict(user_id=1, user_name="foo"))
    row = connection.exec_driver_sql(
        "select user_name, user_id from users"
    ).first()
    eq_(row[0], "foo")
    eq_(row[1], 1)
    eq_([name.lower() for name in row._fields], ["user_name", "user_id"])
    eq_(list(row._mapping.values()), ["foo", 1])
@testing.crashes("oracle", "FIXME: unknown, verify not fails_on()")
@testing.provide_metadata
def test_column_accessor_shadow(self, connection):
shadowed = Table(
"test_shadowed",
self.metadata,
Column("shadow_id", INT, primary_key=True),
Column("shadow_name", VARCHAR(20)),
Column("parent", VARCHAR(20)),
Column("row", VARCHAR(40)),
Column("_parent", VARCHAR(20)),
Column("_row", VARCHAR(20)),
)
self.metadata.create_all(connection)
connection.execute(
shadowed.insert(),
dict(
shadow_id=1,
shadow_name="The Shadow",
parent="The Light",
row="Without light there is no shadow",
_parent="Hidden parent",
_row="Hidden row",
),
)
r = connection.execute(
shadowed.select().where(shadowed.c.shadow_id == 1)
).first()
eq_(r.shadow_id, 1)
eq_(r._mapping["shadow_id"], 1)
eq_(r._mapping[shadowed.c.shadow_id], 1)
eq_(r.shadow_name, "The Shadow")
eq_(r._mapping["shadow_name"], "The Shadow")
eq_(r._mapping[shadowed.c.shadow_name], "The Shadow")
eq_(r.parent, "The Light")
eq_(r._mapping["parent"], "The Light")
eq_(r._mapping[shadowed.c.parent], "The Light")
eq_(r.row, "Without light there is no shadow")
eq_(r._mapping["row"], "Without light there is no shadow")
eq_(r._mapping[shadowed.c.row], "Without light there is no shadow")
eq_(r._mapping["_parent"], "Hidden parent")
eq_(r._mapping["_row"], "Hidden row")
def test_nontuple_row(self):
    """ensure the C version of BaseRow handles
    duck-type-dependent rows.

    As of 1.4 they are converted internally to tuples in any case.
    """

    class DuckSequence:
        """Sequence-alike wrapping a list without subclassing list."""

        def __init__(self, data):
            self.internal_list = data

        def __len__(self):
            return len(self.internal_list)

        def __getitem__(self, i):
            return list.__getitem__(self.internal_list, i)

    parent = SimpleResultMetaData(["key"])
    proxy = Row(
        parent, [None], parent._key_to_index, DuckSequence(["value"])
    )
    eq_(list(proxy), ["value"])
    eq_(proxy[0], "value")
    eq_(proxy.key, "value")
    eq_(proxy._mapping["key"], "value")
@contextmanager
def cursor_wrapper(self, engine):
    """Patch *engine* so every DBAPI cursor it creates is wrapped by a
    proxy that counts attribute accesses.

    Yields the ``calls`` dict mapping attribute name -> access count;
    tests inspect it to verify e.g. how often ``rowcount`` was read.
    The patch is removed when the context exits.
    """
    calls = defaultdict(int)

    class CursorWrapper:
        def __init__(self, real_cursor):
            self.real_cursor = real_cursor

        def __getattr__(self, name):
            # count the access, then delegate to the real cursor
            calls[name] += 1
            return getattr(self.real_cursor, name)

    # keep a reference to the unpatched factory so we can delegate
    create_cursor = engine.dialect.execution_ctx_cls.create_cursor

    def new_create(context):
        cursor = create_cursor(context)
        return CursorWrapper(cursor)

    with patch.object(
        engine.dialect.execution_ctx_cls, "create_cursor", new_create
    ):
        yield calls
def test_no_rowcount_on_selects_inserts(self, metadata, testing_engine):
    """assert that rowcount is only called on deletes and updates.

    This because cursor.rowcount can be expensive on some dialects
    such as Firebird, however many dialects require it be called
    before the cursor is closed.
    """
    engine = testing_engine()
    req = testing.requires
    t = Table("t1", metadata, Column("data", String(10)))
    metadata.create_all(engine)
    count = 0
    with self.cursor_wrapper(engine) as call_counts:
        with engine.begin() as conn:
            conn.execute(
                t.insert(),
                [{"data": "d1"}, {"data": "d2"}, {"data": "d3"}],
            )
            # some dialects cache rowcount unconditionally, or on INSERT;
            # account for that extra read via the requirement flags
            if (
                req.rowcount_always_cached.enabled
                or req.rowcount_always_cached_on_insert.enabled
            ):
                count += 1
            eq_(call_counts["rowcount"], count)
            eq_(
                conn.execute(t.select()).fetchall(),
                [("d1",), ("d2",), ("d3",)],
            )
            if req.rowcount_always_cached.enabled:
                count += 1
            eq_(call_counts["rowcount"], count)
            # UPDATE and DELETE must each read rowcount exactly once
            conn.execute(t.update(), {"data": "d4"})
            count += 1
            eq_(call_counts["rowcount"], count)
            conn.execute(t.delete())
            count += 1
            eq_(call_counts["rowcount"], count)
def test_rowcount_always_called_when_preserve_rowcount(
    self, metadata, testing_engine
):
    """assert that rowcount is called on any statement when
    ``preserve_rowcount=True``.
    """
    engine = testing_engine()
    t = Table("t1", metadata, Column("data", String(10)))
    metadata.create_all(engine)
    with self.cursor_wrapper(engine) as call_counts:
        with engine.begin() as conn:
            conn = conn.execution_options(preserve_rowcount=True)
            # Do not use insertmanyvalues on any driver
            conn.execute(t.insert(), {"data": "d1"})  # INSERT
            eq_(call_counts["rowcount"], 1)
            eq_(conn.execute(t.select()).fetchall(), [("d1",)])  # SELECT
            eq_(call_counts["rowcount"], 2)
            conn.execute(t.update(), {"data": "d4"})  # UPDATE
            eq_(call_counts["rowcount"], 3)
            conn.execute(t.delete())  # DELETE
            eq_(call_counts["rowcount"], 4)
def test_row_is_sequence(self):
    """Row registers as a collections.abc.Sequence."""
    sample = Row(object(), [None], {}, ["value"])
    is_true(isinstance(sample, collections_abc.Sequence))
def test_row_special_names(self):
    """Columns named like tuple methods (``count``, ``index``) or
    ``key`` resolve to the column values when present; without such
    columns the normal tuple methods remain usable.
    """
    metadata = SimpleResultMetaData(["key", "count", "index", "foo"])
    row = Row(
        metadata,
        [None, None, None, None],
        metadata._key_to_index,
        ["kv", "cv", "iv", "f"],
    )
    is_true(isinstance(row, collections_abc.Sequence))
    # attribute access resolves to column values, shadowing
    # tuple.count / tuple.index
    eq_(row.key, "kv")
    eq_(row.count, "cv")
    eq_(row.index, "iv")
    eq_(row.foo, "f")
    eq_(row._mapping["foo"], "f")
    eq_(row._mapping["count"], "cv")
    eq_(row._mapping["index"], "iv")
    # without "count"/"index" columns, the tuple methods work normally
    metadata = SimpleResultMetaData(["key", "q", "p"])
    row = Row(
        metadata,
        [None, None, None],
        metadata._key_to_index,
        ["kv", "cv", "iv"],
    )
    is_true(isinstance(row, collections_abc.Sequence))
    eq_(row.key, "kv")
    eq_(row.q, "cv")
    eq_(row.p, "iv")
    eq_(row.index("cv"), 1)
    eq_(row.count("cv"), 1)
    eq_(row.count("x"), 0)
def test_row_precedence_normal_names(self):
    """Row's own API (``_fields``, ``_asdict``, ``_mapping``) and public
    subclass members take precedence over identically-named columns.
    """
    f = ("_fields", "_asdict", "_mapping", "as_tuple")
    v = ["ff", "ad", "mm", "at"]
    metadata = SimpleResultMetaData(f)

    class SubRow(Row):
        # use subclass to ensure there is always a public method
        @property
        def as_tuple(self):
            return tuple(self)

    row = SubRow(metadata, None, metadata._key_to_index, v)
    # the Row API wins over the same-named columns
    eq_(row._fields, f)
    eq_(row._asdict(), dict(zip(f, v)))
    eq_(row._mapping, dict(zip(f, v)))
    eq_(row.as_tuple, tuple(v))
    with expect_raises(AttributeError):
        getattr(row, "")  # test cython getattr edge case
def test_new_row_no_dict_behaviors(self):
    """This mode is not used currently but will be once we are in 2.0."""
    metadata = SimpleResultMetaData(["a", "b", "count"])
    row = Row(
        metadata,
        [None, None, None],
        metadata._key_to_index,
        ["av", "bv", "cv"],
    )
    # mapping-style access goes through ._mapping only
    eq_(dict(row._mapping), {"a": "av", "b": "bv", "count": "cv"})
    # string indexing on the row itself raises the plain tuple error
    with assertions.expect_raises(
        TypeError,
        "tuple indices must be integers or slices, not str",
    ):
        eq_(row["a"], "av")
    with assertions.expect_raises_message(
        TypeError,
        "tuple indices must be integers or slices, not str",
    ):
        eq_(row["count"], "cv")
    eq_(list(row._mapping), ["a", "b", "count"])
def test_row_is_hashable(self):
    """A Row hashes identically to the tuple of its values."""
    values = (1, "value", "foo")
    row = Row(object(), [None, None, None], {}, values)
    eq_(hash(row), hash(values))
@testing.provide_metadata
def test_row_getitem_indexes_compiled(self, connection):
    """Integer, negative, and slice indexing on a compiled-statement row."""
    values = Table(
        "rp",
        self.metadata,
        Column("key", String(10), primary_key=True),
        Column("value", String(10)),
    )
    values.create(connection)
    connection.execute(values.insert(), dict(key="One", value="Uno"))
    row = connection.execute(values.select()).first()
    eq_(row._mapping["key"], "One")
    eq_(row._mapping["value"], "Uno")
    # positive and negative indexes behave like a plain tuple
    for index, expected in ((0, "One"), (1, "Uno"), (-2, "One"), (-1, "Uno")):
        eq_(row[index], expected)
    eq_(row[1:0:-1], ("Uno",))
@testing.only_on("sqlite")
def test_row_getitem_indexes_raw(self, connection):
row = connection.exec_driver_sql(
"select 'One' as key, 'Uno' as value"
).first()
eq_(row._mapping["key"], "One")
eq_(row._mapping["value"], "Uno")
eq_(row[0], "One")
eq_(row[1], "Uno")
eq_(row[-2], "One")
eq_(row[-1], "Uno")
eq_(row[1:0:-1], ("Uno",))
@testing.requires.cextensions
@testing.provide_metadata
def test_row_c_sequence_check(self, connection):
    """csv.writer performs a PySequenceCheck; the C Row must pass it."""
    users = self.tables.users2
    connection.execute(users.insert(), dict(user_id=1, user_name="Test"))
    row = connection.execute(
        users.select().where(users.c.user_id == 1)
    ).fetchone()
    buf = StringIO()
    # csv performs PySequenceCheck call
    csv.writer(buf).writerow(row)
    assert buf.getvalue().strip() == "1,Test"
@testing.requires.selectone
def test_empty_accessors(self, connection):
    """DML-only result accessors raise InvalidRequestError when the
    executed statement is not the matching kind of construct.
    """
    # (statement, accessors expected to fail, error-message regex)
    statements = [
        (
            "select 1",
            [
                lambda r: r.last_inserted_params(),
                lambda r: r.last_updated_params(),
                lambda r: r.prefetch_cols(),
                lambda r: r.postfetch_cols(),
                lambda r: r.inserted_primary_key,
            ],
            "Statement is not a compiled expression construct.",
        ),
        (
            select(1),
            [
                lambda r: r.last_inserted_params(),
                lambda r: r.inserted_primary_key,
            ],
            r"Statement is not an insert\(\) expression construct.",
        ),
        (
            select(1),
            [lambda r: r.last_updated_params()],
            r"Statement is not an update\(\) expression construct.",
        ),
        (
            select(1),
            [lambda r: r.prefetch_cols(), lambda r: r.postfetch_cols()],
            r"Statement is not an insert\(\) "
            r"or update\(\) expression construct.",
        ),
    ]
    for stmt, meths, msg in statements:
        if isinstance(stmt, str):
            r = connection.exec_driver_sql(stmt)
        else:
            r = connection.execute(stmt)
        try:
            for meth in meths:
                assert_raises_message(
                    sa_exc.InvalidRequestError, msg, meth, r
                )
        finally:
            # always release the cursor, even if an accessor misbehaves
            r.close()
@testing.requires.dbapi_lastrowid
def test_lastrowid(self, connection):
    """result.lastrowid mirrors the execution context's lastrowid."""
    users = self.tables.users
    result = connection.execute(
        users.insert(), dict(user_id=1, user_name="Test")
    )
    eq_(result.lastrowid, result.context.get_lastrowid())
def test_raise_errors(self, connection):
    """Errors raised while reading ``rowcount`` / ``lastrowid`` from the
    execution context propagate to the caller unchanged.
    """
    users = self.tables.users

    class Wrapper:
        # context proxy that fails only for the two attributes under test
        def __init__(self, context):
            self.context = context

        def __getattr__(self, name):
            if name in ("rowcount", "get_lastrowid"):
                raise Exception("canary")
            return getattr(self.context, name)

    r = connection.execute(
        users.insert(), dict(user_id=1, user_name="Test")
    )
    r.context = Wrapper(r.context)
    with expect_raises_message(Exception, "canary"):
        r.rowcount
    with expect_raises_message(Exception, "canary"):
        r.lastrowid
@testing.combinations("plain", "mapping", "scalar", argnames="result_type")
@testing.combinations(
"stream_results", "yield_per", "yield_per_meth", argnames="optname"
)
@testing.combinations(10, 50, argnames="value")
@testing.combinations(
"meth", "passed_in", "stmt", argnames="send_opts_how"
)
def test_stream_options(
self,
connection,
optname,
value,
send_opts_how,
result_type,
close_result_when_finished,
):
table = self.tables.test
connection.execute(
table.insert(),
[{"x": i, "y": "t_%d" % i} for i in range(15, 3000)],
)
if optname == "stream_results":
opts = {"stream_results": True, "max_row_buffer": value}
elif optname == "yield_per":
opts = {"yield_per": value}
elif optname == "yield_per_meth":
opts = {"stream_results": True}
else:
assert False
if send_opts_how == "meth":
result = connection.execution_options(**opts).execute(
table.select()
)
elif send_opts_how == "passed_in":
result = connection.execute(table.select(), execution_options=opts)
elif send_opts_how == "stmt":
result = connection.execute(
table.select().execution_options(**opts)
)
else:
assert False
if result_type == "mapping":
result = result.mappings()
real_result = result._real_result
elif result_type == "scalar":
result = result.scalars()
real_result = result._real_result
else:
real_result = result
if optname == "yield_per_meth":
result = result.yield_per(value)
if result_type == "mapping" or result_type == "scalar":
real_result = result._real_result
else:
real_result = result
close_result_when_finished(result, consume=True)
if optname == "yield_per" and value is not None:
expected_opt = {
"stream_results": True,
"max_row_buffer": value,
"yield_per": value,
}
elif optname == "stream_results" and value is not None:
expected_opt = {
"stream_results": True,
"max_row_buffer": value,
}
else:
expected_opt = None
if expected_opt is not None:
eq_(real_result.context.execution_options, expected_opt)
if value is None:
assert isinstance(
real_result.cursor_strategy, _cursor.CursorFetchStrategy
)
return
assert isinstance(
real_result.cursor_strategy, _cursor.BufferedRowCursorFetchStrategy
)
eq_(real_result.cursor_strategy._max_row_buffer, value)
if optname == "yield_per" or optname == "yield_per_meth":
eq_(real_result.cursor_strategy._bufsize, value)
else:
eq_(real_result.cursor_strategy._bufsize, min(value, 5))
eq_(len(real_result.cursor_strategy._rowbuffer), 1)
next(result)
next(result)
if optname == "yield_per" or optname == "yield_per_meth":
eq_(len(real_result.cursor_strategy._rowbuffer), value - 1)
else:
# based on default growth of 5
eq_(len(real_result.cursor_strategy._rowbuffer), 4)
for i, row in enumerate(result):
if i == 186:
break
if optname == "yield_per" or optname == "yield_per_meth":
eq_(
len(real_result.cursor_strategy._rowbuffer),
value - (188 % value),
)
else:
# based on default growth of 5
eq_(
len(real_result.cursor_strategy._rowbuffer),
7 if value == 10 else 42,
)
if optname == "yield_per" or optname == "yield_per_meth":
# ensure partition is set up to same size
partition = next(result.partitions())
eq_(len(partition), value)
@testing.fixture
def autoclose_row_fixture(self, connection):
    """Seed five user rows for the autoclose tests."""
    users = self.tables.users
    connection.execute(
        users.insert(),
        [{"user_id": n, "name": "u%d" % n} for n in range(1, 6)],
    )
@testing.fixture(params=["plain", "scalars", "mapping"])
def result_fixture(self, request, connection):
    """Produce a plain, scalars, or mappings result per fixture param."""
    users = self.tables.users
    result_type = request.param
    if result_type == "plain":
        return connection.execute(select(users))
    elif result_type == "scalars":
        return connection.scalars(select(users))
    elif result_type == "mapping":
        return connection.execute(select(users)).mappings()
    else:
        assert False
def test_results_can_close(self, autoclose_row_fixture, result_fixture):
    """test #8710"""
    result = result_fixture
    # initially fully open
    is_false(result.closed)
    is_false(result._soft_closed)
    # soft close marks only the soft flag
    result._soft_close()
    is_false(result.closed)
    is_true(result._soft_closed)
    # hard close sets both flags
    result.close()
    is_true(result.closed)
    is_true(result._soft_closed)
def test_autoclose_rows_exhausted_plain(
    self, connection, autoclose_row_fixture, result_fixture
):
    """Exhausting a result soft-closes it but does not hard-close it."""
    result = result_fixture
    assert not result._soft_closed
    assert not result.closed
    fetched = list(result)
    eq_(len(fetched), 5)
    # exhaustion triggers the soft close only
    assert result._soft_closed
    assert not result.closed
    result.close()
    assert result.closed
def test_result_ctxmanager(
    self, connection, autoclose_row_fixture, result_fixture
):
    """test #8710"""
    result = result_fixture
    with expect_raises_message(Exception, "hi"):
        with result:
            assert not result._soft_closed
            assert not result.closed
            for position, _ in enumerate(result):
                if position > 2:
                    raise Exception("hi")
    # leaving the ``with`` block closes the result even on error
    assert result._soft_closed
    assert result.closed
| CursorResultTest |
python | spack__spack | lib/spack/spack/test/llnl/util/lock.py | {
"start": 9319,
"end": 9815
} | class ____:
def __init__(self, lock_path, start=0, length=0):
self.lock_path = lock_path
self.start = start
self.length = length
@property
def __name__(self):
return self.__class__.__name__
def __call__(self, barrier):
lock = lk.Lock(self.lock_path, start=self.start, length=self.length)
lock.acquire_read() # grab shared lock
barrier.wait()
barrier.wait() # hold the lock until timeout in other procs.
| AcquireRead |
python | getsentry__sentry | src/sentry/api/permissions.py | {
"start": 1452,
"end": 1581
} | class ____(BasePermission):
def has_permission(self, request: Request, view: object) -> bool:
return False
| NoPermission |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 377446,
"end": 379294
} | class ____(Response):
"""
Response of tasks.stop endpoint.
:param updated: Number of tasks updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "tasks"
_action = "stop"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, fields: Optional[dict] = None, **kwargs: Any) -> None:
super(StopResponse, self).__init__(**kwargs)
self.updated = updated
self.fields = fields
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
| StopResponse |
python | kubernetes-client__python | kubernetes/client/models/v1_pod_resource_claim.py | {
"start": 383,
"end": 7425
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str',
'resource_claim_name': 'str',
'resource_claim_template_name': 'str'
}
attribute_map = {
'name': 'name',
'resource_claim_name': 'resourceClaimName',
'resource_claim_template_name': 'resourceClaimTemplateName'
}
def __init__(self, name=None, resource_claim_name=None, resource_claim_template_name=None, local_vars_configuration=None): # noqa: E501
"""V1PodResourceClaim - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._resource_claim_name = None
self._resource_claim_template_name = None
self.discriminator = None
self.name = name
if resource_claim_name is not None:
self.resource_claim_name = resource_claim_name
if resource_claim_template_name is not None:
self.resource_claim_template_name = resource_claim_template_name
@property
def name(self):
"""Gets the name of this V1PodResourceClaim. # noqa: E501
Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL. # noqa: E501
:return: The name of this V1PodResourceClaim. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1PodResourceClaim.
Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL. # noqa: E501
:param name: The name of this V1PodResourceClaim. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def resource_claim_name(self):
"""Gets the resource_claim_name of this V1PodResourceClaim. # noqa: E501
ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod. Exactly one of ResourceClaimName and ResourceClaimTemplateName must be set. # noqa: E501
:return: The resource_claim_name of this V1PodResourceClaim. # noqa: E501
:rtype: str
"""
return self._resource_claim_name
@resource_claim_name.setter
def resource_claim_name(self, resource_claim_name):
"""Sets the resource_claim_name of this V1PodResourceClaim.
ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod. Exactly one of ResourceClaimName and ResourceClaimTemplateName must be set. # noqa: E501
:param resource_claim_name: The resource_claim_name of this V1PodResourceClaim. # noqa: E501
:type: str
"""
self._resource_claim_name = resource_claim_name
@property
def resource_claim_template_name(self):
"""Gets the resource_claim_template_name of this V1PodResourceClaim. # noqa: E501
ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim. Exactly one of ResourceClaimName and ResourceClaimTemplateName must be set. # noqa: E501
:return: The resource_claim_template_name of this V1PodResourceClaim. # noqa: E501
:rtype: str
"""
return self._resource_claim_template_name
@resource_claim_template_name.setter
def resource_claim_template_name(self, resource_claim_template_name):
"""Sets the resource_claim_template_name of this V1PodResourceClaim.
ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim. Exactly one of ResourceClaimName and ResourceClaimTemplateName must be set. # noqa: E501
:param resource_claim_template_name: The resource_claim_template_name of this V1PodResourceClaim. # noqa: E501
:type: str
"""
self._resource_claim_template_name = resource_claim_template_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PodResourceClaim):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PodResourceClaim):
return True
return self.to_dict() != other.to_dict()
| V1PodResourceClaim |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_ascii.py | {
"start": 2356,
"end": 11525
} | class ____(ColumnMapExpectation):
"""Expect the set of column values to be ASCII characters.
expect_column_values_to_be_ascii is a \
[Column Map Expectation](https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_column_map_expectations).
Args:
column (str): \
The provided column name
Other Parameters:
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
"""
# These examples will be shown in the public gallery, and also executed as unit tests for your Expectation
examples = [
{
"data": {
"mostly_ascii": [1, 12.34, "a;lskdjfji", "””", "#$%^&*(", None, None],
"mostly_numbers": [42, 404, 858, 8675309, 62, 48, 17],
"mostly_characters": [
"Lantsberger",
"Gil Pasternak",
"Vincent",
"J@sse",
"R!ck R00l",
"A_",
"B+",
],
"not_ascii": [
"ဟယ်လို",
" שלום",
"नमस्ते",
"رحبا",
"ନମସ୍କାର",
"สวัสดี",
"ಹಲೋ ",
],
},
"schemas": {
"spark": {
"mostly_ascii": "StringType",
"mostly_numbers": "StringType",
"mostly_characters": "StringType",
"not_ascii": "StringType",
}
},
"tests": [
{
"title": "positive_test_with_mostly_ascii",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "mostly_ascii", "mostly": 0.6},
"out": {
"success": True,
"unexpected_index_list": [3],
"unexpected_list": ["””"],
},
},
{
"title": "positive_test_with_mostly_numbers",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "mostly_numbers", "mostly": 1.0},
"out": {
"success": True,
"unexpected_index_list": [],
"unexpected_list": [],
},
},
{
"title": "positive_test_with_mostly_ascii",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "mostly_characters", "mostly": 1.0},
"out": {
"success": True,
"unexpected_index_list": [],
"unexpected_list": [],
},
},
{
"title": "negative_test_with_non_ascii",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "not_ascii", "mostly": 1.0},
"out": {
"success": False,
"unexpected_index_list": [0, 1, 2, 3, 4, 5, 6],
},
},
],
}
]
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [ # Tags for this Expectation in the gallery
"experimental",
"hackathon-20200123",
],
"contributors": [ # Github handles for all contributors to this Expectation.
"@jsteinberg4",
"@vraimondi04",
"@talagluck",
"@lodeous",
"@rexboyce",
"@bragleg",
],
}
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.are_ascii"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
# Please see {some doc} for more information about domain and success keys, and other arguments to Expectations
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This method defines a question Renderer
# For more info on Renderers, see {some doc}
#!!! This example renderer should render RenderedStringTemplateContent, not just a string
# @classmethod
# @renderer(renderer_type="renderer.question")
# def _question_renderer(
# cls, configuration, result=None, runtime_configuration=None
# ):
# column = configuration.kwargs.get("column")
# mostly = configuration.kwargs.get("mostly")
# return f'Do at least {mostly * 100}% of values in column "{column}" equal 3?'
# This method defines an answer Renderer
#!!! This example renderer should render RenderedStringTemplateContent, not just a string
# @classmethod
# @renderer(renderer_type="renderer.answer")
# def _answer_renderer(
# cls, configuration=None, result=None, runtime_configuration=None
# ):
# column = result.expectation_config.kwargs.get("column")
# mostly = result.expectation_config.kwargs.get("mostly")
# regex = result.expectation_config.kwargs.get("regex")
# if result.success:
# return f'At least {mostly * 100}% of values in column "{column}" equal 3.'
# else:
# return f'Less than {mostly * 100}% of values in column "{column}" equal 3.'
# This method defines a prescriptive Renderer
# @classmethod
# @renderer(renderer_type="renderer.prescriptive")
# @render_suite_parameter_string
# def _prescriptive_renderer(
# cls,
# configuration=None,
# result=None,
# runtime_configuration=None,
# **kwargs,
# ):
#!!! This example renderer should be shorter
# runtime_configuration = runtime_configuration or {}
# include_column_name = False if runtime_configuration.get("include_column_name") is False else True
# styling = runtime_configuration.get("styling")
# params = substitute_none_for_missing(
# configuration.kwargs,
# ["column", "regex", "mostly", "row_condition", "condition_parser"],
# )
# template_str = "values must be equal to 3"
# if params["mostly"] is not None:
# params["mostly_pct"] = num_to_str(
# params["mostly"] * 100, precision=15, no_scientific=True
# )
# # params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
# template_str += ", at least $mostly_pct % of the time."
# else:
# template_str += "."
# if include_column_name:
# template_str = "$column " + template_str
# if params["row_condition"] is not None:
# (
# conditional_template_str,
# conditional_params,
# ) = parse_row_condition_string_pandas_engine(params["row_condition"])
# template_str = conditional_template_str + ", then " + template_str
# params.update(conditional_params)
# return [
# RenderedStringTemplateContent(
# **{
# "content_block_type": "string_template",
# "string_template": {
# "template": template_str,
# "params": params,
# "styling": styling,
# },
# }
# )
# ]
if __name__ == "__main__":
ExpectColumnValuesToBeAscii().print_diagnostic_checklist()
| ExpectColumnValuesToBeAscii |
python | django__django | tests/forms_tests/tests/test_input_formats.py | {
"start": 12630,
"end": 17152
} | class ____(SimpleTestCase):
@classmethod
def setUpClass(cls):
cls.enterClassContext(translation.override("de"))
super().setUpClass()
def test_dateField(self):
"DateFields can parse dates in the default format"
f = forms.DateField()
# Parse a date in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("21/12/2010")
# ISO formats are accepted, even if not specified in formats.py
self.assertEqual(f.clean("2010-12-21"), date(2010, 12, 21))
# Parse a date in a valid format, get a parsed result
result = f.clean("21.12.2010")
self.assertEqual(result, date(2010, 12, 21))
# The parsed result does a round trip
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010")
# Parse a date in a valid, but non-default format, get a parsed result
result = f.clean("21.12.10")
self.assertEqual(result, date(2010, 12, 21))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010")
def test_localized_dateField(self):
"Localized DateFields act as unlocalized widgets"
f = forms.DateField(localize=True)
# Parse a date in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("21/12/2010")
# Parse a date in a valid format, get a parsed result
result = f.clean("21.12.2010")
self.assertEqual(result, date(2010, 12, 21))
# The parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010")
# Parse a date in a valid format, get a parsed result
result = f.clean("21.12.10")
self.assertEqual(result, date(2010, 12, 21))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010")
def test_dateField_with_inputformat(self):
"""
DateFields with manually specified input formats can accept those
formats
"""
f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"])
# Parse a date in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("2010-12-21")
with self.assertRaises(ValidationError):
f.clean("21/12/2010")
with self.assertRaises(ValidationError):
f.clean("21.12.2010")
# Parse a date in a valid format, get a parsed result
result = f.clean("12.21.2010")
self.assertEqual(result, date(2010, 12, 21))
# The parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010")
# Parse a date in a valid format, get a parsed result
result = f.clean("12-21-2010")
self.assertEqual(result, date(2010, 12, 21))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010")
def test_localized_dateField_with_inputformat(self):
"""
Localized DateFields with manually specified input formats can accept
those formats.
"""
f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"], localize=True)
# Parse a date in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("2010-12-21")
with self.assertRaises(ValidationError):
f.clean("21/12/2010")
with self.assertRaises(ValidationError):
f.clean("21.12.2010")
# Parse a date in a valid format, get a parsed result
result = f.clean("12.21.2010")
self.assertEqual(result, date(2010, 12, 21))
# The parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010")
# Parse a date in a valid format, get a parsed result
result = f.clean("12-21-2010")
self.assertEqual(result, date(2010, 12, 21))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010")
@override_settings(DATE_INPUT_FORMATS=["%d.%m.%Y", "%d-%m-%Y"])
| LocalizedDateTests |
python | openai__openai-python | src/openai/types/evals/run_cancel_response.py | {
"start": 7166,
"end": 7635
} | class ____(BaseModel):
item_reference: str
"""A reference to a variable in the `item` namespace. Ie, "item.name" """
type: Literal["item_reference"]
"""The type of input messages. Always `item_reference`."""
DataSourceResponsesInputMessages: TypeAlias = Annotated[
Union[DataSourceResponsesInputMessagesTemplate, DataSourceResponsesInputMessagesItemReference],
PropertyInfo(discriminator="type"),
]
| DataSourceResponsesInputMessagesItemReference |
python | django__django | tests/admin_docs/test_views.py | {
"start": 21124,
"end": 23762
} | class ____(TestDataMixin, AdminDocsTestCase):
def test_model_index_superuser(self):
self.client.force_login(self.superuser)
index_url = reverse("django-admindocs-models-index")
response = self.client.get(index_url)
self.assertContains(
response,
'<a href="/admindocs/models/admin_docs.family/">Family</a>',
html=True,
)
self.assertContains(
response,
'<a href="/admindocs/models/admin_docs.person/">Person</a>',
html=True,
)
self.assertContains(
response,
'<a href="/admindocs/models/admin_docs.company/">Company</a>',
html=True,
)
def test_model_index_with_model_permission(self):
staff_user = User.objects.create_user(
username="staff", password="secret", is_staff=True
)
self.client.force_login(staff_user)
index_url = reverse("django-admindocs-models-index")
response = self.client.get(index_url)
# Models are not listed without permissions.
self.assertNotContains(
response,
'<a href="/admindocs/models/admin_docs.family/">Family</a>',
html=True,
)
self.assertNotContains(
response,
'<a href="/admindocs/models/admin_docs.person/">Person</a>',
html=True,
)
self.assertNotContains(
response,
'<a href="/admindocs/models/admin_docs.company/">Company</a>',
html=True,
)
company_content_type = ContentType.objects.get_for_model(Company)
person_content_type = ContentType.objects.get_for_model(Person)
view_company = Permission.objects.get(
codename="view_company", content_type=company_content_type
)
change_person = Permission.objects.get(
codename="change_person", content_type=person_content_type
)
staff_user.user_permissions.add(view_company, change_person)
response = self.client.get(index_url)
# View or change permission grants access.
self.assertNotContains(
response,
'<a href="/admindocs/models/admin_docs.family/">Family</a>',
html=True,
)
self.assertContains(
response,
'<a href="/admindocs/models/admin_docs.person/">Person</a>',
html=True,
)
self.assertContains(
response,
'<a href="/admindocs/models/admin_docs.company/">Company</a>',
html=True,
)
| TestModelIndexView |
python | huggingface__transformers | src/transformers/models/csm/modeling_csm.py | {
"start": 13390,
"end": 16534
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: CsmConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.rotary_fn = apply_rotary_pos_emb
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| CsmAttention |
python | django__django | django/contrib/gis/db/models/functions.py | {
"start": 7853,
"end": 8164
} | class ____(GeoFunc):
output_field = TextField()
def __init__(self, expression, precision=8, **extra):
expressions = [expression]
if precision is not None:
expressions.append(self._handle_param(precision, "precision", int))
super().__init__(*expressions, **extra)
| AsKML |
python | google__flatbuffers | tests/MyGame/Example/StructOfStructsOfStructs.py | {
"start": 176,
"end": 1114
} | class ____(object):
__slots__ = ['_tab']
@classmethod
def SizeOf(cls):
return 20
# StructOfStructsOfStructs
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# StructOfStructsOfStructs
def A(self, obj):
obj.Init(self._tab.Bytes, self._tab.Pos + 0)
return obj
def CreateStructOfStructsOfStructs(builder, a_a_id, a_a_distance, a_b_a, a_b_b, a_c_id, a_c_distance):
builder.Prep(4, 20)
builder.Prep(4, 20)
builder.Prep(4, 8)
builder.PrependUint32(a_c_distance)
builder.PrependUint32(a_c_id)
builder.Prep(2, 4)
builder.Pad(1)
builder.PrependInt8(a_b_b)
builder.PrependInt16(a_b_a)
builder.Prep(4, 8)
builder.PrependUint32(a_a_distance)
builder.PrependUint32(a_a_id)
return builder.Offset()
import MyGame.Example.StructOfStructs
try:
from typing import Optional
except:
pass
| StructOfStructsOfStructs |
python | spack__spack | lib/spack/spack/buildcache_migrate.py | {
"start": 1669,
"end": 13075
} | class ____(spack.error.SpackError):
"""
Raised when migration fails irrevocably
"""
def __init__(self, msg):
super().__init__(msg)
def _migrate_spec(
s: spack.spec.Spec, mirror_url: str, tmpdir: str, unsigned: bool = False, signing_key: str = ""
) -> MigrateSpecResult:
"""Parallelizable function to migrate a single spec"""
print_spec = f"{s.name}/{s.dag_hash()[:7]}"
# Check if the spec file exists in the new location and exit early if so
v3_cache_class = get_url_buildcache_class(layout_version=3)
v3_cache_entry = v3_cache_class(mirror_url, s, allow_unsigned=unsigned)
exists = v3_cache_entry.exists([BuildcacheComponent.SPEC, BuildcacheComponent.TARBALL])
v3_cache_entry.destroy()
if exists:
msg = f"No need to migrate {print_spec}"
return MigrateSpecResult(True, msg)
# Try to fetch the spec metadata
v2_metadata_urls = [
url_util.join(mirror_url, "build_cache", v2_tarball_name(s, ".spec.json.sig"))
]
if unsigned:
v2_metadata_urls.append(
url_util.join(mirror_url, "build_cache", v2_tarball_name(s, ".spec.json"))
)
spec_contents = None
for meta_url in v2_metadata_urls:
try:
_, _, meta_file = web_util.read_from_url(meta_url)
spec_contents = codecs.getreader("utf-8")(meta_file).read()
v2_spec_url = meta_url
break
except (web_util.SpackWebError, OSError):
pass
else:
msg = f"Unable to read metadata for {print_spec}"
return MigrateSpecResult(False, msg)
spec_dict = {}
if unsigned:
# User asked for unsigned, if we found a signed specfile, just ignore
# the signature
if v2_spec_url.endswith(".sig"):
spec_dict = spack.spec.Spec.extract_json_from_clearsig(spec_contents)
else:
spec_dict = json.loads(spec_contents)
else:
# User asked for signed, we must successfully verify the signature
local_signed_pre_verify = os.path.join(
tmpdir, f"{s.name}_{s.dag_hash()}_verify.spec.json.sig"
)
with open(local_signed_pre_verify, "w", encoding="utf-8") as fd:
fd.write(spec_contents)
if not try_verify(local_signed_pre_verify):
return MigrateSpecResult(False, f"Failed to verify signature of {print_spec}")
with open(local_signed_pre_verify, encoding="utf-8") as fd:
spec_dict = spack.spec.Spec.extract_json_from_clearsig(fd.read())
# Read out and remove the bits needed to rename and position the archive
bcc = spec_dict.pop("binary_cache_checksum", None)
if not bcc:
msg = "Cannot migrate a spec that does not have 'binary_cache_checksum'"
return MigrateSpecResult(False, msg)
algorithm = bcc["hash_algorithm"]
checksum = bcc["hash"]
# TODO: Remove this key once oci buildcache no longer uses it
spec_dict["buildcache_layout_version"] = 2
v2_archive_url = url_util.join(mirror_url, "build_cache", v2_tarball_path_name(s, ".spack"))
# spacks web utilities do not include direct copying of s3 objects, so we
# need to download the archive locally, and then push it back to the target
# location
archive_stage_path = os.path.join(tmpdir, f"archive_stage_{s.name}_{s.dag_hash()}")
archive_stage = spack.stage.Stage(v2_archive_url, path=archive_stage_path)
try:
archive_stage.create()
archive_stage.fetch()
except spack.error.FetchError:
return MigrateSpecResult(False, f"Unable to fetch archive for {print_spec}")
local_tarfile_path = archive_stage.save_filename
# As long as we have to download the tarball anyway, we might as well compute the
# checksum locally and check it against the expected value
local_checksum = spack.util.crypto.checksum(
spack.util.crypto.hash_fun_for_algo(algorithm), local_tarfile_path
)
if local_checksum != checksum:
return MigrateSpecResult(
False, f"Checksum mismatch for {print_spec}: expected {checksum}, got {local_checksum}"
)
spec_dict["archive_size"] = os.stat(local_tarfile_path).st_size
# Compress the spec dict and compute its checksum
metadata_checksum_algo = "sha256"
spec_json_path = os.path.join(tmpdir, f"{s.name}_{s.dag_hash()}.spec.json")
metadata_checksum, metadata_size = compressed_json_from_dict(
spec_json_path, spec_dict, metadata_checksum_algo
)
tarball_blob_record = BlobRecord(
spec_dict["archive_size"], v3_cache_class.TARBALL_MEDIATYPE, "gzip", algorithm, checksum
)
metadata_blob_record = BlobRecord(
metadata_size,
v3_cache_class.SPEC_MEDIATYPE,
"gzip",
metadata_checksum_algo,
metadata_checksum,
)
# Compute the urls to the new blobs
v3_archive_url = v3_cache_class.get_blob_url(mirror_url, tarball_blob_record)
v3_spec_url = v3_cache_class.get_blob_url(mirror_url, metadata_blob_record)
# First push the tarball
tty.debug(f"Pushing {local_tarfile_path} to {v3_archive_url}")
try:
web_util.push_to_url(local_tarfile_path, v3_archive_url, keep_original=True)
except Exception:
return MigrateSpecResult(False, f"Failed to push archive for {print_spec}")
# Then push the spec file
tty.debug(f"Pushing {spec_json_path} to {v3_spec_url}")
try:
web_util.push_to_url(spec_json_path, v3_spec_url, keep_original=True)
except Exception:
return MigrateSpecResult(False, f"Failed to push spec metadata for {print_spec}")
# Generate the manifest and write it to a temporary location
manifest = {
"version": v3_cache_class.get_layout_version(),
"data": [tarball_blob_record.to_dict(), metadata_blob_record.to_dict()],
}
manifest_path = os.path.join(tmpdir, f"{s.dag_hash()}.manifest.json")
with open(manifest_path, "w", encoding="utf-8") as f:
json.dump(manifest, f, indent=0, separators=(",", ":"))
# Note: when using gpg clear sign, we need to avoid long lines (19995
# chars). If lines are longer, they are truncated without error. So,
# here we still add newlines, but no indent, so save on file size and
# line length.
# Possibly sign the manifest
if not unsigned:
manifest_path = sign_file(signing_key, manifest_path)
v3_manifest_url = v3_cache_class.get_manifest_url(s, mirror_url)
# Push the manifest
try:
web_util.push_to_url(manifest_path, v3_manifest_url, keep_original=True)
except Exception:
return MigrateSpecResult(False, f"Failed to push manifest for {print_spec}")
return MigrateSpecResult(True, f"Successfully migrated {print_spec}")
def migrate(
mirror: spack.mirrors.mirror.Mirror, unsigned: bool = False, delete_existing: bool = False
) -> None:
"""Perform migration of the given mirror
If unsigned is True, signatures on signed specs will be ignored, and specs
will not be re-signed before pushing to the new location. Otherwise, spack
will attempt to verify signatures and re-sign specs, and will fail if not
able to do so. If delete_existing is True, spack will delete the original
contents of the mirror once the migration is complete."""
signing_key = ""
if not unsigned:
try:
signing_key = spack.binary_distribution.select_signing_key()
except (
spack.binary_distribution.NoKeyException,
spack.binary_distribution.PickKeyException,
):
raise MigrationException(
"Signed migration requires exactly one secret key in keychain"
)
delete_action = "deleting" if delete_existing else "keeping"
sign_action = "an unsigned" if unsigned else "a signed"
mirror_url = mirror.fetch_url
tty.msg(
f"Performing {sign_action} migration of {mirror.push_url} "
f"and {delete_action} existing contents"
)
index_url = url_util.join(mirror_url, "build_cache", spack_db.INDEX_JSON_FILE)
contents = None
try:
_, _, index_file = web_util.read_from_url(index_url)
contents = codecs.getreader("utf-8")(index_file).read()
except (web_util.SpackWebError, OSError):
raise MigrationException("Buildcache migration requires a buildcache index")
with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
index_path = os.path.join(tmpdir, "_tmp_index.json")
with open(index_path, "w", encoding="utf-8") as fd:
fd.write(contents)
db = spack.binary_distribution.BuildCacheDatabase(tmpdir)
db._read_from_file(pathlib.Path(index_path))
specs_to_migrate = [
s
for s in db.query_local(installed=InstallRecordStatus.ANY)
# todo, make it easer to get install records associated with specs
if not s.external and db._data[s.dag_hash()].in_buildcache
]
# Run the tasks in parallel if possible
executor = spack.util.parallel.make_concurrent_executor()
migrate_futures = [
executor.submit(_migrate_spec, spec, mirror_url, tmpdir, unsigned, signing_key)
for spec in specs_to_migrate
]
success_count = 0
tty.msg("Migration summary:")
for spec, migrate_future in zip(specs_to_migrate, migrate_futures):
result = migrate_future.result()
msg = f" {spec.name}/{spec.dag_hash()[:7]}: {result.message}"
if result.success:
success_count += 1
tty.msg(msg)
else:
tty.error(msg)
# The migrated index should have the same specs as the original index,
# modulo any specs that we failed to migrate for whatever reason. So
# to avoid having to re-fetch all the spec files now, just mark them
# appropriately in the existing database and push that.
db.mark(spec, "in_buildcache", result.success)
if success_count > 0:
tty.msg("Updating index and pushing keys")
# If the layout.json doesn't yet exist on this mirror, push it
v3_cache_class = get_url_buildcache_class(layout_version=3)
v3_cache_class.maybe_push_layout_json(mirror_url)
# Push the migrated mirror index
index_tmpdir = os.path.join(tmpdir, "rebuild_index")
os.mkdir(index_tmpdir)
spack.binary_distribution._push_index(db, index_tmpdir, mirror_url)
# Push the public part of the signing key
if not unsigned:
keys_tmpdir = os.path.join(tmpdir, "keys")
os.mkdir(keys_tmpdir)
spack.binary_distribution._url_push_keys(
mirror_url, keys=[signing_key], update_index=True, tmpdir=keys_tmpdir
)
else:
tty.warn("No specs migrated, did you mean to perform an unsigned migration instead?")
# Delete the old layout if the user requested it
if delete_existing:
delete_prefix = url_util.join(mirror_url, "build_cache")
tty.msg(f"Recursively deleting {delete_prefix}")
web_util.remove_url(delete_prefix, recursive=True)
tty.msg("Migration complete")
| MigrationException |
python | huggingface__transformers | src/transformers/models/blip_2/modeling_blip_2.py | {
"start": 57369,
"end": 61193
} | class ____(Blip2PreTrainedModel):
main_input_name = "pixel_values"
input_modalities = ("image",)
_keep_in_fp32_modules = ["query_tokens", "qformer"]
_supports_flash_attn = False # because self.qformer does not support FA2
def __init__(self, config: Blip2Config):
super().__init__(config)
self.vision_model = Blip2VisionModel._from_config(config.vision_config)
self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
self.qformer = Blip2QFormerModel._from_config(config.qformer_config)
# vision projection layer
self.vision_projection = nn.Linear(config.qformer_config.hidden_size, config.image_text_hidden_size)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.vision_model.embeddings.patch_embedding
@can_return_tuple
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Blip2VisionModelOutput]:
r"""
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, Blip2VisionModelWithProjection
>>> from transformers.image_utils import load_image
>>> device = "cuda" if torch.cuda.is_available() else "cpu"
>>> processor = AutoProcessor.from_pretrained("Salesforce/blip2-itm-vit-g")
>>> model = Blip2VisionModelWithProjection.from_pretrained(
... "Salesforce/blip2-itm-vit-g", dtype=torch.float16
... )
>>> model.to(device) # doctest: +IGNORE_RESULT
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = load_image(url)
>>> inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)
>>> with torch.inference_mode():
... outputs = model(**inputs)
>>> image_embeds = outputs.image_embeds
>>> print(image_embeds.shape)
torch.Size([1, 32, 256])
```"""
vision_outputs = self.vision_model(
pixel_values=pixel_values,
**kwargs,
)
pooled_output = vision_outputs[0]
image_attention_mask = torch.ones(pooled_output.size()[:-1], dtype=torch.long, device=pooled_output.device)
query_tokens = self.query_tokens.expand(pooled_output.shape[0], -1, -1)
query_outputs = self.qformer(
query_embeds=query_tokens,
encoder_hidden_states=pooled_output,
encoder_attention_mask=image_attention_mask,
**kwargs,
)
embeds = query_outputs[0]
embeds = embeds.to(dtype=self.vision_projection.weight.dtype)
image_embeds = self.vision_projection(embeds)
image_embeds = nn.functional.normalize(image_embeds, dim=-1)
return Blip2VisionModelOutput(
image_embeds=image_embeds,
last_hidden_state=vision_outputs.last_hidden_state,
hidden_states=vision_outputs.hidden_states,
attentions=vision_outputs.attentions,
)
@auto_docstring(
custom_intro="""
BLIP-2 Model for generating text given an image and an optional text prompt. The model consists of a vision
encoder, Querying Transformer (Q-Former) and a language model.
One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the language model continue
the prompt. Otherwise, the language model starts generating text from the [BOS] (beginning-of-sequence) token.
<Tip>
Note that Flan-T5 checkpoints cannot be cast to float16. They are pre-trained using bfloat16.
</Tip>
"""
)
| Blip2VisionModelWithProjection |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_name/invalid_name_enum.py | {
"start": 826,
"end": 1022
} | class ____(str, Enum):
""" A false positive for ``invalid-name``
which should be fixed by https://github.com/pylint-dev/astroid/issues/2317
"""
ASD: str = 1 # [invalid-name]
| Something |
python | pytorch__pytorch | torch/_vendor/packaging/version.py | {
"start": 974,
"end": 1479
} | class ____(NamedTuple):
epoch: int
release: Tuple[int, ...]
dev: Optional[Tuple[str, int]]
pre: Optional[Tuple[str, int]]
post: Optional[Tuple[str, int]]
local: Optional[LocalType]
def parse(version: str) -> "Version":
"""Parse the given version string.
>>> parse('1.0.dev1')
<Version('1.0.dev1')>
:param version: The version string to parse.
:raises InvalidVersion: When the version string is not a valid version.
"""
return Version(version)
| _Version |
python | PrefectHQ__prefect | tests/test_settings.py | {
"start": 39749,
"end": 48191
} | class ____:
def test_get_value_root_setting(self):
with temporary_settings(
updates={PREFECT_API_URL: "test"}
): # Set a value so its not null
assert PREFECT_API_URL.value() == "test"
assert get_current_settings().api.url == "test"
def test_get_value_nested_setting(self):
value = prefect.settings.PREFECT_LOGGING_LEVEL.value()
value_of = get_current_settings().logging.level
value_from = PREFECT_LOGGING_LEVEL.value_from(get_current_settings())
assert value == value_of == value_from
def test_test_mode_access(self):
assert PREFECT_TEST_MODE.value() is True
def test_settings_in_truthy_statements_use_value(self):
if PREFECT_TEST_MODE:
assert True, "Treated as truth"
else:
assert False, "Not treated as truth"
with temporary_settings(updates={PREFECT_TEST_MODE: False}):
if not PREFECT_TEST_MODE:
assert True, "Treated as truth"
else:
assert False, "Not treated as truth"
# Test with a non-boolean setting
if PREFECT_SERVER_API_HOST:
assert True, "Treated as truth"
else:
assert False, "Not treated as truth"
with temporary_settings(updates={PREFECT_SERVER_API_HOST: ""}):
if not PREFECT_SERVER_API_HOST:
assert True, "Treated as truth"
else:
assert False, "Not treated as truth"
@pytest.mark.parametrize(
"value,expected",
[
(None, []),
("foo", ["foo"]),
("foo,bar", ["foo", "bar"]),
("foo, bar, foobar ", ["foo", "bar", "foobar"]),
(["foo", "bar"], ["foo", "bar"]),
],
ids=[
"none",
"string",
"comma_separated",
"comma_separated_with_spaces",
"python_list",
],
)
def test_extra_loggers(self, value: str | list[str], expected: list[str]):
settings = Settings(logging=LoggingSettings(extra_loggers=value))
assert set(PREFECT_LOGGING_EXTRA_LOGGERS.value_from(settings)) == set(expected)
def test_prefect_home_expands_tilde_in_path(self):
settings = Settings(home="~/test") # type: ignore
assert PREFECT_HOME.value_from(settings) == Path("~/test").expanduser()
@pytest.mark.parametrize(
"api_url,ui_url",
[
(None, None),
(
"https://api.prefect.cloud/api/accounts/ACCOUNT/workspaces/WORKSPACE",
"https://app.prefect.cloud/account/ACCOUNT/workspace/WORKSPACE",
),
("http://my-orion/api", "http://my-orion"),
("https://api.foo.bar", "https://api.foo.bar"),
],
)
def test_ui_url_inferred_from_api_url(self, api_url: str, ui_url: str):
with temporary_settings({PREFECT_API_URL: api_url}):
assert PREFECT_UI_URL.value() == ui_url
def test_ui_url_set_directly(self):
with temporary_settings({PREFECT_UI_URL: "test"}):
assert PREFECT_UI_URL.value() == "test"
@pytest.mark.parametrize(
"api_url,ui_url",
[
# We'll infer that app. and api. subdomains go together for prefect domains
(
"https://api.prefect.cloud/api",
"https://app.prefect.cloud",
),
(
"https://api.theoretical.prefect.bonkers/api",
"https://app.theoretical.prefect.bonkers",
),
(
"https://api.prefect.banooners/api",
"https://app.prefect.banooners",
),
# We'll leave URLs with non-prefect TLDs alone
(
"https://api.theoretical.prefect.customer.com/api",
"https://api.theoretical.prefect.customer.com",
),
# Some day, some day...
(
"https://api.prefect/api",
"https://api.prefect",
),
# We'll leave all other URLs alone
("http://prefect/api", "http://prefect"),
("http://my-cloud/api", "http://my-cloud"),
("https://api.foo.bar", "https://api.foo.bar"),
],
)
def test_cloud_ui_url_inferred_from_cloud_api_url(self, api_url: str, ui_url: str):
with temporary_settings({PREFECT_CLOUD_API_URL: api_url}):
assert PREFECT_CLOUD_UI_URL.value() == ui_url
def test_cloud_ui_url_set_directly(self):
with temporary_settings({PREFECT_CLOUD_UI_URL: "test"}):
assert PREFECT_CLOUD_UI_URL.value() == "test"
def test_ui_api_url_inferred_from_api_url(self):
with temporary_settings({PREFECT_API_URL: "http://my-domain/api"}):
assert PREFECT_UI_API_URL.value() == "http://my-domain/api"
def test_ui_api_url_set_directly(self):
with temporary_settings({PREFECT_UI_API_URL: "http://my-foo-domain/api"}):
assert PREFECT_UI_API_URL.value() == "http://my-foo-domain/api"
def test_ui_api_url_default(self):
default_api_url = PREFECT_API_URL.value()
assert PREFECT_UI_API_URL.value() == default_api_url
assert default_api_url.startswith("http://localhost")
assert default_api_url.endswith("/api")
@pytest.mark.parametrize(
"extra_codes,expected",
[
("", set()),
("400", {400}),
("400,400,400", {400}),
("400,500", {400, 500}),
("400, 401, 402", {400, 401, 402}),
],
)
def test_client_retry_extra_codes(self, extra_codes: str, expected: set[int]):
with temporary_settings({PREFECT_CLIENT_RETRY_EXTRA_CODES: extra_codes}):
assert PREFECT_CLIENT_RETRY_EXTRA_CODES.value() == expected
@pytest.mark.parametrize(
"extra_codes",
[
"foo",
"-1",
"0",
"10",
"400,foo",
"400,500,foo",
],
)
def test_client_retry_extra_codes_invalid(self, extra_codes: str):
with pytest.raises(ValueError):
with temporary_settings({PREFECT_CLIENT_RETRY_EXTRA_CODES: extra_codes}):
PREFECT_CLIENT_RETRY_EXTRA_CODES.value()
def test_default_task_retry_delay_seconds(self):
sample_values_and_expected = (
(None, None),
("", None),
("10", 10.0),
("10,20,30", [10.0, 20.0, 30.0]),
("10.0", 10.0),
(10, 10.0),
([10, 20, 30], [10.0, 20.0, 30.0]),
)
for retry_delay_plaintext_value, expected in sample_values_and_expected:
with temporary_settings(
{PREFECT_TASK_DEFAULT_RETRY_DELAY_SECONDS: retry_delay_plaintext_value}
):
assert PREFECT_TASK_DEFAULT_RETRY_DELAY_SECONDS.value() == expected
def test_deprecated_ENV_VAR_attribute_access(self):
settings = Settings()
value = None
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
value = settings.PREFECT_TEST_MODE
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert (
"Accessing `Settings().PREFECT_TEST_MODE` is deprecated. Use `Settings().testing.test_mode` instead."
in str(w[-1].message)
)
assert value == settings.testing.test_mode
def test_settings_with_serialization_alias(self, monkeypatch: pytest.MonkeyPatch):
assert not Settings().client.metrics.enabled
# Use old value
monkeypatch.setenv("PREFECT_CLIENT_ENABLE_METRICS", "True")
assert Settings().client.metrics.enabled
monkeypatch.delenv("PREFECT_CLIENT_ENABLE_METRICS", raising=False)
assert not Settings().client.metrics.enabled
# Use new value
monkeypatch.setenv("PREFECT_CLIENT_METRICS_ENABLED", "True")
assert Settings().client.metrics.enabled
# Check both can be imported
from prefect.settings import (
PREFECT_CLIENT_ENABLE_METRICS,
PREFECT_CLIENT_METRICS_ENABLED,
)
assert isinstance(PREFECT_CLIENT_ENABLE_METRICS, Setting)
assert isinstance(PREFECT_CLIENT_METRICS_ENABLED, Setting)
| TestSettingAccess |
python | python__mypy | mypy/partially_defined.py | {
"start": 5736,
"end": 6488
} | class ____:
def __init__(self, stmts: list[BranchStatement], scope_type: ScopeType) -> None:
self.branch_stmts: list[BranchStatement] = stmts
self.scope_type = scope_type
self.undefined_refs: dict[str, set[NameExpr]] = {}
def copy(self) -> Scope:
result = Scope([s.copy() for s in self.branch_stmts], self.scope_type)
result.undefined_refs = self.undefined_refs.copy()
return result
def record_undefined_ref(self, o: NameExpr) -> None:
if o.name not in self.undefined_refs:
self.undefined_refs[o.name] = set()
self.undefined_refs[o.name].add(o)
def pop_undefined_ref(self, name: str) -> set[NameExpr]:
return self.undefined_refs.pop(name, set())
| Scope |
python | prabhupant__python-ds | data_structures/graphs/same_path_recursive.py | {
"start": 413,
"end": 1705
} | class ____:
def __init__(self, vertices):
self.graph = defaultdict(list)
self.vertices = vertices
def add_edge(self, u, v):
self.graph[u].append(v)
self.graph[v].append(u)
def dfs(self, vertex, intime, outtime, timer, visited):
visited[vertex] = True
timer += 1
intime[vertex] = timer
for v in self.graph[vertex]:
if visited[v] == False:
self.dfs(v, intime, outtime, timer, visited)
timer += 1
outtime[vertex] = timer
def on_same_path(self, u, v):
intime = [-1] * self.vertices
outtime = [-1] * self.vertices
timer = 0
visited = [False] * self.vertices
for vertex in self.graph:
if visited[vertex] == False:
self.dfs(vertex, intime, outtime, timer, visited)
if (intime[u] < intime[v] and outtime[u] > outtime[v]) \
or (intime[v] < intime[u] and outtime[v] > outtime[u]):
return True
return False
g = Graph(9)
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(2, 5)
g.add_edge(1, 3)
g.add_edge(1, 4)
g.add_edge(4, 6)
g.add_edge(4, 7)
g.add_edge(4, 8)
print(g.on_same_path(0, 4))
print(g.on_same_path(1, 8))
print(g.on_same_path(1, 5)) | Graph |
python | tensorflow__tensorflow | tensorflow/python/trackable/layer_utils.py | {
"start": 1676,
"end": 2079
} | class ____(object):
"""Container for tracking whether a property is in a cached state."""
_in_cached_state = False
def mark_as(self, value): # type: (MutationSentinel, bool) -> bool
may_affect_upstream = (value != self._in_cached_state)
self._in_cached_state = value
return may_affect_upstream
@property
def in_cached_state(self):
return self._in_cached_state
| MutationSentinel |
python | huggingface__transformers | src/transformers/models/evolla/configuration_evolla.py | {
"start": 5051,
"end": 13894
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`EvollaModel`]. It is used to instantiate an
Evolla model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Evolla-10B.
e.g. [westlake-repl/Evolla-10B-hf](https://huggingface.co/westlake-repl/Evolla-10B-hf)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
protein_encoder_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`SaProtConfig`].
vocab_size (`int`, *optional*, defaults to 128256):
Vocabulary size of the Evolla llama model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`EvollaModel`].
hidden_size (`int`, *optional*, defaults to 4096):
Dimensionality of the llama layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 14336):
Dimensionality of the intermediate layers in the llama model.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the llama model.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the llama model.
num_key_value_heads (`int`, *optional*, defaults to 8):
Number of key-value pairs for each attention layer in the llama model.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the llama model. If string, `"gelu"`, `"relu"`,
`"selu"` and `"silu"` are supported.
max_position_embeddings (`int`, *optional*, defaults to 8192):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon value for the RMS-norm layer in the llama model.
rope_parameters (`float`, *optional*):
The scaling factor for the RoPE layer in the llama model.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the attention layer.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention layer.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the MLP layer.
aligner_ffn_mult (`int`, *optional*, defaults to 4):
The FFN multiplier for the aligner layer.
aligner_enable_bias (`bool`, *optional*, defaults to `True`):
Whether to use bias in the aligner layer.
aligner_attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities in the aligner layer.
aligner_num_add_layers (`int`, *optional*, defaults to 8):
The number of additional layers for the aligner layer.
resampler_depth (`int`, *optional*, defaults to 6):
The depth of the resampler layer in the llama model.
resampler_dim_head (`int`, *optional*, defaults to 64):
The dimension of the heads in the resampler layer in the llama model.
resampler_heads (`int`, *optional*, defaults to 8):
The number of heads in the resampler layer in the llama model.
resampler_num_latents (`int`, *optional*, defaults to 64):
The number of latents in the resampler layer in the llama model.
resampler_ff_mult (`int`, *optional*, defaults to 4):
The FFN multiplier for the resampler layer.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
bos_token_id (`int`, *optional*, defaults to 128000):
The id of the *beginning-of-sequence* token.
eos_token_id (`int`, *optional*, defaults to 128009):
The id of the *end-of-sequence* token.
use_cache (`bool`, *optional*, defaults to `False`):
Whether or not the model should return the last key/values attentions (not used by all models).
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to tie the input and output word embeddings.
Example:
```python
>>> from transformers import EvollaModel, EvollaConfig
>>> # Initializing a Evolla evolla-10b style configuration
>>> configuration = EvollaConfig()
>>> # Initializing a model from the evolla-10b style configuration
>>> model = EvollaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "EvollaModel"
sub_configs = {"protein_encoder_config": SaProtConfig}
default_theta = 500000.0
def __init__(
self,
protein_encoder_config: Optional[dict] = None,
vocab_size: Optional[int] = 128256, # llama vocab size
hidden_size: Optional[int] = 4096, # llama hidden size
intermediate_size: Optional[int] = 14336, # llama intermediate size
num_hidden_layers: Optional[int] = 32, # llama num layers
num_attention_heads: Optional[int] = 32, # llama num heads
num_key_value_heads: Optional[int] = 8, # llama num key-value heads
hidden_act: Optional[str] = "silu", # llama activation function
max_position_embeddings: Optional[int] = 8192, # llama rope max length
rms_norm_eps: Optional[int] = 1e-05,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
mlp_bias: Optional[bool] = False,
aligner_ffn_mult: Optional[int] = 4,
aligner_enable_bias: Optional[bool] = True,
aligner_attention_probs_dropout_prob: Optional[float] = 0.1,
aligner_num_add_layers: Optional[int] = 8,
resampler_depth: Optional[int] = 6,
resampler_dim_head: Optional[int] = 64,
resampler_heads: Optional[int] = 8,
resampler_num_latents: Optional[int] = 64,
resampler_ff_mult: Optional[int] = 4,
initializer_range: Optional[float] = 0.02,
pad_token_id: Optional[int] = None,
bos_token_id: Optional[int] = 128000,
eos_token_id: Optional[int] = 128009,
use_cache: Optional[bool] = False,
tie_word_embeddings: Optional[bool] = False,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.max_position_embeddings = max_position_embeddings
self.rms_norm_eps = rms_norm_eps
self.tie_word_embeddings = tie_word_embeddings
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.mlp_bias = mlp_bias
self.aligner_ffn_mult = aligner_ffn_mult
self.aligner_enable_bias = aligner_enable_bias
self.aligner_attention_probs_dropout_prob = aligner_attention_probs_dropout_prob
self.aligner_num_add_layers = aligner_num_add_layers
self.use_cache = use_cache
self.initializer_range = initializer_range
self.resampler_depth = resampler_depth
self.resampler_dim_head = resampler_dim_head
self.resampler_heads = resampler_heads
self.resampler_num_latents = resampler_num_latents
self.resampler_ff_mult = resampler_ff_mult
self.rope_parameters = rope_parameters
# Subconfig
if protein_encoder_config is None:
protein_encoder_config = {}
logger.info("`protein_encoder_config` is `None`. Initializing the `SaProtConfig` with default values.")
self.protein_encoder_config = SaProtConfig(**protein_encoder_config)
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
__all__ = ["EvollaConfig"]
| EvollaConfig |
python | pytorch__pytorch | test/distributed/test_c10d_pypg.py | {
"start": 4710,
"end": 4851
} | class ____(AbstractDDPSingleRank, MultiThreadedTestCase):
@property
def use_wrapper(self):
return False
| TestDDPWithWorkSubclass |
python | pytorch__pytorch | benchmarks/dynamo/pr_time_benchmarks/benchmarks/aotdispatcher_partitioner.py | {
"start": 69,
"end": 1245
} | class ____(BenchmarkBase):
def __init__(self):
super().__init__(
category="aotdispatcher_partitioner",
backend="aot_eager_decomp_partition",
device="cpu",
)
def name(self):
return f"{self.category()}_{self.device()}"
def description(self):
return "partitioner benchmark 1 input and 100 weights, mix of recompute and non-recompute ops"
def _prepare_once(self):
self.weights = [torch.randn(16, 16, requires_grad=True) for _ in range(100)]
self.inp = torch.randn(16, 16)
def _prepare(self):
torch._dynamo.reset()
def _work(self):
@torch.compile(backend=self.backend(), fullgraph=True)
def f(inp, *weights):
x = inp
for w in weights:
x = torch.matmul(w, x).sin().sin()
return x
f(self.inp, *self.weights)
def main():
result_path = sys.argv[1]
all = [
Benchmark(),
]
for benchmark in all:
benchmark.enable_compile_time_instruction_count().collect_all().append_results(
result_path
)
if __name__ == "__main__":
main()
| Benchmark |
python | spack__spack | lib/spack/spack/solver/asp.py | {
"start": 138441,
"end": 139308
} | class ____:
"""ASP functions used to express spec clauses in the BODY of a rule"""
node = fn.attr("node")
namespace = fn.attr("namespace")
virtual_node = fn.attr("virtual_node")
node_platform = fn.attr("node_platform")
node_os = fn.attr("node_os")
node_target = fn.attr("node_target")
variant_value = fn.attr("variant_value")
node_flag = fn.attr("node_flag")
propagate = fn.attr("propagate")
def strip_asp_problem(asp_problem: Iterable[str]) -> List[str]:
"""Remove comments and empty lines from an ASP program."""
def strip_statement(stmt: str) -> str:
lines = [line for line in stmt.split("\n") if not line.startswith("%")]
return "".join(line.strip() for line in lines if line)
value = [strip_statement(stmt) for stmt in asp_problem]
value = [s for s in value if s]
return value
| _Body |
python | apache__thrift | test/py/SerializationTest.py | {
"start": 1515,
"end": 13232
} | class ____(unittest.TestCase):
def setUp(self):
self.v1obj = VersioningTestV1(
begin_in_both=12345,
old_string='aaa',
end_in_both=54321,
)
self.v2obj = VersioningTestV2(
begin_in_both=12345,
newint=1,
newbyte=2,
newshort=3,
newlong=4,
newdouble=5.0,
newstruct=Bonk(message="Hello!", type=123),
newlist=[7, 8, 9],
newset=set([42, 1, 8]),
newmap={1: 2, 2: 3},
newstring="Hola!",
end_in_both=54321,
)
self.bools = Bools(im_true=True, im_false=False)
self.bools_flipped = Bools(im_true=False, im_false=True)
self.large_deltas = LargeDeltas(
b1=self.bools,
b10=self.bools_flipped,
b100=self.bools,
check_true=True,
b1000=self.bools_flipped,
check_false=False,
vertwo2000=VersioningTestV2(newstruct=Bonk(message='World!', type=314)),
a_set2500=set(['lazy', 'brown', 'cow']),
vertwo3000=VersioningTestV2(newset=set([2, 3, 5, 7, 11])),
big_numbers=[2 ** 8, 2 ** 16, 2 ** 31 - 1, -(2 ** 31 - 1)]
)
self.compact_struct = CompactProtoTestStruct(
a_byte=127,
a_i16=32000,
a_i32=1000000000,
a_i64=0xffffffffff,
a_double=5.6789,
a_string="my string",
true_field=True,
false_field=False,
empty_struct_field=Empty(),
byte_list=[-127, -1, 0, 1, 127],
i16_list=[-1, 0, 1, 0x7fff],
i32_list=[-1, 0, 0xff, 0xffff, 0xffffff, 0x7fffffff],
i64_list=[-1, 0, 0xff, 0xffff, 0xffffff, 0xffffffff, 0xffffffffff, 0xffffffffffff, 0xffffffffffffff, 0x7fffffffffffffff],
double_list=[0.1, 0.2, 0.3],
string_list=["first", "second", "third"],
boolean_list=[True, True, True, False, False, False],
struct_list=[Empty(), Empty()],
byte_set=set([-127, -1, 0, 1, 127]),
i16_set=set([-1, 0, 1, 0x7fff]),
i32_set=set([1, 2, 3]),
i64_set=set([-1, 0, 0xff, 0xffff, 0xffffff, 0xffffffff, 0xffffffffff, 0xffffffffffff, 0xffffffffffffff, 0x7fffffffffffffff]),
double_set=set([0.1, 0.2, 0.3]),
string_set=set(["first", "second", "third"]),
boolean_set=set([True, False]),
# struct_set=set([Empty()]), # unhashable instance
byte_byte_map={1: 2},
i16_byte_map={1: 1, -1: 1, 0x7fff: 1},
i32_byte_map={1: 1, -1: 1, 0x7fffffff: 1},
i64_byte_map={0: 1, 1: 1, -1: 1, 0x7fffffffffffffff: 1},
double_byte_map={-1.1: 1, 1.1: 1},
string_byte_map={"first": 1, "second": 2, "third": 3, "": 0},
boolean_byte_map={True: 1, False: 0},
byte_i16_map={1: 1, 2: -1, 3: 0x7fff},
byte_i32_map={1: 1, 2: -1, 3: 0x7fffffff},
byte_i64_map={1: 1, 2: -1, 3: 0x7fffffffffffffff},
byte_double_map={1: 0.1, 2: -0.1, 3: 1000000.1},
byte_string_map={1: "", 2: "blah", 3: "loooooooooooooong string"},
byte_boolean_map={1: True, 2: False},
# list_byte_map # unhashable
# set_byte_map={set([1, 2, 3]) : 1, set([0, 1]) : 2, set([]) : 0}, # unhashable
# map_byte_map # unhashable
byte_map_map={0: {}, 1: {1: 1}, 2: {1: 1, 2: 2}},
byte_set_map={0: set([]), 1: set([1]), 2: set([1, 2])},
byte_list_map={0: [], 1: [1], 2: [1, 2]},
)
self.nested_lists_i32x2 = NestedListsI32x2(
[
[1, 1, 2],
[2, 7, 9],
[3, 5, 8]
]
)
self.nested_lists_i32x3 = NestedListsI32x3(
[
[
[2, 7, 9],
[3, 5, 8]
],
[
[1, 1, 2],
[1, 4, 9]
]
]
)
self.nested_mixedx2 = NestedMixedx2(int_set_list=[
set([1, 2, 3]),
set([1, 4, 9]),
set([1, 2, 3, 5, 8, 13, 21]),
set([-1, 0, 1])
],
# note, the sets below are sets of chars, since the strings are iterated
map_int_strset={10: set('abc'), 20: set('def'), 30: set('GHI')},
map_int_strset_list=[
{10: set('abc'), 20: set('def'), 30: set('GHI')},
{100: set('lmn'), 200: set('opq'), 300: set('RST')},
{1000: set('uvw'), 2000: set('wxy'), 3000: set('XYZ')}]
)
self.nested_lists_bonk = NestedListsBonk(
[
[
[
Bonk(message='inner A first', type=1),
Bonk(message='inner A second', type=1)
],
[
Bonk(message='inner B first', type=2),
Bonk(message='inner B second', type=2)
]
]
]
)
self.list_bonks = ListBonks(
[
Bonk(message='inner A', type=1),
Bonk(message='inner B', type=2),
Bonk(message='inner C', type=0)
]
)
def _serialize(self, obj):
trans = TTransport.TMemoryBuffer()
prot = self.protocol_factory.getProtocol(trans)
obj.write(prot)
return trans.getvalue()
def _deserialize(self, objtype, data):
prot = self.protocol_factory.getProtocol(TTransport.TMemoryBuffer(data))
ret = objtype()
ret.read(prot)
return ret
def testForwards(self):
obj = self._deserialize(VersioningTestV2, self._serialize(self.v1obj))
self.assertEqual(obj.begin_in_both, self.v1obj.begin_in_both)
self.assertEqual(obj.end_in_both, self.v1obj.end_in_both)
def testBackwards(self):
obj = self._deserialize(VersioningTestV1, self._serialize(self.v2obj))
self.assertEqual(obj.begin_in_both, self.v2obj.begin_in_both)
self.assertEqual(obj.end_in_both, self.v2obj.end_in_both)
def testSerializeV1(self):
obj = self._deserialize(VersioningTestV1, self._serialize(self.v1obj))
self.assertEqual(obj, self.v1obj)
def testSerializeV2(self):
obj = self._deserialize(VersioningTestV2, self._serialize(self.v2obj))
self.assertEqual(obj, self.v2obj)
def testBools(self):
self.assertNotEqual(self.bools, self.bools_flipped)
self.assertNotEqual(self.bools, self.v1obj)
obj = self._deserialize(Bools, self._serialize(self.bools))
self.assertEqual(obj, self.bools)
obj = self._deserialize(Bools, self._serialize(self.bools_flipped))
self.assertEqual(obj, self.bools_flipped)
rep = repr(self.bools)
self.assertTrue(len(rep) > 0)
def testLargeDeltas(self):
# test large field deltas (meaningful in CompactProto only)
obj = self._deserialize(LargeDeltas, self._serialize(self.large_deltas))
self.assertEqual(obj, self.large_deltas)
rep = repr(self.large_deltas)
self.assertTrue(len(rep) > 0)
def testNestedListsI32x2(self):
obj = self._deserialize(NestedListsI32x2, self._serialize(self.nested_lists_i32x2))
self.assertEqual(obj, self.nested_lists_i32x2)
rep = repr(self.nested_lists_i32x2)
self.assertTrue(len(rep) > 0)
def testNestedListsI32x3(self):
obj = self._deserialize(NestedListsI32x3, self._serialize(self.nested_lists_i32x3))
self.assertEqual(obj, self.nested_lists_i32x3)
rep = repr(self.nested_lists_i32x3)
self.assertTrue(len(rep) > 0)
def testNestedMixedx2(self):
obj = self._deserialize(NestedMixedx2, self._serialize(self.nested_mixedx2))
self.assertEqual(obj, self.nested_mixedx2)
rep = repr(self.nested_mixedx2)
self.assertTrue(len(rep) > 0)
def testNestedListsBonk(self):
obj = self._deserialize(NestedListsBonk, self._serialize(self.nested_lists_bonk))
self.assertEqual(obj, self.nested_lists_bonk)
rep = repr(self.nested_lists_bonk)
self.assertTrue(len(rep) > 0)
def testListBonks(self):
obj = self._deserialize(ListBonks, self._serialize(self.list_bonks))
self.assertEqual(obj, self.list_bonks)
rep = repr(self.list_bonks)
self.assertTrue(len(rep) > 0)
def testCompactStruct(self):
# test large field deltas (meaningful in CompactProto only)
obj = self._deserialize(CompactProtoTestStruct, self._serialize(self.compact_struct))
self.assertEqual(obj, self.compact_struct)
rep = repr(self.compact_struct)
self.assertTrue(len(rep) > 0)
def testIntegerLimits(self):
bad_values = [CompactProtoTestStruct(a_byte=128), CompactProtoTestStruct(a_byte=-129),
CompactProtoTestStruct(a_i16=32768), CompactProtoTestStruct(a_i16=-32769),
CompactProtoTestStruct(a_i32=2147483648), CompactProtoTestStruct(a_i32=-2147483649),
CompactProtoTestStruct(a_i64=9223372036854775808), CompactProtoTestStruct(a_i64=-9223372036854775809)
]
for value in bad_values:
self.assertRaises(Exception, self._serialize, value)
def testRecTree(self):
"""Ensure recursive tree node can be created."""
children = []
for idx in range(1, 5):
node = RecTree(item=idx, children=None)
children.append(node)
parent = RecTree(item=0, children=children)
serde_parent = self._deserialize(RecTree, self._serialize(parent))
self.assertEqual(0, serde_parent.item)
self.assertEqual(4, len(serde_parent.children))
for child in serde_parent.children:
# Cannot use assertIsInstance in python 2.6?
self.assertTrue(isinstance(child, RecTree))
def _buildLinkedList(self):
head = cur = RecList(item=0)
for idx in range(1, 5):
node = RecList(item=idx)
cur.nextitem = node
cur = node
return head
def _collapseLinkedList(self, head):
out_list = []
cur = head
while cur is not None:
out_list.append(cur.item)
cur = cur.nextitem
return out_list
def testRecList(self):
"""Ensure recursive linked list can be created."""
rec_list = self._buildLinkedList()
serde_list = self._deserialize(RecList, self._serialize(rec_list))
out_list = self._collapseLinkedList(serde_list)
self.assertEqual([0, 1, 2, 3, 4], out_list)
def testCoRec(self):
"""Ensure co-recursive structures can be created."""
item1 = CoRec()
item2 = CoRec2()
item1.other = item2
item2.other = item1
# NOTE [econner724,2017-06-21]: These objects cannot be serialized as serialization
# results in an infinite loop. fbthrift also suffers from this
# problem.
def testRecVector(self):
"""Ensure a list of recursive nodes can be created."""
mylist = [self._buildLinkedList(), self._buildLinkedList()]
myvec = VectorTest(lister=mylist)
serde_vec = self._deserialize(VectorTest, self._serialize(myvec))
golden_list = [0, 1, 2, 3, 4]
for cur_list in serde_vec.lister:
out_list = self._collapseLinkedList(cur_list)
self.assertEqual(golden_list, out_list)
| AbstractTest |
python | getsentry__sentry | src/sentry/auth/view.py | {
"start": 84,
"end": 288
} | class ____(BaseView):
"""
A segment of Provider's auth pipeline.
See ``BaseView`` for capabilities.
"""
auth_required = False
sudo_required = False
__all__ = ("AuthView",)
| AuthView |
python | apache__airflow | providers/yandex/src/airflow/providers/yandex/hooks/yandex.py | {
"start": 1307,
"end": 6451
} | class ____(BaseHook):
"""
A base hook for Yandex.Cloud related tasks.
:param yandex_conn_id: The connection ID to use when fetching connection info
:param default_folder_id: The folder ID to use instead of connection folder ID
:param default_public_ssh_key: The key to use instead of connection key
:param default_service_account_id: The service account ID to use instead of key service account ID
"""
conn_name_attr = conn_name_attr
default_conn_name = default_conn_name
conn_type = conn_type
hook_name = hook_name
@classmethod
def get_connection_form_widgets(cls) -> dict[str, Any]:
"""Return connection widgets to add to Yandex connection form."""
from flask_appbuilder.fieldwidgets import BS3PasswordFieldWidget, BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import PasswordField, StringField
return {
"service_account_json": PasswordField(
lazy_gettext("Service account auth JSON"),
widget=BS3PasswordFieldWidget(),
description="Service account auth JSON. Looks like "
'{"id": "...", "service_account_id": "...", "private_key": "..."}. '
"Will be used instead of OAuth token and SA JSON file path field if specified.",
),
"service_account_json_path": StringField(
lazy_gettext("Service account auth JSON file path"),
widget=BS3TextFieldWidget(),
description="Service account auth JSON file path. File content looks like "
'{"id": "...", "service_account_id": "...", "private_key": "..."}. '
"Will be used instead of OAuth token if specified.",
),
"oauth": PasswordField(
lazy_gettext("OAuth Token"),
widget=BS3PasswordFieldWidget(),
description="User account OAuth token. "
"Either this or service account JSON must be specified.",
),
"folder_id": StringField(
lazy_gettext("Default folder ID"),
widget=BS3TextFieldWidget(),
description="Optional. "
"If specified, this ID will be used by default when creating nodes and clusters.",
),
"public_ssh_key": StringField(
lazy_gettext("Public SSH key"),
widget=BS3TextFieldWidget(),
description="Optional. The key will be placed to all created Compute nodes, "
"allowing you to have a root shell there.",
),
"endpoint": StringField(
lazy_gettext("API endpoint"),
widget=BS3TextFieldWidget(),
description="Optional. Specify an API endpoint. Leave blank to use default.",
),
}
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Return custom UI field behaviour for Yandex connection."""
return {
"hidden_fields": ["host", "schema", "login", "password", "port", "extra"],
"relabeling": {},
}
def __init__(
self,
yandex_conn_id: str | None = None,
default_folder_id: str | None = None,
default_public_ssh_key: str | None = None,
default_service_account_id: str | None = None,
) -> None:
super().__init__()
self.connection_id = yandex_conn_id or default_conn_name
self.connection = self.get_connection(self.connection_id)
self.extras = self.connection.extra_dejson
self.credentials: CredentialsType = get_credentials(
oauth_token=self._get_field("oauth"),
service_account_json=self._get_field("service_account_json"),
service_account_json_path=self._get_field("service_account_json_path"),
)
sdk_config = self._get_endpoint()
self.sdk = yandexcloud.SDK(
user_agent=provider_user_agent(),
token=self.credentials.get("token"),
service_account_key=self.credentials.get("service_account_key"),
endpoint=sdk_config.get("endpoint"),
)
self.default_folder_id = default_folder_id or self._get_field("folder_id")
self.default_public_ssh_key = default_public_ssh_key or self._get_field("public_ssh_key")
self.default_service_account_id = default_service_account_id or get_service_account_id(
service_account_json=self._get_field("service_account_json"),
service_account_json_path=self._get_field("service_account_json_path"),
)
self.client = self.sdk.client
def _get_endpoint(self) -> dict[str, str]:
sdk_config = {}
endpoint = self._get_field("endpoint")
if endpoint:
sdk_config["endpoint"] = endpoint
return sdk_config
def _get_field(self, field_name: str, default: Any = None) -> Any:
if not hasattr(self, "extras"):
return default
return get_field_from_extras(self.extras, field_name, default)
| YandexCloudBaseHook |
python | langchain-ai__langchain | libs/core/tests/unit_tests/_api/test_beta_decorator.py | {
"start": 1675,
"end": 10944
} | class ____:
def __init__(self) -> None:
"""Original doc."""
@beta()
def beta_method(self) -> str:
"""Original doc."""
return "This is a beta method."
@beta()
async def beta_async_method(self) -> str:
"""Original doc."""
return "This is a beta async method."
@classmethod
@beta()
def beta_classmethod(cls) -> str:
"""Original doc."""
return "This is a beta classmethod."
@staticmethod
@beta()
def beta_staticmethod() -> str:
"""Original doc."""
return "This is a beta staticmethod."
@property
def beta_property(self) -> str:
"""Original doc."""
return "This is a beta property."
@beta_property.setter
def beta_property(self, _value: str) -> None:
pass
@beta() # type: ignore[misc]
@beta_property.deleter
def beta_property(self) -> None:
pass
def test_beta_function() -> None:
"""Test beta function."""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
assert beta_function() == "This is a beta function."
assert len(warning_list) == 1
warning = warning_list[0].message
assert str(warning) == (
"The function `beta_function` is in beta. It is actively being "
"worked on, "
"so the API may change."
)
doc = beta_function.__doc__
assert isinstance(doc, str)
assert doc.startswith(".. beta::")
assert not inspect.iscoroutinefunction(beta_function)
async def test_beta_async_function() -> None:
"""Test beta async function."""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
assert await beta_async_function() == "This is a beta async function."
assert len(warning_list) == 1
warning = warning_list[0].message
assert str(warning) == (
"The function `beta_async_function` is in beta. "
"It is actively being worked on, so the API may change."
)
doc = beta_function.__doc__
assert isinstance(doc, str)
assert doc.startswith(".. beta::")
assert inspect.iscoroutinefunction(beta_async_function)
def test_beta_method() -> None:
"""Test beta method."""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
obj = ClassWithBetaMethods()
assert obj.beta_method() == "This is a beta method."
assert len(warning_list) == 1
warning = warning_list[0].message
assert str(warning) == (
"The method `ClassWithBetaMethods.beta_method` is in beta. It is actively "
"being worked on, so "
"the API may change."
)
doc = obj.beta_method.__doc__
assert isinstance(doc, str)
assert doc.startswith(".. beta::")
assert not inspect.iscoroutinefunction(obj.beta_method)
async def test_beta_async_method() -> None:
"""Test beta method."""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
obj = ClassWithBetaMethods()
assert await obj.beta_async_method() == "This is a beta async method."
assert len(warning_list) == 1
warning = warning_list[0].message
assert str(warning) == (
"The method `ClassWithBetaMethods.beta_async_method` is in beta. "
"It is actively being worked on, so the API may change."
)
doc = obj.beta_method.__doc__
assert isinstance(doc, str)
assert doc.startswith(".. beta::")
assert inspect.iscoroutinefunction(obj.beta_async_method)
def test_beta_classmethod() -> None:
"""Test beta classmethod."""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
ClassWithBetaMethods.beta_classmethod()
assert len(warning_list) == 1
warning = warning_list[0].message
assert str(warning) == (
"The method `ClassWithBetaMethods.beta_classmethod` is in beta. "
"It is actively being worked on, so the API may change."
)
doc = ClassWithBetaMethods.beta_classmethod.__doc__
assert isinstance(doc, str)
assert doc.startswith(".. beta::")
def test_beta_staticmethod() -> None:
"""Test beta staticmethod."""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
assert (
ClassWithBetaMethods.beta_staticmethod() == "This is a beta staticmethod."
)
assert len(warning_list) == 1
warning = warning_list[0].message
assert str(warning) == (
"The method `ClassWithBetaMethods.beta_staticmethod` is in beta. "
"It is actively being worked on, so the API may change."
)
doc = ClassWithBetaMethods.beta_staticmethod.__doc__
assert isinstance(doc, str)
assert doc.startswith(".. beta::")
def test_beta_property() -> None:
"""Test beta staticmethod."""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
obj = ClassWithBetaMethods()
assert obj.beta_property == "This is a beta property."
obj.beta_property = "foo"
del obj.beta_property
assert len(warning_list) == 3
for warning in warning_list:
assert str(warning.message) == (
"The attribute `ClassWithBetaMethods.beta_property` is in beta. "
"It is actively being worked on, so the API may change."
)
doc = ClassWithBetaMethods.beta_property.__doc__
assert isinstance(doc, str)
assert doc.startswith(".. beta::")
def test_whole_class_beta() -> None:
"""Test whole class beta status."""
@beta()
class BetaClass:
def __init__(self) -> None:
"""Original doc."""
@beta()
def beta_method(self) -> str:
"""Original doc."""
return "This is a beta method."
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
obj = BetaClass()
assert obj.beta_method() == "This is a beta method."
assert len(warning_list) == 2
warning = warning_list[0].message
assert str(warning) == (
"The class `test_whole_class_beta.<locals>.BetaClass` is in beta. "
"It is actively being worked on, so the "
"API may change."
)
warning = warning_list[1].message
assert str(warning) == (
"The method `test_whole_class_beta.<locals>.BetaClass.beta_method` "
"is in beta. It is actively being worked on, so "
"the API may change."
)
def test_whole_class_inherited_beta() -> None:
"""Test whole class beta status for inherited class.
The original version of beta decorator created duplicates with
'.. beta::'.
"""
# Test whole class beta status
@beta()
class BetaClass:
@beta()
def beta_method(self) -> str:
"""Original doc."""
return "This is a beta method."
@beta()
class InheritedBetaClass(BetaClass):
@beta()
def beta_method(self) -> str:
"""Original doc."""
return "This is a beta method 2."
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
obj = BetaClass()
assert obj.beta_method() == "This is a beta method."
assert len(warning_list) == 2
warning = warning_list[0].message
assert str(warning) == (
"The class `test_whole_class_inherited_beta.<locals>.BetaClass` "
"is in beta. It is actively being worked on, so the "
"API may change."
)
warning = warning_list[1].message
assert str(warning) == (
"The method `test_whole_class_inherited_beta.<locals>.BetaClass."
"beta_method` is in beta. It is actively being worked on, so "
"the API may change."
)
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
obj = InheritedBetaClass()
assert obj.beta_method() == "This is a beta method 2."
assert len(warning_list) == 2
warning = warning_list[0].message
assert str(warning) == (
"The class `test_whole_class_inherited_beta.<locals>.InheritedBetaClass` "
"is in beta. "
"It is actively being worked on, so the "
"API may change."
)
warning = warning_list[1].message
assert str(warning) == (
"The method `test_whole_class_inherited_beta.<locals>.InheritedBetaClass."
"beta_method` is in beta. "
"It is actively being worked on, so "
"the API may change."
)
# if .. beta:: was inserted only once:
if obj.__doc__ is not None:
assert obj.__doc__.count(".. beta::") == 1
# Tests with pydantic models
| ClassWithBetaMethods |
python | huggingface__transformers | src/transformers/models/vjepa2/configuration_vjepa2.py | {
"start": 715,
"end": 7055
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`VJEPA2Model`]. It is used to instantiate an
VJEPA2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the VJEPA2
[facebook/vjepa2-vitl-fpc64-256](https://huggingface.co/facebook/vjepa2-vitl-fpc64-256) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
crop_size (`int`, *optional*, defaults to 256):
Input resolution of the model
frames_per_clip (`int`, *optional*, defaults to 64):
The number of frames the model has been pretrained with. Does not impact inference.
tubelet_size (`int`, *optional*, defaults to 2):
The number of temporal frames used for a single rastor, check paper for more information.
hidden_size (`int`, *optional*, defaults to 1024):
Dimensionality of the encoder layers
in_chans (`int`, *optional*, defaults to 3):
The number of input channels
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Encoder
num_hidden_layers (`int`, *optional*, defaults to 24):
The number of hidden layers
drop_path_rate (`float`, *optional*, defaults to 0.0):
Stochastic depth rate per sample (when applied in the main path of residual layers).
mlp_ratio (`float`, *optional*, defaults to 4.0):
Ratio of the hidden size of the MLPs used in Encoder relative to the `hidden_size`.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for attentions.
The dropout probability for all fully connected layers.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for attentions.
num_pooler_layers (`int`, *optional*, defaults to 3):
The number of self-attention layers in the pooler.
pred_hidden_size (`int`, *optional*, defaults to 384):
Dimensionality of the predictor layers
pred_num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Predictor
pred_num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Predictor
pred_num_mask_tokens (`int`, *optional*, defaults to 10):
Define the number of mask tokens to use in the Predictor
pred_zero_init_mask_tokens (`bool`, *optional*, defaults to `True`):
Initialize the mask tokens in the predictor with 0.
pred_mlp_ratio (`float`, *optional*, defaults to 4.0):
Ratio of the hidden size of the MLPs used in Predictor relative to the `pred_hidden_size`.
Example:
```python
>>> from transformers import VJEPA2Config, VJEPA2Model
>>> # Initializing a VJEPA2 vjepa2-vitl-fpc64-256 style configuration
>>> configuration = VJEPA2Config()
>>> # Initializing a model (with random weights) from the vjepa2-vitl-fpc64-256 style configuration
>>> model = VJEPA2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "vjepa2"
def __init__(
self,
patch_size=16,
crop_size=256,
frames_per_clip=64,
tubelet_size=2,
hidden_size=1024,
in_chans=3,
num_attention_heads=16,
num_hidden_layers=24,
drop_path_rate=0.0,
mlp_ratio=4.0,
layer_norm_eps=1e-6,
qkv_bias=True,
attention_probs_dropout_prob=0.0,
hidden_act="gelu",
initializer_range=0.02,
attention_dropout=0.0,
num_pooler_layers=3,
# predictor params
pred_hidden_size=384,
pred_num_attention_heads=12,
pred_num_hidden_layers=12,
pred_num_mask_tokens=10,
pred_zero_init_mask_tokens=True,
pred_mlp_ratio=4.0,
**kwargs,
):
super().__init__(**kwargs)
self.crop_size = crop_size
self.frames_per_clip = frames_per_clip
self.patch_size = patch_size
self.tubelet_size = tubelet_size
self.hidden_size = hidden_size
self.in_chans = in_chans
self.num_attention_heads = num_attention_heads
self.num_hidden_layers = num_hidden_layers
self.drop_path_rate = drop_path_rate
self.mlp_ratio = mlp_ratio
self.layer_norm_eps = layer_norm_eps
self.qkv_bias = qkv_bias
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.image_size = crop_size
self.attention_dropout = attention_dropout
self.num_pooler_layers = num_pooler_layers
# predictor params
self.pred_hidden_size = pred_hidden_size
self.pred_num_attention_heads = pred_num_attention_heads
self.pred_num_hidden_layers = pred_num_hidden_layers
self.pred_num_mask_tokens = pred_num_mask_tokens
self.pred_zero_init_mask_tokens = pred_zero_init_mask_tokens
self.pred_mlp_ratio = pred_mlp_ratio
__all__ = ["VJEPA2Config"]
| VJEPA2Config |
python | google__jax | jax/_src/sharding_impls.py | {
"start": 12330,
"end": 18075
} | class ____(jsharding.Sharding):
_devices: xc.DeviceList
_hlo_sharding: xc.HloSharding
_memory_kind: str | None
_internal_device_list: xc.DeviceList
@use_cpp_method()
def __init__(self, devices: Sequence[Device] | xc.DeviceList,
op_sharding: xc.OpSharding | xc.HloSharding,
*, memory_kind: str | None = None):
self._devices = (devices if isinstance(devices, xc.DeviceList) else
xc.DeviceList(tuple(devices)))
self._hlo_sharding = (xc.HloSharding.from_proto(op_sharding)
if isinstance(op_sharding, xc.OpSharding) else
op_sharding)
self._memory_kind = memory_kind
def __reduce__(self):
return (_unpickle_gspmd_sharding,
(self._devices, self._hlo_sharding.to_proto(), self._memory_kind))
@functools.cached_property
def _hlo_sharding_hash(self):
if self.is_fully_replicated:
return hash(replicated_hlo_sharding)
return hash(self._hlo_sharding)
def __eq__(self, other):
if not isinstance(other, GSPMDSharding):
return False
if self is other:
return True
return (are_hlo_shardings_equal(self._hlo_sharding, other._hlo_sharding)
and self.memory_kind == other.memory_kind
and self._internal_device_list == other._internal_device_list)
def __hash__(self):
if not hasattr(self, '_hash'):
self._hash = hash((self._internal_device_list, self._hlo_sharding_hash,
self.memory_kind))
return self._hash
def __repr__(self):
mem = '' if self._memory_kind is None else f', memory_kind={self._memory_kind}'
return f'GSPMDSharding({self._hlo_sharding!r}{mem})'
def check_compatible_aval(self, aval_shape: Shape) -> None:
num_ways_dim_sharded, _ = get_num_ways_dim_sharded(self._hlo_sharding)
if len(aval_shape) < len(num_ways_dim_sharded):
raise ValueError(
f"Sharding {self} is only valid for values of rank at least "
f"{len(num_ways_dim_sharded)}, but was applied to a value of rank "
f"{len(aval_shape)}")
@property
def num_devices(self) -> int:
return len(self._internal_device_list)
@functools.cached_property
def device_set(self) -> set[Device]:
return set(self._devices)
@property
def memory_kind(self) -> str | None:
return self._memory_kind
def with_memory_kind(self, kind: str) -> GSPMDSharding:
return GSPMDSharding(self._devices, self._hlo_sharding, memory_kind=kind)
@property
def _device_assignment(self) -> XLADeviceAssignment:
return tuple(self._devices)
def _to_xla_hlo_sharding(self, num_dimensions: int) -> xc.HloSharding:
return self._hlo_sharding
def _to_sdy_sharding(self, num_dimensions: int) -> SdyArray:
if self._hlo_sharding.tuple_elements():
raise TypeError(
f'Cannot convert GSPMDSharding {self._hlo_sharding} into SdyArray.')
elif self._hlo_sharding.is_replicated():
empty_mesh = mesh_lib.AbstractMesh((), ())
return NamedSharding(empty_mesh, PartitionSpec())._to_sdy_sharding(
num_dimensions)
elif self._hlo_sharding.is_tiled():
if not self._hlo_sharding.is_tile_assignment_iota():
raise TypeError(
f'Cannot convert GSPMDSharding {self._hlo_sharding} into SdyArray.')
axis_sizes = tuple(self._hlo_sharding.get_axis_sizes())
axis_names = tuple(f'_axis_{i}' for i in range(len(axis_sizes)))
mesh = mesh_lib.AbstractMesh(axis_sizes, axis_names)
return _gspmd_to_named_sharding_via_mesh(self, mesh)._to_sdy_sharding(
num_dimensions)
else:
raise TypeError(
f'Cannot convert GSPMDSharding {self._hlo_sharding} into SdyArray.')
@functools.cached_property
def is_fully_replicated(self) -> bool:
return is_hlo_sharding_replicated(self._hlo_sharding)
@functools.cached_property
def is_fully_addressable(self) -> bool:
return self._internal_device_list.is_fully_addressable
@classmethod
def get_replicated(cls, device_assignment, *, memory_kind: str | None = None):
return cls(device_assignment, replicated_hlo_sharding,
memory_kind=memory_kind)
MeshAxisName = Any
def prepare_axis_resources(axis_resources, arg_name,
allow_unconstrained_dims=False):
# PyTrees don't treat None values as leaves, so we use an is_leaf function.
entries, treedef = tree_util.tree_flatten(
axis_resources, is_leaf=lambda x: x is None)
what = f"{arg_name} leaf specifications"
new_entries = []
for entry in entries:
if isinstance(entry, (UnspecifiedValue, AUTO)) or entry is None:
new_entries.append(entry)
elif isinstance(entry, jsharding.Sharding):
if isinstance(entry, PmapSharding):
raise ValueError(f'One of {what} got sharding {entry} which is not '
'allowed.')
if (not allow_unconstrained_dims and isinstance(entry, NamedSharding) and
PartitionSpec.UNCONSTRAINED in entry.spec):
raise ValueError(
f'Unconstrained dims are not allowed when passed to {arg_name}:'
f' {entry}')
new_entries.append(entry)
else:
if not isinstance(entry, PartitionSpec):
raise TypeError(f"{what} are expected to be "
f"PartitionSpec instances or None, but got {entry}")
if not allow_unconstrained_dims and PartitionSpec.UNCONSTRAINED in entry:
raise ValueError(
f'Unconstrained dims are not allowed when passed to {arg_name}:'
f' {entry}')
_check_unique_resources(entry, arg_name)
new_entries.append(entry)
return tree_util.tree_unflatten(treedef, new_entries)
# Axis environments
| GSPMDSharding |
python | doocs__leetcode | solution/2700-2799/2790.Maximum Number of Groups With Increasing Length/Solution.py | {
"start": 0,
"end": 361
} | class ____:
def maxIncreasingGroups(self, usageLimits: List[int]) -> int:
usageLimits.sort()
k, n = 0, len(usageLimits)
for i in range(n):
if usageLimits[i] > k:
k += 1
usageLimits[i] -= k
if i + 1 < n:
usageLimits[i + 1] += usageLimits[i]
return k
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.