language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_composer.py | {
"start": 26401,
"end": 32625
} | class ____(GoogleCloudBaseOperator):
"""
Run Airflow command for provided Composer environment.
:param project_id: The ID of the Google Cloud project that the service belongs to.
:param region: The ID of the Google Cloud region that the service belongs to.
:param environment_id: The ID of the Google Cloud environment that the service belongs to.
:param command: Airflow command.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run operator in the deferrable mode
:param poll_interval: Optional: Control the rate of the poll for the result of deferrable run.
By default, the trigger will poll every 10 seconds.
"""
template_fields = (
"project_id",
"region",
"environment_id",
"command",
"impersonation_chain",
)
def __init__(
self,
*,
project_id: str,
region: str,
environment_id: str,
command: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval: int = 10,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.environment_id = environment_id
self.command = command
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.deferrable = deferrable
self.poll_interval = poll_interval
def execute(self, context: Context):
hook = CloudComposerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Executing the command: [ airflow %s ]...", self.command)
cmd, subcommand, parameters = self._parse_cmd_to_args(self.command)
execution_cmd_info = hook.execute_airflow_command(
project_id=self.project_id,
region=self.region,
environment_id=self.environment_id,
command=cmd,
subcommand=subcommand,
parameters=parameters,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
execution_cmd_info_dict = ExecuteAirflowCommandResponse.to_dict(execution_cmd_info)
self.log.info("Command has been started. execution_id=%s", execution_cmd_info_dict["execution_id"])
if self.deferrable:
self.defer(
trigger=CloudComposerAirflowCLICommandTrigger(
project_id=self.project_id,
region=self.region,
environment_id=self.environment_id,
execution_cmd_info=execution_cmd_info_dict,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
poll_interval=self.poll_interval,
),
method_name=GOOGLE_DEFAULT_DEFERRABLE_METHOD_NAME,
)
return
result = hook.wait_command_execution_result(
project_id=self.project_id,
region=self.region,
environment_id=self.environment_id,
execution_cmd_info=execution_cmd_info_dict,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
poll_interval=self.poll_interval,
)
exit_code = result.get("exit_info", {}).get("exit_code")
if exit_code == 0:
result_str = self._merge_cmd_output_result(result)
self.log.info("Command execution result:\n%s", result_str)
return result
error_output = "".join(line["content"] for line in result.get("error", []))
message = f"Airflow CLI command failed with exit code {exit_code}.\nError output:\n{error_output}"
raise AirflowException(message)
def execute_complete(self, context: Context, event: dict) -> dict:
if event and event["status"] == "error":
raise AirflowException(event["message"])
result: dict = event["result"]
result_str = self._merge_cmd_output_result(result)
self.log.info("Command execution result:\n%s", result_str)
return result
def _parse_cmd_to_args(self, cmd: str) -> tuple:
"""Parse user command to command, subcommand and parameters."""
cmd_dict = shlex.split(cmd)
if not cmd_dict:
raise AirflowException("The provided command is empty.")
command = cmd_dict[0] if len(cmd_dict) >= 1 else None
subcommand = cmd_dict[1] if len(cmd_dict) >= 2 else None
parameters = cmd_dict[2:] if len(cmd_dict) >= 3 else None
return command, subcommand, parameters
def _merge_cmd_output_result(self, result) -> str:
"""Merge output to one string."""
result_str = "\n".join(line_dict["content"] for line_dict in result["output"])
return result_str
| CloudComposerRunAirflowCLICommandOperator |
python | dagster-io__dagster | python_modules/dagster-test/dagster_test/toys/user_computed_data_versions/external_system.py | {
"start": 4630,
"end": 5632
} | class ____:
def __init__(self, storage_path: str):
self.storage_path = storage_path
if not os.path.exists(self.storage_path):
os.mkdir(self.storage_path)
for k, v in _SOURCE_ASSETS.items():
path = self.asset_path(k)
if not os.path.exists(path):
with open(self.asset_path(k), "w") as fd: # source asset
record = _DatabaseRecord(v, _get_hash(v))
fd.write(json.dumps(asdict(record)))
def asset_path(self, key: str) -> str:
return f"{self.storage_path}/{key}.json"
def get(self, key: str) -> _DatabaseRecord:
with open(self.asset_path(key)) as fd:
return _DatabaseRecord(**json.load(fd))
def has(self, key: str) -> bool:
return os.path.exists(self.asset_path(key))
def set(self, key: str, record: _DatabaseRecord) -> None:
with open(self.asset_path(key), "w") as fd:
fd.write(json.dumps(asdict(record)))
| _Database |
python | mlflow__mlflow | mlflow/metrics/genai/prompts/v1.py | {
"start": 7960,
"end": 12489
} | class ____:
definition = (
"Faithfulness is only evaluated with the provided output and provided context, please "
"ignore the provided input entirely when scoring faithfulness. Faithfulness assesses "
"how much of the provided output is factually consistent with the provided context. A "
"higher score indicates that a higher proportion of claims present in the output can be "
"derived from the provided context. Faithfulness does not consider how much extra "
"information from the context is not present in the output."
)
grading_prompt = (
"Faithfulness: Below are the details for different scores:\n"
"- Score 1: None of the claims in the output can be inferred from the provided context.\n"
"- Score 2: Some of the claims in the output can be inferred from the provided context, "
"but the majority of the output is missing from, inconsistent with, or contradictory to "
"the provided context.\n"
"- Score 3: Half or more of the claims in the output can be inferred from the provided "
"context.\n"
"- Score 4: Most of the claims in the output can be inferred from the provided context, "
"with very little information that is not directly supported by the provided context.\n"
"- Score 5: All of the claims in the output are directly supported by the provided "
"context, demonstrating high faithfulness to the provided context."
)
grading_context_columns = ["context"]
parameters = default_parameters
default_model = default_model
example_score_2 = EvaluationExample(
input="How is MLflow related to Databricks?",
output="Databricks is a company that specializes in big data and machine learning "
"solutions. MLflow has nothing to do with Databricks. MLflow is an open-source platform "
"for managing the end-to-end machine learning (ML) lifecycle.",
score=2,
justification='The output claims that "MLflow has nothing to do with Databricks" which is '
'contradictory to the provided context that states "It was developed by Databricks". This '
'is a major inconsistency. However, the output correctly identifies that "MLflow is an '
'open-source platform for managing the end-to-end machine learning (ML) lifecycle" and '
'"Databricks is a company that specializes in big data and machine learning solutions", '
"which are both supported by the context. Therefore, some of the claims in the output can "
"be inferred from the provided context, but the majority of the output is inconsistent "
"with the provided context, leading to a faithfulness score of 2.",
grading_context={
"context": "MLflow is an open-source platform for managing the end-to-end machine "
"learning (ML) lifecycle. It was developed by Databricks, a company that specializes "
"in big data and machine learning solutions. MLflow is designed to address the "
"challenges that data scientists and machine learning engineers face when developing, "
"training, and deploying machine learning models."
},
)
example_score_5 = EvaluationExample(
input="How is MLflow related to Databricks?",
output="Databricks is a company that specializes in big data and machine learning "
"solutions.",
score=5,
justification='The output states that "Databricks is a company that specializes in big data'
' and machine learning solutions." This claim is directly supported by the context, which '
'states "It was developed by Databricks, a company that specializes in big data and '
'machine learning solutions." Therefore, the faithfulness score is 5 as all the claims in '
'the output are directly supported by the provided context."',
grading_context={
"context": "MLflow is an open-source platform for managing the end-to-end "
"machine learning (ML) lifecycle. It was developed by Databricks, a company "
"that specializes in big data and machine learning solutions. MLflow is "
"designed to address the challenges that data scientists and machine learning "
"engineers face when developing, training, and deploying machine learning "
"models."
},
)
default_examples = [example_score_2, example_score_5]
@dataclass
| FaithfulnessMetric |
python | pytorch__pytorch | test/test_static_runtime.py | {
"start": 5232,
"end": 5443
} | class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = 12
self.b = 2
def forward(self, x):
self.b = 30
return self.a + self.b + x
| SubModule2 |
python | tensorflow__tensorflow | tensorflow/python/framework/kernels_test.py | {
"start": 884,
"end": 1097
} | class ____(test_util.TensorFlowTestCase):
def testFindsAtLeastOneKernel(self):
kernel_list = kernels.get_all_registered_kernels()
self.assertGreater(len(kernel_list.kernel), 0)
| GetAllRegisteredKernelsTest |
python | facebook__pyre-check | tools/incremental_test/batch.py | {
"start": 747,
"end": 1279
} | class ____(ABC):
_input: Specification
def __init__(self, input: Specification) -> None:
self._input = input
@property
def input(self) -> Specification:
return self._input
@abstractmethod
def get_status(self) -> str:
raise NotImplementedError()
@abstractmethod
def to_json(self, dont_show_discrepancy: bool) -> Dict[str, Any]:
raise NotImplementedError()
@abstractmethod
def to_logger_sample(self) -> Sample:
raise NotImplementedError()
| RunnerResult |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/python.py | {
"start": 3453,
"end": 3860
} | class ____(Task.Task):
color = 'PINK'
def __str__(self):
node = self.outputs[0]
return node.path_from(node.ctx.launch_node())
def run(self):
cmd = [
Utils.subst_vars('${PYTHON}', self.env), '-c', INST, self.inputs[0].abspath(), self.outputs[0].abspath(),
self.pyd
]
ret = self.generator.bld.exec_command(cmd)
return ret
| pyc |
python | tiangolo__fastapi | docs_src/path_operation_configuration/tutorial005_py310.py | {
"start": 78,
"end": 698
} | class ____(BaseModel):
name: str
description: str | None = None
price: float
tax: float | None = None
tags: set[str] = set()
@app.post(
"/items/",
response_model=Item,
summary="Create an item",
response_description="The created item",
)
async def create_item(item: Item):
"""
Create an item with all the information:
- **name**: each item must have a name
- **description**: a long description
- **price**: required
- **tax**: if the item doesn't have tax, you can omit this
- **tags**: a set of unique tag strings for this item
"""
return item
| Item |
python | jazzband__django-oauth-toolkit | tests/db_router.py | {
"start": 193,
"end": 1041
} | class ____:
# alpha is where the core Django models are stored including user. To keep things
# simple this is where the oauth2 provider models are stored as well because they
# have a foreign key to User.
def db_for_read(self, model, **hints):
if model._meta.app_label not in apps_in_beta:
return "alpha"
return None
def db_for_write(self, model, **hints):
if model._meta.app_label not in apps_in_beta:
return "alpha"
return None
def allow_relation(self, obj1, obj2, **hints):
if obj1._state.db == "alpha" and obj2._state.db == "alpha":
return True
return None
def allow_migrate(self, db, app_label, model_name=None, **hints):
if app_label not in apps_in_beta:
return db == "alpha"
return None
| AlphaRouter |
python | huggingface__transformers | src/transformers/models/dpr/modeling_dpr.py | {
"start": 8267,
"end": 8536
} | class ____(DPRPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config: DPRConfig
base_model_prefix = "question_encoder"
| DPRPretrainedQuestionEncoder |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 60151,
"end": 65092
} | class ____(str, Enum):
"""
* USER_REQUEST: A user terminated the cluster directly. Parameters should include a `username` field that indicates the specific user who terminated the cluster.
* JOB_FINISHED: The cluster was launched by a job, and terminated when the job completed.
* INACTIVITY: The cluster was terminated since it was idle.
* CLOUD_PROVIDER_SHUTDOWN: The instance that hosted the Spark driver was terminated by the cloud provider. In AWS, for example, AWS may retire instances and directly shut them down. Parameters should include an `aws_instance_state_reason` field indicating the AWS-provided reason why the instance was terminated.
* COMMUNICATION_LOST: Databricks lost connection to services on the driver instance. For example, this can happen when problems arise in cloud networking infrastructure, or when the instance itself becomes unhealthy.
* CLOUD_PROVIDER_LAUNCH_FAILURE: Databricks experienced a cloud provider failure when requesting instances to launch clusters. For example, AWS limits the number of running instances and EBS volumes. If you ask Databricks to launch a cluster that requires instances or EBS volumes that exceed your AWS limit, the cluster fails with this status code. Parameters should include one of `aws_api_error_code`, `aws_instance_state_reason`, or `aws_spot_request_status` to indicate the AWS-provided reason why Databricks could not request the required instances for the cluster.
* SPARK_STARTUP_FAILURE: The cluster failed to initialize. Possible reasons may include failure to create the environment for Spark or issues launching the Spark master and worker processes.
* INVALID_ARGUMENT: Cannot launch the cluster because the user specified an invalid argument. For example, the user might specify an invalid runtime version for the cluster.
* UNEXPECTED_LAUNCH_FAILURE: While launching this cluster, Databricks failed to complete critical setup steps, terminating the cluster.
* INTERNAL_ERROR: Databricks encountered an unexpected error that forced the running cluster to be terminated. Contact Databricks support for additional details.
* SPARK_ERROR: The Spark driver failed to start. Possible reasons may include incompatible libraries and initialization scripts that corrupted the Spark container.
* METASTORE_COMPONENT_UNHEALTHY: The cluster failed to start because the external metastore could not be reached. Refer to [Troubleshooting](https://docs.databricks.com/data/metastores/external-hive-metastore.html#troubleshooting).
* DBFS_COMPONENT_UNHEALTHY: The cluster failed to start because Databricks File System (DBFS) could not be reached.
* DRIVER_UNREACHABLE: Databricks was not able to access the Spark driver, because it was not reachable.
* DRIVER_UNRESPONSIVE: Databricks was not able to access the Spark driver, because it was unresponsive.
* INSTANCE_UNREACHABLE: Databricks was not able to access instances in order to start the cluster. This can be a transient networking issue. If the problem persists, this usually indicates a networking environment misconfiguration.
* CONTAINER_LAUNCH_FAILURE: Databricks was unable to launch containers on worker nodes for the cluster. Have your admin check your network configuration.
* INSTANCE_POOL_CLUSTER_FAILURE: Pool backed cluster specific failure. Refer to [Pools](https://docs.databricks.com/clusters/instance-pools/index.html) for details.
* REQUEST_REJECTED: Databricks cannot handle the request at this moment. Try again later and contact Databricks if the problem persists.
* INIT_SCRIPT_FAILURE: Databricks cannot load and run a cluster-scoped init script on one of the cluster’s nodes, or the init script terminates with a non-zero exit code. Refer to [Init script logs](https://docs.databricks.com/clusters/init-scripts.html#init-script-log).
* TRIAL_EXPIRED: The Databricks trial subscription expired.
"""
userrequest = "USER_REQUEST"
jobfinished = "JOB_FINISHED"
inactivity = "INACTIVITY"
cloudprovidershutdown = "CLOUD_PROVIDER_SHUTDOWN"
communicationlost = "COMMUNICATION_LOST"
cloudproviderlaunchfailure = "CLOUD_PROVIDER_LAUNCH_FAILURE"
sparkstartupfailure = "SPARK_STARTUP_FAILURE"
invalidargument = "INVALID_ARGUMENT"
unexpectedlaunchfailure = "UNEXPECTED_LAUNCH_FAILURE"
internalerror = "INTERNAL_ERROR"
sparkerror = "SPARK_ERROR"
metastorecomponentunhealthy = "METASTORE_COMPONENT_UNHEALTHY"
dbfscomponentunhealthy = "DBFS_COMPONENT_UNHEALTHY"
driverunreachable = "DRIVER_UNREACHABLE"
driverunresponsive = "DRIVER_UNRESPONSIVE"
instanceunreachable = "INSTANCE_UNREACHABLE"
containerlaunchfailure = "CONTAINER_LAUNCH_FAILURE"
instancepoolclusterfailure = "INSTANCE_POOL_CLUSTER_FAILURE"
requestrejected = "REQUEST_REJECTED"
initscriptfailure = "INIT_SCRIPT_FAILURE"
trialexpired = "TRIAL_EXPIRED"
| TerminationCode |
python | django__django | tests/async/test_async_related_managers.py | {
"start": 99,
"end": 4944
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.mtm1 = ManyToManyModel.objects.create()
cls.s1 = SimpleModel.objects.create(field=0)
cls.mtm2 = ManyToManyModel.objects.create()
cls.mtm2.simples.set([cls.s1])
async def test_acreate(self):
await self.mtm1.simples.acreate(field=2)
new_simple = await self.mtm1.simples.aget()
self.assertEqual(new_simple.field, 2)
async def test_acreate_reverse(self):
await self.s1.relatedmodel_set.acreate()
new_relatedmodel = await self.s1.relatedmodel_set.aget()
self.assertEqual(new_relatedmodel.simple, self.s1)
async def test_aget_or_create(self):
new_simple, created = await self.mtm1.simples.aget_or_create(field=2)
self.assertIs(created, True)
self.assertEqual(await self.mtm1.simples.acount(), 1)
self.assertEqual(new_simple.field, 2)
new_simple, created = await self.mtm1.simples.aget_or_create(
id=new_simple.id, through_defaults={"field": 3}
)
self.assertIs(created, False)
self.assertEqual(await self.mtm1.simples.acount(), 1)
self.assertEqual(new_simple.field, 2)
async def test_aget_or_create_reverse(self):
new_relatedmodel, created = await self.s1.relatedmodel_set.aget_or_create()
self.assertIs(created, True)
self.assertEqual(await self.s1.relatedmodel_set.acount(), 1)
self.assertEqual(new_relatedmodel.simple, self.s1)
async def test_aupdate_or_create(self):
new_simple, created = await self.mtm1.simples.aupdate_or_create(field=2)
self.assertIs(created, True)
self.assertEqual(await self.mtm1.simples.acount(), 1)
self.assertEqual(new_simple.field, 2)
new_simple1, created = await self.mtm1.simples.aupdate_or_create(
id=new_simple.id, defaults={"field": 3}
)
self.assertIs(created, False)
self.assertEqual(new_simple1.field, 3)
new_simple2, created = await self.mtm1.simples.aupdate_or_create(
field=4, defaults={"field": 6}, create_defaults={"field": 5}
)
self.assertIs(created, True)
self.assertEqual(new_simple2.field, 5)
self.assertEqual(await self.mtm1.simples.acount(), 2)
async def test_aupdate_or_create_reverse(self):
new_relatedmodel, created = await self.s1.relatedmodel_set.aupdate_or_create()
self.assertIs(created, True)
self.assertEqual(await self.s1.relatedmodel_set.acount(), 1)
self.assertEqual(new_relatedmodel.simple, self.s1)
async def test_aadd(self):
await self.mtm1.simples.aadd(self.s1)
self.assertEqual(await self.mtm1.simples.aget(), self.s1)
async def test_aadd_reverse(self):
r1 = await RelatedModel.objects.acreate()
await self.s1.relatedmodel_set.aadd(r1, bulk=False)
self.assertEqual(await self.s1.relatedmodel_set.aget(), r1)
async def test_aremove(self):
self.assertEqual(await self.mtm2.simples.acount(), 1)
await self.mtm2.simples.aremove(self.s1)
self.assertEqual(await self.mtm2.simples.acount(), 0)
async def test_aremove_reverse(self):
r1 = await RelatedModel.objects.acreate(simple=self.s1)
self.assertEqual(await self.s1.relatedmodel_set.acount(), 1)
await self.s1.relatedmodel_set.aremove(r1)
self.assertEqual(await self.s1.relatedmodel_set.acount(), 0)
async def test_aset(self):
await self.mtm1.simples.aset([self.s1])
self.assertEqual(await self.mtm1.simples.aget(), self.s1)
await self.mtm1.simples.aset([])
self.assertEqual(await self.mtm1.simples.acount(), 0)
await self.mtm1.simples.aset([self.s1], clear=True)
self.assertEqual(await self.mtm1.simples.aget(), self.s1)
async def test_aset_reverse(self):
r1 = await RelatedModel.objects.acreate()
await self.s1.relatedmodel_set.aset([r1])
self.assertEqual(await self.s1.relatedmodel_set.aget(), r1)
await self.s1.relatedmodel_set.aset([])
self.assertEqual(await self.s1.relatedmodel_set.acount(), 0)
await self.s1.relatedmodel_set.aset([r1], bulk=False, clear=True)
self.assertEqual(await self.s1.relatedmodel_set.aget(), r1)
async def test_aclear(self):
self.assertEqual(await self.mtm2.simples.acount(), 1)
await self.mtm2.simples.aclear()
self.assertEqual(await self.mtm2.simples.acount(), 0)
async def test_aclear_reverse(self):
await RelatedModel.objects.acreate(simple=self.s1)
self.assertEqual(await self.s1.relatedmodel_set.acount(), 1)
await self.s1.relatedmodel_set.aclear(bulk=False)
self.assertEqual(await self.s1.relatedmodel_set.acount(), 0)
| AsyncRelatedManagersOperationTest |
python | sanic-org__sanic | sanic/server/protocols/base_protocol.py | {
"start": 372,
"end": 9334
} | class ____(asyncio.Protocol):
__slots__ = (
"app",
# event loop, connection
"loop",
"transport",
"connections",
"conn_info",
"signal",
"_can_write",
"_time",
"_task",
"_unix",
"_data_received",
)
def __init__(
self,
*,
loop,
app: Sanic,
signal=None,
connections=None,
unix=None,
**kwargs,
):
asyncio.set_event_loop(loop)
self.loop = loop
self.app: Sanic = app
self.signal = signal or Signal()
self.transport: Optional[Transport] = None
self.connections = connections if connections is not None else set()
self.conn_info: Optional[ConnInfo] = None
self._can_write = asyncio.Event()
self._can_write.set()
self._unix = unix
self._time = 0.0 # type: float
self._task = None # type: Optional[asyncio.Task]
self._data_received = asyncio.Event()
@property
def ctx(self):
if self.conn_info is not None:
return self.conn_info.ctx
else:
return None
async def send(self, data):
"""
Generic data write implementation with backpressure control.
"""
await self._can_write.wait()
if self.transport.is_closing():
raise RequestCancelled
self.transport.write(data)
self._time = current_time()
async def receive_more(self):
"""
Wait until more data is received into the Server protocol's buffer
"""
self.transport.resume_reading()
self._data_received.clear()
await self._data_received.wait()
def close(self, timeout: Optional[float] = None):
"""
Attempt close the connection.
"""
if self.transport is None or self.transport.is_closing():
# do not attempt to close again, already aborted or closing
return
# Check if write is already paused _before_ close() is called.
write_was_paused = not self._can_write.is_set()
# Trigger the UVLoop Stream Transport Close routine
# Causes a call to connection_lost where further cleanup occurs
# Close may fully close the connection now, but if there is still
# data in the libuv buffer, then close becomes an async operation
self.transport.close()
try:
# Check write-buffer data left _after_ close is called.
# in UVLoop, get the data in the libuv transport write-buffer
data_left = self.transport.get_write_buffer_size()
# Some asyncio implementations don't support get_write_buffer_size
except (AttributeError, NotImplementedError):
data_left = 0
if write_was_paused or data_left > 0:
# don't call resume_writing here, it gets called by the transport
# to unpause the protocol when it is ready for more data
# Schedule the async close checker, to close the connection
# after the transport is done, and clean everything up.
if timeout is None:
# This close timeout needs to be less than the graceful
# shutdown timeout. The graceful shutdown _could_ be waiting
# for this transport to close before shutting down the app.
timeout = self.app.config.GRACEFUL_TCP_CLOSE_TIMEOUT
# This is 5s by default.
else:
# Schedule the async close checker but with no timeout,
# this will ensure abort() is called if required.
if timeout is None:
timeout = 0
self.loop.call_soon(
_async_protocol_transport_close,
self,
self.loop,
timeout,
)
def abort(self):
"""
Force close the connection.
"""
# Cause a call to connection_lost where further cleanup occurs
if self.transport:
self.transport.abort()
self.transport = None
# asyncio.Protocol API Callbacks #
# ------------------------------ #
def connection_made(self, transport):
"""
Generic connection-made, with no connection_task, and no recv_buffer.
Override this for protocol-specific connection implementations.
"""
try:
transport.set_write_buffer_limits(low=16384, high=65536)
self.connections.add(self)
self.transport = transport
self.conn_info = ConnInfo(self.transport, unix=self._unix)
except Exception:
error_logger.exception("protocol.connect_made")
def connection_lost(self, exc):
"""
This is a callback handler that is called from the asyncio
transport layer implementation (eg, UVLoop's UVStreamTransport).
It is scheduled to be called async after the transport has closed.
When data is still in the send buffer, this call to connection_lost
will be delayed until _after_ the buffer is finished being sent.
So we can use this callback as a confirmation callback
that the async write-buffer transfer is finished.
"""
try:
self.connections.discard(self)
# unblock the send queue if it is paused,
# this allows the route handler to see
# the CancelledError exception
self.resume_writing()
self.conn_info.lost = True
if self._task:
self._task.cancel()
except BaseException:
error_logger.exception("protocol.connection_lost")
def pause_writing(self):
self._can_write.clear()
def resume_writing(self):
self._can_write.set()
def data_received(self, data: bytes):
try:
self._time = current_time()
if not data:
return self.close()
if self._data_received:
self._data_received.set()
except BaseException:
error_logger.exception("protocol.data_received")
def _async_protocol_transport_close(
protocol: SanicProtocol,
loop: asyncio.AbstractEventLoop,
timeout: float,
):
"""
This function is scheduled to be called after close() is called.
It checks that the transport has shut down properly, or waits
for any remaining data to be sent, and aborts after a timeout.
This is required if the transport is closed while there is an async
large async transport write operation in progress.
This is observed when NGINX reverse-proxy is the client.
"""
if protocol.transport is None:
# abort() is the only method that can make
# protocol.transport be None, so abort was already called
return
# protocol.connection_lost does not set protocol.transport to None
# so to detect it a different way with conninfo.lost
elif protocol.conn_info is not None and protocol.conn_info.lost:
# Terminus. Most connections finish the protocol here!
# Connection_lost callback was executed already,
# so transport did complete and close itself properly.
# No need to call abort().
# This is the last part of cleanup to do
# that is not done by connection_lost handler.
# Ensure transport is cleaned up by GC.
protocol.transport = None
return
elif not protocol.transport.is_closing():
raise RuntimeError(
"You must call transport.close() before "
"protocol._async_transport_close() runs."
)
write_is_paused = not protocol._can_write.is_set()
try:
# in UVLoop, get the data in the libuv write-buffer
data_left = protocol.transport.get_write_buffer_size()
# Some asyncio implementations don't support get_write_buffer_size
except (AttributeError, NotImplementedError):
data_left = 0
if write_is_paused or data_left > 0:
# don't need to call resume_writing here to unpause
if timeout <= 0:
# timeout is 0 or less, so we can simply abort now
loop.call_soon(SanicProtocol.abort, protocol)
else:
next_check_interval = min(timeout, 0.1)
next_check_timeout = timeout - next_check_interval
loop.call_later(
# Recurse back in after the timeout, to check again
next_check_interval,
# this next time with reduced timeout.
_async_protocol_transport_close,
protocol,
loop,
next_check_timeout,
)
else:
# Not paused, and no data left in the buffer, but transport
# is still open, connection_lost has not been called yet.
# We can call abort() to fix that.
loop.call_soon(SanicProtocol.abort, protocol)
| SanicProtocol |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py | {
"start": 115861,
"end": 126626
} | class ____(Qwen2_5OmniPreTrainedModelForConditionalGeneration, GenerationMixin):
config: Qwen2_5OmniTalkerConfig
base_model_prefix = "talker"
output_modalities = ("audio",)
def __init__(self, config: Qwen2_5OmniTalkerConfig):
super().__init__(config)
self.thinker_to_talker_proj = nn.Linear(config.embedding_size, config.hidden_size)
self.model = Qwen2_5OmniTalkerModel(config)
self.codebook_size = config.vocab_size
self.codec_head = nn.Linear(config.hidden_size, self.codebook_size, bias=False)
self.codec_bos_token = config.tts_codec_start_token_id
self.codec_eos_token = config.tts_codec_end_token_id
self.codec_pad_token = config.tts_codec_pad_token_id
self.codec_mask_token = config.tts_codec_mask_token_id
self.text_bos_token = config.tts_text_start_token_id
self.text_eos_token = config.tts_text_end_token_id
self.text_pad_token = config.tts_text_pad_token_id
self.spatial_merge_size = self.config.spatial_merge_size
self.rope_deltas = None
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
thinker_reply_part: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
rope_deltas: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
input_text_ids: Optional[torch.LongTensor] = None,
image_grid_thw: Optional[torch.LongTensor] = None,
video_grid_thw: Optional[torch.LongTensor] = None,
use_audio_in_video: Optional[bool] = None,
audio_feature_lengths: Optional[torch.LongTensor] = None,
video_second_per_grid: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, Qwen2_5OmniTalkerCausalLMOutputWithPast]:
r"""
thinker_reply_part (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Hidden states from the thinker model's output that represent the text reply part to be processed.
rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
The rope index difference between sequence length and multimodal rope.
input_text_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Input token IDs for text-only content, used for position calculation in multimodal contexts.
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
The temporal, height and width of feature shape of each image in LLM.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
The temporal, height and width of feature shape of each video in LLM.
use_audio_in_video (`bool`, *optional*):
Whether or not use audio track in video, should same as the parameter in `process_audio_info`.
audio_feature_lengths (`torch.LongTensor` of shape `(num_audios)`, *optional*):
The length of feature shape of each audio in LLM.
video_second_per_grid (`torch.LongTensor` of shape `(num_videos)`, *optional*):
Number of seconds per grid for each video, used for temporal feature mapping.
Example:
```python
>>> from io import BytesIO
>>> from urllib.request import urlopen
>>> import librosa
>>> from transformers import AutoProcessor, Qwen2_5OmniTalkerForConditionalGeneration
>>> model = Qwen2_5OmniTalkerForConditionalGeneration.from_pretrained("Qwen/Qwen2-Audio-7B")
>>> processor = AutoProcessor.from_pretrained("Qwen/Qwen2-Audio-7B")
>>> prompt = "<|audio_bos|><|AUDIO|><|audio_eos|>Generate the caption in English:"
>>> url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3"
>>> audio, _ = librosa.load(BytesIO(urlopen(url).read()), sr=self.processor.feature_extractor.sampling_rate)
>>> inputs = processor(text=prompt, audio=audio, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(**inputs, max_length=30)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Generate the caption in English: Glass is breaking."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if attention_mask is not None and position_ids is None:
if (
cache_position is None
or (cache_position is not None and cache_position[0] == 0)
or self.rope_deltas is None
):
position_ids, rope_deltas = self.get_rope_index(
input_text_ids,
image_grid_thw,
video_grid_thw,
attention_mask,
use_audio_in_video,
audio_feature_lengths,
video_second_per_grid,
)
inputs_embeds[:, -1, :] += self.get_input_embeddings()(
torch.tensor([self.codec_bos_token], dtype=torch.long, device=inputs_embeds.device)
)
inputs_embeds[:, -2, :] += self.get_input_embeddings()(
torch.tensor([self.codec_pad_token], dtype=torch.long, device=inputs_embeds.device)
)
self.rope_deltas = rope_deltas
else:
batch_size, seq_length = input_ids.shape
delta = cache_position[0] + self.rope_deltas if cache_position is not None else 0
position_ids = torch.arange(seq_length, device=input_ids.device)
position_ids = position_ids.view(1, -1).expand(batch_size, -1)
position_ids = position_ids.add(delta)
position_ids = position_ids.unsqueeze(0).expand(3, -1, -1)
if inputs_embeds is None:
# 1. Inference tokens after second token
codec_embeds = self.get_input_embeddings()(input_ids)
inputs_embeds = codec_embeds + thinker_reply_part[:, :1, :]
if thinker_reply_part.shape[1] > 1:
thinker_reply_part = thinker_reply_part[:, 1:, :]
talker_lm_input = self.thinker_to_talker_proj(inputs_embeds)
if attention_mask is not None:
attention_mask = attention_mask.to(inputs_embeds.device)
outputs = self.model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=talker_lm_input,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
logits = self.codec_head(hidden_states)
logits = logits.float()
loss = None
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return Qwen2_5OmniTalkerCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=hidden_states,
attentions=outputs.attentions,
rope_deltas=self.rope_deltas,
thinker_reply_part=thinker_reply_part,
)
def _get_initial_cache_position(self, seq_length, device, model_kwargs):
# Talker needs to calculate cache_position with input_ids, so pop inputs_embeds temporarily
inputs_embeds = model_kwargs.pop("inputs_embeds")
model_kwargs = super()._get_initial_cache_position(seq_length, device, model_kwargs)
model_kwargs["inputs_embeds"] = inputs_embeds
return model_kwargs
# prepare inputs for talker lm generation
def prepare_inputs_for_generation(
self,
input_ids,
input_text_ids,
past_key_values=None,
attention_mask=None,
inputs_embeds=None,
thinker_reply_part=None,
cache_position=None,
position_ids=None,
use_cache=True,
pixel_values=None,
pixel_values_videos=None,
image_grid_thw=None,
video_grid_thw=None,
input_audio_features=None,
audio_feature_attention_mask=None,
audio_feature_lengths=None,
use_audio_in_video=False,
video_second_per_grid=None,
**kwargs,
):
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values,
attention_mask,
inputs_embeds,
cache_position,
use_cache=use_cache,
thinker_reply_part=thinker_reply_part,
input_text_ids=input_text_ids,
image_grid_thw=image_grid_thw,
video_grid_thw=video_grid_thw,
use_audio_in_video=use_audio_in_video,
audio_feature_lengths=audio_feature_lengths,
video_second_per_grid=video_second_per_grid,
**kwargs,
)
model_inputs["position_ids"] = None
return model_inputs
def _update_model_kwargs_for_generation(
self,
outputs: ModelOutput,
model_kwargs: dict[str, Any],
is_encoder_decoder: bool = False,
num_new_tokens: int = 1,
) -> dict[str, Any]:
model_kwargs = super()._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder, num_new_tokens
)
if getattr(outputs, "thinker_reply_part", None) is not None:
model_kwargs["thinker_reply_part"] = outputs.thinker_reply_part
return model_kwargs
############################
# Start Token2Wav #
############################
| Qwen2_5OmniTalkerForConditionalGeneration |
python | langchain-ai__langchain | libs/text-splitters/langchain_text_splitters/jsx.py | {
"start": 147,
"end": 3250
} | class ____(RecursiveCharacterTextSplitter):
"""Text splitter that handles React (JSX), Vue, and Svelte code.
This splitter extends RecursiveCharacterTextSplitter to handle
React (JSX), Vue, and Svelte code by:
1. Detecting and extracting custom component tags from the text
2. Using those tags as additional separators along with standard JS syntax
The splitter combines:
* Custom component tags as separators (e.g. <Component, <div)
* JavaScript syntax elements (function, const, if, etc)
* Standard text splitting on newlines
This allows chunks to break at natural boundaries in
React, Vue, and Svelte component code.
"""
def __init__(
self,
separators: list[str] | None = None,
chunk_size: int = 2000,
chunk_overlap: int = 0,
**kwargs: Any,
) -> None:
"""Initialize the JS Framework text splitter.
Args:
separators: Optional list of custom separator strings to use
chunk_size: Maximum size of chunks to return
chunk_overlap: Overlap in characters between chunks
**kwargs: Additional arguments to pass to parent class
"""
super().__init__(chunk_size=chunk_size, chunk_overlap=chunk_overlap, **kwargs)
self._separators = separators or []
def split_text(self, text: str) -> list[str]:
"""Split text into chunks.
This method splits the text into chunks by:
* Extracting unique opening component tags using regex
* Creating separators list with extracted tags and JS separators
* Splitting the text using the separators by calling the parent class method
Args:
text: String containing code to split
Returns:
List of text chunks split on component and JS boundaries
"""
# Extract unique opening component tags using regex
# Regex to match opening tags, excluding self-closing tags
opening_tags = re.findall(r"<\s*([a-zA-Z0-9]+)[^>]*>", text)
component_tags = []
for tag in opening_tags:
if tag not in component_tags:
component_tags.append(tag)
component_separators = [f"<{tag}" for tag in component_tags]
js_separators = [
"\nexport ",
" export ",
"\nfunction ",
"\nasync function ",
" async function ",
"\nconst ",
"\nlet ",
"\nvar ",
"\nclass ",
" class ",
"\nif ",
" if ",
"\nfor ",
" for ",
"\nwhile ",
" while ",
"\nswitch ",
" switch ",
"\ncase ",
" case ",
"\ndefault ",
" default ",
]
separators = (
self._separators
+ js_separators
+ component_separators
+ ["<>", "\n\n", "&&\n", "||\n"]
)
self._separators = separators
return super().split_text(text)
| JSFrameworkTextSplitter |
python | PrefectHQ__prefect | tests/server/models/test_workers.py | {
"start": 14182,
"end": 15570
} | class ____:
async def test_update_work_queue(self, session, work_queue):
assert await models.workers.update_work_queue(
session=session,
work_queue_id=work_queue.id,
work_queue=schemas.actions.WorkQueueUpdate(
is_paused=True, concurrency_limit=5
),
)
result = await models.workers.read_work_queue(
session=session, work_queue_id=work_queue.id
)
assert result.is_paused is True
assert result.concurrency_limit == 5
async def test_update_work_queue_invalid_concurrency(self, session, work_queue):
with pytest.raises(pydantic.ValidationError):
await models.workers.update_work_queue(
session=session,
work_queue_id=work_queue.id,
work_queue=schemas.actions.WorkQueueUpdate(concurrency_limit=-5),
)
async def test_update_work_queue_zero_concurrency(self, session, work_queue):
assert await models.workers.update_work_queue(
session=session,
work_queue_id=work_queue.id,
work_queue=schemas.actions.WorkQueueUpdate(concurrency_limit=0),
)
result = await models.workers.read_work_queue(
session=session, work_queue_id=work_queue.id
)
assert result.concurrency_limit == 0
| TestUpdateWorkQueue |
python | tensorflow__tensorflow | tensorflow/python/eager/core_test.py | {
"start": 37545,
"end": 39790
} | class ____(test_util.TensorFlowTestCase):
cpu_device = '/job:localhost/replica:0/task:0/device:CPU:0'
def _send(self, tensor, tensor_name, to_device):
return execute(
b'_Send', num_outputs=0, inputs=[tensor],
attrs=('T', tensor.dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', tensor.device,
'send_device_incarnation', 0,
'recv_device', to_device,
'client_terminated', True))
def _recv(self, dtype, tensor_name, from_device):
device_name = context.context().device_name
if not device_name:
device_name = self.cpu_device
return execute(
b'_Recv', num_outputs=1, inputs=[],
attrs=('tensor_type', dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', from_device,
'send_device_incarnation', 0,
'recv_device', device_name,
'client_terminated', False))[0]
def setUp(self):
super(SendRecvTest, self).setUp()
context._reset_context()
configure_virtual_cpus()
@test_util.disable_tfrt('Send/Receive not supported in TFRT yet.')
def testBasic(self):
with ops.device(self.cpu_device):
t0 = constant_op.constant(1.0)
t1 = constant_op.constant(2.0)
self._send(t0, 't0', self.cpu_device)
self._send(t1, 't1', self.cpu_device)
self.assertAllEqual(
self._recv(dtypes.float32, 't0', self.cpu_device),
1.0)
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
@test_util.run_gpu_only
@test_util.disable_tfrt('Send/Receive not supported in TFRT yet.')
def testLocalCrossDevice(self):
gpu_device_name = '/job:localhost/replica:0/task:0/device:GPU:0'
with ops.device('GPU:0'):
t0 = array_ops.identity(1.0)
self._send(t0, 't0', self.cpu_device)
with ops.device('cpu:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't0', gpu_device_name),
1.0)
self._send(constant_op.constant(2.0), 't1', gpu_device_name)
with ops.device('GPU:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
| SendRecvTest |
python | getsentry__sentry | src/sentry/sentry_apps/token_exchange/refresher.py | {
"start": 1080,
"end": 5916
} | class ____:
"""
Exchanges a Refresh Token for a new Access Token
"""
install: SentryAppInstallation
refresh_token: str
client_id: str
user: User
def run(self) -> ApiToken:
with SentryAppInteractionEvent(
operation_type=SentryAppInteractionType.AUTHORIZATIONS,
event_type=SentryAppEventType.REFRESHER,
).capture() as lifecycle:
context = {
"installation_uuid": self.install.uuid,
"client_id": self.application.client_id[:SENSITIVE_CHARACTER_LIMIT],
"sentry_app_id": self.install.sentry_app.id,
}
lifecycle.add_extras(context)
try:
token = None
with transaction.atomic(router.db_for_write(ApiToken)):
self._validate()
self.token.delete()
self._record_analytics()
token = self._create_new_token()
return token
except (OutboxDatabaseError, OutboxFlushError) as e:
if token is not None:
logger.warning(
"refresher.outbox-failure",
extra=context,
exc_info=e,
)
return token
raise SentryAppSentryError(
message="Failed to refresh given token",
status_code=500,
webhook_context=context,
) from e
except SentryAppIntegratorError as e:
lifecycle.record_halt(halt_reason=e)
raise
def _record_analytics(self) -> None:
analytics.record(
SentryAppTokenExchangedEvent(
sentry_app_installation_id=self.install.id,
exchange_type="refresh",
)
)
def _validate(self) -> None:
Validator(install=self.install, client_id=self.client_id, user=self.user).run()
if self.token.application != self.application:
assert self.token.application is not None, "Application must exist on ApiToken"
webhook_context = {
"client_id_installation_uuid": self.install.uuid,
"client_id": self.client_id,
}
try:
token_installation = ApiToken.objects.get(
refresh_token=self.refresh_token
).sentry_app_installation
webhook_context.update({"token_installation": token_installation.uuid})
except SentryAppInstallation.DoesNotExist:
pass
raise SentryAppIntegratorError(
message="Token does not belong to the application", webhook_context=webhook_context
)
def _create_new_token(self) -> ApiToken:
token = ApiToken.objects.create(
user=self.user,
application=self.application,
scope_list=self.sentry_app.scope_list,
expires_at=token_expiration(),
)
try:
SentryAppInstallation.objects.get(id=self.install.id).update(api_token=token)
except SentryAppInstallation.DoesNotExist:
pass
return token
@cached_property
def token(self) -> ApiToken:
try:
return ApiToken.objects.get(refresh_token=self.refresh_token)
except ApiToken.DoesNotExist:
raise SentryAppIntegratorError(
message="Given refresh token does not exist",
status_code=401,
webhook_context={
"installation_uuid": self.install.uuid,
},
)
@cached_property
def application(self) -> ApiApplication:
try:
return ApiApplication.objects.get(client_id=self.client_id)
except ApiApplication.DoesNotExist:
raise SentryAppSentryError(
message="Could not find matching Application for given client_id",
status_code=401,
webhook_context={
"client_id": self.client_id,
"installation_uuid": self.install.uuid,
},
)
@property
def sentry_app(self) -> SentryApp:
try:
return self.application.sentry_app
except SentryApp.DoesNotExist:
raise SentryAppSentryError(
message="Sentry App does not exist on attached Application",
status_code=401,
webhook_context={
"application_id": self.application.id,
"installation_uuid": self.install.uuid,
"client_id": self.application.client_id[:SENSITIVE_CHARACTER_LIMIT],
},
)
| Refresher |
python | pytorch__pytorch | test/distributed/nn/jit/test_instantiator.py | {
"start": 564,
"end": 647
} | class ____(nn.Module):
pass
def create_module():
return MyModule()
| MyModule |
python | pyparsing__pyparsing | examples/eval_arith.py | {
"start": 1517,
"end": 1921
} | class ____:
"Class to evaluate multiplication and division expressions"
def __init__(self, tokens):
self.value = tokens[0]
def eval(self):
prod = self.value[0].eval()
for op, val in operatorOperands(self.value[1:]):
if op == "*":
prod *= val.eval()
if op == "/":
prod /= val.eval()
return prod
| EvalMultOp |
python | matplotlib__matplotlib | lib/matplotlib/widgets.py | {
"start": 829,
"end": 2077
} | class ____:
"""
Some widgets, like the cursor, draw onto the canvas, and this is not
desirable under all circumstances, like when the toolbar is in zoom-to-rect
mode and drawing a rectangle. To avoid this, a widget can acquire a
canvas' lock with ``canvas.widgetlock(widget)`` before drawing on the
canvas; this will prevent other widgets from doing so at the same time (if
they also try to acquire the lock first).
"""
def __init__(self):
self._owner = None
def __call__(self, o):
"""Reserve the lock for *o*."""
if not self.available(o):
raise ValueError('already locked')
self._owner = o
def release(self, o):
"""Release the lock from *o*."""
if not self.available(o):
raise ValueError('you do not own this lock')
self._owner = None
def available(self, o):
"""Return whether drawing is available to *o*."""
return not self.locked() or self.isowner(o)
def isowner(self, o):
"""Return whether *o* owns this lock."""
return self._owner is o
def locked(self):
"""Return whether the lock is currently held by an owner."""
return self._owner is not None
| LockDraw |
python | mlflow__mlflow | mlflow/tracing/constant.py | {
"start": 3285,
"end": 5569
} | class ____:
# When the assessment is generated by an eval run, log the run ID here.
SOURCE_RUN_ID = "mlflow.assessment.sourceRunId"
# Total LLM cost spent for generating the feedback (llm-as-a-judge).
JUDGE_COST = "mlflow.assessment.judgeCost"
# When the scorer generates a trace for assessment scoring, log the trace ID here.
SCORER_TRACE_ID = "mlflow.assessment.scorerTraceId"
# All storage backends are guaranteed to support request_metadata key/value up to 250 characters
MAX_CHARS_IN_TRACE_INFO_METADATA = 250
# All storage backends are guaranteed to support tag keys up to 250 characters,
# values up to 4096 characters
MAX_CHARS_IN_TRACE_INFO_TAGS_KEY = 250
MAX_CHARS_IN_TRACE_INFO_TAGS_VALUE = 4096
TRUNCATION_SUFFIX = "..."
TRACE_REQUEST_RESPONSE_PREVIEW_MAX_LENGTH_DBX = 10000
TRACE_REQUEST_RESPONSE_PREVIEW_MAX_LENGTH_OSS = 1000
# Trace request ID must have the prefix "tr-" appended to the OpenTelemetry trace ID
TRACE_REQUEST_ID_PREFIX = "tr-"
# Trace ID V4 format starts with "trace:/" in the format of "trace:/<location>/<trace_id>"
TRACE_ID_V4_PREFIX = "trace:/"
# Schema version of traces and spans.
TRACE_SCHEMA_VERSION = 3
# Key for the trace schema version in the trace. This key is also used in
# Databricks model serving to be careful when modifying it.
TRACE_SCHEMA_VERSION_KEY = "mlflow.trace_schema.version"
STREAM_CHUNK_EVENT_NAME_FORMAT = "mlflow.chunk.item.{index}"
STREAM_CHUNK_EVENT_VALUE_KEY = "mlflow.chunk.value"
# Key for Databricks model serving options to return the trace in the response
DATABRICKS_OPTIONS_KEY = "databricks_options"
RETURN_TRACE_OPTION_KEY = "return_trace"
DATABRICKS_OUTPUT_KEY = "databricks_output"
# Assessment constants
ASSESSMENT_ID_PREFIX = "a-"
# Maximum number of seconds to retry getting a trace from the v4 endpoint.
# V4 traces have some delay in propagation after the log_spans call returns success response.
# To make sure get_trace API does not fail due to this delay, we retry up to a reasonable timeout.
# Setting 15 seconds because the initial version of the backend is known to have 1~5 seconds delay.
GET_TRACE_V4_RETRY_TIMEOUT_SECONDS = 15
# The location of the spans in the trace.
# This is used to determine where the spans are stored when exporting.
| AssessmentMetadataKey |
python | apache__airflow | providers/fab/tests/unit/fab/www/test_utils.py | {
"start": 950,
"end": 1719
} | class ____:
@conf_vars({("fab", "session_lifetime_minutes"): "43200"})
def test_config_val_is_default(self):
session_lifetime_config = get_session_lifetime_config()
assert session_lifetime_config == 43200
@conf_vars({("fab", "session_lifetime_minutes"): "43201"})
def test_config_val_is_not_default(self):
session_lifetime_config = get_session_lifetime_config()
assert session_lifetime_config == 43201
@conf_vars({("fab", "session_lifetime_days"): ""})
def test_uses_updated_session_timeout_config_by_default(self):
session_lifetime_config = get_session_lifetime_config()
default_timeout_minutes = 30 * 24 * 60
assert session_lifetime_config == default_timeout_minutes
| TestUpdatedConfigNames |
python | scipy__scipy | scipy/interpolate/tests/test_fitpack.py | {
"start": 11762,
"end": 17399
} | class ____:
def test_overflow(self):
from numpy.lib.stride_tricks import as_strided
if dfitpack_int.itemsize == 8:
size = 1500000**2
else:
size = 400**2
# Don't allocate a real array, as it's very big, but rely
# on that it's not referenced
x = as_strided(np.zeros(()), shape=(size,))
assert_raises(OverflowError, bisplrep, x, x, x, w=x,
xb=0, xe=1, yb=0, ye=1, s=0)
def test_regression_1310(self):
# Regression test for gh-1310
with np.load(data_file('bug-1310.npz')) as loaded_data:
data = loaded_data['data']
# Shouldn't crash -- the input data triggers work array sizes
# that caused previously some data to not be aligned on
# sizeof(double) boundaries in memory, which made the Fortran
# code to crash when compiled with -O3
bisplrep(data[:,0], data[:,1], data[:,2], kx=3, ky=3, s=0,
full_output=True)
@pytest.mark.skipif(dfitpack_int != np.int64, reason="needs ilp64 fitpack")
def test_ilp64_bisplrep(self):
check_free_memory(28000) # VM size, doesn't actually use the pages
x = np.linspace(0, 1, 400)
y = np.linspace(0, 1, 400)
x, y = np.meshgrid(x, y)
z = np.zeros_like(x)
tck = bisplrep(x, y, z, kx=3, ky=3, s=0)
xp_assert_close(bisplev(0.5, 0.5, tck), 0.0)
def test_dblint():
# Basic test to see it runs and gives the correct result on a trivial
# problem. Note that `dblint` is not exposed in the interpolate namespace.
x = np.linspace(0, 1)
y = np.linspace(0, 1)
xx, yy = np.meshgrid(x, y)
rect = RectBivariateSpline(x, y, 4 * xx * yy)
tck = list(rect.tck)
tck.extend(rect.degrees)
assert abs(dblint(0, 1, 0, 1, tck) - 1) < 1e-10
assert abs(dblint(0, 0.5, 0, 1, tck) - 0.25) < 1e-10
assert abs(dblint(0.5, 1, 0, 1, tck) - 0.75) < 1e-10
assert abs(dblint(-100, 100, -100, 100, tck) - 1) < 1e-10
def test_splev_der_k():
# regression test for gh-2188: splev(x, tck, der=k) gives garbage or crashes
# for x outside of knot range
# test case from gh-2188
tck = (np.array([0., 0., 2.5, 2.5]),
np.array([-1.56679978, 2.43995873, 0., 0.]),
1)
t, c, k = tck
x = np.array([-3, 0, 2.5, 3])
# an explicit form of the linear spline
xp_assert_close(splev(x, tck), c[0] + (c[1] - c[0]) * x/t[2])
xp_assert_close(splev(x, tck, 1),
np.ones_like(x) * (c[1] - c[0]) / t[2]
)
# now check a random spline vs splder
np.random.seed(1234)
x = np.sort(np.random.random(30))
y = np.random.random(30)
t, c, k = splrep(x, y)
x = [t[0] - 1., t[-1] + 1.]
tck2 = splder((t, c, k), k)
xp_assert_close(splev(x, (t, c, k), k), splev(x, tck2))
def test_splprep_segfault():
# regression test for gh-3847: splprep segfaults if knots are specified
# for task=-1
t = np.arange(0, 1.1, 0.1)
x = np.sin(2*np.pi*t)
y = np.cos(2*np.pi*t)
tck, u = splprep([x, y], s=0)
np.arange(0, 1.01, 0.01)
uknots = tck[0] # using the knots from the previous fitting
tck, u = splprep([x, y], task=-1, t=uknots) # here is the crash
@pytest.mark.skipif(dfitpack_int == np.int64,
reason='Will crash (see gh-23396), test only meant for 32-bit overflow')
def test_bisplev_integer_overflow():
np.random.seed(1)
x = np.linspace(0, 1, 11)
y = x
z = np.random.randn(11, 11).ravel()
kx = 1
ky = 1
nx, tx, ny, ty, c, fp, ier = regrid_smth(
x, y, z, None, None, None, None, kx=kx, ky=ky, s=0.0)
tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)], kx, ky)
xp = np.zeros([2621440])
yp = np.zeros([2621440])
assert_raises((RuntimeError, MemoryError), bisplev, xp, yp, tck)
@pytest.mark.xslow
def test_gh_1766():
# this should fail gracefully instead of segfaulting (int overflow)
size = 22
kx, ky = 3, 3
def f2(x, y):
return np.sin(x+y)
x = np.linspace(0, 10, size)
y = np.linspace(50, 700, size)
xy = makepairs(x, y)
tck = bisplrep(xy[0], xy[1], f2(xy[0], xy[1]), s=0, kx=kx, ky=ky)
# the size value here can either segfault
# or produce a MemoryError on main
tx_ty_size = 500000
tck[0] = np.arange(tx_ty_size)
tck[1] = np.arange(tx_ty_size) * 4
tt_0 = np.arange(50)
tt_1 = np.arange(50) * 3
with pytest.raises(MemoryError):
bisplev(tt_0, tt_1, tck, 1, 1)
def test_spalde_scalar_input():
# Ticket #629
x = np.linspace(0, 10)
y = x**3
tck = splrep(x, y, k=3, t=[5])
res = spalde(np.float64(1), tck)
des = np.array([1., 3., 6., 6.])
assert_almost_equal(res, des)
def test_spalde_nc():
# regression test for https://github.com/scipy/scipy/issues/19002
# here len(t) = 29 and len(c) = 25 (== len(t) - k - 1)
x = np.asarray([-10., -9., -8., -7., -6., -5., -4., -3., -2.5, -2., -1.5,
-1., -0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 4., 5., 6.],
dtype="float")
t = [-10.0, -10.0, -10.0, -10.0, -9.0, -8.0, -7.0, -6.0, -5.0, -4.0, -3.0,
-2.5, -2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0,
5.0, 6.0, 6.0, 6.0, 6.0]
c = np.asarray([1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
k = 3
res = spalde(x, (t, c, k))
res = np.vstack(res)
res_splev = np.asarray([splev(x, (t, c, k), nu) for nu in range(4)])
xp_assert_close(res, res_splev.T, atol=1e-15)
| TestBisplrep |
python | Pylons__pyramid | tests/test_integration.py | {
"start": 17996,
"end": 18430
} | class ____(IntegrationBase, unittest.TestCase):
# view_execution_permitted bug as reported by Shane at
# http://lists.repoze.org/pipermail/repoze-dev/2010-October/003603.html
package = 'tests.pkgs.permbugapp'
def test_test(self):
res = self.testapp.get('/test', status=200)
self.assertTrue(b'ACLDenied' in res.body)
def test_x(self):
self.testapp.get('/x', status=403)
| TestViewPermissionBug |
python | yaml__pyyaml | lib/yaml/nodes.py | {
"start": 763,
"end": 1045
} | class ____(Node):
id = 'scalar'
def __init__(self, tag, value,
start_mark=None, end_mark=None, style=None):
self.tag = tag
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
self.style = style
| ScalarNode |
python | google__flatbuffers | tests/MyGame/Example/TestSimpleTableWithEnum.py | {
"start": 176,
"end": 1700
} | class ____(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = TestSimpleTableWithEnum()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsTestSimpleTableWithEnum(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def TestSimpleTableWithEnumBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4D\x4F\x4E\x53", size_prefixed=size_prefixed)
# TestSimpleTableWithEnum
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# TestSimpleTableWithEnum
def Color(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 2
def TestSimpleTableWithEnumStart(builder):
builder.StartObject(1)
def Start(builder):
TestSimpleTableWithEnumStart(builder)
def TestSimpleTableWithEnumAddColor(builder, color):
builder.PrependUint8Slot(0, color, 2)
def AddColor(builder, color):
TestSimpleTableWithEnumAddColor(builder, color)
def TestSimpleTableWithEnumEnd(builder):
return builder.EndObject()
def End(builder):
return TestSimpleTableWithEnumEnd(builder)
| TestSimpleTableWithEnum |
python | getsentry__sentry | src/sentry/hybridcloud/models/cacheversion.py | {
"start": 1300,
"end": 1501
} | class ____(CacheVersionBase):
__relocation_scope__ = RelocationScope.Excluded
class Meta:
app_label = "hybridcloud"
db_table = "hybridcloud_controlcacheversion"
| ControlCacheVersion |
python | astropy__astropy | astropy/uncertainty/tests/test_distribution.py | {
"start": 22435,
"end": 22689
} | class ____(StructuredDtypeBase, TestGetSetItemAdvancedIndex):
def test_init(self):
assert self.d.shape == (3, 4)
assert self.d.n_samples == 5
assert_array_equal(self.d.distribution, self.distribution)
| TestStructuredAdvancedIndex |
python | huggingface__transformers | src/transformers/models/aya_vision/processing_aya_vision.py | {
"start": 1265,
"end": 11443
} | class ____(ProcessorMixin):
r"""
Constructs a AyaVision processor which wraps a [`AutoImageProcessor`] and
[`PretrainedTokenizerFast`] tokenizer into a single processor that inherits both the image processor and
tokenizer functionalities. See the [`~AyaVisionProcessor.__call__`] and [`~AyaVisionProcessor.decode`] for more information.
Args:
image_processor ([`AutoImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`PreTrainedTokenizer`, `PreTrainedTokenizerFast`], *optional*):
The tokenizer is a required input.
patch_size (`int`, *optional*, defaults to 28):
The size of image patches for tokenization.
img_size (`int`, *optional*, defaults to 364):
The size of the image to be tokenized. This should correspond to the size given to the image processor.
image_token (`str`, *optional*, defaults to `"<image>"`):
The token to be used to represent an image in the text.
downsample_factor (`int`, *optional*, defaults to 1):
The factor by which to scale the patch size.
start_of_img_token (`str`, *optional*, defaults to `"<|START_OF_IMG|>"`):
The token to be used to represent the start of an image in the text.
end_of_img_token (`str`, *optional*, defaults to `"<|END_OF_IMG|>"`):
The token to be used to represent the end of an image in the text.
img_patch_token (`str`, *optional*, defaults to `"<|IMG_PATCH|>"`):
The token to be used to represent an image patch in the text.
img_line_break_token (`str`, *optional*, defaults to `"<|IMG_LINE_BREAK|>"`):
The token to be used to represent a line break in the text.
tile_token (`str`, *optional*, defaults to `"TILE"`):
The token to be used to represent an image patch in the text.
tile_global_token (`str`, *optional*, defaults to `"TILE_GLOBAL"`):
The token to be used to represent the cover image in the text.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
"""
def __init__(
self,
image_processor=None,
tokenizer=None,
patch_size: int = 28,
img_size: int = 364,
image_token="<image>", # set the default and let users change if they have peculiar special tokens in rare cases
downsample_factor: int = 1,
start_of_img_token="<|START_OF_IMG|>",
end_of_img_token="<|END_OF_IMG|>",
img_patch_token="<|IMG_PATCH|>",
img_line_break_token="<|IMG_LINE_BREAK|>",
tile_token="TILE",
tile_global_token="TILE_GLOBAL",
chat_template=None,
**kwargs,
):
super().__init__(image_processor, tokenizer, chat_template=chat_template)
self.image_token = image_token
self.patch_size = patch_size * downsample_factor
self.img_size = img_size
self.start_of_img_token = start_of_img_token
self.end_of_img_token = end_of_img_token
self.img_patch_token = img_patch_token
self.img_line_break_token = img_line_break_token
self.tile_token = tile_token
self.tile_global_token = tile_global_token
self.image_token_id = tokenizer.convert_tokens_to_ids(self.img_patch_token)
self.image_ids = tokenizer.convert_tokens_to_ids(
[img_patch_token, tile_token, tile_global_token, start_of_img_token, end_of_img_token]
)
def _prompt_split_image(self, num_patches):
"""
Create a structured string representation of image tokens
Args:
num_patches: Number of patches in the image
Returns:
String with appropriate image tokens
"""
img_patches_per_tile = (self.img_size // self.patch_size) ** 2
img_string = f"{self.start_of_img_token}"
if num_patches > 1:
for idx in range(1, num_patches):
img_string += f"{self.tile_token}_{idx}" + f"{self.img_patch_token}" * img_patches_per_tile
img_string += f"{self.tile_global_token}" + f"{self.img_patch_token}" * img_patches_per_tile
img_string += f"{self.end_of_img_token}"
return img_string
def __call__(
self,
images: Optional[ImageInput] = None,
text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None,
**kwargs: Unpack[AyaVisionProcessorKwargs],
) -> BatchFeature:
"""
Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
and `kwargs` arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] to encode the text.
To prepare the vision inputs, this method forwards the `images` and `kwargs` arguments to
GotOcr2ImageProcessor's [`~GotOcr2ImageProcessor.__call__`] if `images` is not `None`.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
"""
if text is None:
raise ValueError("You have to specify text.")
output_kwargs = self._merge_kwargs(
AyaVisionProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if not isinstance(text, (list, tuple)):
text = [text]
# Process images
image_inputs = {}
if images is not None:
images = self.image_processor.fetch_images(images)
images = make_flat_list_of_images(images)
image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
num_patches = image_inputs.pop("num_patches")
image_index = 0
processed_text = []
for prompt in text:
new_prompt = prompt
while "<image>" in new_prompt:
# Replace the image placeholder with structured image tokens
image_tokens = self._prompt_split_image(num_patches[image_index])
new_prompt = new_prompt.replace("<image>", image_tokens, 1)
image_index += 1
processed_text.append(new_prompt)
if image_index != len(images):
raise ValueError("Number of image placeholders in the prompt does not match the number of images.")
text = processed_text
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"], return_tensors=None)
if return_mm_token_type_ids:
array_ids = np.array(text_inputs["input_ids"])
mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
mm_token_type_ids[np.isin(array_ids, self.image_ids)] = 1
text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors)
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = AyaVisionProcessorKwargs._defaults.get("images_kwargs", {})
images_kwargs.update(kwargs)
num_image_patches = [
self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
for image_size in image_sizes
]
token_per_patch = (self.img_size // self.patch_size) ** 2
num_image_tokens = [
token_per_patch + 3 + sum(token_per_patch + 1 for _ in range(1, num_patches))
for num_patches in num_image_patches
] # Add +3 and +1 for BOI/EOI and image tile tokens
vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
return MultiModalData(**vision_data)
__all__ = ["AyaVisionProcessor"]
| AyaVisionProcessor |
python | great-expectations__great_expectations | great_expectations/render/renderer/content_block/expectation_string.py | {
"start": 636,
"end": 5242
} | class ____(ContentBlockRenderer):
@classmethod
@override
def _missing_content_block_fn(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
) -> List[RenderedStringTemplateContent]:
renderer_configuration: RendererConfiguration = RendererConfiguration(
configuration=configuration,
result=result,
runtime_configuration=runtime_configuration,
)
return [
RenderedStringTemplateContent(
**{ # type: ignore[arg-type] # FIXME CoP
"content_block_type": "string_template",
"styling": {"parent": {"classes": ["alert", "alert-warning"]}},
"string_template": {
"template": "$expectation_type(**$kwargs)",
"params": {
"expectation_type": renderer_configuration.expectation_type,
"kwargs": renderer_configuration.kwargs,
},
"styling": {
"params": {
"expectation_type": {
"classes": ["badge", "badge-warning"],
}
}
},
},
}
)
]
@classmethod
def _diagnostic_status_icon_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
assert result, "Must provide a result object."
if result.exception_info["raised_exception"]:
return RenderedStringTemplateContent(
**{ # type: ignore[arg-type] # FIXME CoP
"content_block_type": "string_template",
"string_template": {
"template": "$icon",
"params": {"icon": "", "markdown_status_icon": "❗"},
"styling": {
"params": {
"icon": {
"classes": [
"fas",
"fa-exclamation-triangle",
"text-warning",
],
"tag": "i",
}
}
},
},
}
)
if result.success:
return RenderedStringTemplateContent(
**{ # type: ignore[arg-type] # FIXME CoP
"content_block_type": "string_template",
"string_template": {
"template": "$icon",
"params": {"icon": "", "markdown_status_icon": "✅"},
"styling": {
"params": {
"icon": {
"classes": [
"fas",
"fa-check-circle",
"text-success",
],
"tag": "i",
}
}
},
},
"styling": {"parent": {"classes": ["hide-succeeded-validation-target-child"]}},
}
)
else:
return RenderedStringTemplateContent(
**{ # type: ignore[arg-type] # FIXME CoP
"content_block_type": "string_template",
"string_template": {
"template": "$icon",
"params": {"icon": "", "markdown_status_icon": "❌"},
"styling": {
"params": {
"icon": {
"tag": "i",
"classes": ["fas", "fa-times", "text-danger"],
}
}
},
},
}
)
| ExpectationStringRenderer |
python | pytorch__pytorch | test/onnx/model_defs/super_resolution.py | {
"start": 53,
"end": 1051
} | class ____(nn.Module):
def __init__(self, upscale_factor):
super().__init__()
self.relu = nn.ReLU()
self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))
self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))
self.conv4 = nn.Conv2d(32, upscale_factor**2, (3, 3), (1, 1), (1, 1))
self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
self._initialize_weights()
def forward(self, x):
x = self.relu(self.conv1(x))
x = self.relu(self.conv2(x))
x = self.relu(self.conv3(x))
x = self.pixel_shuffle(self.conv4(x))
return x
def _initialize_weights(self):
init.orthogonal_(self.conv1.weight, init.calculate_gain("relu"))
init.orthogonal_(self.conv2.weight, init.calculate_gain("relu"))
init.orthogonal_(self.conv3.weight, init.calculate_gain("relu"))
init.orthogonal_(self.conv4.weight)
| SuperResolutionNet |
python | pytorch__pytorch | torch/autograd/forward_ad.py | {
"start": 7275,
"end": 7639
} | class ____(_DecoratorContextManager):
def __init__(self, mode: bool) -> None:
self.prev = _is_fwd_grad_enabled()
torch._C._set_fwd_grad_enabled(mode)
def __enter__(self) -> None:
pass
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
torch._C._set_fwd_grad_enabled(self.prev)
| _set_fwd_grad_enabled |
python | pymupdf__PyMuPDF | src/__init__.py | {
"start": 839555,
"end": 845812
} | class ____(mupdf.FzDevice2):
'''
Trace TEXT device for Python method Page.get_texttrace()
'''
def __init__(self, out):
super().__init__()
self.use_virtual_fill_path()
self.use_virtual_stroke_path()
self.use_virtual_fill_text()
self.use_virtual_stroke_text()
self.use_virtual_ignore_text()
self.use_virtual_fill_shade()
self.use_virtual_fill_image()
self.use_virtual_fill_image_mask()
self.use_virtual_begin_layer()
self.use_virtual_end_layer()
self.out = out
self.seqno = 0
self.depth = 0
self.clips = 0
self.method = None
self.seqno = 0
self.pathdict = dict()
self.scissors = list()
self.linewidth = 0
self.ptm = mupdf.FzMatrix()
self.ctm = mupdf.FzMatrix()
self.rot = mupdf.FzMatrix()
self.lastpoint = mupdf.FzPoint()
self.pathrect = mupdf.FzRect()
self.pathfactor = 0
self.linecount = 0
self.path_type = 0
self.layer_name = ""
fill_path = jm_increase_seqno
stroke_path = jm_dev_linewidth
fill_text = jm_lineart_fill_text
stroke_text = jm_lineart_stroke_text
ignore_text = jm_lineart_ignore_text
fill_shade = jm_increase_seqno
fill_image = jm_increase_seqno
fill_image_mask = jm_increase_seqno
begin_layer = jm_lineart_begin_layer
end_layer = jm_lineart_end_layer
def ConversionHeader(i: str, filename: OptStr ="unknown"):
t = i.lower()
import textwrap
html = textwrap.dedent("""
<!DOCTYPE html>
<html>
<head>
<style>
body{background-color:gray}
div{position:relative;background-color:white;margin:1em auto}
p{position:absolute;margin:0}
img{position:absolute}
</style>
</head>
<body>
""")
xml = textwrap.dedent("""
<?xml version="1.0"?>
<document name="%s">
"""
% filename
)
xhtml = textwrap.dedent("""
<?xml version="1.0"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<style>
body{background-color:gray}
div{background-color:white;margin:1em;padding:1em}
p{white-space:pre-wrap}
</style>
</head>
<body>
""")
text = ""
json = '{"document": "%s", "pages": [\n' % filename
if t == "html":
r = html
elif t == "json":
r = json
elif t == "xml":
r = xml
elif t == "xhtml":
r = xhtml
else:
r = text
return r
def ConversionTrailer(i: str):
t = i.lower()
text = ""
json = "]\n}"
html = "</body>\n</html>\n"
xml = "</document>\n"
xhtml = html
if t == "html":
r = html
elif t == "json":
r = json
elif t == "xml":
r = xml
elif t == "xhtml":
r = xhtml
else:
r = text
return r
def annot_preprocess(page: "Page") -> int:
"""Prepare for annotation insertion on the page.
Returns:
Old page rotation value. Temporarily sets rotation to 0 when required.
"""
CheckParent(page)
if not page.parent.is_pdf:
raise ValueError("is no PDF")
old_rotation = page.rotation
if old_rotation != 0:
page.set_rotation(0)
return old_rotation
def annot_postprocess(page: "Page", annot: "Annot") -> None:
"""Clean up after annotation insertion.
Set ownership flag and store annotation in page annotation dictionary.
"""
#annot.parent = weakref.proxy(page)
assert isinstance( page, Page)
assert isinstance( annot, Annot)
annot.parent = page
page._annot_refs[id(annot)] = annot
annot.thisown = True
def canon(c):
assert isinstance(c, int)
# TODO: proper unicode case folding
# TODO: character equivalence (a matches ä, etc)
if c == 0xA0 or c == 0x2028 or c == 0x2029:
return ord(' ')
if c == ord('\r') or c == ord('\n') or c == ord('\t'):
return ord(' ')
if c >= ord('A') and c <= ord('Z'):
return c - ord('A') + ord('a')
return c
def chartocanon(s):
assert isinstance(s, str)
n, c = mupdf.fz_chartorune(s)
c = canon(c)
return n, c
def dest_is_valid(o, page_count, page_object_nums, names_list):
p = mupdf.pdf_dict_get( o, PDF_NAME('A'))
if (
mupdf.pdf_name_eq(
mupdf.pdf_dict_get( p, PDF_NAME('S')),
PDF_NAME('GoTo')
)
and not string_in_names_list(
mupdf.pdf_dict_get( p, PDF_NAME('D')),
names_list
)
):
return 0
p = mupdf.pdf_dict_get( o, PDF_NAME('Dest'))
if not p.m_internal:
pass
elif mupdf.pdf_is_string( p):
return string_in_names_list( p, names_list)
elif not dest_is_valid_page(
mupdf.pdf_array_get( p, 0),
page_object_nums,
page_count,
):
return 0
return 1
def dest_is_valid_page(obj, page_object_nums, pagecount):
num = mupdf.pdf_to_num(obj)
if num == 0:
return 0
for i in range(pagecount):
if page_object_nums[i] == num:
return 1
return 0
def find_string(s, needle):
assert isinstance(s, str)
for i in range(len(s)):
end = match_string(s[i:], needle)
if end is not None:
end += i
return i, end
return None, None
def get_pdf_now() -> str:
'''
"Now" timestamp in PDF Format
'''
import time
tz = "%s'%s'" % (
str(abs(time.altzone // 3600)).rjust(2, "0"),
str((abs(time.altzone // 60) % 60)).rjust(2, "0"),
)
tstamp = time.strftime("D:%Y%m%d%H%M%S", time.localtime())
if time.altzone > 0:
tstamp += "-" + tz
elif time.altzone < 0:
tstamp += "+" + tz
else:
pass
return tstamp
| JM_new_texttrace_device |
python | cython__cython | Cython/Debugger/Tests/TestLibCython.py | {
"start": 2115,
"end": 5382
} | class ____(unittest.TestCase):
def setUp(self):
"""
Run gdb and have cygdb import the debug information from the code
defined in TestParseTreeTransforms's setUp method
"""
if not test_gdb():
return
self.tempdir = tempfile.mkdtemp()
self.destfile = os.path.join(self.tempdir, 'codefile.pyx')
self.debug_dest = os.path.join(self.tempdir,
'cython_debug',
'cython_debug_info_codefile')
self.cfuncs_destfile = os.path.join(self.tempdir, 'cfuncs')
self.cwd = os.getcwd()
try:
os.chdir(self.tempdir)
shutil.copy(codefile, self.destfile)
shutil.copy(cfuncs_file, self.cfuncs_destfile + '.c')
shutil.copy(cfuncs_file.replace('.c', '.h'),
self.cfuncs_destfile + '.h')
compiler = ccompiler.new_compiler()
compiler.compile(['cfuncs.c'], debug=True, extra_postargs=['-fPIC'])
opts = dict(
test_directory=self.tempdir,
module='codefile',
module_path=self.destfile,
)
optimization_disabler = build_ext.Optimization()
cython_compile_testcase = runtests.CythonCompileTestCase(
workdir=self.tempdir,
# we clean up everything (not only compiled files)
cleanup_workdir=False,
tags=runtests.parse_tags(codefile),
**opts
)
new_stderr = open(os.devnull, 'w')
stderr = sys.stderr
sys.stderr = new_stderr
optimization_disabler.disable_optimization()
try:
cython_compile_testcase.run_cython(
targetdir=self.tempdir,
incdir=None,
annotate=False,
extra_compile_options={
'gdb_debug':True,
'output_dir':self.tempdir,
},
**opts
)
cython_compile_testcase.run_distutils(
test_directory=opts['test_directory'],
module=opts['module'],
workdir=opts['test_directory'],
incdir=None,
extra_extension_args={'extra_objects':['cfuncs.o']},
)
finally:
optimization_disabler.restore_state()
sys.stderr = stderr
new_stderr.close()
# ext = Cython.Distutils.extension.Extension(
# 'codefile',
# ['codefile.pyx'],
# cython_gdb=True,
# extra_objects=['cfuncs.o'])
#
# distutils.core.setup(
# script_args=['build_ext', '--inplace'],
# ext_modules=[ext],
# cmdclass=dict(build_ext=Cython.Distutils.build_ext)
# )
except:
os.chdir(self.cwd)
raise
def tearDown(self):
if not test_gdb():
return
os.chdir(self.cwd)
shutil.rmtree(self.tempdir)
| DebuggerTestCase |
python | h5py__h5py | h5py/tests/test_slicing.py | {
"start": 10573,
"end": 14146
} | class ____(BaseSlicing):
def setUp(self):
super().setUp()
self.arr = np.arange(10)
self.dset = self.f.create_dataset('x', data=self.arr)
def test_default(self):
# Default selects entire dataset as one block
mbslice = MultiBlockSlice()
self.assertEqual(mbslice.indices(10), (0, 1, 10, 1))
np.testing.assert_array_equal(self.dset[mbslice], self.arr)
def test_default_explicit(self):
mbslice = MultiBlockSlice(start=0, count=10, stride=1, block=1)
self.assertEqual(mbslice.indices(10), (0, 1, 10, 1))
np.testing.assert_array_equal(self.dset[mbslice], self.arr)
def test_start(self):
mbslice = MultiBlockSlice(start=4)
self.assertEqual(mbslice.indices(10), (4, 1, 6, 1))
np.testing.assert_array_equal(self.dset[mbslice], np.array([4, 5, 6, 7, 8, 9]))
def test_count(self):
mbslice = MultiBlockSlice(count=7)
self.assertEqual(mbslice.indices(10), (0, 1, 7, 1))
np.testing.assert_array_equal(
self.dset[mbslice], np.array([0, 1, 2, 3, 4, 5, 6])
)
def test_count_more_than_length_error(self):
mbslice = MultiBlockSlice(count=11)
with self.assertRaises(ValueError):
mbslice.indices(10)
def test_stride(self):
mbslice = MultiBlockSlice(stride=2)
self.assertEqual(mbslice.indices(10), (0, 2, 5, 1))
np.testing.assert_array_equal(self.dset[mbslice], np.array([0, 2, 4, 6, 8]))
def test_stride_zero_error(self):
with self.assertRaises(ValueError):
# This would cause a ZeroDivisionError if not caught
MultiBlockSlice(stride=0, block=0).indices(10)
def test_stride_block_equal(self):
mbslice = MultiBlockSlice(stride=2, block=2)
self.assertEqual(mbslice.indices(10), (0, 2, 5, 2))
np.testing.assert_array_equal(self.dset[mbslice], self.arr)
def test_block_more_than_stride_error(self):
with self.assertRaises(ValueError):
MultiBlockSlice(block=3)
with self.assertRaises(ValueError):
MultiBlockSlice(stride=2, block=3)
def test_stride_more_than_block(self):
mbslice = MultiBlockSlice(stride=3, block=2)
self.assertEqual(mbslice.indices(10), (0, 3, 3, 2))
np.testing.assert_array_equal(self.dset[mbslice], np.array([0, 1, 3, 4, 6, 7]))
def test_block_overruns_extent_error(self):
# If fully described then must fit within extent
mbslice = MultiBlockSlice(start=2, count=2, stride=5, block=4)
with self.assertRaises(ValueError):
mbslice.indices(10)
def test_fully_described(self):
mbslice = MultiBlockSlice(start=1, count=2, stride=5, block=4)
self.assertEqual(mbslice.indices(10), (1, 5, 2, 4))
np.testing.assert_array_equal(
self.dset[mbslice], np.array([1, 2, 3, 4, 6, 7, 8, 9])
)
def test_count_calculated(self):
# If not given, count should be calculated to select as many full blocks as possible
mbslice = MultiBlockSlice(start=1, stride=3, block=2)
self.assertEqual(mbslice.indices(10), (1, 3, 3, 2))
np.testing.assert_array_equal(self.dset[mbslice], np.array([1, 2, 4, 5, 7, 8]))
def test_zero_count_calculated_error(self):
# In this case, there is no possible count to select even one block, so error
mbslice = MultiBlockSlice(start=8, stride=4, block=3)
with self.assertRaises(ValueError):
mbslice.indices(10)
| TestMultiBlockSlice |
python | numba__llvmlite | llvmlite/binding/value.py | {
"start": 295,
"end": 721
} | class ____(enum.IntEnum):
# The LLVMLinkage enum from llvm-c/Core.h
external = 0
available_externally = 1
linkonce_any = 2
linkonce_odr = 3
linkonce_odr_autohide = 4
weak_any = 5
weak_odr = 6
appending = 7
internal = 8
private = 9
dllimport = 10
dllexport = 11
external_weak = 12
ghost = 13
common = 14
linker_private = 15
linker_private_weak = 16
| Linkage |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_file_search.py | {
"start": 12369,
"end": 13964
} | class ____:
"""Tests for edge cases in grep search."""
def test_grep_with_special_chars_in_pattern(self, tmp_path: Path) -> None:
"""Test grep with special characters in pattern."""
(tmp_path / "test.py").write_text("def test():\n pass\n", encoding="utf-8")
middleware = FilesystemFileSearchMiddleware(root_path=str(tmp_path), use_ripgrep=False)
result = middleware.grep_search.func(pattern="def.*:")
assert "/test.py" in result
def test_grep_case_insensitive(self, tmp_path: Path) -> None:
"""Test grep with case-insensitive search."""
(tmp_path / "test.py").write_text("HELLO world\n", encoding="utf-8")
middleware = FilesystemFileSearchMiddleware(root_path=str(tmp_path), use_ripgrep=False)
result = middleware.grep_search.func(pattern="(?i)hello")
assert "/test.py" in result
def test_grep_with_large_file_skipping(self, tmp_path: Path) -> None:
"""Test that grep skips files larger than max_file_size_mb."""
# Create a file larger than 1MB
large_content = "x" * (2 * 1024 * 1024) # 2MB
(tmp_path / "large.txt").write_text(large_content, encoding="utf-8")
(tmp_path / "small.txt").write_text("x", encoding="utf-8")
middleware = FilesystemFileSearchMiddleware(
root_path=str(tmp_path),
use_ripgrep=False,
max_file_size_mb=1, # 1MB limit
)
result = middleware.grep_search.func(pattern="x")
# Large file should be skipped
assert "/small.txt" in result
| TestGrepEdgeCases |
python | mwaskom__seaborn | tests/_stats/test_counting.py | {
"start": 193,
"end": 1237
} | class ____:
@pytest.fixture
def df(self, rng):
n = 30
return pd.DataFrame(dict(
x=rng.uniform(0, 7, n).round(),
y=rng.normal(size=n),
color=rng.choice(["a", "b", "c"], n),
group=rng.choice(["x", "y"], n),
))
def get_groupby(self, df, orient):
other = {"x": "y", "y": "x"}[orient]
cols = [c for c in df if c != other]
return GroupBy(cols)
def test_single_grouper(self, df):
ori = "x"
df = df[["x"]]
gb = self.get_groupby(df, ori)
res = Count()(df, gb, ori, {})
expected = df.groupby("x").size()
assert_array_equal(res.sort_values("x")["y"], expected)
def test_multiple_groupers(self, df):
ori = "x"
df = df[["x", "group"]].sort_values("group")
gb = self.get_groupby(df, ori)
res = Count()(df, gb, ori, {})
expected = df.groupby(["x", "group"]).size()
assert_array_equal(res.sort_values(["x", "group"])["y"], expected)
| TestCount |
python | jazzband__django-oauth-toolkit | oauth2_provider/admin.py | {
"start": 1212,
"end": 1414
} | class ____(admin.ModelAdmin):
list_display = ("code", "application", "user", "expires")
raw_id_fields = ("user",)
search_fields = ("code",) + (("user__email",) if has_email else ())
| GrantAdmin |
python | celery__celery | celery/backends/database/session.py | {
"start": 684,
"end": 3011
} | class ____:
"""Manage SQLAlchemy sessions."""
def __init__(self):
self._engines = {}
self._sessions = {}
self.forked = False
self.prepared = False
if register_after_fork is not None:
register_after_fork(self, _after_fork_cleanup_session)
def _after_fork(self):
self.forked = True
def get_engine(self, dburi, **kwargs):
if self.forked:
try:
return self._engines[dburi]
except KeyError:
engine = self._engines[dburi] = create_engine(dburi, **kwargs)
return engine
else:
kwargs = {k: v for k, v in kwargs.items() if
not k.startswith('pool')}
return create_engine(dburi, poolclass=NullPool, **kwargs)
def create_session(self, dburi, short_lived_sessions=False, **kwargs):
engine = self.get_engine(dburi, **kwargs)
if self.forked:
if short_lived_sessions or dburi not in self._sessions:
self._sessions[dburi] = sessionmaker(bind=engine)
return engine, self._sessions[dburi]
return engine, sessionmaker(bind=engine)
def prepare_models(self, engine):
if not self.prepared:
# SQLAlchemy will check if the items exist before trying to
# create them, which is a race condition. If it raises an error
# in one iteration, the next may pass all the existence checks
# and the call will succeed.
retries = 0
while True:
try:
ResultModelBase.metadata.create_all(engine)
except DatabaseError:
if retries < PREPARE_MODELS_MAX_RETRIES:
sleep_amount_ms = get_exponential_backoff_interval(
10, retries, 1000, True
)
time.sleep(sleep_amount_ms / 1000)
retries += 1
else:
raise
else:
break
self.prepared = True
def session_factory(self, dburi, **kwargs):
engine, session = self.create_session(dburi, **kwargs)
self.prepare_models(engine)
return session()
| SessionManager |
python | astropy__astropy | astropy/units/tests/test_quantity_non_ufuncs.py | {
"start": 17135,
"end": 18581
} | class ____(InvariantUnitTestSetup):
def test_diag(self):
self.check(np.diag)
def test_diag_1d_input(self):
# Also check 1-D case; drops unit w/o __array_function__.
q = self.q.ravel()
o = np.diag(q)
expected = np.diag(q.value) << q.unit
assert o.unit == self.q.unit
assert o.shape == expected.shape
assert_array_equal(o, expected)
def test_diagonal(self):
self.check(np.diagonal)
def test_diagflat(self):
self.check(np.diagflat)
def test_compress(self):
o = np.compress([True, False, True], self.q, axis=0)
expected = np.compress([True, False, True], self.q.value, axis=0) * self.q.unit
assert np.all(o == expected)
def test_extract(self):
o = np.extract([True, False, True], self.q)
expected = np.extract([True, False, True], self.q.value) * self.q.unit
assert np.all(o == expected)
def test_delete(self):
self.check(np.delete, slice(1, 2), 0)
self.check(np.delete, [0, 2], 1)
def test_trim_zeros(self):
q = self.q.ravel()
out = np.trim_zeros(q)
expected = np.trim_zeros(q.value) * u.m
assert np.all(out == expected)
def test_roll(self):
self.check(np.roll, 1)
self.check(np.roll, 1, axis=0)
def test_take(self):
self.check(np.take, [0, 1], axis=1)
self.check(np.take, 1)
| TestAccessingParts |
python | bokeh__bokeh | src/bokeh/models/tickers.py | {
"start": 9022,
"end": 9395
} | class ____(BaseSingleIntervalTicker):
''' Generate ticks spaced apart by specific, even multiples of months.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
months = Seq(Int, default=[], help="""
The intervals of months to use.
""")
| MonthsTicker |
python | python-attrs__attrs | tests/test_converters.py | {
"start": 8476,
"end": 9093
} | class ____:
def test_unhashable(self):
"""
Fails if value is unhashable.
"""
with pytest.raises(ValueError, match="Cannot convert value to bool"):
to_bool([])
def test_truthy(self):
"""
Fails if truthy values are incorrectly converted.
"""
assert to_bool("t")
assert to_bool("yes")
assert to_bool("on")
def test_falsy(self):
"""
Fails if falsy values are incorrectly converted.
"""
assert not to_bool("f")
assert not to_bool("no")
assert not to_bool("off")
| TestToBool |
python | spack__spack | lib/spack/spack/llnl/util/link_tree.py | {
"start": 20504,
"end": 20657
} | class ____(MergeConflictError):
def __init__(self, path):
super().__init__("Package merge blocked by file: %s" % path)
| SingleMergeConflictError |
python | keras-team__keras | examples/demo_custom_tf_workflow.py | {
"start": 885,
"end": 2063
} | class ____(Model):
def __init__(self, hidden_dim, output_dim):
super().__init__()
self.dense1 = MyDense(hidden_dim)
self.dense2 = MyDense(hidden_dim)
self.dense3 = MyDense(output_dim)
def call(self, x):
x = tf.nn.relu(self.dense1(x))
x = tf.nn.relu(self.dense2(x))
return self.dense3(x)
def Dataset():
for _ in range(20):
yield (
np.random.random((32, 128)).astype("float32"),
np.random.random((32, 4)).astype("float32"),
)
def loss_fn(y_true, y_pred):
return ops.sum((y_true - y_pred) ** 2)
model = MyModel(hidden_dim=256, output_dim=4)
optimizer = optimizers.SGD(learning_rate=0.001)
dataset = Dataset()
######### Custom TF workflow ###############
@tf.function(jit_compile=True)
def train_step(data):
x, y = data
with tf.GradientTape() as tape:
y_pred = model(x)
loss = loss_fn(y, y_pred)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return loss
for data in dataset:
loss = train_step(data)
print("Loss:", float(loss))
| MyModel |
python | PyCQA__pylint | doc/test_messages_documentation.py | {
"start": 2839,
"end": 8852
} | class ____:
def __init__(
self, test_file: tuple[str, Path], multiple_file_messages: list[str]
) -> None:
self._test_file = test_file
self._multiple_file_messages = multiple_file_messages
_test_reporter = FunctionalTestReporter()
self._linter = PyLinter()
self._linter.config.persistent = 0
checkers.initialize(self._linter)
# Check if this message has a custom configuration file (e.g. for enabling optional checkers).
# If not, use the default configuration.
config_file: Path | None
msgid, full_path = test_file
pylintrc = full_path.parent / "pylintrc"
config_file = pylintrc if pylintrc.exists() else None
print(f"Config file used: {config_file}")
args = [
str(full_path),
"--disable=all",
f"--enable=F,{msgid},astroid-error,syntax-error",
]
print(f"Command used:\npylint {' '.join(args)}")
_config_initialization(
self._linter,
args_list=args,
reporter=_test_reporter,
config_file=config_file,
)
def runTest(self) -> None:
self._runTest()
def is_good_test(self) -> bool:
return self._test_file[1].stem == "good"
def is_bad_test(self) -> bool:
return self._test_file[1].stem == "bad"
@staticmethod
def get_expected_messages(stream: TextIO) -> MessageCounter:
"""Parse a file and get expected messages."""
messages: MessageCounter = Counter()
for i, line in enumerate(stream):
match = _EXPECTED_RE.search(line)
if match is None:
continue
line = match.group("line")
if line is None:
lineno = i + 1
elif line.startswith(("+", "-")):
lineno = i + 1 + int(line)
else:
lineno = int(line)
for msg_id in match.group("msgs").split(","):
messages[lineno, msg_id.strip()] += 1
return messages
def _get_expected(self) -> MessageCounter:
"""Get the expected messages for a file or directory."""
expected_msgs: MessageCounter = Counter()
if self._test_file[1].is_dir():
for test_file in self._test_file[1].iterdir():
with open(test_file, encoding="utf8") as f:
expected_msgs += self.get_expected_messages(f)
else:
with open(self._test_file[1], encoding="utf8") as f:
expected_msgs += self.get_expected_messages(f)
return expected_msgs
def _get_actual(self, messages: list[Message]) -> MessageCounter:
"""Get the actual messages after a run."""
messages.sort(key=lambda m: (m.line, m.symbol, m.msg))
received_msgs: MessageCounter = Counter()
for msg in messages:
received_msgs[msg.line, msg.symbol] += 1
return received_msgs
def _runTest(self) -> None:
"""Run the test and assert message differences."""
self._linter.check([str(self._test_file[1])])
expected_messages = self._get_expected()
actual_messages_raw = self._linter.reporter.messages
if self.is_good_test():
assert not actual_messages_raw, self.assert_message_good(
actual_messages_raw
)
if self.is_bad_test():
bad_files = [self._test_file[1]]
if self._test_file[1].is_dir() and not self.is_multifile_example():
bad_files = list(self._test_file[1].iterdir())
assert len(actual_messages_raw) >= len(bad_files), self.assert_message_bad(
bad_files, actual_messages_raw
)
assert expected_messages == self._get_actual(actual_messages_raw)
def assert_message_good(self, messages: list[Message]) -> str:
good = self._test_file[1]
msg = f"There should be no warning raised for '{good}' but these messages were raised:\n"
file_representations = {}
for message in messages:
if message.path not in file_representations:
with open(message.path) as f:
file_representations[message.path] = [
line[:-1] for line in f.readlines()
]
file_representations[message.path][
message.line - 1
] += f" # <-- /!\\ unexpected '{message.symbol}' /!\\"
for path, representation in file_representations.items():
file_representation = "\n".join(representation)
msg += f"\n\n\nIn {path}:\n\n{file_representation}\n"
return msg
def is_multifile_example(self) -> bool:
"""Multiple file example do not need to have one warning for each bad file."""
return self._test_file[0] in self._multiple_file_messages
def assert_message_bad(self, bad_files: list[Path], messages: list[Message]) -> str:
each = "each file in " if len(bad_files) > 1 else ""
msg = (
f"There should be at least one warning raised for "
f"{each}'{self._test_file[1]}' ({len(bad_files)} total)\n"
)
raised_files: set[Path] = set()
for message in messages:
raised_files.add(Path(message.path).absolute())
missing_files = set(bad_files) - raised_files
for missing_file in missing_files:
msg += f"- Missing warning in {missing_file}\n"
if messages:
msg += f"'{messages[0].symbol}' might need to be added in 'known_multiple_file_messages'.\n\n"
return msg
@pytest.mark.parametrize("test_file", TESTS, ids=TESTS_NAMES)
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_code_examples(test_file: tuple[str, Path]) -> None:
known_multiple_file_messages = ["cyclic-import", "duplicate-code"]
lint_test = LintModuleTest(test_file, known_multiple_file_messages)
lint_test.runTest()
| LintModuleTest |
python | walkccc__LeetCode | solutions/2332. The Latest Time to Catch a Bus/2332.py | {
"start": 0,
"end": 857
} | class ____:
def latestTimeCatchTheBus(
self,
buses: list[int],
passengers: list[int],
capacity: int,
) -> int:
buses.sort()
passengers.sort()
if passengers[0] > buses[-1]:
return buses[-1]
ans = passengers[0] - 1
i = 0 # buses' index
j = 0 # passengers' index
while i < len(buses):
# Greedily make passengers catch `buses[i]`.
arrived = 0
while arrived < capacity and j < len(passengers) and passengers[j] <= buses[i]:
if j > 0 and passengers[j] != passengers[j - 1] + 1:
ans = passengers[j] - 1
j += 1
arrived += 1
# There's room for `buses[i]` to carry a passenger arriving at the
# `buses[i]`.
if arrived < capacity and j > 0 and passengers[j - 1] != buses[i]:
ans = buses[i]
i += 1
return ans
| Solution |
python | pytorch__pytorch | torch/fx/_graph_pickler.py | {
"start": 5990,
"end": 7143
} | class ____:
data: dict[str, object]
@classmethod
def reduce_helper(
cls, pickler: GraphPickler, obj: ShapeEnv
) -> tuple[
Callable[[Self, _UnpickleState], ShapeEnv], tuple[Self, _UnpickleStateToken]
]:
return cls.unpickle, (cls(obj), pickler._unpickle_state)
def __init__(self, env: ShapeEnv) -> None:
# In theory pickle should recognize that a given ShapeEnv was already
# pickled and reuse the resulting _ShapeEnvPickleData (so two objects
# pointing at the same ShapeEnv get the same ShapeEnv out).
assert not env._translation_validation_enabled
self.data = env.__dict__.copy()
del self.data["tracked_fakes"]
del self.data["fake_tensor_cache"]
def unpickle(self, unpickle_state: _UnpickleState) -> ShapeEnv:
# Fill in the existing ShapeEnv rather than creating a new one
assert unpickle_state.fake_mode
assert unpickle_state.fake_mode.shape_env
for k, v in self.data.items():
setattr(unpickle_state.fake_mode.shape_env, k, v)
return unpickle_state.fake_mode.shape_env
| _ShapeEnvPickleData |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/triggers/eks.py | {
"start": 12533,
"end": 14400
} | class ____(AwsBaseWaiterTrigger):
"""
Trigger for EksCreateNodegroupOperator.
The trigger will asynchronously poll the boto3 API and wait for the
nodegroup to be in the state specified by the waiter.
:param cluster_name: The name of the EKS cluster associated with the node group.
:param nodegroup_name: The name of the nodegroup to check.
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
:param region_name: Which AWS region the connection should use. (templated)
If this is None or empty then the default boto3 behaviour is used.
"""
def __init__(
self,
cluster_name: str,
nodegroup_name: str,
waiter_delay: int,
waiter_max_attempts: int,
aws_conn_id: str | None,
region_name: str | None = None,
):
super().__init__(
serialized_fields={
"cluster_name": cluster_name,
"nodegroup_name": nodegroup_name,
"region_name": region_name,
},
waiter_name="nodegroup_active",
waiter_args={"clusterName": cluster_name, "nodegroupName": nodegroup_name},
failure_message="Error creating nodegroup",
status_message="Nodegroup status is",
status_queries=["nodegroup.status", "nodegroup.health.issues"],
return_value=None,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
region_name=region_name,
)
def hook(self) -> AwsGenericHook:
return EksHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
| EksCreateNodegroupTrigger |
python | realpython__materials | hashtable/01_hashtable_prototype/01_define_a_custom_hashtable_class/hashtable.py | {
"start": 17,
"end": 164
} | class ____:
def __init__(self, capacity):
self.values = capacity * [None]
def __len__(self):
return len(self.values)
| HashTable |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/base.py | {
"start": 31561,
"end": 32169
} | class ____(Options, HasCacheKey):
__slots__ = ()
@hybridmethod
def _gen_cache_key_inst(
self, anon_map: Any, bindparams: List[BindParameter[Any]]
) -> Optional[Tuple[Any]]:
return HasCacheKey._gen_cache_key(self, anon_map, bindparams)
@_gen_cache_key_inst.classlevel
def _gen_cache_key(
cls, anon_map: "anon_map", bindparams: List[BindParameter[Any]]
) -> Tuple[CacheableOptions, Any]:
return (cls, ())
@hybridmethod
def _generate_cache_key(self) -> Optional[CacheKey]:
return HasCacheKey._generate_cache_key(self)
| CacheableOptions |
python | getsentry__sentry | src/sentry/models/releasefile.py | {
"start": 1160,
"end": 1666
} | class ____(models.Manager["ReleaseFile"]):
"""Manager for all release files that are not internal.
Internal release files include:
* Uploaded release archives
* Artifact index mapping URLs to release archives
This manager has the overhead of always joining the File table in order
to filter release files.
"""
def get_queryset(self):
return super().get_queryset().select_related("file").filter(file__type="release.file")
@region_silo_model
| PublicReleaseFileManager |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0137_use_generic_root_selector.py | {
"start": 150,
"end": 1077
} | class ____(migrations.Migration):
safe = Safe.before_deploy()
dependencies = [
("projects", "0136_addons_customscript_notnull"),
]
operations = [
migrations.AddField(
model_name="addonsconfig",
name="options_root_selector",
field=models.CharField(
blank=True,
help_text="CSS selector for the main content of the page. Leave it blank for auto-detect.",
max_length=128,
null=True,
),
),
migrations.AddField(
model_name="historicaladdonsconfig",
name="options_root_selector",
field=models.CharField(
blank=True,
help_text="CSS selector for the main content of the page. Leave it blank for auto-detect.",
max_length=128,
null=True,
),
),
]
| Migration |
python | django-extensions__django-extensions | tests/testapp/models.py | {
"start": 6148,
"end": 6583
} | class ____(models.Model):
title = models.CharField(max_length=42)
slug = AutoSlugField(populate_from="title")
category = models.CharField(max_length=20, null=True)
class Meta:
app_label = "django_extensions"
constraints = [
UniqueConstraint(
fields=["slug", "category"],
name="unique_slug_and_category",
),
]
| SluggedWithConstraintsTestModel |
python | networkx__networkx | networkx/utils/mapped_queue.py | {
"start": 97,
"end": 2965
} | class ____:
"""This proxy class separates the heap element from its priority.
The idea is that using a 2-tuple (priority, element) works
for sorting, but not for dict lookup because priorities are
often floating point values so round-off can mess up equality.
So, we need inequalities to look at the priority (for sorting)
and equality (and hash) to look at the element to enable
updates to the priority.
Unfortunately, this class can be tricky to work with if you forget that
`__lt__` compares the priority while `__eq__` compares the element.
In `greedy_modularity_communities()` the following code is
used to check that two _HeapElements differ in either element or priority:
if d_oldmax != row_max or d_oldmax.priority != row_max.priority:
If the priorities are the same, this implementation uses the element
as a tiebreaker. This provides compatibility with older systems that
use tuples to combine priority and elements.
"""
__slots__ = ["priority", "element", "_hash"]
def __init__(self, priority, element):
self.priority = priority
self.element = element
self._hash = hash(element)
def __lt__(self, other):
try:
other_priority = other.priority
except AttributeError:
return self.priority < other
# assume comparing to another _HeapElement
if self.priority == other_priority:
try:
return self.element < other.element
except TypeError as err:
raise TypeError(
"Consider using a tuple, with a priority value that can be compared."
)
return self.priority < other_priority
def __gt__(self, other):
try:
other_priority = other.priority
except AttributeError:
return self.priority > other
# assume comparing to another _HeapElement
if self.priority == other_priority:
try:
return self.element > other.element
except TypeError as err:
raise TypeError(
"Consider using a tuple, with a priority value that can be compared."
)
return self.priority > other_priority
def __eq__(self, other):
try:
return self.element == other.element
except AttributeError:
return self.element == other
def __hash__(self):
return self._hash
def __getitem__(self, indx):
return self.priority if indx == 0 else self.element[indx - 1]
def __iter__(self):
yield self.priority
try:
yield from self.element
except TypeError:
yield self.element
def __repr__(self):
return f"_HeapElement({self.priority}, {self.element})"
| _HeapElement |
python | scrapy__scrapy | tests/test_utils_python.py | {
"start": 853,
"end": 1904
} | class ____:
@staticmethod
async def g1():
for i in range(3):
yield i
@staticmethod
async def g2():
return
yield
@staticmethod
async def g3():
for i in range(7, 10):
yield i
@staticmethod
async def g4():
for i in range(3, 5):
yield i
1 / 0
for i in range(5, 7):
yield i
@deferred_f_from_coro_f
async def test_mutableasyncchain(self):
m = MutableAsyncChain(self.g1(), as_async_generator(range(3, 7)))
m.extend(self.g2())
m.extend(self.g3())
assert await m.__anext__() == 0
results = await collect_asyncgen(m)
assert results == list(range(1, 10))
@deferred_f_from_coro_f
async def test_mutableasyncchain_exc(self):
m = MutableAsyncChain(self.g1())
m.extend(self.g4())
m.extend(self.g3())
results = await collect_asyncgen(aiter_errback(m, lambda _: None))
assert results == list(range(5))
| TestMutableAsyncChain |
python | ansible__ansible | lib/ansible/plugins/action/__init__.py | {
"start": 2349,
"end": 69313
} | class ____(ABC, _AnsiblePluginInfoMixin):
"""
This class is the base class for all action plugins, and defines
code common to all actions. The base class handles the connection
by putting/getting files and executing commands based on the current
action in use.
"""
# A set of valid arguments
_VALID_ARGS = frozenset([]) # type: frozenset[str]
# behavioral attributes
BYPASS_HOST_LOOP = False
TRANSFERS_FILES = False
_requires_connection = True
_supports_check_mode = True
_supports_async = False
supports_raw_params = False
def __init__(self, task: Task, connection: ConnectionBase, play_context: PlayContext, loader: DataLoader, templar: Templar, shared_loader_obj=None):
self._task = task
self._connection = connection
self._play_context = play_context
self._loader = loader
self._templar = templar
from ansible.plugins import loader as plugin_loaders # avoid circular global import since PluginLoader needs ActionBase
self._shared_loader_obj = plugin_loaders # shared_loader_obj was just a ref to `ansible.plugins.loader` anyway; this lets us inherit its type
self._cleanup_remote_tmp = False
# interpreter discovery state
self._discovered_interpreter_key: str | None = None
self._discovered_interpreter = False
self._used_interpreter: str | None = None
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
@abstractmethod
def run(self, tmp: str | None = None, task_vars: dict[str, t.Any] | None = None) -> dict[str, t.Any]:
""" Action Plugins should implement this method to perform their
tasks. Everything else in this base class is a helper method for the
action plugin to do that.
:kwarg tmp: Deprecated parameter. This is no longer used. An action plugin that calls
another one and wants to use the same remote tmp for both should set
self._connection._shell.tmpdir rather than this parameter.
:kwarg task_vars: The variables (host vars, group vars, config vars,
etc) associated with this task.
:returns: dictionary of results from the module
Implementers of action modules may find the following variables especially useful:
* Module parameters. These are stored in self._task.args
"""
# does not default to {'changed': False, 'failed': False}, as it used to break async
result: dict[str, t.Any] = {}
if tmp is not None:
display.warning('ActionModule.run() no longer honors the tmp parameter. Action'
' plugins should set self._connection._shell.tmpdir to share'
' the tmpdir.')
del tmp
if self._task.async_val and not self._supports_async:
raise AnsibleActionFail('This action (%s) does not support async.' % self._task.action)
elif self._task.check_mode and not self._supports_check_mode:
raise AnsibleActionSkip('This action (%s) does not support check mode.' % self._task.action)
# Error if invalid argument is passed
if self._VALID_ARGS:
task_opts = frozenset(self._task.args.keys())
bad_opts = task_opts.difference(self._VALID_ARGS)
if bad_opts:
raise AnsibleActionFail('Invalid options for %s: %s' % (self._task.action, ','.join(list(bad_opts))))
if self._connection._shell.tmpdir is None and self._early_needs_tmp_path():
self._make_tmp_path()
return result
def validate_argument_spec(self, argument_spec=None,
mutually_exclusive=None,
required_together=None,
required_one_of=None,
required_if=None,
required_by=None,
):
"""Validate an argument spec against the task args
This will return a tuple of (ValidationResult, dict) where the dict
is the validated, coerced, and normalized task args.
Be cautious when directly passing ``new_module_args`` directly to a
module invocation, as it will contain the defaults, and not only
the args supplied from the task. If you do this, the module
should not define ``mutually_exclusive`` or similar.
This code is roughly copied from the ``validate_argument_spec``
action plugin for use by other action plugins.
"""
new_module_args = self._task.args.copy()
validator = ArgumentSpecValidator(
argument_spec,
mutually_exclusive=mutually_exclusive,
required_together=required_together,
required_one_of=required_one_of,
required_if=required_if,
required_by=required_by,
)
validation_result = validator.validate(new_module_args)
new_module_args.update(validation_result.validated_parameters)
try:
error = validation_result.errors[0]
except IndexError:
error = None
# Fail for validation errors, even in check mode
if error:
msg = validation_result.errors.msg
if isinstance(error, UnsupportedError):
msg = f"Unsupported parameters for ({self._load_name}) module: {msg}"
raise AnsibleActionFail(msg, obj=self._task.args)
return validation_result, new_module_args
def cleanup(self, force=False):
"""Method to perform a clean up at the end of an action plugin execution
By default this is designed to clean up the shell tmpdir, and is toggled based on whether
async is in use
Action plugins may override this if they deem necessary, but should still call this method
via super
"""
if force or not self._task.async_val:
self._remove_tmp_path(self._connection._shell.tmpdir)
@classmethod
@contextlib.contextmanager
@_internal.experimental
def get_finalize_task_args_context(cls) -> t.Any:
"""
EXPERIMENTAL: Unstable API subject to change at any time without notice.
Wraps task arg finalization with (optional) stateful context.
The context manager is entered during `Task.post_validate_args, and may yield a single value to be passed
as `context` to Task.finalize_task_arg for each task arg.
"""
yield None
@classmethod
@_internal.experimental
def finalize_task_arg(cls, name: str, value: t.Any, templar: _engine.TemplateEngine, context: t.Any) -> t.Any:
"""
EXPERIMENTAL: Unstable API subject to change at any time without notice.
Called for each task arg to allow for custom templating.
The optional `context` value is sourced from `Task.get_finalize_task_args_context`.
"""
return templar.template(value)
def get_plugin_option(self, plugin, option, default=None):
"""Helper to get an option from a plugin without having to use
the try/except dance everywhere to set a default
"""
try:
return plugin.get_option(option)
except (AttributeError, KeyError):
return default
def get_become_option(self, option, default=None):
return self.get_plugin_option(self._connection.become, option, default=default)
def get_connection_option(self, option, default=None):
return self.get_plugin_option(self._connection, option, default=default)
def get_shell_option(self, option, default=None):
return self.get_plugin_option(self._connection._shell, option, default=default)
def _remote_file_exists(self, path):
cmd = self._connection._shell.exists(path)
result = self._low_level_execute_command(cmd=cmd, sudoable=True)
if result['rc'] == 0:
return True
return False
def _configure_module(self, module_name, module_args, task_vars) -> tuple[_BuiltModule, str]:
"""
Handles the loading and templating of the module code through the
modify_module() function.
"""
if self._task.delegate_to:
use_vars = task_vars.get('ansible_delegated_vars')[self._task.delegate_to]
else:
use_vars = task_vars
split_module_name = module_name.split('.')
collection_name = '.'.join(split_module_name[0:2]) if len(split_module_name) > 2 else ''
leaf_module_name = resource_from_fqcr(module_name)
# Search module path(s) for named module.
for mod_type in self._connection.module_implementation_preferences:
# Check to determine if PowerShell modules are supported, and apply
# some fixes (hacks) to module name + args.
if mod_type == '.ps1':
# FIXME: This should be temporary and moved to an exec subsystem plugin where we can define the mapping
# for each subsystem.
win_collection = 'ansible.windows'
rewrite_collection_names = ['ansible.builtin', 'ansible.legacy', '']
# async_status, win_stat, win_file, win_copy, and win_ping are not just like their
# python counterparts but they are compatible enough for our
# internal usage
# NB: we only rewrite the module if it's not being called by the user (eg, an action calling something else)
# and if it's unqualified or FQ to a builtin
if leaf_module_name in ('stat', 'file', 'copy', 'ping') and \
collection_name in rewrite_collection_names and self._task.action != module_name:
module_name = '%s.win_%s' % (win_collection, leaf_module_name)
elif leaf_module_name == 'async_status' and collection_name in rewrite_collection_names:
module_name = '%s.%s' % (win_collection, leaf_module_name)
result = self._shared_loader_obj.module_loader.find_plugin_with_context(module_name, mod_type, collection_list=self._task.collections)
if not result.resolved:
if result.redirect_list and len(result.redirect_list) > 1:
# take the last one in the redirect list, we may have successfully jumped through N other redirects
target_module_name = result.redirect_list[-1]
raise AnsibleError("The module {0} was redirected to {1}, which could not be loaded.".format(module_name, target_module_name))
module_path = result.plugin_resolved_path
if module_path:
break
else: # This is a for-else: http://bit.ly/1ElPkyg
raise AnsibleError("The module %s was not found in configured module paths" % (module_name))
# insert shared code and arguments into the module
final_environment: dict[str, t.Any] = {}
self._compute_environment_string(final_environment)
# modify_module will exit early if interpreter discovery is required; re-run after if necessary
for _dummy in (1, 2):
try:
module_bits = modify_module(
module_name=module_name,
module_path=module_path,
module_args=module_args,
templar=self._templar,
task_vars=use_vars,
module_compression=C.config.get_config_value('DEFAULT_MODULE_COMPRESSION', variables=task_vars),
async_timeout=self._task.async_val,
environment=final_environment,
remote_is_local=bool(getattr(self._connection, '_remote_is_local', False)),
become_plugin=self._connection.become,
)
break
except InterpreterDiscoveryRequiredError as idre:
self._discovered_interpreter = discover_interpreter(action=self, interpreter_name=idre.interpreter_name,
discovery_mode=idre.discovery_mode, task_vars=use_vars)
# update the local task_vars with the discovered interpreter (which might be None);
# we'll propagate back to the controller in the task result
discovered_key = 'discovered_interpreter_%s' % idre.interpreter_name
# update the local vars copy for the retry
use_vars['ansible_facts'][discovered_key] = self._discovered_interpreter
# TODO: this condition prevents 'wrong host' from being updated
# but in future we would want to be able to update 'delegated host facts'
# irrespective of task settings
if not self._task.delegate_to or self._task.delegate_facts:
# store in local task_vars facts collection for the retry and any other usages in this worker
task_vars['ansible_facts'][discovered_key] = self._discovered_interpreter
# preserve this so _execute_module can propagate back to controller as a fact
self._discovered_interpreter_key = discovered_key
else:
task_vars['ansible_delegated_vars'][self._task.delegate_to]['ansible_facts'][discovered_key] = self._discovered_interpreter
return module_bits, module_path
def _compute_environment_string(self, raw_environment_out=None):
"""
Builds the environment string to be used when executing the remote task.
"""
final_environment = dict()
if self._task.environment is not None:
environments = self._task.environment
if not isinstance(environments, list):
environments = [environments]
# The order of environments matters to make sure we merge
# in the parent's values first so those in the block then
# task 'win' in precedence
for environment in environments:
if environment is None or len(environment) == 0:
continue
temp_environment = self._templar.template(environment)
if not isinstance(temp_environment, dict):
raise AnsibleError("environment must be a dictionary, received %s (%s)" % (temp_environment, type(temp_environment)))
# very deliberately using update here instead of combine_vars, as
# these environment settings should not need to merge sub-dicts
final_environment.update(temp_environment)
if len(final_environment) > 0:
final_environment = self._templar.template(final_environment)
if isinstance(raw_environment_out, dict):
raw_environment_out.clear()
raw_environment_out.update(final_environment)
return self._connection._shell.env_prefix(**final_environment)
def _early_needs_tmp_path(self):
"""
Determines if a tmp path should be created before the action is executed.
"""
return getattr(self, 'TRANSFERS_FILES', False)
def _is_pipelining_enabled(self, module_style: str, wrap_async: bool = False) -> bool:
"""
Determines if we are required and can do pipelining, only 'new' style modules can support pipelining
"""
return bool(module_style == 'new' and self._connection.is_pipelining_enabled(wrap_async))
def _get_admin_users(self):
"""
Returns a list of admin users that are configured for the current shell
plugin
"""
return self.get_shell_option('admin_users', ['root'])
def _get_remote_addr(self, tvars):
""" consistently get the 'remote_address' for the action plugin """
remote_addr = tvars.get('delegated_vars', {}).get('ansible_host', tvars.get('ansible_host', tvars.get('inventory_hostname', None)))
for variation in ('remote_addr', 'host'):
try:
remote_addr = self._connection.get_option(variation)
except KeyError:
continue
break
else:
# plugin does not have, fallback to play_context
remote_addr = self._play_context.remote_addr
return remote_addr
def _get_remote_user(self):
""" consistently get the 'remote_user' for the action plugin """
# TODO: use 'current user running ansible' as fallback when moving away from play_context
# pwd.getpwuid(os.getuid()).pw_name
remote_user = None
try:
remote_user = self._connection.get_option('remote_user')
except KeyError:
# plugin does not have remote_user option, fallback to default and/play_context
remote_user = getattr(self._connection, 'default_user', None) or self._play_context.remote_user
except AttributeError:
# plugin does not use config system, fallback to old play_context
remote_user = self._play_context.remote_user
return remote_user
def _is_become_unprivileged(self):
"""
The user is not the same as the connection user and is not part of the
shell configured admin users
"""
# if we don't use become then we know we aren't switching to a
# different unprivileged user
if not self._connection.become:
return False
# if we use become and the user is not an admin (or same user) then
# we need to return become_unprivileged as True
admin_users = self._get_admin_users()
remote_user = self._get_remote_user()
become_user = self.get_become_option('become_user')
return bool(become_user and become_user not in admin_users + [remote_user])
def _make_tmp_path(self, remote_user=None):
"""
Create and return a temporary path on a remote box.
"""
# Network connection plugins (network_cli, netconf, etc.) execute on the controller, rather than the remote host.
# As such, we want to avoid using remote_user for paths as remote_user may not line up with the local user
# This is a hack and should be solved by more intelligent handling of remote_tmp in 2.7
if getattr(self._connection, '_remote_is_local', False):
tmpdir = C.DEFAULT_LOCAL_TMP
else:
# NOTE: shell plugins should populate this setting anyways, but they dont do remote expansion, which
# we need for 'non posix' systems like cloud-init and solaris
tmpdir = self._remote_expand_user(self.get_shell_option('remote_tmp', default='~/.ansible/tmp'), sudoable=False)
become_unprivileged = self._is_become_unprivileged()
basefile = self._connection._shell._generate_temp_dir_name()
cmd = self._connection._shell._mkdtemp2(basefile=basefile, system=become_unprivileged, tmpdir=tmpdir)
result = self._low_level_execute_command(cmd.command, in_data=cmd.input_data, sudoable=False)
# error handling on this seems a little aggressive?
if result['rc'] != 0:
if result['rc'] == 5:
output = 'Authentication failure.'
elif result['rc'] == 255 and self._connection.transport in ('ssh',):
if display.verbosity > 3:
output = u'SSH encountered an unknown error. The output was:\n%s%s' % (result['stdout'], result['stderr'])
else:
output = (u'SSH encountered an unknown error during the connection. '
'We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue')
elif u'No space left on device' in result['stderr']:
output = result['stderr']
else:
output = ('Failed to create temporary directory. '
'In some cases, you may have been able to authenticate and did not have permissions on the target directory. '
'Consider changing the remote tmp path in ansible.cfg to a path rooted in "/tmp", for more error information use -vvv. '
'Failed command was: %s, exited with result %d' % (cmd, result['rc']))
if 'stdout' in result and result['stdout'] != u'':
output = output + u", stdout output: %s" % result['stdout']
if display.verbosity > 3 and 'stderr' in result and result['stderr'] != u'':
output += u", stderr output: %s" % result['stderr']
raise AnsibleConnectionFailure(output)
else:
self._cleanup_remote_tmp = True
try:
stdout_parts = result['stdout'].strip().split('%s=' % basefile, 1)
rc = self._connection._shell.join_path(stdout_parts[-1], u'').splitlines()[-1]
except IndexError:
# stdout was empty or just space, set to / to trigger error in next if
rc = '/'
# Catch failure conditions, files should never be
# written to locations in /.
if rc == '/':
raise AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basefile, cmd))
self._connection._shell.tmpdir = rc
return rc
def _should_remove_tmp_path(self, tmp_path):
"""Determine if temporary path should be deleted or kept by user request/config"""
return tmp_path and self._cleanup_remote_tmp and not C.DEFAULT_KEEP_REMOTE_FILES and "-tmp-" in tmp_path
def _remove_tmp_path(self, tmp_path, force=False):
"""Remove a temporary path we created. """
if tmp_path is None and self._connection._shell.tmpdir:
tmp_path = self._connection._shell.tmpdir
if force or self._should_remove_tmp_path(tmp_path):
cmd = self._connection._shell.remove(tmp_path, recurse=True)
# If we have gotten here we have a working connection configuration.
# If the connection breaks we could leave tmp directories out on the remote system.
tmp_rm_res = self._low_level_execute_command(cmd, sudoable=False)
if tmp_rm_res.get('rc', 0) != 0:
display.warning('Error deleting remote temporary files (rc: %s, stderr: %s})'
% (tmp_rm_res.get('rc'), tmp_rm_res.get('stderr', 'No error string available.')))
else:
self._connection._shell.tmpdir = None
def _transfer_file(self, local_path: str, remote_path: str) -> str:
"""
Copy a file from the controller to a remote path
:arg local_path: Path on controller to transfer
:arg remote_path: Path on the remote system to transfer into
.. warning::
* When you use this function you likely want to use use fixup_perms2() on the
remote_path to make sure that the remote file is readable when the user becomes
a non-privileged user.
* If you use fixup_perms2() on the file and copy or move the file into place, you will
need to then remove filesystem acls on the file once it has been copied into place by
the module. See how the copy module implements this for help.
"""
self._connection.put_file(local_path, remote_path)
return remote_path
def _transfer_data(self, remote_path: str, data: str | bytes) -> str:
"""
Copies the module data out to the temporary module path.
"""
if isinstance(data, str):
data = data.encode(errors='surrogateescape')
elif not isinstance(data, bytes):
raise TypeError('data must be either a string or bytes')
afd, afile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
afo = os.fdopen(afd, 'wb')
try:
afo.write(data)
except Exception as e:
raise AnsibleError("failure writing module data to temporary file for transfer: %s" % to_native(e))
afo.flush()
afo.close()
try:
self._transfer_file(afile, remote_path)
finally:
os.unlink(afile)
return remote_path
    def _fixup_perms2(self, remote_paths, remote_user=None, execute=True):
        """
        We need the files we upload to be readable (and sometimes executable)
        by the user being sudo'd to but we want to limit other people's access
        (because the files could contain passwords or other private
        information.  We achieve this in one of these ways:

        * If no sudo is performed or the remote_user is sudo'ing to
          themselves, we don't have to change permissions.
        * If the remote_user sudo's to a privileged user (for instance, root),
          we don't have to change permissions
        * If the remote_user sudo's to an unprivileged user then we attempt to
          grant the unprivileged user access via file system acls.
        * If granting file system acls fails we try to change the owner of the
          file with chown which only works in case the remote_user is
          privileged or the remote systems allows chown calls by unprivileged
          users (e.g. HP-UX)
        * If the above fails, we next try 'chmod +a' which is a macOS way of
          setting ACLs on files.
        * If the above fails, we check if ansible_common_remote_group is set.
          If it is, we attempt to chgrp the file to its value. This is useful
          if the remote_user has a group in common with the become_user. As the
          remote_user, we can chgrp the file to that group and allow the
          become_user to read it.
        * If (the chown fails AND ansible_common_remote_group is not set) OR
          (ansible_common_remote_group is set AND the chgrp (or following chmod)
          returned non-zero), we can set the file to be world readable so that
          the second unprivileged user can read the file.
          Since this could allow other users to get access to private
          information we only do this if ansible is configured with
          "allow_world_readable_tmpfiles" in the ansible.cfg. Also note that
          when ansible_common_remote_group is set this final fallback is very
          unlikely to ever be triggered, so long as chgrp was successful. But
          just because the chgrp was successful, does not mean Ansible can
          necessarily access the files (if, for example, the variable was set
          to a group that remote_user is in, and can chgrp to, but does not have
          in common with become_user).

        :arg remote_paths: remote paths to make accessible. Note that the
            macOS fallback (Step 3d) passes its chmod argument as the first
            element of this list when building that command.
        :arg remote_user: user the connection authenticated as; looked up
            from the connection when None.
        :arg execute: when True, grant execute permission in addition to read.
        :returns: remote_paths, once some strategy has succeeded.
        :raises AnsibleError: when no permission strategy succeeds.
        """
        if remote_user is None:
            remote_user = self._get_remote_user()
        # Step 1: Are we on windows?
        if getattr(self._connection._shell, "_IS_WINDOWS", False):
            # This won't work on Powershell as-is, so we'll just completely
            # skip until we have a need for it, at which point we'll have to do
            # something different.
            return remote_paths
        # Step 2: If we're not becoming an unprivileged user, we are roughly
        # done. Make the files +x if we're asked to, and return.
        if not self._is_become_unprivileged():
            if execute:
                # Can't depend on the file being transferred with required permissions.
                # Only need user perms because no become was used here
                res = self._remote_chmod(remote_paths, 'u+rwx')
                if res['rc'] != 0:
                    raise AnsibleError(
                        'Failed to set permissions on remote files '
                        '(rc: {0}, err: {1})'.format(
                            res['rc'],
                            to_native(res['stderr'])))
            return remote_paths
        # If we're still here, we have an unprivileged user that's different
        # than the ssh user.
        become_user = self.get_become_option('become_user')
        # Try to use file system acls to make the files readable for sudo'd
        # user
        if execute:
            chmod_mode = 'rx'
            setfacl_mode = 'r-x'
            # Apple patches their "file_cmds" chmod with ACL support
            chmod_acl_mode = '{0} allow read,execute'.format(become_user)
            # POSIX-draft ACL specification. Solaris, maybe others.
            # See chmod(1) on something Solaris-based for syntax details.
            posix_acl_mode = 'A+user:{0}:rx:allow'.format(become_user)
        else:
            chmod_mode = 'rX'
            # TODO: this form fails silently on freebsd. We currently
            # never call _fixup_perms2() with execute=False but if we
            # start to we'll have to fix this.
            setfacl_mode = 'r-X'
            # Apple
            chmod_acl_mode = '{0} allow read'.format(become_user)
            # POSIX-draft
            posix_acl_mode = 'A+user:{0}:r:allow'.format(become_user)
        # Step 3a: Are we able to use setfacl to add user ACLs to the file?
        res = self._remote_set_user_facl(
            remote_paths,
            become_user,
            setfacl_mode)
        match res.get('rc'):
            case 0:
                return remote_paths
            case 2:
                # invalid syntax (for example, missing user, missing colon)
                self._display.debug(f"setfacl command failed with an invalid syntax. Trying chmod instead. Err: {res!r}")
            case 127:
                # setfacl binary does not exists or we don't have permission to use it.
                self._display.debug(f"setfacl binary does not exist or does not have permission to use it. Trying chmod instead. Err: {res!r}")
            case _:
                # generic debug message
                self._display.debug(f'Failed to set facl {setfacl_mode}, got:{res!r}')
        # Step 3b: Set execute if we need to. We do this before anything else
        # because some of the methods below might work but not let us set
        # permissions as part of them.
        if execute:
            res = self._remote_chmod(remote_paths, 'u+rwx')
            if res['rc'] != 0:
                raise AnsibleError(
                    'Failed to set file mode or acl on remote temporary files '
                    '(rc: {0}, err: {1})'.format(
                        res['rc'],
                        to_native(res['stderr'])))
        # Step 3c: File system ACLs failed above; try falling back to chown.
        res = self._remote_chown(remote_paths, become_user)
        if res['rc'] == 0:
            return remote_paths
        # Check if we are an admin/root user. If we are and got here, it means
        # we failed to chown as root and something weird has happened.
        if remote_user in self._get_admin_users():
            raise AnsibleError(
                'Failed to change ownership of the temporary files Ansible '
                '(via chmod nor setfacl) needs to create despite connecting as a '
                'privileged user. Unprivileged become user would be unable to read'
                ' the file.')
        # Step 3d: Try macOS's special chmod + ACL
        # macOS chmod's +a flag takes its own argument. As a slight hack, we
        # pass that argument as the first element of remote_paths. So we end
        # up running `chmod +a [that argument] [file 1] [file 2] ...`
        try:
            res = self._remote_chmod([chmod_acl_mode] + list(remote_paths), '+a')
        except AnsibleAuthenticationFailure as e:
            # Solaris-based chmod will return 5 when it sees an invalid mode,
            # and +a is invalid there. Because it returns 5, which is the same
            # thing sshpass returns on auth failure, our sshpass code will
            # assume that auth failed. If we don't handle that case here, none
            # of the other logic below will get run. This is fairly hacky and a
            # corner case, but probably one that shows up pretty often in
            # Solaris-based environments (and possibly others).
            pass
        else:
            if res['rc'] == 0:
                return remote_paths
        # Step 3e: Try Solaris/OpenSolaris/OpenIndiana-sans-setfacl chmod
        # Similar to macOS above, Solaris 11.4 drops setfacl and takes file ACLs
        # via chmod instead. OpenSolaris and illumos-based distros allow for
        # using either setfacl or chmod, and compatibility depends on filesystem.
        # It should be possible to debug this branch by installing OpenIndiana
        # (use ZFS) and going unpriv -> unpriv.
        res = self._remote_chmod(remote_paths, posix_acl_mode)
        if res['rc'] == 0:
            return remote_paths
        # we'll need this down here
        become_link = get_versioned_doclink('playbook_guide/playbooks_privilege_escalation.html')
        # Step 3f: Common group
        # Otherwise, we're a normal user. We failed to chown the paths to the
        # unprivileged user, but if we have a common group with them, we should
        # be able to chown it to that.
        #
        # Note that we have no way of knowing if this will actually work... just
        # because chgrp exits successfully does not mean that Ansible will work.
        # We could check if the become user is in the group, but this would
        # create an extra round trip.
        #
        # Also note that due to the above, this can prevent the
        # world_readable_temp logic below from ever getting called. We
        # leave this up to the user to rectify if they have both of these
        # features enabled.
        group = self.get_shell_option('common_remote_group')
        if group is not None:
            res = self._remote_chgrp(remote_paths, group)
            if res['rc'] == 0:
                # warn user that something might go weirdly here.
                if self.get_shell_option('world_readable_temp'):
                    display.warning(
                        'Both common_remote_group and '
                        'allow_world_readable_tmpfiles are set. chgrp was '
                        'successful, but there is no guarantee that Ansible '
                        'will be able to read the files after this operation, '
                        'particularly if common_remote_group was set to a '
                        'group of which the unprivileged become user is not a '
                        'member. In this situation, '
                        'allow_world_readable_tmpfiles is a no-op. See this '
                        'URL for more details: %s'
                        '#risks-of-becoming-an-unprivileged-user' % become_link)
                if execute:
                    group_mode = 'g+rwx'
                else:
                    group_mode = 'g+rw'
                res = self._remote_chmod(remote_paths, group_mode)
                if res['rc'] == 0:
                    return remote_paths
        # Step 4: World-readable temp directory
        if self.get_shell_option('world_readable_temp'):
            # chown and fs acls failed -- do things this insecure way only if
            # the user opted in in the config file
            display.warning(
                'Using world-readable permissions for temporary files Ansible '
                'needs to create when becoming an unprivileged user. This may '
                'be insecure. For information on securing this, see %s'
                '#risks-of-becoming-an-unprivileged-user' % become_link)
            res = self._remote_chmod(remote_paths, 'a+%s' % chmod_mode)
            if res['rc'] == 0:
                return remote_paths
            raise AnsibleError(
                'Failed to set file mode on remote files '
                '(rc: {0}, err: {1})'.format(
                    res['rc'],
                    to_native(res['stderr'])))
        raise AnsibleError(
            'Failed to set permissions on the temporary files Ansible needs '
            'to create when becoming an unprivileged user '
            '(rc: %s, err: %s}). For information on working around this, see %s'
            '#risks-of-becoming-an-unprivileged-user' % (
                res['rc'],
                to_native(res['stderr']), become_link))
def _remote_chmod(self, paths, mode, sudoable=False):
"""
Issue a remote chmod command
"""
cmd = self._connection._shell.chmod(paths, mode)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _remote_chown(self, paths, user, sudoable=False):
"""
Issue a remote chown command
"""
cmd = self._connection._shell.chown(paths, user)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _remote_chgrp(self, paths, group, sudoable=False):
"""
Issue a remote chgrp command
"""
cmd = self._connection._shell.chgrp(paths, group)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _remote_set_user_facl(self, paths, user, mode, sudoable=False):
"""
Issue a remote call to setfacl
"""
cmd = self._connection._shell.set_user_facl(paths, user, mode)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _execute_remote_stat(self, path, all_vars, follow, tmp=None, checksum=True):
"""
Get information from remote file.
"""
if tmp is not None:
display.warning('_execute_remote_stat no longer honors the tmp parameter. Action'
' plugins should set self._connection._shell.tmpdir to share'
' the tmpdir')
del tmp # No longer used
module_args = dict(
path=path,
follow=follow,
get_checksum=checksum,
get_size=False, # ansible.windows.win_stat added this in 1.11.0
checksum_algorithm='sha1',
)
# Unknown opts are ignored as module_args could be specific for the
# module that is being executed.
mystat = self._execute_module(module_name='ansible.legacy.stat', module_args=module_args, task_vars=all_vars,
wrap_async=False, ignore_unknown_opts=True)
if mystat.get('failed'):
msg = mystat.get('module_stderr')
if not msg:
msg = mystat.get('module_stdout')
if not msg:
msg = mystat.get('msg')
raise AnsibleError('Failed to get information on remote file (%s): %s' % (path, msg))
if not mystat['stat']['exists']:
# empty might be matched, 1 should never match, also backwards compatible
mystat['stat']['checksum'] = '1'
# happens sometimes when it is a dir and not on bsd
if 'checksum' not in mystat['stat']:
mystat['stat']['checksum'] = ''
elif not isinstance(mystat['stat']['checksum'], str):
raise AnsibleError("Invalid checksum returned by stat: expected a string type but got %s" % type(mystat['stat']['checksum']))
return mystat['stat']
    def _remote_expand_user(self, path, sudoable=True, pathsep=None):
        """
        Take a remote path and perform tilde/$HOME expansion on the remote host.

        :arg path: path to expand; returned unchanged unless it starts with '~'.
        :arg sudoable: when True and become is active with a become_user,
            expand relative to the become user's home instead of the login user's.
        :arg pathsep: unused; retained for backwards compatibility.
        :returns: the expanded path; falls back to the remote pwd, and finally
            to the original ``path``, if remote expansion produced no output.
        :raises AnsibleError: if the remote returned a path containing '..'.
        """
        # We only expand ~/path and ~username/path
        if not path.startswith('~'):
            return path
        # Per Jborean, we don't have to worry about Windows as we don't have a notion of user's home
        # dir there.
        split_path = path.split(os.path.sep, 1)
        expand_path = split_path[0]
        if expand_path == '~':
            # Network connection plugins (network_cli, netconf, etc.) execute on the controller, rather than the remote host.
            # As such, we want to avoid using remote_user for paths as remote_user may not line up with the local user
            # This is a hack and should be solved by more intelligent handling of remote_tmp in 2.7
            become_user = self.get_become_option('become_user')
            if getattr(self._connection, '_remote_is_local', False):
                pass
            elif sudoable and self._connection.become and become_user:
                expand_path = '~%s' % become_user
            else:
                # use remote user instead, if none set default to current user
                expand_path = '~%s' % (self._get_remote_user() or '')
        # use shell to construct appropriate command and execute
        cmd = self._connection._shell._expand_user2(expand_path)
        data = self._low_level_execute_command(cmd.command, in_data=cmd.input_data, sudoable=False)
        # the expansion result is expected on the last line of stdout; empty
        # stdout (IndexError) means remote expansion failed
        try:
            initial_fragment = data['stdout'].strip().splitlines()[-1]
        except IndexError:
            initial_fragment = None
        if not initial_fragment:
            # Something went wrong trying to expand the path remotely. Try using pwd, if not, return
            # the original string
            cmd = self._connection._shell.pwd()
            pwd = self._low_level_execute_command(cmd, sudoable=False).get('stdout', '').strip()
            if pwd:
                expanded = pwd
            else:
                expanded = path
        elif len(split_path) > 1:
            expanded = self._connection._shell.join_path(initial_fragment, *split_path[1:])
        else:
            expanded = initial_fragment
        # refuse relative results that climb out of the home directory
        if '..' in os.path.dirname(expanded).split('/'):
            raise AnsibleError("'%s' returned an invalid relative home directory path containing '..'" % self._get_remote_addr({}))
        return expanded
def _strip_success_message(self, data):
"""
Removes the BECOME-SUCCESS message from the data.
"""
if data.strip().startswith('BECOME-SUCCESS-'):
data = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', data)
return data
    def _update_module_args(self, module_name, module_args, task_vars, ignore_unknown_opts: bool = False):
        """
        Inject the internal ``_ansible_*`` control keys into ``module_args``.

        Mutates ``module_args`` in place; the keys added here are consumed by
        the module wrapper on the remote side and stripped from results later.

        :arg module_name: name reported to the module via _ansible_module_name.
        :arg module_args: module argument dict to augment (modified in place).
        :arg task_vars: variables used to resolve per-task configuration.
        :arg ignore_unknown_opts: tell the module to ignore options that are
            not in its argspec.
        :raises AnsibleError: if check mode is requested but this action does
            not support it.
        """
        # set check mode in the module arguments, if required
        if self._task.check_mode:
            if not self._supports_check_mode:
                raise AnsibleError("check mode is not supported for this operation")
            module_args['_ansible_check_mode'] = True
        else:
            module_args['_ansible_check_mode'] = False
        # set no log in the module arguments, if required
        no_target_syslog = C.config.get_config_value('DEFAULT_NO_TARGET_SYSLOG', variables=task_vars)
        module_args['_ansible_no_log'] = self._task.no_log or no_target_syslog
        # set debug in the module arguments, if required
        module_args['_ansible_debug'] = C.DEFAULT_DEBUG
        # let module know we are in diff mode
        module_args['_ansible_diff'] = self._task.diff
        # let module know our verbosity
        module_args['_ansible_verbosity'] = display.verbosity
        # give the module information about the ansible version
        module_args['_ansible_version'] = __version__
        # give the module information about its name
        module_args['_ansible_module_name'] = module_name
        # set the syslog facility to be used in the module
        module_args['_ansible_syslog_facility'] = task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY)
        # let module know about filesystems that selinux treats specially
        module_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS
        # give the module the socket for persistent connections; fall back to
        # the per-task variable when the connection itself has none
        module_args['_ansible_socket'] = getattr(self._connection, 'socket_path')
        if not module_args['_ansible_socket']:
            module_args['_ansible_socket'] = task_vars.get('ansible_socket')
        # make sure all commands use the designated shell executable
        module_args['_ansible_shell_executable'] = self._play_context.executable
        # make sure modules are aware if they need to keep the remote files
        module_args['_ansible_keep_remote_files'] = C.DEFAULT_KEEP_REMOTE_FILES
        # make sure all commands use the designated temporary directory if created
        if self._is_become_unprivileged():  # force fallback on remote_tmp as user cannot normally write to dir
            module_args['_ansible_tmpdir'] = None
        else:
            module_args['_ansible_tmpdir'] = self._connection._shell.tmpdir
        # make sure the remote_tmp value is sent through in case modules needs to create their own
        module_args['_ansible_remote_tmp'] = self.get_shell_option('remote_tmp', default='~/.ansible/tmp')
        # tells the module to ignore options that are not in its argspec.
        module_args['_ansible_ignore_unknown_opts'] = ignore_unknown_opts
        # allow user to insert string to add context to remote logging
        module_args['_ansible_target_log_info'] = C.config.get_config_value('TARGET_LOG_INFO', variables=task_vars)
        # which exception types should carry tracebacks back to the controller
        module_args['_ansible_tracebacks_for'] = _traceback.traceback_for()
    def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=None, wrap_async=False,
                        ignore_unknown_opts: bool = False):
        """
        Transfer and run a module along with its arguments.

        :arg module_name: module to run; defaults to the task's action.
        :arg module_args: module arguments; defaults to the task's args.
            Mutated in place by _update_module_args().
        :arg tmp: DEPRECATED, ignored (warns when supplied).
        :arg task_vars: variables made available to module configuration.
        :arg persist_files: unused here; retained for API compatibility.
        :arg delete_remote_tmp: DEPRECATED, ignored (warns when supplied).
        :arg wrap_async: run the module through async_wrapper.
        :arg ignore_unknown_opts: tell the module to ignore options not in
            its argspec.
        :returns: the parsed module result dict, with internal keys removed
            and stdout_lines/stderr_lines filled in when missing.
        """
        if tmp is not None:
            display.warning('_execute_module no longer honors the tmp parameter. Action plugins'
                            ' should set self._connection._shell.tmpdir to share the tmpdir')
        del tmp  # No longer used
        if delete_remote_tmp is not None:
            display.warning('_execute_module no longer honors the delete_remote_tmp parameter.'
                            ' Action plugins should check self._connection._shell.tmpdir to'
                            ' see if a tmpdir existed before they were called to determine'
                            ' if they are responsible for removing it.')
        del delete_remote_tmp  # No longer used
        tmpdir = self._connection._shell.tmpdir
        # We set the module_style to new here so the remote_tmp is created
        # before the module args are built if remote_tmp is needed (async).
        # If the module_style turns out to not be new and we didn't create the
        # remote tmp here, it will still be created. This must be done before
        # calling self._update_module_args() so the module wrapper has the
        # correct remote_tmp value set
        if not self._is_pipelining_enabled("new", wrap_async) and tmpdir is None:
            self._make_tmp_path()
            tmpdir = self._connection._shell.tmpdir
        if task_vars is None:
            task_vars = dict()
        # if a module name was not specified for this execution, use the action from the task
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args
        self._update_module_args(module_name, module_args, task_vars, ignore_unknown_opts=ignore_unknown_opts)
        # remember the index of the env entry we add so it can be removed again below
        remove_async_dir = None
        if wrap_async or self._task.async_val:
            async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
            remove_async_dir = len(self._task.environment)
            self._task.environment.append({"ANSIBLE_ASYNC_DIR": async_dir})
        # FUTURE: refactor this along with module build process to better encapsulate "smart wrapper" functionality
        module_bits, module_path = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
        (module_style, shebang, module_data) = (module_bits.module_style, module_bits.shebang, module_bits.b_module_data)
        display.vvv("Using module file %s" % module_path)
        if not shebang and module_style != 'binary':
            raise AnsibleError("module (%s) is missing interpreter line" % module_name)
        self._used_interpreter = shebang
        remote_module_path = None
        if not self._is_pipelining_enabled(module_style, wrap_async):
            # we might need remote tmp dir
            if tmpdir is None:
                self._make_tmp_path()
                tmpdir = self._connection._shell.tmpdir
            remote_module_filename = self._connection._shell.get_remote_filename(module_path)
            remote_module_path = self._connection._shell.join_path(tmpdir, 'AnsiballZ_%s' % remote_module_filename)
        args_file_path = None
        if module_style in ('old', 'non_native_want_json', 'binary'):
            # we'll also need a tmp file to hold our module arguments
            args_file_path = self._connection._shell.join_path(tmpdir, 'args')
        if remote_module_path or module_style != 'new':
            display.debug("transferring module to remote %s" % remote_module_path)
            if module_style == 'binary':
                self._transfer_file(module_path, remote_module_path)
            else:
                self._transfer_data(remote_module_path, module_data)
            if module_style == 'old':
                # we need to dump the module args to a k=v string in a file on
                # the remote system, which can be read and parsed by the module
                args_data = ""
                for k, v in module_args.items():
                    args_data += '%s=%s ' % (k, shlex.quote(str(v)))
                self._transfer_data(args_file_path, args_data)
            elif module_style in ('non_native_want_json', 'binary'):
                profile_encoder = get_module_encoder(module_bits.serialization_profile, Direction.CONTROLLER_TO_MODULE)
                self._transfer_data(args_file_path, json.dumps(module_args, cls=profile_encoder))
            display.debug("done transferring module to remote")
        environment_string = self._compute_environment_string()
        # remove the ANSIBLE_ASYNC_DIR env entry if we added a temporary one for
        # the async_wrapper task.
        if remove_async_dir is not None:
            del self._task.environment[remove_async_dir]
        remote_files = []
        if tmpdir and remote_module_path:
            remote_files = [tmpdir, remote_module_path]
        if args_file_path:
            remote_files.append(args_file_path)
        sudoable = True
        in_data = None
        cmd = ""
        if wrap_async and not self._connection.always_pipeline_modules:
            # configure, upload, and chmod the async_wrapper module
            (async_module_bits, async_module_path) = self._configure_module(module_name='ansible.legacy.async_wrapper', module_args=dict(), task_vars=task_vars)
            (shebang, async_module_data) = (async_module_bits.shebang, async_module_bits.b_module_data)
            async_module_remote_filename = self._connection._shell.get_remote_filename(async_module_path)
            remote_async_module_path = self._connection._shell.join_path(tmpdir, async_module_remote_filename)
            self._transfer_data(remote_async_module_path, async_module_data)
            remote_files.append(remote_async_module_path)
            async_limit = self._task.async_val
            async_jid = f'j{secrets.randbelow(999999999999)}'
            # call the interpreter for async_wrapper directly
            # this permits use of a script for an interpreter on non-Linux platforms
            interpreter = shebang.replace('#!', '').strip()
            async_cmd = [interpreter, remote_async_module_path, async_jid, async_limit, remote_module_path]
            if environment_string:
                async_cmd.insert(0, environment_string)
            if args_file_path:
                async_cmd.append(args_file_path)
            else:
                # maintain a fixed number of positional parameters for async_wrapper
                async_cmd.append('_')
            if not self._should_remove_tmp_path(tmpdir):
                async_cmd.append("-preserve_tmp")
            cmd = " ".join(to_text(x) for x in async_cmd)
        else:
            if self._is_pipelining_enabled(module_style):
                in_data = module_data
                display.vvv("Pipelining is enabled.")
            else:
                cmd = remote_module_path
            cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path).strip()
        # Fix permissions of the tmpdir path and tmpdir files. This should be called after all
        # files have been transferred.
        if remote_files:
            # remove none/empty
            remote_files = [x for x in remote_files if x]
            self._fixup_perms2(remote_files, self._get_remote_user())
        # actually execute
        res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)
        # parse the main result
        data = self._parse_returned_data(res, module_bits.serialization_profile)
        # NOTE: INTERNAL KEYS ONLY ACCESSIBLE HERE
        # get internal info before cleaning
        if data.pop("_ansible_suppress_tmpdir_delete", False):
            self._cleanup_remote_tmp = False
        # NOTE: dnf returns results .. but that made it 'compatible' with squashing, so we allow mappings, for now
        if 'results' in data and (not isinstance(data['results'], Sequence) or isinstance(data['results'], str)):
            data['ansible_module_results'] = data['results']
            del data['results']
            display.warning("Found internal 'results' key in module return, renamed to 'ansible_module_results'.")
        # remove internal keys
        remove_internal_keys(data)
        if wrap_async:
            # async_wrapper will clean up its tmpdir on its own so we want the controller side to
            # forget about it now
            self._connection._shell.tmpdir = None
            # FIXME: for backwards compat, figure out if still makes sense
            data['changed'] = True
        # pre-split stdout/stderr into lines if needed
        if 'stdout' in data and 'stdout_lines' not in data:
            # if the value is 'False', a default won't catch it.
            txt = data.get('stdout', None) or u''
            data['stdout_lines'] = txt.splitlines()
        if 'stderr' in data and 'stderr_lines' not in data:
            # if the value is 'False', a default won't catch it.
            txt = data.get('stderr', None) or u''
            data['stderr_lines'] = txt.splitlines()
        # propagate interpreter discovery results back to the controller
        if self._discovered_interpreter_key:
            if data.get('ansible_facts') is None:
                data['ansible_facts'] = {}
            data['ansible_facts'][self._discovered_interpreter_key] = self._discovered_interpreter
        display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
        return data
    def _parse_returned_data(self, res: dict[str, t.Any], profile: str) -> dict[str, t.Any]:
        """
        Deserialize a module's raw stdout into a result dict.

        :arg res: raw result from _low_level_execute_command (rc/stdout/stderr).
        :arg profile: serialization profile used to pick the JSON decoder.
        :returns: the parsed result dict with ``_ansible_parsed=True``, or an
            error result dict (``_ansible_parsed=False`` plus the raw
            module_stdout/module_stderr/rc) when deserialization fails.
        """
        try:
            # strip non-JSON noise around the payload; surface skipped lines as warnings
            filtered_output, warnings = _filter_non_json_lines(res.get('stdout', ''), objects_only=True)
            for w in warnings:
                display.warning(w)
            decoder = get_module_decoder(profile, Direction.MODULE_TO_CONTROLLER)
            data = json.loads(filtered_output, cls=decoder)
            _captured.AnsibleModuleCapturedError.normalize_result_exception(data)
            data.update(_ansible_parsed=True)  # this must occur after normalize_result_exception, since it checks the type of data to ensure it's a dict
        except ValueError as ex:
            message = "Module result deserialization failed."
            help_text = ""
            include_cause_message = True
            if self._used_interpreter is not None:
                interpreter = self._used_interpreter.lstrip('!#')
                # "not found" case is currently not tested; it was once reproducible
                # see: https://github.com/ansible/ansible/pull/53534
                not_found_err_re = re.compile(rf'{re.escape(interpreter)}: (?:No such file or directory|not found|command not found)')
                if not_found_err_re.search(res.get('stderr', '')) or not_found_err_re.search(res.get('stdout', '')):
                    message = f"The module interpreter {interpreter!r} was not found."
                    help_text = 'Consider overriding the configured interpreter path for this host. '
                    include_cause_message = False  # cause context *might* be useful in the traceback, but the JSON deserialization failure message is not
            try:
                # Because the underlying action API is built on result dicts instead of exceptions (for all but the most catastrophic failures),
                # we're using a tweaked version of the module exception handler to get new ErrorDetail-backed errors from this part of the code.
                # Ideally this would raise immediately on failure, but this would likely break actions that assume `ActionBase._execute_module()`
                # does not raise on module failure.
                error = AnsibleError(
                    message=message,
                    help_text=help_text + "See stdout/stderr for the returned output.",
                )
                error._include_cause_message = include_cause_message
                raise error from ex
            except AnsibleError as ansible_ex:
                # sentinel marks keys that had no source value so they can be dropped below
                sentinel = object()
                data = _error_utils.result_dict_from_exception(ansible_ex)
                data.update(
                    _ansible_parsed=False,
                    module_stdout=res.get('stdout', ''),
                    module_stderr=res.get('stderr', sentinel),
                    rc=res.get('rc', sentinel),
                )
                data = {k: v for k, v in data.items() if v is not sentinel}
        return data
# FIXME: move to connection base
def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, executable=None, encoding_errors='surrogate_then_replace', chdir=None):
"""
This is the function which executes the low level shell command, which
may be commands to create/remove directories for temporary files, or to
run the module code or python directly when pipelining.
:kwarg encoding_errors: If the value returned by the command isn't
utf-8 then we have to figure out how to transform it to unicode.
If the value is just going to be displayed to the user (or
discarded) then the default of 'replace' is fine. If the data is
used as a key or is going to be written back out to a file
verbatim, then this won't work. May have to use some sort of
replacement strategy (python3 could use surrogateescape)
:kwarg chdir: cd into this directory before executing the command.
"""
display.debug("_low_level_execute_command(): starting")
# if not cmd:
# # this can happen with powershell modules when there is no analog to a Windows command (like chmod)
# display.debug("_low_level_execute_command(): no command, exiting")
# return dict(stdout='', stderr='', rc=254)
if chdir:
display.debug("_low_level_execute_command(): changing cwd to %s for this command" % chdir)
cmd = self._connection._shell.append_command('cd %s' % chdir, cmd)
# https://github.com/ansible/ansible/issues/68054
if executable:
self._connection._shell.executable = executable
ruser = self._get_remote_user()
buser = self.get_become_option('become_user')
if (sudoable and self._connection.become and # if sudoable and have become
resource_from_fqcr(self._connection.transport) != 'network_cli' and # if not using network_cli
(C.BECOME_ALLOW_SAME_USER or (buser != ruser or not any((ruser, buser))))): # if we allow same user PE or users are different and either is set
display.debug("_low_level_execute_command(): using become for this command")
cmd = self._connection.become.build_become_command(cmd, self._connection._shell)
if self._connection.allow_executable:
if executable is None:
executable = self._play_context.executable
# mitigation for SSH race which can drop stdout (https://github.com/ansible/ansible/issues/13876)
# only applied for the default executable to avoid interfering with the raw action
cmd = self._connection._shell.append_command(cmd, 'sleep 0')
if executable:
cmd = executable + ' -c ' + shlex.quote(cmd)
display.debug("_low_level_execute_command(): executing: %s" % (cmd,))
# Change directory to basedir of task for command execution when connection is local
if self._connection.transport == 'local':
self._connection.cwd = self._loader.get_basedir()
rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)
# stdout and stderr may be either a file-like or a bytes object.
# Convert either one to a text type
if isinstance(stdout, bytes):
out = to_text(stdout, errors=encoding_errors)
elif not isinstance(stdout, str):
out = to_text(b''.join(stdout.readlines()), errors=encoding_errors)
else:
out = stdout
if isinstance(stderr, bytes):
err = to_text(stderr, errors=encoding_errors)
elif not isinstance(stderr, str):
err = to_text(b''.join(stderr.readlines()), errors=encoding_errors)
else:
err = stderr
if rc is None:
rc = 0
# be sure to remove the BECOME-SUCCESS message now
out = self._strip_success_message(out)
display.debug(u"_low_level_execute_command() done: rc=%d, stdout=%s, stderr=%s" % (rc, out, err))
return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err, stderr_lines=err.splitlines())
def _get_diff_data(self, destination, source, task_vars, content=None, source_file=True):
# Note: Since we do not diff the source and destination before we transform from bytes into
# text the diff between source and destination may not be accurate. To fix this, we'd need
# to move the diffing from the callback plugins into here.
#
# Example of data which would cause trouble is src_content == b'\xff' and dest_content ==
# b'\xfe'. Neither of those are valid utf-8 so both get turned into the replacement
# character: diff['before'] = u'�' ; diff['after'] = u'�' When the callback plugin later
# diffs before and after it shows an empty diff.
diff = {}
display.debug("Going to peek to see if file has changed permissions")
peek_result = self._execute_module(
module_name='ansible.legacy.file', module_args=dict(path=destination, _diff_peek=True),
task_vars=task_vars, persist_files=True)
if peek_result.get('failed', False):
display.warning(u"Failed to get diff between '%s' and '%s': %s" % (os.path.basename(source), destination, to_text(peek_result.get(u'msg', u''))))
return diff
if peek_result.get('rc', 0) == 0:
if peek_result.get('state') in (None, 'absent'):
diff['before'] = u''
elif peek_result.get('appears_binary'):
diff['dst_binary'] = 1
elif peek_result.get('size') and C.MAX_FILE_SIZE_FOR_DIFF > 0 and peek_result['size'] > C.MAX_FILE_SIZE_FOR_DIFF:
diff['dst_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
else:
display.debug(u"Slurping the file %s" % destination)
dest_result = self._execute_module(
module_name='ansible.legacy.slurp', module_args=dict(path=destination),
task_vars=task_vars, persist_files=True)
if 'content' in dest_result:
dest_contents = dest_result['content']
if dest_result['encoding'] == u'base64':
dest_contents = base64.b64decode(dest_contents)
else:
raise AnsibleError("unknown encoding in content option, failed: %s" % to_native(dest_result))
diff['before_header'] = destination
diff['before'] = to_text(dest_contents)
if source_file:
st = os.stat(source)
if C.MAX_FILE_SIZE_FOR_DIFF > 0 and st[stat.ST_SIZE] > C.MAX_FILE_SIZE_FOR_DIFF:
diff['src_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
else:
display.debug("Reading local copy of the file %s" % source)
try:
with open(source, 'rb') as src:
src_contents = src.read()
except Exception as e:
raise AnsibleError("Unexpected error while reading source (%s) for diff: %s " % (source, to_native(e)))
if b"\x00" in src_contents:
diff['src_binary'] = 1
else:
if content:
diff['after_header'] = destination
else:
diff['after_header'] = source
diff['after'] = to_text(src_contents)
else:
display.debug(u"source of file passed in")
diff['after_header'] = u'dynamically generated'
diff['after'] = source
if self._task.no_log:
if 'before' in diff:
diff["before"] = u""
if 'after' in diff:
diff["after"] = u" [[ Diff output has been hidden because 'no_log: true' was specified for this result ]]\n"
return diff
def _find_needle(self, dirname, needle):
"""
find a needle in haystack of paths, optionally using 'dirname' as a subdir.
This will build the ordered list of paths to search and pass them to dwim
to get back the first existing file found.
"""
# dwim already deals with playbook basedirs
path_stack = self._task.get_search_path()
# if missing it will return a file not found exception
return self._loader.path_dwim_relative_stack(path_stack, dirname, needle)
| ActionBase |
python | dagster-io__dagster | python_modules/dagster/dagster/_config/snap.py | {
"start": 8405,
"end": 10673
} | class ____:
all_config_snaps_by_key: Mapping[str, ConfigTypeSnap]
@property
def all_config_keys(self) -> Sequence[str]:
return list(self.all_config_snaps_by_key.keys())
def get_config_snap(self, key: str) -> ConfigTypeSnap:
check.str_param(key, "key")
return self.all_config_snaps_by_key[key]
def has_config_snap(self, key: str) -> bool:
check.str_param(key, "key")
return key in self.all_config_snaps_by_key
def minimal_config_for_type_snap(
config_schema_snap: ConfigSchemaSnapshot, config_type_snap: ConfigTypeSnap
) -> Any:
check.inst_param(config_schema_snap, "config_schema_snap", ConfigSchemaSnapshot)
check.inst_param(config_type_snap, "config_type_snap", ConfigTypeSnap)
if ConfigTypeKind.has_fields(config_type_snap.kind):
default_dict = {}
if ConfigTypeKind.is_selector(config_type_snap.kind):
return "<selector>"
for field in config_type_snap.fields: # type: ignore
if not field.is_required:
continue
default_dict[field.name] = minimal_config_for_type_snap(
config_schema_snap, config_schema_snap.get_config_snap(field.type_key)
)
return default_dict
elif config_type_snap.kind == ConfigTypeKind.ANY:
return "AnyType"
elif config_type_snap.kind == ConfigTypeKind.SCALAR:
defaults = {"String": "...", "Int": 0, "Float": 0.0, "Bool": True}
return defaults.get(config_type_snap.given_name, "<unknown>") # type: ignore
elif config_type_snap.kind == ConfigTypeKind.ARRAY:
return []
elif config_type_snap.kind == ConfigTypeKind.MAP:
return {}
elif config_type_snap.kind == ConfigTypeKind.ENUM:
# guard against the edge case that an enum is defined with zero options
return (
config_type_snap.enum_values[0].value if config_type_snap.enum_values else "<unknown>"
)
elif config_type_snap.kind == ConfigTypeKind.SCALAR_UNION:
return minimal_config_for_type_snap(
config_schema_snap,
config_schema_snap.get_config_snap(config_type_snap.type_param_keys[0]), # type: ignore
)
else:
return "<unknown>"
| ConfigSchemaSnapshot |
python | django-crispy-forms__django-crispy-forms | tests/forms.py | {
"start": 3856,
"end": 3944
} | class ____(BaseForm):
multi = forms.SplitDateTimeField()
| SampleFormWithMultiValueField |
python | ray-project__ray | rllib/connectors/common/numpy_to_tensor.py | {
"start": 659,
"end": 4632
} | class ____(ConnectorV2):
"""Converts numpy arrays across the entire input data into (framework) tensors.
The framework information is received via the provided `rl_module` arg in the
`__call__()` method.
Note: This is one of the default env-to-module or Learner ConnectorV2 pieces that
are added automatically by RLlib into every env-to-module/Learner connector
pipeline, unless `config.add_default_connectors_to_env_to_module_pipeline` or
`config.add_default_connectors_to_learner_pipeline ` are set to
False.
The default env-to-module connector pipeline is:
[
[0 or more user defined ConnectorV2 pieces],
AddObservationsFromEpisodesToBatch,
AddTimeDimToBatchAndZeroPad,
AddStatesFromEpisodesToBatch,
AgentToModuleMapping, # only in multi-agent setups!
BatchIndividualItems,
NumpyToTensor,
]
The default Learner connector pipeline is:
[
[0 or more user defined ConnectorV2 pieces],
AddObservationsFromEpisodesToBatch,
AddColumnsFromEpisodesToTrainBatch,
AddTimeDimToBatchAndZeroPad,
AddStatesFromEpisodesToBatch,
AgentToModuleMapping, # only in multi-agent setups!
BatchIndividualItems,
NumpyToTensor,
]
This ConnectorV2:
- Loops through the input `data` and converts all found numpy arrays into
framework-specific tensors (possibly on a GPU).
"""
def __init__(
self,
input_observation_space: Optional[gym.Space] = None,
input_action_space: Optional[gym.Space] = None,
*,
pin_memory: bool = False,
device: Optional["DeviceType"] = None,
**kwargs,
):
"""Initializes a NumpyToTensor instance.
Args:
pin_memory: Whether to pin memory when creating (torch) tensors.
If None (default), pins memory if `as_learner_connector` is True,
otherwise doesn't pin memory.
device: An optional device to move the resulting tensors to. If not
provided, all data will be left on the CPU.
**kwargs:
"""
super().__init__(
input_observation_space=input_observation_space,
input_action_space=input_action_space,
**kwargs,
)
self._pin_memory = pin_memory
self._device = device
@override(ConnectorV2)
def __call__(
self,
*,
rl_module: RLModule,
batch: Dict[str, Any],
episodes: List[EpisodeType],
explore: Optional[bool] = None,
shared_data: Optional[dict] = None,
**kwargs,
) -> Any:
is_single_agent = False
is_multi_rl_module = isinstance(rl_module, MultiRLModule)
# `data` already a ModuleID to batch mapping format.
if not (is_multi_rl_module and all(c in rl_module._rl_modules for c in batch)):
is_single_agent = True
batch = {DEFAULT_MODULE_ID: batch}
for module_id, module_data in batch.copy().items():
# If `rl_module` is None, leave data in numpy format.
if rl_module is not None:
infos = module_data.pop(Columns.INFOS, None)
if rl_module.framework == "torch":
module_data = convert_to_torch_tensor(
module_data, pin_memory=self._pin_memory, device=self._device
)
else:
raise ValueError(
"`NumpyToTensor`does NOT support frameworks other than torch!"
)
if infos is not None:
module_data[Columns.INFOS] = infos
# Early out with data under(!) `DEFAULT_MODULE_ID`, b/c we are in plain
# single-agent mode.
if is_single_agent:
return module_data
batch[module_id] = module_data
return batch
| NumpyToTensor |
python | keras-team__keras | keras/src/callbacks/reduce_lr_on_plateau.py | {
"start": 268,
"end": 4766
} | class ____(MonitorCallback):
"""Reduce learning rate when a metric has stopped improving.
Models often benefit from reducing the learning rate by a factor
of 2-10 once learning stagnates. This callback monitors a
quantity and if no improvement is seen for a 'patience' number
of epochs, the learning rate is reduced.
Example:
```python
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
model.fit(x_train, y_train, callbacks=[reduce_lr])
```
Args:
monitor: String. Quantity to be monitored.
factor: Float. Factor by which the learning rate will be reduced.
`new_lr = lr * factor`.
patience: Integer. Number of epochs with no improvement after which
learning rate will be reduced.
verbose: Integer. 0: quiet, 1: update messages.
mode: String. One of `{'auto', 'min', 'max'}`. In `'min'` mode,
the learning rate will be reduced when the
quantity monitored has stopped decreasing; in `'max'` mode it will
be reduced when the quantity monitored has stopped increasing; in
`'auto'` mode, the direction is automatically inferred from the name
of the monitored quantity.
min_delta: Float. Threshold for measuring the new optimum, to only focus
on significant changes.
cooldown: Integer. Number of epochs to wait before resuming normal
operation after the learning rate has been reduced.
min_lr: Float. Lower bound on the learning rate.
"""
def __init__(
self,
monitor="val_loss",
factor=0.1,
patience=10,
verbose=0,
mode="auto",
min_delta=1e-4,
cooldown=0,
min_lr=0.0,
**kwargs,
):
super().__init__(monitor, mode, min_delta=min_delta)
if factor >= 1.0:
raise ValueError(
"ReduceLROnPlateau does not support a factor >= 1.0. "
f"Received factor={factor}"
)
self.factor = factor
self.min_lr = min_lr
self.patience = patience
self.verbose = verbose
self.cooldown = cooldown
self.cooldown_counter = 0 # Cooldown counter.
self.wait = 0
def _reset(self):
"""Resets wait counter and cooldown counter."""
self.cooldown_counter = 0
self.wait = 0
def on_train_begin(self, logs=None):
self._reset()
def on_epoch_end(self, epoch, logs=None):
if self.monitor_op is None:
# Delay setup until the model's metrics are all built
self._set_monitor_op()
logs = logs or {}
logs["learning_rate"] = float(
backend.convert_to_numpy(self.model.optimizer.learning_rate)
)
current = logs.get(self.monitor)
if current is None:
warnings.warn(
"Learning rate reduction is conditioned on metric "
f"`{self.monitor}` which is not available. Available metrics "
f"are: {','.join(list(logs.keys()))}.",
stacklevel=2,
)
else:
if self.in_cooldown():
self.cooldown_counter -= 1
self.wait = 0
if self._is_improvement(current, self.best):
self.best = current
self.wait = 0
elif not self.in_cooldown():
self.wait += 1
if self.wait >= self.patience:
old_lr = float(
backend.convert_to_numpy(
self.model.optimizer.learning_rate
)
)
if old_lr > np.float32(self.min_lr):
new_lr = old_lr * self.factor
new_lr = max(new_lr, self.min_lr)
self.model.optimizer.learning_rate = new_lr
if self.verbose > 0:
io_utils.print_msg(
f"\nEpoch {epoch + 1}: "
"ReduceLROnPlateau reducing "
f"learning rate to {new_lr}."
)
self.cooldown_counter = self.cooldown
self.wait = 0
def in_cooldown(self):
return self.cooldown_counter > 0
| ReduceLROnPlateau |
python | huggingface__transformers | src/transformers/data/processors/glue.py | {
"start": 4230,
"end": 5999
} | class ____(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence1"].numpy().decode("utf-8"),
tensor_dict["sentence2"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
logger.info(f"LOOKING AT {os.path.join(data_dir, 'train.tsv')}")
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
examples = []
for i, line in enumerate(lines):
if i == 0:
continue
guid = f"{set_type}-{i}"
text_a = line[3]
text_b = line[4]
label = None if set_type == "test" else line[0]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
| MrpcProcessor |
python | huggingface__transformers | src/transformers/models/edgetam_video/modular_edgetam_video.py | {
"start": 19230,
"end": 19310
} | class ____(Sam2VideoVisionEncoderOutput):
pass
| EdgeTamVideoVisionEncoderOutput |
python | django__django | tests/forms_tests/field_tests/test_filefield.py | {
"start": 4608,
"end": 4681
} | class ____(FileInput):
allow_multiple_selected = True
| MultipleFileInput |
python | huggingface__transformers | src/transformers/models/glm46v/processing_glm46v.py | {
"start": 1562,
"end": 1857
} | class ____(ProcessingKwargs, total=False):
_defaults = {
"text_kwargs": {
"padding": False,
"return_token_type_ids": False,
"return_mm_token_type_ids": False,
},
"videos_kwargs": {"return_metadata": True},
}
| Glm46VProcessorKwargs |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/super10.py | {
"start": 115,
"end": 181
} | class ____(A):
def clone(self):
return super().clone()
| B |
python | PyCQA__pyflakes | pyflakes/messages.py | {
"start": 424,
"end": 622
} | class ____(Message):
message = '%r imported but unused'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
| UnusedImport |
python | django__django | django/contrib/sites/managers.py | {
"start": 148,
"end": 1994
} | class ____(models.Manager):
"Use this to limit objects to those associated with the current site."
use_in_migrations = True
def __init__(self, field_name=None):
super().__init__()
self.__field_name = field_name
def check(self, **kwargs):
errors = super().check(**kwargs)
errors.extend(self._check_field_name())
return errors
def _check_field_name(self):
field_name = self._get_field_name()
try:
field = self.model._meta.get_field(field_name)
except FieldDoesNotExist:
return [
checks.Error(
"CurrentSiteManager could not find a field named '%s'."
% field_name,
obj=self,
id="sites.E001",
)
]
if not field.many_to_many and not isinstance(field, (models.ForeignKey)):
return [
checks.Error(
"CurrentSiteManager cannot use '%s.%s' as it is not a foreign key "
"or a many-to-many field."
% (self.model._meta.object_name, field_name),
obj=self,
id="sites.E002",
)
]
return []
def _get_field_name(self):
"""Return self.__field_name or 'site' or 'sites'."""
if not self.__field_name:
try:
self.model._meta.get_field("site")
except FieldDoesNotExist:
self.__field_name = "sites"
else:
self.__field_name = "site"
return self.__field_name
def get_queryset(self):
return (
super()
.get_queryset()
.filter(**{self._get_field_name() + "__id": settings.SITE_ID})
)
| CurrentSiteManager |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/callbackProtocol11.py | {
"start": 241,
"end": 277
} | class ____(Generic[T_co, U_co]): ...
| A |
python | fabric__fabric | fabric/group.py | {
"start": 8527,
"end": 10469
} | class ____(Group):
"""
Subclass of `.Group` which uses threading to execute concurrently.
.. versionadded:: 2.0
"""
def _do(self, method, *args, **kwargs):
results = GroupResult()
queue = Queue()
threads = []
for cxn in self:
thread = ExceptionHandlingThread(
target=thread_worker,
kwargs=dict(
cxn=cxn,
queue=queue,
method=method,
args=args,
kwargs=kwargs,
),
)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
# TODO: configurable join timeout
thread.join()
# Get non-exception results from queue
while not queue.empty():
# TODO: io-sleep? shouldn't matter if all threads are now joined
cxn, result = queue.get(block=False)
# TODO: outstanding musings about how exactly aggregate results
# ought to ideally operate...heterogenous obj like this, multiple
# objs, ??
results[cxn] = result
# Get exceptions from the threads themselves.
# TODO: in a non-thread setup, this would differ, e.g.:
# - a queue if using multiprocessing
# - some other state-passing mechanism if using e.g. coroutines
# - ???
excepted = False
for thread in threads:
wrapper = thread.exception()
if wrapper is not None:
# Outer kwargs is Thread instantiation kwargs, inner is kwargs
# passed to thread target/body.
cxn = wrapper.kwargs["kwargs"]["cxn"]
results[cxn] = wrapper.value
excepted = True
if excepted:
raise GroupException(results)
return results
| ThreadingGroup |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/reflection.py | {
"start": 74630,
"end": 75527
} | class ____:
columns: Dict[TableKey, List[ReflectedColumn]]
pk_constraint: Dict[TableKey, Optional[ReflectedPrimaryKeyConstraint]]
foreign_keys: Dict[TableKey, List[ReflectedForeignKeyConstraint]]
indexes: Dict[TableKey, List[ReflectedIndex]]
# optionals
unique_constraints: Dict[TableKey, List[ReflectedUniqueConstraint]]
table_comment: Dict[TableKey, Optional[ReflectedTableComment]]
check_constraints: Dict[TableKey, List[ReflectedCheckConstraint]]
table_options: Dict[TableKey, Dict[str, Any]]
unreflectable: Dict[TableKey, exc.UnreflectableTableError]
def update(self, other: _ReflectionInfo) -> None:
for k, v in self.__dict__.items():
ov = getattr(other, k)
if ov is not None:
if v is None:
setattr(self, k, ov)
else:
v.update(ov)
| _ReflectionInfo |
python | tensorflow__tensorflow | tensorflow/lite/python/lite_v2_test.py | {
"start": 205520,
"end": 206977
} | class ____(lite_v2_test_util.ModelTest):
@parameterized.named_parameters(
('EnableCanonicalizeInfAsMaxMinFloatFromSavedModel', True, True),
('DisableCanonicalizeInfAsMaxMinFloatFromSavedModel', False, True),
('EnableCanonicalizeInfAsMaxMinFloatFromConcreteFunc', True, False),
('DisableCanonicalizeInfAsMaxMinFloatFromConcreteFunc', False, False),
)
@test_util.run_v2_only
def testFloatBoundaryValue(self, is_canonicalized, is_from_saved_model):
root = self._getInfFloatModel()
input_data = None
concrete_func = root.f.get_concrete_function(input_data)
mdl = tf.Module()
mdl.f = concrete_func
def _get_converter() -> lite.TFLiteConverterV2:
if is_from_saved_model:
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
tf.saved_model.save(mdl, save_dir)
return lite.TFLiteConverterV2.from_saved_model(save_dir)
return lite.TFLiteConverterV2.from_concrete_functions(
[concrete_func], root
)
converter = _get_converter()
converter.canonicalizing_inf_as_min_max_float = is_canonicalized
tflite_model = converter.convert()
# Check output value from converted model.
expected_value = [np.finfo(np.float32).max if is_canonicalized else np.inf]
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value, actual_value)
if __name__ == '__main__':
test.main()
| BoundaryValueTest |
python | tornadoweb__tornado | demos/chat/chatdemo.py | {
"start": 1910,
"end": 2569
} | class ____(tornado.web.RequestHandler):
"""Post a new message to the chat room."""
def post(self):
message = {"id": str(uuid.uuid4()), "body": self.get_argument("body")}
# render_string() returns a byte string, which is not supported
# in json, so we must convert it to a character string.
message["html"] = tornado.escape.to_unicode(
self.render_string("message.html", message=message)
)
if self.get_argument("next", None):
self.redirect(self.get_argument("next"))
else:
self.write(message)
global_message_buffer.add_message(message)
| MessageNewHandler |
python | run-llama__llama_index | llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/events.py | {
"start": 563,
"end": 683
} | class ____(TextMessageStartEvent, Event):
type: EventType = EventType.TEXT_MESSAGE_START
| TextMessageStartWorkflowEvent |
python | encode__starlette | starlette/responses.py | {
"start": 9665,
"end": 9785
} | class ____(Exception):
def __init__(self, max_size: int) -> None:
self.max_size = max_size
| RangeNotSatisfiable |
python | walkccc__LeetCode | solutions/3149. Find the Minimum Cost Array Permutation/3149.py | {
"start": 0,
"end": 899
} | class ____:
def findPermutation(self, nums: list[int]) -> list[int]:
n = len(nums)
bestPick = [[0] * (1 << n) for _ in range(n)]
@functools.lru_cache(None)
def getScore(last: int, mask: int) -> int:
if mask.bit_count() == len(nums):
return abs(last - nums[0])
minScore = math.inf
for i in range(1, len(nums)):
if mask >> i & 1:
continue
nextMinScore = abs(last - nums[i]) + getScore(i, mask | (1 << i))
if nextMinScore < minScore:
minScore = nextMinScore
bestPick[last][mask] = i
return minScore
getScore(0, 1)
return self._construct(bestPick)
def _construct(self, bestPick: list[list[int]]) -> list[int]:
ans = []
last = 0
mask = 1
for _ in range(len(bestPick)):
ans.append(last)
last = bestPick[last][mask]
mask |= 1 << last
return ans
| Solution |
python | allegroai__clearml | clearml/backend_api/services/v2_23/datasets.py | {
"start": 128685,
"end": 129554
} | class ____(Request):
"""
Gets dataset information
:param dataset: Dataset ID
:type dataset: str
"""
_service = "datasets"
_action = "get_by_id"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {"dataset": {"description": "Dataset ID", "type": "string"}},
"required": ["dataset"],
"type": "object",
}
def __init__(self, dataset, **kwargs):
super(GetByIdRequest, self).__init__(**kwargs)
self.dataset = dataset
@schema_property("dataset")
def dataset(self):
return self._property_dataset
@dataset.setter
def dataset(self, value):
if value is None:
self._property_dataset = None
return
self.assert_isinstance(value, "dataset", six.string_types)
self._property_dataset = value
| GetByIdRequest |
python | django__django | django/contrib/sitemaps/views.py | {
"start": 406,
"end": 4648
} | class ____:
location: str
last_mod: bool = None
def x_robots_tag(func):
@wraps(func)
def inner(request, *args, **kwargs):
response = func(request, *args, **kwargs)
response.headers["X-Robots-Tag"] = "noindex, noodp, noarchive"
return response
return inner
def _get_latest_lastmod(current_lastmod, new_lastmod):
"""
Returns the latest `lastmod` where `lastmod` can be either a date or a
datetime.
"""
if not isinstance(new_lastmod, datetime.datetime):
new_lastmod = datetime.datetime.combine(new_lastmod, datetime.time.min)
if timezone.is_naive(new_lastmod):
new_lastmod = timezone.make_aware(new_lastmod, datetime.UTC)
return new_lastmod if current_lastmod is None else max(current_lastmod, new_lastmod)
@x_robots_tag
def index(
request,
sitemaps,
template_name="sitemap_index.xml",
content_type="application/xml",
sitemap_url_name="django.contrib.sitemaps.views.sitemap",
):
req_protocol = request.scheme
req_site = get_current_site(request)
sites = [] # all sections' sitemap URLs
all_indexes_lastmod = True
latest_lastmod = None
for section, site in sitemaps.items():
# For each section label, add links of all pages of its sitemap
# (usually generated by the `sitemap` view).
if callable(site):
site = site()
protocol = req_protocol if site.protocol is None else site.protocol
sitemap_url = reverse(sitemap_url_name, kwargs={"section": section})
absolute_url = "%s://%s%s" % (protocol, req_site.domain, sitemap_url)
site_lastmod = site.get_latest_lastmod()
if all_indexes_lastmod:
if site_lastmod is not None:
latest_lastmod = _get_latest_lastmod(latest_lastmod, site_lastmod)
else:
all_indexes_lastmod = False
sites.append(SitemapIndexItem(absolute_url, site_lastmod))
# Add links to all pages of the sitemap.
for page in range(2, site.paginator.num_pages + 1):
sites.append(
SitemapIndexItem("%s?p=%s" % (absolute_url, page), site_lastmod)
)
# If lastmod is defined for all sites, set header so as
# ConditionalGetMiddleware is able to send 304 NOT MODIFIED
if all_indexes_lastmod and latest_lastmod:
headers = {"Last-Modified": http_date(latest_lastmod.timestamp())}
else:
headers = None
return TemplateResponse(
request,
template_name,
{"sitemaps": sites},
content_type=content_type,
headers=headers,
)
@x_robots_tag
def sitemap(
request,
sitemaps,
section=None,
template_name="sitemap.xml",
content_type="application/xml",
):
req_protocol = request.scheme
req_site = get_current_site(request)
if section is not None:
if section not in sitemaps:
raise Http404("No sitemap available for section: %r" % section)
maps = [sitemaps[section]]
else:
maps = sitemaps.values()
page = request.GET.get("p", 1)
lastmod = None
all_sites_lastmod = True
urls = []
for site in maps:
try:
if callable(site):
site = site()
urls.extend(site.get_urls(page=page, site=req_site, protocol=req_protocol))
if all_sites_lastmod:
site_lastmod = getattr(site, "latest_lastmod", None)
if site_lastmod is not None:
lastmod = _get_latest_lastmod(lastmod, site_lastmod)
else:
all_sites_lastmod = False
except EmptyPage:
raise Http404("Page %s empty" % page)
except PageNotAnInteger:
raise Http404("No page '%s'" % page)
# If lastmod is defined for all sites, set header so as
# ConditionalGetMiddleware is able to send 304 NOT MODIFIED
if all_sites_lastmod:
headers = {"Last-Modified": http_date(lastmod.timestamp())} if lastmod else None
else:
headers = None
return TemplateResponse(
request,
template_name,
{"urlset": urls},
content_type=content_type,
headers=headers,
)
| SitemapIndexItem |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/workspace/context.py | {
"start": 31998,
"end": 33425
} | class ____(ABC, Generic[TRequestContext]):
"""Class that stores process-scoped information about a webserver session.
In most cases, you will want to create a `BaseWorkspaceRequestContext` to create a request-scoped
object.
"""
@abstractmethod
def create_request_context(self, source: Optional[Any] = None) -> TRequestContext:
"""Create a usable fixed context for the scope of a request.
Args:
source (Optional[Any]):
The source of the request, such as an object representing the web request
or http connection.
"""
@property
@abstractmethod
def version(self) -> str:
pass
@abstractmethod
def reload_code_location(self, name: str) -> None:
pass
def shutdown_code_location(self, name: str) -> None:
raise NotImplementedError
@abstractmethod
def reload_workspace(self) -> None:
"""Reload the code in each code location."""
pass
@abstractmethod
def refresh_workspace(self) -> None:
"""Refresh the snapshots for each code location, without reloading the underlying code."""
pass
@property
@abstractmethod
def instance(self) -> DagsterInstance:
pass
def __enter__(self) -> Self:
return self
def __exit__(self, exception_type, exception_value, traceback) -> None:
pass
| IWorkspaceProcessContext |
python | gevent__gevent | src/gevent/tests/test__socket_send_memoryview.py | {
"start": 84,
"end": 659
} | class ____(ctypes.Structure):
_fields_ = [("x", ctypes.c_int)]
def _send(socket):
for meth in ('sendall', 'send'):
anStructure = AnStructure()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect((greentest.DEFAULT_CONNECT_HOST, 12345))
getattr(sock, meth)(anStructure)
sock.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect((greentest.DEFAULT_CONNECT_HOST, 12345))
sock.settimeout(1.0)
getattr(sock, meth)(anStructure)
sock.close()
| AnStructure |
python | wandb__wandb | wandb/vendor/pygments/lexers/c_like.py | {
"start": 16194,
"end": 24121
} | class ____(CppLexer):
"""
For `Arduino(tm) <https://arduino.cc/>`_ source.
This is an extension of the CppLexer, as the Arduino® Language is a superset
of C++
.. versionadded:: 2.1
"""
name = 'Arduino'
aliases = ['arduino']
filenames = ['*.ino']
mimetypes = ['text/x-arduino']
# Language sketch main structure functions
structure = set(('setup', 'loop'))
# Language operators
operators = set(('not', 'or', 'and', 'xor'))
# Language 'variables'
variables = set((
'DIGITAL_MESSAGE', 'FIRMATA_STRING', 'ANALOG_MESSAGE', 'REPORT_DIGITAL',
'REPORT_ANALOG', 'INPUT_PULLUP', 'SET_PIN_MODE', 'INTERNAL2V56', 'SYSTEM_RESET',
'LED_BUILTIN', 'INTERNAL1V1', 'SYSEX_START', 'INTERNAL', 'EXTERNAL', 'HIGH',
'LOW', 'INPUT', 'OUTPUT', 'INPUT_PULLUP', 'LED_BUILTIN', 'true', 'false',
'void', 'boolean', 'char', 'unsigned char', 'byte', 'int', 'unsigned int',
'word', 'long', 'unsigned long', 'short', 'float', 'double', 'string', 'String',
'array', 'static', 'volatile', 'const', 'boolean', 'byte', 'word', 'string',
'String', 'array', 'int', 'float', 'private', 'char', 'virtual', 'operator',
'sizeof', 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t', 'int8_t', 'int16_t',
'int32_t', 'int64_t', 'dynamic_cast', 'typedef', 'const_cast', 'const',
'struct', 'static_cast', 'union', 'unsigned', 'long', 'volatile', 'static',
'protected', 'bool', 'public', 'friend', 'auto', 'void', 'enum', 'extern',
'class', 'short', 'reinterpret_cast', 'double', 'register', 'explicit',
'signed', 'inline', 'delete', '_Bool', 'complex', '_Complex', '_Imaginary',
'atomic_bool', 'atomic_char', 'atomic_schar', 'atomic_uchar', 'atomic_short',
'atomic_ushort', 'atomic_int', 'atomic_uint', 'atomic_long', 'atomic_ulong',
'atomic_llong', 'atomic_ullong', 'PROGMEM'))
# Language shipped functions and class ( )
functions = set((
'KeyboardController', 'MouseController', 'SoftwareSerial', 'EthernetServer',
'EthernetClient', 'LiquidCrystal', 'RobotControl', 'GSMVoiceCall',
'EthernetUDP', 'EsploraTFT', 'HttpClient', 'RobotMotor', 'WiFiClient',
'GSMScanner', 'FileSystem', 'Scheduler', 'GSMServer', 'YunClient', 'YunServer',
'IPAddress', 'GSMClient', 'GSMModem', 'Keyboard', 'Ethernet', 'Console',
'GSMBand', 'Esplora', 'Stepper', 'Process', 'WiFiUDP', 'GSM_SMS', 'Mailbox',
'USBHost', 'Firmata', 'PImage', 'Client', 'Server', 'GSMPIN', 'FileIO',
'Bridge', 'Serial', 'EEPROM', 'Stream', 'Mouse', 'Audio', 'Servo', 'File',
'Task', 'GPRS', 'WiFi', 'Wire', 'TFT', 'GSM', 'SPI', 'SD',
'runShellCommandAsynchronously', 'analogWriteResolution',
'retrieveCallingNumber', 'printFirmwareVersion', 'analogReadResolution',
'sendDigitalPortPair', 'noListenOnLocalhost', 'readJoystickButton',
'setFirmwareVersion', 'readJoystickSwitch', 'scrollDisplayRight',
'getVoiceCallStatus', 'scrollDisplayLeft', 'writeMicroseconds',
'delayMicroseconds', 'beginTransmission', 'getSignalStrength',
'runAsynchronously', 'getAsynchronously', 'listenOnLocalhost',
'getCurrentCarrier', 'readAccelerometer', 'messageAvailable',
'sendDigitalPorts', 'lineFollowConfig', 'countryNameWrite', 'runShellCommand',
'readStringUntil', 'rewindDirectory', 'readTemperature', 'setClockDivider',
'readLightSensor', 'endTransmission', 'analogReference', 'detachInterrupt',
'countryNameRead', 'attachInterrupt', 'encryptionType', 'readBytesUntil',
'robotNameWrite', 'readMicrophone', 'robotNameRead', 'cityNameWrite',
'userNameWrite', 'readJoystickY', 'readJoystickX', 'mouseReleased',
'openNextFile', 'scanNetworks', 'noInterrupts', 'digitalWrite', 'beginSpeaker',
'mousePressed', 'isActionDone', 'mouseDragged', 'displayLogos', 'noAutoscroll',
'addParameter', 'remoteNumber', 'getModifiers', 'keyboardRead', 'userNameRead',
'waitContinue', 'processInput', 'parseCommand', 'printVersion', 'readNetworks',
'writeMessage', 'blinkVersion', 'cityNameRead', 'readMessage', 'setDataMode',
'parsePacket', 'isListening', 'setBitOrder', 'beginPacket', 'isDirectory',
'motorsWrite', 'drawCompass', 'digitalRead', 'clearScreen', 'serialEvent',
'rightToLeft', 'setTextSize', 'leftToRight', 'requestFrom', 'keyReleased',
'compassRead', 'analogWrite', 'interrupts', 'WiFiServer', 'disconnect',
'playMelody', 'parseFloat', 'autoscroll', 'getPINUsed', 'setPINUsed',
'setTimeout', 'sendAnalog', 'readSlider', 'analogRead', 'beginWrite',
'createChar', 'motorsStop', 'keyPressed', 'tempoWrite', 'readButton',
'subnetMask', 'debugPrint', 'macAddress', 'writeGreen', 'randomSeed',
'attachGPRS', 'readString', 'sendString', 'remotePort', 'releaseAll',
'mouseMoved', 'background', 'getXChange', 'getYChange', 'answerCall',
'getResult', 'voiceCall', 'endPacket', 'constrain', 'getSocket', 'writeJSON',
'getButton', 'available', 'connected', 'findUntil', 'readBytes', 'exitValue',
'readGreen', 'writeBlue', 'startLoop', 'IPAddress', 'isPressed', 'sendSysex',
'pauseMode', 'gatewayIP', 'setCursor', 'getOemKey', 'tuneWrite', 'noDisplay',
'loadImage', 'switchPIN', 'onRequest', 'onReceive', 'changePIN', 'playFile',
'noBuffer', 'parseInt', 'overflow', 'checkPIN', 'knobRead', 'beginTFT',
'bitClear', 'updateIR', 'bitWrite', 'position', 'writeRGB', 'highByte',
'writeRed', 'setSpeed', 'readBlue', 'noStroke', 'remoteIP', 'transfer',
'shutdown', 'hangCall', 'beginSMS', 'endWrite', 'attached', 'maintain',
'noCursor', 'checkReg', 'checkPUK', 'shiftOut', 'isValid', 'shiftIn', 'pulseIn',
'connect', 'println', 'localIP', 'pinMode', 'getIMEI', 'display', 'noBlink',
'process', 'getBand', 'running', 'beginSD', 'drawBMP', 'lowByte', 'setBand',
'release', 'bitRead', 'prepare', 'pointTo', 'readRed', 'setMode', 'noFill',
'remove', 'listen', 'stroke', 'detach', 'attach', 'noTone', 'exists', 'buffer',
'height', 'bitSet', 'circle', 'config', 'cursor', 'random', 'IRread', 'setDNS',
'endSMS', 'getKey', 'micros', 'millis', 'begin', 'print', 'write', 'ready',
'flush', 'width', 'isPIN', 'blink', 'clear', 'press', 'mkdir', 'rmdir', 'close',
'point', 'yield', 'image', 'BSSID', 'click', 'delay', 'read', 'text', 'move',
'peek', 'beep', 'rect', 'line', 'open', 'seek', 'fill', 'size', 'turn', 'stop',
'home', 'find', 'step', 'tone', 'sqrt', 'RSSI', 'SSID', 'end', 'bit', 'tan',
'cos', 'sin', 'pow', 'map', 'abs', 'max', 'min', 'get', 'run', 'put',
'isAlphaNumeric', 'isAlpha', 'isAscii', 'isWhitespace', 'isControl', 'isDigit',
'isGraph', 'isLowerCase', 'isPrintable', 'isPunct', 'isSpace', 'isUpperCase',
'isHexadecimalDigit'))
# do not highlight
suppress_highlight = set((
'namespace', 'template', 'mutable', 'using', 'asm', 'typeid',
'typename', 'this', 'alignof', 'constexpr', 'decltype', 'noexcept',
'static_assert', 'thread_local', 'restrict'))
def get_tokens_unprocessed(self, text):
for index, token, value in CppLexer.get_tokens_unprocessed(self, text):
if value in self.structure:
yield index, Name.Builtin, value
elif value in self.operators:
yield index, Operator, value
elif value in self.variables:
yield index, Keyword.Reserved, value
elif value in self.suppress_highlight:
yield index, Name, value
elif value in self.functions:
yield index, Name.Function, value
else:
yield index, token, value
| ArduinoLexer |
python | squidfunk__mkdocs-material | material/plugins/tags/structure/listing/manager/__init__.py | {
"start": 2071,
"end": 18572
} | class ____:
"""
A listing manager.
The listing manager collects all listings from the Markdown of pages, then
populates them with mappings, and renders them. Furthermore, the listing
manager allows to obtain tag references for a given mapping, which are
tags annotated with links to listings.
"""
def __init__(self, config: TagsConfig, depth: int = 6):
"""
Initialize the listing manager.
Arguments:
config: The configuration.
"""
self.config = config
self.data = set()
self.depth = depth
def __repr__(self) -> str:
"""
Return a printable representation of the listing manager.
Returns:
Printable representation.
"""
return _print(self)
def __iter__(self) -> Iterator[Listing]:
"""
Iterate over listings.
Yields:
The current listing.
"""
return iter(self.data)
def __and__(self, mapping: Mapping) -> Iterator[TagReference]:
"""
Iterate over the tag references for the mapping.
Arguments:
mapping: The mapping.
Yields:
The current tag reference.
"""
assert isinstance(mapping, Mapping)
# Iterate over sorted tags and associate tags with listings - note that
# we sort the listings for the mapping by closeness, so that the first
# listing in the list is the closest one to the page or link the
# mapping is associated with
listings = self.closest(mapping)
for tag in self._sort_tags(mapping.tags):
ref = TagReference(tag)
# Iterate over listings and add links
for listing in listings:
if tag in listing & mapping:
value = listing.page.url or "."
# Compute URL for link - make sure to remove fragments, as
# they may be present in links extracted from remote tags.
# Additionally, we need to fallback to `.` if the URL is
# empty (= homepage) or the links will be incorrect.
url = urlparse(value, allow_fragments = False)
url = url._replace(fragment = self._slugify(tag))
# Add listing link to tag reference
ref.links.append(
Link(listing.page.title, url.geturl())
)
# Yield tag reference
yield ref
# -------------------------------------------------------------------------
config: TagsConfig
"""
The configuration.
"""
data: set[Listing]
"""
The listings.
"""
depth: int
"""
Table of contents maximum depth.
"""
# -------------------------------------------------------------------------
def add(self, page: Page, markdown: str) -> str:
"""
Add page.
This method is called by the tags plugin to retrieve all listings of a
page. It will parse the page's Markdown and add injections points into
the page's Markdown, which will be replaced by the renderer with the
actual listing later on.
Note that this method is intended to be called with the page during the
`on_page_markdown` event, as it modifies the page's Markdown. Moreover,
the Markdown must be explicitly passed, as we could otherwise run into
inconsistencies when other plugins modify the Markdown.
Arguments:
page: The page.
markdown: The page's Markdown.
Returns:
The page's Markdown with injection points.
"""
assert isinstance(markdown, str)
# Replace callback
def replace(match: Match) -> str:
config = self._resolve(page, match.group(2))
# Compute listing identifier - as the author might include multiple
# listings on a single page, we must make sure that the identifier
# is unique, so we use the page source file path and the position
# of the match within the page as an identifier.
id = f"{page.file.src_uri}:{match.start()}-{match.end()}"
# Replace whitespaces in the identifier that we computed, or we
# can't just prefix it with "#" - see https://t.ly/U_hfp
id = id.replace(" ", "-")
self.data.add(Listing(page, id, config))
# Replace directive with hx headline if listings are enabled, or
# remove the listing entirely from the page and table of contents
if self.config.listings:
return "#" * self.depth + f" {id}/name {{ #{id}/slug }}"
else:
return
# Hack: replace directive with an hx headline to mark the injection
# point for the anchor links we will generate after parsing all pages.
# By using an hx headline, we can make sure that the injection point
# will always be a child of the preceding headline.
directive = self.config.listings_directive
return re.sub(
r"(<!--\s*?{directive}(.*?)\s*-->)".format(directive = directive),
replace, markdown, flags = re.I | re.M | re.S
)
def closest(self, mapping: Mapping) -> list[Listing]:
"""
Get listings for the mapping ordered by closeness.
Listings are sorted by closeness to the given page, i.e. the number of
common path components. This is useful for hierarchical listings, where
the tags of a page point to the closest listing featuring the tag, with
the option to show all listings featuring that tag.
Arguments:
mapping: The mapping.
Returns:
The listings.
"""
# Retrieve listings featuring tags of mapping
listings: list[Listing] = []
for listing in self.data:
if any(listing & mapping):
listings.append(listing)
# Ranking callback
def rank(listing: Listing) -> int:
path = posixpath.commonpath([mapping.item.url, listing.page.url])
return len(path)
# Return listings ordered by closeness to mapping
return sorted(listings, key = rank, reverse = True)
def populate(
self, listing: Listing, mappings: Iterable[Mapping], renderer: Renderer
) -> None:
"""
Populate listing with tags featured in the mappings.
Arguments:
listing: The listing.
mappings: The mappings.
renderer: The renderer.
"""
page = listing.page
assert isinstance(page.content, str)
# Add mappings to listing, passing shadow tags configuration
for mapping in mappings:
listing.add(mapping, hidden = listing.config.shadow)
# Sort listings and tags - we can only do this after all mappings have
# been added to the listing, because the tags inside the mappings do
# not have a proper order yet, and we need to order them as specified
# in the listing configuration.
listing.tags = self._sort_listing_tags(listing.tags)
# Render tags for listing headlines - the listing configuration allows
# tp specify a custom layout, so we resolve the template for tags here
name = posixpath.join(listing.config.layout, "tag.html")
for tree in listing:
tree.content = renderer.render(page, name, tag = tree.tag)
# Sort mappings and subtrees of listing tree
tree.mappings = self._sort_listing(tree.mappings)
tree.children = self._sort_listing_tags(tree.children)
# Replace callback
def replace(match: Match) -> str:
hx = match.group()
# Populate listing with anchor links to tags
anchors = toc.populate(listing, self._slugify)
if not anchors:
return
# Get reference to first tag in listing
head = next(iter(anchors.values()))
# Replace hx with actual level of listing and listing ids with
# placeholders to create a format string for the headline
hx = re.sub(
r"<(/?)h{}\b".format(self.depth),
r"<\g<1>h{}".format(head.level), hx
)
hx = re.sub(
r"{id}\/(\w+)".format(id = listing.id),
r"{\1}", hx, flags = re.I | re.M
)
# Render listing headlines
for tree in listing:
tree.content = hx.format(
slug = anchors[tree.tag].id,
name = tree.content
)
# Render listing - the listing configuration allows to specify a
# custom layout, so we resolve the template for listings here
name = posixpath.join(listing.config.layout, "listing.html")
return "\n".join([
renderer.render(page, name, listing = tree)
for tree in listing.tags.values()
])
# Hack: replace hx headlines (injection points) we added when parsing
# the page's Markdown with the actual listing content. Additionally,
# replace anchor links in the table of contents with the hierarchy
# generated from mapping over the listing, or remove them.
page.content = re.sub(
r"<h{x}[^>]+{id}.*?</h{x}>".format(
id = f"{listing.id}/slug", x = self.depth
),
replace, page.content, flags = re.I | re.M
)
def populate_all(
self, mappings: Iterable[Mapping], renderer: Renderer
) -> None:
"""
Populate all listings with tags featured in the mappings.
This method is called by the tags plugin to populate all listings with
the given mappings. It will also remove the injection points from the
page's Markdown. Note that this method is intended to be called during
the `on_env` event, after all pages have been rendered.
Arguments:
mappings: The mappings.
renderer: The renderer.
"""
for listing in self.data:
self.populate(listing, mappings, renderer)
# -------------------------------------------------------------------------
def _resolve(self, page: Page, args: str) -> ListingConfig:
"""
Resolve listing configuration.
Arguments:
page: The page the listing in embedded in.
args: The arguments, as parsed from Markdown.
Returns:
The listing configuration.
"""
data = yaml.safe_load(args)
path = page.file.abs_src_path
# Try to resolve available listing configuration
if isinstance(data, str):
config = self.config.listings_map.get(data, None)
if not config:
keys = ", ".join(self.config.listings_map.keys())
raise PluginError(
f"Couldn't find listing configuration: {data}. Available "
f"configurations: {keys}"
)
# Otherwise, handle inline listing configuration
else:
config = ListingConfig(config_file_path = path)
config.load_dict(data or {})
# Validate listing configuration
errors, warnings = config.validate()
for _, w in warnings:
path = os.path.relpath(path)
log.warning(
f"Error reading listing configuration in '{path}':\n"
f"{w}"
)
for _, e in errors:
path = os.path.relpath(path)
raise PluginError(
f"Error reading listing configuration in '{path}':\n"
f"{e}"
)
# Inherit shadow tags configuration, unless explicitly set
if not isinstance(config.shadow, bool):
config.shadow = self.config.shadow
# Inherit layout configuration, unless explicitly set
if not isinstance(config.layout, str):
config.layout = self.config.listings_layout
# Inherit table of contents configuration, unless explicitly set
if not isinstance(config.toc, bool):
config.toc = self.config.listings_toc
# Return listing configuration
return config
# -------------------------------------------------------------------------
def _slugify(self, tag: Tag) -> str:
"""
Slugify tag.
If the tag hierarchy setting is enabled, the tag is expanded into a
hierarchy of tags, all of which are then slugified and joined with the
configured separator. Otherwise, the tag is slugified directly. This is
necessary to keep the tag hierarchy in the slug.
Arguments:
tag: The tag.
Returns:
The slug.
"""
slugify = self.config.tags_slugify
tags = [tag.name]
# Compute tag hierarchy, if configured
hierarchy = self.config.tags_hierarchy_separator
if self.config.tags_hierarchy:
tags = tag.name.split(hierarchy)
# Slugify tag hierarchy and join with separator
separator = self.config.tags_slugify_separator
return self.config.tags_slugify_format.format(
slug = hierarchy.join(slugify(name, separator) for name in tags)
)
# -------------------------------------------------------------------------
def _sort_listing(
self, mappings: Iterable[Mapping]
) -> list[Mapping]:
"""
Sort listing.
When sorting a listing, we sort the mappings of the listing, which is
why the caller must pass the mappings of the listing. That way, we can
keep this implementation to be purely functional, without having to
mutate the listing, which makes testing simpler.
Arguments:
mappings: The mappings.
Returns:
The sorted mappings.
"""
return sorted(
mappings,
key = self.config.listings_sort_by,
reverse = self.config.listings_sort_reverse
)
def _sort_listing_tags(
self, children: dict[Tag, ListingTree]
) -> dict[Tag, ListingTree]:
"""
Sort listing tags.
When sorting a listing's tags, we sort the immediate subtrees of the
listing, which is why the caller must pass the children of the listing.
That way, we can keep this implementation to be purely functional,
without having to mutate the listing.
Arguments:
children: The listing trees, each of which associated with a tag.
Returns:
The sorted listing trees.
"""
return dict(sorted(
children.items(),
key = lambda item: self.config.listings_tags_sort_by(*item),
reverse = self.config.listings_tags_sort_reverse
))
def _sort_tags(
self, tags: Iterable[Tag]
) -> list[Tag]:
"""
Sort tags.
Arguments:
tags: The tags.
Returns:
The sorted tags.
"""
return sorted(
tags,
key = self.config.tags_sort_by,
reverse = self.config.tags_sort_reverse
)
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
def _print(manager: ListingManager, indent: int = 0) -> str:
"""
Return a printable representation of a listing manager.
Arguments:
manager: The listing manager.
indent: The indentation level.
Returns:
Printable representation.
"""
lines: list[str] = []
lines.append(" " * indent + f"ListingManager()")
# Print listings
for listing in manager:
lines.append(" " * (indent + 2) + repr(listing))
# Concatenate everything
return "\n".join(lines)
# -----------------------------------------------------------------------------
# Data
# -----------------------------------------------------------------------------
# Set up logging
log = logging.getLogger("mkdocs.material.plugins.tags")
| ListingManager |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramInference1.py | {
"start": 128,
"end": 239
} | class ____:
def __init__(self, a: int, b: str): ...
def func1(self, a: int, b: str) -> float: ...
| Parent |
python | scrapy__scrapy | scrapy/extensions/feedexport.py | {
"start": 4092,
"end": 4656
} | class ____(ABC):
def open(self, spider: Spider) -> IO[bytes]:
path = spider.crawler.settings["FEED_TEMPDIR"]
if path and not Path(path).is_dir():
raise OSError("Not a Directory: " + str(path))
return NamedTemporaryFile(prefix="feed-", dir=path)
def store(self, file: IO[bytes]) -> Deferred[None] | None:
return deferToThread(self._store_in_thread, file)
@abstractmethod
def _store_in_thread(self, file: IO[bytes]) -> None:
raise NotImplementedError
@implementer(IFeedStorage)
| BlockingFeedStorage |
python | django-import-export__django-import-export | tests/core/tests/test_resources/test_relationships.py | {
"start": 2788,
"end": 9679
} | class ____(TestCase):
# issue #1270 - ensure ManyToMany fields are correctly checked for
# changes when skip_unchanged=True
fixtures = ["category", "book", "author"]
def setUp(self):
pass
def test_many_to_many_widget_create(self):
# the book is associated with 0 categories
# when we import a book with category 1, the book
# should be updated, not skipped
book = Book.objects.first()
book.categories.clear()
dataset_headers = ["id", "name", "categories"]
dataset_row = [book.id, book.name, "1"]
dataset = tablib.Dataset(headers=dataset_headers)
dataset.append(dataset_row)
book_resource = BookResource()
book_resource._meta.skip_unchanged = True
self.assertEqual(0, book.categories.count())
result = book_resource.import_data(dataset, dry_run=False)
book.refresh_from_db()
self.assertEqual(1, book.categories.count())
self.assertEqual(
result.rows[0].import_type, results.RowResult.IMPORT_TYPE_UPDATE
)
self.assertEqual(Category.objects.first(), book.categories.first())
def test_many_to_many_widget_create_with_m2m_being_compared(self):
# issue 1558 - when the object is a new instance and m2m is
# evaluated for differences
dataset_headers = ["id", "categories"]
dataset_row = ["1", "1"]
dataset = tablib.Dataset(headers=dataset_headers)
dataset.append(dataset_row)
book_resource = BookResource()
book_resource._meta.skip_unchanged = True
result = book_resource.import_data(dataset, dry_run=False)
self.assertFalse(result.has_errors())
self.assertEqual(len(result.rows), 1)
self.assertEqual(result.rows[0].import_type, results.RowResult.IMPORT_TYPE_NEW)
def test_many_to_many_widget_update(self):
# the book is associated with 1 category ('Category 2')
# when we import a book with category 1, the book
# should be updated, not skipped, so that Category 2 is replaced by Category 1
book = Book.objects.first()
dataset_headers = ["id", "name", "categories"]
dataset_row = [book.id, book.name, "1"]
dataset = tablib.Dataset(headers=dataset_headers)
dataset.append(dataset_row)
book_resource = BookResource()
book_resource._meta.skip_unchanged = True
self.assertEqual(1, book.categories.count())
result = book_resource.import_data(dataset, dry_run=False)
self.assertEqual(
result.rows[0].import_type, results.RowResult.IMPORT_TYPE_UPDATE
)
self.assertEqual(1, book.categories.count())
self.assertEqual(Category.objects.first(), book.categories.first())
def test_many_to_many_widget_no_changes(self):
# the book is associated with 1 category ('Category 2')
# when we import a row with a book with category 1, the book
# should be skipped, because there is no change
book = Book.objects.first()
dataset_headers = ["id", "name", "categories"]
dataset_row = [book.id, book.name, book.categories.first().id]
dataset = tablib.Dataset(headers=dataset_headers)
dataset.append(dataset_row)
book_resource = BookResource()
book_resource._meta.skip_unchanged = True
self.assertEqual(1, book.categories.count())
result = book_resource.import_data(dataset, dry_run=False)
self.assertEqual(result.rows[0].import_type, results.RowResult.IMPORT_TYPE_SKIP)
self.assertEqual(1, book.categories.count())
def test_many_to_many_widget_handles_ordering(self):
# the book is associated with 2 categories ('Category 1', 'Category 2')
# when we import a row with a book with both categories (in any order), the book
# should be skipped, because there is no change
book = Book.objects.first()
self.assertEqual(1, book.categories.count())
cat1 = Category.objects.get(name="Category 1")
cat2 = Category.objects.get(name="Category 2")
book.categories.add(cat1)
book.save()
self.assertEqual(2, book.categories.count())
dataset_headers = ["id", "name", "categories"]
book_resource = BookResource()
book_resource._meta.skip_unchanged = True
# import with natural order
dataset_row = [book.id, book.name, f"{cat1.id},{cat2.id}"]
dataset = tablib.Dataset(headers=dataset_headers)
dataset.append(dataset_row)
result = book_resource.import_data(dataset, dry_run=False)
self.assertEqual(result.rows[0].import_type, results.RowResult.IMPORT_TYPE_SKIP)
# import with reverse order
dataset_row = [book.id, book.name, f"{cat2.id},{cat1.id}"]
dataset = tablib.Dataset(headers=dataset_headers)
dataset.append(dataset_row)
result = book_resource.import_data(dataset, dry_run=False)
self.assertEqual(result.rows[0].import_type, results.RowResult.IMPORT_TYPE_SKIP)
self.assertEqual(2, book.categories.count())
def test_many_to_many_widget_handles_uuid(self):
# Test for #1435 - skip_row() handles M2M field when UUID pk used
class _UUIDBookResource(resources.ModelResource):
class Meta:
model = UUIDBook
uuid_resource = _UUIDBookResource()
uuid_resource._meta.skip_unchanged = True
cat1 = UUIDCategory.objects.create(name="Category 1")
cat2 = UUIDCategory.objects.create(name="Category 2")
uuid_book = UUIDBook.objects.create(name="uuid book")
uuid_book.categories.add(cat1, cat2)
uuid_book.save()
dataset_headers = ["id", "name", "categories"]
dataset_row = [uuid_book.id, uuid_book.name, f"{cat1.catid},{cat2.catid}"]
dataset = tablib.Dataset(headers=dataset_headers)
dataset.append(dataset_row)
result = uuid_resource.import_data(dataset, dry_run=False)
self.assertEqual(result.rows[0].import_type, results.RowResult.IMPORT_TYPE_SKIP)
def test_skip_row_no_m2m_data_supplied(self):
# issue #1437
# test skip_row() when the model defines a m2m field
# but it is not present in the dataset
book = Book.objects.first()
dataset_headers = ["id", "name"]
dataset_row = [book.id, book.name]
dataset = tablib.Dataset(headers=dataset_headers)
dataset.append(dataset_row)
book_resource = BookResource()
book_resource._meta.skip_unchanged = True
self.assertEqual(1, book.categories.count())
result = book_resource.import_data(dataset, dry_run=False)
self.assertEqual(result.rows[0].import_type, results.RowResult.IMPORT_TYPE_SKIP)
self.assertEqual(1, book.categories.count())
| ManyToManyWidgetDiffTest |
python | google__jax | jax/_src/export/_export.py | {
"start": 2385,
"end": 4078
} | class ____:
"""A safety check that should be skipped on (de)serialization.
Most of these checks are performed on serialization, but some are deferred to
deserialization. The list of disabled checks is attached to the serialization,
e.g., as a sequence of string attributes to :class:`jax.export.Exported` or of
``tf.XlaCallModuleOp``.
When using jax2tf, you can disable more deserialization safety checks
by passing ``TF_XLA_FLAGS=--tf_xla_call_module_disabled_checks=platform``.
"""
_impl: str
@classmethod
def platform(cls) -> DisabledSafetyCheck:
"""Allows the compilation platform to differ from the export platform.
Has effect only on deserialization.
"""
return DisabledSafetyCheck("platform")
@classmethod
def custom_call(cls, target_name: str) -> DisabledSafetyCheck:
"""Allows the serialization of a call target not known to be stable.
Has effect only on serialization.
Args:
target_name: the name of the custom call target to allow.
"""
return DisabledSafetyCheck(f"custom_call:{target_name}")
def is_custom_call(self) -> str | None:
"""Returns the custom call target allowed by this directive."""
m = re.match(r'custom_call:(.+)$', self._impl)
return m.group(1) if m else None
def __init__(self, _impl:str):
# Do not use directly, use builders `platform`, `custom_call`.
self._impl = _impl
def __str__(self):
return self._impl
__repr__ = __str__
def __eq__(self, other) -> bool:
return isinstance(other, DisabledSafetyCheck) and self._impl == other._impl
def __hash__(self) -> int:
return hash(self._impl)
@dataclasses.dataclass(frozen=True)
| DisabledSafetyCheck |
python | pdm-project__pdm | src/pdm/project/toml_file.py | {
"start": 189,
"end": 2330
} | class ____:
def __init__(self, path: str | Path, *, parse: bool = True, ui: termui.UI) -> None:
from tomlkit.toml_file import TOMLFile as TomlkitTOMLFile
self._file = TomlkitTOMLFile(path)
self.ui = ui
self._data = self._parse() if parse else {}
self._for_write = False
@property
def _path(self) -> Path:
return Path(self._file._path)
def _parse(self) -> dict[str, Any]:
# By default, use tomllib for parsing as it is much faster
try:
with open(self._path, "rb") as fp:
return tomllib.load(fp)
except FileNotFoundError:
return {}
def open_for_write(self) -> tomlkit.TOMLDocument:
# Ensure the document is re-parsed by tomlkit for writing with styles preserved
if self._for_write:
return cast(tomlkit.TOMLDocument, self._data)
try:
self._data = self._file.read()
except FileNotFoundError:
self._data = tomlkit.document()
self._for_write = True
return self._data
def open_for_read(self) -> dict[str, Any]:
"""Get the (read-only) data of the TOML file."""
if hasattr(self._data, "unwrap"):
return self._data.unwrap() # type: ignore[attr-defined]
return deepcopy(self._data)
def set_data(self, data: dict[str, Any]) -> None:
"""Set the data of the TOML file."""
self._data = data
self._for_write = True
def reload(self) -> None:
self._data = self._parse()
self._for_write = False
def write(self) -> None:
if not self._for_write:
raise RuntimeError("TOMLFile not opened for write. Call open_for_write() first.")
self._path.parent.mkdir(parents=True, exist_ok=True)
if isinstance(self._data, tomlkit.TOMLDocument):
data = self._data
else:
data = tomlkit.document()
data.update(self._data)
self._file.write(data)
def exists(self) -> bool:
return self._path.exists()
def empty(self) -> bool:
return not self._data
| TOMLFile |
python | pdm-project__pdm | src/pdm/installers/base.py | {
"start": 948,
"end": 10510
} | class ____:
"""Synchronize the working set with given installation candidates
:param candidates: a dict of candidates to be installed
:param environment: the environment associated with the project
:param clean: clean unneeded packages
:param dry_run: only prints summary but do not install or uninstall
:param retry_times: retry times when installation failed
:param install_self: whether to install self project
:param no_editable: if True, override all editable installations,
if a list, override editables with the given names
:param use_install_cache: whether to use install cache
:param reinstall: whether to reinstall all packages
:param only_keep: If true, only keep the selected candidates
:param fail_fast: If true, stop the installation on first error
"""
SEQUENTIAL_PACKAGES = ("pip", "setuptools", "wheel")
def __init__(
self,
environment: BaseEnvironment,
candidates: dict[str, Candidate] | None = None,
clean: bool = False,
dry_run: bool = False,
retry_times: int = 1,
install_self: bool = False,
no_editable: bool | Collection[str] = False,
reinstall: bool = False,
only_keep: bool = False,
fail_fast: bool = False,
use_install_cache: bool | None = None,
packages: Iterable[Package] = (),
requirements: Iterable[Requirement] | None = None,
) -> None:
if candidates: # pragma: no cover
self.requested_candidates = candidates
else:
self.requested_candidates = {entry.candidate.identify(): entry.candidate for entry in packages}
self.environment = environment
self.clean = clean
self.dry_run = dry_run
self.retry_times = retry_times
self.no_editable = no_editable
self.install_self = install_self
if use_install_cache is None:
use_install_cache = bool(environment.project.config["install.cache"])
self.use_install_cache = use_install_cache
self.reinstall = reinstall
self.only_keep = only_keep
self.parallel = environment.project.config["install.parallel"]
self.fail_fast = fail_fast
self.working_set = environment.get_working_set()
self.ui = environment.project.core.ui
self._manager: InstallManager | None = None
self.packages = packages
self.requirements = requirements
@cached_property
def self_candidate(self) -> Candidate:
"""Return the candidate for self project"""
return self.environment.project.make_self_candidate(not self.no_editable)
@cached_property
def candidates(self) -> dict[str, Candidate]:
"""Return the candidates to be installed"""
candidates = self.requested_candidates.copy()
requested = {
req.identify()
for req in (self.requirements or chain.from_iterable(self.environment.project.all_dependencies.values()))
}
if isinstance(self.no_editable, Collection):
keys = self.no_editable
elif self.no_editable:
keys = candidates.keys()
else:
keys = []
if self.should_install_editables():
# Install `editables` as well as required by self project
editables = editables_candidate(self.environment)
if editables is not None:
candidates["editables"] = editables
for key in keys:
if key in candidates and candidates[key].req.editable:
candidate = candidates[key]
# Create a new candidate with editable=False
req = dataclasses.replace(candidate.req, editable=False)
candidates[key] = candidate.copy_with(req)
for key in requested:
if key in candidates:
candidates[key].requested = True
return candidates
def should_install_editables(self) -> bool:
"""Return whether to add editables"""
if not self.install_self or "editables" in self.requested_candidates:
return False
# As editables may be added by the backend, we need to check the metadata
try:
metadata = self.self_candidate.prepare(self.environment).metadata
except BuildError:
return False
return any(req.startswith("editables") for req in metadata.requires or [])
@property
def manager(self) -> InstallManager:
if not self._manager:
self._manager = self.get_manager(rename_pth=True)
return self._manager
def get_manager(self, rename_pth: bool = False) -> InstallManager:
return self.environment.project.core.install_manager_class(
self.environment, use_install_cache=self.use_install_cache, rename_pth=rename_pth
)
@property
def self_key(self) -> str | None:
if not self.install_self:
return None
name = self.environment.project.name
if name:
return normalize_name(name)
return name
def _should_update(self, dist: Distribution, can: Candidate) -> bool:
"""Check if the candidate should be updated"""
backend = self.environment.project.backend
if self.reinstall or can.req.editable: # Always update if incoming is editable
return True
if is_editable(dist): # only update editable if no_editable is True
return bool(self.no_editable)
if not can.req.is_named:
dreq = Requirement.from_dist(dist)
if not isinstance(dreq, FileRequirement):
return True
url = dreq.get_full_url()
if dreq.is_local_dir:
# We don't know whether a local dir has been changed, always update
return True
assert can.link is not None
if url != backend.expand_line(can.link.url_without_fragment):
return True
direct_json = json.loads(content) if (content := dist.read_text("direct_url.json")) else None
if not direct_json or "archive_info" not in direct_json:
# We are not able to check, don't update
return False
dist_hash = direct_json["archive_info"]["hash"].replace("=", ":")
return not any(dist_hash == file_hash["hash"] for file_hash in can.hashes)
specifier = can.req.as_pinned_version(can.version).specifier
return not specifier.contains(dist.version, prereleases=True)
def compare_with_working_set(self) -> tuple[list[str], list[str], list[str]]:
"""Compares the candidates and return (to_add, to_update, to_remove)"""
working_set = self.working_set
candidates = self.candidates.copy()
to_update: set[str] = set()
to_remove: set[str] = set()
to_add: set[str] = set()
locked_repository = self.environment.project.get_locked_repository()
all_candidate_keys = list(locked_repository.all_candidates)
for key, dist in working_set.items():
if key == self.self_key and self.install_self:
continue
if key in candidates:
can = candidates.pop(key)
if self._should_update(dist, can):
if working_set.is_owned(key):
to_update.add(key)
else:
to_add.add(key)
elif (
(self.only_keep or (self.clean and key not in all_candidate_keys))
and key not in self.SEQUENTIAL_PACKAGES
and working_set.is_owned(key)
):
# Remove package only if it is not required by any group
# Packages for packaging will never be removed
to_remove.add(key)
to_add.update(
strip_extras(name)[0]
for name, _ in candidates.items()
if name != self.self_key and strip_extras(name)[0] not in working_set
)
return (sorted(to_add), sorted(to_update), sorted(to_remove))
def synchronize(self) -> None:
"""Synchronize the working set with pinned candidates."""
to_add, to_update, to_remove = self.compare_with_working_set()
manager = self.get_manager()
for key in to_add:
can = self.candidates[key]
termui.logger.info("Installing %s@%s...", key, can.version)
manager.install(can)
for key in to_update:
can = self.candidates[key]
dist = self.working_set[strip_extras(key)[0]]
dist_version = dist.version
termui.logger.info("Updating %s@%s -> %s...", key, dist_version, can.version)
manager.overwrite(dist, can)
for key in to_remove:
dist = self.working_set[key]
termui.logger.info("Removing %s@%s...", key, dist.version)
manager.uninstall(dist)
if self.install_self:
self_key = self.self_key
assert self_key
word = "a" if self.no_editable else "an editable"
termui.logger.info(f"Installing the project as {word} package...")
if self_key in self.working_set:
dist = self.working_set[strip_extras(self_key)[0]]
manager.overwrite(dist, self.self_candidate)
else:
manager.install(self.self_candidate)
termui.logger.info("Synchronization complete.")
| BaseSynchronizer |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol1.py | {
"start": 1990,
"end": 2044
} | class ____(ProtoBase1[_A, _B], Protocol[_A]): ...
| Proto4 |
python | apache__airflow | task-sdk/tests/task_sdk/bases/test_operator.py | {
"start": 40467,
"end": 40621
} | class ____(BaseOperator):
log = structlog.get_logger(__name__)
def execute(self, context):
return f"Hello {self.owner}!"
| HelloWorldOperator |
python | joke2k__faker | tests/providers/test_automotive.py | {
"start": 9968,
"end": 11097
} | class ____(_SimpleAutomotiveTestMixin):
"""Test ru_RU automotive provider methods"""
_plate_letters = "".join(RuRuAutomotiveProvider.license_plate_letters)
license_plate_pattern: Pattern = re.compile(
r"(?:"
r"(?P<private_plate_prefix>[{0}]\d\d\d[{0}][{0}])|"
r"(?P<public_transport_plate_prefix>[{0}][{0}]\d\d\d)|"
r"(?P<trailer_plate_prefix>[{0}][{0}]\d\d\d\d)|"
r"(?P<police_plate_prefix>[{0}]\d\d\d\d)|"
r"(?P<military_plate_prefix>\d\d\d\d[{0}][{0}])|"
r"(?P<plate_number_special>00\dCD\d|00\dD\d\d\d|00\dT\d\d\d)"
r") (?P<plate_suffix>.*)".format(_plate_letters),
)
def perform_extra_checks(self, license_plate, match):
plate_suffix = match.group("plate_suffix")
assert plate_suffix in RuRuAutomotiveProvider.license_plate_suffix
def test_vehicle_category(self, faker, num_samples):
for _ in range(num_samples):
vehicle_category = faker.vehicle_category()
assert isinstance(vehicle_category, str)
assert vehicle_category in RuRuAutomotiveProvider.vehicle_categories
| TestRuRu |
python | sympy__sympy | sympy/physics/biomechanics/curve.py | {
"start": 62205,
"end": 63154
} | class ____:
"""Simple data container to group together related characteristic curves."""
tendon_force_length: CharacteristicCurveFunction
tendon_force_length_inverse: CharacteristicCurveFunction
fiber_force_length_passive: CharacteristicCurveFunction
fiber_force_length_passive_inverse: CharacteristicCurveFunction
fiber_force_length_active: CharacteristicCurveFunction
fiber_force_velocity: CharacteristicCurveFunction
fiber_force_velocity_inverse: CharacteristicCurveFunction
def __iter__(self):
"""Iterator support for ``CharacteristicCurveCollection``."""
yield self.tendon_force_length
yield self.tendon_force_length_inverse
yield self.fiber_force_length_passive
yield self.fiber_force_length_passive_inverse
yield self.fiber_force_length_active
yield self.fiber_force_velocity
yield self.fiber_force_velocity_inverse
| CharacteristicCurveCollection |
python | modin-project__modin | modin/pandas/io.py | {
"start": 28418,
"end": 30834
} | class ____(ClassLogger, pandas.HDFStore): # noqa: PR01, D200
"""
Dict-like IO interface for storing pandas objects in PyTables.
"""
_return_modin_dataframe = True
def __getattribute__(self, item):
default_behaviors = ["__init__", "__class__"]
method = super(HDFStore, self).__getattribute__(item)
if item not in default_behaviors:
if callable(method):
def return_handler(*args, **kwargs):
"""
Replace the default behavior of methods with inplace kwarg.
Returns
-------
A Modin DataFrame in place of a pandas DataFrame, or the same
return type as pandas.HDFStore.
Notes
-----
This function will replace all of the arguments passed to
methods of HDFStore with the pandas equivalent. It will convert
Modin DataFrame to pandas DataFrame, etc. Currently, pytables
does not accept Modin DataFrame objects, so we must convert to
pandas.
"""
# We don't want to constantly be giving this error message for
# internal methods.
if item[0] != "_":
_maybe_warn_on_default("`{}`".format(item))
args = [
(
to_pandas(arg)
if isinstance(arg, ModinObjects.DataFrame)
else arg
)
for arg in args
]
kwargs = {
k: to_pandas(v) if isinstance(v, ModinObjects.DataFrame) else v
for k, v in kwargs.items()
}
obj = super(HDFStore, self).__getattribute__(item)(*args, **kwargs)
if self._return_modin_dataframe and isinstance(
obj, pandas.DataFrame
):
return ModinObjects.DataFrame(obj)
return obj
# We replace the method with `return_handler` for inplace operations
method = return_handler
return method
@_inherit_docstrings(pandas.ExcelFile)
| HDFStore |
python | openai__openai-python | src/openai/types/chat/chat_completion_content_part_image.py | {
"start": 247,
"end": 608
} | class ____(BaseModel):
url: str
"""Either a URL of the image or the base64 encoded image data."""
detail: Optional[Literal["auto", "low", "high"]] = None
"""Specifies the detail level of the image.
Learn more in the
[Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
"""
| ImageURL |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pep8_naming/N804.py | {
"start": 1348,
"end": 1459
} | class ____(type(foo)):
def foo_method(self):
pass
# https://github.com/astral-sh/ruff/issues/18459
| Bar |
python | encode__django-rest-framework | tests/test_validators.py | {
"start": 1668,
"end": 1745
} | class ____(models.Model):
integer = models.IntegerField()
| IntegerFieldModel |
python | agronholm__apscheduler | src/apscheduler/datastores/sqlalchemy.py | {
"start": 3550,
"end": 52626
} | class ____(BaseExternalDataStore):
"""
Uses a relational database to store data.
When started, this data store creates the appropriate tables on the given database
if they're not already present.
Operations are retried (in accordance to ``retry_settings``) when an operation
raises either :exc:`OSError` or :exc:`sqlalchemy.exc.InterfaceError`.
This store has been tested to work with:
* PostgreSQL (asyncpg and psycopg drivers)
* MySQL (asyncmy driver)
* aiosqlite (not recommended right now, as issues like
`#1032 <https://github.com/agronholm/apscheduler/issues/1032>`_ exist)
:param engine_or_url: a SQLAlchemy URL or engine (preferably asynchronous, but can
be synchronous)
:param schema: a database schema name to use, if not the default
.. note:: The data store will not manage the life cycle of any engine instance
passed to it, so you need to close the engine afterwards when you're done with
it.
.. warning:: Do not use SQLite when sharing the data store with multiple schedulers,
as there is an unresolved issue with that
(`#959 <https://github.com/agronholm/apscheduler/issues/959>`_).
"""
engine_or_url: str | URL | Engine | AsyncEngine = attrs.field(
validator=instance_of((str, URL, Engine, AsyncEngine))
)
schema: str | None = attrs.field(kw_only=True, default=None)
_engine: Engine | AsyncEngine = attrs.field(init=False)
_close_on_exit: bool = attrs.field(init=False, default=False)
_supports_update_returning: bool = attrs.field(init=False, default=False)
_supports_tzaware_timestamps: bool = attrs.field(init=False, default=False)
_supports_native_interval: bool = attrs.field(init=False, default=False)
_metadata: MetaData = attrs.field(init=False)
_t_metadata: Table = attrs.field(init=False)
_t_tasks: Table = attrs.field(init=False)
_t_schedules: Table = attrs.field(init=False)
_t_jobs: Table = attrs.field(init=False)
_t_job_results: Table = attrs.field(init=False)
def __attrs_post_init__(self) -> None:
if isinstance(self.engine_or_url, (str, URL)):
try:
self._engine = create_async_engine(self.engine_or_url)
except InvalidRequestError:
self._engine = create_engine(self.engine_or_url)
self._close_on_exit = True
else:
self._engine = self.engine_or_url
# Generate the table definitions
prefix = f"{self.schema}." if self.schema else ""
self._supports_tzaware_timestamps = self._engine.dialect.name in (
"postgresql",
"oracle",
)
self._supports_native_interval = self._engine.dialect.name == "postgresql"
self._metadata = self.get_table_definitions()
self._t_metadata = self._metadata.tables[prefix + "metadata"]
self._t_tasks = self._metadata.tables[prefix + "tasks"]
self._t_schedules = self._metadata.tables[prefix + "schedules"]
self._t_jobs = self._metadata.tables[prefix + "jobs"]
self._t_job_results = self._metadata.tables[prefix + "job_results"]
def __repr__(self) -> str:
return create_repr(self, url=repr(self._engine.url), schema=self.schema)
def _retry(self) -> tenacity.AsyncRetrying:
def after_attempt(retry_state: tenacity.RetryCallState) -> None:
self._logger.warning(
"Temporary data store error (attempt %d): %s",
retry_state.attempt_number,
retry_state.outcome.exception(),
)
# OSError is raised by asyncpg if it can't connect
return tenacity.AsyncRetrying(
stop=self.retry_settings.stop,
wait=self.retry_settings.wait,
retry=tenacity.retry_if_exception_type((InterfaceError, OSError)),
after=after_attempt,
sleep=anyio.sleep,
reraise=True,
)
@asynccontextmanager
async def _begin_transaction(
self,
) -> AsyncGenerator[Connection | AsyncConnection, None]:
# A shielded cancel scope is injected to the exit stack to allow finalization
# to occur even when the surrounding cancel scope is cancelled
async with AsyncExitStack() as exit_stack:
if isinstance(self._engine, AsyncEngine):
async_cm = self._engine.begin()
conn = await async_cm.__aenter__()
exit_stack.enter_context(CancelScope(shield=True))
exit_stack.push_async_exit(async_cm.__aexit__)
else:
cm = self._engine.begin()
conn = await to_thread.run_sync(cm.__enter__)
exit_stack.enter_context(CancelScope(shield=True))
exit_stack.push_async_exit(partial(to_thread.run_sync, cm.__exit__))
yield conn
async def _create_metadata(self, conn: Connection | AsyncConnection) -> None:
if isinstance(conn, AsyncConnection):
await conn.run_sync(self._metadata.create_all)
else:
await to_thread.run_sync(self._metadata.create_all, conn)
async def _execute(
self,
conn: Connection | AsyncConnection,
statement: Executable,
parameters: Sequence | Mapping | None = None,
):
if isinstance(conn, AsyncConnection):
return await conn.execute(statement, parameters)
else:
return await to_thread.run_sync(conn.execute, statement, parameters)
@property
def _temporary_failure_exceptions(self) -> tuple[type[Exception], ...]:
# SQlite does not use the network, so it doesn't have "temporary" failures
if self._engine.dialect.name == "sqlite":
return ()
return InterfaceError, OSError
def _convert_incoming_fire_times(self, data: dict[str, Any]) -> dict[str, Any]:
for field in ("last_fire_time", "next_fire_time"):
if not self._supports_tzaware_timestamps:
utcoffset_minutes = data.pop(f"{field}_utcoffset", None)
if utcoffset_minutes is not None:
tz = timezone(timedelta(minutes=utcoffset_minutes))
timestamp = data[field] / 1000_000
data[field] = datetime.fromtimestamp(timestamp, tz=tz)
return data
def _convert_outgoing_fire_times(self, data: dict[str, Any]) -> dict[str, Any]:
for field in ("last_fire_time", "next_fire_time"):
if not self._supports_tzaware_timestamps:
field_value = data[field]
if field_value is not None:
data[field] = int(field_value.timestamp() * 1000_000)
data[f"{field}_utcoffset"] = (
field_value.utcoffset().total_seconds() // 60
)
else:
data[f"{field}_utcoffset"] = None
return data
def get_table_definitions(self) -> MetaData:
if self._supports_tzaware_timestamps:
timestamp_type: TypeEngine[datetime] = DateTime(timezone=True)
last_fire_time_tzoffset_columns: tuple[Column, ...] = (
Column("last_fire_time", timestamp_type),
)
next_fire_time_tzoffset_columns: tuple[Column, ...] = (
Column("next_fire_time", timestamp_type, index=True),
)
else:
timestamp_type = EmulatedTimestampTZ()
last_fire_time_tzoffset_columns = (
Column("last_fire_time", BigInteger),
Column("last_fire_time_utcoffset", SmallInteger),
)
next_fire_time_tzoffset_columns = (
Column("next_fire_time", BigInteger, index=True),
Column("next_fire_time_utcoffset", SmallInteger),
)
if self._supports_native_interval:
interval_type: TypeDecorator[timedelta] = Interval(second_precision=6)
else:
interval_type = EmulatedInterval()
if self._engine.dialect.name == "postgresql":
from sqlalchemy.dialects.postgresql import JSONB
json_type = JSONB
else:
json_type = JSON
metadata = MetaData(schema=self.schema)
Table("metadata", metadata, Column("schema_version", Integer, nullable=False))
Table(
"tasks",
metadata,
Column("id", Unicode(500), primary_key=True),
Column("func", Unicode(500)),
Column("job_executor", Unicode(500), nullable=False),
Column("max_running_jobs", Integer),
Column("misfire_grace_time", interval_type),
Column("metadata", json_type, nullable=False),
Column("running_jobs", Integer, nullable=False, server_default=literal(0)),
)
Table(
"schedules",
metadata,
Column("id", Unicode(500), primary_key=True),
Column("task_id", Unicode(500), nullable=False, index=True),
Column("trigger", LargeBinary),
Column("args", LargeBinary),
Column("kwargs", LargeBinary),
Column("paused", Boolean, nullable=False, server_default=literal(False)),
Column("coalesce", Enum(CoalescePolicy, metadata=metadata), nullable=False),
Column("misfire_grace_time", interval_type),
Column("max_jitter", interval_type),
Column("job_executor", Unicode(500), nullable=False),
Column("job_result_expiration_time", interval_type),
Column("metadata", json_type, nullable=False),
*last_fire_time_tzoffset_columns,
*next_fire_time_tzoffset_columns,
Column("acquired_by", Unicode(500), index=True),
Column("acquired_until", timestamp_type),
)
Table(
"jobs",
metadata,
Column("id", Uuid, primary_key=True),
Column("task_id", Unicode(500), nullable=False, index=True),
Column("args", LargeBinary, nullable=False),
Column("kwargs", LargeBinary, nullable=False),
Column("schedule_id", Unicode(500), index=True),
Column("scheduled_fire_time", timestamp_type),
Column("executor", Unicode(500), nullable=False),
Column("jitter", interval_type),
Column("start_deadline", timestamp_type),
Column("result_expiration_time", interval_type),
Column("metadata", json_type, nullable=False),
Column("created_at", timestamp_type, nullable=False, index=True),
Column("acquired_by", Unicode(500), index=True),
Column("acquired_until", timestamp_type),
)
Table(
"job_results",
metadata,
Column("job_id", Uuid, primary_key=True),
Column("outcome", Enum(JobOutcome, metadata=metadata), nullable=False),
Column("started_at", timestamp_type, index=True),
Column("finished_at", timestamp_type, nullable=False),
Column("expires_at", timestamp_type, nullable=False, index=True),
Column("exception", LargeBinary),
Column("return_value", LargeBinary),
)
return metadata
async def start(
self, exit_stack: AsyncExitStack, event_broker: EventBroker, logger: Logger
) -> None:
asynclib = sniffio.current_async_library() or "(unknown)"
if asynclib != "asyncio":
raise RuntimeError(
f"This data store requires asyncio; currently running: {asynclib}"
)
if self._close_on_exit:
if isinstance(self._engine, AsyncEngine):
exit_stack.push_async_callback(self._engine.dispose)
else:
exit_stack.callback(self._engine.dispose)
await super().start(exit_stack, event_broker, logger)
# Verify that the schema is in place
async for attempt in self._retry():
with attempt:
async with self._begin_transaction() as conn:
# Create the schema first if it doesn't exist yet
if self.schema:
await self._execute(
conn, CreateSchema(name=self.schema, if_not_exists=True)
)
if self.start_from_scratch:
for table in self._metadata.sorted_tables:
await self._execute(conn, DropTable(table, if_exists=True))
await self._create_metadata(conn)
query = select(self._t_metadata.c.schema_version)
result = await self._execute(conn, query)
version = result.scalar()
if version is None:
await self._execute(
conn, self._t_metadata.insert(), {"schema_version": 1}
)
elif version > 1:
raise RuntimeError(
f"Unexpected schema version ({version}); "
f"only version 1 is supported by this version of "
f"APScheduler"
)
# Find out if the dialect supports UPDATE...RETURNING
async for attempt in self._retry():
with attempt:
update = (
self._t_metadata.update()
.values(schema_version=self._t_metadata.c.schema_version)
.returning(self._t_metadata.c.schema_version)
)
async with self._begin_transaction() as conn:
try:
await self._execute(conn, update)
except (CompileError, ProgrammingError):
pass # the support flag is False by default
else:
self._supports_update_returning = True
async def _deserialize_schedules(self, result: Result) -> list[Schedule]:
schedules: list[Schedule] = []
for row in result:
try:
schedules.append(
Schedule.unmarshal(
self.serializer,
self._convert_incoming_fire_times(row._asdict()),
)
)
except SerializationError as exc:
await self._event_broker.publish(
ScheduleDeserializationFailed(schedule_id=row.id, exception=exc)
)
return schedules
async def _deserialize_jobs(self, result: Result) -> list[Job]:
jobs: list[Job] = []
for row in result:
try:
jobs.append(Job.unmarshal(self.serializer, row._asdict()))
except SerializationError as exc:
await self._event_broker.publish(
JobDeserializationFailed(job_id=row.id, exception=exc)
)
return jobs
async def add_task(self, task: Task) -> None:
insert = self._t_tasks.insert().values(
id=task.id,
func=task.func,
job_executor=task.job_executor,
max_running_jobs=task.max_running_jobs,
misfire_grace_time=task.misfire_grace_time,
metadata=task.metadata,
)
try:
async for attempt in self._retry():
with attempt:
async with self._begin_transaction() as conn:
await self._execute(conn, insert)
except IntegrityError:
update = (
self._t_tasks.update()
.values(
func=task.func,
job_executor=task.job_executor,
max_running_jobs=task.max_running_jobs,
misfire_grace_time=task.misfire_grace_time,
metadata=task.metadata,
)
.where(self._t_tasks.c.id == task.id)
)
async for attempt in self._retry():
with attempt:
async with self._begin_transaction() as conn:
await self._execute(conn, update)
await self._event_broker.publish(TaskUpdated(task_id=task.id))
else:
await self._event_broker.publish(TaskAdded(task_id=task.id))
async def remove_task(self, task_id: str) -> None:
delete = self._t_tasks.delete().where(self._t_tasks.c.id == task_id)
async for attempt in self._retry():
with attempt:
async with self._begin_transaction() as conn:
result = await self._execute(conn, delete)
if result.rowcount == 0:
raise TaskLookupError(task_id)
else:
await self._event_broker.publish(TaskRemoved(task_id=task_id))
async def get_task(self, task_id: str) -> Task:
query = self._t_tasks.select().where(self._t_tasks.c.id == task_id)
async for attempt in self._retry():
with attempt:
async with self._begin_transaction() as conn:
result = await self._execute(conn, query)
row = result.first()
if row:
return Task.unmarshal(self.serializer, row._asdict())
else:
raise TaskLookupError(task_id)
async def get_tasks(self) -> list[Task]:
query = self._t_tasks.select().order_by(self._t_tasks.c.id)
async for attempt in self._retry():
with attempt:
async with self._begin_transaction() as conn:
result = await self._execute(conn, query)
tasks = [
Task.unmarshal(self.serializer, row._asdict()) for row in result
]
return tasks
async def add_schedule(
self, schedule: Schedule, conflict_policy: ConflictPolicy
) -> None:
event: DataStoreEvent
values = self._convert_outgoing_fire_times(schedule.marshal(self.serializer))
insert = self._t_schedules.insert().values(**values)
try:
async for attempt in self._retry():
with attempt:
async with self._begin_transaction() as conn:
await self._execute(conn, insert)
except IntegrityError:
if conflict_policy is ConflictPolicy.exception:
raise ConflictingIdError(schedule.id) from None
elif conflict_policy is ConflictPolicy.replace:
del values["id"]
update = (
self._t_schedules.update()
.where(self._t_schedules.c.id == schedule.id)
.values(**values)
)
async for attempt in self._retry():
with attempt:
async with self._begin_transaction() as conn:
await self._execute(conn, update)
event = ScheduleUpdated(
schedule_id=schedule.id,
task_id=schedule.task_id,
next_fire_time=schedule.next_fire_time,
)
await self._event_broker.publish(event)
else:
event = ScheduleAdded(
schedule_id=schedule.id,
task_id=schedule.task_id,
next_fire_time=schedule.next_fire_time,
)
await self._event_broker.publish(event)
async def remove_schedules(self, ids: Iterable[str]) -> None:
async for attempt in self._retry():
with attempt:
async with self._begin_transaction() as conn:
if self._supports_update_returning:
delete_returning = (
self._t_schedules.delete()
.where(self._t_schedules.c.id.in_(ids))
.returning(
self._t_schedules.c.id, self._t_schedules.c.task_id
)
)
removed_ids: list[tuple[str, str]] = [
(row[0], row[1])
for row in await self._execute(conn, delete_returning)
]
else:
query = select(
self._t_schedules.c.id, self._t_schedules.c.task_id
).where(self._t_schedules.c.id.in_(ids))
ids_to_remove: list[str] = []
removed_ids = []
for schedule_id, task_id in await self._execute(conn, query):
ids_to_remove.append(schedule_id)
removed_ids.append((schedule_id, task_id))
delete = self._t_schedules.delete().where(
self._t_schedules.c.id.in_(ids_to_remove)
)
await self._execute(conn, delete)
for schedule_id, task_id in removed_ids:
await self._event_broker.publish(
ScheduleRemoved(
schedule_id=schedule_id, task_id=task_id, finished=False
)
)
async def get_schedules(self, ids: set[str] | None = None) -> list[Schedule]:
query = self._t_schedules.select().order_by(self._t_schedules.c.id)
if ids:
query = query.where(self._t_schedules.c.id.in_(ids))
async for attempt in self._retry():
with attempt:
async with self._begin_transaction() as conn:
result = await self._execute(conn, query)
return await self._deserialize_schedules(result)
async def acquire_schedules(
self, scheduler_id: str, lease_duration: timedelta, limit: int
) -> list[Schedule]:
async for attempt in self._retry():
with attempt:
async with self._begin_transaction() as conn:
now = datetime.now(timezone.utc)
acquired_until = now + lease_duration
if self._supports_tzaware_timestamps:
comparison = self._t_schedules.c.next_fire_time <= now
else:
comparison = self._t_schedules.c.next_fire_time <= int(
now.timestamp() * 1000_000
)
schedules_cte = (
select(self._t_schedules.c.id)
.where(
and_(
self._t_schedules.c.next_fire_time.isnot(None),
comparison,
self._t_schedules.c.paused == false(),
or_(
self._t_schedules.c.acquired_by == scheduler_id,
self._t_schedules.c.acquired_until.is_(None),
self._t_schedules.c.acquired_until < now,
),
)
)
.order_by(self._t_schedules.c.next_fire_time)
.limit(limit)
.with_for_update(skip_locked=True)
.cte()
)
subselect = select(schedules_cte.c.id)
update = (
self._t_schedules.update()
.where(self._t_schedules.c.id.in_(subselect))
.values(acquired_by=scheduler_id, acquired_until=acquired_until)
)
if self._supports_update_returning:
update = update.returning(*self._t_schedules.columns)
result = await self._execute(conn, update)
else:
await self._execute(conn, update)
query = self._t_schedules.select().where(
and_(self._t_schedules.c.acquired_by == scheduler_id)
)
result = await self._execute(conn, query)
schedules = await self._deserialize_schedules(result)
return schedules
async def release_schedules(
self, scheduler_id: str, results: Sequence[ScheduleResult]
) -> None:
task_ids = {result.schedule_id: result.task_id for result in results}
next_fire_times = {
result.schedule_id: result.next_fire_time for result in results
}
async for attempt in self._retry():
with attempt:
async with self._begin_transaction() as conn:
update_events: list[ScheduleUpdated] = []
finished_schedule_ids: list[str] = []
update_args: list[dict[str, Any]] = []
for result in results:
try:
serialized_trigger = self.serializer.serialize(
result.trigger
)
except SerializationError:
self._logger.exception(
"Error serializing trigger for schedule %r – "
"removing from data store",
result.schedule_id,
)
finished_schedule_ids.append(result.schedule_id)
continue
if self._supports_tzaware_timestamps:
update_args.append(
{
"p_id": result.schedule_id,
"p_trigger": serialized_trigger,
"p_last_fire_time": result.last_fire_time,
"p_next_fire_time": result.next_fire_time,
}
)
else:
update_args.append(
{
"p_id": result.schedule_id,
"p_trigger": serialized_trigger,
**marshal_timestamp(
result.last_fire_time, "p_last_fire_time"
),
**marshal_timestamp(
result.next_fire_time, "p_next_fire_time"
),
}
)
# Update schedules
if update_args:
extra_values: dict[str, BindParameter] = {}
p_id: BindParameter = bindparam("p_id")
p_trigger: BindParameter = bindparam("p_trigger")
p_last_fire_time: BindParameter = bindparam("p_last_fire_time")
p_next_fire_time: BindParameter = bindparam("p_next_fire_time")
if not self._supports_tzaware_timestamps:
extra_values["last_fire_time_utcoffset"] = bindparam(
"p_last_fire_time_utcoffset"
)
extra_values["next_fire_time_utcoffset"] = bindparam(
"p_next_fire_time_utcoffset"
)
update = (
self._t_schedules.update()
.where(
and_(
self._t_schedules.c.id == p_id,
self._t_schedules.c.acquired_by == scheduler_id,
)
)
.values(
trigger=p_trigger,
last_fire_time=p_last_fire_time,
next_fire_time=p_next_fire_time,
acquired_by=None,
acquired_until=None,
**extra_values,
)
)
# TODO: actually check which rows were updated?
await self._execute(conn, update, update_args)
updated_ids = list(next_fire_times)
for schedule_id in updated_ids:
event = ScheduleUpdated(
schedule_id=schedule_id,
task_id=task_ids[schedule_id],
next_fire_time=next_fire_times[schedule_id],
)
update_events.append(event)
# Remove schedules that failed to serialize
if finished_schedule_ids:
delete = self._t_schedules.delete().where(
self._t_schedules.c.id.in_(finished_schedule_ids)
)
await self._execute(conn, delete)
for event in update_events:
await self._event_broker.publish(event)
for schedule_id in finished_schedule_ids:
await self._event_broker.publish(
ScheduleRemoved(
schedule_id=schedule_id,
task_id=task_ids[schedule_id],
finished=True,
)
)
async def get_next_schedule_run_time(self) -> datetime | None:
columns = [self._t_schedules.c.next_fire_time]
if not self._supports_tzaware_timestamps:
columns.append(self._t_schedules.c.next_fire_time_utcoffset)
statenent = (
select(*columns)
.where(
self._t_schedules.c.next_fire_time.isnot(None),
self._t_schedules.c.paused == false(),
self._t_schedules.c.acquired_by.is_(None),
)
.order_by(self._t_schedules.c.next_fire_time)
.limit(1)
)
async for attempt in self._retry():
with attempt:
async with self._begin_transaction() as conn:
result = await self._execute(conn, statenent)
if not self._supports_tzaware_timestamps:
if row := result.first():
tz = timezone(timedelta(minutes=row[1]))
return datetime.fromtimestamp(row[0] / 1000_000, tz=tz)
else:
return None
return result.scalar()
async def add_job(self, job: Job) -> None:
marshalled = job.marshal(self.serializer)
insert = self._t_jobs.insert().values(**marshalled)
async for attempt in self._retry():
with attempt:
async with self._begin_transaction() as conn:
await self._execute(conn, insert)
event = JobAdded(
job_id=job.id,
task_id=job.task_id,
schedule_id=job.schedule_id,
)
await self._event_broker.publish(event)
async def get_jobs(self, ids: Iterable[UUID] | None = None) -> list[Job]:
query = self._t_jobs.select().order_by(self._t_jobs.c.id)
if ids:
job_ids = [job_id for job_id in ids]
query = query.where(self._t_jobs.c.id.in_(job_ids))
async for attempt in self._retry():
with attempt:
async with self._begin_transaction() as conn:
result = await self._execute(conn, query)
return await self._deserialize_jobs(result)
async def acquire_jobs(
self, scheduler_id: str, lease_duration: timedelta, limit: int | None = None
) -> list[Job]:
events: list[JobAcquired | JobReleased] = []
async for attempt in self._retry():
with attempt:
async with self._begin_transaction() as conn:
now = datetime.now(timezone.utc)
acquired_until = now + lease_duration
query = (
select(
self._t_jobs,
self._t_tasks.c.max_running_jobs,
self._t_tasks.c.running_jobs,
)
.join(
self._t_tasks, self._t_tasks.c.id == self._t_jobs.c.task_id
)
.where(
or_(
self._t_jobs.c.acquired_until.is_(None),
self._t_jobs.c.acquired_until < now,
)
)
.order_by(self._t_jobs.c.created_at)
.with_for_update(
skip_locked=True,
of=[
self._t_tasks.c.running_jobs,
self._t_jobs.c.acquired_by,
self._t_jobs.c.acquired_until,
],
)
.limit(limit)
)
result = await self._execute(conn, query)
if not result:
return []
acquired_jobs: list[Job] = []
discarded_jobs: list[_JobDiscard] = []
task_job_slots_left: dict[str, float] = defaultdict(
lambda: float("inf")
)
running_job_count_increments: dict[str, int] = defaultdict(
lambda: 0
)
for row in result:
job_dict = row._asdict()
task_max_running_jobs = job_dict.pop("max_running_jobs")
task_running_jobs = job_dict.pop("running_jobs")
if task_max_running_jobs is not None:
task_job_slots_left.setdefault(
row.task_id, task_max_running_jobs - task_running_jobs
)
# Deserialize the job
try:
job = Job.unmarshal(self.serializer, job_dict)
except DeserializationError as exc:
# Deserialization failed, so record the exception as the job
# result
discarded_jobs.append(
_JobDiscard(
job_id=row.id,
outcome=JobOutcome.deserialization_failed,
task_id=row.task_id,
schedule_id=row.schedule_id,
scheduled_fire_time=row.scheduled_fire_time,
result_expires_at=now + row.result_expiration_time,
exception=exc,
)
)
continue
# Discard the job if its start deadline has passed
if job.start_deadline and job.start_deadline < now:
discarded_jobs.append(
_JobDiscard(
job_id=row.id,
outcome=JobOutcome.missed_start_deadline,
task_id=row.task_id,
schedule_id=row.schedule_id,
scheduled_fire_time=row.scheduled_fire_time,
result_expires_at=now + row.result_expiration_time,
)
)
continue
# Skip the job if no more slots are available
if not task_job_slots_left[job.task_id]:
self._logger.debug(
"Skipping job %s because task %r has the maximum "
"number of %d jobs already running",
job.id,
job.task_id,
task_max_running_jobs,
)
continue
task_job_slots_left[job.task_id] -= 1
running_job_count_increments[job.task_id] += 1
job.acquired_by = scheduler_id
job.acquired_until = acquired_until
acquired_jobs.append(job)
events.append(
JobAcquired.from_job(job, scheduler_id=scheduler_id)
)
if acquired_jobs:
# Mark the acquired jobs as acquired by this worker
acquired_job_ids = [job.id for job in acquired_jobs]
update = (
self._t_jobs.update()
.values(
acquired_by=scheduler_id, acquired_until=acquired_until
)
.where(self._t_jobs.c.id.in_(acquired_job_ids))
)
await self._execute(conn, update)
# Increment the running job counters on each task
p_id: BindParameter = bindparam("p_id")
p_increment: BindParameter = bindparam("p_increment")
params = [
{"p_id": task_id, "p_increment": increment}
for task_id, increment in running_job_count_increments.items()
]
update = (
self._t_tasks.update()
.values(
running_jobs=self._t_tasks.c.running_jobs + p_increment
)
.where(self._t_tasks.c.id == p_id)
)
await self._execute(conn, update, params)
# Discard the jobs that could not start
for discard in discarded_jobs:
result = JobResult(
job_id=discard.job_id,
outcome=discard.outcome,
finished_at=now,
expires_at=discard.result_expires_at,
exception=discard.exception,
)
events.append(
await self._release_job(
conn,
result,
scheduler_id,
discard.task_id,
discard.schedule_id,
discard.scheduled_fire_time,
decrement_running_job_count=False,
)
)
# Publish the appropriate events
for event in events:
await self._event_broker.publish(event)
return acquired_jobs
async def _release_job(
self,
conn: Connection | AsyncConnection,
result: JobResult,
scheduler_id: str,
task_id: str,
schedule_id: str | None = None,
scheduled_fire_time: datetime | None = None,
*,
decrement_running_job_count: bool = True,
) -> JobReleased:
# Record the job result
if result.expires_at > result.finished_at:
marshalled = result.marshal(self.serializer)
insert = self._t_job_results.insert().values(**marshalled)
await self._execute(conn, insert)
# Decrement the number of running jobs for this task
if decrement_running_job_count:
update = (
self._t_tasks.update()
.values(running_jobs=self._t_tasks.c.running_jobs - 1)
.where(self._t_tasks.c.id == task_id)
)
await self._execute(conn, update)
# Delete the job
delete = self._t_jobs.delete().where(self._t_jobs.c.id == result.job_id)
await self._execute(conn, delete)
# Create the event, to be sent after commit
return JobReleased.from_result(
result, scheduler_id, task_id, schedule_id, scheduled_fire_time
)
async def release_job(self, scheduler_id: str, job: Job, result: JobResult) -> None:
async for attempt in self._retry():
with attempt:
async with self._begin_transaction() as conn:
event = await self._release_job(
conn,
result,
scheduler_id,
job.task_id,
job.schedule_id,
job.scheduled_fire_time,
)
# Notify other schedulers
await self._event_broker.publish(event)
async def get_job_result(self, job_id: UUID) -> JobResult | None:
async for attempt in self._retry():
with attempt:
async with self._begin_transaction() as conn:
# Retrieve the result
query = self._t_job_results.select().where(
self._t_job_results.c.job_id == job_id
)
if row := (await self._execute(conn, query)).one_or_none():
# Delete the result
delete = self._t_job_results.delete().where(
self._t_job_results.c.job_id == job_id
)
await self._execute(conn, delete)
return JobResult.unmarshal(self.serializer, row._asdict()) if row else None
async def extend_acquired_schedule_leases(
self, scheduler_id: str, schedule_ids: set[str], duration: timedelta
) -> None:
async for attempt in self._retry():
with attempt:
async with self._begin_transaction() as conn:
new_acquired_until = datetime.now(timezone.utc) + duration
update = (
self._t_schedules.update()
.values(acquired_until=new_acquired_until)
.where(
self._t_schedules.c.acquired_by == scheduler_id,
self._t_schedules.c.id.in_(schedule_ids),
)
)
await self._execute(conn, update)
async def extend_acquired_job_leases(
self, scheduler_id: str, job_ids: set[UUID], duration: timedelta
) -> None:
async for attempt in self._retry():
with attempt:
async with self._begin_transaction() as conn:
new_acquired_until = datetime.now(timezone.utc) + duration
update = (
self._t_jobs.update()
.values(acquired_until=new_acquired_until)
.where(
self._t_jobs.c.acquired_by == scheduler_id,
self._t_jobs.c.id.in_(job_ids),
)
)
await self._execute(conn, update)
    async def reap_abandoned_jobs(self, scheduler_id: str) -> None:
        """Release every job still marked as acquired by *scheduler_id*.

        Each matching job is released with the ``abandoned`` outcome, and the
        corresponding ``JobReleased`` events are published after the
        transaction has committed.

        NOTE(review): presumably called during scheduler start-up/recovery to
        clean up jobs left over from a previous run — confirm against callers.
        """
        # Lock the leftover job rows for the duration of the transaction so
        # no other scheduler can concurrently modify them
        query = (
            select(self._t_jobs)
            .where(self._t_jobs.c.acquired_by == scheduler_id)
            .with_for_update()
        )
        async for attempt in self._retry():
            # Collected inside the attempt so a retried transaction does not
            # publish duplicate events
            events: list[JobReleased] = []
            with attempt:
                async with self._begin_transaction() as conn:
                    if results := await self._execute(conn, query):
                        for row in results:
                            job_dict = self._convert_incoming_fire_times(row._asdict())
                            # Blank out args/kwargs: only the job's metadata is
                            # needed to release it, so the (potentially large)
                            # serialized call arguments are not deserialized
                            job = Job.unmarshal(
                                self.serializer, {**job_dict, "args": (), "kwargs": {}}
                            )
                            result = JobResult.from_job(job, JobOutcome.abandoned)
                            # Stores the result, deletes the job row and builds
                            # the event to be published post-commit
                            event = await self._release_job(
                                conn,
                                result,
                                scheduler_id,
                                job.task_id,
                                job.schedule_id,
                                job.scheduled_fire_time,
                            )
                            events.append(event)

        # Publish the release events only after the transaction has committed
        for event in events:
            await self._event_broker.publish(event)
    async def cleanup(self) -> None:
        """Perform periodic maintenance on the data store.

        In a single transaction:

        1. Deletes job results whose ``expires_at`` has passed.
        2. Releases (with the ``abandoned`` outcome) jobs whose acquisition
           lease has expired, e.g. because the owning scheduler died.
        3. Deletes finished schedules (``next_fire_time`` is NULL) that no
           longer have any jobs, emitting a ``ScheduleRemoved`` event for each.

        All resulting events are published only after the transaction commits.
        """
        async for attempt in self._retry():
            with attempt:
                # Collected per attempt so a retried transaction does not
                # publish duplicate events
                events: list[Event] = []
                async with self._begin_transaction() as conn:
                    # Purge expired job results
                    delete = self._t_job_results.delete().where(
                        self._t_job_results.c.expires_at <= datetime.now(timezone.utc)
                    )
                    await self._execute(conn, delete)

                    # Finish any jobs whose leases have expired
                    now = datetime.now(timezone.utc)
                    query = select(
                        self._t_jobs.c.id,
                        self._t_jobs.c.task_id,
                        self._t_jobs.c.schedule_id,
                        self._t_jobs.c.scheduled_fire_time,
                        self._t_jobs.c.acquired_by,
                        self._t_jobs.c.result_expiration_time,
                    ).where(
                        self._t_jobs.c.acquired_by.isnot(None),
                        self._t_jobs.c.acquired_until < now,
                    )
                    for row in await self._execute(conn, query):
                        # Synthesize an "abandoned" result on behalf of the
                        # scheduler that lost the lease
                        result = JobResult(
                            job_id=row.id,
                            outcome=JobOutcome.abandoned,
                            finished_at=now,
                            expires_at=now + row.result_expiration_time,
                        )
                        events.append(
                            await self._release_job(
                                conn,
                                result,
                                row.acquired_by,
                                row.task_id,
                                row.schedule_id,
                                row.scheduled_fire_time,
                            )
                        )

                    # Clean up finished schedules that have no running jobs
                    # (outer join + IS NULL finds schedules with zero job rows)
                    query = (
                        select(self._t_schedules.c.id, self._t_schedules.c.task_id)
                        .outerjoin(
                            self._t_jobs,
                            self._t_jobs.c.schedule_id == self._t_schedules.c.id,
                        )
                        .where(
                            self._t_schedules.c.next_fire_time.is_(None),
                            self._t_jobs.c.id.is_(None),
                        )
                    )
                    results = await self._execute(conn, query)
                    # Maps schedule ID -> task ID for the schedules to remove
                    if finished_schedule_ids := dict(results.all()):
                        delete = self._t_schedules.delete().where(
                            self._t_schedules.c.id.in_(finished_schedule_ids)
                        )
                        await self._execute(conn, delete)

                        for schedule_id, task_id in finished_schedule_ids.items():
                            events.append(
                                ScheduleRemoved(
                                    schedule_id=schedule_id,
                                    task_id=task_id,
                                    finished=True,
                                )
                            )

                # Publish any events produced from the operations
                for event in events:
                    await self._event_broker.publish(event)
| SQLAlchemyDataStore |
python | hynek__structlog | src/structlog/processors.py | {
"start": 7490,
"end": 8390
class ____:
    """
    Decode byte string values in ``event_dict``.
    Args:
        encoding: Encoding to decode from (default: ``"utf-8"``).
        errors: How to cope with encoding errors (default: ``"replace"``).
    Useful to prevent ``b"abc"`` being rendered as ``'b"abc"'``.
    Just put it in the processor chain before the renderer.
    .. versionadded:: 15.4.0
    """
    # Encoding name passed to bytes.decode()
    _encoding: str
    # Error-handling scheme passed to bytes.decode() (e.g. "replace")
    _errors: str
    def __init__(
        self, encoding: str = "utf-8", errors: str = "replace"
    ) -> None:
        self._encoding = encoding
        self._errors = errors
    def __call__(
        self, logger: WrappedLogger, name: str, event_dict: EventDict
    ) -> EventDict:
        # Decode bytes values in place; non-bytes values are left untouched
        for key, value in event_dict.items():
            if isinstance(value, bytes):
                event_dict[key] = value.decode(self._encoding, self._errors)
        return event_dict
| UnicodeDecoder |
python | pallets__werkzeug | tests/test_routing.py | {
"start": 53295,
"end": 53744
class ____(r.BaseConverter):
    """Converter whose matching regex is supplied as the first rule argument."""
    def __init__(self, url_map, *items):
        r.BaseConverter.__init__(self, url_map)
        # The first positional argument from the rule becomes the regex
        self.regex = items[0]
def test_regex():
    """A converter registered under ``regex`` matches via its custom pattern."""
    routing_map = r.Map(
        [
            r.Rule(r"/<regex('[^/:]+\.[^/:]+'):value>", endpoint="regex"),
        ],
        converters={"regex": RegexConverter},
    )
    endpoint, arguments = routing_map.bind("localhost").match("/asdfsa.asdfs")
    assert endpoint == "regex"
    assert arguments == {"value": "asdfsa.asdfs"}
| RegexConverter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.