language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | run-llama__llama_index | llama-index-integrations/memory/llama-index-memory-bedrock-agentcore/llama_index/memory/bedrock_agentcore/base.py | {
"start": 771,
"end": 4578
} | class ____(BaseMemory):
"""Base class for Bedrock Agent Core Memory."""
_config: Any = PrivateAttr()
_client: Any = PrivateAttr()
_boto_client_kwargs: Any = PrivateAttr()
def __init__(self, client: Any) -> None:
super().__init__()
if client is not None:
self._client = client
def create_event(
self,
memory_id: str,
actor_id: str,
messages: List[ChatMessage],
session_id: str,
) -> None:
if self._client is None:
raise ValueError("Client is not initialized")
if len(messages) == 0:
raise ValueError("The messages field cannot be empty")
payload = convert_messages_to_event_payload(messages)
if payload:
response = self._client.create_event(
memoryId=memory_id,
actorId=actor_id,
sessionId=session_id,
payload=payload,
eventTimestamp=datetime.now(timezone.utc),
)
event_id = response["event"]["eventId"]
if not event_id:
raise RuntimeError("Bedrock AgentCore did not return an event ID")
def list_events(
self, memory_id: str, session_id: str, actor_id: str
) -> List[ChatMessage]:
def fetch_messages(max_results: int, next_token: str = None) -> tuple:
response = self._client.list_events(
memoryId=memory_id,
sessionId=session_id,
actorId=actor_id,
includePayloads=True,
maxResults=max_results,
**({"nextToken": next_token} if next_token else {}),
)
messages = convert_events_to_messages(list(reversed(response["events"])))
return messages, response.get("nextToken")
def has_user_message(messages) -> bool:
return any(msg.role == MessageRole.USER for msg in messages)
initial_max_results = 20
# If user is not the first message, then we need to find the closest User message to construct the oldest conversation in the batch
iterative_max_results = 3
all_messages = []
found_user = False
next_token = None
# Initial fetch with larger batch
messages, next_token = fetch_messages(initial_max_results)
all_messages.extend(messages)
if len(messages) < 0:
return []
# Check if first message is a USER msg. If it's not, some LLMs will throw an exception.
elif messages[0].role == MessageRole.USER:
found_user = True
found_user = has_user_message(messages)
# Keep fetching until we find a user message
while not found_user and next_token:
messages, next_token = fetch_messages(iterative_max_results, next_token)
if has_user_message(messages):
found_user = True
all_messages[:0] = messages
# Remove leading non-user messages
while all_messages[0].role != MessageRole.USER:
all_messages.pop(0)
return all_messages
def retrieve_memories(
self,
memory_id: str,
search_criteria: Dict[str, Any],
max_results: int = 20,
namespace: Optional[str] = "/",
) -> List[Dict[str, Any]]:
response = self._client.retrieve_memory_records(
memoryId=memory_id,
namespace=namespace,
searchCriteria=search_criteria,
maxResults=max_results,
)
memmory_record_summaries = response["memoryRecordSummaries"]
memory_content = []
for summary in memmory_record_summaries:
memory_content.append(summary["content"])
return memory_content
| BaseAgentCoreMemory |
python | python__mypy | mypy/nodes.py | {
"start": 36906,
"end": 40030
} | class ____(SymbolNode, Statement):
"""A decorated function.
A single Decorator object can include any number of function decorators.
"""
__slots__ = ("func", "decorators", "original_decorators", "var", "is_overload")
__match_args__ = ("decorators", "var", "func")
func: FuncDef # Decorated function
decorators: list[Expression] # Decorators (may be empty)
# Some decorators are removed by semanal, keep the original here.
original_decorators: list[Expression]
# TODO: This is mostly used for the type; consider replacing with a 'type' attribute
var: Var # Represents the decorated function obj
is_overload: bool
def __init__(self, func: FuncDef, decorators: list[Expression], var: Var) -> None:
super().__init__()
self.func = func
self.decorators = decorators
self.original_decorators = decorators.copy()
self.var = var
self.is_overload = False
@property
def name(self) -> str:
return self.func.name
@property
def fullname(self) -> str:
return self.func.fullname
@property
def is_final(self) -> bool:
return self.func.is_final
@property
def info(self) -> TypeInfo:
return self.func.info
@property
def type(self) -> mypy.types.Type | None:
return self.var.type
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_decorator(self)
def serialize(self) -> JsonDict:
return {
".class": "Decorator",
"func": self.func.serialize(),
"var": self.var.serialize(),
"is_overload": self.is_overload,
}
@classmethod
def deserialize(cls, data: JsonDict) -> Decorator:
assert data[".class"] == "Decorator"
dec = Decorator(FuncDef.deserialize(data["func"]), [], Var.deserialize(data["var"]))
dec.is_overload = data["is_overload"]
return dec
def write(self, data: WriteBuffer) -> None:
write_tag(data, DECORATOR)
self.func.write(data)
self.var.write(data)
write_bool(data, self.is_overload)
write_tag(data, END_TAG)
@classmethod
def read(cls, data: ReadBuffer) -> Decorator:
assert read_tag(data) == FUNC_DEF
func = FuncDef.read(data)
assert read_tag(data) == VAR
var = Var.read(data)
dec = Decorator(func, [], var)
dec.is_overload = read_bool(data)
assert read_tag(data) == END_TAG
return dec
def is_dynamic(self) -> bool:
return self.func.is_dynamic()
VAR_FLAGS: Final = [
"is_self",
"is_cls",
"is_initialized_in_class",
"is_staticmethod",
"is_classmethod",
"is_property",
"is_settable_property",
"is_suppressed_import",
"is_classvar",
"is_abstract_var",
"is_final",
"is_index_var",
"final_unset_in_class",
"final_set_in_init",
"explicit_self_type",
"is_ready",
"is_inferred",
"invalid_partial_type",
"from_module_getattr",
"has_explicit_value",
"allow_incompatible_override",
]
| Decorator |
python | pyqtgraph__pyqtgraph | pyqtgraph/examples/DateAxisItem_QtDesigner.py | {
"start": 587,
"end": 1412
} | class ____(QtWidgets.QMainWindow, Design):
def __init__(self):
super().__init__()
self.setupUi(self)
now = time.time()
# Plot random values with timestamps in the last 6 months
timestamps = np.linspace(now - 6*30*24*3600, now, 100)
self.curve = self.plotWidget.plot(x=timestamps, y=np.random.rand(100),
symbol='o', symbolSize=5, pen=BLUE)
# 'o' circle 't' triangle 'd' diamond '+' plus 's' square
self.plotWidget.setAxisItems({'bottom': pg.DateAxisItem()})
self.plotWidget.showGrid(x=True, y=True)
app = pg.mkQApp("DateAxisItem_QtDesigner Example")
window = ExampleApp()
window.setWindowTitle('pyqtgraph example: DateAxisItem_QtDesigner')
window.show()
if __name__ == '__main__':
pg.exec()
| ExampleApp |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/waiters/test_comprehend.py | {
"start": 1621,
"end": 3170
} | class ____(TestComprehendCustomWaitersBase):
WAITER_NAME = "pii_entities_detection_job_complete"
@pytest.fixture
def mock_get_job(self):
with mock.patch.object(self.client, "describe_pii_entities_detection_job") as mock_getter:
yield mock_getter
@pytest.mark.parametrize("state", ComprehendStartPiiEntitiesDetectionJobCompletedSensor.SUCCESS_STATES)
def test_pii_entities_detection_job_complete(self, state, mock_get_job):
mock_get_job.return_value = {"PiiEntitiesDetectionJobProperties": {"JobStatus": state}}
ComprehendHook().get_waiter(self.WAITER_NAME).wait(JobId="job_id")
@pytest.mark.parametrize("state", ComprehendStartPiiEntitiesDetectionJobCompletedSensor.FAILURE_STATES)
def test_pii_entities_detection_job_failed(self, state, mock_get_job):
mock_get_job.return_value = {"PiiEntitiesDetectionJobProperties": {"JobStatus": state}}
with pytest.raises(botocore.exceptions.WaiterError):
ComprehendHook().get_waiter(self.WAITER_NAME).wait(JobId="job_id")
def test_pii_entities_detection_job_wait(self, mock_get_job):
wait = {"PiiEntitiesDetectionJobProperties": {"JobStatus": "IN_PROGRESS"}}
success = {"PiiEntitiesDetectionJobProperties": {"JobStatus": "COMPLETED"}}
mock_get_job.side_effect = [wait, wait, success]
ComprehendHook().get_waiter(self.WAITER_NAME).wait(
JobId="job_id", WaiterConfig={"Delay": 0.01, "MaxAttempts": 3}
)
| TestComprehendStartPiiEntitiesDetectionJobCompleteWaiter |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/executors/ecs/ecs_executor.py | {
"start": 2543,
"end": 27461
} | class ____(BaseExecutor):
"""
Executes the provided Airflow command on an ECS instance.
The Airflow Scheduler creates a shell command, and passes it to the executor. This ECS Executor
runs said Airflow command on a remote Amazon ECS Cluster with a task-definition configured to
launch the same containers as the Scheduler. It then periodically checks in with the launched
tasks (via task ARNs) to determine the status.
This allows individual tasks to specify CPU, memory, GPU, env variables, etc. When initializing a task,
there's an option for "executor config" which should be a dictionary with keys that match the
``ContainerOverride`` definition per AWS documentation (see link below).
Prerequisite: proper configuration of Boto3 library
.. seealso:: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html for
authentication and access-key management. You can store an environmental variable, setup aws config from
console, or use IAM roles.
.. seealso:: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerOverride.html for an
Airflow TaskInstance's executor_config.
"""
# AWS limits the maximum number of ARNs in the describe_tasks function.
DESCRIBE_TASKS_BATCH_SIZE = 99
if TYPE_CHECKING and AIRFLOW_V_3_0_PLUS:
# In the v3 path, we store workloads, not commands as strings.
# TODO: TaskSDK: move this type change into BaseExecutor
queued_tasks: dict[TaskInstanceKey, workloads.All] # type: ignore[assignment]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.active_workers: EcsTaskCollection = EcsTaskCollection()
self.pending_tasks: deque = deque()
# Check if self has the ExecutorConf set on the self.conf attribute, and if not, set it to the global
# configuration object. This allows the changes to be backwards compatible with older versions of
# Airflow.
# Can be removed when minimum supported provider version is equal to the version of core airflow
# which introduces multi-team configuration.
if not hasattr(self, "conf"):
from airflow.configuration import conf
self.conf = conf
self.cluster = self.conf.get(CONFIG_GROUP_NAME, AllEcsConfigKeys.CLUSTER)
self.container_name = self.conf.get(CONFIG_GROUP_NAME, AllEcsConfigKeys.CONTAINER_NAME)
self.attempts_since_last_successful_connection = 0
self.load_ecs_connection(check_connection=False)
self.IS_BOTO_CONNECTION_HEALTHY = False
self.run_task_kwargs = self._load_run_kwargs()
# Maximum number of retries to run an ECS task.
self.max_run_task_attempts = self.conf.get(
CONFIG_GROUP_NAME,
AllEcsConfigKeys.MAX_RUN_TASK_ATTEMPTS,
fallback=CONFIG_DEFAULTS[AllEcsConfigKeys.MAX_RUN_TASK_ATTEMPTS],
)
def queue_workload(self, workload: workloads.All, session: Session | None) -> None:
from airflow.executors import workloads
if not isinstance(workload, workloads.ExecuteTask):
raise RuntimeError(f"{type(self)} cannot handle workloads of type {type(workload)}")
ti = workload.ti
self.queued_tasks[ti.key] = workload
def _process_workloads(self, workloads: Sequence[workloads.All]) -> None:
from airflow.executors.workloads import ExecuteTask
# Airflow V3 version
for w in workloads:
if not isinstance(w, ExecuteTask):
raise RuntimeError(f"{type(self)} cannot handle workloads of type {type(w)}")
command = [w]
key = w.ti.key
queue = w.ti.queue
executor_config = w.ti.executor_config or {}
del self.queued_tasks[key]
self.execute_async(key=key, command=command, queue=queue, executor_config=executor_config) # type: ignore[arg-type]
self.running.add(key)
def start(self):
"""Call this when the Executor is run for the first time by the scheduler."""
check_health = self.conf.getboolean(
CONFIG_GROUP_NAME, AllEcsConfigKeys.CHECK_HEALTH_ON_STARTUP, fallback=False
)
if not check_health:
return
self.log.info("Starting ECS Executor and determining health...")
try:
self.check_health()
except AirflowException:
self.log.error("Stopping the Airflow Scheduler from starting until the issue is resolved.")
raise
def check_health(self):
"""
Make a test API call to check the health of the ECS Executor.
Deliberately use an invalid task ID, some potential outcomes in order:
1. `AccessDeniedException` is raised if there are insufficient permissions.
2. `ClusterNotFoundException` is raised if permissions exist but the cluster does not.
3. The API responds with a failure message if the cluster is found and there
are permissions, but the cluster itself has issues.
4. `InvalidParameterException` is raised if the permissions and cluster exist but the task does not.
The last one is considered a success state for the purposes of this check.
"""
success_status = "succeeded."
status = success_status
try:
invalid_task_id = "a" * 32
self.ecs.stop_task(cluster=self.cluster, task=invalid_task_id)
# If it got this far, something is wrong. stop_task() called with an
# invalid taskID should have thrown a ClientError. All known reasons are
# covered in the ``except`` block below, and this should never be reached.
status = "failed for an unknown reason. "
except ClientError as ex:
error_code = ex.response["Error"]["Code"]
error_message = ex.response["Error"]["Message"]
if ("InvalidParameterException" in error_code) and ("task was not found" in error_message):
# This failure is expected, and means we're healthy
pass
else:
# Catch all for unexpected failures
status = f"failed because: {error_message}. "
except Exception as e:
# Any non-ClientError exceptions. This can include Botocore exceptions for example
status = f"failed because: {e}. "
finally:
msg_prefix = "ECS Executor health check has %s"
if status == success_status:
self.IS_BOTO_CONNECTION_HEALTHY = True
self.log.info(msg_prefix, status)
else:
msg_error_suffix = (
"The ECS executor will not be able to run Airflow tasks until the issue is addressed."
)
raise AirflowException(msg_prefix % status + msg_error_suffix)
def load_ecs_connection(self, check_connection: bool = True):
self.log.info("Loading Connection information")
aws_conn_id = self.conf.get(
CONFIG_GROUP_NAME,
AllEcsConfigKeys.AWS_CONN_ID,
fallback=CONFIG_DEFAULTS[AllEcsConfigKeys.AWS_CONN_ID],
)
region_name = self.conf.get(CONFIG_GROUP_NAME, AllEcsConfigKeys.REGION_NAME, fallback=None)
self.ecs = EcsHook(aws_conn_id=aws_conn_id, region_name=region_name).conn
self.attempts_since_last_successful_connection += 1
self.last_connection_reload = timezone.utcnow()
if check_connection:
self.check_health()
self.attempts_since_last_successful_connection = 0
def sync(self):
if not self.IS_BOTO_CONNECTION_HEALTHY:
exponential_backoff_retry(
self.last_connection_reload,
self.attempts_since_last_successful_connection,
self.load_ecs_connection,
)
if not self.IS_BOTO_CONNECTION_HEALTHY:
return
try:
self.sync_running_tasks()
self.attempt_task_runs()
except (ClientError, NoCredentialsError) as error:
error_code = error.response["Error"]["Code"]
if error_code in INVALID_CREDENTIALS_EXCEPTIONS:
self.IS_BOTO_CONNECTION_HEALTHY = False
self.log.warning(
"AWS credentials are either missing or expired: %s.\nRetrying connection", error
)
except Exception:
# We catch any and all exceptions because otherwise they would bubble
# up and kill the scheduler process
self.log.exception("Failed to sync %s", self.__class__.__name__)
def sync_running_tasks(self):
"""Check and update state on all running tasks."""
all_task_arns = self.active_workers.get_all_arns()
if not all_task_arns:
self.log.debug("No active Airflow tasks, skipping sync.")
return
describe_tasks_response = self.__describe_tasks(all_task_arns)
self.log.debug("Active Workers: %s", describe_tasks_response)
if describe_tasks_response["failures"]:
for failure in describe_tasks_response["failures"]:
self.__handle_failed_task(failure["arn"], failure["reason"])
updated_tasks = describe_tasks_response["tasks"]
for task in updated_tasks:
self.__update_running_task(task)
def __update_running_task(self, task):
self.active_workers.update_task(task)
# Get state of current task.
task_state = task.get_task_state()
task_key = self.active_workers.arn_to_key[task.task_arn]
# Mark finished tasks as either a success/failure.
if task_state == State.FAILED or task_state == State.REMOVED:
self.__log_container_failures(task_arn=task.task_arn)
self.__handle_failed_task(task.task_arn, task.stopped_reason)
elif task_state == State.SUCCESS:
self.log.debug(
"Airflow task %s marked as %s after running on ECS Task (arn) %s",
task_key,
task_state,
task.task_arn,
)
self.success(task_key)
self.active_workers.pop_by_key(task_key)
def __describe_tasks(self, task_arns):
all_task_descriptions = {"tasks": [], "failures": []}
for i in range(0, len(task_arns), self.DESCRIBE_TASKS_BATCH_SIZE):
batched_task_arns = task_arns[i : i + self.DESCRIBE_TASKS_BATCH_SIZE]
if not batched_task_arns:
continue
boto_describe_tasks = self.ecs.describe_tasks(tasks=batched_task_arns, cluster=self.cluster)
describe_tasks_response = BotoDescribeTasksSchema().load(boto_describe_tasks)
all_task_descriptions["tasks"].extend(describe_tasks_response["tasks"])
all_task_descriptions["failures"].extend(describe_tasks_response["failures"])
return all_task_descriptions
def __log_container_failures(self, task_arn: str):
"""Check if the task failed due to issues with the containers."""
containers = self.active_workers.task_by_arn(task_arn).containers
has_exit_codes = all(["exit_code" in x for x in containers])
if not has_exit_codes:
return ""
reasons = [
f"{container['container_arn']} - {container['reason']}"
for container in containers
if "reason" in container
]
if reasons:
self.log.warning(
"The ECS task failed due to the following containers failing:\n%s", "\n".join(reasons)
)
def __handle_failed_task(self, task_arn: str, reason: str):
"""
If an API failure occurs, the task is rescheduled.
This function will determine whether the task has been attempted the appropriate number
of times, and determine whether the task should be marked failed or not. The task will
be removed active_workers, and marked as FAILED, or set into pending_tasks depending on
how many times it has been retried.
"""
task_key = self.active_workers.arn_to_key[task_arn]
task_info = self.active_workers.info_by_key(task_key)
task_cmd = task_info.cmd
queue = task_info.queue
exec_info = task_info.config
failure_count = self.active_workers.failure_count_by_key(task_key)
if int(failure_count) < int(self.max_run_task_attempts):
self.log.warning(
"Airflow task %s failed due to %s. Failure %s out of %s occurred on %s. Rescheduling.",
task_key,
reason,
failure_count,
self.max_run_task_attempts,
task_arn,
)
self.pending_tasks.append(
EcsQueuedTask(
task_key,
task_cmd,
queue,
exec_info,
failure_count + 1,
timezone.utcnow() + calculate_next_attempt_delay(failure_count),
)
)
else:
self.log.error(
"Airflow task %s has failed a maximum of %s times. Marking as failed",
task_key,
failure_count,
)
self.fail(task_key)
self.active_workers.pop_by_key(task_key)
def attempt_task_runs(self):
"""
Take tasks from the pending_tasks queue, and attempts to find an instance to run it on.
If the launch type is EC2, this will attempt to place tasks on empty EC2 instances. If
there are no EC2 instances available, no task is placed and this function will be
called again in the next heart-beat.
If the launch type is FARGATE, this will run the tasks on new AWS Fargate instances.
"""
queue_len = len(self.pending_tasks)
failure_reasons = defaultdict(int)
for _ in range(queue_len):
ecs_task = self.pending_tasks.popleft()
task_key = ecs_task.key
cmd = ecs_task.command
queue = ecs_task.queue
exec_config = ecs_task.executor_config
attempt_number = ecs_task.attempt_number
failure_reasons = []
if timezone.utcnow() < ecs_task.next_attempt_time:
self.pending_tasks.append(ecs_task)
continue
try:
run_task_response = self._run_task(task_key, cmd, queue, exec_config)
except NoCredentialsError:
self.pending_tasks.append(ecs_task)
raise
except ClientError as e:
error_code = e.response["Error"]["Code"]
if error_code in INVALID_CREDENTIALS_EXCEPTIONS:
self.pending_tasks.append(ecs_task)
raise
failure_reasons.append(str(e))
except Exception as e:
# Failed to even get a response back from the Boto3 API or something else went
# wrong. For any possible failure we want to add the exception reasons to the
# failure list so that it is logged to the user and most importantly the task is
# added back to the pending list to be retried later.
failure_reasons.append(str(e))
else:
# We got a response back, check if there were failures. If so, add them to the
# failures list so that it is logged to the user and most importantly the task
# is added back to the pending list to be retried later.
if run_task_response["failures"]:
failure_reasons.extend([f["reason"] for f in run_task_response["failures"]])
if failure_reasons:
# Make sure the number of attempts does not exceed max_run_task_attempts
if int(attempt_number) < int(self.max_run_task_attempts):
ecs_task.attempt_number += 1
ecs_task.next_attempt_time = timezone.utcnow() + calculate_next_attempt_delay(
attempt_number
)
self.pending_tasks.append(ecs_task)
else:
reasons_str = ", ".join(failure_reasons)
self.log.error(
"ECS task %s has failed a maximum of %s times. Marking as failed. Reasons: %s",
task_key,
attempt_number,
reasons_str,
)
self.log_task_event(
event="ecs task submit failure",
ti_key=task_key,
extra=(
f"Task could not be queued after {attempt_number} attempts. "
f"Marking as failed. Reasons: {reasons_str}"
),
)
self.fail(task_key)
elif not run_task_response["tasks"]:
self.log.error("ECS RunTask Response: %s", run_task_response)
self.log_task_event(
event="ecs task submit failure",
extra=f"ECS RunTask Response: {run_task_response}",
ti_key=task_key,
)
raise EcsExecutorException(
"No failures and no ECS tasks provided in response. This should never happen."
)
else:
task = run_task_response["tasks"][0]
self.active_workers.add_task(task, task_key, queue, cmd, exec_config, attempt_number)
self.running_state(task_key, task.task_arn)
def _run_task(
self, task_id: TaskInstanceKey, cmd: CommandType, queue: str, exec_config: ExecutorConfigType
):
"""
Run a queued-up Airflow task.
Not to be confused with execute_async() which inserts tasks into the queue.
The command and executor config will be placed in the container-override
section of the JSON request before calling Boto3's "run_task" function.
"""
run_task_kwargs = self._run_task_kwargs(task_id, cmd, queue, exec_config)
boto_run_task = self.ecs.run_task(**run_task_kwargs)
run_task_response = BotoRunTaskSchema().load(boto_run_task)
return run_task_response
def _run_task_kwargs(
self, task_id: TaskInstanceKey, cmd: CommandType, queue: str, exec_config: ExecutorConfigType
) -> dict:
"""
Update the Airflow command by modifying container overrides for task-specific kwargs.
One last chance to modify Boto3's "run_task" kwarg params before it gets passed into the Boto3 client.
"""
run_task_kwargs = deepcopy(self.run_task_kwargs)
run_task_kwargs = merge_dicts(run_task_kwargs, exec_config)
container_override = self.get_container(run_task_kwargs["overrides"]["containerOverrides"])
container_override["command"] = cmd
# Inject the env variable to configure logging for containerized execution environment
if "environment" not in container_override:
container_override["environment"] = []
container_override["environment"].append({"name": "AIRFLOW_IS_EXECUTOR_CONTAINER", "value": "true"})
return run_task_kwargs
def execute_async(self, key: TaskInstanceKey, command: CommandType, queue=None, executor_config=None):
"""Save the task to be executed in the next sync by inserting the commands into a queue."""
if executor_config and ("name" in executor_config or "command" in executor_config):
raise ValueError('Executor Config should never override "name" or "command"')
if len(command) == 1:
from airflow.executors.workloads import ExecuteTask
if isinstance(command[0], ExecuteTask):
workload = command[0]
ser_input = workload.model_dump_json()
command = [
"python",
"-m",
"airflow.sdk.execution_time.execute_workload",
"--json-string",
ser_input,
]
else:
raise ValueError(
f"EcsExecutor doesn't know how to handle workload of type: {type(command[0])}"
)
self.pending_tasks.append(
EcsQueuedTask(key, command, queue, executor_config or {}, 1, timezone.utcnow())
)
def end(self, heartbeat_interval=10):
"""Wait for all currently running tasks to end, and don't launch any tasks."""
try:
while True:
self.sync()
if not self.active_workers:
break
time.sleep(heartbeat_interval)
except Exception:
# We catch any and all exceptions because otherwise they would bubble
# up and kill the scheduler process.
self.log.exception("Failed to end %s", self.__class__.__name__)
def terminate(self):
"""Kill all ECS processes by calling Boto3's StopTask API."""
try:
for arn in self.active_workers.get_all_arns():
self.ecs.stop_task(
cluster=self.cluster, task=arn, reason="Airflow Executor received a SIGTERM"
)
self.end()
except Exception:
# We catch any and all exceptions because otherwise they would bubble
# up and kill the scheduler process.
self.log.exception("Failed to terminate %s", self.__class__.__name__)
def _load_run_kwargs(self) -> dict:
from airflow.providers.amazon.aws.executors.ecs.ecs_executor_config import build_task_kwargs
ecs_executor_run_task_kwargs = build_task_kwargs(self.conf)
try:
self.get_container(ecs_executor_run_task_kwargs["overrides"]["containerOverrides"])["command"]
except KeyError:
raise KeyError(
"Rendered JSON template does not contain key "
'"overrides[containerOverrides][containers][x][command]"'
)
return ecs_executor_run_task_kwargs
def get_container(self, container_list):
"""Search task list for core Airflow container."""
for container in container_list:
try:
if container["name"] == self.container_name:
return container
except KeyError:
raise EcsExecutorException(
'container "name" must be provided in "containerOverrides" configuration'
)
raise KeyError(f"No such container found by container name: {self.container_name}")
def try_adopt_task_instances(self, tis: Sequence[TaskInstance]) -> Sequence[TaskInstance]:
"""
Adopt task instances which have an external_executor_id (the ECS task ARN).
Anything that is not adopted will be cleared by the scheduler and becomes eligible for re-scheduling.
"""
with Stats.timer("ecs_executor.adopt_task_instances.duration"):
adopted_tis: list[TaskInstance] = []
if task_arns := [ti.external_executor_id for ti in tis if ti.external_executor_id]:
task_descriptions = self.__describe_tasks(task_arns).get("tasks", [])
for task in task_descriptions:
ti = next(ti for ti in tis if ti.external_executor_id == task.task_arn)
self.active_workers.add_task(
task,
ti.key,
ti.queue,
ti.command_as_list(),
ti.executor_config,
ti.try_number,
)
adopted_tis.append(ti)
if adopted_tis:
tasks = [f"{task} in state {task.state}" for task in adopted_tis]
task_instance_str = "\n\t".join(tasks)
self.log.info(
"Adopted the following %d tasks from a dead executor:\n\t%s",
len(adopted_tis),
task_instance_str,
)
not_adopted_tis = [ti for ti in tis if ti not in adopted_tis]
return not_adopted_tis
def log_task_event(self, *, event: str, extra: str, ti_key: TaskInstanceKey):
# TODO: remove this method when min_airflow_version is set to higher than 2.10.0
with suppress(AttributeError):
super().log_task_event(
event=event,
extra=extra,
ti_key=ti_key,
)
| AwsEcsExecutor |
python | numba__llvmlite | llvmlite/binding/orcjit.py | {
"start": 8036,
"end": 11856
} | class ____(ffi.ObjectRef):
"""
A OrcJIT-based LLVM JIT engine that can compile and run LLVM IR as a
collection of JITted dynamic libraries
The C++ OrcJIT API has a lot of memory ownership patterns that do not work
with Python. This API attempts to provide ones that are safe at the expense
of some features. Each LLJIT instance is a collection of JIT-compiled
libraries. In the C++ API, there is a "main" library; this API does not
provide access to the main library. Use the JITLibraryBuilder to create a
new named library instead.
"""
def __init__(self, ptr):
self._td = None
ffi.ObjectRef.__init__(self, ptr)
def lookup(self, dylib, fn):
"""
Find a function in this dynamic library and construct a new tracking
object for it
If the library or function do not exist, an exception will occur.
Parameters
----------
dylib : str or None
the name of the library containing the symbol
fn : str
the name of the function to get
"""
assert not self.closed, "Cannot lookup in closed JIT"
address = ctypes.c_uint64()
with ffi.OutputString() as outerr:
tracker = ffi.lib.LLVMPY_LLJITLookup(self,
dylib.encode("utf-8"),
fn.encode("utf-8"),
ctypes.byref(address),
outerr)
if not tracker:
raise RuntimeError(str(outerr))
return ResourceTracker(tracker, dylib, {fn: address.value})
@property
def target_data(self):
"""
The TargetData for this LLJIT instance.
"""
if self._td is not None:
return self._td
ptr = ffi.lib.LLVMPY_LLJITGetDataLayout(self)
self._td = targets.TargetData(ptr)
self._td._owned = True
return self._td
def _dispose(self):
if self._td is not None:
self._td.detach()
self._capi.LLVMPY_LLJITDispose(self)
def create_lljit_compiler(target_machine=None, *,
use_jit_link=False,
suppress_errors=False):
"""
Create an LLJIT instance
"""
with ffi.OutputString() as outerr:
lljit = ffi.lib.LLVMPY_CreateLLJITCompiler(target_machine,
suppress_errors,
use_jit_link,
outerr)
if not lljit:
raise RuntimeError(str(outerr))
return LLJIT(lljit)
ffi.lib.LLVMPY_LLJITLookup.argtypes = [
ffi.LLVMOrcLLJITRef,
c_char_p,
c_char_p,
POINTER(c_uint64),
POINTER(c_char_p),
]
ffi.lib.LLVMPY_LLJITLookup.restype = ffi.LLVMOrcDylibTrackerRef
ffi.lib.LLVMPY_LLJITGetDataLayout.argtypes = [
ffi.LLVMOrcLLJITRef,
]
ffi.lib.LLVMPY_LLJITGetDataLayout.restype = ffi.LLVMTargetDataRef
ffi.lib.LLVMPY_CreateLLJITCompiler.argtypes = [
ffi.LLVMTargetMachineRef,
c_bool,
c_bool,
POINTER(c_char_p),
]
ffi.lib.LLVMPY_CreateLLJITCompiler.restype = ffi.LLVMOrcLLJITRef
ffi.lib.LLVMPY_LLJITDispose.argtypes = [
ffi.LLVMOrcLLJITRef,
]
ffi.lib.LLVMPY_LLJIT_Link.argtypes = [
ffi.LLVMOrcLLJITRef,
c_char_p,
POINTER(_LinkElement),
c_size_t,
POINTER(_SymbolAddress),
c_size_t,
POINTER(_SymbolAddress),
c_size_t,
POINTER(c_char_p)
]
ffi.lib.LLVMPY_LLJIT_Link.restype = ffi.LLVMOrcDylibTrackerRef
ffi.lib.LLVMPY_LLJIT_Dylib_Tracker_Dispose.argtypes = [
ffi.LLVMOrcDylibTrackerRef,
POINTER(c_char_p)
]
ffi.lib.LLVMPY_LLJIT_Dylib_Tracker_Dispose.restype = c_bool
| LLJIT |
python | pandas-dev__pandas | pandas/tests/io/sas/test_sas7bdat.py | {
"start": 1086,
"end": 14818
} | class ____:
@pytest.mark.slow
def test_from_file(self, dirpath, data_test_ix):
expected, test_ix = data_test_ix
for k in test_ix:
fname = os.path.join(dirpath, f"test{k}.sas7bdat")
df = pd.read_sas(fname, encoding="utf-8")
tm.assert_frame_equal(df, expected)
@pytest.mark.slow
def test_from_buffer(self, dirpath, data_test_ix):
expected, test_ix = data_test_ix
for k in test_ix:
fname = os.path.join(dirpath, f"test{k}.sas7bdat")
with open(fname, "rb") as f:
byts = f.read()
buf = io.BytesIO(byts)
with pd.read_sas(
buf, format="sas7bdat", iterator=True, encoding="utf-8"
) as rdr:
df = rdr.read()
tm.assert_frame_equal(df, expected)
@pytest.mark.slow
def test_from_iterator(self, dirpath, data_test_ix):
expected, test_ix = data_test_ix
for k in test_ix:
fname = os.path.join(dirpath, f"test{k}.sas7bdat")
with pd.read_sas(fname, iterator=True, encoding="utf-8") as rdr:
df = rdr.read(2)
tm.assert_frame_equal(df, expected.iloc[0:2, :])
df = rdr.read(3)
tm.assert_frame_equal(df, expected.iloc[2:5, :])
@pytest.mark.slow
def test_path_pathlib(self, dirpath, data_test_ix):
expected, test_ix = data_test_ix
for k in test_ix:
fname = Path(os.path.join(dirpath, f"test{k}.sas7bdat"))
df = pd.read_sas(fname, encoding="utf-8")
tm.assert_frame_equal(df, expected)
@pytest.mark.slow
@pytest.mark.parametrize("chunksize", (3, 5, 10, 11))
@pytest.mark.parametrize("k", range(1, 17))
def test_iterator_loop(self, dirpath, k, chunksize):
# github #13654
fname = os.path.join(dirpath, f"test{k}.sas7bdat")
with pd.read_sas(fname, chunksize=chunksize, encoding="utf-8") as rdr:
y = 0
for x in rdr:
y += x.shape[0]
assert y == rdr.row_count
def test_iterator_read_too_much(self, dirpath):
# github #14734
fname = os.path.join(dirpath, "test1.sas7bdat")
with pd.read_sas(
fname, format="sas7bdat", iterator=True, encoding="utf-8"
) as rdr:
d1 = rdr.read(rdr.row_count + 20)
with pd.read_sas(fname, iterator=True, encoding="utf-8") as rdr:
d2 = rdr.read(rdr.row_count + 20)
tm.assert_frame_equal(d1, d2)
def test_encoding_options(datapath):
fname = datapath("io", "sas", "data", "test1.sas7bdat")
df1 = pd.read_sas(fname)
df2 = pd.read_sas(fname, encoding="utf-8")
for col in df1.columns:
try:
df1[col] = df1[col].str.decode("utf-8")
except AttributeError:
pass
tm.assert_frame_equal(df1, df2)
with contextlib.closing(SAS7BDATReader(fname, convert_header_text=False)) as rdr:
df3 = rdr.read()
for x, y in zip(df1.columns, df3.columns):
assert x == y.decode()
def test_encoding_infer(datapath):
fname = datapath("io", "sas", "data", "test1.sas7bdat")
with pd.read_sas(fname, encoding="infer", iterator=True) as df1_reader:
# check: is encoding inferred correctly from file
assert df1_reader.inferred_encoding == "cp1252"
df1 = df1_reader.read()
with pd.read_sas(fname, encoding="cp1252", iterator=True) as df2_reader:
df2 = df2_reader.read()
# check: reader reads correct information
tm.assert_frame_equal(df1, df2)
def test_productsales(datapath):
fname = datapath("io", "sas", "data", "productsales.sas7bdat")
df = pd.read_sas(fname, encoding="utf-8")
fname = datapath("io", "sas", "data", "productsales.csv")
df0 = pd.read_csv(fname, parse_dates=["MONTH"])
vn = ["ACTUAL", "PREDICT", "QUARTER", "YEAR"]
df0[vn] = df0[vn].astype(np.float64)
df0["MONTH"] = df0["MONTH"].astype("M8[s]")
tm.assert_frame_equal(df, df0)
def test_12659(datapath):
fname = datapath("io", "sas", "data", "test_12659.sas7bdat")
df = pd.read_sas(fname)
fname = datapath("io", "sas", "data", "test_12659.csv")
df0 = pd.read_csv(fname)
df0 = df0.astype(np.float64)
tm.assert_frame_equal(df, df0)
def test_airline(datapath):
fname = datapath("io", "sas", "data", "airline.sas7bdat")
df = pd.read_sas(fname)
fname = datapath("io", "sas", "data", "airline.csv")
df0 = pd.read_csv(fname)
df0 = df0.astype(np.float64)
tm.assert_frame_equal(df, df0)
@pytest.mark.skipif(WASM, reason="Pyodide/WASM has 32-bitness")
def test_date_time(datapath):
# Support of different SAS date/datetime formats (PR #15871)
fname = datapath("io", "sas", "data", "datetime.sas7bdat")
df = pd.read_sas(fname)
fname = datapath("io", "sas", "data", "datetime.csv")
df0 = pd.read_csv(
fname, parse_dates=["Date1", "Date2", "DateTime", "DateTimeHi", "Taiw"]
)
# GH 19732: Timestamps imported from sas will incur floating point errors
# See GH#56014 for discussion of the correct "expected" results
# We are really just testing that we are "close". This only seems to be
# an issue near the implementation bounds.
df[df.columns[3]] = df.iloc[:, 3].dt.round("us")
df0["Date1"] = df0["Date1"].astype("M8[s]")
df0["Date2"] = df0["Date2"].astype("M8[s]")
df0["DateTime"] = df0["DateTime"].astype("M8[ms]")
df0["Taiw"] = df0["Taiw"].astype("M8[s]")
res = df0["DateTimeHi"].astype("M8[us]").dt.round("ms")
df0["DateTimeHi"] = res.astype("M8[ms]")
if not IS64:
# No good reason for this, just what we get on the CI
df0.loc[0, "DateTimeHi"] += np.timedelta64(1, "ms")
df0.loc[[2, 3], "DateTimeHi"] -= np.timedelta64(1, "ms")
tm.assert_frame_equal(df, df0)
@pytest.mark.parametrize("column", ["WGT", "CYL"])
def test_compact_numerical_values(datapath, column):
# Regression test for #21616
fname = datapath("io", "sas", "data", "cars.sas7bdat")
df = pd.read_sas(fname, encoding="latin-1")
# The two columns CYL and WGT in cars.sas7bdat have column
# width < 8 and only contain integral values.
# Test that pandas doesn't corrupt the numbers by adding
# decimals.
result = df[column]
expected = df[column].round()
tm.assert_series_equal(result, expected, check_exact=True)
def test_many_columns(datapath):
# Test for looking for column information in more places (PR #22628)
fname = datapath("io", "sas", "data", "many_columns.sas7bdat")
df = pd.read_sas(fname, encoding="latin-1")
fname = datapath("io", "sas", "data", "many_columns.csv")
df0 = pd.read_csv(fname, encoding="latin-1")
tm.assert_frame_equal(df, df0)
def test_inconsistent_number_of_rows(datapath):
# Regression test for issue #16615. (PR #22628)
fname = datapath("io", "sas", "data", "load_log.sas7bdat")
df = pd.read_sas(fname, encoding="latin-1")
assert len(df) == 2097
def test_zero_variables(datapath):
# Check if the SAS file has zero variables (PR #18184)
fname = datapath("io", "sas", "data", "zero_variables.sas7bdat")
with pytest.raises(EmptyDataError, match="No columns to parse from file"):
pd.read_sas(fname)
@pytest.mark.parametrize("encoding", [None, "utf8"])
def test_zero_rows(datapath, encoding):
# GH 18198
fname = datapath("io", "sas", "data", "zero_rows.sas7bdat")
result = pd.read_sas(fname, encoding=encoding)
str_value = b"a" if encoding is None else "a"
expected = pd.DataFrame([{"char_field": str_value, "num_field": 1.0}]).iloc[:0]
tm.assert_frame_equal(result, expected)
def test_corrupt_read(datapath):
# We don't really care about the exact failure, the important thing is
# that the resource should be cleaned up afterwards (BUG #35566)
fname = datapath("io", "sas", "data", "corrupt.sas7bdat")
msg = "'SAS7BDATReader' object has no attribute 'row_count'"
with pytest.raises(AttributeError, match=msg):
pd.read_sas(fname)
@pytest.mark.xfail(WASM, reason="failing with currently set tolerances on WASM")
def test_max_sas_date(datapath):
# GH 20927
# NB. max datetime in SAS dataset is 31DEC9999:23:59:59.999
# but this is read as 29DEC9999:23:59:59.998993 by a buggy
# sas7bdat module
# See also GH#56014 for discussion of the correct "expected" results.
fname = datapath("io", "sas", "data", "max_sas_date.sas7bdat")
df = pd.read_sas(fname, encoding="iso-8859-1")
expected = pd.DataFrame(
{
"text": ["max", "normal"],
"dt_as_float": [253717747199.999, 1880323199.999],
"dt_as_dt": np.array(
[
datetime(9999, 12, 29, 23, 59, 59, 999000),
datetime(2019, 8, 1, 23, 59, 59, 999000),
],
dtype="M8[ms]",
),
"date_as_float": [2936547.0, 21762.0],
"date_as_date": np.array(
[
datetime(9999, 12, 29),
datetime(2019, 8, 1),
],
dtype="M8[s]",
),
},
columns=["text", "dt_as_float", "dt_as_dt", "date_as_float", "date_as_date"],
)
if not IS64:
# No good reason for this, just what we get on the CI
expected.loc[:, "dt_as_dt"] -= np.timedelta64(1, "ms")
tm.assert_frame_equal(df, expected)
@pytest.mark.xfail(WASM, reason="failing with currently set tolerances on WASM")
def test_max_sas_date_iterator(datapath):
# GH 20927
# when called as an iterator, only those chunks with a date > pd.Timestamp.max
# are returned as datetime.datetime, if this happens that whole chunk is returned
# as datetime.datetime
col_order = ["text", "dt_as_float", "dt_as_dt", "date_as_float", "date_as_date"]
fname = datapath("io", "sas", "data", "max_sas_date.sas7bdat")
results = []
for df in pd.read_sas(fname, encoding="iso-8859-1", chunksize=1):
# GH 19732: Timestamps imported from sas will incur floating point errors
df.reset_index(inplace=True, drop=True)
results.append(df)
expected = [
pd.DataFrame(
{
"text": ["max"],
"dt_as_float": [253717747199.999],
"dt_as_dt": np.array(
[datetime(9999, 12, 29, 23, 59, 59, 999000)], dtype="M8[ms]"
),
"date_as_float": [2936547.0],
"date_as_date": np.array([datetime(9999, 12, 29)], dtype="M8[s]"),
},
columns=col_order,
),
pd.DataFrame(
{
"text": ["normal"],
"dt_as_float": [1880323199.999],
"dt_as_dt": np.array(["2019-08-01 23:59:59.999"], dtype="M8[ms]"),
"date_as_float": [21762.0],
"date_as_date": np.array(["2019-08-01"], dtype="M8[s]"),
},
columns=col_order,
),
]
if not IS64:
# No good reason for this, just what we get on the CI
expected[0].loc[0, "dt_as_dt"] -= np.timedelta64(1, "ms")
expected[1].loc[0, "dt_as_dt"] -= np.timedelta64(1, "ms")
tm.assert_frame_equal(results[0], expected[0])
tm.assert_frame_equal(results[1], expected[1])
@pytest.mark.skipif(WASM, reason="Pyodide/WASM has 32-bitness")
def test_null_date(datapath):
fname = datapath("io", "sas", "data", "dates_null.sas7bdat")
df = pd.read_sas(fname, encoding="utf-8")
expected = pd.DataFrame(
{
"datecol": np.array(
[
datetime(9999, 12, 29),
np.datetime64("NaT"),
],
dtype="M8[s]",
),
"datetimecol": np.array(
[
datetime(9999, 12, 29, 23, 59, 59, 999000),
np.datetime64("NaT"),
],
dtype="M8[ms]",
),
},
)
if not IS64:
# No good reason for this, just what we get on the CI
expected.loc[0, "datetimecol"] -= np.timedelta64(1, "ms")
tm.assert_frame_equal(df, expected)
def test_meta2_page(datapath):
# GH 35545
fname = datapath("io", "sas", "data", "test_meta2_page.sas7bdat")
df = pd.read_sas(fname)
assert len(df) == 1000
@pytest.mark.parametrize(
"test_file, override_offset, override_value, expected_msg",
[
("test2.sas7bdat", 0x10000 + 55229, 0x80 | 0x0F, "Out of bounds"),
("test2.sas7bdat", 0x10000 + 55229, 0x10, "unknown control byte"),
("test3.sas7bdat", 118170, 184, "Out of bounds"),
],
)
def test_rle_rdc_exceptions(
datapath, test_file, override_offset, override_value, expected_msg
):
"""Errors in RLE/RDC decompression should propagate."""
with open(datapath("io", "sas", "data", test_file), "rb") as fd:
data = bytearray(fd.read())
data[override_offset] = override_value
with pytest.raises(Exception, match=expected_msg):
pd.read_sas(io.BytesIO(data), format="sas7bdat")
def test_0x40_control_byte(datapath):
# GH 31243
fname = datapath("io", "sas", "data", "0x40controlbyte.sas7bdat")
df = pd.read_sas(fname, encoding="ascii")
fname = datapath("io", "sas", "data", "0x40controlbyte.csv")
df0 = pd.read_csv(fname, dtype="str")
tm.assert_frame_equal(df, df0)
def test_0x00_control_byte(datapath):
# GH 47099
fname = datapath("io", "sas", "data", "0x00controlbyte.sas7bdat.bz2")
df = next(pd.read_sas(fname, chunksize=11_000))
assert df.shape == (11_000, 20)
| TestSAS7BDAT |
python | PyCQA__pylint | tests/functional/u/useless/useless_parent_delegation_py38.py | {
"start": 170,
"end": 293
} | class ____(Egg):
def __init__(self, first: float, /, second: float) -> None:
super().__init__(first, second)
| Spam |
python | ray-project__ray | release/ray_release/exception.py | {
"start": 3787,
"end": 3862
} | class ____(CommandError):
exit_code = ExitCode.COMMAND_ALERT
| ResultsAlert |
python | gevent__gevent | src/gevent/libuv/watcher.py | {
"start": 1959,
"end": 3318
} | class ____(object):
# Makes sure that everything stored as a function
# on the wrapper instances (classes, actually,
# because this is used by the metaclass)
# checks its return value and raises an error.
# This expects that everything we call has an int
# or void return value and follows the conventions
# of error handling (that negative values are errors)
def __init__(self, uv):
self._libuv = uv
def __getattr__(self, name):
libuv_func = getattr(self._libuv, name)
@functools.wraps(libuv_func)
def wrap(*args, **kwargs):
if args and isinstance(args[0], watcher):
args = args[1:]
res = libuv_func(*args, **kwargs)
if res is not None and res < 0:
kind = UVFuncallError
if res == libuv.UV_EBADF:
kind = lambda msg: OSError(abs(res), msg)
raise kind(
str(ffi.string(libuv.uv_err_name(res)).decode('ascii')
+ ' '
+ ffi.string(libuv.uv_strerror(res)).decode('ascii'))
+ " Args: " + repr(args) + " KWARGS: " + repr(kwargs)
+ " UVError: " + str(res)
)
return res
setattr(self, name, wrap)
return wrap
| libuv_error_wrapper |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/missing_maxsplit_arg.py | {
"start": 162,
"end": 4730
} | class ____():
split = "1,2,3"
# Errors
## Test split called directly on string literal
"1,2,3".split(",")[0] # [missing-maxsplit-arg]
"1,2,3".split(",")[-1] # [missing-maxsplit-arg]
"1,2,3".rsplit(",")[0] # [missing-maxsplit-arg]
"1,2,3".rsplit(",")[-1] # [missing-maxsplit-arg]
## Test split called on string variable
SEQ.split(",")[0] # [missing-maxsplit-arg]
SEQ.split(",")[-1] # [missing-maxsplit-arg]
SEQ.rsplit(",")[0] # [missing-maxsplit-arg]
SEQ.rsplit(",")[-1] # [missing-maxsplit-arg]
## Test split called on class attribute
Foo.class_str.split(",")[0] # [missing-maxsplit-arg]
Foo.class_str.split(",")[-1] # [missing-maxsplit-arg]
Foo.class_str.rsplit(",")[0] # [missing-maxsplit-arg]
Foo.class_str.rsplit(",")[-1] # [missing-maxsplit-arg]
## Test split called on sliced string
"1,2,3"[::-1].split(",")[0] # [missing-maxsplit-arg]
"1,2,3"[::-1][::-1].split(",")[0] # [missing-maxsplit-arg]
SEQ[:3].split(",")[0] # [missing-maxsplit-arg]
Foo.class_str[1:3].split(",")[-1] # [missing-maxsplit-arg]
"1,2,3"[::-1].rsplit(",")[0] # [missing-maxsplit-arg]
SEQ[:3].rsplit(",")[0] # [missing-maxsplit-arg]
Foo.class_str[1:3].rsplit(",")[-1] # [missing-maxsplit-arg]
## Test sep given as named argument
"1,2,3".split(sep=",")[0] # [missing-maxsplit-arg]
"1,2,3".split(sep=",")[-1] # [missing-maxsplit-arg]
"1,2,3".rsplit(sep=",")[0] # [missing-maxsplit-arg]
"1,2,3".rsplit(sep=",")[-1] # [missing-maxsplit-arg]
## Special cases
"1,2,3".split("\n")[0] # [missing-maxsplit-arg]
"1,2,3".split("split")[-1] # [missing-maxsplit-arg]
"1,2,3".rsplit("rsplit")[0] # [missing-maxsplit-arg]
## Test class attribute named split
Bar.split.split(",")[0] # [missing-maxsplit-arg]
Bar.split.split(",")[-1] # [missing-maxsplit-arg]
Bar.split.rsplit(",")[0] # [missing-maxsplit-arg]
Bar.split.rsplit(",")[-1] # [missing-maxsplit-arg]
## Test unpacked dict literal kwargs
"1,2,3".split(**{"sep": ","})[0] # [missing-maxsplit-arg]
# OK
## Test not accessing the first or last element
### Test split called directly on string literal
"1,2,3".split(",")[1]
"1,2,3".split(",")[-2]
"1,2,3".rsplit(",")[1]
"1,2,3".rsplit(",")[-2]
### Test split called on string variable
SEQ.split(",")[1]
SEQ.split(",")[-2]
SEQ.rsplit(",")[1]
SEQ.rsplit(",")[-2]
### Test split called on class attribute
Foo.class_str.split(",")[1]
Foo.class_str.split(",")[-2]
Foo.class_str.rsplit(",")[1]
Foo.class_str.rsplit(",")[-2]
### Test split called on sliced string
"1,2,3"[::-1].split(",")[1]
SEQ[:3].split(",")[1]
Foo.class_str[1:3].split(",")[-2]
"1,2,3"[::-1].rsplit(",")[1]
SEQ[:3].rsplit(",")[1]
Foo.class_str[1:3].rsplit(",")[-2]
### Test sep given as named argument
"1,2,3".split(sep=",")[1]
"1,2,3".split(sep=",")[-2]
"1,2,3".rsplit(sep=",")[1]
"1,2,3".rsplit(sep=",")[-2]
## Test varying maxsplit argument
### str.split() tests
"1,2,3".split(sep=",", maxsplit=1)[-1]
"1,2,3".split(sep=",", maxsplit=1)[0]
"1,2,3".split(sep=",", maxsplit=2)[-1]
"1,2,3".split(sep=",", maxsplit=2)[0]
"1,2,3".split(sep=",", maxsplit=2)[1]
### str.rsplit() tests
"1,2,3".rsplit(sep=",", maxsplit=1)[-1]
"1,2,3".rsplit(sep=",", maxsplit=1)[0]
"1,2,3".rsplit(sep=",", maxsplit=2)[-1]
"1,2,3".rsplit(sep=",", maxsplit=2)[0]
"1,2,3".rsplit(sep=",", maxsplit=2)[1]
## Test user-defined split
Foo("1,2,3").split(",")[0]
Foo("1,2,3").split(",")[-1]
Foo("1,2,3").rsplit(",")[0]
Foo("1,2,3").rsplit(",")[-1]
## Test split called on sliced list
["1", "2", "3"][::-1].split(",")[0]
## Test class attribute named split
Bar.split[0]
Bar.split[-1]
Bar.split[0]
Bar.split[-1]
## Test unpacked dict literal kwargs
"1,2,3".split(",", **{"maxsplit": 1})[0]
"1,2,3".split(**{"sep": ",", "maxsplit": 1})[0]
# TODO
## Test variable split result index
## TODO: These require the ability to resolve a variable name to a value
# Errors
result_index = 0
"1,2,3".split(",")[result_index] # TODO: [missing-maxsplit-arg]
result_index = -1
"1,2,3".split(",")[result_index] # TODO: [missing-maxsplit-arg]
# OK
result_index = 1
"1,2,3".split(",")[result_index]
result_index = -2
"1,2,3".split(",")[result_index]
## Test split result index modified in loop
## TODO: These require the ability to recognize being in a loop where:
## - the result of split called on a string is indexed by a variable
## - the variable index above is modified
# OK
result_index = 0
for j in range(3):
print(SEQ.split(",")[result_index])
result_index = result_index + 1
## Test accessor
## TODO: These require the ability to get the return type of a method
## (possibly via `typing::is_string`)
| Bar |
python | fastai__fastai | fastai/learner.py | {
"start": 24099,
"end": 24717
} | class ____(Metric):
"Use to include a pre-calculated metric value (for instance calculated in a `Callback`) and returned by `func`"
def __init__(self, func, metric_name=None): store_attr('func, metric_name')
@property
def value(self): return self.func()
@property
def name(self): return self.metric_name if self.metric_name else self.func.__name__
# %% ../nbs/13a_learner.ipynb 133
from fastprogress.fastprogress import format_time
# %% ../nbs/13a_learner.ipynb 134
def _maybe_item(t):
t = t.value
try: return t.item()
except: return t
# %% ../nbs/13a_learner.ipynb 135
| ValueMetric |
python | joblib__joblib | joblib/test/test_memory.py | {
"start": 48714,
"end": 50660
} | class ____:
"Tests for the MemorizedFunc and NotMemorizedFunc classes"
@staticmethod
def f(x, counter):
counter[x] = counter.get(x, 0) + 1
return counter[x]
def test_call_method_memorized(self, memory):
"Test calling the function"
f = memory.cache(self.f, ignore=["counter"])
counter = {}
assert f(2, counter) == 1
assert f(2, counter) == 1
x, meta = f.call(2, counter)
assert x == 2, "f has not been called properly"
assert isinstance(meta, dict), (
"Metadata are not returned by MemorizedFunc.call."
)
def test_call_method_not_memorized(self, memory):
"Test calling the function"
f = NotMemorizedFunc(self.f)
counter = {}
assert f(2, counter) == 1
assert f(2, counter) == 2
x, meta = f.call(2, counter)
assert x == 3, "f has not been called properly"
assert isinstance(meta, dict), (
"Metadata are not returned by MemorizedFunc.call."
)
@with_numpy
@parametrize(
"location",
[
"test_cache_dir",
pathlib.Path("test_cache_dir"),
pathlib.Path("test_cache_dir").resolve(),
],
)
def test_memory_creates_gitignore(location):
"""Test that using the memory object automatically creates a `.gitignore` file
within the new cache directory."""
mem = Memory(location)
arr = np.asarray([[1, 2, 3], [4, 5, 6]])
costly_operation = mem.cache(np.square)
costly_operation(arr)
location = pathlib.Path(location)
try:
path_to_gitignore_file = os.path.join(location, ".gitignore")
gitignore_file_content = "# Created by joblib automatically.\n*\n"
with open(path_to_gitignore_file) as f:
assert gitignore_file_content == f.read()
finally: # remove cache folder after test
shutil.rmtree(location, ignore_errors=True)
| TestMemorizedFunc |
python | django__django | django/urls/converters.py | {
"start": 627,
"end": 1358
} | class ____(StringConverter):
regex = ".+"
DEFAULT_CONVERTERS = {
"int": IntConverter(),
"path": PathConverter(),
"slug": SlugConverter(),
"str": StringConverter(),
"uuid": UUIDConverter(),
}
REGISTERED_CONVERTERS = {}
def register_converter(converter, type_name):
if type_name in REGISTERED_CONVERTERS or type_name in DEFAULT_CONVERTERS:
raise ValueError(f"Converter {type_name!r} is already registered.")
REGISTERED_CONVERTERS[type_name] = converter()
get_converters.cache_clear()
from django.urls.resolvers import _route_to_regex
_route_to_regex.cache_clear()
@functools.cache
def get_converters():
return {**DEFAULT_CONVERTERS, **REGISTERED_CONVERTERS}
| PathConverter |
python | numba__numba | numba/tests/test_dictobject.py | {
"start": 49866,
"end": 51778
} | class ____(TestCase):
def test_check_untyped_dict_ops(self):
# Check operation on untyped dictionary
d = Dict()
self.assertFalse(d._typed)
self.assertEqual(len(d), 0)
self.assertEqual(str(d), str({}))
self.assertEqual(list(iter(d)), [])
# Test __getitem__
with self.assertRaises(KeyError) as raises:
d[1]
self.assertEqual(str(raises.exception), str(KeyError(1)))
# Test __delitem__
with self.assertRaises(KeyError) as raises:
del d[1]
self.assertEqual(str(raises.exception), str(KeyError(1)))
# Test .pop
with self.assertRaises(KeyError):
d.pop(1)
self.assertEqual(str(raises.exception), str(KeyError(1)))
# Test .pop
self.assertIs(d.pop(1, None), None)
# Test .get
self.assertIs(d.get(1), None)
# Test .popitem
with self.assertRaises(KeyError) as raises:
d.popitem()
self.assertEqual(str(raises.exception),
str(KeyError('dictionary is empty')))
# Test setdefault(k)
with self.assertRaises(TypeError) as raises:
d.setdefault(1)
self.assertEqual(
str(raises.exception),
str(TypeError('invalid operation on untyped dictionary')),
)
# Test __contains__
self.assertFalse(1 in d)
# It's untyped
self.assertFalse(d._typed)
def test_getitem(self):
# Test __getitem__
d = Dict()
d[1] = 2
# It's typed now
self.assertTrue(d._typed)
self.assertEqual(d[1], 2)
def test_setdefault(self):
# Test setdefault(k, d)
d = Dict()
d.setdefault(1, 2)
# It's typed now
self.assertTrue(d._typed)
self.assertEqual(d[1], 2)
@jitclass(spec=[('a', types.intp)])
| TestNonCompiledInfer |
python | pypa__pip | src/pip/_vendor/rich/_null_file.py | {
"start": 98,
"end": 1394
} | class ____(IO[str]):
def close(self) -> None:
pass
def isatty(self) -> bool:
return False
def read(self, __n: int = 1) -> str:
return ""
def readable(self) -> bool:
return False
def readline(self, __limit: int = 1) -> str:
return ""
def readlines(self, __hint: int = 1) -> List[str]:
return []
def seek(self, __offset: int, __whence: int = 1) -> int:
return 0
def seekable(self) -> bool:
return False
def tell(self) -> int:
return 0
def truncate(self, __size: Optional[int] = 1) -> int:
return 0
def writable(self) -> bool:
return False
def writelines(self, __lines: Iterable[str]) -> None:
pass
def __next__(self) -> str:
return ""
def __iter__(self) -> Iterator[str]:
return iter([""])
def __enter__(self) -> IO[str]:
return self
def __exit__(
self,
__t: Optional[Type[BaseException]],
__value: Optional[BaseException],
__traceback: Optional[TracebackType],
) -> None:
pass
def write(self, text: str) -> int:
return 0
def flush(self) -> None:
pass
def fileno(self) -> int:
return -1
NULL_FILE = NullFile()
| NullFile |
python | langchain-ai__langchain | libs/partners/anthropic/langchain_anthropic/output_parsers.py | {
"start": 397,
"end": 3446
} | class ____(BaseGenerationOutputParser):
"""Output parser for tool calls."""
first_tool_only: bool = False
"""Whether to return only the first tool call."""
args_only: bool = False
"""Whether to return only the arguments of the tool calls."""
pydantic_schemas: list[type[BaseModel]] | None = None
"""Pydantic schemas to parse tool calls into."""
model_config = ConfigDict(
extra="forbid",
)
def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
"""Parse a list of candidate model Generations into a specific format.
Args:
result: A list of `Generation` to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
partial: (Not used) Whether the result is a partial result. If `True`, the
parser may return a partial result, which may not be complete or valid.
Returns:
Structured output.
"""
if not result or not isinstance(result[0], ChatGeneration):
return None if self.first_tool_only else []
message = cast("AIMessage", result[0].message)
tool_calls: list = [
dict(tc) for tc in _extract_tool_calls_from_message(message)
]
if isinstance(message.content, list):
# Map tool call id to index
id_to_index = {
block["id"]: i
for i, block in enumerate(message.content)
if isinstance(block, dict) and block["type"] == "tool_use"
}
tool_calls = [{**tc, "index": id_to_index[tc["id"]]} for tc in tool_calls]
if self.pydantic_schemas:
tool_calls = [self._pydantic_parse(tc) for tc in tool_calls]
elif self.args_only:
tool_calls = [tc["args"] for tc in tool_calls]
else:
pass
if self.first_tool_only:
return tool_calls[0] if tool_calls else None
return list(tool_calls)
def _pydantic_parse(self, tool_call: dict) -> BaseModel:
cls_ = {schema.__name__: schema for schema in self.pydantic_schemas or []}[
tool_call["name"]
]
return cls_(**tool_call["args"])
def _extract_tool_calls_from_message(message: AIMessage) -> list[ToolCall]:
"""Extract tool calls from a list of content blocks."""
if message.tool_calls:
return message.tool_calls
return extract_tool_calls(message.content)
def extract_tool_calls(content: str | list[str | dict]) -> list[ToolCall]:
"""Extract tool calls from a list of content blocks."""
if isinstance(content, list):
tool_calls = []
for block in content:
if isinstance(block, str):
continue
if block["type"] != "tool_use":
continue
tool_calls.append(
tool_call(name=block["name"], args=block["input"], id=block["id"]),
)
return tool_calls
return []
| ToolsOutputParser |
python | gevent__gevent | src/gevent/tests/test__greenlet.py | {
"start": 14479,
"end": 14637
} | class ____(AbstractGenericWaitTestCase):
g = gevent.Greenlet()
def wait(self, timeout):
gevent.joinall([self.g], timeout=timeout)
| TestJoinAll0 |
python | ray-project__ray | python/ray/serve/tests/test_target_capacity.py | {
"start": 14858,
"end": 30955
} | class ____:
def check_num_replicas(
self,
expected_num_replicas: int,
app_name: str,
deployment_name: str,
replica_state: ReplicaState = ReplicaState.RUNNING,
controller_handle=None,
) -> bool:
"""Checks that the number of replicas are as expected.
Args:
expected_num_replicas: the expected number of replicas.
app_name: the deployment's application name.
deployment_name: the deployment's name.
replica_state: only replicas in this state are counted.
controller_handle: this is an optional argument. If provided, the
controller handle is used to get the current autoscaling
metrics and print them if the assertion fails.
"""
deployment = serve.status().applications[app_name].deployments[deployment_name]
num_running_replicas = deployment.replica_states.get(replica_state, 0)
if controller_handle is None:
assert num_running_replicas == expected_num_replicas, f"{deployment}"
else:
deployment_id = DeploymentID(name=deployment_name, app_name=app_name)
autoscaling_metrics = ray.get(
controller_handle._get_metrics_for_deployment_for_testing.remote(
deployment_id
)
)
assert num_running_replicas == expected_num_replicas, (
f"Status: {deployment}" f"\nAutoscaling metrics: {autoscaling_metrics}"
)
return True
def apply_config_and_check_status(
self,
client: ServeControllerClient,
target_capacity: Optional[float],
config: ServeDeploySchema,
app_name: str,
deployment_name: str,
expected_app_status: Optional[ApplicationStatus] = None,
expected_deployment_status: Optional[DeploymentStatus] = None,
expected_deployment_status_trigger: Optional[DeploymentStatusTrigger] = None,
timeout=10,
):
"""Applies config with specified target_capacity."""
config = deepcopy(config)
config.target_capacity = target_capacity
client.deploy_apps(config)
def check():
status = serve.status()
assert status.target_capacity == target_capacity
if expected_app_status is not None:
assert status.applications[app_name].status == expected_app_status
dep_status = status.applications[app_name].deployments[deployment_name]
if expected_deployment_status is not None:
assert dep_status.status == expected_deployment_status
if expected_deployment_status_trigger is not None:
assert dep_status.status_trigger == expected_deployment_status_trigger
return True
wait_for_condition(check, timeout=timeout)
def unblock_replica_creation_and_deletion(self, lifecycle_signal, app_name: str):
"""Unblocks creating and deleting ControlledLifecycleDeployment replicas.
These replicas can't initialize or be deleted until the
"lifecycle_signal" actor runs send, so this method runs send, waits
until the replicas start or stop running, and then resets the signal.
"""
def check_running():
app_status_data = serve.status().applications[app_name]
app_status = app_status_data.status
assert app_status == ApplicationStatus.RUNNING, f"{app_status_data}"
return True
ray.get(lifecycle_signal.send.remote())
wait_for_condition(check_running, timeout=20, retry_interval_ms=500)
ray.get(lifecycle_signal.send.remote(clear=True))
def test_static_num_replicas_target_capacity_update(
self, shutdown_ray_and_serve, client: ServeControllerClient
):
"""Check how Serve's status updates when target_capacity changes."""
app_name = "controlled_app"
deployment_name = "controlled"
num_replicas = 20
signal = SignalActor.options(
name="lifecycle_signal", namespace=SERVE_NAMESPACE
).remote()
config = ServeDeploySchema(
applications=[
ServeApplicationSchema(
name=app_name,
import_path=(
"ray.serve.tests.test_target_capacity:create_controlled_app"
),
args={"num_replicas": num_replicas},
)
]
)
# Initially deploy at target_capacity 0, and check status.
self.apply_config_and_check_status(
client,
target_capacity=0.0,
config=config,
app_name=app_name,
deployment_name=deployment_name,
expected_app_status=ApplicationStatus.RUNNING,
expected_deployment_status=DeploymentStatus.HEALTHY,
expected_deployment_status_trigger=(
DeploymentStatusTrigger.CONFIG_UPDATE_COMPLETED
),
)
self.check_num_replicas(0, app_name, deployment_name)
# Increase the target_capacity, and check again.
self.apply_config_and_check_status(
client,
target_capacity=50.0,
config=config,
app_name=app_name,
deployment_name=deployment_name,
expected_app_status=ApplicationStatus.DEPLOYING,
expected_deployment_status=DeploymentStatus.UPSCALING,
expected_deployment_status_trigger=(
DeploymentStatusTrigger.CONFIG_UPDATE_STARTED
),
)
self.check_num_replicas(0, app_name, deployment_name)
self.unblock_replica_creation_and_deletion(signal, app_name)
self.check_num_replicas(int(0.5 * num_replicas), app_name, deployment_name)
# Decrease the target_capacity, and check again.
self.apply_config_and_check_status(
client,
target_capacity=10.0,
config=config,
app_name=app_name,
deployment_name=deployment_name,
expected_app_status=ApplicationStatus.DEPLOYING,
expected_deployment_status=DeploymentStatus.DOWNSCALING,
expected_deployment_status_trigger=(
DeploymentStatusTrigger.CONFIG_UPDATE_STARTED
),
)
# DeploymentStateManager marks replicas as STOPPING once the deployment
# starts downscaling.
wait_for_condition(
self.check_num_replicas,
expected_num_replicas=int(0.1 * num_replicas),
app_name=app_name,
deployment_name=deployment_name,
replica_state=ReplicaState.RUNNING,
timeout=20,
)
self.check_num_replicas(
int(0.4 * num_replicas),
app_name,
deployment_name,
replica_state=ReplicaState.STOPPING,
)
self.unblock_replica_creation_and_deletion(signal, app_name)
self.check_num_replicas(int(0.1 * num_replicas), app_name, deployment_name)
self.check_num_replicas(
0, app_name, deployment_name, replica_state=ReplicaState.STOPPING
)
# Send a signal so all replicas shut down quickly when the test finishes.
ray.get(signal.send.remote())
@pytest.mark.skipif(sys.platform == "win32", reason="Autoscaling flaky on Windows.")
def test_autoscaling_target_capacity_update(
self, shutdown_ray_and_serve, client: ServeControllerClient
):
"""Check Serve's status when target_capacity changes while autoscaling."""
# TODO(landscapepainter): This test fails locally due to the stall for replica initialization
# during upscaling and delayed response from serve.status(). It does not fail from
# buildkite, but need to investigate why it fails locally.
app_name = "controlled_app"
deployment_name = "controlled"
min_replicas = 10
initial_replicas = 30
max_replicas = 70
lifecycle_signal = SignalActor.options(
name="lifecycle_signal", namespace=SERVE_NAMESPACE
).remote()
request_signal = SignalActor.options(
name="request_signal", namespace=SERVE_NAMESPACE
).remote()
config = ServeDeploySchema(
applications=[
ServeApplicationSchema(
name=app_name,
import_path=(
"ray.serve.tests.test_target_capacity:"
"create_autoscaling_controlled_app"
),
args=dict(
min_replicas=min_replicas,
initial_replicas=initial_replicas,
max_replicas=max_replicas,
),
)
]
)
# Initially deploy at target_capacity 0, and check status.
self.apply_config_and_check_status(
client,
target_capacity=0.0,
config=config,
app_name=app_name,
deployment_name=deployment_name,
timeout=20,
expected_app_status=ApplicationStatus.RUNNING,
expected_deployment_status=DeploymentStatus.HEALTHY,
expected_deployment_status_trigger=(
DeploymentStatusTrigger.CONFIG_UPDATE_COMPLETED
),
)
self.check_num_replicas(0, app_name, deployment_name)
# Increase the target_capacity, and check again.
self.apply_config_and_check_status(
client,
target_capacity=50.0,
config=config,
app_name=app_name,
deployment_name=deployment_name,
expected_app_status=ApplicationStatus.DEPLOYING,
expected_deployment_status=DeploymentStatus.UPSCALING,
expected_deployment_status_trigger=(
DeploymentStatusTrigger.CONFIG_UPDATE_STARTED
),
)
self.check_num_replicas(0, app_name, deployment_name)
self.unblock_replica_creation_and_deletion(lifecycle_signal, app_name)
self.check_num_replicas(int(0.5 * initial_replicas), app_name, deployment_name)
# Send requests and check that the application scales up.
requests = []
handle = serve.get_app_handle(app_name)
for _ in range(max_replicas):
requests.append(handle.remote())
ray.get(lifecycle_signal.send.remote())
wait_for_condition(
self.check_num_replicas,
expected_num_replicas=int(0.5 * max_replicas),
app_name=app_name,
deployment_name=deployment_name,
timeout=20,
)
# Clear requests and check that application scales down.
ray.get(request_signal.send.remote())
results = [request.result() for request in requests]
assert results == ["Hello world!"] * (max_replicas)
wait_for_condition(
self.check_num_replicas,
expected_num_replicas=int(0.5 * initial_replicas),
app_name=app_name,
deployment_name=deployment_name,
replica_state=ReplicaState.RUNNING,
timeout=20,
retry_interval_ms=1000,
)
wait_for_condition(
self.check_num_replicas,
expected_num_replicas=0,
app_name=app_name,
deployment_name=deployment_name,
replica_state=ReplicaState.STOPPING,
timeout=20,
retry_interval_ms=1000,
)
ray.get(lifecycle_signal.send.remote(clear=True))
ray.get(request_signal.send.remote(clear=True))
# Decrease the target_capacity, and check that min_replicas is used
# to create the lower bound.
self.apply_config_and_check_status(
client,
target_capacity=10.0,
config=config,
app_name=app_name,
deployment_name=deployment_name,
expected_app_status=ApplicationStatus.DEPLOYING,
expected_deployment_status=DeploymentStatus.DOWNSCALING,
expected_deployment_status_trigger=(
DeploymentStatusTrigger.CONFIG_UPDATE_STARTED
),
)
self.unblock_replica_creation_and_deletion(lifecycle_signal, app_name)
self.check_num_replicas(
int(0.1 * min_replicas),
app_name,
deployment_name,
replica_state=ReplicaState.RUNNING,
)
self.check_num_replicas(
0, app_name, deployment_name, replica_state=ReplicaState.STOPPING
)
# Check that target_capacity * max_replicas is still the upper bound.
requests = []
handle = serve.get_app_handle(app_name)
for _ in range(max_replicas):
requests.append(handle.remote())
ray.get(lifecycle_signal.send.remote())
wait_for_condition(
self.check_num_replicas,
expected_num_replicas=int(0.1 * max_replicas),
app_name=app_name,
deployment_name=deployment_name,
controller_handle=client._controller,
timeout=25,
retry_interval_ms=2000,
)
# Clear requests and check that application scales down to
# target_capacity * min_replicas.
ray.get(request_signal.send.remote())
results = [request.result() for request in requests]
assert results == ["Hello world!"] * (max_replicas)
wait_for_condition(
self.check_num_replicas,
expected_num_replicas=int(0.1 * min_replicas),
app_name=app_name,
deployment_name=deployment_name,
controller_handle=client._controller,
timeout=25,
retry_interval_ms=2000,
)
ray.get(lifecycle_signal.send.remote(clear=True))
ray.get(request_signal.send.remote(clear=True))
# Scaling up to 100% target_capacity should make Serve use
# initial_replicas as lower bound.
self.apply_config_and_check_status(
client,
target_capacity=100.0,
config=config,
app_name=app_name,
deployment_name=deployment_name,
expected_app_status=ApplicationStatus.DEPLOYING,
expected_deployment_status=DeploymentStatus.UPSCALING,
expected_deployment_status_trigger=(
DeploymentStatusTrigger.CONFIG_UPDATE_STARTED
),
)
self.check_num_replicas(int(0.1 * min_replicas), app_name, deployment_name)
self.unblock_replica_creation_and_deletion(lifecycle_signal, app_name)
self.check_num_replicas(initial_replicas, app_name, deployment_name)
# Unsetting target_capacity should make Serve use
# min_replicas as lower bound. The current number of
# replicas is already a valid number, so the application stays
# RUNNING.
self.apply_config_and_check_status(
client,
target_capacity=None,
config=config,
app_name=app_name,
deployment_name=deployment_name,
expected_app_status=ApplicationStatus.RUNNING,
expected_deployment_status=DeploymentStatus.DOWNSCALING,
expected_deployment_status_trigger=(DeploymentStatusTrigger.AUTOSCALING),
)
self.unblock_replica_creation_and_deletion(lifecycle_signal, app_name)
wait_for_condition(
self.check_num_replicas,
expected_num_replicas=min_replicas,
app_name=app_name,
deployment_name=deployment_name,
)
# Send a signal so all replicas shut down quickly when the test finishes.
ray.get(request_signal.send.remote())
ray.get(lifecycle_signal.send.remote())
@serve.deployment(
ray_actor_options={"num_cpus": 0},
max_ongoing_requests=2,
graceful_shutdown_timeout_s=0,
)
| TestTargetCapacityUpdateAndServeStatus |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 541220,
"end": 549364
} | class ____(
DatumChannelMixin, core.FieldOrDatumDefWithConditionDatumDefstringnull
):
"""
ShapeDatum schema wrapper.
Parameters
----------
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
condition : dict, :class:`ConditionalValueDefstringnullExprRef`, :class:`ConditionalParameterValueDefstringnullExprRef`, :class:`ConditionalPredicateValueDefstringnullExprRef`, Sequence[dict, :class:`ConditionalValueDefstringnullExprRef`, :class:`ConditionalParameterValueDefstringnullExprRef`, :class:`ConditionalPredicateValueDefstringnullExprRef`]
One or more value definition(s) with `a parameter or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
datum : str, bool, dict, float, :class:`ExprRef`, :class:`DateTime`, :class:`RepeatRef`, :class:`PrimitiveValue`, None
A constant value in data domain.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`, Literal['quantitative', 'ordinal', 'temporal', 'nominal', 'geojson']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "shape"
@overload
def bandPosition(self, _: float, /) -> ShapeDatum: ...
@overload
def condition(
self,
*,
test: Optional[str | SchemaBase | Map] = Undefined,
value: Optional[str | Parameter | SchemaBase | Map | None] = Undefined,
) -> ShapeDatum: ...
@overload
def condition(
self,
*,
empty: Optional[bool] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
value: Optional[str | Parameter | SchemaBase | Map | None] = Undefined,
) -> ShapeDatum: ...
@overload
def condition(
self, _: list[core.ConditionalValueDefstringnullExprRef], /
) -> ShapeDatum: ...
@overload
def title(self, _: str | Sequence[str] | None, /) -> ShapeDatum: ...
@overload
def type(self, _: Type_T, /) -> ShapeDatum: ...
def __init__(
self,
datum,
bandPosition: Optional[float] = Undefined,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
**kwds,
):
super().__init__(
datum=datum,
bandPosition=bandPosition,
condition=condition,
title=title,
type=type,
**kwds,
)
@with_property_setters
| ShapeDatum |
python | doocs__leetcode | solution/1600-1699/1681.Minimum Incompatibility/Solution.py | {
"start": 0,
"end": 1187
} | class ____:
def minimumIncompatibility(self, nums: List[int], k: int) -> int:
n = len(nums)
m = n // k
g = [-1] * (1 << n)
for i in range(1, 1 << n):
if i.bit_count() != m:
continue
s = set()
mi, mx = 20, 0
for j, x in enumerate(nums):
if i >> j & 1:
if x in s:
break
s.add(x)
mi = min(mi, x)
mx = max(mx, x)
if len(s) == m:
g[i] = mx - mi
f = [inf] * (1 << n)
f[0] = 0
for i in range(1 << n):
if f[i] == inf:
continue
s = set()
mask = 0
for j, x in enumerate(nums):
if (i >> j & 1) == 0 and x not in s:
s.add(x)
mask |= 1 << j
if len(s) < m:
continue
j = mask
while j:
if g[j] != -1:
f[i | j] = min(f[i | j], f[i] + g[j])
j = (j - 1) & mask
return f[-1] if f[-1] != inf else -1
| Solution |
python | mlflow__mlflow | mlflow/gateway/schemas/chat.py | {
"start": 2660,
"end": 3007
} | class ____(ChatMessage, ResponseModel):
# Override the `tool_call_id` field to be excluded from the response.
# This is a band-aid solution to avoid exposing the tool_call_id in the response,
# while we use the same ChatMessage model for both request and response.
tool_call_id: str | None = Field(None, exclude=True)
| ResponseMessage |
python | kubernetes-client__python | kubernetes/client/models/v1_webhook_conversion.py | {
"start": 383,
"end": 5789
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'client_config': 'ApiextensionsV1WebhookClientConfig',
'conversion_review_versions': 'list[str]'
}
attribute_map = {
'client_config': 'clientConfig',
'conversion_review_versions': 'conversionReviewVersions'
}
def __init__(self, client_config=None, conversion_review_versions=None, local_vars_configuration=None): # noqa: E501
"""V1WebhookConversion - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._client_config = None
self._conversion_review_versions = None
self.discriminator = None
if client_config is not None:
self.client_config = client_config
self.conversion_review_versions = conversion_review_versions
@property
def client_config(self):
"""Gets the client_config of this V1WebhookConversion. # noqa: E501
:return: The client_config of this V1WebhookConversion. # noqa: E501
:rtype: ApiextensionsV1WebhookClientConfig
"""
return self._client_config
@client_config.setter
def client_config(self, client_config):
"""Sets the client_config of this V1WebhookConversion.
:param client_config: The client_config of this V1WebhookConversion. # noqa: E501
:type: ApiextensionsV1WebhookClientConfig
"""
self._client_config = client_config
@property
def conversion_review_versions(self):
"""Gets the conversion_review_versions of this V1WebhookConversion. # noqa: E501
conversionReviewVersions is an ordered list of preferred `ConversionReview` versions the Webhook expects. The API server will use the first version in the list which it supports. If none of the versions specified in this list are supported by API server, conversion will fail for the custom resource. If a persisted Webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail. # noqa: E501
:return: The conversion_review_versions of this V1WebhookConversion. # noqa: E501
:rtype: list[str]
"""
return self._conversion_review_versions
@conversion_review_versions.setter
def conversion_review_versions(self, conversion_review_versions):
"""Sets the conversion_review_versions of this V1WebhookConversion.
conversionReviewVersions is an ordered list of preferred `ConversionReview` versions the Webhook expects. The API server will use the first version in the list which it supports. If none of the versions specified in this list are supported by API server, conversion will fail for the custom resource. If a persisted Webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail. # noqa: E501
:param conversion_review_versions: The conversion_review_versions of this V1WebhookConversion. # noqa: E501
:type: list[str]
"""
if self.local_vars_configuration.client_side_validation and conversion_review_versions is None: # noqa: E501
raise ValueError("Invalid value for `conversion_review_versions`, must not be `None`") # noqa: E501
self._conversion_review_versions = conversion_review_versions
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1WebhookConversion):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1WebhookConversion):
return True
return self.to_dict() != other.to_dict()
| V1WebhookConversion |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowing7.py | {
"start": 59,
"end": 1915
} | class ____:
val: list[list[str | None]] = []
def func1(v1: list[complex | None]):
if v1[0] and v1[1]:
reveal_type(v1[0], expected_text="complex")
reveal_type(v1[1], expected_text="complex")
reveal_type(v1[2], expected_text="complex | None")
v1[0], v1[1] = None, None
reveal_type(v1[0], expected_text="None")
reveal_type(v1[1], expected_text="None")
v1[0], v1[1] = 1, 2
reveal_type(v1[0], expected_text="Literal[1]")
reveal_type(v1[1], expected_text="Literal[2]")
v1 = []
reveal_type(v1[0], expected_text="complex | None")
i = 1
if v1[i]:
reveal_type(v1[i], expected_text="complex | None")
foo = Foo()
if foo.val[0][2]:
reveal_type(foo.val[0][2], expected_text="str")
reveal_type(foo.val[1][2], expected_text="str | None")
foo.val = []
reveal_type(foo.val[0][2], expected_text="str | None")
if v1[-1]:
reveal_type(v1[-1], expected_text="complex")
def func2(v1: list[dict[str, str] | list[str]]):
if isinstance(v1[0], dict):
reveal_type(v1[0], expected_text="dict[str, str]")
reveal_type(v1[1], expected_text="dict[str, str] | list[str]")
if isinstance(v1[-1], list):
reveal_type(v1[-1], expected_text="list[str]")
def func3():
v1: dict[str, int] = {}
reveal_type(v1["x1"], expected_text="int")
v1["x1"] = 3
reveal_type(v1["x1"], expected_text="Literal[3]")
v1[f"x2"] = 5
reveal_type(v1["x2"], expected_text="int")
v1 = {}
reveal_type(v1["x1"], expected_text="int")
v2: dict[str, dict[str, int]] = {}
reveal_type(v2["y1"]["y2"], expected_text="int")
v2["y1"]["y2"] = 3
reveal_type(v2["y1"]["y2"], expected_text="Literal[3]")
v2["y1"] = {}
reveal_type(v2["y1"]["y2"], expected_text="int")
| Foo |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-youtube-transcript/llama_index/readers/youtube_transcript/base.py | {
"start": 354,
"end": 2303
} | class ____(BasePydanticReader):
"""Youtube Transcript reader."""
is_remote: bool = True
@classmethod
def class_name(cls) -> str:
"""Get the name identifier of the class."""
return "YoutubeTranscriptReader"
def load_data(
self,
ytlinks: List[str],
languages: Optional[List[str]] = ["en"],
**load_kwargs: Any,
) -> List[Document]:
"""
Load data from the input directory.
Args:
pages (List[str]): List of youtube links \
for which transcripts are to be read.
"""
results = []
for link in ytlinks:
video_id = self._extract_video_id(link)
if not video_id:
raise ValueError(
f"Supplied url {link} is not a supported youtube URL."
"Supported formats include:"
" youtube.com/watch?v=\\{video_id\\} "
"(with or without 'www.')\n"
" youtube.com/embed?v=\\{video_id\\} "
"(with or without 'www.')\n"
" youtu.be/{video_id\\} (never includes www subdomain)"
)
transcript_chunks = YouTubeTranscriptApi.get_transcript(
video_id, languages=languages
)
chunk_text = [chunk["text"] for chunk in transcript_chunks]
transcript = "\n".join(chunk_text)
results.append(
Document(
text=transcript, id_=video_id, extra_info={"video_id": video_id}
)
)
return results
@staticmethod
def _extract_video_id(yt_link) -> Optional[str]:
for pattern in YOUTUBE_URL_PATTERNS:
match = re.search(pattern, yt_link)
if match:
return match.group(1)
# return None if no match is found
return None
| YoutubeTranscriptReader |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 53126,
"end": 53853
} | class ____(DDLEventWCreateHarness, fixtures.TestBase):
__sparse_driver_backend__ = True
__only_on__ = "postgresql > 8.3"
creates_implicitly_with_table = False
drops_implicitly_with_table = False
requires_table_to_exist = False
@testing.fixture
def produce_subject(self):
return DOMAIN(
name="email",
data_type=Text,
check=r"VALUE ~ '[^@]+@[^@]+\.[^@]+'",
)
@testing.fixture
def produce_table_integrated_subject(self, metadata, produce_subject):
return Table(
"table",
metadata,
Column("id", Integer, primary_key=True),
Column("email", produce_subject),
)
| DomainDDLEventTest |
python | walkccc__LeetCode | solutions/933. Number of Recent Calls/933.py | {
"start": 0,
"end": 209
} | class ____:
def __init__(self):
self.q = collections.deque()
def ping(self, t: int) -> int:
self.q.append(t)
while self.q[0] < t - 3000:
self.q.popleft()
return len(self.q)
| RecentCounter |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 210444,
"end": 211477
} | class ____(TestCase):
def test_simple(self):
iterable = [0, 1, 2]
actual = list(mi.powerset_of_sets(iterable))
expected = [set(), {0}, {1}, {2}, {0, 1}, {0, 2}, {1, 2}, {0, 1, 2}]
self.assertEqual(actual, expected)
def test_hash_count(self):
hash_count = 0
class Str(str):
def __hash__(true_self):
nonlocal hash_count
hash_count += 1
return super.__hash__(true_self)
iterable = map(Str, 'ABBBCDD')
self.assertEqual(len(list(mi.powerset_of_sets(iterable))), 128)
self.assertLessEqual(hash_count, 14)
def test_baseset(self):
iterable = [0, 1, 2]
for kind in (set, frozenset):
ps = list(mi.powerset_of_sets(iterable, baseset=kind))
self.assertEqual(set(map(type, ps)), {kind})
# Verify that an actual set can be formed.
ps = set(mi.powerset_of_sets('abc', baseset=frozenset))
self.assertIn({'a', 'b'}, ps)
| PowersetOfSetsTests |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/characteristics.py | {
"start": 499,
"end": 2953
} | class ____(abc.ABC):
"""An abstract base for an object that can set, get and reset a
per-connection characteristic, typically one that gets reset when the
connection is returned to the connection pool.
transaction isolation is the canonical example, and the
``IsolationLevelCharacteristic`` implementation provides this for the
``DefaultDialect``.
The ``ConnectionCharacteristic`` class should call upon the ``Dialect`` for
the implementation of each method. The object exists strictly to serve as
a dialect visitor that can be placed into the
``DefaultDialect.connection_characteristics`` dictionary where it will take
effect for calls to :meth:`_engine.Connection.execution_options` and
related APIs.
.. versionadded:: 1.4
"""
__slots__ = ()
transactional: ClassVar[bool] = False
@abc.abstractmethod
def reset_characteristic(
self, dialect: Dialect, dbapi_conn: DBAPIConnection
) -> None:
"""Reset the characteristic on the DBAPI connection to its default
value."""
@abc.abstractmethod
def set_characteristic(
self, dialect: Dialect, dbapi_conn: DBAPIConnection, value: Any
) -> None:
"""set characteristic on the DBAPI connection to a given value."""
def set_connection_characteristic(
self,
dialect: Dialect,
conn: Connection,
dbapi_conn: DBAPIConnection,
value: Any,
) -> None:
"""set characteristic on the :class:`_engine.Connection` to a given
value.
.. versionadded:: 2.0.30 - added to support elements that are local
to the :class:`_engine.Connection` itself.
"""
self.set_characteristic(dialect, dbapi_conn, value)
@abc.abstractmethod
def get_characteristic(
self, dialect: Dialect, dbapi_conn: DBAPIConnection
) -> Any:
"""Given a DBAPI connection, get the current value of the
characteristic.
"""
def get_connection_characteristic(
self, dialect: Dialect, conn: Connection, dbapi_conn: DBAPIConnection
) -> Any:
"""Given a :class:`_engine.Connection`, get the current value of the
characteristic.
.. versionadded:: 2.0.30 - added to support elements that are local
to the :class:`_engine.Connection` itself.
"""
return self.get_characteristic(dialect, dbapi_conn)
| ConnectionCharacteristic |
python | getsentry__sentry | src/sentry/codecov/endpoints/repository_tokens/serializers.py | {
"start": 409,
"end": 2389
} | class ____(serializers.Serializer):
"""
Serializer for repository tokens response
"""
results = RepositoryTokenNodeSerializer(many=True)
pageInfo = PageInfoSerializer()
totalCount = serializers.IntegerField()
def to_representation(self, graphql_response):
"""
Transform the GraphQL response to the serialized format
"""
try:
repository_tokens_data = graphql_response["data"]["owner"]["repositories"]
repository_tokens = repository_tokens_data["edges"]
page_info = repository_tokens_data.get("pageInfo", {})
nodes = []
for edge in repository_tokens:
node = edge["node"]
nodes.append(node)
response_data = {
"results": nodes,
"pageInfo": repository_tokens_data.get(
"pageInfo",
{
"hasNextPage": page_info.get("hasNextPage", False),
"hasPreviousPage": page_info.get("hasPreviousPage", False),
"startCursor": page_info.get("startCursor"),
"endCursor": page_info.get("endCursor"),
},
),
"totalCount": repository_tokens_data.get("totalCount", len(nodes)),
}
return super().to_representation(response_data)
except (KeyError, TypeError) as e:
sentry_sdk.capture_exception(e)
logger.exception(
"Error parsing GraphQL response",
extra={
"error": str(e),
"endpoint": "repository-tokens",
"response_keys": (
list(graphql_response.keys())
if isinstance(graphql_response, dict)
else None
),
},
)
raise
| RepositoryTokensSerializer |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_hitl.py | {
"start": 15203,
"end": 17025
} | class ____:
@pytest.mark.usefixtures("sample_hitl_detail")
def test_should_respond_200_with_existing_response(
self,
test_client: TestClient,
sample_ti_url_identifier: str,
expected_sample_hitl_detail_dict: dict[str, Any],
) -> None:
response = test_client.get(f"{sample_ti_url_identifier}/hitlDetails")
assert response.status_code == 200
assert response.json() == expected_sample_hitl_detail_dict
def test_should_respond_401(
self,
unauthenticated_test_client: TestClient,
sample_ti_url_identifier: str,
) -> None:
response = unauthenticated_test_client.get(f"{sample_ti_url_identifier}/hitlDetails")
assert response.status_code == 401
def test_should_respond_403(
self,
unauthorized_test_client: TestClient,
sample_ti_url_identifier: str,
) -> None:
response = unauthorized_test_client.get(f"{sample_ti_url_identifier}/hitlDetails")
assert response.status_code == 403
def test_should_respond_404_without_ti(
self,
test_client: TestClient,
sample_ti_url_identifier: str,
) -> None:
response = test_client.get(f"{sample_ti_url_identifier}/hitlDetails")
assert response.status_code == 404
assert response.json() == {"detail": expected_ti_not_found_error_msg}
def test_should_respond_404_without_hitl_detail(
self,
test_client: TestClient,
sample_ti_url_identifier: str,
expected_hitl_detail_not_found_error_msg: str,
) -> None:
response = test_client.get(f"{sample_ti_url_identifier}/hitlDetails")
assert response.status_code == 404
assert response.json() == {"detail": expected_hitl_detail_not_found_error_msg}
| TestGetHITLDetailEndpoint |
python | getsentry__sentry | tests/sentry/deletions/test_detector.py | {
"start": 614,
"end": 6114
} | class ____(BaseWorkflowTest, HybridCloudTestMixin):
def setUp(self) -> None:
self.data_condition_group = self.create_data_condition_group()
self.data_condition = self.create_data_condition(condition_group=self.data_condition_group)
self.snuba_query = self.create_snuba_query()
self.subscription = QuerySubscription.objects.create(
project=self.project,
status=QuerySubscription.Status.ACTIVE.value,
subscription_id="123",
snuba_query=self.snuba_query,
)
self.data_source = self.create_data_source(
organization=self.organization, source_id=self.subscription.id
)
self.detector = self.create_detector(
project_id=self.project.id,
name="Test Detector",
type=MetricIssue.slug,
workflow_condition_group=self.data_condition_group,
)
self.workflow = self.create_workflow()
self.data_source_detector = self.create_data_source_detector(
data_source=self.data_source, detector=self.detector
)
self.detector_workflow = DetectorWorkflow.objects.create(
detector=self.detector, workflow=self.workflow
)
self.detector.status = ObjectStatus.PENDING_DELETION
self.detector.save()
def test_simple(self) -> None:
self.ScheduledDeletion.schedule(instance=self.detector, days=0)
with self.tasks():
run_scheduled_deletions()
assert not Detector.objects.filter(id=self.detector.id).exists()
assert not DataSourceDetector.objects.filter(id=self.data_source_detector.id).exists()
assert not DetectorWorkflow.objects.filter(id=self.detector_workflow.id).exists()
assert not DataConditionGroup.objects.filter(id=self.data_condition_group.id).exists()
assert not DataCondition.objects.filter(id=self.data_condition.id).exists()
assert not DataSource.objects.filter(id=self.data_source.id).exists()
assert not QuerySubscription.objects.filter(id=self.subscription.id).exists()
assert not SnubaQuery.objects.filter(id=self.snuba_query.id).exists()
def test_multiple_data_sources(self) -> None:
snuba_query_2 = self.create_snuba_query()
subscription_2 = QuerySubscription.objects.create(
project=self.project,
status=QuerySubscription.Status.ACTIVE.value,
subscription_id="456",
snuba_query=snuba_query_2,
)
data_source_2 = self.create_data_source(
organization=self.organization, source_id=subscription_2.id
)
data_source_detector_2 = self.create_data_source_detector(
data_source=data_source_2, detector=self.detector
)
self.ScheduledDeletion.schedule(instance=self.detector, days=0)
with self.tasks():
run_scheduled_deletions()
assert not Detector.objects.filter(id=self.detector.id).exists()
assert not DataSourceDetector.objects.filter(
id__in=[self.data_source_detector.id, data_source_detector_2.id]
).exists()
assert not DetectorWorkflow.objects.filter(id=self.detector_workflow.id).exists()
assert not DataConditionGroup.objects.filter(id=self.data_condition_group.id).exists()
assert not DataCondition.objects.filter(id=self.data_condition.id).exists()
assert not DataSource.objects.filter(
id__in=[self.data_source.id, data_source_2.id]
).exists()
assert not QuerySubscription.objects.filter(
id__in=[self.subscription.id, subscription_2.id]
).exists()
assert not SnubaQuery.objects.filter(
id__in=[self.snuba_query.id, snuba_query_2.id]
).exists()
def test_data_source_not_deleted(self) -> None:
"""
Test that we do not delete a DataSource that is connected to another Detector
"""
detector_2 = self.create_detector(
project_id=self.project.id,
name="Testy Detector",
type=MetricIssue.slug,
)
data_source_detector_2 = self.create_data_source_detector(
data_source=self.data_source, detector=detector_2
)
self.ScheduledDeletion.schedule(instance=self.detector, days=0)
with self.tasks():
run_scheduled_deletions()
assert not Detector.objects.filter(id=self.detector.id).exists()
assert not DataSourceDetector.objects.filter(id=self.data_source_detector.id).exists()
assert not DetectorWorkflow.objects.filter(id=self.detector_workflow.id).exists()
assert not DataConditionGroup.objects.filter(id=self.data_condition_group.id).exists()
assert not DataCondition.objects.filter(id=self.data_condition.id).exists()
assert DataSource.objects.filter(id=self.data_source.id).exists()
assert DataSourceDetector.objects.filter(id=data_source_detector_2.id).exists()
def test_delete_uptime_detector(self) -> None:
detector = self.create_uptime_detector()
uptime_sub = get_uptime_subscription(detector)
self.ScheduledDeletion.schedule(instance=detector, days=0)
with self.tasks():
run_scheduled_deletions()
with pytest.raises(Detector.DoesNotExist):
detector.refresh_from_db()
with pytest.raises(UptimeSubscription.DoesNotExist):
uptime_sub.refresh_from_db()
| DeleteDetectorTest |
python | ray-project__ray | python/ray/tune/tests/test_searchers.py | {
"start": 902,
"end": 10914
} | class ____(unittest.TestCase):
"""
Test searcher handling of invalid values (NaN, -inf, inf).
Implicitly tests automatic config conversion and default (anonymous)
mode handling.
Also tests that searcher save doesn't throw any errors during
experiment checkpointing.
"""
def setUp(self):
self.config = {"report": tune.uniform(0.0, 5.0), "list": [1, 2, 3], "num": 4}
def tearDown(self):
pass
@classmethod
def setUpClass(cls):
ray.init(num_cpus=4, num_gpus=0, include_dashboard=False)
@classmethod
def tearDownClass(cls):
ray.shutdown()
def assertCorrectExperimentOutput(self, analysis):
best_trial = analysis.best_trial
self.assertLessEqual(best_trial.config["report"], 2.0)
# Make sure that constant parameters aren't lost
# Hyperopt converts lists to tuples, so check for either
self.assertIn(best_trial.config["list"], ([1, 2, 3], (1, 2, 3)))
self.assertEqual(best_trial.config["num"], 4)
@contextlib.contextmanager
def check_searcher_checkpoint_errors_scope(self):
buffer = []
from ray.tune.execution.tune_controller import logger
with patch.object(logger, "warning", lambda x: buffer.append(x)):
yield
assert not any(
"Experiment state snapshotting failed: Can't pickle local object" in x
for x in buffer
), "Searcher checkpointing failed (unable to serialize)."
def testAxManualSetup(self):
from ax.service.ax_client import AxClient
from ray.tune.search.ax import AxSearch
config = self.config.copy()
config["mixed_list"] = [1, tune.uniform(2, 3), 4]
converted_config = AxSearch.convert_search_space(config)
# At least one nan, inf, -inf and float
client = AxClient(random_seed=4321)
client.create_experiment(
parameters=converted_config, objective_name="_metric", minimize=False
)
searcher = AxSearch(ax_client=client)
out = tune.run(
_invalid_objective,
search_alg=searcher,
metric="_metric",
mode="max",
num_samples=4,
reuse_actors=False,
)
self.assertCorrectExperimentOutput(out)
self.assertEqual(out.best_trial.config["mixed_list"][0], 1)
self.assertGreaterEqual(out.best_trial.config["mixed_list"][1], 2)
self.assertLess(out.best_trial.config["mixed_list"][1], 3)
self.assertEqual(out.best_trial.config["mixed_list"][2], 4)
def testAx(self):
from ray.tune.search.ax import AxSearch
searcher = ConcurrencyLimiter(AxSearch(random_seed=4321), max_concurrent=2)
with self.check_searcher_checkpoint_errors_scope():
# Make sure enough samples are used so that Ax actually fits a model
# for config suggestion
out = tune.run(
_invalid_objective,
search_alg=searcher,
metric="_metric",
mode="max",
num_samples=16,
reuse_actors=False,
config=self.config,
)
self.assertCorrectExperimentOutput(out)
def testBayesOpt(self):
from ray.tune.search.bayesopt import BayesOptSearch
with self.check_searcher_checkpoint_errors_scope():
out = tune.run(
_invalid_objective,
# At least one nan, inf, -inf and float
search_alg=BayesOptSearch(random_state=1234),
config=self.config,
metric="_metric",
mode="max",
num_samples=8,
reuse_actors=False,
)
self.assertCorrectExperimentOutput(out)
@pytest.mark.skipif(
sys.version_info >= (3, 12),
reason="BOHB not yet supported for python 3.12+",
)
def testBOHB(self):
from ray.tune.search.bohb import TuneBOHB
with self.check_searcher_checkpoint_errors_scope():
out = tune.run(
_invalid_objective,
search_alg=TuneBOHB(seed=1000),
config=self.config,
metric="_metric",
mode="max",
num_samples=8,
reuse_actors=False,
)
self.assertCorrectExperimentOutput(out)
@pytest.mark.skipif(
sys.version_info >= (3, 12), reason="HEBO doesn't support py312"
)
def testHEBO(self):
if Version(pandas.__version__) >= Version("2.0.0"):
pytest.skip("HEBO does not support pandas>=2.0.0")
from ray.tune.search.hebo import HEBOSearch
with self.check_searcher_checkpoint_errors_scope():
out = tune.run(
_invalid_objective,
# At least one nan, inf, -inf and float
search_alg=HEBOSearch(random_state_seed=123),
config=self.config,
metric="_metric",
mode="max",
num_samples=8,
reuse_actors=False,
)
self.assertCorrectExperimentOutput(out)
def testHyperopt(self):
from ray.tune.search.hyperopt import HyperOptSearch
with self.check_searcher_checkpoint_errors_scope():
out = tune.run(
_invalid_objective,
# At least one nan, inf, -inf and float
search_alg=HyperOptSearch(random_state_seed=1234),
config=self.config,
metric="_metric",
mode="max",
num_samples=8,
reuse_actors=False,
)
self.assertCorrectExperimentOutput(out)
def testNevergrad(self):
import nevergrad as ng
from ray.tune.search.nevergrad import NevergradSearch
np.random.seed(2020) # At least one nan, inf, -inf and float
with self.check_searcher_checkpoint_errors_scope():
out = tune.run(
_invalid_objective,
search_alg=NevergradSearch(optimizer=ng.optimizers.RandomSearch),
config=self.config,
mode="max",
num_samples=16,
reuse_actors=False,
)
self.assertCorrectExperimentOutput(out)
def testNevergradWithRequiredOptimizerKwargs(self):
import nevergrad as ng
from ray.tune.search.nevergrad import NevergradSearch
NevergradSearch(optimizer=ng.optimizers.CM, optimizer_kwargs=dict(budget=16))
def testOptuna(self):
from optuna.samplers import RandomSampler
from ray.tune.search.optuna import OptunaSearch
np.random.seed(1000) # At least one nan, inf, -inf and float
with self.check_searcher_checkpoint_errors_scope():
out = tune.run(
_invalid_objective,
search_alg=OptunaSearch(sampler=RandomSampler(seed=1234), storage=None),
config=self.config,
metric="_metric",
mode="max",
num_samples=8,
reuse_actors=False,
)
self.assertCorrectExperimentOutput(out)
def testOptunaWithStorage(self):
from optuna.samplers import RandomSampler
from optuna.storages import JournalStorage
from optuna.storages.journal import JournalFileBackend
from ray.tune.search.optuna import OptunaSearch
np.random.seed(1000) # At least one nan, inf, -inf and float
storage_file_path = "/tmp/my_test_study.log"
with self.check_searcher_checkpoint_errors_scope():
out = tune.run(
_invalid_objective,
search_alg=OptunaSearch(
sampler=RandomSampler(seed=1234),
study_name="my_test_study",
storage=JournalStorage(
JournalFileBackend(file_path=storage_file_path)
),
),
config=self.config,
metric="_metric",
mode="max",
num_samples=8,
reuse_actors=False,
)
self.assertCorrectExperimentOutput(out)
self.assertTrue(os.path.exists(storage_file_path))
def testOptunaReportTooOften(self):
from optuna.samplers import RandomSampler
from ray.tune.search.optuna import OptunaSearch
searcher = OptunaSearch(
sampler=RandomSampler(seed=1234),
space=OptunaSearch.convert_search_space(self.config),
metric="metric",
mode="max",
)
searcher.suggest("trial_1")
searcher.on_trial_result("trial_1", {"training_iteration": 1, "metric": 1})
searcher.on_trial_complete("trial_1", {"training_iteration": 2, "metric": 1})
# Report after complete should not fail
searcher.on_trial_result("trial_1", {"training_iteration": 3, "metric": 1})
searcher.on_trial_complete("trial_1", {"training_iteration": 4, "metric": 1})
def testZOOpt(self):
self.skipTest(
"Recent ZOOpt versions fail handling invalid values gracefully. "
"Skipping until a fix is added in a future ZOOpt release."
)
from ray.tune.search.zoopt import ZOOptSearch
# This seed tests that a nan result doesn't cause an error if it shows
# up after the initial data collection phase.
np.random.seed(1002) # At least one nan, inf, -inf and float
with self.check_searcher_checkpoint_errors_scope():
out = tune.run(
_invalid_objective,
search_alg=ZOOptSearch(budget=25, parallel_num=4),
config=self.config,
metric="_metric",
mode="max",
num_samples=16,
reuse_actors=False,
)
self.assertCorrectExperimentOutput(out)
| InvalidValuesTest |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/nocover/test_conjecture_int_list.py | {
"start": 824,
"end": 1659
} | class ____(RuleBasedStateMachine):
@initialize(ls=st.lists(INTEGERS))
def starting_lists(self, ls):
self.model = list(ls)
self.target = IntList(ls)
@invariant()
def lists_are_equivalent(self):
if hasattr(self, "model"):
assert isinstance(self.model, list)
assert isinstance(self.target, IntList)
assert len(self.model) == len(self.target)
assert list(self.target) == self.model
@rule(n=INTEGERS)
def append(self, n):
self.model.append(n)
self.target.append(n)
@rule(i=valid_index())
def delete(self, i):
del self.model[i]
del self.target[i]
@rule(i=valid_index())
def agree_on_values(self, i):
assert self.model[i] == self.target[i]
TestIntList = IntListRules.TestCase
| IntListRules |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/context/invocation.py | {
"start": 7453,
"end": 32535
} | class ____(OpExecutionContext, BaseDirectExecutionContext):
"""The ``context`` object available as the first argument to an op's compute function when
being invoked directly. Can also be used as a context manager.
"""
def __init__(
self,
op_config: Any,
resources_dict: Mapping[str, Any],
resources_config: Mapping[str, Any],
instance: Optional[DagsterInstance],
partition_key: Optional[str],
partition_key_range: Optional[PartitionKeyRange],
mapping_key: Optional[str],
run_tags: Mapping[str, str],
event_loop: Optional[AbstractEventLoop],
):
from dagster._core.execution.api import ephemeral_instance_if_missing
from dagster._core.execution.context_creation_job import initialize_console_manager
self._op_config = op_config
self._mapping_key = mapping_key
self._exit_stack = ExitStack()
# Construct ephemeral instance if missing
self._instance = self._exit_stack.enter_context(ephemeral_instance_if_missing(instance))
self._resources_config = resources_config
# Open resource context manager
self._resources_contain_cm = False
self._resource_defs = wrap_resources_for_execution(resources_dict)
self._resources = self._exit_stack.enter_context(
build_resources(
resources=self._resource_defs,
instance=self._instance,
resource_config=resources_config,
event_loop=event_loop,
)
)
self._resources_contain_cm = isinstance(self._resources, IContainsGenerator)
self._log = initialize_console_manager(None)
self._pdb: Optional[ForkedPdb] = None
self._cm_scope_entered = False
check.invariant(
not (partition_key and partition_key_range),
"Must supply at most one of partition_key or partition_key_range",
)
self._partition_key = partition_key
self._partition_key_range = partition_key_range
self._run_tags = run_tags
self._event_loop = event_loop
# Maintains the properties on the context that are bound to a particular invocation
# of an op
# @op
# def my_op(context):
# # context._per_invocation_properties.alias is "my_op"
# ...
# ctx = build_op_context() # ctx._per_invocation_properties is None
# my_op(ctx)
# ctx._per_invocation_properties is None # ctx is unbound at the end of invocation
self._per_invocation_properties = None
# Maintains the properties on the context that are modified during invocation
# @op
# def my_op(context):
# # context._execution_properties can be modified with output metadata etc.
# ...
# ctx = build_op_context() # ctx._execution_properties is empty
# my_op(ctx)
# ctx._execution_properties.output_metadata # information is retained after invocation
# my_op(ctx) # ctx._execution_properties is cleared at the beginning of the next invocation
self._execution_properties = DirectExecutionProperties()
def __enter__(self):
self._cm_scope_entered = True
return self
def __exit__(self, *exc):
self._exit_stack.close()
def __del__(self):
self._exit_stack.close()
def _check_bound_to_invocation(self, fn_name: str, fn_type: str) -> PerInvocationProperties:
if self._per_invocation_properties is None:
raise DagsterInvalidPropertyError(_property_msg(fn_name, fn_type))
# return self._per_invocation_properties so that the calling function can access properties
# of self._per_invocation_properties without causing pyright errors
return self._per_invocation_properties
def bind( # pyright: ignore[reportIncompatibleMethodOverride]
self,
op_def: OpDefinition,
pending_invocation: Optional[PendingNodeInvocation[OpDefinition]],
assets_def: Optional[AssetsDefinition],
config_from_args: Optional[Mapping[str, Any]],
resources_from_args: Optional[Mapping[str, Any]],
) -> "DirectOpExecutionContext":
from dagster._core.definitions.resource_invocation import resolve_bound_config
if self._per_invocation_properties is not None:
raise DagsterInvalidInvocationError(
f"This context is currently being used to execute {self.alias}. The context cannot be used to execute another op until {self.alias} has finished executing."
)
# reset execution_properties
self._execution_properties = DirectExecutionProperties()
# update the bound context with properties relevant to the invocation of the op
invocation_tags = (
pending_invocation.tags
if isinstance(pending_invocation, PendingNodeInvocation)
else None
)
tags = merge_dicts(op_def.tags, invocation_tags) if invocation_tags else op_def.tags
hook_defs = (
pending_invocation.hook_defs
if isinstance(pending_invocation, PendingNodeInvocation)
else None
)
invocation_alias = (
pending_invocation.given_alias
if isinstance(pending_invocation, PendingNodeInvocation)
else None
)
alias = invocation_alias if invocation_alias else op_def.name
if resources_from_args:
if self._resource_defs:
raise DagsterInvalidInvocationError(
"Cannot provide resources in both context and kwargs"
)
resource_defs = wrap_resources_for_execution(resources_from_args)
# add new resources context to the stack to be cleared on exit
resources = self._exit_stack.enter_context(
build_resources(
resource_defs,
self.instance,
event_loop=self._event_loop,
)
)
elif assets_def and assets_def.resource_defs:
for key in sorted(list(assets_def.resource_defs.keys())):
if key in self._resource_defs:
raise DagsterInvalidInvocationError(
f"Error when invoking {assets_def!s} resource '{key}' "
"provided on both the definition and invocation context. Please "
"provide on only one or the other."
)
resource_defs = wrap_resources_for_execution(
{**self._resource_defs, **assets_def.resource_defs}
)
# add new resources context to the stack to be cleared on exit
resources = self._exit_stack.enter_context(
build_resources(
resource_defs,
self.instance,
self._resources_config,
event_loop=self._event_loop,
)
)
else:
# this runs the check in resources() to ensure we are in a context manager if necessary
resources = self.resources
resource_defs = self._resource_defs
_validate_resource_requirements(resource_defs, op_def)
if self._op_config and config_from_args:
raise DagsterInvalidInvocationError("Cannot provide config in both context and kwargs")
op_config = resolve_bound_config(config_from_args or self._op_config, op_def)
step_description = f'op "{op_def.name}"'
self._per_invocation_properties = PerInvocationProperties(
op_def=op_def,
tags=tags,
hook_defs=hook_defs,
alias=alias,
assets_def=assets_def,
resources=resources,
op_config=op_config,
step_description=step_description,
)
return self
def unbind(self):
self._per_invocation_properties = None
@property
def is_bound(self) -> bool:
return self._per_invocation_properties is not None
@property
def execution_properties(self) -> DirectExecutionProperties:
return self._execution_properties
@property
def per_invocation_properties(self) -> PerInvocationProperties:
return self._check_bound_to_invocation(
fn_name="_per_invocation_properties", fn_type="property"
)
@property
def op_config(self) -> Any:
if self._per_invocation_properties is None:
return self._op_config
return self._per_invocation_properties.op_config
@property
def resource_keys(self) -> AbstractSet[str]:
return self._resource_defs.keys()
@property
def resources(self) -> Resources:
if self._per_invocation_properties is not None:
return self._per_invocation_properties.resources
if self._resources_contain_cm and not self._cm_scope_entered:
raise DagsterInvariantViolationError(
"At least one provided resource is a generator, but attempting to access "
"resources outside of context manager scope. You can use the following syntax to "
"open a context manager: `with build_op_context(...) as context:`"
)
return self._resources
@property
def dagster_run(self) -> DagsterRun:
raise DagsterInvalidPropertyError(_property_msg("dagster_run", "property"))
@property
def instance(self) -> DagsterInstance:
return self._instance
@property
def pdb(self) -> ForkedPdb:
"""dagster.utils.forked_pdb.ForkedPdb: Gives access to pdb debugging from within the solid.
Example:
.. code-block:: python
@solid
def debug_solid(context):
context.pdb.set_trace()
"""
if self._pdb is None:
self._pdb = ForkedPdb()
return self._pdb
@property
def step_launcher(self) -> Optional[StepLauncher]:
raise DagsterInvalidPropertyError(_property_msg("step_launcher", "property"))
@property
def run_id(self) -> str:
"""str: Hard-coded value to indicate that we are directly invoking solid."""
return "EPHEMERAL"
@property
def run_config(self) -> dict:
per_invocation_properties = self._check_bound_to_invocation(
fn_name="run_config", fn_type="property"
)
run_config: dict[str, object] = {}
if self._op_config and per_invocation_properties.op_def:
run_config["ops"] = {
per_invocation_properties.op_def.name: {
"config": per_invocation_properties.op_config
}
}
run_config["resources"] = self._resources_config
return run_config
@property
def job_def(self) -> JobDefinition:
raise DagsterInvalidPropertyError(_property_msg("job_def", "property"))
@property
def repository_def(self) -> RepositoryDefinition:
raise DagsterInvalidPropertyError(_property_msg("repository_def", "property"))
@property
def job_name(self) -> str:
raise DagsterInvalidPropertyError(_property_msg("job_name", "property"))
@property
def log(self) -> DagsterLogManager:
"""DagsterLogManager: A console manager constructed for this context."""
return self._log
@property
def node_handle(self) -> NodeHandle:
raise DagsterInvalidPropertyError(_property_msg("node_handle", "property"))
@property
def op(self) -> Node:
raise DagsterInvalidPropertyError(_property_msg("op", "property"))
@property
def solid(self) -> Node:
raise DagsterInvalidPropertyError(_property_msg("solid", "property"))
@property
def op_def(self) -> OpDefinition:
per_invocation_properties = self._check_bound_to_invocation(
fn_name="op_def", fn_type="property"
)
return cast("OpDefinition", per_invocation_properties.op_def)
@property
def has_assets_def(self) -> bool:
per_invocation_properties = self._check_bound_to_invocation(
fn_name="has_assets_def", fn_type="property"
)
return per_invocation_properties.assets_def is not None
@property
def assets_def(self) -> AssetsDefinition:
per_invocation_properties = self._check_bound_to_invocation(
fn_name="assets_def", fn_type="property"
)
if per_invocation_properties.assets_def is None:
raise DagsterInvalidPropertyError(
f"Op {self.op_def.name} does not have an assets definition."
)
return per_invocation_properties.assets_def
@property
def has_partition_key(self) -> bool:
return self._partition_key is not None
@property
def partition_key(self) -> str:
if self._partition_key:
return self._partition_key
check.failed("Tried to access partition_key for a non-partitioned run")
@property
def has_partition_key_range(self) -> bool:
return self._partition_key_range is not None
@property
def partition_keys(self) -> Sequence[str]:
key_range = self.partition_key_range
partitions_def = self.assets_def.partitions_def
if partitions_def is None:
raise DagsterInvariantViolationError(
"Cannot access partition_keys for a non-partitioned run"
)
with partition_loading_context(dynamic_partitions_store=self.instance):
return partitions_def.get_partition_keys_in_range(key_range)
@property
def partition_key_range(self) -> PartitionKeyRange:
"""The range of partition keys for the current run.
If run is for a single partition key, return a `PartitionKeyRange` with the same start and
end. Raises an error if the current run is not a partitioned run.
"""
if self._partition_key_range:
return self._partition_key_range
elif self._partition_key:
return PartitionKeyRange(self._partition_key, self._partition_key)
else:
check.failed("Tried to access partition_key range for a non-partitioned run")
def asset_partition_key_for_output(self, output_name: str = "result") -> str:
return self.partition_key
def has_tag(self, key: str) -> bool:
return key in self._run_tags
def get_tag(self, key: str) -> Optional[str]:
return self._run_tags.get(key)
@property
def run_tags(self) -> Mapping[str, str]:
return self._run_tags
@property
def alias(self) -> str:
per_invocation_properties = self._check_bound_to_invocation(
fn_name="alias", fn_type="property"
)
return cast("str", per_invocation_properties.alias)
def get_step_execution_context(self) -> StepExecutionContext:
raise DagsterInvalidPropertyError(_property_msg("get_step_execution_context", "method"))
def get_events(self) -> Sequence[UserEvent]:
"""Retrieve the list of user-generated events that were logged via the context.
**Examples:**
.. code-block:: python
from dagster import op, build_op_context, AssetMaterialization, ExpectationResult
@op
def my_op(context):
...
def test_my_op():
context = build_op_context()
my_op(context)
all_user_events = context.get_events()
materializations = [event for event in all_user_events if isinstance(event, AssetMaterialization)]
expectation_results = [event for event in all_user_events if isinstance(event, ExpectationResult)]
...
"""
return self._execution_properties.user_events
def get_output_metadata(
self, output_name: str, mapping_key: Optional[str] = None
) -> Optional[Mapping[str, Any]]:
"""Retrieve metadata that was logged for an output and mapping_key, if it exists.
If metadata cannot be found for the particular output_name/mapping_key combination, None will be returned.
Args:
output_name (str): The name of the output to retrieve logged metadata for.
mapping_key (Optional[str]): The mapping key to retrieve metadata for (only applies when using dynamic outputs).
Returns:
Optional[Mapping[str, Any]]: The metadata values present for the output_name/mapping_key combination, if present.
"""
metadata = self._execution_properties.output_metadata.get(output_name)
if mapping_key and metadata:
return metadata.get(mapping_key)
return metadata
def get_mapping_key(self) -> Optional[str]:
return self._mapping_key
def for_type(self, dagster_type: DagsterType) -> TypeCheckContext:
self._check_bound_to_invocation(fn_name="for_type", fn_type="method")
resources = cast("NamedTuple", self.resources)
return TypeCheckContext(
self.run_id,
self.log,
ScopedResourcesBuilder(resources._asdict()),
dagster_type,
)
def describe_op(self) -> str:
per_invocation_properties = self._check_bound_to_invocation(
fn_name="describe_op", fn_type="method"
)
return per_invocation_properties.step_description
def log_event(self, event: UserEvent) -> None:
self._check_bound_to_invocation(fn_name="log_event", fn_type="method")
check.inst_param(
event,
"event",
(AssetMaterialization, AssetObservation, ExpectationResult),
)
self._execution_properties.user_events.append(event)
def observe_output(self, output_name: str, mapping_key: Optional[str] = None) -> None:
self._check_bound_to_invocation(fn_name="observe_output", fn_type="method")
if mapping_key:
if output_name not in self._execution_properties.seen_outputs:
self._execution_properties.seen_outputs[output_name] = set()
cast("set[str]", self._execution_properties.seen_outputs[output_name]).add(mapping_key)
else:
self._execution_properties.seen_outputs[output_name] = "seen"
def has_seen_output(self, output_name: str, mapping_key: Optional[str] = None) -> bool:
if mapping_key:
return (
output_name in self._execution_properties.seen_outputs
and mapping_key in self._execution_properties.seen_outputs[output_name]
)
return output_name in self._execution_properties.seen_outputs
def asset_partitions_time_window_for_output(self, output_name: str = "result") -> TimeWindow:
self._check_bound_to_invocation(
fn_name="asset_partitions_time_window_for_output", fn_type="method"
)
partitions_def = self.assets_def.partitions_def
if partitions_def is None:
check.failed("Tried to access partition_key for a non-partitioned asset")
if not has_one_dimension_time_window_partitioning(partitions_def=partitions_def):
raise DagsterInvariantViolationError(
"Expected a TimeWindowPartitionsDefinition or MultiPartitionsDefinition with a"
f" single time dimension, but instead found {type(partitions_def)}"
)
return cast(
"Union[MultiPartitionsDefinition, TimeWindowPartitionsDefinition]", partitions_def
).time_window_for_partition_key(self.partition_key)
@property
def partition_time_window(self) -> TimeWindow:
return self.asset_partitions_time_window_for_output()
def add_output_metadata(
self,
metadata: Mapping[str, Any],
output_name: Optional[str] = None,
mapping_key: Optional[str] = None,
) -> None:
"""Add metadata to one of the outputs of an op.
This can only be used once per output in the body of an op. Using this method with the same output_name more than once within an op will result in an error.
Args:
metadata (Mapping[str, Any]): The metadata to attach to the output
output_name (Optional[str]): The name of the output to attach metadata to. If there is only one output on the op, then this argument does not need to be provided. The metadata will automatically be attached to the only output.
**Examples:**
.. code-block:: python
from dagster import Out, op
from typing import Tuple
@op
def add_metadata(context):
context.add_output_metadata({"foo", "bar"})
return 5 # Since the default output is called "result", metadata will be attached to the output "result".
@op(out={"a": Out(), "b": Out()})
def add_metadata_two_outputs(context) -> Tuple[str, int]:
context.add_output_metadata({"foo": "bar"}, output_name="b")
context.add_output_metadata({"baz": "bat"}, output_name="a")
return ("dog", 5)
"""
self._check_bound_to_invocation(fn_name="add_output_metadata", fn_type="method")
metadata = check.mapping_param(metadata, "metadata", key_type=str)
output_name = check.opt_str_param(output_name, "output_name")
mapping_key = check.opt_str_param(mapping_key, "mapping_key")
if output_name is None and len(self.op_def.output_defs) == 1:
output_def = self.op_def.output_defs[0]
output_name = output_def.name
elif output_name is None:
raise DagsterInvariantViolationError(
"Attempted to log metadata without providing output_name, but multiple outputs"
" exist. Please provide an output_name to the invocation of"
" `context.add_output_metadata`."
)
else:
output_def = self.op_def.output_def_named(output_name)
if self.has_seen_output(output_name, mapping_key):
output_desc = (
f"output '{output_def.name}'"
if not mapping_key
else f"output '{output_def.name}' with mapping_key '{mapping_key}'"
)
raise DagsterInvariantViolationError(
f"In {self.op_def.node_type_str} '{self.op_def.name}', attempted to log output"
f" metadata for {output_desc} which has already been yielded. Metadata must be"
" logged before the output is yielded."
)
if output_def.is_dynamic and not mapping_key:
raise DagsterInvariantViolationError(
f"In {self.op_def.node_type_str} '{self.op_def.name}', attempted to log metadata"
f" for dynamic output '{output_def.name}' without providing a mapping key. When"
" logging metadata for a dynamic output, it is necessary to provide a mapping key."
)
output_name = output_def.name
if output_name in self._execution_properties.output_metadata:
if (
not mapping_key
or mapping_key in self._execution_properties.output_metadata[output_name]
):
raise DagsterInvariantViolationError(
f"In {self.op_def.node_type_str} '{self.op_def.name}', attempted to log"
f" metadata for output '{output_name}' more than once."
)
if mapping_key:
if output_name not in self._execution_properties.output_metadata:
self._execution_properties.output_metadata[output_name] = {}
self._execution_properties.output_metadata[output_name][mapping_key] = metadata
else:
self._execution_properties.output_metadata[output_name] = metadata # pyright: ignore[reportArgumentType]
# In bound mode no conversion is done on returned values and missing but expected outputs are not
# allowed.
@property
def requires_typed_event_stream(self) -> bool:
self._check_bound_to_invocation(fn_name="requires_typed_event_stream", fn_type="property")
return self._execution_properties.requires_typed_event_stream
@property
def typed_event_stream_error_message(self) -> Optional[str]:
self._check_bound_to_invocation(
fn_name="typed_event_stream_error_message", fn_type="property"
)
return self._execution_properties.typed_event_stream_error_message
def set_requires_typed_event_stream(self, *, error_message: Optional[str]) -> None: # pyright: ignore[reportIncompatibleMethodOverride]
self._check_bound_to_invocation(fn_name="set_requires_typed_event_stream", fn_type="method")
self._execution_properties.requires_typed_event_stream = True
self._execution_properties.typed_event_stream_error_message = error_message
| DirectOpExecutionContext |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/events/__init__.py | {
"start": 75028,
"end": 76189
} | class ____(
NamedTuple(
"_LoadedInputData",
[
("input_name", str),
("manager_key", str),
("upstream_output_name", Optional[str]),
("upstream_step_key", Optional[str]),
("metadata", Mapping[str, MetadataValue]),
],
)
):
def __new__(
cls,
input_name: str,
manager_key: str,
upstream_output_name: Optional[str] = None,
upstream_step_key: Optional[str] = None,
metadata: Optional[Mapping[str, MetadataValue]] = None,
):
return super().__new__(
cls,
input_name=check.str_param(input_name, "input_name"),
manager_key=check.str_param(manager_key, "manager_key"),
upstream_output_name=check.opt_str_param(upstream_output_name, "upstream_output_name"),
upstream_step_key=check.opt_str_param(upstream_step_key, "upstream_step_key"),
metadata=normalize_metadata(
check.opt_mapping_param(metadata, "metadata", key_type=str)
),
)
@whitelist_for_serdes(storage_field_names={"file_key": "log_key"})
| LoadedInputData |
python | plotly__plotly.py | plotly/graph_objs/sunburst/_textfont.py | {
"start": 233,
"end": 17124
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "sunburst"
_path_str = "sunburst.textfont"
_valid_props = {
"color",
"colorsrc",
"family",
"familysrc",
"lineposition",
"linepositionsrc",
"shadow",
"shadowsrc",
"size",
"sizesrc",
"style",
"stylesrc",
"textcase",
"textcasesrc",
"variant",
"variantsrc",
"weight",
"weightsrc",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def linepositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`lineposition`.
The 'linepositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["linepositionsrc"]
@linepositionsrc.setter
def linepositionsrc(self, val):
self["linepositionsrc"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def shadowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shadow`.
The 'shadowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shadowsrc"]
@shadowsrc.setter
def shadowsrc(self, val):
self["shadowsrc"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def stylesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `style`.
The 'stylesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stylesrc"]
@stylesrc.setter
def stylesrc(self, val):
self["stylesrc"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def textcasesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `textcase`.
The 'textcasesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textcasesrc"]
@textcasesrc.setter
def textcasesrc(self, val):
self["textcasesrc"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def variantsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `variant`.
The 'variantsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["variantsrc"]
@variantsrc.setter
def variantsrc(self, val):
self["variantsrc"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def weightsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `weight`.
The 'weightsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["weightsrc"]
@weightsrc.setter
def weightsrc(self, val):
self["weightsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
lineposition=None,
linepositionsrc=None,
shadow=None,
shadowsrc=None,
size=None,
sizesrc=None,
style=None,
stylesrc=None,
textcase=None,
textcasesrc=None,
variant=None,
variantsrc=None,
weight=None,
weightsrc=None,
**kwargs,
):
"""
Construct a new Textfont object
Sets the font used for `textinfo`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.sunburst.Textfont`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
Returns
-------
Textfont
"""
super().__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.sunburst.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sunburst.Textfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("family", arg, family)
self._set_property("familysrc", arg, familysrc)
self._set_property("lineposition", arg, lineposition)
self._set_property("linepositionsrc", arg, linepositionsrc)
self._set_property("shadow", arg, shadow)
self._set_property("shadowsrc", arg, shadowsrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("style", arg, style)
self._set_property("stylesrc", arg, stylesrc)
self._set_property("textcase", arg, textcase)
self._set_property("textcasesrc", arg, textcasesrc)
self._set_property("variant", arg, variant)
self._set_property("variantsrc", arg, variantsrc)
self._set_property("weight", arg, weight)
self._set_property("weightsrc", arg, weightsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Textfont |
python | pytorch__pytorch | torch/nn/modules/module.py | {
"start": 1315,
"end": 2345
} | class ____(
# pyrefly: ignore [invalid-inheritance]
namedtuple("IncompatibleKeys", ["missing_keys", "unexpected_keys"]),
):
__slots__ = ()
def __repr__(self) -> str:
# pyrefly: ignore [missing-attribute]
if not self.missing_keys and not self.unexpected_keys:
return "<All keys matched successfully>"
return super().__repr__()
__str__ = __repr__
def _addindent(s_, numSpaces):
s = s_.split("\n")
# don't do anything for single-line stuff
if len(s) == 1:
return s_
first = s.pop(0)
s = [(numSpaces * " ") + line for line in s]
s = "\n".join(s)
s = first + "\n" + s
return s
r"""This tracks hooks common to all modules that are executed immediately before
.registering the buffer/module/parameter"""
_global_buffer_registration_hooks: dict[int, Callable] = OrderedDict()
_global_module_registration_hooks: dict[int, Callable] = OrderedDict()
_global_parameter_registration_hooks: dict[int, Callable] = OrderedDict()
| _IncompatibleKeys |
python | huggingface__transformers | src/transformers/models/sew/modeling_sew.py | {
"start": 6738,
"end": 7685
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.projection = nn.Linear(config.hidden_size, config.hidden_size * config.squeeze_factor)
self.activation = ACT2FN[config.feat_extract_activation]
self.squeeze_factor = config.squeeze_factor
def forward(self, hidden_states):
hidden_states = self.projection(hidden_states)
hidden_states = self.activation(hidden_states)
if self.squeeze_factor > 1:
# transform embedding channels to sequence length
bsz, src_len, src_embed_dim = hidden_states.size()
tgt_len = src_len * self.squeeze_factor
tgt_embed_dim = src_embed_dim // self.squeeze_factor
hidden_states = hidden_states.reshape(bsz, src_len, self.squeeze_factor, tgt_embed_dim)
hidden_states = hidden_states.reshape(bsz, tgt_len, tgt_embed_dim)
return hidden_states
| SEWUpsampling |
python | google__jax | tests/logging_test.py | {
"start": 3247,
"end": 10239
} | class ____(jtu.JaxTestCase):
@unittest.skipIf(platform.system() == "Windows",
"Subprocess test doesn't work on Windows")
def test_no_log_spam(self):
if jtu.is_cloud_tpu() and xla_bridge._backends:
raise self.skipTest(
"test requires fresh process on Cloud TPU because only one process "
"can use the TPU at a time")
if sys.executable is None:
raise self.skipTest("test requires access to python binary")
o = _run("""
import jax
jax.device_count()
f = jax.jit(lambda x: x + 1)
f(1)
f(2)
jax.numpy.add(1, 1)
""")
lines = o.stdout.split("\n")
lines.extend(o.stderr.split("\n"))
allowlist = [
(
"An NVIDIA GPU may be present on this machine, but a"
" CUDA-enabled jaxlib is not installed. Falling back to cpu."
),
]
lines = [l for l in lines if l in allowlist]
self.assertEmpty(lines)
def test_debug_logging(self):
# Warmup so we don't get "No GPU/TPU" warning later.
jax.jit(lambda x: x + 1)(1)
# Nothing logged by default (except warning messages, which we don't expect
# here).
with capture_jax_logs() as log_output:
jax.jit(lambda x: x + 1)(1)
self.assertEmpty(log_output.getvalue())
# Turn on all debug logging.
with jax_debug_log_modules("jax"):
with capture_jax_logs() as log_output:
jax.jit(lambda x: x + 1)(1)
self.assertIn("Finished tracing + transforming", log_output.getvalue())
self.assertIn("Compiling jit(<lambda>)", log_output.getvalue())
# Turn off all debug logging.
with jax_debug_log_modules(""):
with capture_jax_logs() as log_output:
jax.jit(lambda x: x + 1)(1)
self.assertEmpty(log_output.getvalue())
# Turn on one module.
with jax_debug_log_modules("jax._src.dispatch"):
with capture_jax_logs() as log_output:
jax.jit(lambda x: x + 1)(1)
self.assertIn("Finished tracing + transforming", log_output.getvalue())
self.assertNotIn("Compiling jit(<lambda>)", log_output.getvalue())
# Turn everything off again.
with jax_debug_log_modules(""):
with capture_jax_logs() as log_output:
jax.jit(lambda x: x + 1)(1)
self.assertEmpty(log_output.getvalue())
@jtu.skip_on_devices("tpu")
@unittest.skipIf(platform.system() == "Windows",
"Subprocess test doesn't work on Windows")
def test_subprocess_stderr_info_logging(self):
if sys.executable is None:
raise self.skipTest("test requires access to python binary")
o = _run("""
import jax # this prints INFO logging from backend imports
jax.jit(lambda x: x)(1) # this prints logs to DEBUG (from compilation)
""", { "JAX_LOGGING_LEVEL": "INFO" })
log_output = o.stderr
info_lines = log_output.split("\n")
self.assertGreater(len(info_lines), 0)
self.assertIn("INFO", log_output)
self.assertNotIn("DEBUG", log_output)
@jtu.skip_on_devices("tpu")
@unittest.skipIf(platform.system() == "Windows",
"Subprocess test doesn't work on Windows")
def test_subprocess_stderr_debug_logging(self):
if sys.executable is None:
raise self.skipTest("test requires access to python binary")
program = """
import jax # this prints INFO logging from backend imports
jax.jit(lambda x: x)(1) # this prints logs to DEBUG (from compilation)
"""
o = _run(program, { "JAX_LOGGING_LEVEL": "DEBUG" })
log_output = o.stderr
self.assertIn("INFO", log_output)
self.assertIn("DEBUG", log_output)
o = _run(program, { "JAX_DEBUG_LOG_MODULES": "jax" })
log_output = o.stderr
self.assertIn("DEBUG", log_output)
@jtu.skip_on_devices("tpu")
@unittest.skipIf(platform.system() == "Windows",
"Subprocess test doesn't work on Windows")
def test_subprocess_toggling_logging_level(self):
if sys.executable is None:
raise self.skipTest("test requires access to python binary")
_separator = "---------------------------"
o = _run(f"""
import sys
import jax # this prints INFO logging from backend imports
jax.jit(lambda x: x)(1) # this prints logs to DEBUG (from compilation)
jax.config.update("jax_logging_level", None)
sys.stderr.write("{_separator}")
jax.jit(lambda x: x)(1) # should not log anything now
""", {"JAX_LOGGING_LEVEL": "DEBUG"})
log_output = o.stderr
m = re.search(_separator, log_output)
self.assertTrue(m is not None)
log_output_verbose = log_output[:m.start()]
log_output_silent = log_output[m.end():]
self.assertIn("Finished tracing + transforming <lambda> for pjit",
log_output_verbose)
self.assertEqual(log_output_silent, "")
@jtu.skip_on_devices("tpu")
@unittest.skipIf(platform.system() == "Windows",
"Subprocess test doesn't work on Windows")
def test_subprocess_double_logging_absent(self):
if sys.executable is None:
raise self.skipTest("test requires access to python binary")
o = _run("""
import jax # this prints INFO logging from backend imports
jax.config.update("jax_debug_log_modules", "jax._src.compiler,jax._src.dispatch")
jax.jit(lambda x: x)(1) # this prints logs to DEBUG (from compilation)
""", { "JAX_LOGGING_LEVEL": "DEBUG" })
log_output = o.stderr
self.assertNotEmpty(log_output)
log_lines = log_output.strip().split("\n")
# only one tracing line should be printed, if there's more than one
# then logs are printing duplicated
self.assertLen([line for line in log_lines
if "Finished tracing + transforming" in line], 1)
@jtu.skip_on_devices("tpu")
@unittest.skipIf(platform.system() == "Windows",
"Subprocess test doesn't work on Windows")
def test_subprocess_cpp_logging_level(self):
if sys.executable is None:
raise self.skipTest("test requires access to python binary")
program = """
import sys
import jax # this prints INFO logging from backend imports
jax.distributed.initialize("127.0.0.1:12345", num_processes=1, process_id=0)
"""
o = _run(program, { "JAX_LOGGING_LEVEL": "DEBUG" })
self.assertIn("Initializing CoordinationService", o.stderr)
o = _run(program, { "JAX_LOGGING_LEVEL": "INFO" })
self.assertIn("Initializing CoordinationService", o.stderr)
# verbose logging: WARNING, None
o = _run(program, { "JAX_LOGGING_LEVEL": "WARNING" })
self.assertNotIn("Initializing CoordinationService", o.stderr)
o = _run(program)
default_cpp_log_level = os.environ.get("TF_CPP_MIN_LOG_LEVEL")
if default_cpp_log_level is not None and int(default_cpp_log_level) >= 1:
self.assertNotIn("Initializing CoordinationService", o.stderr)
else:
self.assertIn("Initializing CoordinationService", o.stderr)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| LoggingTest |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/batch_prediction_job.py | {
"start": 18443,
"end": 21573
} | class ____(GoogleCloudBaseOperator):
"""
Deletes a BatchPredictionJob. Can only be called on jobs that already finished.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param batch_prediction_job_id: The ID of the BatchPredictionJob resource to be deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "project_id", "batch_prediction_job_id", "impersonation_chain")
def __init__(
self,
*,
region: str,
project_id: str,
batch_prediction_job_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.batch_prediction_job_id = batch_prediction_job_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = BatchPredictionJobHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
self.log.info("Deleting batch prediction job: %s", self.batch_prediction_job_id)
operation = hook.delete_batch_prediction_job(
project_id=self.project_id,
region=self.region,
batch_prediction_job=self.batch_prediction_job_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Batch prediction job was deleted.")
except NotFound:
self.log.info("The Batch prediction job %s does not exist.", self.batch_prediction_job_id)
| DeleteBatchPredictionJobOperator |
python | kamyu104__LeetCode-Solutions | Python/maximum-points-after-collecting-coins-from-all-nodes.py | {
"start": 1236,
"end": 2080
} | class ____(object):
def maximumPoints(self, edges, coins, k):
"""
:type edges: List[List[int]]
:type coins: List[int]
:type k: int
:rtype: int
"""
def memoization(u, p, d):
if d >= max_d:
return 0
if lookup[u][d] is None:
lookup[u][d] = max(((coins[u]>>d)-k)+sum(memoization(v, u, d) for v in adj[u] if v != p),
(coins[u]>>(d+1))+sum(memoization(v, u, d+1) for v in adj[u] if v != p))
return lookup[u][d]
adj = [[] for _ in xrange(len(coins))]
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
max_d = max(coins).bit_length()
lookup = [[None]*max_d for _ in xrange(len(coins))]
return memoization(0, -1, 0)
| Solution2 |
python | conda__conda | conda/exceptions.py | {
"start": 41826,
"end": 42131
} | class ____(CondaError):
def __init__(self, username: str, packagename: str, *args, **kwargs):
msg = f"{username}/{packagename} file not downloaded"
self.username = username
self.packagename = packagename
super().__init__(msg, *args, **kwargs)
| EnvironmentFileNotDownloaded |
python | pypa__setuptools | setuptools/_distutils/errors.py | {
"start": 2576,
"end": 2737
} | class ____(DistutilsError):
"""Any problems executing an external program (such as the C
compiler, when compiling C files)."""
pass
| DistutilsExecError |
python | jina-ai__jina | jina/helper.py | {
"start": 31016,
"end": 47294
} | class ____:
"""Class for cache invalidation, remove strategy.
:param func: func to wrap as a decorator.
:param attribute: String as the function name to invalidate cached
data. E.g. in :class:`cached_property` we cache data inside the class obj
with the `key`: `CACHED_{func.__name__}`, the func name in `cached_property`
is the name to invalidate.
"""
def __init__(self, func, attribute: str):
self.func = func
self.attribute = attribute
def __call__(self, *args, **kwargs):
obj = args[0]
cached_key = f'CACHED_{self.attribute}'
if cached_key in obj.__dict__:
del obj.__dict__[cached_key] # invalidate
self.func(*args, **kwargs)
def __get__(self, obj, cls):
from functools import partial
return partial(self.__call__, obj)
def cache_invalidate(attribute: str):
"""The cache invalidator decorator to wrap the method call.
Check the implementation in :class:`_cache_invalidate`.
:param attribute: The func name as was stored in the obj to invalidate.
:return: wrapped method.
"""
def _wrap(func):
return _cache_invalidate(func, attribute)
return _wrap
def get_now_timestamp():
"""
Get the datetime.
:return: The datetime in int format.
"""
now = datetime.now()
return int(datetime.timestamp(now))
def get_readable_time(*args, **kwargs):
"""
Get the datetime in human readable format (e.g. 115 days and 17 hours and 46 minutes and 40 seconds).
For example:
.. highlight:: python
.. code-block:: python
get_readable_time(seconds=1000)
:param args: arguments for datetime.timedelta
:param kwargs: key word arguments for datetime.timedelta
:return: Datetime in human readable format.
"""
import datetime
secs = float(datetime.timedelta(*args, **kwargs).total_seconds())
units = [('day', 86400), ('hour', 3600), ('minute', 60), ('second', 1)]
parts = []
for unit, mul in units:
if secs / mul >= 1 or mul == 1:
if mul > 1:
n = int(math.floor(secs / mul))
secs -= n * mul
else:
n = int(secs)
parts.append(f'{n} {unit}' + ('' if n == 1 else 's'))
return ' and '.join(parts)
def get_internal_ip():
"""
Return the private IP address of the gateway for connecting from other machine in the same network.
:return: Private IP address.
"""
import socket
ip = '127.0.0.1'
try:
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
ip = s.getsockname()[0]
except Exception:
pass
return ip
def get_public_ip(timeout: float = 0.3):
"""
Return the public IP address of the gateway for connecting from other machine in the public network.
:param timeout: the seconds to wait until return None.
:return: Public IP address.
.. warn::
Set `timeout` to a large number will block the Flow.
"""
import urllib.request
def _get_ip(url):
try:
req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req, timeout=timeout) as fp:
_ip = fp.read().decode().strip()
return _ip
except:
pass # intentionally ignored, public ip is not showed
ip_server_list = [
'https://api.ipify.org',
'https://ident.me',
'https://checkip.amazonaws.com/',
]
for idx, ip in enumerate(ip_server_list):
r = _get_ip(ip)
if r:
return r
def convert_tuple_to_list(d: Dict):
"""
Convert all the tuple type values from a dict to list.
:param d: Dict type of data.
"""
for k, v in d.items():
if isinstance(v, tuple):
d[k] = list(v)
elif isinstance(v, dict):
convert_tuple_to_list(v)
def is_jupyter() -> bool: # pragma: no cover
"""
Check if we're running in a Jupyter notebook, using magic command `get_ipython` that only available in Jupyter.
:return: True if run in a Jupyter notebook else False.
"""
try:
get_ipython # noqa: F821
except NameError:
return False
shell = get_ipython().__class__.__name__ # noqa: F821
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'Shell':
return True # Google colab
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
def iscoroutinefunction(func: Callable):
return inspect.iscoroutinefunction(func)
def run_async(func, *args, **kwargs):
"""Generalized asyncio.run for jupyter notebook.
When running inside jupyter, an eventloop already exists, can't be stopped, can't be killed.
Directly calling asyncio.run will fail, as This function cannot be called when another asyncio event loop
is running in the same thread.
.. see_also:
https://stackoverflow.com/questions/55409641/asyncio-run-cannot-be-called-from-a-running-event-loop
:param func: function to run
:param args: parameters
:param kwargs: key-value parameters
:return: asyncio.run(func)
"""
class _RunThread(threading.Thread):
"""Create a running thread when in Jupyter notebook."""
def run(self):
"""Run given `func` asynchronously."""
self.result = asyncio.run(func(*args, **kwargs))
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = None
if loop and loop.is_running():
# eventloop already exist
# running inside Jupyter
thread = _RunThread()
thread.start()
thread.join()
try:
return thread.result
except AttributeError:
from jina.excepts import BadClient
raise BadClient(
'something wrong when running the eventloop, result can not be retrieved'
)
else:
return asyncio.run(func(*args, **kwargs))
def slugify(value):
"""
Normalize string, converts to lowercase, removes non-alpha characters, and converts spaces to hyphens.
:param value: Original string.
:return: Processed string.
"""
s = str(value).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
def is_yaml_filepath(val) -> bool:
"""
Check if the file is YAML file.
:param val: Path of target file.
:return: True if the file is YAML else False.
"""
if __windows__:
r = r'.*.ya?ml$' # TODO: might not be exhaustive
else:
r = r'^[/\w\-\_\.]+.ya?ml$'
return re.match(r, val.strip()) is not None
def download_mermaid_url(mermaid_url, output) -> None:
    """
    Download the jpg image from mermaid_url.
    :param mermaid_url: The URL of the image.
    :param output: A filename specifying the name of the image to be created, the suffix svg/jpg determines the file type of the output image.
    """
    from urllib.request import Request, urlopen
    try:
        req = Request(mermaid_url, headers={'User-Agent': 'Mozilla/5.0'})
        # Close the HTTP response deterministically instead of leaking it.
        with urlopen(req) as resp, open(output, 'wb') as fp:
            fp.write(resp.read())
    except Exception:
        # Best-effort: narrow the former bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) to Exception, but keep it non-fatal.
        from jina.logging.predefined import default_logger
        default_logger.error(
            'can not download image, please check your graph and the network connections'
        )
def find_request_binding(target):
    """Find `@request` decorated methods in a class.
    :param target: the target class to check
    :return: a dictionary with key as request type and value as method name
    """
    import ast
    import inspect
    from jina.constants import __default_endpoint__
    res = {}
    def visit_function_def(node):
        # Inspect every decorator attached to this function definition node.
        for e in node.decorator_list:
            req_name = ''
            # NOTE(review): assumes the decorator callee is a plain name, so
            # `@mod.requests(...)` (an ast.Attribute) would raise AttributeError
            # on `e.func.id` — confirm all call sites use bare `requests`.
            if isinstance(e, ast.Call) and e.func.id == 'requests':
                # `@requests(on='/foo')`: endpoint is the first keyword's value.
                req_name = e.keywords[0].value.s
            elif isinstance(e, ast.Name) and e.id == 'requests':
                # Bare `@requests` binds the default endpoint.
                req_name = __default_endpoint__
            if req_name:
                if req_name in res:
                    # Two methods must not claim the same endpoint.
                    raise ValueError(
                        f'you already bind `{res[req_name]}` with `{req_name}` request'
                    )
                else:
                    res[req_name] = node.name
    # Patch a bare NodeVisitor so only FunctionDef nodes are handled.
    V = ast.NodeVisitor()
    V.visit_FunctionDef = visit_function_def
    V.visit(compile(inspect.getsource(target), '?', 'exec', ast.PyCF_ONLY_AST))
    return res
def dunder_get(_dict: Any, key: str) -> Any:
    """Returns value for a specified dunderkey
    A "dunderkey" is just a fieldname that may or may not contain
    double underscores (dunderscores!) for referencing nested keys in
    a dict. eg::
         >>> data = {'a': {'b': 1}}
         >>> dunder_get(data, 'a__b')
         1
    key 'b' can be referrenced as 'a__b'
    :param _dict : (dict, list, struct or object) which we want to index into
    :param key : (str) that represents a first level or nested key in the dict
    :return: (mixed) value corresponding to the key
    """
    try:
        part1, part2 = key.split('__', 1)
    except ValueError:
        # No dunder separator left: this is the final (or only) segment.
        part1, part2 = key, ''
    try:
        part1 = int(part1)  # parse int parameter
    except ValueError:
        pass
    from google.protobuf.struct_pb2 import ListValue, Struct
    if isinstance(part1, int):
        # Numeric segment: treat the container as positionally indexable.
        result = _dict[part1]
    elif isinstance(_dict, (dict, Struct, MutableMapping)):
        if part1 in _dict:
            result = _dict[part1]
        else:
            # Missing mapping keys resolve to None instead of raising.
            result = None
    elif isinstance(_dict, (Iterable, ListValue)):
        result = _dict[part1]
    else:
        # Fall back to attribute access for plain objects.
        result = getattr(_dict, part1)
    # Recurse on the remaining dunder path, if any.
    return dunder_get(result, part2) if part2 else result
if TYPE_CHECKING:  # pragma: no cover
    from fastapi import FastAPI
def extend_rest_interface(app: 'FastAPI') -> 'FastAPI':
    """Hook for customizing the Jina built-in FastAPI app (routes, middleware, ...).

    The default implementation is the identity function; override it to attach
    extra endpoints, e.g.:

    .. highlight:: python
    .. code-block:: python

        def extend_rest_interface(app: 'FastAPI'):
            @app.get('/extension1')
            async def root():
                return {"message": "Hello World"}
            return app

    :param app: the built-in FastAPI instance given by Jina
    :return: the extended FastAPI instance
    """
    return app
def get_ci_vendor() -> Optional[str]:
    """Detect which CI vendor (if any) the current process is running on.

    Matches the current environment variables against the vendor signatures
    shipped in ``ci-vendors.json``.

    :return: the matching vendor's constant name, or None when nothing matches
    """
    from jina.constants import __resources_path__
    with open(
        os.path.join(__resources_path__, 'ci-vendors.json'), encoding='utf-8'
    ) as fp:
        all_cis = json.load(fp)
    for c in all_cis:
        # Signature formats: a single env-var name, a {name: value} mapping,
        # or a list of env-var names (any one present counts as a match).
        if isinstance(c['env'], str) and c['env'] in os.environ:
            return c['constant']
        elif isinstance(c['env'], dict):
            for k, v in c['env'].items():
                if os.environ.get(k, None) == v:
                    return c['constant']
        elif isinstance(c['env'], list):
            for k in c['env']:
                if k in os.environ:
                    return c['constant']
def deprecate_by(new_fn):
    """Produce a deprecated drop-in replacement that forwards to *new_fn*.

    The returned wrapper emits a :class:`DeprecationWarning` naming the old
    binding (recovered from the caller's source line) and then delegates all
    arguments to *new_fn*.

    :param new_fn: the function that supersedes the deprecated one
    :return: a wrapper usable as ``old_name = deprecate_by(new_fn)``
    """
    def _f(*args, **kwargs):
        import inspect
        # Best-effort recovery of the name the caller used. `code_context`
        # (index 4 of the FrameInfo) is None when source is unavailable (REPL,
        # frozen apps), which previously crashed with TypeError.
        try:
            old_fn_name = inspect.stack()[1][4][0].strip().split("=")[0].strip()
        except (TypeError, IndexError, AttributeError):
            old_fn_name = 'this function'
        warnings.warn(
            f'`{old_fn_name}` is renamed to `{new_fn.__name__}` with the same usage, please use the latter instead. '
            f'The old function will be removed soon.',
            DeprecationWarning,
            stacklevel=2,  # attribute the warning to the caller, not the wrapper
        )
        return new_fn(*args, **kwargs)
    return _f
def get_request_header() -> Dict:
    """Return the header of request.
    :return: request header
    """
    metas, envs = get_full_version()
    # Version metadata is namespaced under `jinameta-`; env entries follow.
    header = {f'jinameta-{k}': str(v) for k, v in metas.items()}
    header.update(envs)
    return header
def get_rich_console():
    """
    Function to get jina rich default console.
    :return: rich console
    """
    # PyCharm's console is not auto-detected as a terminal; force it there.
    in_pycharm = 'PYCHARM_HOSTED' in os.environ
    # JINA_LOG_NO_COLOR disables color output entirely.
    no_color = 'JINA_LOG_NO_COLOR' in os.environ
    return Console(
        force_terminal=True if in_pycharm else None,
        color_system=None if no_color else 'auto',
    )
from jina.parsers import set_client_cli_parser
# Default client ports used when the host scheme does not carry an explicit
# port: plain connections use 80, TLS connections use 443.
__default_port_client__ = 80
__default_port_tls_client__ = 443
def parse_client(kwargs) -> Namespace:
    """
    Parse the kwargs for the Client
    :param kwargs: kwargs to be parsed
    :return: parsed argument.
    """
    # Expand a scheme-qualified host (e.g. grpcs://foo:443) into separate
    # host/port/protocol/tls entries before building the namespace.
    kwargs = _parse_kwargs(kwargs)
    args = ArgNamespace.kwargs2namespace(
        kwargs, set_client_cli_parser(), warn_unknown=True
    )
    if not args.port:
        # Fall back to the conventional defaults depending on TLS usage.
        args.port = (
            __default_port_client__ if not args.tls else __default_port_tls_client__
        )
    return args
def _parse_kwargs(kwargs: Dict[str, Any]) -> Dict[str, Any]:
    """Normalize client kwargs: split a scheme-qualified `host` into parts.

    :param kwargs: raw client keyword arguments
    :return: kwargs with `host` reduced to a hostname and any port/protocol/tls
        extracted from the host scheme merged in
    :raises ValueError: if a value appears both in the host scheme and as an
        explicit keyword argument
    """
    if 'host' in kwargs.keys():
        return_scheme = dict()
        (
            kwargs['host'],
            return_scheme['port'],
            return_scheme['protocol'],
            return_scheme['tls'],
        ) = parse_host_scheme(kwargs['host'])
        for key, value in return_scheme.items():
            if value:
                if key in kwargs:
                    raise ValueError(
                        f"You can't have two definitions of {key}: you have one in the host scheme and one in the keyword argument"
                    )
                else:
                    # Was written as `elif value:` nested under `if value:`,
                    # which is just an obfuscated `else:` — same behavior.
                    kwargs[key] = value
    kwargs = _delete_host_slash(kwargs)
    return kwargs
def _delete_host_slash(kwargs: Dict[str, Any]) -> Dict[str, Any]:
if 'host' in kwargs:
if kwargs['host'][-1] == '/':
kwargs['host'] = kwargs['host'][:-1]
return kwargs
def parse_host_scheme(host: str) -> Tuple[str, Optional[str], Optional[str], Optional[bool]]:
    """Split a possibly scheme-qualified host string into its components.

    :param host: host string, e.g. ``grpcs://api.example.com:443``
    :return: tuple of (hostname, port, protocol, tls); port/protocol/tls are
        None when the host string does not specify them
    """
    scheme, _hostname, port = _parse_url(host)
    tls = None
    if scheme in ('grpcs', 'https', 'wss'):
        # A secure scheme implies TLS; drop the trailing 's' for the protocol.
        scheme = scheme[:-1]
        tls = True
    if scheme == 'ws':
        scheme = 'websocket'
    return _hostname, port, scheme, tls
def _parse_url(host):
if '://' in host:
scheme, host = host.split('://')
else:
scheme = None
if ':' in host:
host, port = host.split(':')
else:
port = None
return scheme, host, port
def _single_port_free(host: str, port: int) -> bool:
with socket(AF_INET, SOCK_STREAM) as session:
if session.connect_ex((host, port)) == 0:
return False
else:
return True
def is_port_free(host: Union[str, List[str]], port: Union[int, List[int]]) -> bool:
    """Return True only when every (host, port) combination is free."""
    # Normalize both arguments to lists, then check the full cross product.
    hosts = [host] if isinstance(host, str) else host
    ports = port if isinstance(port, list) else [port]
    return all(_single_port_free(h, p) for h in hosts for p in ports)
def send_telemetry_event(event: str, obj_cls_name: Any, **kwargs) -> None:
    """Sends in a thread a request with telemetry for a given event
    :param event: Event leading to the telemetry entry
    :param obj_cls_name: Class name of the object to be tracked
    :param kwargs: Extra kwargs to be passed to the data sent
    """
    # Honor the opt-out switch before doing any work.
    if 'JINA_OPTOUT_TELEMETRY' in os.environ:
        return
    def _telemetry():
        url = 'https://telemetry.jina.ai/'
        try:
            from jina.helper import get_full_version
            metas, _ = get_full_version()
            # Payload: version metadata plus the event id, as base64-encoded JSON.
            data = base64.urlsafe_b64encode(
                json.dumps(
                    {**metas, 'event': f'{obj_cls_name}.{event}', **kwargs}
                ).encode('utf-8')
            )
            req = urllib.request.Request(
                url, data=data, headers={'User-Agent': 'Mozilla/5.0'}
            )
            urllib.request.urlopen(req)
        except:
            # Deliberately best-effort: telemetry must never break user code.
            pass
    # Daemon thread so a slow/unreachable endpoint cannot block interpreter exit.
    threading.Thread(target=_telemetry, daemon=True).start()
def is_generator(func) -> bool:
    """Check whether *func* is a generator or async-generator function."""
    import inspect
    if inspect.isgeneratorfunction(func):
        return True
    return inspect.isasyncgenfunction(func)
| _cache_invalidate |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_use_orig_params.py | {
"start": 54739,
"end": 55608
} | class ____(FSDPTest):
@skip_if_lt_x_gpu(2)
def test_non_uniform_requires_grad(self):
model = nn.Sequential(
nn.Linear(3, 3, device=device_type),
nn.Linear(3, 3, device=device_type),
)
# Freeze biases only and flatten both weights and biases into the same
# `FlatParameter` to exercise non-uniform `requires_grad`
model[0].bias.requires_grad = False
model[1].bias.requires_grad = False
fsdp_model = FSDP(model, use_orig_params=True)
self.assertTrue(fsdp_model[0].weight.requires_grad)
self.assertFalse(fsdp_model[0].bias.requires_grad)
self.assertTrue(fsdp_model[1].weight.requires_grad)
self.assertFalse(fsdp_model[1].bias.requires_grad)
# Define this to be large enough to trigger stack corruption
NUM_SIZE0_TENSORS = 1000
| TestFSDPUseOrigParamsInit |
python | Pylons__pyramid | tests/test_integration.py | {
"start": 19149,
"end": 20490
} | class ____(IntegrationBase, unittest.TestCase):
package = 'tests.pkgs.exceptionviewapp'
root_factory = lambda *arg: excroot
def test_root(self):
res = self.testapp.get('/', status=200)
self.assertTrue(b'maybe' in res.body)
def test_notanexception(self):
res = self.testapp.get('/notanexception', status=200)
self.assertTrue(b'no' in res.body)
def test_anexception(self):
res = self.testapp.get('/anexception', status=200)
self.assertTrue(b'yes' in res.body)
def test_route_raise_exception(self):
res = self.testapp.get('/route_raise_exception', status=200)
self.assertTrue(b'yes' in res.body)
def test_route_raise_exception2(self):
res = self.testapp.get('/route_raise_exception2', status=200)
self.assertTrue(b'yes' in res.body)
def test_route_raise_exception3(self):
res = self.testapp.get('/route_raise_exception3', status=200)
self.assertTrue(b'whoa' in res.body)
def test_route_raise_exception4(self):
res = self.testapp.get('/route_raise_exception4', status=200)
self.assertTrue(b'whoa' in res.body)
def test_raise_httpexception(self):
res = self.testapp.get('/route_raise_httpexception', status=200)
self.assertTrue(b'caught' in res.body)
| TestExceptionViewsApp |
python | django__django | django/test/testcases.py | {
"start": 62796,
"end": 63114
} | class ____(FSFilesHandler):
"""
Handler for serving the media files. A private class that is meant to be
used solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.MEDIA_ROOT
def get_base_url(self):
return settings.MEDIA_URL
| _MediaFilesHandler |
python | sanic-org__sanic | sanic/pages/css.py | {
"start": 525,
"end": 1105
} | class ____(ABCMeta):
"""Cascade stylesheets, i.e. combine all ancestor styles"""
def __new__(cls, name, bases, attrs):
Page = super().__new__(cls, name, bases, attrs)
# Use a locally defined STYLE or the one from styles directory
Page.STYLE = _extract_style(attrs.get("STYLE_FILE"), name)
Page.STYLE += attrs.get("STYLE_APPEND", "")
# Combine with all ancestor styles
Page.CSS = "".join(
Class.STYLE
for Class in reversed(Page.__mro__)
if type(Class) is CSS
)
return Page
| CSS |
python | marshmallow-code__marshmallow | tests/test_schema.py | {
"start": 73958,
"end": 75399
} | class ____:
class MySchema(Schema):
class Meta:
load_only = ("str_load_only",)
dump_only = ("str_dump_only",)
str_dump_only = fields.String()
str_load_only = fields.String()
str_regular = fields.String()
@pytest.fixture
def schema(self):
return self.MySchema()
@pytest.fixture
def data(self):
return dict(
str_dump_only="Dump Only",
str_load_only="Load Only",
str_regular="Regular String",
)
def test_load_only(self, schema, data):
result = schema.dump(data)
assert "str_load_only" not in result
assert "str_dump_only" in result
assert "str_regular" in result
def test_dump_only(self, schema, data):
result = schema.load(data, unknown=EXCLUDE)
assert "str_dump_only" not in result
assert "str_load_only" in result
assert "str_regular" in result
# regression test for https://github.com/marshmallow-code/marshmallow/pull/765
def test_url_field_requre_tld_false(self):
class NoTldTestSchema(Schema):
url = fields.Url(require_tld=False, schemes=["marshmallow"])
schema = NoTldTestSchema()
data_with_no_top_level_domain = {"url": "marshmallow://app/discounts"}
result = schema.load(data_with_no_top_level_domain)
assert result == data_with_no_top_level_domain
| TestLoadOnly |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/event_log/sqlite/sqlite_event_log.py | {
"start": 2372,
"end": 21684
} | class ____(SqlEventLogStorage, ConfigurableClass):
"""SQLite-backed event log storage.
Users should not directly instantiate this class; it is instantiated by internal machinery when
``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file insqliteve
``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.
This is the default event log storage when none is specified in the ``dagster.yaml``.
To explicitly specify SQLite for event log storage, you can add a block such as the following
to your ``dagster.yaml``:
.. code-block:: YAML
event_log_storage:
module: dagster._core.storage.event_log
class: SqliteEventLogStorage
config:
base_dir: /path/to/dir
The ``base_dir`` param tells the event log storage where on disk to store the databases. To
improve concurrent performance, event logs are stored in a separate SQLite database for each
run.
"""
def __init__(self, base_dir: str, inst_data: Optional[ConfigurableClassData] = None):
"""Note that idempotent initialization of the SQLite database is done on a per-run_id
basis in the body of connect, since each run is stored in a separate database.
"""
self._base_dir = os.path.abspath(check.str_param(base_dir, "base_dir"))
mkdir_p(self._base_dir)
self._obs = None
self._watchers = defaultdict(dict)
self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
# Used to ensure that each run ID attempts to initialize its DB the first time it connects,
# ensuring that the database will be created if it doesn't exist
self._initialized_dbs = set()
# Ensure that multiple threads (like the event log watcher) interact safely with each other
self._db_lock = threading.Lock()
if not os.path.exists(self.path_for_shard(INDEX_SHARD_NAME)):
conn_string = self.conn_string_for_shard(INDEX_SHARD_NAME)
engine = create_engine(conn_string, poolclass=NullPool)
self._initdb(engine, for_index_shard=True)
self.reindex_events()
self.reindex_assets()
super().__init__()
def upgrade(self) -> None:
all_run_ids = self.get_all_run_ids()
print(f"Updating event log storage for {len(all_run_ids)} runs on disk...") # noqa: T201
alembic_config = get_alembic_config(__file__)
if all_run_ids:
for run_id in tqdm(all_run_ids):
with self.run_connection(run_id) as conn:
run_alembic_upgrade(alembic_config, conn, run_id)
print("Updating event log storage for index db on disk...") # noqa: T201
with self.index_connection() as conn:
run_alembic_upgrade(alembic_config, conn, "index")
self._initialized_dbs = set()
@property
def inst_data(self) -> Optional[ConfigurableClassData]:
return self._inst_data
@classmethod
def config_type(cls) -> UserConfigSchema:
return {"base_dir": StringSource}
@classmethod
def from_config_value( # pyright: ignore[reportIncompatibleMethodOverride]
cls, inst_data: Optional[ConfigurableClassData], config_value: "SqliteStorageConfig"
) -> "SqliteEventLogStorage":
return SqliteEventLogStorage(inst_data=inst_data, **config_value)
def get_all_run_ids(self) -> Sequence[str]:
all_filenames = glob.glob(os.path.join(self._base_dir, "*.db"))
return [
os.path.splitext(os.path.basename(filename))[0]
for filename in all_filenames
if os.path.splitext(os.path.basename(filename))[0] != INDEX_SHARD_NAME
]
def has_table(self, table_name: str) -> bool:
conn_string = self.conn_string_for_shard(INDEX_SHARD_NAME)
engine = create_engine(conn_string, poolclass=NullPool)
with engine.connect() as conn:
return bool(engine.dialect.has_table(conn, table_name))
def path_for_shard(self, run_id: str) -> str:
return os.path.join(self._base_dir, f"{run_id}.db")
def conn_string_for_shard(self, shard_name: str) -> str:
check.str_param(shard_name, "shard_name")
return create_db_conn_string(self._base_dir, shard_name)
def _initdb(self, engine: Engine, for_index_shard=False) -> None:
alembic_config = get_alembic_config(__file__)
retry_limit = 10
while True:
try:
with engine.connect() as connection:
db_revision, head_revision = check_alembic_revision(alembic_config, connection)
if not (db_revision and head_revision):
table_names = db.inspect(engine).get_table_names()
if "event_logs" in table_names and for_index_shard:
# The event_log table exists but the alembic version table does not. This means that the SQLite db was
# initialized with SQLAlchemy 2.0 before https://github.com/dagster-io/dagster/pull/25740 was merged.
# We should pin the alembic revision to the last known stamped revision before we unpinned SQLAlchemy 2.0
# This should be safe because we have guarded all known migrations since then.
rev_to_stamp = LAST_KNOWN_STAMPED_SQLITE_ALEMBIC_REVISION
else:
rev_to_stamp = "head"
SqlEventLogStorageMetadata.create_all(engine)
connection.execute(db.text("PRAGMA journal_mode=WAL;"))
stamp_alembic_rev(alembic_config, connection, rev=rev_to_stamp)
safe_commit(connection)
break
except (db_exc.DatabaseError, sqlite3.DatabaseError, sqlite3.OperationalError) as exc:
# This is SQLite-specific handling for concurrency issues that can arise when
# multiple processes (e.g. the dagster-webserver process and user code process) contend with
# each other to init the db. When we hit the following errors, we know that another
# process is on the case and we should retry.
err_msg = str(exc)
if not (
re.search(r"table [A-Za-z_]* already exists", err_msg)
or "database is locked" in err_msg
or "UNIQUE constraint failed: alembic_version.version_num" in err_msg
):
raise
if retry_limit == 0:
raise
else:
logging.info(
"SqliteEventLogStorage._initdb: Encountered apparent concurrent init, "
"retrying (%s retries left). Exception: %s",
retry_limit,
err_msg,
)
time.sleep(0.2)
retry_limit -= 1
@contextmanager
def _connect(self, shard: str) -> Iterator[Connection]:
with self._db_lock:
check.str_param(shard, "shard")
conn_string = self.conn_string_for_shard(shard)
engine = create_engine(conn_string, poolclass=NullPool)
if shard not in self._initialized_dbs:
self._initdb(engine)
self._initialized_dbs.add(shard)
with engine.connect() as conn:
with conn.begin():
yield conn
engine.dispose()
def run_connection(self, run_id: Optional[str] = None) -> Any:
return self._connect(run_id) # type: ignore # bad sig
def index_connection(self) -> ContextManager[Connection]:
return self._connect(INDEX_SHARD_NAME)
def store_event(self, event: EventLogEntry) -> None:
"""Overridden method to replicate asset events in a central assets.db sqlite shard, enabling
cross-run asset queries.
Args:
event (EventLogEntry): The event to store.
"""
check.inst_param(event, "event", EventLogEntry)
insert_event_statement = self.prepare_insert_event(event)
run_id = event.run_id
with self.run_connection(run_id) as conn:
conn.execute(insert_event_statement)
if event.is_dagster_event and event.dagster_event_type in ASSET_EVENTS:
event_id = None
# mirror the event in the cross-run index database
with self.index_connection() as conn:
result = conn.execute(insert_event_statement)
event_id = result.inserted_primary_key[0]
self.store_asset_event(event, event_id)
if event_id is None:
raise DagsterInvariantViolationError(
"Cannot store asset event tags for null event id."
)
self.store_asset_event_tags([event], [event_id])
if event.is_dagster_event and event.dagster_event_type in ASSET_CHECK_EVENTS:
self.store_asset_check_event(event, None)
if event.is_dagster_event and event.dagster_event_type in EVENT_TYPE_TO_PIPELINE_RUN_STATUS:
# should mirror run status change events in the index shard
with self.index_connection() as conn:
conn.execute(insert_event_statement)
def get_event_records(
self,
event_records_filter: EventRecordsFilter,
limit: Optional[int] = None,
ascending: bool = False,
) -> Sequence[EventLogRecord]:
"""Overridden method to enable cross-run event queries in sqlite.
The record id in sqlite does not auto increment cross runs, so instead of fetching events
after record id, we only fetch events whose runs updated after update_timestamp.
"""
check.opt_inst_param(event_records_filter, "event_records_filter", EventRecordsFilter)
check.opt_int_param(limit, "limit")
check.bool_param(ascending, "ascending")
is_asset_query = event_records_filter and event_records_filter.event_type in ASSET_EVENTS
if is_asset_query:
# asset materializations, observations and materialization planned events
# get mirrored into the index shard, so no custom run shard-aware cursor logic needed
return super().get_event_records(
event_records_filter=event_records_filter, limit=limit, ascending=ascending
)
return self._get_run_sharded_event_records(
event_records_filter=event_records_filter, limit=limit, ascending=ascending
)
def _get_run_sharded_event_records(
self,
event_records_filter: EventRecordsFilter,
limit: Optional[int] = None,
ascending: bool = False,
) -> Sequence[EventLogRecord]:
query = db_select([SqlEventLogStorageTable.c.id, SqlEventLogStorageTable.c.event])
if event_records_filter.asset_key:
asset_details = next(iter(self._get_assets_details([event_records_filter.asset_key])))
else:
asset_details = None
if event_records_filter.after_cursor is not None and not isinstance(
event_records_filter.after_cursor, RunShardedEventsCursor
):
raise Exception(
"""
Called `get_event_records` on a run-sharded event log storage with a cursor that
is not run-aware. Add a RunShardedEventsCursor to your query filter
or switch your instance configuration to use a non-run-sharded event log storage
(e.g. PostgresEventLogStorage, ConsolidatedSqliteEventLogStorage)
"""
)
query = self._apply_filter_to_query(
query=query,
event_records_filter=event_records_filter,
asset_details=asset_details,
apply_cursor_filters=False, # run-sharded cursor filters don't really make sense
)
if limit:
query = query.limit(limit)
if ascending:
query = query.order_by(SqlEventLogStorageTable.c.timestamp.asc())
else:
query = query.order_by(SqlEventLogStorageTable.c.timestamp.desc())
# workaround for the run-shard sqlite to enable cross-run queries: get a list of run_ids
# whose events may qualify the query, and then open run_connection per run_id at a time.
run_updated_after = (
event_records_filter.after_cursor.run_updated_after
if isinstance(event_records_filter.after_cursor, RunShardedEventsCursor)
else None
)
run_records = self._instance.get_run_records(
filters=RunsFilter(updated_after=run_updated_after),
order_by="update_timestamp",
ascending=ascending,
)
def _get_event_records_for_run(run_id: str) -> Sequence[EventLogRecord]:
records = []
with self.run_connection(run_id) as conn:
results = conn.execute(query).fetchall()
for row_id, json_str in results:
try:
event_record = deserialize_value(json_str, EventLogEntry)
records.append(EventLogRecord(storage_id=row_id, event_log_entry=event_record))
if limit and len(records) >= limit:
return records
except DeserializationError:
logging.warning(
"Could not resolve event record as EventLogEntry for id `%s`.", row_id
)
except seven.JSONDecodeError:
logging.warning("Could not parse event record id `%s`.", row_id)
return records
event_records = []
for run_record in run_records:
run_id = run_record.dagster_run.run_id
event_records.extend(_get_event_records_for_run(run_id))
if limit and len(event_records) >= limit:
break
if not limit or len(event_records) < limit:
event_records.extend(_get_event_records_for_run(RUNLESS_RUN_ID))
return event_records[:limit]
def fetch_run_status_changes(
self,
records_filter: Union[DagsterEventType, RunStatusChangeRecordsFilter],
limit: int,
cursor: Optional[str] = None,
ascending: bool = False,
) -> EventRecordsResult:
# custom implementation of the run status change event query to only read from the index
# shard instead of from the run shards. This bypasses the default Sqlite implementation of
# the deprecated _get_event_records method, which reads from the run shards, opting for the
# super implementation instead, which reads from the index shard
event_type = (
records_filter
if isinstance(records_filter, DagsterEventType)
else records_filter.event_type
)
if event_type not in EVENT_TYPE_TO_PIPELINE_RUN_STATUS:
expected = ", ".join(EVENT_TYPE_TO_PIPELINE_RUN_STATUS.keys())
check.failed(f"Expected one of {expected}, received {event_type.value}")
before_cursor, after_cursor = EventRecordsFilter.get_cursor_params(cursor, ascending)
event_records_filter = (
records_filter.to_event_records_filter_without_job_names(cursor, ascending)
if isinstance(records_filter, RunStatusChangeRecordsFilter)
else EventRecordsFilter(
event_type, before_cursor=before_cursor, after_cursor=after_cursor
)
)
# bypass the run-sharded cursor logic... any caller of this run status change specific
# method should be reading from the index shard, which as of 1.5.0 contains mirrored run
# status change events
records = super().get_event_records(
event_records_filter=event_records_filter, limit=limit, ascending=ascending
)
if records:
new_cursor = EventLogCursor.from_storage_id(records[-1].storage_id).to_string()
elif cursor:
new_cursor = cursor
else:
new_cursor = EventLogCursor.from_storage_id(-1).to_string()
has_more = len(records) == limit
return EventRecordsResult(records, cursor=new_cursor, has_more=has_more)
def wipe(self) -> None:
# should delete all the run-sharded db files and drop the contents of the index
for filename in (
glob.glob(os.path.join(self._base_dir, "*.db"))
+ glob.glob(os.path.join(self._base_dir, "*.db-wal"))
+ glob.glob(os.path.join(self._base_dir, "*.db-shm"))
):
if (
not filename.endswith(f"{INDEX_SHARD_NAME}.db")
and not filename.endswith(f"{INDEX_SHARD_NAME}.db-wal")
and not filename.endswith(f"{INDEX_SHARD_NAME}.db-shm")
):
with contextlib.suppress(FileNotFoundError):
os.unlink(filename)
self._initialized_dbs = set()
self._wipe_index()
def _delete_mirrored_events_for_asset_key(self, asset_key: AssetKey) -> None:
with self.index_connection() as conn:
conn.execute(
SqlEventLogStorageTable.delete().where(
SqlEventLogStorageTable.c.asset_key == asset_key.to_string(),
)
)
def wipe_asset(self, asset_key: AssetKey) -> None:
# default implementation will update the event_logs in the sharded dbs, and the asset_key
# table in the asset shard, but will not remove the mirrored event_log events in the asset
# shard
super().wipe_asset(asset_key)
self._delete_mirrored_events_for_asset_key(asset_key)
def watch(self, run_id: str, cursor: Optional[str], callback: EventHandlerFn) -> None:
if not self._obs:
self._obs = Observer()
self._obs.start()
watchdog = SqliteEventLogStorageWatchdog(self, run_id, callback, cursor)
self._watchers[run_id][callback] = (
watchdog,
self._obs.schedule(watchdog, self._base_dir, recursive=True),
)
def end_watch(self, run_id: str, handler: EventHandlerFn) -> None:
if handler in self._watchers[run_id]:
event_handler, watch = self._watchers[run_id][handler]
self._obs.remove_handler_for_watch(event_handler, watch) # type: ignore # (possible none)
del self._watchers[run_id][handler]
def dispose(self) -> None:
if self._obs:
self._obs.stop()
self._obs.join(timeout=15)
def alembic_version(self) -> AlembicVersion:
alembic_config = get_alembic_config(__file__)
with self.index_connection() as conn:
return check_alembic_revision(alembic_config, conn)
@property
def is_run_sharded(self) -> bool:
return True
@cached_property
def supports_global_concurrency_limits(self) -> bool:
return self.has_table("concurrency_limits")
| SqliteEventLogStorage |
python | dagster-io__dagster | examples/docs_projects/project_dagster_modal_pipes/src/modal_project/config.py | {
"start": 74,
"end": 1755
} | class ____:
name: str
params: str
relative_speed: int # Higher is faster
def get_logger(name, level=logging.INFO):
logger = logging.getLogger(name)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(levelname)s: %(asctime)s: %(name)s %(message)s"))
logger.addHandler(handler)
logger.setLevel(level)
return logger
CACHE_DIR = "/cache"
# Where downloaded podcasts are stored, by guid hash.
# Mostly .mp3 files 50-100MiB.
RAW_AUDIO_DIR = pathlib.Path(CACHE_DIR, "raw_audio")
# Stores metadata of individual podcast episodes as JSON.
PODCAST_METADATA_DIR = pathlib.Path(CACHE_DIR, "podcast_metadata")
# Completed episode transcriptions. Stored as flat files with
# files structured as '{guid_hash}-{model_slug}.json'.
TRANSCRIPTIONS_DIR = pathlib.Path(CACHE_DIR, "transcriptions")
# Searching indexing files, refreshed by scheduled functions.
SEARCH_DIR = pathlib.Path(CACHE_DIR, "search")
# Location of modal checkpoint.
MODEL_DIR = pathlib.Path(CACHE_DIR, "model")
transcripts_per_podcast_limit = 2
supported_whisper_models = {
"tiny.en": ModelSpec(name="tiny.en", params="39M", relative_speed=32),
# Takes around 3-10 minutes to transcribe a podcast, depending on length.
"base.en": ModelSpec(name="base.en", params="74M", relative_speed=16),
"small.en": ModelSpec(name="small.en", params="244M", relative_speed=6),
"medium.en": ModelSpec(name="medium.en", params="769M", relative_speed=2),
# Very slow. Will take around 45 mins to 1.5 hours to transcribe.
"large": ModelSpec(name="large", params="1550M", relative_speed=1),
}
DEFAULT_MODEL = supported_whisper_models["base.en"]
| ModelSpec |
python | openai__openai-python | src/openai/resources/beta/realtime/transcription_sessions.py | {
"start": 6829,
"end": 12886
} | class ____(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncTranscriptionSessionsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncTranscriptionSessionsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncTranscriptionSessionsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncTranscriptionSessionsWithStreamingResponse(self)
async def create(
self,
*,
client_secret: transcription_session_create_params.ClientSecret | NotGiven = NOT_GIVEN,
include: List[str] | NotGiven = NOT_GIVEN,
input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
input_audio_noise_reduction: transcription_session_create_params.InputAudioNoiseReduction
| NotGiven = NOT_GIVEN,
input_audio_transcription: transcription_session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
turn_detection: transcription_session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> TranscriptionSession:
"""
Create an ephemeral API token for use in client-side applications with the
Realtime API specifically for realtime transcriptions. Can be configured with
the same session parameters as the `transcription_session.update` client event.
It responds with a session object, plus a `client_secret` key which contains a
usable ephemeral API token that can be used to authenticate browser clients for
the Realtime API.
Args:
client_secret: Configuration options for the generated client secret.
include:
The set of items to include in the transcription. Current available items are:
- `item.input_audio_transcription.logprobs`
input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
`pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
(mono), and little-endian byte order.
input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
off. Noise reduction filters audio added to the input audio buffer before it is
sent to VAD and the model. Filtering the audio can improve VAD and turn
detection accuracy (reducing false positives) and model performance by improving
perception of the input audio.
input_audio_transcription: Configuration for input audio transcription. The client can optionally set the
language and prompt for transcription, these offer additional guidance to the
transcription service.
modalities: The set of modalities the model can respond with. To disable audio, set this to
["text"].
turn_detection: Configuration for turn detection, ether Server VAD or Semantic VAD. This can be
set to `null` to turn off, in which case the client must manually trigger model
response. Server VAD means that the model will detect the start and end of
speech based on audio volume and respond at the end of user speech. Semantic VAD
is more advanced and uses a turn detection model (in conjunction with VAD) to
semantically estimate whether the user has finished speaking, then dynamically
sets a timeout based on this probability. For example, if user audio trails off
with "uhhm", the model will score a low probability of turn end and wait longer
for the user to continue speaking. This can be useful for more natural
conversations, but may have a higher latency.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
"/realtime/transcription_sessions",
body=await async_maybe_transform(
{
"client_secret": client_secret,
"include": include,
"input_audio_format": input_audio_format,
"input_audio_noise_reduction": input_audio_noise_reduction,
"input_audio_transcription": input_audio_transcription,
"modalities": modalities,
"turn_detection": turn_detection,
},
transcription_session_create_params.TranscriptionSessionCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=TranscriptionSession,
)
| AsyncTranscriptionSessions |
python | pytorch__pytorch | torch/testing/_internal/common_pruning.py | {
"start": 11111,
"end": 12485
} | class ____(nn.Module):
r"""Model with Conv2d layers, all with bias, some in a Sequential and some following, and then a Pool2d
and a Flatten module followed by a Linear layer.
Activation functions and Pool2ds in between each layer also.
Used to test pruned Conv2d-Pool2d-Flatten-Linear fusion."""
def __init__(self) -> None:
super().__init__()
self.seq = nn.Sequential(
nn.Conv2d(1, 3, kernel_size=3, padding=1, bias=True),
nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(3, 5, kernel_size=3, padding=1, bias=True),
nn.Tanh(),
nn.AvgPool2d(kernel_size=2, stride=2, padding=1),
)
self.conv2d1 = nn.Conv2d(5, 7, kernel_size=3, padding=1, bias=True)
self.af1 = nn.ReLU()
self.conv2d2 = nn.Conv2d(7, 11, kernel_size=3, padding=1, bias=True)
self.avg_pool = nn.AdaptiveAvgPool2d((2, 2))
self.flatten = nn.Flatten()
self.fc = nn.Linear(44, 13, bias=True)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.seq(x)
x = self.conv2d1(x)
x = F.max_pool2d(x, kernel_size=2, stride=2, padding=1)
x = self.af1(x)
x = self.conv2d2(x)
x = self.avg_pool(x)
x = self.flatten(x)
x = self.fc(x)
return x
| Conv2dPoolFlatten |
python | sympy__sympy | sympy/stats/crv.py | {
"start": 2210,
"end": 2826
} | class ____(ProductDomain, ContinuousDomain):
"""
A collection of independent domains with continuous support
"""
def compute_expectation(self, expr, variables=None, **kwargs):
if variables is None:
variables = self.symbols
for domain in self.domains:
domain_vars = frozenset(variables) & frozenset(domain.symbols)
if domain_vars:
expr = domain.compute_expectation(expr, domain_vars, **kwargs)
return expr
def as_boolean(self):
return And(*[domain.as_boolean() for domain in self.domains])
| ProductContinuousDomain |
python | PyCQA__pylint | tests/functional/a/arguments_differ.py | {
"start": 5065,
"end": 5163
} | class ____(ParentT2):
async def func(self, user_input: typing.List) -> None:
pass
| ChildT2 |
python | wepe__MachineLearning | DeepLearning Tutorials/FaceRecognition_CNN(olivettifaces)/train_CNN_olivettifaces.py | {
"start": 2472,
"end": 3631
} | class ____(object):
def __init__(self, input, n_in, n_out):
self.W = theano.shared(
value=numpy.zeros(
(n_in, n_out),
dtype=theano.config.floatX
),
name='W',
borrow=True
)
self.b = theano.shared(
value=numpy.zeros(
(n_out,),
dtype=theano.config.floatX
),
name='b',
borrow=True
)
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
self.params = [self.W, self.b]
def negative_log_likelihood(self, y):
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def errors(self, y):
if y.ndim != self.y_pred.ndim:
raise TypeError(
'y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', self.y_pred.type)
)
if y.dtype.startswith('int'):
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
#全连接层,分类器前一层
| LogisticRegression |
python | getsentry__sentry-python | tests/test_conftest.py | {
"start": 1716,
"end": 1896
} | class ____: # noqa: B903
def __init__(self, name=None, age=None, description=None):
self.name = name
self.age = age
self.description = description
| Animal |
python | pytorch__pytorch | test/distributed/test_c10d_nccl.py | {
"start": 249140,
"end": 255116
} | class ____(MultiProcessTestCase):
def _create_process_group_nccl(self, store, opts, device_id=None):
# create nccl processgroup with opts
c10d.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
pg_options=opts,
device_id=device_id,
)
pg = c10d.distributed_c10d._get_default_group()
return pg
def opts(self, high_priority_stream=False):
opts = c10d.ProcessGroupNCCL.Options()
opts.is_high_priority_stream = high_priority_stream
return opts
def setUp(self):
super().setUp()
# TORCH_NCCL_BLOCKING_WAIT overrides TORCH_NCCL_ASYNC_ERROR_HANDLING hence tests
# that use TORCH_NCCL_BLOCKING_WAIT will test it as expected.
os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "1"
# self.num_gpus = torch.cuda.device_count()
self._spawn_processes()
def tearDown(self):
super().tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
@property
def world_size(self):
return 8
@property
def rank_to_GPU(self):
# return rank to GPU map
return init_multigpu_helper(self.world_size, "nccl")
@requires_nccl_version((2, 18), "Need NCCL 2.18+ for ncclCommSplit")
@skip_if_lt_x_gpu(8)
def test_comm_split_group_larger_scale(self):
store = c10d.FileStore(self.file_name, self.world_size)
device = torch.device(f"cuda:{self.rank}")
pg = self._create_process_group_nccl(store, self.opts(), device_id=device)
backend = pg._get_backend(torch.device(device))
tensor = torch.full((1,), self.rank).cuda(device)
ng1 = c10d.split_group(pg, [[0, 1], [2, 3, 4, 5, 6, 7]])
# comm split happens eagerly since device_id is passed to init_process_group.
self.assertEqual(backend.comm_split_count(), 1)
# dist.broadcast take Source rank on global process group
if self.rank < 2:
dist.broadcast(tensor, 0, group=ng1)
self.assertEqual(tensor, torch.full((1,), 0))
else:
dist.broadcast(tensor, 2, group=ng1)
self.assertEqual(tensor, torch.full((1,), 2))
# test split with only one colored group, other ranks should be no color split.
ng2 = c10d.split_group(pg, [[5, 6, 7]])
self.assertEqual(backend.comm_split_count(), 2)
if self.rank >= 5:
tensor2 = torch.full((1,), self.rank).cuda(device)
dist.broadcast(tensor2, 7, group=ng2)
self.assertEqual(tensor2, torch.full((1,), 7))
else:
self.assertEqual(ng2, None)
# a barrier and a cuda sync before destroying all pgs.
dist.barrier(pg)
torch.cuda.synchronize()
dist.destroy_process_group()
@requires_nccl_version((2, 18), "Need NCCL 2.18+ for ncclCommSplit")
@skip_if_lt_x_gpu(8)
def test_comm_recursive_split_group(self):
store = c10d.FileStore(self.file_name, self.world_size)
device = torch.device(f"cuda:{self.rank}")
pg = self._create_process_group_nccl(store, self.opts(), device_id=device)
backend = pg._get_backend(torch.device(device))
# split the default PG into 2 subgroups, each subgroup (ng1) has 4 ranks.
tensor1 = torch.full((1,), self.rank).cuda(device)
ng1 = c10d.split_group(pg, [[0, 1, 2, 3], [4, 5, 6, 7]])
backend1 = ng1._get_backend(torch.device(device))
if self.rank < 4:
dist.broadcast(tensor1, 0, group=ng1)
self.assertEqual(tensor1, torch.full((1,), 0))
else:
dist.broadcast(tensor1, 4, group=ng1)
self.assertEqual(tensor1, torch.full((1,), 4))
# comm split happens eagerly since device_id is passed to init_process_group.
self.assertEqual(backend.comm_split_count(), 1)
self.assertEqual(backend1.comm_split_count(), 0)
# further split ng1 into 2 subgroups, each subgroup (ng2) has 2 ranks.
tensor2 = torch.full((1,), self.rank).cuda(device)
ng2 = c10d.split_group(ng1, [[0, 1], [2, 3]])
backend2 = ng2._get_backend(torch.device(device))
self.assertEqual(backend.comm_split_count(), 1)
self.assertEqual(backend1.comm_split_count(), 1)
self.assertEqual(backend2.comm_split_count(), 0)
# execute collective calls within each 2-rank pg
if self.rank == 0 or self.rank == 1:
dist.broadcast(tensor2, 1, group=ng2)
self.assertEqual(tensor2, torch.full((1,), 1))
if self.rank == 2 or self.rank == 3:
dist.broadcast(tensor2, 2, group=ng2)
self.assertEqual(tensor2, torch.full((1,), 2))
if self.rank == 4 or self.rank == 5:
dist.broadcast(tensor2, 5, group=ng2)
self.assertEqual(tensor2, torch.full((1,), 5))
if self.rank == 6 or self.rank == 7:
dist.broadcast(tensor2, 6, group=ng2)
self.assertEqual(tensor2, torch.full((1,), 6))
# Test the case when the split changes the pg option of split group
# while the parent pg option is not changed.
new_pg = c10d.new_group([0, 1, 2, 3, 4, 5, 6, 7], device_id=device)
backend_new_pg = new_pg._get_backend(torch.device(device))
self.assertEqual(len(backend_new_pg.options.global_ranks_in_group), 8)
c10d.split_group(new_pg, [[0, 2, 4, 6], [1, 3, 5, 7]])
self.assertEqual(len(backend_new_pg.options.global_ranks_in_group), 8)
# a barrier and a cuda sync before destroying all pgs.
dist.barrier(pg)
torch.cuda.synchronize()
dist.destroy_process_group()
if __name__ == "__main__":
assert not torch.cuda._initialized, (
"test_distributed must not have initialized CUDA context on main process"
)
run_tests()
| ProcessGroupNCCLLargerScaleTest |
python | graphql-python__graphene | graphene/tests/issues/test_1293.py | {
"start": 160,
"end": 496
} | class ____(graphene.InputObjectType):
datetime_after = graphene.DateTime(
required=False,
default_value=datetime.fromtimestamp(1434549820.776, timezone.utc),
)
datetime_before = graphene.DateTime(
required=False,
default_value=datetime.fromtimestamp(1444549820.776, timezone.utc),
)
| Filters |
python | numba__numba | numba/tests/test_compiler_lock.py | {
"start": 160,
"end": 511
} | class ____(TestCase):
def test_gcl_as_context_manager(self):
with global_compiler_lock:
require_global_compiler_lock()
def test_gcl_as_decorator(self):
@global_compiler_lock
def func():
require_global_compiler_lock()
func()
if __name__ == '__main__':
unittest.main()
| TestCompilerLock |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py | {
"start": 53891,
"end": 55058
} | class ____(BaseModel):
type: Literal["ListPartitionRouter"]
cursor_field: str = Field(
...,
description='While iterating over list values, the name of field used to reference a list value. The partition value can be accessed with string interpolation. e.g. "{{ stream_partition[\'my_key\'] }}" where "my_key" is the value of the cursor_field.',
examples=["section", "{{ config['section_key'] }}"],
title="Current Partition Value Identifier",
)
values: Union[str, List[str]] = Field(
...,
description="The list of attributes being iterated over and used as input for the requests made to the source API.",
examples=[["section_a", "section_b", "section_c"], "{{ config['sections'] }}"],
title="Partition Values",
)
request_option: Optional[RequestOption] = Field(
None,
description="A request option describing where the list value should be injected into and under what field name if applicable.",
title="Inject Partition Value Into Outgoing HTTP Request",
)
parameters: Optional[Dict[str, Any]] = Field(None, alias="$parameters")
| ListPartitionRouter |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/data_asset/path/file_asset.py | {
"start": 1392,
"end": 1730
} | class ____(ValueError):
def __init__(self, missing_groups: set[str]):
message = (
"The following group(s) are required but are "
f"missing from the regex: {', '.join(missing_groups)}"
)
super().__init__(message)
self.missing_groups = missing_groups
| RegexMissingRequiredGroupsError |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/tokens.py | {
"start": 8993,
"end": 9061
} | class ____(Token):
__slots__ = ()
id = '}'
| FlowMappingEndToken |
python | getsentry__sentry | src/sentry/ratelimits/leaky_bucket.py | {
"start": 486,
"end": 840
} | class ____:
burst_limit: int # maximum number of requests allowed in a burst
drip_rate: int # number of requests allowed per second
last_drip: float = 0 # unix timestamp of the last drip
current_level: float = 0 # current level of the bucket
wait_time: float = 0 # seconds to wait until next request is allowed
| LeakyBucketLimitInfo |
python | tensorflow__tensorflow | third_party/xla/build_tools/lint/diff_parser_test.py | {
"start": 776,
"end": 4084
} | class ____(absltest.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
testdata = test_utils.xla_src_root() / "build_tools" / "lint" / "testdata"
with (testdata / "bad_cc.diff").open() as f:
cls.bad_cc_diff = f.read()
with (testdata / "important_cc.diff").open() as f:
cls.important_cc_diff = f.read()
with (testdata / "crosstool.diff").open() as f:
cls.crosstool_diff = f.read()
def test_parse_important_cc_diff(self):
hunks = diff_parser.parse_hunks(self.important_cc_diff)
self.assertLen(hunks, 1)
[hunk] = hunks
self.assertEqual(hunk.file, "src/important.cc")
self.assertEqual(hunk.start, 1)
self.assertEqual(hunk.length, 3)
expected_lines = [
"+// Here we care if we find prohibited regexes.",
"+std::unique_ptr<int> add(int a, int b) {",
"+ return std::make_unique<int>(a + b);",
"+}",
]
self.assertEqual(hunk.lines, expected_lines)
def test_parse_bad_cc_diff(self):
hunks = diff_parser.parse_hunks(self.bad_cc_diff)
self.assertLen(hunks, 2)
bad_cc_hunk, important_cc_hunk = hunks
# check bad_cc_hunk
self.assertEqual(bad_cc_hunk.file, "src/dir/bad.cc")
self.assertEqual(bad_cc_hunk.start, 1)
self.assertEqual(bad_cc_hunk.length, 7)
expected_lines = [
"+// This code is bad!",
"+",
"+using Make_Unique = std::make_unique; // OK",
"+",
"+std::unique_ptr<int> add(int a, int b) {",
"+ return Make_Unique<int>(a + b); // OK. Fixed now!",
"+}",
]
self.assertEqual(bad_cc_hunk.lines, expected_lines)
# check important_cc_hunk
self.assertEqual(important_cc_hunk.file, "src/important.cc")
self.assertEqual(important_cc_hunk.start, 1)
self.assertEqual(important_cc_hunk.length, 5)
expected_lines = [
"+// Here we care if we find prohibited regexes.",
"+",
"+std::unique_ptr<int> add(int a, int b) {",
"+ return std::make_unique<int>(a + b);",
"+}",
]
self.assertEqual(important_cc_hunk.lines, expected_lines)
def test_parse_crosstool_diff(self):
hunks = diff_parser.parse_hunks(self.crosstool_diff)
self.assertLen(hunks, 3)
small_hunk, big_hunk, literal_cc_hunk = hunks
self.assertEqual(
small_hunk.file,
"third_party/gpus/crosstool/cc_toolchain_config.bzl.tpl",
)
self.assertEqual(small_hunk.start, 24)
self.assertEqual(small_hunk.length, 7)
self.assertEqual(
big_hunk.file, "third_party/gpus/crosstool/cc_toolchain_config.bzl.tpl"
)
self.assertEqual(big_hunk.start, 300)
self.assertEqual(big_hunk.length, 45)
self.assertEqual(literal_cc_hunk.file, "xla/literal.cc")
self.assertEqual(literal_cc_hunk.start, 47)
self.assertEqual(literal_cc_hunk.length, 7)
def test_added_lines(self):
hunks = diff_parser.parse_hunks(self.crosstool_diff)
small_hunk, big_hunk, literal_cc_hunk = hunks
line_numbers = lambda hunk: [line_no for line_no, _ in hunk.added_lines()]
self.assertEqual(line_numbers(small_hunk), [27])
self.assertEqual(line_numbers(big_hunk), list(range(303, 342)))
self.assertEqual(line_numbers(literal_cc_hunk), [50])
if __name__ == "__main__":
absltest.main()
| ParseDiffTest |
python | cython__cython | Demos/benchmarks/bm_raytrace.py | {
"start": 406,
"end": 2435
} | class ____(object):
def __init__(self, initx, inity, initz):
self.x = initx
self.y = inity
self.z = initz
def __str__(self):
return '(%s,%s,%s)' % (self.x, self.y, self.z)
def __repr__(self):
return 'Vector(%s,%s,%s)' % (self.x, self.y, self.z)
def magnitude(self):
return math.sqrt(self.dot(self))
def __add__(self, other):
if other.isPoint():
return Point(self.x + other.x, self.y + other.y, self.z + other.z)
else:
return Vector(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other):
other.mustBeVector()
return Vector(self.x - other.x, self.y - other.y, self.z - other.z)
def scale(self, factor):
return Vector(factor * self.x, factor * self.y, factor * self.z)
def dot(self, other):
other.mustBeVector()
return (self.x * other.x) + (self.y * other.y) + (self.z * other.z)
def cross(self, other):
other.mustBeVector()
return Vector(self.y * other.z - self.z * other.y,
self.z * other.x - self.x * other.z,
self.x * other.y - self.y * other.x)
def normalized(self):
return self.scale(1.0 / self.magnitude())
def negated(self):
return self.scale(-1)
def __eq__(self, other):
return (self.x == other.x) and (self.y == other.y) and (self.z == other.z)
def isVector(self):
return True
def isPoint(self):
return False
def mustBeVector(self):
return self
def mustBePoint(self):
raise 'Vectors are not points!'
def reflectThrough(self, normal):
d = normal.scale(self.dot(normal))
return self - d.scale(2)
Vector.ZERO = Vector(0, 0, 0)
Vector.RIGHT = Vector(1, 0, 0)
Vector.UP = Vector(0, 1, 0)
Vector.OUT = Vector(0, 0, 1)
assert Vector.RIGHT.reflectThrough(Vector.UP) == Vector.RIGHT
assert Vector(-1, -1, 0).reflectThrough(Vector.UP) == Vector(-1, 1, 0)
| Vector |
python | eventlet__eventlet | tests/db_pool_test.py | {
"start": 9984,
"end": 10046
} | class ____:
def rollback(self):
pass
| DummyConnection |
python | keras-team__keras | keras/src/metrics/iou_metrics.py | {
"start": 15690,
"end": 19272
} | class ____(IoU):
"""Computes the mean Intersection-Over-Union metric.
Formula:
```python
iou = true_positives / (true_positives + false_positives + false_negatives)
```
Intersection-Over-Union is a common evaluation metric for semantic image
segmentation.
To compute IoUs, the predictions are accumulated in a confusion matrix,
weighted by `sample_weight` and the metric is then calculated from it.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Note that this class first computes IoUs for all individual classes, then
returns the mean of these values.
Args:
num_classes: The possible number of labels the prediction task can have.
This value must be provided, since a confusion matrix of dimension =
[num_classes, num_classes] will be allocated.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
ignore_class: Optional integer. The ID of a class to be ignored during
metric computation. This is useful, for example, in segmentation
problems featuring a "void" class (commonly -1 or 255) in
segmentation maps. By default (`ignore_class=None`), all classes are
considered.
sparse_y_true: Whether labels are encoded using integers or
dense floating point vectors. If `False`, the `argmax` function
is used to determine each sample's most likely associated label.
sparse_y_pred: Whether predictions are encoded using integers or
dense floating point vectors. If `False`, the `argmax` function
is used to determine each sample's most likely associated label.
axis: (Optional) The dimension containing the logits. Defaults to `-1`.
Example:
>>> # cm = [[1, 1],
>>> # [1, 1]]
>>> # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
>>> # iou = true_positives / (sum_row + sum_col - true_positives))
>>> # result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2 = 0.33
>>> m = keras.metrics.MeanIoU(num_classes=2)
>>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1])
>>> m.result()
0.33333334
>>> m.reset_state()
>>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1],
... sample_weight=[0.3, 0.3, 0.3, 0.1])
>>> m.result().numpy()
0.23809525
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[keras.metrics.MeanIoU(num_classes=2)])
```
"""
def __init__(
self,
num_classes,
name=None,
dtype=None,
ignore_class=None,
sparse_y_true=True,
sparse_y_pred=True,
axis=-1,
):
target_class_ids = list(range(num_classes))
super().__init__(
name=name,
num_classes=num_classes,
target_class_ids=target_class_ids,
axis=axis,
dtype=dtype,
ignore_class=ignore_class,
sparse_y_true=sparse_y_true,
sparse_y_pred=sparse_y_pred,
)
def get_config(self):
return {
"num_classes": self.num_classes,
"name": self.name,
"dtype": self._dtype,
"ignore_class": self.ignore_class,
"sparse_y_true": self.sparse_y_true,
"sparse_y_pred": self.sparse_y_pred,
"axis": self.axis,
}
@keras_export("keras.metrics.OneHotIoU")
| MeanIoU |
python | getsentry__sentry | src/sentry/integrations/discord/actions/issue_alert/form.py | {
"start": 543,
"end": 2920
} | class ____(forms.Form):
# NOTE: server (guild id) maps directly to the integration ID
server = forms.ChoiceField(choices=(), widget=forms.Select())
channel_id = forms.CharField(widget=forms.TextInput())
tags = forms.CharField(required=False, widget=forms.TextInput())
def __init__(self, *args: Any, **kwargs: Any) -> None:
server_list = [(i.id, i.name) for i in kwargs.pop("integrations")]
super().__init__(*args, **kwargs)
if server_list:
assert isinstance(self.fields["server"], ChoiceField)
self.fields["server"].initial = server_list[0][0]
self.fields["server"].choices = server_list
self.fields["server"].widget.choices = server_list
def _format_discord_error_message(self, message: str) -> str:
return f"Discord: {message}"
def clean(self) -> dict[str, object] | None:
cleaned_data: dict[str, object] = super().clean() or {}
channel_id = cleaned_data.get("channel_id")
server = cleaned_data.get("server")
integration = integration_service.get_integration(
integration_id=server, status=ObjectStatus.ACTIVE
)
if not server or not integration:
raise forms.ValidationError(
self._format_discord_error_message("Server is a required field."),
code="invalid",
)
if channel_id and isinstance(channel_id, str):
try:
channel = get_channel_id_from_url(channel_id)
validate_channel_id(
channel_id=channel,
guild_id=integration.external_id,
guild_name=integration.name,
)
cleaned_data["channel_id"] = channel
except ValidationError as e:
raise forms.ValidationError(
self._format_discord_error_message("; ".join(e.messages)),
code="invalid",
)
except IntegrationError as e:
raise forms.ValidationError(
self._format_discord_error_message("; ".join(str(e))),
code="invalid",
)
except ApiTimeoutError:
raise forms.ValidationError("Discord channel lookup timed out")
return cleaned_data
| DiscordNotifyServiceForm |
python | pandas-dev__pandas | pandas/tests/test_nanops.py | {
"start": 27464,
"end": 29536
} | class ____:
def test_numeric_values(self):
# Test integer
assert nanops._ensure_numeric(1) == 1
# Test float
assert nanops._ensure_numeric(1.1) == 1.1
# Test complex
assert nanops._ensure_numeric(1 + 2j) == 1 + 2j
def test_ndarray(self):
# Test numeric ndarray
values = np.array([1, 2, 3])
assert np.allclose(nanops._ensure_numeric(values), values)
# Test object ndarray
o_values = values.astype(object)
assert np.allclose(nanops._ensure_numeric(o_values), values)
# Test convertible string ndarray
s_values = np.array(["1", "2", "3"], dtype=object)
msg = r"Could not convert \['1' '2' '3'\] to numeric"
with pytest.raises(TypeError, match=msg):
nanops._ensure_numeric(s_values)
# Test non-convertible string ndarray
s_values = np.array(["foo", "bar", "baz"], dtype=object)
msg = r"Could not convert .* to numeric"
with pytest.raises(TypeError, match=msg):
nanops._ensure_numeric(s_values)
def test_convertable_values(self):
with pytest.raises(TypeError, match="Could not convert string '1' to numeric"):
nanops._ensure_numeric("1")
with pytest.raises(
TypeError, match="Could not convert string '1.1' to numeric"
):
nanops._ensure_numeric("1.1")
with pytest.raises(
TypeError, match=r"Could not convert string '1\+1j' to numeric"
):
nanops._ensure_numeric("1+1j")
def test_non_convertable_values(self):
msg = "Could not convert string 'foo' to numeric"
with pytest.raises(TypeError, match=msg):
nanops._ensure_numeric("foo")
# with the wrong type, python raises TypeError for us
msg = "argument must be a string or a number"
with pytest.raises(TypeError, match=msg):
nanops._ensure_numeric({})
with pytest.raises(TypeError, match=msg):
nanops._ensure_numeric([])
| TestEnsureNumeric |
python | huggingface__transformers | src/transformers/models/grounding_dino/modeling_grounding_dino.py | {
"start": 7346,
"end": 12393
} | class ____(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Initial reference points sent through the Transformer decoder.
intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
Stacked intermediate hidden states (output of each layer of the decoder).
intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
Stacked intermediate reference points (reference points of each layer of the decoder).
encoder_last_hidden_state_vision (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_last_hidden_state_text (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the vision embeddings + one for the output of each
layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the vision encoder at the
output of each layer plus the initial embedding outputs.
encoder_text_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the text embeddings + one for the output of each layer)
of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the text encoder at the output of
each layer plus the initial embedding outputs.
encoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of tuples of `torch.FloatTensor` (one for attention for each layer) of shape `(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the text-vision attention, vision-text attention, text-enhancer (self-attention) and
multi-scale deformable attention heads. attention softmax, used to compute the weighted average in the
bi-attention heads.
enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.two_stage=True`):
Predicted bounding boxes scores where the top `config.num_queries` scoring bounding boxes are picked as
region proposals in the first stage. Output of bounding box binary classification (i.e. foreground and
background).
enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.two_stage=True`):
Logits of predicted bounding boxes coordinates in the first stage.
encoder_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.two_stage=True`):
Logits of top `config.num_queries` scoring bounding boxes in the first stage.
encoder_pred_boxes (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.two_stage=True`):
Coordinates of top `config.num_queries` scoring bounding boxes in the first stage.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
init_reference_points: Optional[torch.FloatTensor] = None
intermediate_hidden_states: Optional[torch.FloatTensor] = None
intermediate_reference_points: Optional[torch.FloatTensor] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None
encoder_last_hidden_state_vision: Optional[torch.FloatTensor] = None
encoder_last_hidden_state_text: Optional[torch.FloatTensor] = None
encoder_vision_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_text_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None
enc_outputs_class: Optional[torch.FloatTensor] = None
enc_outputs_coord_logits: Optional[torch.FloatTensor] = None
encoder_logits: Optional[torch.FloatTensor] = None
encoder_pred_boxes: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`GroundingDinoForObjectDetection`].
"""
)
| GroundingDinoModelOutput |
python | apache__airflow | providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/executors/local_kubernetes_executor.py | {
"start": 1751,
"end": 12243
} | class ____(BaseExecutor):
"""
Chooses between LocalExecutor and KubernetesExecutor based on the queue defined on the task.
When the task's queue is the value of ``kubernetes_queue`` in section ``[local_kubernetes_executor]``
of the configuration (default value: `kubernetes`), KubernetesExecutor is selected to run the task,
otherwise, LocalExecutor is used.
"""
supports_ad_hoc_ti_run: bool = True
# TODO: Remove this attribute once providers rely on Airflow >=3.0.0
supports_pickling: bool = False
supports_sentry: bool = False
is_local: bool = False
is_single_threaded: bool = False
is_production: bool = True
serve_logs: bool = True
callback_sink: BaseCallbackSink | None = None
KUBERNETES_QUEUE = conf.get("local_kubernetes_executor", "kubernetes_queue")
def __init__(
self,
local_executor: LocalExecutor | None = None,
kubernetes_executor: KubernetesExecutor | None = None,
):
if AIRFLOW_V_3_0_PLUS or not local_executor or not kubernetes_executor:
raise RuntimeError(
f"{self.__class__.__name__} does not support Airflow 3.0+. See "
"https://airflow.apache.org/docs/apache-airflow/stable/core-concepts/executor/index.html#using-multiple-executors-concurrently"
" how to use multiple executors concurrently."
)
super().__init__()
self._job_id: int | str | None = None
self.local_executor = local_executor
self.kubernetes_executor = kubernetes_executor
self.kubernetes_executor.kubernetes_queue = self.KUBERNETES_QUEUE
@property
def _task_event_logs(self):
self.local_executor._task_event_logs += self.kubernetes_executor._task_event_logs
self.kubernetes_executor._task_event_logs.clear()
return self.local_executor._task_event_logs
@_task_event_logs.setter
def _task_event_logs(self, value):
"""Not implemented for hybrid executors."""
@property
def queued_tasks(self) -> dict[TaskInstanceKey, Any]:
"""Return queued tasks from local and kubernetes executor."""
queued_tasks = self.local_executor.queued_tasks.copy()
# TODO: fix this, there is misalignment between the types of queued_tasks so it is likely wrong
queued_tasks.update(self.kubernetes_executor.queued_tasks) # type: ignore[arg-type]
return queued_tasks
@queued_tasks.setter
def queued_tasks(self, value) -> None:
"""Not implemented for hybrid executors."""
@property
def running(self) -> set[TaskInstanceKey]:
"""Return running tasks from local and kubernetes executor."""
return self.local_executor.running.union(self.kubernetes_executor.running)
@running.setter
def running(self, value) -> None:
"""Not implemented for hybrid executors."""
@property
def job_id(self) -> int | str | None:
"""
Inherited attribute from BaseExecutor.
Since this is not really an executor, but a wrapper of executors
we implemented it as property, so we can have custom setter.
"""
return self._job_id
@job_id.setter
def job_id(self, value: int | str | None) -> None:
"""Expose job ID for SchedulerJob."""
self._job_id = value
self.kubernetes_executor.job_id = value
self.local_executor.job_id = value
def start(self) -> None:
"""Start local and kubernetes executor."""
self.log.info("Starting local and Kubernetes Executor")
self.local_executor.start()
self.kubernetes_executor.start()
@property
def slots_available(self) -> int:
"""Number of new tasks this executor instance can accept."""
return self.local_executor.slots_available
@property
def slots_occupied(self):
"""Number of tasks this executor instance is currently managing."""
return len(self.running) + len(self.queued_tasks)
def queue_command(
self,
task_instance: TaskInstance,
command: CommandType,
priority: int = 1,
queue: str | None = None,
) -> None:
"""Queues command via local or kubernetes executor."""
executor = self._router(task_instance)
self.log.debug("Using executor: %s for %s", executor.__class__.__name__, task_instance.key)
executor.queue_command(task_instance, command, priority, queue) # type: ignore[union-attr]
def queue_task_instance(
self,
task_instance: TaskInstance,
mark_success: bool = False,
ignore_all_deps: bool = False,
ignore_depends_on_past: bool = False,
wait_for_past_depends_before_skipping: bool = False,
ignore_task_deps: bool = False,
ignore_ti_state: bool = False,
pool: str | None = None,
cfg_path: str | None = None,
**kwargs,
) -> None:
"""Queues task instance via local or kubernetes executor."""
from airflow.models.taskinstance import SimpleTaskInstance # type: ignore[attr-defined]
executor = self._router(SimpleTaskInstance.from_ti(task_instance))
self.log.debug(
"Using executor: %s to queue_task_instance for %s", executor.__class__.__name__, task_instance.key
)
if not hasattr(task_instance, "pickle_id"):
del kwargs["pickle_id"]
executor.queue_task_instance( # type: ignore[union-attr]
task_instance=task_instance,
mark_success=mark_success,
ignore_all_deps=ignore_all_deps,
ignore_depends_on_past=ignore_depends_on_past,
wait_for_past_depends_before_skipping=wait_for_past_depends_before_skipping,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state,
pool=pool,
cfg_path=cfg_path,
**kwargs,
)
def get_task_log(self, ti: TaskInstance, try_number: int) -> tuple[list[str], list[str]]:
"""Fetch task log from kubernetes executor."""
if ti.queue == self.kubernetes_executor.kubernetes_queue:
return self.kubernetes_executor.get_task_log(ti=ti, try_number=try_number)
return [], []
def has_task(self, task_instance: TaskInstance) -> bool:
"""
Check if a task is either queued or running in either local or kubernetes executor.
:param task_instance: TaskInstance
:return: True if the task is known to this executor
"""
return self.local_executor.has_task(task_instance) or self.kubernetes_executor.has_task(task_instance)
def heartbeat(self) -> None:
"""Heartbeat sent to trigger new jobs in local and kubernetes executor."""
self.local_executor.heartbeat()
self.kubernetes_executor.heartbeat()
def get_event_buffer(
self, dag_ids: list[str] | None = None
) -> dict[TaskInstanceKey, EventBufferValueType]:
"""
Return and flush the event buffer from local and kubernetes executor.
:param dag_ids: dag_ids to return events for, if None returns all
:return: a dict of events
"""
cleared_events_from_local = self.local_executor.get_event_buffer(dag_ids)
cleared_events_from_kubernetes = self.kubernetes_executor.get_event_buffer(dag_ids)
return {**cleared_events_from_local, **cleared_events_from_kubernetes}
def try_adopt_task_instances(self, tis: Sequence[TaskInstance]) -> Sequence[TaskInstance]:
"""
Try to adopt running task instances that have been abandoned by a SchedulerJob dying.
Anything that is not adopted will be cleared by the scheduler (and then become eligible for
re-scheduling)
:return: any TaskInstances that were unable to be adopted
"""
local_tis = [ti for ti in tis if ti.queue != self.KUBERNETES_QUEUE]
kubernetes_tis = [ti for ti in tis if ti.queue == self.KUBERNETES_QUEUE]
return [
*self.local_executor.try_adopt_task_instances(local_tis),
*self.kubernetes_executor.try_adopt_task_instances(kubernetes_tis),
]
@deprecated(
reason="Replaced by function `revoke_task`. Upgrade airflow core to make this go away.",
category=AirflowProviderDeprecationWarning,
action="ignore", # ignoring since will get warning from the nested executors
)
def cleanup_stuck_queued_tasks(self, tis: list[TaskInstance]) -> list[str]:
# LocalExecutor doesn't have a cleanup_stuck_queued_tasks method, so we
# will only run KubernetesExecutor's
kubernetes_tis = [ti for ti in tis if ti.queue == self.KUBERNETES_QUEUE]
return self.kubernetes_executor.cleanup_stuck_queued_tasks(kubernetes_tis)
def revoke_task(self, *, ti: TaskInstance):
if ti.queue == self.KUBERNETES_QUEUE:
self.kubernetes_executor.revoke_task(ti=ti)
def end(self) -> None:
"""End local and kubernetes executor."""
self.local_executor.end()
self.kubernetes_executor.end()
def terminate(self) -> None:
"""Terminate local and kubernetes executor."""
self.local_executor.terminate()
self.kubernetes_executor.terminate()
def _router(self, simple_task_instance: SimpleTaskInstance) -> LocalExecutor | KubernetesExecutor:
"""
Return either local_executor or kubernetes_executor.
:param simple_task_instance: SimpleTaskInstance
:return: local_executor or kubernetes_executor
"""
if simple_task_instance.queue == self.KUBERNETES_QUEUE:
return self.kubernetes_executor
return self.local_executor
def debug_dump(self) -> None:
"""Debug dump; called in response to SIGUSR2 by the scheduler."""
self.log.info("Dumping LocalExecutor state")
self.local_executor.debug_dump()
self.log.info("Dumping KubernetesExecutor state")
self.kubernetes_executor.debug_dump()
def send_callback(self, request: CallbackRequest) -> None:
"""
Send callback for execution.
:param request: Callback request to be executed.
"""
if not self.callback_sink:
raise ValueError("Callback sink is not ready.")
self.callback_sink.send(request)
@staticmethod
def get_cli_commands() -> list:
return KubernetesExecutor.get_cli_commands()
| LocalKubernetesExecutor |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_unshard_params.py | {
"start": 1400,
"end": 8094
} | class ____(FSDPTest):
"""
This contains any methods common to both the sharded and non-sharded cases.
"""
def _test_unshard_params_writeback(
self,
writeback: bool,
check_outer: bool,
**fsdp_kwargs: dict[str, Any],
):
model = nn.Sequential(
nn.Linear(5, 5, bias=False, device=device_type.type),
nn.Linear(5, 3, bias=False, device=device_type.type),
)
model[0] = FSDP(model[0], **fsdp_kwargs)
model = FSDP(model, **fsdp_kwargs)
uses_sharded_strategy = model.sharding_strategy != ShardingStrategy.NO_SHARD
offloading_params = model.cpu_offload.offload_params
# Assumes depth-first `.parameters()`
outer_param: Union[FlatParameter, nn.Parameter] = next(model.parameters())
inner_param: Union[FlatParameter, nn.Parameter] = next(model[0].parameters())
param_to_check = outer_param if check_outer else inner_param
# Write a known value to all elements of the *sharded* parameter or
# `FlatParameter` to check
with torch.no_grad():
param_to_check.zero_()
param_to_check += self.rank + 2
# Zero the *unsharded* parameters
with FSDP.summon_full_params(model, writeback=writeback), torch.no_grad():
for param in model.parameters():
param.zero_()
# Check the 0th singleton element of the sharded parameter to see if
# the zeroing from inside the context persists
param_elem_to_check = param_to_check[0]
if param_elem_to_check.numel() > 1:
# For `use_orig_params=True` and `NO_SHARD`, the parameter
# preserves the original 2D shape, so we must access one more time
param_elem_to_check = param_elem_to_check[0]
if writeback or (not uses_sharded_strategy and not offloading_params):
# When FSDP does not use a sharded strategy and is not offloading
# parameters to CPU, it directly exposes the tensor storage that
# serves as the unsharded source of truth, so the write is always
# reflected regardless of `writeback`.
self.assertEqual(param_elem_to_check, 0)
else:
self.assertEqual(param_elem_to_check, self.rank + 2)
if offloading_params:
cpu_device = torch.device("cpu")
for param in model.parameters():
self.assertEqual(param.device, cpu_device)
def _get_test_unshard_params_writeback_config(self) -> dict[str, list[Any]]:
return {
"writeback": [True, False],
"check_outer": [True, False],
"mixed_precision": [MixedPrecision(param_dtype=torch.float16), None],
"cpu_offload": [
CPUOffload(offload_params=False),
CPUOffload(offload_params=True),
],
"use_orig_params": [True, False],
}
def _test_unshard_params_param_data(
self,
rank0_only: bool,
offload_to_cpu: bool,
cpu_offload: CPUOffload,
mixed_precision: Optional[MixedPrecision],
use_orig_params: bool,
):
local_model = NestedWrappedModule.init(
self.process_group,
FSDPInitMode.NO_FSDP,
DEVICEInitMode.DEVICE_BEFORE,
fsdp_kwargs={"device_id": device_type.type},
deterministic=True,
)
# Apply FSDP such that the root module does not have FSDP applied,
# while there are multiple FSDP root submodules (as proven later)
fsdp_model = NestedWrappedModule.init(
self.process_group,
FSDPInitMode.RECURSIVE,
DEVICEInitMode.DEVICE_BEFORE,
fsdp_kwargs={
"cpu_offload": cpu_offload,
"mixed_precision": mixed_precision,
"use_orig_params": use_orig_params,
},
deterministic=True,
)
self.assertFalse(isinstance(fsdp_model, FSDP))
# Hard code the following names because getting them is non-trivial
non_fsdp_managed_param_names = {
"module.0.weight",
"module.0.bias",
"module.3.weight",
"module.3.bias",
}
with FSDP.summon_full_params(
fsdp_model,
rank0_only=rank0_only,
writeback=not rank0_only,
offload_to_cpu=offload_to_cpu,
):
if not rank0_only or self.rank == 0:
for p1, (n2, p2) in zip(
local_model.parameters(), fsdp_model.named_parameters()
):
self.assertEqual(p1.shape, p2.shape)
if (
offload_to_cpu
and clean_tensor_name(n2) not in non_fsdp_managed_param_names
):
self.assertEqual(torch.device("cpu"), p2.device)
else:
self.assertEqual(p1.device, p2.device)
self.assertEqual(
p1.dtype, p2.dtype
) # even if FSDP uses mixed precision
self.assertEqual(p1, p2)
self.assertTrue(isinstance(p2, nn.Parameter))
else:
# Check that each `FlatParameter` has the sharded size as a
# proxy for it being resharded
for handle in traversal_utils._get_fsdp_handles(fsdp_model):
if handle.uses_sharded_strategy:
self.assertEqual(
handle.flat_param.shape, handle.flat_param._sharded_size
)
else:
self.assertEqual(
handle.flat_param.shape,
handle.flat_param._unpadded_unsharded_size,
)
# Prove the number of FSDP roots after lazy initialization
num_fsdp_roots = 0
for fsdp_state in traversal_utils._get_fsdp_states(fsdp_model):
num_fsdp_roots += fsdp_state._is_root
self.assertGreater(num_fsdp_roots, 1)
def _get_test_unshard_params_param_data_config(self) -> dict[str, list[Any]]:
return {
"rank0_only": [False, True],
"offload_to_cpu": [False, True],
"cpu_offload": [
CPUOffload(offload_params=False),
CPUOffload(offload_params=True),
],
"mixed_precision": [MixedPrecision(param_dtype=torch.float16), None],
"use_orig_params": [True, False],
}
| TestUnshardParamsBase |
python | ray-project__ray | rllib/offline/output_writer.py | {
"start": 461,
"end": 660
} | class ____(OutputWriter):
"""Output writer that discards its outputs."""
@override(OutputWriter)
def write(self, sample_batch: SampleBatchType):
# Do nothing.
pass
| NoopOutput |
python | huggingface__transformers | src/transformers/models/longt5/modeling_longt5.py | {
"start": 45419,
"end": 46712
} | class ____(nn.Module):
def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int] = None):
super().__init__()
self.SelfAttention = LongT5Attention(
config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx
)
self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
past_key_values=None,
use_cache=False,
output_attentions=False,
cache_position=None,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.SelfAttention(
normed_hidden_states,
mask=attention_mask,
position_bias=position_bias,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = hidden_states + self.dropout(attention_output[0])
outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
return outputs
| LongT5LayerSelfAttention |
python | django__django | tests/sitemaps_tests/urls/http.py | {
"start": 2980,
"end": 3152
} | class ____(Sitemap):
location = "/location/"
def items(self):
return []
def lastmod(self, obj):
return obj.lastmod
| CallableLastmodNoItemsSitemap |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_display_units11.py | {
"start": 315,
"end": 1177
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_display_units11.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [69559424, 69560960]
data = [
[10000000, 20000000, 30000000, 20000000, 10000000],
]
worksheet.write_column(0, 0, data[0])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.set_y_axis({"display_units": "hundreds"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | PyCQA__pylint | tests/functional/ext/no_self_use/no_self_use.py | {
"start": 1459,
"end": 1869
} | class ____(Super):
"""override method with need for self"""
def method(self):
"""no i can not be a function"""
print(42)
def __len__(self):
"""no i can not be a function"""
return 42
def __cmp__(self, other):
"""no i can not be a function"""
print(42)
def __copy__(self):
return 24
def __getstate__(self):
return 42
| Sub1 |
python | plotly__plotly.py | plotly/graph_objs/layout/ternary/aaxis/_tickfont.py | {
"start": 235,
"end": 9925
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.ternary.aaxis"
_path_str = "layout.ternary.aaxis.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the tick font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.ternary
.aaxis.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.ternary.aaxis.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.ternary.aaxis.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickfont |
python | sphinx-doc__sphinx | sphinx/domains/c/_ast.py | {
"start": 16425,
"end": 17478
} | class ____(ASTExpression):
def __init__(self, op: str, expr: ASTExpression) -> None:
self.op = op
self.expr = expr
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTUnaryOpExpr):
return NotImplemented
return self.op == other.op and self.expr == other.expr
def __hash__(self) -> int:
return hash((self.op, self.expr))
def _stringify(self, transform: StringifyTransform) -> str:
if self.op[0] in 'cn':
return self.op + ' ' + transform(self.expr)
else:
return self.op + transform(self.expr)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
if self.op[0] in 'cn':
signode += addnodes.desc_sig_keyword(self.op, self.op)
signode += addnodes.desc_sig_space()
else:
signode += addnodes.desc_sig_operator(self.op, self.op)
self.expr.describe_signature(signode, mode, env, symbol)
| ASTUnaryOpExpr |
python | instagram__MonkeyType | demo/inbox.py | {
"start": 2709,
"end": 3430
} | class ____(AggregatorInterface[FollowedEvent]):
type = EventType.FOLLOWED
def __init__(self, repo: RepoInterface) -> None:
self.events: List[FollowedEvent] = []
self.user_ids: Set[UserId] = set()
super().__init__(repo)
def add(self, event):
self.events.append(event)
self.user_ids.add(event.follower_id)
def aggregate(self):
users_by_id = self.repo.get_users_by_ids(self.user_ids)
return [
AggregatedItem(
type=self.type,
text=f"{users_by_id[e.follower_id].name} started following you.",
published=e.published,
)
for e in self.events
]
| FollowersAggregator |
python | walkccc__LeetCode | solutions/2565. Subsequence With the Minimum Score/2565.py | {
"start": 0,
"end": 1465
} | class ____:
def minimumScore(self, s: str, t: str) -> int:
# leftmost[j] := the minimum index i s.t. t[0..j] is a subsequence of s[0..i].
# -1 := impossible
leftmost = [-1] * len(t)
# rightmost[j] := the maximum index i s.t. t[j:] is a subsequence of s[i..n).
# -1 := impossible
rightmost = [-1] * len(t)
j = 0 # t's index
for i in range(len(s)):
if s[i] == t[j]:
leftmost[j] = i
j += 1
if j == len(t):
break
j = len(t) - 1 # t's index
for i in reversed(range(len(s))):
if s[i] == t[j]:
rightmost[j] = i
j -= 1
if j == -1:
break
# The worst case is to delete t[0:j] since t[j:] is a subsequence of s. (deduced
# from the above loop).
ans = j + 1
j = 0
for i in range(len(t)):
# It's impossible that t[0..i] is a subsequence of s. So, stop the loop since
# no need to consider any larger i.
if leftmost[i] == -1:
break
# While t[0..i] + t[j:] is not a subsequence of s, increase j.
while j < len(t) and leftmost[i] >= rightmost[j]:
j += 1
# Now, leftmost[i] < rightmost[j], so t[0..i] + t[j:n] is a subsequence of s.
# If i == j that means t is a subsequence of s, so just return 0.
if i == j:
return 0
# Delete t[i + 1..j - 1] and that's a total of j - i - 1 letters.
ans = min(ans, j - i - 1)
return ans
| Solution |
python | tensorflow__tensorflow | tensorflow/python/distribute/mirrored_values_test.py | {
"start": 3945,
"end": 9295
} | class ____(test.TestCase, parameterized.TestCase):
def _assign_mirrored(self, v, new):
for var, n in zip(v.values, new):
self.evaluate(var.assign(n))
def _save_return_saver(self, sess, var):
saver = saver_lib.Saver(var_list=[var])
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
return saver.save(sess, prefix), saver
def _save(self, sess, var):
save_path, _ = self._save_return_saver(sess, var)
return save_path
def _save_mirrored(self, distribution):
"""Save variables with mirroring, returns save_path."""
with self.session(graph=ops.Graph()) as sess:
mirrored = _make_mirrored(distribution)
# Overwrite the initial values.
self._assign_mirrored(mirrored, [3., 4.])
# Saves the current value of v[0], 3.
save_path = self._save(sess, mirrored)
# Change the values between save and restore.
self._assign_mirrored(mirrored, [5., 6.])
return save_path
def _save_normal(self):
"""Save variables without mirroring, returns save_path."""
with self.session(graph=ops.Graph()) as sess:
var = variable_scope.get_variable(
name="v", initializer=1., use_resource=True)
# Overwrite the initial value.
self.evaluate(var.assign(3.))
# Saves the current value of var, 3.
save_path = self._save(sess, var)
# Change the values between save and restore.
self.evaluate(var.assign(5.))
return save_path
def _restore_normal(self, save_path):
"""Restore to variables without mirroring in a fresh graph."""
with self.session(graph=ops.Graph()) as sess:
var = variable_scope.get_variable(
name="v", initializer=7., use_resource=True)
# Overwrite the initial value.
self.evaluate(var.assign(8.))
# Restores the saved value of 3. to `var`.
saver = saver_lib.Saver(var_list=[var])
saver.restore(sess, save_path)
self.assertEqual(3., self.evaluate(var))
def _restore_mirrored(self, save_path, distribution):
"""Restore to variables with mirroring in a fresh graph."""
with self.session(graph=ops.Graph()) as sess:
mirrored = _make_mirrored(distribution)
v = mirrored.values
# Overwrite the initial values.
self._assign_mirrored(mirrored, [7., 8.])
# Restores the saved value of 3. to both variables.
saver = saver_lib.Saver(var_list=[mirrored])
saver.restore(sess, save_path)
self.assertEqual([3., 3.], self.evaluate([v[0], v[1]]))
@combinations.generate(mirrored_and_tpu_strategy_combinations())
def testSaveAndRestoreMirroredOneGraph(self, distribution):
with self.cached_session() as sess:
mirrored = _make_mirrored(distribution)
v = mirrored .values
# Overwrite the initial values.
self._assign_mirrored(mirrored, [3., 4.])
# Saves the current value of v[0], 3.
save_path, saver = self._save_return_saver(sess, mirrored)
# Change the values between save and restore.
self._assign_mirrored(mirrored, [5., 6.])
# Restores the saved value of 3. to both variables.
saver.restore(sess, save_path)
self.assertEqual([3., 3.], self.evaluate([v[0], v[1]]))
@combinations.generate(mirrored_and_tpu_strategy_combinations())
def testSaveMirroredRestoreMirrored(self, distribution):
if context.num_gpus() < 1 and context.executing_eagerly():
# Graph mode can work without GPU because the Placer "moves" the
# variable to a CPU. In other words, if there is no GPU available, but
# user requested to create a variable on GPU, Placer will ignore the
# user request and assign the VarHandleOp to CPU. This requires
# soft_placement, which is on by default.
self.skipTest("A GPU is not available for this test in eager mode.")
save_path = self._save_mirrored(distribution)
self._restore_mirrored(save_path, distribution)
@combinations.generate(mirrored_and_tpu_strategy_combinations())
def testSaveMirroredRestoreNormal(self, distribution):
if context.num_gpus() < 1 and context.executing_eagerly():
# Graph mode can work without GPU because the Placer "moves" the
# variable to a CPU. In other words, if there is no GPU available, but
# user requested to create a variable on GPU, Placer will ignore the
# user request and assign the VarHandleOp to CPU. This requires
# soft_placement, which is on by default.
self.skipTest("A GPU is not available for this test in eager mode.")
save_path = self._save_mirrored(distribution)
self._restore_normal(save_path)
@combinations.generate(mirrored_and_tpu_strategy_combinations())
def testSaveNormalRestoreMirrored(self, distribution):
if context.num_gpus() < 1 and context.executing_eagerly():
# Graph mode can work without GPU because the Placer "moves" the
# variable to a CPU. In other words, if there is no GPU available, but
# user requested to create a variable on GPU, Placer will ignore the
# user request and assign the VarHandleOp to CPU. This requires
# soft_placement, which is on by default.
self.skipTest("A GPU is not available for this test in eager mode.")
save_path = self._save_normal()
self._restore_mirrored(save_path, distribution)
| MirroredVariableSaveRestoreTest |
python | pypa__warehouse | tests/unit/admin/views/test_projects.py | {
"start": 6589,
"end": 9359
} | class ____:
def test_add_observation(self, db_request):
release = ReleaseFactory.create()
user = UserFactory.create()
db_request.route_path = pretend.call_recorder(
lambda *a, **kw: "/admin/projects/"
)
db_request.matchdict["project_name"] = release.project.normalized_name
db_request.POST["kind"] = ObservationKind.IsSpam.value[0]
db_request.POST["summary"] = "This is a summary"
db_request.user = user
views.add_release_observation(release, db_request)
assert len(release.observations) == 1
def test_no_kind_errors(self):
release = pretend.stub(
project=pretend.stub(name="foo", normalized_name="foo"), version="1.0"
)
request = pretend.stub(
POST={},
session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
route_path=lambda *a, **kw: "/foo/bar/",
)
with pytest.raises(HTTPSeeOther) as exc:
views.add_release_observation(release, request)
assert exc.value.status_code == 303
assert exc.value.headers["Location"] == "/foo/bar/"
assert request.session.flash.calls == [
pretend.call("Provide a kind", queue="error")
]
def test_invalid_kind_errors(self):
release = pretend.stub(
project=pretend.stub(name="foo", normalized_name="foo"), version="1.0"
)
request = pretend.stub(
POST={"kind": "not a valid kind"},
session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
route_path=lambda *a, **kw: "/foo/bar/",
)
with pytest.raises(HTTPSeeOther) as exc:
views.add_release_observation(release, request)
assert exc.value.status_code == 303
assert exc.value.headers["Location"] == "/foo/bar/"
assert request.session.flash.calls == [
pretend.call("Invalid kind", queue="error")
]
def test_no_summary_errors(self):
release = pretend.stub(
project=pretend.stub(name="foo", normalized_name="foo"), version="1.0"
)
request = pretend.stub(
POST={"kind": ObservationKind.IsSpam.value[0]},
session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
route_path=lambda *a, **kw: "/foo/bar/",
)
with pytest.raises(HTTPSeeOther) as exc:
views.add_release_observation(release, request)
assert exc.value.status_code == 303
assert exc.value.headers["Location"] == "/foo/bar/"
assert request.session.flash.calls == [
pretend.call("Provide a summary", queue="error")
]
| TestReleaseAddObservation |
python | walkccc__LeetCode | solutions/2940. Find Building Where Alice and Bob Can Meet/2940.py | {
"start": 47,
"end": 220
} | class ____:
queryIndex: int
a: int # Alice's index
b: int # Bob's index
def __iter__(self):
yield self.queryIndex
yield self.a
yield self.b
| IndexedQuery |
python | dabeaz-course__practical-python | Work/Data/stocksim.py | {
"start": 5060,
"end": 5152
} | class ____(object):
def update(self,record):
print(csv_record(record))
| BasicPrinter |
python | django-crispy-forms__django-crispy-forms | crispy_forms/bootstrap.py | {
"start": 35471,
"end": 37901
} | class ____(LayoutObject):
"""
Bootstrap layout object for rendering crispy forms objects inside a
bootstrap modal.
Attributes
----------
template : str
The default template which this Layout Object will be rendered
with.
Parameters
----------
*fields : str
The fields to be rendered within the modal.
template : str, optional
Overrides the default template, if provided. By default ``None``.
css_id: str, optional
The modal's DOM id. By default ``modal_id``.
title: str, optional
Text to display in the modal's header which will be wrapped in an
``<H5>`` tag. By default ``Modal Title``.
title_id: str, optional
The title's DOM id. By default ``modal_title_id``.
css_class : str, optional
CSS classes to be applied to the field. These are added to any classes
included in the ``attrs`` dict. By default None.
title_class: str, optional
Additional CSS classes to be applied to the title. By default None.
**kwargs : dict, optional
Additional attributes are converted into key="value", pairs. These
attributes are added to the ``<div>``.
Examples
--------
Example::
Modal(
'field1',
Div('field2'),
css_id="modal-id-ex",
css_class="modal-class-ex,
title="This is my modal",
)
"""
template = "%s/layout/modal.html"
def __init__(
self,
*fields,
template=None,
css_id="modal_id",
title="Modal Title",
title_id="modal_title_id",
css_class=None,
title_class=None,
**kwargs,
):
self.fields = list(fields)
self.template = template or self.template
self.css_id = css_id
self.css_class = css_class or ""
self.title = title
self.title_id = title_id
self.title_class = title_class or ""
kwargs = {**kwargs, "tabindex": "-1", "role": "dialog", "aria-labelledby": "%s-label" % self.title_id}
self.flat_attrs = flatatt(kwargs)
def render(self, form, context, template_pack=TEMPLATE_PACK, **kwargs):
fields = self.get_rendered_fields(form, context, template_pack, **kwargs)
template = self.get_template_name(template_pack)
return render_to_string(template, {"modal": self, "fields": fields})
| Modal |
python | pydata__xarray | xarray/core/_aggregations.py | {
"start": 180197,
"end": 235067
} | class ____:
_obj: Dataset
def reduce(
self,
func: Callable[..., Any],
dim: Dims = None,
*,
axis: int | Sequence[int] | None = None,
keep_attrs: bool | None = None,
keepdims: bool = False,
**kwargs: Any,
) -> Dataset:
raise NotImplementedError()
def _flox_reduce(
self,
dim: Dims,
**kwargs: Any,
) -> Dataset:
raise NotImplementedError()
def count(
self,
dim: Dims = None,
*,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
"""
Reduce this Dataset's data by applying ``count`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``count`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``count`` applied to its data and the
indicated dimension(s) removed
See Also
--------
pandas.DataFrame.count
dask.dataframe.DataFrame.count
Dataset.count
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.resample(time="3ME").count()
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) int64 24B 1 3 1
"""
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="count",
dim=dim,
numeric_only=False,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.count,
dim=dim,
numeric_only=False,
keep_attrs=keep_attrs,
**kwargs,
)
def all(
self,
dim: Dims = None,
*,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
"""
Reduce this Dataset's data by applying ``all`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``all`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``all`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.all
dask.array.all
Dataset.all
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Examples
--------
>>> da = xr.DataArray(
... np.array([True, True, True, True, True, False], dtype=bool),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 78B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) bool 6B True True True True True False
>>> ds.resample(time="3ME").all()
<xarray.Dataset> Size: 27B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) bool 3B True True False
"""
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="all",
dim=dim,
numeric_only=False,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.array_all,
dim=dim,
numeric_only=False,
keep_attrs=keep_attrs,
**kwargs,
)
def any(
self,
dim: Dims = None,
*,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
"""
Reduce this Dataset's data by applying ``any`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``any`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``any`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.any
dask.array.any
Dataset.any
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Examples
--------
>>> da = xr.DataArray(
... np.array([True, True, True, True, True, False], dtype=bool),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 78B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) bool 6B True True True True True False
>>> ds.resample(time="3ME").any()
<xarray.Dataset> Size: 27B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) bool 3B True True True
"""
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="any",
dim=dim,
numeric_only=False,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.array_any,
dim=dim,
numeric_only=False,
keep_attrs=keep_attrs,
**kwargs,
)
def max(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
"""
Reduce this Dataset's data by applying ``max`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``max`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``max`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.max
dask.array.max
Dataset.max
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.resample(time="3ME").max()
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 24B 1.0 3.0 2.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.resample(time="3ME").max(skipna=False)
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 24B 1.0 3.0 nan
"""
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="max",
dim=dim,
skipna=skipna,
numeric_only=False,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.max,
dim=dim,
skipna=skipna,
numeric_only=False,
keep_attrs=keep_attrs,
**kwargs,
)
def min(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
"""
Reduce this Dataset's data by applying ``min`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``min`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``min`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.min
dask.array.min
Dataset.min
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.resample(time="3ME").min()
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 24B 1.0 0.0 2.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.resample(time="3ME").min(skipna=False)
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 24B 1.0 0.0 nan
"""
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="min",
dim=dim,
skipna=skipna,
numeric_only=False,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.min,
dim=dim,
skipna=skipna,
numeric_only=False,
keep_attrs=keep_attrs,
**kwargs,
)
def mean(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
"""
Reduce this Dataset's data by applying ``mean`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``mean`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``mean`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.mean
dask.array.mean
Dataset.mean
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.resample(time="3ME").mean()
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 24B 1.0 1.667 2.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.resample(time="3ME").mean(skipna=False)
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 24B 1.0 1.667 nan
"""
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="mean",
dim=dim,
skipna=skipna,
numeric_only=True,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.mean,
dim=dim,
skipna=skipna,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
)
def prod(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
min_count: int | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
"""
Reduce this Dataset's data by applying ``prod`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
min_count : int or None, optional
The required number of valid values to perform the operation. If
fewer than min_count non-NA values are present the result will be
NA. Only used if skipna is set to True or defaults to True for the
array's dtype. Changed in version 0.17.0: if specified on an integer
array and skipna=True, the result will be a float array.
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``prod`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``prod`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.prod
dask.array.prod
Dataset.prod
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.resample(time="3ME").prod()
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 24B 1.0 0.0 2.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.resample(time="3ME").prod(skipna=False)
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 24B 1.0 0.0 nan
Specify ``min_count`` for finer control over when NaNs are ignored.
>>> ds.resample(time="3ME").prod(skipna=True, min_count=2)
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 24B nan 0.0 nan
"""
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="prod",
dim=dim,
skipna=skipna,
min_count=min_count,
numeric_only=True,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.prod,
dim=dim,
skipna=skipna,
min_count=min_count,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
)
def sum(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
min_count: int | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
"""
Reduce this Dataset's data by applying ``sum`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
min_count : int or None, optional
The required number of valid values to perform the operation. If
fewer than min_count non-NA values are present the result will be
NA. Only used if skipna is set to True or defaults to True for the
array's dtype. Changed in version 0.17.0: if specified on an integer
array and skipna=True, the result will be a float array.
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``sum`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``sum`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.sum
dask.array.sum
Dataset.sum
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.resample(time="3ME").sum()
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 24B 1.0 5.0 2.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.resample(time="3ME").sum(skipna=False)
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 24B 1.0 5.0 nan
Specify ``min_count`` for finer control over when NaNs are ignored.
>>> ds.resample(time="3ME").sum(skipna=True, min_count=2)
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 24B nan 5.0 nan
"""
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="sum",
dim=dim,
skipna=skipna,
min_count=min_count,
numeric_only=True,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.sum,
dim=dim,
skipna=skipna,
min_count=min_count,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
)
def std(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
ddof: int = 0,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
"""
Reduce this Dataset's data by applying ``std`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
ddof : int, default: 0
“Delta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``,
where ``N`` represents the number of elements.
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``std`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``std`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.std
dask.array.std
Dataset.std
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.resample(time="3ME").std()
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 24B 0.0 1.247 0.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.resample(time="3ME").std(skipna=False)
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 24B 0.0 1.247 nan
Specify ``ddof=1`` for an unbiased estimate.
>>> ds.resample(time="3ME").std(skipna=True, ddof=1)
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 24B nan 1.528 nan
"""
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="std",
dim=dim,
skipna=skipna,
ddof=ddof,
numeric_only=True,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.std,
dim=dim,
skipna=skipna,
ddof=ddof,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
)
def var(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
ddof: int = 0,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
"""
Reduce this Dataset's data by applying ``var`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
ddof : int, default: 0
“Delta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``,
where ``N`` represents the number of elements.
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``var`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``var`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.var
dask.array.var
Dataset.var
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.resample(time="3ME").var()
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 24B 0.0 1.556 0.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.resample(time="3ME").var(skipna=False)
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 24B 0.0 1.556 nan
Specify ``ddof=1`` for an unbiased estimate.
>>> ds.resample(time="3ME").var(skipna=True, ddof=1)
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 24B nan 2.333 nan
"""
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="var",
dim=dim,
skipna=skipna,
ddof=ddof,
numeric_only=True,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.var,
dim=dim,
skipna=skipna,
ddof=ddof,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
)
def median(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
"""
Reduce this Dataset's data by applying ``median`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``median`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``median`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.median
dask.array.median
Dataset.median
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.resample(time="3ME").median()
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 24B 1.0 2.0 2.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.resample(time="3ME").median(skipna=False)
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31
Data variables:
da (time) float64 24B 1.0 2.0 nan
"""
return self.reduce(
duck_array_ops.median,
dim=dim,
skipna=skipna,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
)
def cumsum(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
"""
Reduce this Dataset's data by applying ``cumsum`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``cumsum``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``cumsum`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``cumsum`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.cumsum
dask.array.cumsum
Dataset.cumsum
Dataset.cumulative
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Non-numeric variables will be removed prior to reducing.
Note that the methods on the ``cumulative`` method are more performant (with numbagg installed)
and better supported. ``cumsum`` and ``cumprod`` may be deprecated
in the future.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.resample(time="3ME").cumsum()
<xarray.Dataset> Size: 48B
Dimensions: (time: 6)
Dimensions without coordinates: time
Data variables:
da (time) float64 48B 1.0 2.0 5.0 5.0 2.0 2.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.resample(time="3ME").cumsum(skipna=False)
<xarray.Dataset> Size: 48B
Dimensions: (time: 6)
Dimensions without coordinates: time
Data variables:
da (time) float64 48B 1.0 2.0 5.0 5.0 2.0 nan
"""
return self.reduce(
duck_array_ops.cumsum,
dim=dim,
skipna=skipna,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
)
def cumprod(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
"""
Reduce this Dataset's data by applying ``cumprod`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``cumprod``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions.
If "...", will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``cumprod`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``cumprod`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.cumprod
dask.array.cumprod
Dataset.cumprod
Dataset.cumulative
:ref:`resampling`
User guide on resampling operations.
Notes
-----
Use the ``flox`` package to significantly speed up resampling computations,
especially with dask arrays. Xarray will use flox by default if installed.
Pass flox-specific keyword arguments in ``**kwargs``.
See the `flox documentation <https://flox.readthedocs.io>`_ for more.
Non-numeric variables will be removed prior to reducing.
Note that the methods on the ``cumulative`` method are more performant (with numbagg installed)
and better supported. ``cumsum`` and ``cumprod`` may be deprecated
in the future.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.resample(time="3ME").cumprod()
<xarray.Dataset> Size: 48B
Dimensions: (time: 6)
Dimensions without coordinates: time
Data variables:
da (time) float64 48B 1.0 2.0 6.0 0.0 2.0 2.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.resample(time="3ME").cumprod(skipna=False)
<xarray.Dataset> Size: 48B
Dimensions: (time: 6)
Dimensions without coordinates: time
Data variables:
da (time) float64 48B 1.0 2.0 6.0 0.0 2.0 nan
"""
return self.reduce(
duck_array_ops.cumprod,
dim=dim,
skipna=skipna,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
)
| DatasetResampleAggregations |
python | kamyu104__LeetCode-Solutions | Python/convert-to-base-2.py | {
"start": 380,
"end": 767
} | class ____(object):
def baseNeg2(self, N):
"""
:type N: int
:rtype: str
"""
BASE = -2
result = []
while N:
N, r = divmod(N, BASE)
if r < 0:
r -= BASE
N += 1
result.append(str(r))
result.reverse()
return "".join(result) if result else "0"
| Solution2 |
python | django__django | tests/model_inheritance_regress/models.py | {
"start": 48,
"end": 212
} | class ____(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
class Meta:
ordering = ("name",)
| Place |
python | geekcomputers__Python | Snake_water_gun/main.py | {
"start": 276,
"end": 2735
} | class ____:
HEADERS = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[93m"
WARNING = "\033[92m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
run = True
li = ["s", "w", "g"]
while True:
system("clear")
b = input(
bcolors.OKBLUE
+ bcolors.BOLD
+ "Welcome to the game 'Snake-Water-Gun'.\nWanna play? Type Y or N: "
+ bcolors.ENDC
).capitalize()
if b == "N":
run = False
print("Ok bubyeee! See you later")
break
elif b == "Y" or b == "y":
print(
"There will be 10 matches, and the one who wins more matches will win. Let's start."
)
break
else:
continue
i = 0
score = 0
while run and i < 10:
comp_choice = random.choice(li)
user_choice = input("Type s for snake, w for water or g for gun: ").lower()
if user_choice == comp_choice:
print(bcolors.HEADERS + "Game draws. Play again" + bcolors.ENDC)
elif user_choice == "s" and comp_choice == "g":
print(bcolors.FAIL + "It's Snake v/s Gun You lose!" + bcolors.ENDC)
elif user_choice == "s" and comp_choice == "w":
print(bcolors.OKGREEN + "It's Snake v/s Water. You won" + bcolors.ENDC)
score += 1
elif user_choice == "w" and comp_choice == "s":
print(bcolors.FAIL + "It's Water v/s Snake You lose!" + bcolors.ENDC)
elif user_choice == "w" and comp_choice == "g":
print(bcolors.OKGREEN + "It's Water v/s Gun. You won" + bcolors.ENDC)
score += 1
elif user_choice == "g" and comp_choice == "w":
print(bcolors.FAIL + "It's Gun v/s Water You lose!" + bcolors.ENDC)
elif user_choice == "g" and comp_choice == "s":
print(bcolors.OKGREEN + "It's Gun v/s Snake. You won" + bcolors.ENDC)
score += 1
else:
print("Wrong input")
continue
i += 1
print(f"{10 - i} matches left")
if run == True:
print(f"Your score is {score} and the final result is...")
time.sleep(3)
if score > 5:
print(
bcolors.OKGREEN
+ bcolors.BOLD
+ "Woooh!!!!!!! Congratulations you won"
+ bcolors.ENDC
)
elif score == 5:
print("Game draws!!!!!!!")
elif score < 5:
print(
bcolors.FAIL
+ bcolors.BOLD
+ "You lose!!!. Better luck next time"
+ bcolors.ENDC
)
| bcolors |
python | astropy__astropy | astropy/io/registry/tests/test_registries.py | {
"start": 10143,
"end": 25256
} | class ____(TestUnifiedIORegistryBase):
"""Test :class:`astropy.io.registry.UnifiedInputRegistry`."""
def setup_class(self):
"""Setup class. This is called 1st by pytest."""
self._cls = UnifiedInputRegistry
# ===========================================
def test_inherited_read_registration(self, registry):
# check that multi-generation inheritance works properly,
# meaning that a child inherits from parents before
# grandparents, see astropy/astropy#7156
class Child1(EmptyData):
pass
class Child2(Child1):
pass
def _read():
return EmptyData()
def _read1():
return Child1()
# check that reader gets inherited
registry.register_reader("test", EmptyData, _read)
assert registry.get_reader("test", Child2) is _read
# check that nearest ancestor is identified
# (i.e. that the reader for Child2 is the registered method
# for Child1, and not Table)
registry.register_reader("test", Child1, _read1)
assert registry.get_reader("test", Child2) is _read1
# ===========================================
@pytest.mark.skip("TODO!")
def test_get_formats(self, registry):
"""Test ``registry.get_formats()``."""
raise AssertionError()
@SKIPIF_OPTIMIZED_PYTHON
def test_delay_doc_updates(self, registry, fmtcls1):
"""Test ``registry.delay_doc_updates()``."""
super().test_delay_doc_updates(registry, fmtcls1)
with registry.delay_doc_updates(EmptyData):
registry.register_reader("test", EmptyData, empty_reader)
# test that the doc has not yet been updated.
# if a the format was registered in a different way, then
# test that this method is not present.
if "Format" in EmptyData.read.__doc__:
docs = EmptyData.read.__doc__.split("\n")
ihd = [i for i, s in enumerate(docs) if ("Format" in s)][0]
ifmt = docs[ihd].index("Format") + 1
iread = docs[ihd].index("Read") + 1
# there might not actually be anything here, which is also good
if docs[-2] != docs[-1]:
assert docs[-1][ifmt : ifmt + 5] == "test"
assert docs[-1][iread : iread + 3] != "Yes"
# now test it's updated
docs = EmptyData.read.__doc__.split("\n")
ihd = [i for i, s in enumerate(docs) if ("Format" in s)][0]
ifmt = docs[ihd].index("Format") + 2
iread = docs[ihd].index("Read") + 1
assert docs[-2][ifmt : ifmt + 4] == "test"
assert docs[-2][iread : iread + 3] == "Yes"
def test_identify_read_format(self, registry):
"""Test ``registry.identify_format()``."""
args = ("read", EmptyData, None, None, (None,), {})
# test there is no format to identify
formats = registry.identify_format(*args)
assert formats == []
# test there is a format to identify
# doesn't actually matter if register a reader, it returns True for all
registry.register_identifier("test", EmptyData, empty_identifier)
formats = registry.identify_format(*args)
assert "test" in formats
# -----------------------
def test_register_reader(self, registry, fmtcls1, fmtcls2):
"""Test ``registry.register_reader()``."""
# initial check it's not registered
assert fmtcls1 not in registry._readers
assert fmtcls2 not in registry._readers
# register
registry.register_reader(*fmtcls1, empty_reader)
registry.register_reader(*fmtcls2, empty_reader)
assert fmtcls1 in registry._readers
assert fmtcls2 in registry._readers
assert registry._readers[fmtcls1] == (empty_reader, 0) # (f, priority)
assert registry._readers[fmtcls2] == (empty_reader, 0) # (f, priority)
def test_register_reader_invalid(self, registry, fmtcls1):
fmt, cls = fmtcls1
registry.register_reader(fmt, cls, empty_reader)
with pytest.raises(IORegistryError) as exc:
registry.register_reader(fmt, cls, empty_reader)
assert (
str(exc.value) == f"Reader for format '{fmt}' and class "
f"'{cls.__name__}' is already defined"
)
def test_register_reader_force(self, registry, fmtcls1):
registry.register_reader(*fmtcls1, empty_reader)
registry.register_reader(*fmtcls1, empty_reader, force=True)
assert fmtcls1 in registry._readers
def test_register_readers_with_same_name_on_different_classes(self, registry):
# No errors should be generated if the same name is registered for
# different objects...but this failed under python3
registry.register_reader("test", EmptyData, lambda: EmptyData())
registry.register_reader("test", OtherEmptyData, lambda: OtherEmptyData())
t = EmptyData.read(format="test", registry=registry)
assert isinstance(t, EmptyData)
tbl = OtherEmptyData.read(format="test", registry=registry)
assert isinstance(tbl, OtherEmptyData)
# -----------------------
def test_unregister_reader(self, registry, fmtcls1):
"""Test ``registry.unregister_reader()``."""
registry.register_reader(*fmtcls1, empty_reader)
assert fmtcls1 in registry._readers
registry.unregister_reader(*fmtcls1)
assert fmtcls1 not in registry._readers
def test_unregister_reader_invalid(self, registry, fmtcls1):
fmt, cls = fmtcls1
with pytest.raises(IORegistryError) as exc:
registry.unregister_reader(*fmtcls1)
assert (
str(exc.value)
== f"No reader defined for format '{fmt}' and class '{cls.__name__}'"
)
# -----------------------
def test_get_reader(self, registry, fmtcls):
"""Test ``registry.get_reader()``."""
fmt, cls = fmtcls
with pytest.raises(IORegistryError):
registry.get_reader(fmt, cls)
registry.register_reader(fmt, cls, empty_reader)
reader = registry.get_reader(fmt, cls)
assert reader is empty_reader
def test_get_reader_invalid(self, registry, fmtcls):
fmt, cls = fmtcls
with pytest.raises(IORegistryError) as exc:
registry.get_reader(fmt, cls)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt}' and class '{cls.__name__}'"
)
# -----------------------
def test_read_noformat(self, registry, fmtcls1):
"""Test ``registry.read()`` when there isn't a reader."""
with pytest.raises(IORegistryError) as exc:
fmtcls1[1].read(registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_read_noformat_arbitrary(self, registry, original, fmtcls1):
"""Test that all identifier functions can accept arbitrary input"""
registry._identifiers.update(original["identifiers"])
with pytest.raises(IORegistryError) as exc:
fmtcls1[1].read(object(), registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_read_noformat_arbitrary_file(self, tmp_path, registry, original):
"""Tests that all identifier functions can accept arbitrary files"""
registry._readers.update(original["readers"])
testfile = tmp_path / "foo.example"
with open(testfile, "w") as f:
f.write("Hello world")
with pytest.raises(IORegistryError) as exc:
Table.read(testfile)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_read_toomanyformats(self, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
registry.register_identifier(fmt1, cls, lambda o, *x, **y: True)
registry.register_identifier(fmt2, cls, lambda o, *x, **y: True)
with pytest.raises(IORegistryError) as exc:
cls.read(registry=registry)
assert str(exc.value) == f"Format is ambiguous - options are: {fmt1}, {fmt2}"
def test_read_uses_priority(self, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
counter = Counter()
def counting_reader1(*args, **kwargs):
counter[fmt1] += 1
return cls()
def counting_reader2(*args, **kwargs):
counter[fmt2] += 1
return cls()
registry.register_reader(fmt1, cls, counting_reader1, priority=1)
registry.register_reader(fmt2, cls, counting_reader2, priority=2)
registry.register_identifier(fmt1, cls, lambda o, *x, **y: True)
registry.register_identifier(fmt2, cls, lambda o, *x, **y: True)
cls.read(registry=registry)
assert counter[fmt2] == 1
assert counter[fmt1] == 0
def test_read_format_noreader(self, registry, fmtcls1):
fmt, cls = fmtcls1
with pytest.raises(IORegistryError) as exc:
cls.read(format=fmt, registry=registry)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt}' and class '{cls.__name__}'"
)
def test_read_identifier(self, tmp_path, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
registry.register_identifier(
fmt1, cls, lambda o, path, fileobj, *x, **y: path.endswith("a")
)
registry.register_identifier(
fmt2, cls, lambda o, path, fileobj, *x, **y: path.endswith("b")
)
# Now check that we got past the identifier and are trying to get
# the reader. The registry.get_reader will fail but the error message
# will tell us if the identifier worked.
filename = tmp_path / "testfile.a"
open(filename, "w").close()
with pytest.raises(IORegistryError) as exc:
cls.read(filename, registry=registry)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt1}' and class '{cls.__name__}'"
)
filename = tmp_path / "testfile.b"
open(filename, "w").close()
with pytest.raises(IORegistryError) as exc:
cls.read(filename, registry=registry)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt2}' and class '{cls.__name__}'"
)
def test_read_valid_return(self, registry, fmtcls):
fmt, cls = fmtcls
registry.register_reader(fmt, cls, empty_reader)
t = cls.read(format=fmt, registry=registry)
assert isinstance(t, cls)
def test_read_non_existing_unknown_ext(self, fmtcls1):
"""Raise the correct error when attempting to read a non-existing
file with an unknown extension."""
with pytest.raises(OSError):
data = fmtcls1[1].read("non-existing-file-with-unknown.ext")
def test_read_directory(self, tmp_path, registry, fmtcls1):
"""
Regression test for a bug that caused the I/O registry infrastructure to
not work correctly for datasets that are represented by folders as
opposed to files, when using the descriptors to add read/write methods.
"""
_, cls = fmtcls1
registry.register_identifier(
"test_folder_format", cls, lambda o, *x, **y: o == "read"
)
registry.register_reader("test_folder_format", cls, empty_reader)
filename = tmp_path / "folder_dataset"
filename.mkdir()
# With the format explicitly specified
dataset = cls.read(filename, format="test_folder_format", registry=registry)
assert isinstance(dataset, cls)
# With the auto-format identification
dataset = cls.read(filename, registry=registry)
assert isinstance(dataset, cls)
# ===========================================
# Compat tests
def test_compat_register_reader(self, registry, fmtcls1):
# with registry specified
assert fmtcls1 not in registry._readers
compat.register_reader(*fmtcls1, empty_reader, registry=registry)
assert fmtcls1 in registry._readers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._readers
try:
compat.register_reader(*fmtcls1, empty_identifier)
except Exception:
pass
else:
assert fmtcls1 in default_registry._readers
finally:
default_registry._readers.pop(fmtcls1)
def test_compat_unregister_reader(self, registry, fmtcls1):
# with registry specified
registry.register_reader(*fmtcls1, empty_reader)
assert fmtcls1 in registry._readers
compat.unregister_reader(*fmtcls1, registry=registry)
assert fmtcls1 not in registry._readers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._readers
default_registry.register_reader(*fmtcls1, empty_reader)
assert fmtcls1 in default_registry._readers
compat.unregister_reader(*fmtcls1)
assert fmtcls1 not in registry._readers
def test_compat_get_reader(self, registry, fmtcls1):
# with registry specified
registry.register_reader(*fmtcls1, empty_reader)
reader = compat.get_reader(*fmtcls1, registry=registry)
assert reader is empty_reader
registry.unregister_reader(*fmtcls1)
# without registry specified it becomes default_registry
if registry is not default_registry:
default_registry.register_reader(*fmtcls1, empty_reader)
reader = compat.get_reader(*fmtcls1)
assert reader is empty_reader
default_registry.unregister_reader(*fmtcls1)
def test_compat_read(self, registry, fmtcls1):
fmt, cls = fmtcls1
# with registry specified
registry.register_reader(*fmtcls1, empty_reader)
t = compat.read(cls, format=fmt, registry=registry)
assert isinstance(t, cls)
registry.unregister_reader(*fmtcls1)
# without registry specified it becomes default_registry
if registry is not default_registry:
default_registry.register_reader(*fmtcls1, empty_reader)
t = compat.read(cls, format=fmt)
assert isinstance(t, cls)
default_registry.unregister_reader(*fmtcls1)
| TestUnifiedInputRegistry |
python | oauthlib__oauthlib | tests/openid/connect/core/grant_types/test_hybrid.py | {
"start": 386,
"end": 639
} | class ____(AuthorizationCodeGrantTest):
"""Test that OpenID don't interfere with normal OAuth 2 flows."""
def setUp(self):
super().setUp()
self.auth = HybridGrant(request_validator=self.mock_validator)
| OpenIDHybridInterferenceTest |
python | sympy__sympy | sympy/physics/quantum/tests/test_innerproduct.py | {
"start": 702,
"end": 936
} | class ____(Ket, FooState):
@classmethod
def dual_class(self):
return FooBra
def _eval_innerproduct_FooBra(self, bra):
return Integer(1)
def _eval_innerproduct_BarBra(self, bra):
return I
| FooKet |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/convolutional.py | {
"start": 124320,
"end": 126087
} | class ____(Layer):
"""Cropping layer for 1D input (e.g. temporal sequence).
It crops along the time dimension (axis 1).
Examples:
>>> input_shape = (2, 3, 2)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> print(x)
[[[ 0 1]
[ 2 3]
[ 4 5]]
[[ 6 7]
[ 8 9]
[10 11]]]
>>> y = tf.keras.layers.Cropping1D(cropping=1)(x)
>>> print(y)
tf.Tensor(
[[[2 3]]
[[8 9]]], shape=(2, 1, 2), dtype=int64)
Args:
cropping: Int or tuple of int (length 2)
How many units should be trimmed off at the beginning and end of
the cropping dimension (axis 1).
If a single int is provided, the same value will be used for both.
Input shape:
3D tensor with shape `(batch_size, axis_to_crop, features)`
Output shape:
3D tensor with shape `(batch_size, cropped_axis, features)`
"""
def __init__(self, cropping=(1, 1), **kwargs):
super(Cropping1D, self).__init__(**kwargs)
self.cropping = conv_utils.normalize_tuple(cropping, 2, 'cropping')
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if input_shape[1] is not None:
length = input_shape[1] - self.cropping[0] - self.cropping[1]
else:
length = None
return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])
def call(self, inputs):
if self.cropping[1] == 0:
return inputs[:, self.cropping[0]:, :]
else:
return inputs[:, self.cropping[0]:-self.cropping[1], :]
def get_config(self):
config = {'cropping': self.cropping}
base_config = super(Cropping1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| Cropping1D |
python | PrefectHQ__prefect | src/prefect/server/events/schemas/automations.py | {
"start": 7959,
"end": 15630
} | class ____(ResourceTrigger):
"""
A trigger that fires based on the presence or absence of events within a given
period of time.
"""
type: Literal["event"] = "event"
after: Set[str] = Field(
default_factory=set,
description=(
"The event(s) which must first been seen to fire this trigger. If "
"empty, then fire this trigger immediately. Events may include "
"trailing wildcards, like `prefect.flow-run.*`"
),
)
expect: Set[str] = Field(
default_factory=set,
description=(
"The event(s) this trigger is expecting to see. If empty, this "
"trigger will match any event. Events may include trailing wildcards, "
"like `prefect.flow-run.*`"
),
)
for_each: Set[str] = Field(
default_factory=set,
description=(
"Evaluate the trigger separately for each distinct value of these labels "
"on the resource. By default, labels refer to the primary resource of the "
"triggering event. You may also refer to labels from related "
"resources by specifying `related:<role>:<label>`. This will use the "
"value of that label for the first related resource in that role. For "
'example, `"for_each": ["related:flow:prefect.resource.id"]` would '
"evaluate the trigger for each flow."
),
)
posture: Literal[Posture.Reactive, Posture.Proactive] = Field( # type: ignore[valid-type]
...,
description=(
"The posture of this trigger, either Reactive or Proactive. Reactive "
"triggers respond to the _presence_ of the expected events, while "
"Proactive triggers respond to the _absence_ of those expected events."
),
)
threshold: int = Field(
1,
description=(
"The number of events required for this trigger to fire (for "
"Reactive triggers), or the number of events expected (for Proactive "
"triggers)"
),
)
within: timedelta = Field(
timedelta(seconds=0),
ge=timedelta(seconds=0),
description=(
"The time period over which the events must occur. For Reactive triggers, "
"this may be as low as 0 seconds, but must be at least 10 seconds for "
"Proactive triggers"
),
)
@model_validator(mode="before")
@classmethod
def enforce_minimum_within_for_proactive_triggers(
cls, data: Dict[str, Any] | Any
) -> Dict[str, Any]:
if not isinstance(data, dict):
return data
if "within" in data and data["within"] is None:
raise ValueError("`within` should be a valid timedelta")
posture: Optional[Posture] = data.get("posture")
within: Optional[timedelta] = data.get("within")
if isinstance(within, (int, float)):
data["within"] = within = timedelta(seconds=within)
if posture == Posture.Proactive:
if not within or within == timedelta(0):
data["within"] = timedelta(seconds=10.0)
elif within < timedelta(seconds=10.0):
raise ValueError(
"`within` for Proactive triggers must be greater than or equal to "
"10 seconds"
)
return data
def covers(self, event: ReceivedEvent) -> bool:
if not self.covers_resources(event.resource, event.related):
return False
if not self.event_pattern.match(event.event):
return False
return True
@property
def immediate(self) -> bool:
"""Does this reactive trigger fire immediately for all events?"""
return self.posture == Posture.Reactive and self.within == timedelta(0)
_event_pattern: Optional[re.Pattern[str]] = PrivateAttr(None)
@property
def event_pattern(self) -> re.Pattern[str]:
"""A regular expression which may be evaluated against any event string to
determine if this trigger would be interested in the event"""
if self._event_pattern:
return self._event_pattern
if not self.expect:
# This preserves the trivial match for `expect`, and matches the behavior
# of expects() below
self._event_pattern = re.compile(".+")
else:
patterns = [
# escape each pattern, then translate wildcards ('*' -> r'.+')
re.escape(e).replace("\\*", ".+")
for e in self.expect | self.after
]
self._event_pattern = re.compile("|".join(patterns))
return self._event_pattern
def starts_after(self, event: str) -> bool:
# Warning: Previously we returned 'True' if there was trivial 'after' criteria.
# Although this is not wrong, it led to automations processing more events
# than they should have.
if not self.after:
return False
for candidate in self.after:
if matches(candidate, event):
return True
return False
def expects(self, event: str) -> bool:
if not self.expect:
return True
for candidate in self.expect:
if matches(candidate, event):
return True
return False
def bucketing_key(self, event: ReceivedEvent) -> Tuple[str, ...]:
return tuple(
event.find_resource_label(label) or "" for label in sorted(self.for_each)
)
def meets_threshold(self, event_count: int) -> bool:
if self.posture == Posture.Reactive and event_count >= self.threshold:
return True
if self.posture == Posture.Proactive and event_count < self.threshold:
return True
return False
def create_automation_state_change_event(
self, firing: Firing, trigger_state: TriggerState
) -> ReceivedEvent:
"""Returns a ReceivedEvent for an automation state change
into a triggered or resolved state."""
automation = firing.trigger.automation
triggering_event = firing.triggering_event
resource_data = Resource(
{
"prefect.resource.id": f"prefect.automation.{automation.id}",
"prefect.resource.name": automation.name,
}
)
if self.posture.value:
resource_data["prefect.posture"] = self.posture.value
return ReceivedEvent(
occurred=firing.triggered,
event=f"prefect.automation.{trigger_state.value.lower()}",
resource=resource_data,
related=(
[
RelatedResource(
{
"prefect.resource.id": f"prefect.event.{triggering_event.id}",
"prefect.resource.role": "triggering-event",
}
)
]
if triggering_event
else []
),
payload={
"triggering_labels": firing.triggering_labels,
"triggering_event": (
triggering_event.model_dump(mode="json")
if triggering_event
else None
),
},
id=uuid7(),
)
ServerTriggerTypes: TypeAlias = Union[EventTrigger, CompoundTrigger, SequenceTrigger]
"""The union of all concrete trigger types that a user may actually create"""
T = TypeVar("T", bound=Trigger)
| EventTrigger |
python | openai__openai-python | src/openai/_base_client.py | {
"start": 7761,
"end": 8889
} | class ____(Generic[_T, AsyncPageT]):
def __init__(
self,
client: AsyncAPIClient,
options: FinalRequestOptions,
page_cls: Type[AsyncPageT],
model: Type[_T],
) -> None:
self._model = model
self._client = client
self._options = options
self._page_cls = page_cls
def __await__(self) -> Generator[Any, None, AsyncPageT]:
return self._get_page().__await__()
async def _get_page(self) -> AsyncPageT:
def _parser(resp: AsyncPageT) -> AsyncPageT:
resp._set_private_attributes(
model=self._model,
options=self._options,
client=self._client,
)
return resp
self._options.post_parser = _parser
return await self._client.request(self._page_cls, self._options)
async def __aiter__(self) -> AsyncIterator[_T]:
# https://github.com/microsoft/pyright/issues/3464
page = cast(
AsyncPageT,
await self, # type: ignore
)
async for item in page:
yield item
| AsyncPaginator |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataplex.py | {
"start": 106402,
"end": 110183
} | class ____(DataplexCatalogBaseOperator):
"""
List EntryGroup resources.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataplexCatalogListEntryGroupsOperator`
:param filter_by: Optional. Filter to apply on the list results.
:param order_by: Optional. Fields to order the results by.
:param page_size: Optional. Maximum number of EntryGroups to return on the page.
:param page_token: Optional. Token to retrieve the next page of results.
:param project_id: Required. The ID of the Google Cloud project where the service is used.
:param location: Required. The ID of the Google Cloud region where the service is used.
:param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not
be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
:param impersonation_chain: Optional. Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple(DataplexCatalogBaseOperator.template_fields)
operator_extra_links = (DataplexCatalogEntryGroupsLink(),)
def __init__(
self,
page_size: int | None = None,
page_token: str | None = None,
filter_by: str | None = None,
order_by: str | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.page_size = page_size
self.page_token = page_token
self.filter_by = filter_by
self.order_by = order_by
def execute(self, context: Context):
DataplexCatalogEntryGroupsLink.persist(context=context)
self.log.info(
"Listing Dataplex Catalog EntryGroup from location %s.",
self.location,
)
try:
entry_group_on_page = self.hook.list_entry_groups(
location=self.location,
project_id=self.project_id,
page_size=self.page_size,
page_token=self.page_token,
filter_by=self.filter_by,
order_by=self.order_by,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("EntryGroup on page: %s", entry_group_on_page)
context["ti"].xcom_push(
key="entry_group_page",
value=ListEntryGroupsResponse.to_dict(entry_group_on_page._response),
)
except Exception as ex:
raise AirflowException(ex)
# Constructing list to return EntryGroups in readable format
entry_groups_list = [
MessageToDict(entry_group._pb, preserving_proto_field_name=True)
for entry_group in next(iter(entry_group_on_page.pages)).entry_groups
]
return entry_groups_list
| DataplexCatalogListEntryGroupsOperator |
python | sympy__sympy | sympy/physics/quantum/qft.py | {
"start": 2817,
"end": 4768
} | class ____(Gate):
"""Superclass of Quantum Fourier and Inverse Quantum Fourier Gates."""
@classmethod
def _eval_args(self, args):
if len(args) != 2:
raise QuantumError(
'QFT/IQFT only takes two arguments, got: %r' % args
)
if args[0] >= args[1]:
raise QuantumError("Start must be smaller than finish")
return Gate._eval_args(args)
def _represent_default_basis(self, **options):
return self._represent_ZGate(None, **options)
def _represent_ZGate(self, basis, **options):
"""
Represents the (I)QFT In the Z Basis
"""
nqubits = options.get('nqubits', 0)
if nqubits == 0:
raise QuantumError(
'The number of qubits must be given as nqubits.')
if nqubits < self.min_qubits:
raise QuantumError(
'The number of qubits %r is too small for the gate.' % nqubits
)
size = self.size
omega = self.omega
#Make a matrix that has the basic Fourier Transform Matrix
arrayFT = [[omega**(
i*j % size)/sqrt(size) for i in range(size)] for j in range(size)]
matrixFT = Matrix(arrayFT)
#Embed the FT Matrix in a higher space, if necessary
if self.label[0] != 0:
matrixFT = matrix_tensor_product(eye(2**self.label[0]), matrixFT)
if self.min_qubits < nqubits:
matrixFT = matrix_tensor_product(
matrixFT, eye(2**(nqubits - self.min_qubits)))
return matrixFT
@property
def targets(self):
return range(self.label[0], self.label[1])
@property
def min_qubits(self):
return self.label[1]
@property
def size(self):
"""Size is the size of the QFT matrix"""
return 2**(self.label[1] - self.label[0])
@property
def omega(self):
return Symbol('omega')
| Fourier |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.