language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | Pylons__pyramid | docs/quick_tutorial/json/tutorial/tests.py | {
"start": 675,
"end": 1350
} | class ____(unittest.TestCase):
def setUp(self):
from tutorial import main
app = main({})
from webtest import TestApp
self.testapp = TestApp(app)
def test_home(self):
res = self.testapp.get('/', status=200)
self.assertIn(b'<h1>Hi Home View', res.body)
def test_hello(self):
res = self.testapp.get('/howdy', status=200)
self.assertIn(b'<h1>Hi Hello View', res.body)
def test_hello_json(self):
res = self.testapp.get('/howdy.json', status=200)
self.assertIn(b'{"name": "Hello View"}', res.body)
self.assertEqual(res.content_type, 'application/json')
| TutorialFunctionalTests |
python | mlflow__mlflow | mlflow/server/jobs/utils.py | {
"start": 7827,
"end": 14902
} | class ____:
instance: "huey.SqliteHuey"
submit_task: Callable[..., Any]
# Each job function has an individual execution pool, each execution pool
# is managed by a Huey instance.
# The `_huey_instance_map` stores the map, the key is the job function fullname,
# and the value is the `HueyInstance` object.
_huey_instance_map: dict[str, HueyInstance] = {}
_huey_instance_map_lock = threading.RLock()
def _get_or_init_huey_instance(instance_key: str):
from huey import SqliteHuey
from huey.serializer import Serializer
class CloudPickleSerializer(Serializer):
def serialize(self, data):
return cloudpickle.dumps(data)
def deserialize(self, data):
return cloudpickle.loads(data)
with _huey_instance_map_lock:
if instance_key not in _huey_instance_map:
_logger.info(f"Creating huey instance for {instance_key}")
huey_store_file = os.path.join(
os.environ[HUEY_STORAGE_PATH_ENV_VAR], f"{instance_key}.mlflow-huey-store"
)
huey_instance = SqliteHuey(
filename=huey_store_file,
results=False,
serializer=CloudPickleSerializer(),
)
huey_submit_task_fn = huey_instance.task(retries=0)(_exec_job)
_huey_instance_map[instance_key] = HueyInstance(
instance=huey_instance,
submit_task=huey_submit_task_fn,
)
return _huey_instance_map[instance_key]
def _launch_huey_consumer(job_fn_fullname: str) -> None:
_logger.info(f"Starting huey consumer for job function {job_fn_fullname}")
job_fn = _load_function(job_fn_fullname)
if not hasattr(job_fn, "_job_fn_metadata"):
raise MlflowException.invalid_parameter_value(
f"The job function {job_fn_fullname} is not decorated by "
"'mlflow.server.jobs.job_function'."
)
max_job_parallelism = job_fn._job_fn_metadata.max_workers
def _huey_consumer_thread() -> None:
while True:
# start MLflow job runner process
# Put it inside the loop to ensure the job runner process alive
job_runner_proc = _start_huey_consumer_proc(
job_fn_fullname,
max_job_parallelism,
)
job_runner_proc.wait()
time.sleep(1)
# start job runner.
threading.Thread(
target=_huey_consumer_thread,
name=f"MLflow-huey-consumer-{job_fn_fullname}-watcher",
daemon=False,
).start()
def _launch_job_runner(env_map, server_proc_pid):
return subprocess.Popen(
[
sys.executable,
"-m",
"mlflow.server.jobs._job_runner",
],
env={**os.environ, **env_map, "MLFLOW_SERVER_PID": str(server_proc_pid)},
)
def _start_watcher_to_kill_job_runner_if_mlflow_server_dies(check_interval: float = 1.0) -> None:
mlflow_server_pid = int(os.environ.get("MLFLOW_SERVER_PID"))
def watcher():
while True:
if not is_process_alive(mlflow_server_pid):
os.kill(os.getpid(), signal.SIGTERM)
time.sleep(check_interval)
t = threading.Thread(target=watcher, daemon=True, name="job-runner-watcher")
t.start()
def _load_function(fullname: str) -> Callable[..., Any]:
match fullname.split("."):
case [*module_parts, func_name] if module_parts:
module_name = ".".join(module_parts)
case _:
raise MlflowException.invalid_parameter_value(
f"Invalid function fullname format: {fullname}"
)
try:
module = importlib.import_module(module_name)
return getattr(module, func_name)
except ModuleNotFoundError:
# Module doesn't exist
raise MlflowException.invalid_parameter_value(
f"Module not found for function '{fullname}'",
)
except AttributeError:
# Function doesn't exist in the module
raise MlflowException.invalid_parameter_value(
f"Function not found in module for '{fullname}'",
)
def _enqueue_unfinished_jobs(server_launching_timestamp: int) -> None:
from mlflow.server.handlers import _get_job_store
job_store = _get_job_store()
unfinished_jobs = job_store.list_jobs(
statuses=[JobStatus.PENDING, JobStatus.RUNNING],
# filter out jobs created after the server is launched.
end_timestamp=server_launching_timestamp,
)
for job in unfinished_jobs:
if job.status == JobStatus.RUNNING:
job_store.reset_job(job.job_id) # reset the job status to PENDING
params = json.loads(job.params)
function = _load_function(job.function_fullname)
timeout = job.timeout
# enqueue job
_get_or_init_huey_instance(job.function_fullname).submit_task(
job.job_id, function, params, timeout
)
def _validate_function_parameters(function: Callable[..., Any], params: dict[str, Any]) -> None:
"""Validate that the provided parameters match the function's required arguments.
Args:
function: The function to validate parameters against
params: Dictionary of parameters provided for the function
Raises:
MlflowException: If required parameters are missing
"""
sig = inspect.signature(function)
# Get all required parameters (no default value)
# Exclude VAR_POSITIONAL (*args) and VAR_KEYWORD (**kwargs) parameters
required_params = [
name
for name, param in sig.parameters.items()
if param.default is inspect.Parameter.empty
and param.kind not in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD)
]
# Check for missing required parameters
if missing_params := [param for param in required_params if param not in params]:
raise MlflowException.invalid_parameter_value(
f"Missing required parameters for function '{function.__name__}': {missing_params}. "
f"Expected parameters: {list(sig.parameters.keys())}"
)
def _check_requirements(backend_store_uri: str | None = None) -> None:
from mlflow.server import BACKEND_STORE_URI_ENV_VAR
from mlflow.utils.uri import extract_db_type_from_uri
if os.name == "nt":
raise MlflowException("MLflow job backend does not support Windows system.")
if shutil.which("uv") is None:
raise MlflowException("MLflow job backend requires 'uv' but it is not installed.")
backend_store_uri = backend_store_uri or os.environ.get(BACKEND_STORE_URI_ENV_VAR)
if not backend_store_uri:
raise MlflowException(
"MLflow job backend requires a database backend store URI but "
"'--backend-store-uri' is not set"
)
try:
extract_db_type_from_uri(backend_store_uri)
except MlflowException:
raise MlflowException(
f"MLflow job backend requires a database backend store URI but got {backend_store_uri}"
)
| HueyInstance |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol32.py | {
"start": 841,
"end": 1211
} | class ____(Generic[Arg, Value]):
def method1(self, default: Value) -> Value:
return default
def another(self, arg: Arg) -> None:
return
def func2(arg: Arg, value: Value) -> Interface[Arg, Value]:
# This should generate an error because
# Implementation2 doesn't implement method2.
return Implementation2[Arg, Value]()
| Implementation2 |
python | spack__spack | lib/spack/spack/cmd/common/arguments.py | {
"start": 15945,
"end": 24019
} | class ____(argparse.Action):
"""Generic action for setting spack config options from CLI.
This works like a ``store_const`` action but you can set the
``dest`` to some Spack configuration path (like ``concretizer:reuse``)
and the ``const`` will be stored there using ``spack.config.set()``
"""
def __init__(
self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None,
require_environment=False,
):
# save the config option we're supposed to set
self.config_path = dest
# save whether the option requires an active env
self.require_environment = require_environment
# destination is translated to a legal python identifier by
# substituting '_' for ':'.
dest = dest.replace(":", "_")
super().__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help,
)
def __call__(self, parser, namespace, values, option_string):
if self.require_environment and not ev.active_environment():
raise argparse.ArgumentTypeError(
f"argument '{self.option_strings[-1]}' requires an environment"
)
# Retrieve the name of the config option and set it to
# the const from the constructor or a value from the CLI.
# Note that this is only called if the argument is actually
# specified on the command line.
spack.config.set(self.config_path, self.const, scope="command_line")
def add_concretizer_args(subparser):
"""Add a subgroup of arguments for controlling concretization.
These will appear in a separate group called 'concretizer arguments'.
There's no need to handle them in your command logic -- they all use
``ConfigSetAction``, which automatically handles setting configuration
options.
If you *do* need to access a value passed on the command line, you can
get at, e.g., the ``concretizer:reuse`` via ``args.concretizer_reuse``.
Just substitute ``_`` for ``:``.
"""
subgroup = subparser.add_argument_group("concretizer arguments")
subgroup.add_argument(
"-f",
"--force",
action=ConfigSetAction,
require_environment=True,
dest="concretizer:force",
const=True,
default=False,
help="allow changes to concretized specs in spack.lock (in an env)",
)
subgroup.add_argument(
"-U",
"--fresh",
action=ConfigSetAction,
dest="concretizer:reuse",
const=False,
default=None,
help="do not reuse installed deps; build newest configuration",
)
subgroup.add_argument(
"--reuse",
action=ConfigSetAction,
dest="concretizer:reuse",
const=True,
default=None,
help="reuse installed packages/buildcaches when possible",
)
subgroup.add_argument(
"--fresh-roots",
"--reuse-deps",
action=ConfigSetAction,
dest="concretizer:reuse",
const="dependencies",
default=None,
help="concretize with fresh roots and reused dependencies",
)
subgroup.add_argument(
"--deprecated",
action=ConfigSetAction,
dest="config:deprecated",
const=True,
default=None,
help="allow concretizer to select deprecated versions",
)
def add_connection_args(subparser, add_help):
def add_argument_string_or_variable(parser, arg: str, *, deprecate_str: bool = True, **kwargs):
group = parser.add_mutually_exclusive_group()
group.add_argument(arg, **kwargs)
# Update help string
if "help" in kwargs:
kwargs["help"] = "environment variable containing " + kwargs["help"]
group.add_argument(arg + "-variable", **kwargs)
s3_connection_parser = subparser.add_argument_group("S3 Connection")
add_argument_string_or_variable(
s3_connection_parser,
"--s3-access-key-id",
help="ID string to use to connect to this S3 mirror",
)
s3_connection_parser.add_argument(
"--s3-access-key-secret-variable",
help="environment variable containing secret string to use to connect to this S3 mirror",
)
s3_connection_parser.add_argument(
"--s3-access-token-variable",
help="environment variable containing access token to use to connect to this S3 mirror",
)
s3_connection_parser.add_argument(
"--s3-profile", help="S3 profile name to use to connect to this S3 mirror", default=None
)
s3_connection_parser.add_argument(
"--s3-endpoint-url", help="endpoint URL to use to connect to this S3 mirror"
)
oci_connection_parser = subparser.add_argument_group("OCI Connection")
add_argument_string_or_variable(
oci_connection_parser,
"--oci-username",
deprecate_str=False,
help="username to use to connect to this OCI mirror",
)
oci_connection_parser.add_argument(
"--oci-password-variable",
help="environment variable containing password to use to connect to this OCI mirror",
)
def use_buildcache(cli_arg_value):
"""Translate buildcache related command line arguments into a pair of strings,
representing whether the root or its dependencies can use buildcaches.
Argument type that accepts comma-separated subargs:
1. auto|only|never
2. package:auto|only|never
3. dependencies:auto|only|never
Args:
cli_arg_value (str): command line argument value to be translated
Return:
Tuple of two strings
"""
valid_keys = frozenset(["package", "dependencies"])
valid_values = frozenset(["only", "never", "auto"])
# Split in args, split in key/value, and trim whitespace
args = [tuple(map(lambda x: x.strip(), part.split(":"))) for part in cli_arg_value.split(",")]
# Verify keys and values
def is_valid(arg):
if len(arg) == 1:
return arg[0] in valid_values
if len(arg) == 2:
return arg[0] in valid_keys and arg[1] in valid_values
return False
valid, invalid = stable_partition(args, is_valid)
# print first error
if invalid:
raise argparse.ArgumentTypeError("invalid argument `{}`".format(":".join(invalid[0])))
# Default values
package = "auto"
dependencies = "auto"
# Override in order.
for arg in valid:
if len(arg) == 1:
package = dependencies = arg[0]
continue
key, val = arg
if key == "package":
package = val
else:
dependencies = val
return package, dependencies
def mirror_name_or_url(m):
# Look up mirror by name or use anonymous mirror with path/url.
# We want to guard against typos in mirror names, to avoid pushing
# accidentally to a dir in the current working directory.
# If there's a \ or / in the name, it's interpreted as a path or url.
if "/" in m or "\\" in m or m in (".", ".."):
return spack.mirrors.mirror.Mirror(m)
# Otherwise, the named mirror is required to exist.
try:
return spack.mirrors.utils.require_mirror_name(m)
except ValueError as e:
raise argparse.ArgumentTypeError(f"{e}. Did you mean {os.path.join('.', m)}?") from e
def mirror_url(url):
try:
return spack.mirrors.mirror.Mirror.from_url(url)
except ValueError as e:
raise argparse.ArgumentTypeError(str(e)) from e
def mirror_directory(path):
try:
return spack.mirrors.mirror.Mirror.from_local_path(path)
except ValueError as e:
raise argparse.ArgumentTypeError(str(e)) from e
def mirror_name(name):
try:
return spack.mirrors.utils.require_mirror_name(name)
except ValueError as e:
raise argparse.ArgumentTypeError(str(e)) from e
| ConfigSetAction |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_trace.py | {
"start": 3755,
"end": 5417
} | class ____(TypedDict):
culprit: str | None
end: float | None
event_id: str
issue_id: int
issue_short_id: str | None
level: str
project_id: int
project_slug: str
span: list[str]
start: float | None
suspect_spans: list[str]
title: str
type: int
LightResponse = TypedDict(
"LightResponse",
{
"errors": list[TraceError],
"event_id": str,
"generation": Optional[int],
"parent_event_id": Optional[str],
"parent_span_id": Optional[str],
"performance_issues": list[TracePerformanceIssue],
"project_id": int,
"project_slug": str,
"span_id": str,
"timestamp": float,
"transaction": str,
"transaction.duration": int,
"transaction.op": str,
},
)
FullResponse = TypedDict(
"FullResponse",
{
"_meta": dict[str, Any],
"children": list["FullResponse"],
"errors": list[TraceError],
"event_id": str,
"generation": Optional[int],
"measurements": dict[str, int],
"parent_event_id": Optional[str],
"parent_span_id": Optional[str],
"performance_issues": list[TracePerformanceIssue],
"profile_id": Optional[str],
"profiler_id": Optional[str],
"project_id": int,
"project_slug": str,
"sdk_name": Optional[str],
"span_id": str,
"start_timestamp": str | int,
"tags": list[EventTag],
"timestamp": str | int,
"transaction": str,
"transaction.duration": int,
"transaction.op": str,
"transaction.status": str,
},
)
| TracePerformanceIssue |
python | pydata__xarray | xarray/tests/test_indexing.py | {
"start": 577,
"end": 1141
} | class ____:
def test_getitem(self):
def getter(key):
return key * 2
indexer = indexing.IndexCallable(getter)
assert indexer[3] == 6
assert indexer[0] == 0
assert indexer[-1] == -2
def test_setitem(self):
def getter(key):
return key * 2
def setter(key, value):
raise NotImplementedError("Setter not implemented")
indexer = indexing.IndexCallable(getter, setter)
with pytest.raises(NotImplementedError):
indexer[3] = 6
| TestIndexCallable |
python | dask__distributed | distributed/worker_state_machine.py | {
"start": 28094,
"end": 28259
} | class ____(StateMachineEvent):
__slots__ = ("who_has", "nbytes")
who_has: dict[Key, Collection[str]]
nbytes: dict[Key, int]
@dataclass
| AcquireReplicasEvent |
python | wandb__wandb | tests/system_tests/test_automations/test_automations_api.py | {
"start": 16949,
"end": 23977
} | class ____:
@fixture
def old_automation(
self,
api: wandb.Api,
event,
action,
automation_name: str,
):
"""The original automation to be updated."""
# Setup: Create the original automation
automation = api.create_automation(
(event >> action), name=automation_name, description="orig description"
)
yield automation
# Cleanup: Delete the automation for good measure
api.delete_automation(automation)
assert len(list(api.automations(name=automation_name))) == 0
def test_update_name(self, api: wandb.Api, old_automation: Automation):
updated_value = "new-name"
old_automation.name = updated_value
new_automation = api.update_automation(old_automation)
assert new_automation.name == updated_value
def test_update_description(self, api: wandb.Api, old_automation: Automation):
new_value = "new description"
old_automation.description = new_value
new_automation = api.update_automation(old_automation)
assert new_automation.description == new_value
def test_update_enabled(self, api: wandb.Api, old_automation: Automation):
new_value = False
old_automation.enabled = new_value
new_automation = api.update_automation(old_automation)
assert new_automation.enabled == new_value
def test_update_action_to_webhook(
self, api: wandb.Api, old_automation: Automation, webhook: WebhookIntegration
):
# This is deliberately an "input" action, even though saved automations
# will have a "saved" action on them. We want to check that this is still
# handled correctly and reliably.
webhook_id = webhook.id
new_payload = {"new-key": "new-value"}
webhook_action = SendWebhook(
integration_id=webhook_id,
request_payload=new_payload,
)
old_automation.action = webhook_action
new_automation = api.update_automation(old_automation)
new_action = new_automation.action
assert isinstance(new_action, SavedWebhookAction)
assert new_action.action_type == ActionType.GENERIC_WEBHOOK
assert new_action.integration.id == webhook_id
assert new_action.request_payload == new_payload
def test_update_action_to_no_op(self, api: wandb.Api, old_automation: Automation):
# This is deliberately an "input" action, even though saved automations
# will have a "saved" action on them. We want to check that this is still
# handled correctly and reliably.
old_automation.action = DoNothing()
new_automation = api.update_automation(old_automation)
new_action = new_automation.action
# NO_OP actions don't have meaningful fields besides these
assert isinstance(new_action, SavedNoOpAction)
assert new_action.action_type == ActionType.NO_OP
# This is only meaningful if the original automation has a webhook action
@mark.parametrize("action_type", [ActionType.GENERIC_WEBHOOK], indirect=True)
def test_update_webhook_payload(self, api: wandb.Api, old_automation: Automation):
new_payload = {"new-key": "new-value"}
old_automation.action.request_payload = new_payload
new_automation = api.update_automation(old_automation)
assert new_automation.action.request_payload == new_payload
# This is only meaningful if the original automation has a notification action
@mark.parametrize("action_type", [ActionType.NOTIFICATION], indirect=True)
def test_update_notification_message(
self, api: wandb.Api, old_automation: Automation
):
new_message = "new message"
old_automation.action.message = new_message
new_automation = api.update_automation(old_automation)
assert new_automation.action.message == new_message
def test_update_scope_to_project(
self, api: wandb.Api, old_automation: Automation, project: Project
):
old_automation.scope = project
new_automation = api.update_automation(old_automation)
updated_scope = new_automation.scope
assert isinstance(updated_scope, ProjectScope)
assert updated_scope.id == project.id
assert updated_scope.name == project.name
@mark.parametrize(
# Run events don't support ArtifactCollection scope, so we'll test those separately.
"event_type",
sorted(
set(EventType)
- {
EventType.RUN_METRIC_THRESHOLD,
EventType.RUN_METRIC_CHANGE,
EventType.RUN_STATE,
EventType.RUN_METRIC_ZSCORE,
}
),
indirect=True,
)
def test_update_scope_to_artifact_collection(
self,
api: wandb.Api,
old_automation: Automation,
event_type: EventType,
artifact_collection: ArtifactCollection,
):
assert old_automation.event.event_type == event_type # Consistency check
old_automation.scope = artifact_collection
new_automation = api.update_automation(old_automation)
updated_scope = new_automation.scope
assert isinstance(updated_scope, ArtifactCollectionScopeTypes)
assert updated_scope.id == artifact_collection.id
assert updated_scope.name == artifact_collection.name
@mark.parametrize(
"event_type",
[
EventType.RUN_METRIC_THRESHOLD,
EventType.RUN_METRIC_CHANGE,
EventType.RUN_STATE,
EventType.RUN_METRIC_ZSCORE,
],
indirect=True,
)
def test_update_scope_to_artifact_collection_fails_for_incompatible_event(
self,
api: wandb.Api,
old_automation: Automation,
event_type: EventType,
artifact_collection: ArtifactCollection,
):
"""Updating automation scope to an artifact collection fails if the event type doesn't support it."""
assert old_automation.event.event_type == event_type # Consistency check
with raises(CommError):
old_automation.scope = artifact_collection
api.update_automation(old_automation)
@mark.parametrize(
"updates",
[
{"name": "new-name"},
{"description": "new-description"},
{"enabled": False},
{"description": "new-description", "enabled": False},
{"name": "new-name", "enabled": False},
{"name": "new-name", "description": "new-description", "enabled": False},
],
)
def test_update_via_kwargs(
self,
api: wandb.Api,
old_automation: Automation,
updates: dict[str, Any],
):
# Update the automation
new_automation = api.update_automation(old_automation, **updates)
for name, value in updates.items():
assert getattr(new_automation, name) == value
| TestUpdateAutomation |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/sensors/dataflow.py | {
"start": 18079,
"end": 24090
} | class ____(BaseSensorOperator):
"""
Checks for autoscaling events associated with a single job in Google Cloud Dataflow.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataflowJobAutoScalingEventsSensor`
:param job_id: ID of the Dataflow job to be checked.
:param callback: a function that can accept a list of serialized autoscaling events.
It can do whatever you want it to do. If the callback function is not provided,
then on successful completion the task will exit with True value.
For more info about the autoscaling event content see:
https://cloud.google.com/python/docs/reference/dataflow/latest/google.cloud.dataflow_v1beta3.types.AutoscalingEvent
:param fail_on_terminal_state: If set to True the sensor will raise an exception when the job reaches a terminal state.
No autoscaling events will be returned.
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: The location of the Dataflow job (for example europe-west1).
If set to None then the value of DEFAULT_DATAFLOW_LOCATION will be used.
See: https://cloud.google.com/dataflow/docs/concepts/regional-endpoints
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: If True, run the sensor in the deferrable mode.
:param poll_interval: Time (seconds) to wait between two consecutive calls to check the job.
"""
template_fields: Sequence[str] = ("job_id",)
def __init__(
self,
*,
job_id: str,
callback: Callable | None = None,
fail_on_terminal_state: bool = True,
project_id: str = PROVIDE_PROJECT_ID,
location: str = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval: int = 60,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.job_id = job_id
self.project_id = project_id
self.callback = callback
self.fail_on_terminal_state = fail_on_terminal_state
self.location = location
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.deferrable = deferrable
self.poll_interval = poll_interval
def poke(self, context: Context) -> PokeReturnValue | bool:
if self.fail_on_terminal_state:
job = self.hook.get_job(
job_id=self.job_id,
project_id=self.project_id,
location=self.location,
)
job_status = job["currentState"]
if job_status in DataflowJobStatus.TERMINAL_STATES:
message = f"Job with id '{self.job_id}' is already in terminal state: {job_status}"
raise AirflowException(message)
result = self.hook.fetch_job_autoscaling_events_by_id(
job_id=self.job_id,
project_id=self.project_id,
location=self.location,
)
result = result if self.callback is None else self.callback(result)
if isinstance(result, PokeReturnValue):
return result
if bool(result):
return PokeReturnValue(
is_done=True,
xcom_value=result,
)
return False
def execute(self, context: Context) -> Any:
"""Airflow runs this method on the worker and defers using the trigger."""
if not self.deferrable:
super().execute(context)
else:
self.defer(
trigger=DataflowJobAutoScalingEventTrigger(
job_id=self.job_id,
project_id=self.project_id,
location=self.location,
gcp_conn_id=self.gcp_conn_id,
poll_sleep=self.poll_interval,
impersonation_chain=self.impersonation_chain,
fail_on_terminal_state=self.fail_on_terminal_state,
),
method_name="execute_complete",
)
def execute_complete(self, context: Context, event: dict[str, str | list]) -> Any:
"""
Execute this method when the task resumes its execution on the worker after deferral.
If the trigger returns an event with success status - passes the event result to the callback function.
Returns the event result if no callback function is provided.
If the trigger returns an event with error status - raises an exception.
"""
if event["status"] == "success":
self.log.info(event["message"])
return event["result"] if self.callback is None else self.callback(event["result"])
raise AirflowException(f"Sensor failed with the following message: {event['message']}")
@cached_property
def hook(self) -> DataflowHook:
return DataflowHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
| DataflowJobAutoScalingEventsSensor |
python | langchain-ai__langchain | libs/langchain/langchain_classic/callbacks/streaming_aiter_final_only.py | {
"start": 289,
"end": 3542
} | class ____(AsyncIteratorCallbackHandler):
"""Callback handler that returns an async iterator.
Only the final output of the agent will be iterated.
"""
def append_to_last_tokens(self, token: str) -> None:
"""Append token to the last tokens."""
self.last_tokens.append(token)
self.last_tokens_stripped.append(token.strip())
if len(self.last_tokens) > len(self.answer_prefix_tokens):
self.last_tokens.pop(0)
self.last_tokens_stripped.pop(0)
def check_if_answer_reached(self) -> bool:
"""Check if the answer has been reached."""
if self.strip_tokens:
return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
return self.last_tokens == self.answer_prefix_tokens
def __init__(
self,
*,
answer_prefix_tokens: list[str] | None = None,
strip_tokens: bool = True,
stream_prefix: bool = False,
) -> None:
"""Instantiate AsyncFinalIteratorCallbackHandler.
Args:
answer_prefix_tokens: Token sequence that prefixes the answer.
Default is ["Final", "Answer", ":"]
strip_tokens: Ignore white spaces and new lines when comparing
answer_prefix_tokens to last tokens? (to determine if answer has been
reached)
stream_prefix: Should answer prefix itself also be streamed?
"""
super().__init__()
if answer_prefix_tokens is None:
self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
else:
self.answer_prefix_tokens = answer_prefix_tokens
if strip_tokens:
self.answer_prefix_tokens_stripped = [
token.strip() for token in self.answer_prefix_tokens
]
else:
self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
self.last_tokens = [""] * len(self.answer_prefix_tokens)
self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens)
self.strip_tokens = strip_tokens
self.stream_prefix = stream_prefix
self.answer_reached = False
@override
async def on_llm_start(
self,
serialized: dict[str, Any],
prompts: list[str],
**kwargs: Any,
) -> None:
# If two calls are made in a row, this resets the state
self.done.clear()
self.answer_reached = False
@override
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
if self.answer_reached:
self.done.set()
@override
async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
# Remember the last n tokens, where n = len(answer_prefix_tokens)
self.append_to_last_tokens(token)
# Check if the last n tokens match the answer_prefix_tokens list ...
if self.check_if_answer_reached():
self.answer_reached = True
if self.stream_prefix:
for t in self.last_tokens:
self.queue.put_nowait(t)
return
# If yes, then put tokens from now on
if self.answer_reached:
self.queue.put_nowait(token)
| AsyncFinalIteratorCallbackHandler |
python | pytorch__pytorch | torch/_inductor/codegen/cpp_micro_gemm.py | {
"start": 58045,
"end": 71818
} | class ____(CppMicroGemmAMX):
    """
    This class generates the code for WoQ int4 micro gemm using AMX intrinsics,
    which are available on 4th and newer generations of Intel Xeon.
    Shape of packed weight = [N // 32, K, 16], viewed as [N, K // 2]
    Shape of packed ScalesAndZeros = [K // group_size, N, 2]
    Reuse TEMPLATE_KERNEL of CppMicroGemmAMX.
    """
    # C++/Jinja template for the kernel entry point.  It dequantizes the int4
    # packed weights to bf16 (applying per-group scale and zero point) into a
    # stack buffer, then dispatches to the inherited AMX compute kernels.
    TEMPLATE_ENTRY = r"""
inline bool {{kernel_name}}_is_block_start(int index, int k_start, int group_size) {
    // check if (k_start + index) % group_size == 0, assuming group_size = 32/64/128
    return ((k_start + index) & (group_size - 1)) == 0;
}
{{declare_kernel}} {
    {{kernel.assert_function}}(N % {{block_n}} == 0, "N dimension must be multiple of {{block_n}}");
    {{kernel.assert_function}}(K % 2 == 0, "K dimension must be multiple of 2");
    {{kernel.assert_function}}({{block_n}} == 32, "block_n must be 32 for WOQ int4");
    // Create a stack-allocated buffer for tiles of B.
    // Except maybe for the tail-case, an AMX tile of B has 16x32 BF16 elements.
    // we cache K * {{block_n}} elements of dequantized B
    {{template.codegen_allocate_weight_buffer("dequantized_B_buf", input_t, "K", block_n)}}
    constexpr int BLOCK_K = {{block_k}};
    constexpr int64_t BLOCK_N = {{block_n}};
    constexpr int COLS = BLOCK_N / 16;
    const int PREFETCH_SIZE_K = 16 * 4;
    const int PREFETCH_SIZE_KB = (PREFETCH_SIZE_K + BLOCK_K - 1) / BLOCK_K;
    const int KB = K / BLOCK_K;
    __m512i b32[COLS * 2];
    __m512 vb[COLS * 2];
    __m512 scale[COLS];
    __m512 zero[COLS];
    // Lookup table to de-quantize int4 values to bf16.
    // Values are dequantized as truly int4 [-8, 7] range;
    //
    // dequant = (bf16(int4_value) * bf16_scale) + bf16_zero
    //
    static const __m512 lut = _mm512_set_ps(
        7.0f, 6.0f, 5.0f, 4.0f,
        3.0f, 2.0f, 1.0f, 0.0f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        -5.0f, -6.0f, -7.0f, -8.0f);
    // index for transpose
    static const __m512i idx1 = _mm512_set_epi32(
        30, 28, 26, 24, 22, 20, 18, 16,
        14, 12, 10, 8, 6, 4, 2, 0);
    static const __m512i idx2 = _mm512_set_epi32(
        31, 29, 27, 25, 23, 21, 19, 17,
        15, 13, 11, 9, 7, 5, 3, 1);
    // Indices for VNNI layout conversion
    __m512i idx_low = _mm512_set_epi32(
        0x17,
        0x07,
        0x16,
        0x06,
        0x15,
        0x05,
        0x14,
        0x04,
        0x13,
        0x03,
        0x12,
        0x02,
        0x11,
        0x01,
        0x10,
        0x00);
    __m512i idx_high = _mm512_set_epi32(
        0x1f,
        0x0f,
        0x1e,
        0x0e,
        0x1d,
        0x0d,
        0x1c,
        0x0c,
        0x1b,
        0x0b,
        0x1a,
        0x0a,
        0x19,
        0x09,
        0x18,
        0x08);
    // load scale and zero point
    auto load_scale_and_zeros = [&](int i, int _kb) {
        // load 2x bfloat16 vector
        __m512i t = _mm512_loadu_si512((__m512i*)(ScaleAndZeros + _kb * lds + 32 * i));
        _mm_prefetch(ScaleAndZeros + (_kb + PREFETCH_SIZE_KB) * lds + 32 * i, _MM_HINT_T0);
        // convert to 2x f32 vector
        __m512 a, b;
        at::vec::cvtbf16_fp32(t, a, b);
        // transpose scale_and_zero from {16, 2} to {2, 16}
        // inputs:
        //   a: {s0, z0, s1, z1, ..., s7, z7}
        //   b: {s8, z8, s9, z9, ..., s15, z15}
        // output:
        //   scale: {s0, s1, s2, ..., s15}
        //   zero: {z0, z1, z2, ..., z15}
        scale[i] = _mm512_mask_permutex2var_ps(a, 0xffff, idx1, b);
        zero[i] = _mm512_mask_permutex2var_ps(a, 0xffff, idx2, b);
    };
    // Dequantize a B block of 2 * block_n into bf16
    // So, it handles k and k+1 at the same time
    auto dequantize_B = [&](int n) {
        constexpr int64_t ldb_int4 = BLOCK_N / 2; // 16
        for (int k = 0, kb = 0; k < K; k += 2) {
            // Since block_k must be 32 for AMX microkernels, k_start may not be
            // a multiple of q_group_size. In that case, we need to load scales
            // and zero points immediately when k == 0 here
            if ({{kernel_name}}_is_block_start(k, k_start, q_group_size) || k == 0) {
                c10::ForcedUnroll<COLS>{}(load_scale_and_zeros, kb++);
            }
            _mm_prefetch(B + (k + PREFETCH_SIZE_K) * ldb_int4, _MM_HINT_T0);
            // load 256 bits = 64 elements in int4
            __m128i b4 = _mm_loadu_si128((__m128i*)(B + n / 2 * K + k * ldb_int4));
            b32[0] = _mm512_cvtepu8_epi32(b4);
            b32[1] = _mm512_srli_epi32(b32[0], 4);
            vb[0] = _mm512_permutexvar_ps(b32[0] , lut);
            vb[0] = _mm512_fmadd_ps(vb[0], scale[0], zero[0]);
            vb[1] = _mm512_permutexvar_ps(b32[1], lut);
            vb[1] = _mm512_fmadd_ps(vb[1], scale[1], zero[1]);
            __m128i b4_2 = _mm_loadu_si128((__m128i*)(B + n / 2 * K + (k + 1) * ldb_int4));
            b32[0 + COLS] = _mm512_cvtepu8_epi32(b4_2);
            b32[1 + COLS] = _mm512_srli_epi32(b32[0 + COLS], 4);
            vb[0 + COLS] = _mm512_permutexvar_ps(b32[0 + COLS] , lut);
            vb[0 + COLS] = _mm512_fmadd_ps(vb[0 + COLS], scale[0], zero[0]);
            vb[1 + COLS] = _mm512_permutexvar_ps(b32[1 + COLS], lut);
            vb[1 + COLS] = _mm512_fmadd_ps(vb[1 + COLS], scale[1], zero[1]);
            for (int i = 0; i < COLS; i++) {
                // convert to VNNI
                auto low = _mm512_permutex2var_ps(vb[i], idx_low, vb[i + COLS]);
                auto high = _mm512_permutex2var_ps(vb[i], idx_high, vb[i + COLS]);
                // convert lower 16 float32 values to bfloat16
                auto v0_bf16 = reinterpret_cast<__m256i>(_mm512_cvtneps_pbh(low));
                // convert higher 16 float32 values to bfloat16
                auto v1_bf16 = reinterpret_cast<__m256i>(_mm512_cvtneps_pbh(high));
                // combine the lower 16 and higher 16 bfloat16 values
                auto v = _mm512_castsi256_si512(v0_bf16);
                v = _mm512_inserti64x4(v, v1_bf16, 1);
                // store the VNNI format bfloat16 values
                {{input_t}}* addr = dequantized_B_buf + k * 32 + (i % 2) * 32;
                _mm512_storeu_si512(addr, v);
            }
        }
    };
    for (int64_t n = 0; n < N; n += {{block_n}}) {
        // Dequantize K * block_n int8 B elements into BF16
        dequantize_B(n);
        for (int64_t m = 0; m < M; m += {{block_m}}) {
            int64_t block_m = std::min<int64_t>(M - m, {{block_m}});
            int64_t m_tail = m;
{%- for num_rows in range(block_m, 0, -16) %}
{%- if num_rows != block_m %}
            else
{%- endif %}
            if (block_m >= {{num_rows}}) {
                {{kernel_name}}_amx_kernel_{{num_rows}}_{{num_columns}}<accum>(
                    amx_state,
                    A + m * lda,
                    dequantized_B_buf + n * K,
                    C + m * ldc + n,
                    K,
                    lda,
                    {{block_n}},
                    ldc,
                    16
                );
                block_m -= {{num_rows}};
                m_tail += {{num_rows}};
            }
{%- endfor %}
            if (block_m > 0) {
                {{kernel_name}}_amx_kernel_16_{{num_columns}}<accum>(
                    amx_state,
                    A + m_tail * lda,
                    dequantized_B_buf + n * K,
                    C + m_tail * ldc + n,
                    K,
                    lda,
                    {{block_n}},
                    ldc,
                    block_m
                );
            }
        } // for m
    } // for n
}
"""
    def get_kernel_extra_args_declare(self) -> str:
        """Return the extra formal parameters appended to the kernel signature
        (AMX state, quantization group size, packed scales/zeros, their leading
        dimension, and the starting K offset)."""
        return (
            "AMXState& amx_state,\n"
            "    const int64_t q_group_size,\n"
            "    const c10::BFloat16* __restrict__ ScaleAndZeros,\n"
            "    const int64_t lds,\n"
            "    int64_t k_start,"
        )
    def get_kernel_extra_args(self, **kwargs) -> list[str]:
        """Return the call-site argument strings matching
        ``get_kernel_extra_args_declare``; requires the generating ``kernel``
        and the ``qscale_and_zeros`` buffer in ``kwargs``."""
        assert "kernel" in kwargs
        assert "qscale_and_zeros" in kwargs
        kernel = kwargs["kernel"]
        qscale_and_zeros = kwargs["qscale_and_zeros"]
        return [
            "amx_state,",
            "group_size,",
            f"&({kernel.index(qscale_and_zeros, [0, 0, 0])}),",
            "N * 2,",  # lds
            "k_start,",
        ]
    def is_woq_int4(self):
        # Marks this micro-kernel as the weight-only-quantized int4 variant.
        return True
def create_micro_gemm(
    name,
    m,
    n,
    k,
    input_dtype,
    input2_dtype,
    output_dtype=None,
    compute_dtype=None,
    alpha=1,
    num_threads=-1,
    use_ref=True,
    q_group_size=None,
) -> Optional[CppMicroGemm]:
    """
    Based on the provided info, try to find the config of the micro-kernel that would
    deliver the best performance in terms of lower latency for this case.

    Candidates from ``micro_gemm_configs`` are filtered by ISA/dtype match and
    per-config ``extra_check``, then ranked by a (isa, dividable, occupancy,
    size) score tuple.  Falls back to ``CppMicroGemmRef`` (if ``use_ref``) or
    ``None`` when nothing matches.
    """
    # Instantiate a micro-gemm class from a matched config, keeping the
    # caller-supplied name and alpha.
    def create_from_config(cls, config: CppMicroGemmConfig):
        return cls(
            name,
            config.input_dtype,
            config.input2_dtype,
            config.output_dtype,
            config.compute_dtype,
            config.register_blocking,
            alpha,
        )
    def skip_amx_kernel_for_woq(dynamic_M):
        # For WoQ GEMM, AMX micro-kernel may not perform well if m is small.
        # Exception: for dynamic shapes, we consider using the AMX micro-kernel.
        if (
            dynamic_M
            or input_dtype != torch.bfloat16
            or input2_dtype not in [torch.int8, torch.uint8]
        ):
            return False
        m_threshold = 5
        return m < m_threshold
    assert isinstance(n, int) or n.is_number, n
    assert isinstance(k, int) or k.is_number, k
    from ..utils import has_free_symbols
    dynamic_M = has_free_symbols((m,))
    # For a symbolic M, rank configs using a size hint instead of the symbol.
    m = V.graph.sizevars.size_hint(m, fallback=1) if dynamic_M else m
    assert isinstance(m, int) or m.is_number, m
    if output_dtype is None:
        output_dtype = input_dtype
    if compute_dtype is None:
        compute_dtype = output_dtype
    if num_threads < 0:
        num_threads = parallel_num_threads()
    vec_isa = pick_vec_isa()
    matched_configs = []
    for cls, configs in micro_gemm_configs.items():
        for config in configs:
            # Skip configs requiring an ISA the current machine does not have.
            if not issubclass(vec_isa.__class__, config.vec_isa_cls):
                continue
            if (
                config.input_dtype == input_dtype
                and config.compute_dtype == compute_dtype
                and config.input2_dtype == input2_dtype
                and config.output_dtype == output_dtype
                # The output_dtype here is the output dtype of the micro-kernel.
                # In some cases, the actual output dtype of the op for which the micro-kernel
                # is being created would be same as that of the activation, but the micro-kernels
                # compute output in Float/int32, which is converted in the GEMM template. This is
                # subject to change in the future.
            ):
                if config.extra_check is not None and not config.extra_check(
                    config,
                    m,
                    n,
                    k,
                    alpha,
                    num_threads,
                    dynamic_M=dynamic_M,
                    q_group_size=q_group_size,
                    vec_isa=vec_isa,
                ):
                    continue
                block_m, block_n, block_k = config.register_blocking
                if config.vec_isa_cls == VecAMX and skip_amx_kernel_for_woq(dynamic_M):
                    continue
                # Criteria on the ranking of configurations
                # 1. ISA: AMX > VEC
                # 2. Dividable by block sizes (block_m, block_n, block_k)
                # 3. Number of mxn blocks is large enough to occupy all the threads
                # 4. Register blocks are larger
                isa_score = 0
                if config.vec_isa_cls == VecAMX:
                    isa_score += 1
                dividable_score = 0
                if m % block_m == 0:
                    dividable_score += 1
                if n % block_n == 0:
                    dividable_score += 1
                if k % block_k == 0:
                    dividable_score += 1
                occupancy_score = 0
                n_blocks = (n + block_n - 1) // block_n
                total_mxn_blocks = n_blocks * ((m + block_m - 1) // block_m)
                if n_blocks >= num_threads:
                    occupancy_score += 1
                if total_mxn_blocks >= num_threads:
                    occupancy_score += 1
                register_bytes = (
                    block_m * block_n * config.compute_dtype.itemsize
                    + (block_m * block_k + block_k * block_n)
                    * config.input_dtype.itemsize
                )
                size_score = register_bytes
                # if number of mxn blocks can not occupy all the threads,
                # we favor smaller register blocks.
                if occupancy_score == 0:
                    size_score = 0 - register_bytes
                matched_configs.append(
                    (
                        (isa_score, dividable_score, occupancy_score, size_score),
                        cls,
                        config,
                    )
                )
    if len(matched_configs) == 0:
        if use_ref:
            return CppMicroGemmRef(
                name, input_dtype, input2_dtype, output_dtype, compute_dtype, alpha
            )
        else:
            return None
    # TODO(jgong5): allow autotuning on choices of configs
    return create_from_config(*max(matched_configs, key=operator.itemgetter(0))[1:])
| CppMicroGemmWoQInt4Amx |
python | bokeh__bokeh | src/bokeh/models/widgets/inputs.py | {
"start": 11352,
"end": 11965
} | class ____(TextLikeInput):
    ''' Single-line input widget.
    '''
    # explicit __init__ to support Init signatures
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
    # Both decorations below are Nullable strings: when None, nothing is shown.
    prefix = Nullable(String, help="""
    An optional string prefix to display before the input. This is useful to
    indicate e.g. a variable the entered value will be assigned to.
    """)
    suffix = Nullable(String, help="""
    An optional string suffix to display after the input. This is useful to
    indicate e.g. the units of measurement of the entered value.
    """)
| TextInput |
python | apache__airflow | providers/datadog/tests/unit/datadog/sensors/test_datadog.py | {
"start": 2193,
"end": 4525
} | class ____:
    """Tests for DatadogSensor.poke() with the Datadog Event API mocked out.

    Both the hook-level and sensor-level ``api.Event.query`` references are
    patched; per ``unittest.mock`` convention the bottom-most ``@patch`` is
    applied first, so ``api1`` is the sensors-module mock and ``api2`` the
    hooks-module mock.
    """
    @pytest.fixture(autouse=True)
    def setup_connections(self, create_connection_without_db):
        # Register the "datadog_default" connection every test relies on,
        # carrying fake API/app keys in the connection extras.
        create_connection_without_db(
            Connection(
                conn_id="datadog_default",
                conn_type="datadog",
                login="login",
                password="password",
                extra=json.dumps({"api_key": "api_key", "app_key": "app_key"}),
            )
        )
    @patch("airflow.providers.datadog.hooks.datadog.api.Event.query")
    @patch("airflow.providers.datadog.sensors.datadog.api.Event.query")
    def test_sensor_ok(self, api1, api2):
        # At least one matching event -> poke() succeeds.
        api1.return_value = at_least_one_event
        api2.return_value = at_least_one_event
        sensor = DatadogSensor(
            task_id="test_datadog",
            datadog_conn_id="datadog_default",
            from_seconds_ago=3600,
            up_to_seconds_from_now=0,
            priority=None,
            sources=None,
            tags=None,
            response_check=None,
        )
        assert sensor.poke({})
    @patch("airflow.providers.datadog.hooks.datadog.api.Event.query")
    @patch("airflow.providers.datadog.sensors.datadog.api.Event.query")
    def test_sensor_fail(self, api1, api2):
        # No events -> poke() returns False without raising.
        api1.return_value = zero_events
        api2.return_value = zero_events
        sensor = DatadogSensor(
            task_id="test_datadog",
            datadog_conn_id="datadog_default",
            from_seconds_ago=0,
            up_to_seconds_from_now=0,
            priority=None,
            sources=None,
            tags=None,
            response_check=None,
        )
        assert not sensor.poke({})
    @patch("airflow.providers.datadog.hooks.datadog.api.Event.query")
    @patch("airflow.providers.datadog.sensors.datadog.api.Event.query")
    def test_sensor_fail_with_exception(self, api1, api2):
        # An error status from the API surfaces as an AirflowException.
        api1.return_value = zero_events
        api2.return_value = {"status": "error"}
        sensor = DatadogSensor(
            task_id="test_datadog",
            datadog_conn_id="datadog_default",
            from_seconds_ago=0,
            up_to_seconds_from_now=0,
            priority=None,
            sources=None,
            tags=None,
            response_check=None,
        )
        with pytest.raises(AirflowException):
            sensor.poke({})
python | google__pytype | pytype/vm.py | {
"start": 2001,
"end": 2096
class ____(Exception):
    """Signals an internal failure in the operation of the virtual machine."""
python | joke2k__faker | faker/providers/address/en_AU/__init__.py | {
"start": 47,
"end": 6565
} | class ____(AddressProvider):
    """Australian (en_AU) address provider: localized street suffixes,
    state names/abbreviations, and postcode patterns."""
    city_prefixes = ("North", "East", "West", "South", "New", "Lake", "Port", "St.")
    city_suffixes = (
        "town",
        "ton",
        "land",
        "ville",
        "berg",
        "burgh",
        "borough",
        "bury",
        "view",
        "port",
        "mouth",
        "stad",
        "furt",
        "chester",
        "mouth",
        "fort",
        "haven",
        "side",
        "shire",
    )
    building_number_formats = ("###", "##", "#")
    street_suffixes = (
        "Access",
        "Alley",
        "Alleyway",
        "Amble",
        "Anchorage",
        "Approach",
        "Arcade",
        "Artery",
        "Avenue",
        "Basin",
        "Beach",
        "Bend",
        "Block",
        "Boulevard",
        "Brace",
        "Brae",
        "Break",
        "Bridge",
        "Broadway",
        "Brow",
        "Bypass",
        "Byway",
        "Causeway",
        "Centre",
        "Centreway",
        "Chase",
        "Circle",
        "Circlet",
        "Circuit",
        "Circus",
        "Close",
        "Colonnade",
        "Common",
        "Concourse",
        "Copse",
        "Corner",
        "Corso",
        "Court",
        "Courtyard",
        "Cove",
        "Crescent",
        "Crest",
        "Cross",
        "Crossing",
        "Crossroad",
        "Crossway",
        "Cruiseway",
        "Cul-de-sac",
        "Cutting",
        "Dale",
        "Dell",
        "Deviation",
        "Dip",
        "Distributor",
        "Drive",
        "Driveway",
        "Edge",
        "Elbow",
        "End",
        "Entrance",
        "Esplanade",
        "Estate",
        "Expressway",
        "Extension",
        "Fairway",
        "Fire Track",
        "Firetrail",
        "Flat",
        "Follow",
        "Footway",
        "Foreshore",
        "Formation",
        "Freeway",
        "Front",
        "Frontage",
        "Gap",
        "Garden",
        "Gardens",
        "Gate",
        "Gates",
        "Glade",
        "Glen",
        "Grange",
        "Green",
        "Ground",
        "Grove",
        "Gully",
        "Heights",
        "Highroad",
        "Highway",
        "Hill",
        "Interchange",
        "Intersection",
        "Junction",
        "Key",
        "Landing",
        "Lane",
        "Laneway",
        "Lees",
        "Line",
        "Link",
        "Little",
        "Lookout",
        "Loop",
        "Lower",
        "Mall",
        "Meander",
        "Mew",
        "Mews",
        "Motorway",
        "Mount",
        "Nook",
        "Outlook",
        "Parade",
        "Park",
        "Parklands",
        "Parkway",
        "Part",
        "Pass",
        "Path",
        "Pathway",
        "Piazza",
        "Place",
        "Plateau",
        "Plaza",
        "Pocket",
        "Point",
        "Port",
        "Promenade",
        "Quad",
        "Quadrangle",
        "Quadrant",
        "Quay",
        "Quays",
        "Ramble",
        "Ramp",
        "Range",
        "Reach",
        "Reserve",
        "Rest",
        "Retreat",
        "Ride",
        "Ridge",
        "Ridgeway",
        "Right Of Way",
        "Ring",
        "Rise",
        "River",
        "Riverway",
        "Riviera",
        "Road",
        "Roads",
        "Roadside",
        "Roadway",
        "Ronde",
        "Rosebowl",
        "Rotary",
        "Round",
        "Route",
        "Row",
        "Rue",
        "Run",
        "Service Way",
        "Siding",
        "Slope",
        "Sound",
        "Spur",
        "Square",
        "Stairs",
        "State Highway",
        "Steps",
        "Strand",
        "Street",
        "Strip",
        "Subway",
        "Tarn",
        "Terrace",
        "Thoroughfare",
        "Tollway",
        "Top",
        "Tor",
        "Towers",
        "Track",
        "Trail",
        "Trailer",
        "Triangle",
        "Trunkway",
        "Turn",
        "Underpass",
        "Upper",
        "Vale",
        "Viaduct",
        "View",
        "Villas",
        "Vista",
        "Wade",
        "Walk",
        "Walkway",
        "Way",
        "Wynd",
    )
    postcode_formats = (
        # as per https://en.wikipedia.org/wiki/Postcodes_in_Australia
        # NSW
        "1###",
        "20##",
        "21##",
        "22##",
        "23##",
        "24##",
        "25##",
        "2619",
        "262#",
        "263#",
        "264#",
        "265#",
        "266#",
        "267#",
        "268#",
        "269#",
        "27##",
        "28##",
        "292#",
        "293#",
        "294#",
        "295#",
        "296#",
        "297#",
        "298#",
        "299#",
        # ACT
        "02##",
        "260#",
        "261#",
        "290#",
        "291#",
        "2920",
        # VIC
        "3###",
        "8###",
        # QLD
        "4###",
        "9###",
        # SA
        "5###",
        # WA
        "6###",
        # TAS
        "7###",
        # NT
        "08##",
        "09##",
    )
    states = (
        "Australian Capital Territory",
        "New South Wales",
        "Northern Territory",
        "Queensland",
        "South Australia",
        "Tasmania",
        "Victoria",
        "Western Australia",
    )
    states_abbr = ("ACT", "NSW", "NT", "QLD", "SA", "TAS", "VIC", "WA")
    city_formats = (
        "{{city_prefix}} {{first_name}}{{city_suffix}}",
        "{{city_prefix}} {{first_name}}",
        "{{first_name}}{{city_suffix}}",
        "{{last_name}}{{city_suffix}}",
    )
    street_name_formats = (
        "{{first_name}} {{street_suffix}}",
        "{{last_name}} {{street_suffix}}",
    )
    street_address_formats = (
        "{{building_number}} {{street_name}}",
        "{{secondary_address}}{{building_number}} {{street_name}}",
    )
    address_formats = ("{{street_address}}\n{{city}}, {{state_abbr}}, {{postcode}}",)
    secondary_address_formats = (
        "Apt. ### ",
        "Flat ## ",
        "Suite ### ",
        "Unit ## ",
        "Level # ",
        "###/",
        "##/",
        "#/",
    )
    def city_prefix(self) -> str:
        """Return a random city prefix, e.g. ``North`` or ``Port``."""
        return self.random_element(self.city_prefixes)
    def secondary_address(self) -> str:
        """Return a random secondary address, e.g. ``Unit 42``."""
        return self.numerify(self.random_element(self.secondary_address_formats))
    def administrative_unit(self) -> str:
        """Return a random full state or territory name."""
        return self.random_element(self.states)
    state = administrative_unit
    def state_abbr(self) -> str:
        """Return a random state or territory abbreviation, e.g. ``NSW``."""
        return self.random_element(self.states_abbr)
python | kamyu104__LeetCode-Solutions | Python/rabbits-in-forest.py | {
"start": 50,
"end": 309
class ____(object):
    def numRabbits(self, answers):
        """
        Return the minimum possible number of rabbits in the forest.

        Each entry of `answers` is one rabbit's reply to "how many OTHER
        rabbits share your color?".  Rabbits answering k can share a color
        in groups of at most k + 1, so v rabbits answering k require
        ceil(v / (k + 1)) groups of k + 1 rabbits each.

        :type answers: List[int]
        :rtype: int
        """
        count = collections.Counter(answers)
        # Use dict.items() instead of the Python 2-only iteritems(), which
        # raises AttributeError on Python 3.  (v + k) // (k + 1) is the
        # integer ceiling of v / (k + 1).
        return sum((v + k) // (k + 1) * (k + 1) for k, v in count.items())
python | google__jax | tests/api_test.py | {
"start": 260134,
"end": 263930
} | class ____(jtu.JaxTestCase):
  """Tests for saved-input VJPs (api.si_vjp / api.vjp3): forward results,
  cotangents, unused/opaque residual detection, and pytree error handling."""
  def test_basic(self):
    def f(x, y):
      return x * y
    primals = 2., 3.
    y, f_vjp = api.si_vjp(f, [True, True], *primals)
    # Both inputs were marked saved, so the backward pass takes them again.
    arg_cts = f_vjp(1., *primals)
    self.assertAllClose(y, 6.)
    self.assertAllClose(arg_cts, (3., 2.))
  def test_basic_pass_through_jit(self):
    def f(x, y):
      return x * y
    @jax.jit
    def g():
      primals = 2., 3.
      y, f_vjp = api.si_vjp(f, [True, True], *primals)
      return y, f_vjp
    @jax.jit
    def h(f_vjp):
      # The si_vjp closure must survive being passed across a jit boundary.
      return f_vjp(1., 2., 3.)
    y, f_vjp = g()
    arg_cts = h(f_vjp)
    self.assertAllClose(y, 6.)
    self.assertAllClose(arg_cts, (3., 2.))
  def test_basic_unused(self):
    f = jnp.sin
    primals = 3.,
    y, f_vjp = api.si_vjp(f, [True], *primals)
    x_ct, = f_vjp(1., *primals)
    self.assertAllClose(y, jnp.sin(3.))
    self.assertAllClose(x_ct, jnp.cos(3.))
    # Saving an input the backward pass doesn't need errors under
    # allow_unused=False.
    with self.assertRaisesRegex(Exception, "not used by the backward pass: x"):
      _ = api.si_vjp(f, [True], *primals, allow_unused=False)
  def test_basic_unused_vjp3(self):
    f = jnp.sin
    primals = 3.,
    y, f_vjp = api.vjp3(f, *primals)
    x_ct, = f_vjp(1.)
    self.assertAllClose(y, jnp.sin(3.))
    self.assertAllClose(x_ct, jnp.cos(3.))
    self.assertIsInstance(f_vjp.args_res[0], api.NotNeeded)  # can check if unused
  def test_basic_opaque(self):
    f = jnp.sin
    primals = 3.,
    with self.assertRaisesRegex(Exception, "the backward pass requires opaque"):
      _ = api.si_vjp(f, [True], *primals, allow_opaque=False)
  def test_basic_opaque_vjp3(self):
    f = jnp.sin
    primals = 3.,
    _, f_vjp = api.vjp3(f, *primals)
    assert f_vjp.opaque_residuals  # can detect if opaque res are used
  def test_basic_pytree_error(self):
    def f(x):
      return [x['hi'] * x['bye']]
    y, f_vjp = api.si_vjp(f, [True], {'hi': 2., 'bye': 3.})
    arg_ct, = f_vjp([1.], {'hi': 2., 'bye': 3.})
    self.assertAllClose(y, [6.])
    self.assertAllClose(arg_ct, {'hi': 3., 'bye': 2.})
    # Re-passing a primal with a different pytree structure must error.
    with self.assertRaisesRegex(ValueError, "but the structures differ"):
      f_vjp(1., {'hi': 2.})
  # TODO(mattjj): improve this vjp3 error message
  # def test_basic_pytree_error_vjp3(self):
  #   def f(x):
  #     return [x['hi'] * x['bye']]
  #   y, f_vjp = api.vjp3(f, {'hi': 2., 'bye': 3.})
  #   arg_ct, = f_vjp([1.], {'hi': 2., 'bye': 3.})
  #   self.assertAllClose(y, [6.])
  #   self.assertAllClose(arg_ct, {'hi': 3., 'bye': 2.})
  #   f_vjp.args_res[0] = {'hi': 2.}
  #   with self.assertRaisesRegex(ValueError, "but the structures differ"):
  #     f_vjp(1.)
  def test_fsdp(self):
    # see https://github.com/jax-ml/jax/pull/27017 for why this is called "fsdp"
    def f2(x, w):
      x = 1. * x
      x = x @ w
      x = 2. * x
      return x
    x = jnp.ones((3, 4))
    w = jnp.ones((4, 4))
    y, f2_sivjp = api.si_vjp(f2, [False, True], x, w)
    y_grad = jnp.ones_like(y)
    x_grad, w_grad = f2_sivjp(y_grad, w)
    self.assertAllClose(x_grad, 2. * y_grad @ w.T)
  def test_fsdp_vjp3(self):
    # see https://github.com/jax-ml/jax/pull/27017 for why this is called "fsdp"
    def f2(x, w):
      x = 1. * x
      x = x @ w
      x = 2. * x
      return x
    x = jnp.ones((3, 4))
    w = jnp.ones((4, 4))
    y, f2_vjp = api.vjp3(f2, x, w)
    # Drop and then restore the saved residual to emulate releasing/regathering
    # the weight between forward and backward.
    f2_vjp.args_res[1] = None
    y_grad = jnp.ones_like(y)
    f2_vjp.args_res[1] = w
    x_grad, w_grad = f2_vjp(y_grad)
    self.assertAllClose(x_grad, 2. * y_grad @ w.T)
    self.assertAllClose(w_grad, 2. * x.T @ y_grad)
    self.assertAllClose(w_grad, 2. * x.T @ y_grad)
  def test_doesnt_leak_symbolic_zeros(self):
    _, vjp = api.si_vjp(lambda x: 1., [False], 3.14)
    ans, = vjp(1.0)
    self.assertIsInstance(ans, jax.Array)
# Allow running this test module directly as a script.
if __name__ == '__main__':
  absltest.main(testLoader=jtu.JaxTestLoader())
| InputSavedVJPTest |
python | pypa__pipenv | pipenv/patched/pip/_internal/vcs/git.py | {
"start": 1560,
"end": 18651
} | class ____(VersionControl):
    """VersionControl backend for Git (the ``git+...`` URL schemes)."""
    name = "git"
    dirname = ".git"
    repo_name = "clone"
    schemes = (
        "git+http",
        "git+https",
        "git+ssh",
        "git+git",
        "git+file",
    )
    # Prevent the user's environment variables from interfering with pip:
    # https://github.com/pypa/pip/issues/1130
    unset_environ = ("GIT_DIR", "GIT_WORK_TREE")
    default_arg_rev = "HEAD"
    @staticmethod
    def get_base_rev_args(rev: str) -> List[str]:
        return [rev]
    @classmethod
    def run_command(cls, *args: Any, **kwargs: Any) -> str:
        # When PIP_NO_INPUT is set, make sure git never prompts interactively
        # for credentials (terminal or ssh).
        if os.environ.get("PIP_NO_INPUT"):
            extra_environ = kwargs.get("extra_environ", {})
            extra_environ["GIT_TERMINAL_PROMPT"] = "0"
            extra_environ["GIT_SSH_COMMAND"] = "ssh -oBatchMode=yes"
            kwargs["extra_environ"] = extra_environ
        return super().run_command(*args, **kwargs)
    def is_immutable_rev_checkout(self, url: str, dest: str) -> bool:
        _, rev_options = self.get_url_rev_options(hide_url(url))
        if not rev_options.rev:
            return False
        if not self.is_commit_id_equal(dest, rev_options.rev):
            # the current commit is different from rev,
            # which means rev was something else than a commit hash
            return False
        # return False in the rare case rev is both a commit hash
        # and a tag or a branch; we don't want to cache in that case
        # because that branch/tag could point to something else in the future
        is_tag_or_branch = bool(self.get_revision_sha(dest, rev_options.rev)[0])
        return not is_tag_or_branch
    def get_git_version(self) -> Tuple[int, ...]:
        """Return (major, minor) of the installed git, or () if unparsable."""
        version = self.run_command(
            ["version"],
            command_desc="git version",
            show_stdout=False,
            stdout_only=True,
        )
        match = GIT_VERSION_REGEX.match(version)
        if not match:
            logger.warning("Can't parse git version: %s", version)
            return ()
        return (int(match.group(1)), int(match.group(2)))
    @classmethod
    def get_current_branch(cls, location: str) -> Optional[str]:
        """
        Return the current branch, or None if HEAD isn't at a branch
        (e.g. detached HEAD).
        """
        # git-symbolic-ref exits with empty stdout if "HEAD" is a detached
        # HEAD rather than a symbolic ref. In addition, the -q causes the
        # command to exit with status code 1 instead of 128 in this case
        # and to suppress the message to stderr.
        args = ["symbolic-ref", "-q", "HEAD"]
        output = cls.run_command(
            args,
            extra_ok_returncodes=(1,),
            show_stdout=False,
            stdout_only=True,
            cwd=location,
        )
        ref = output.strip()
        if ref.startswith("refs/heads/"):
            return ref[len("refs/heads/") :]
        return None
    @classmethod
    def get_revision_sha(cls, dest: str, rev: str) -> Tuple[Optional[str], bool]:
        """
        Return (sha_or_none, is_branch), where sha_or_none is a commit hash
        if the revision names a remote branch or tag, otherwise None.
        Args:
          dest: the repository directory.
          rev: the revision name.
        """
        # Pass rev to pre-filter the list.
        output = cls.run_command(
            ["show-ref", rev],
            cwd=dest,
            show_stdout=False,
            stdout_only=True,
            on_returncode="ignore",
        )
        refs = {}
        # NOTE: We do not use splitlines here since that would split on other
        # unicode separators, which can be maliciously used to install a
        # different revision.
        for line in output.strip().split("\n"):
            line = line.rstrip("\r")
            if not line:
                continue
            try:
                ref_sha, ref_name = line.split(" ", maxsplit=2)
            except ValueError:
                # Include the offending line to simplify troubleshooting if
                # this error ever occurs.
                raise ValueError(f"unexpected show-ref line: {line!r}")
            refs[ref_name] = ref_sha
        branch_ref = f"refs/remotes/origin/{rev}"
        tag_ref = f"refs/tags/{rev}"
        sha = refs.get(branch_ref)
        if sha is not None:
            return (sha, True)
        sha = refs.get(tag_ref)
        return (sha, False)
    @classmethod
    def _should_fetch(cls, dest: str, rev: str) -> bool:
        """
        Return true if rev is a ref or is a commit that we don't have locally.
        Branches and tags are not considered in this method because they are
        assumed to be always available locally (which is a normal outcome of
        ``git clone`` and ``git fetch --tags``).
        """
        if rev.startswith("refs/"):
            # Always fetch remote refs.
            return True
        if not looks_like_hash(rev):
            # Git fetch would fail with abbreviated commits.
            return False
        if cls.has_commit(dest, rev):
            # Don't fetch if we have the commit locally.
            return False
        return True
    @classmethod
    def resolve_revision(
        cls, dest: str, url: HiddenText, rev_options: RevOptions
    ) -> RevOptions:
        """
        Resolve a revision to a new RevOptions object with the SHA1 of the
        branch, tag, or ref if found.
        Args:
          rev_options: a RevOptions object.
        """
        rev = rev_options.arg_rev
        # The arg_rev property's implementation for Git ensures that the
        # rev return value is always non-None.
        assert rev is not None
        sha, is_branch = cls.get_revision_sha(dest, rev)
        if sha is not None:
            rev_options = rev_options.make_new(sha)
            rev_options = replace(rev_options, branch_name=(rev if is_branch else None))
            return rev_options
        # Do not show a warning for the common case of something that has
        # the form of a Git commit hash.
        if not looks_like_hash(rev):
            logger.warning(
                "Did not find branch or tag '%s', assuming revision or ref.",
                rev,
            )
        if not cls._should_fetch(dest, rev):
            return rev_options
        # fetch the requested revision
        cls.run_command(
            make_command("fetch", "-q", url, rev_options.to_args()),
            cwd=dest,
        )
        # Change the revision to the SHA of the ref we fetched
        sha = cls.get_revision(dest, rev="FETCH_HEAD")
        rev_options = rev_options.make_new(sha)
        return rev_options
    @classmethod
    def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool:
        """
        Return whether the current commit hash equals the given name.
        Args:
          dest: the repository directory.
          name: a string name.
        """
        if not name:
            # Then avoid an unnecessary subprocess call.
            return False
        return cls.get_revision(dest) == name
    def fetch_new(
        self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int
    ) -> None:
        """Clone ``url`` into ``dest`` and check out the requested revision."""
        rev_display = rev_options.to_display()
        logger.info("Cloning %s%s to %s", url, rev_display, display_path(dest))
        if verbosity <= 0:
            flags: Tuple[str, ...] = ("--quiet",)
        elif verbosity == 1:
            flags = ()
        else:
            flags = ("--verbose", "--progress")
        if self.get_git_version() >= (2, 17):
            # Git added support for partial clone in 2.17
            # https://git-scm.com/docs/partial-clone
            # Speeds up cloning by functioning without a complete copy of repository
            self.run_command(
                make_command(
                    "clone",
                    "--filter=blob:none",
                    *flags,
                    url,
                    dest,
                )
            )
        else:
            self.run_command(make_command("clone", *flags, url, dest))
        if rev_options.rev:
            # Then a specific revision was requested.
            rev_options = self.resolve_revision(dest, url, rev_options)
            branch_name = getattr(rev_options, "branch_name", None)
            logger.debug("Rev options %s, branch_name %s", rev_options, branch_name)
            if branch_name is None:
                # Only do a checkout if the current commit id doesn't match
                # the requested revision.
                if not self.is_commit_id_equal(dest, rev_options.rev):
                    cmd_args = make_command(
                        "checkout",
                        "-q",
                        rev_options.to_args(),
                    )
                    self.run_command(cmd_args, cwd=dest)
            elif self.get_current_branch(dest) != branch_name:
                # Then a specific branch was requested, and that branch
                # is not yet checked out.
                track_branch = f"origin/{branch_name}"
                cmd_args = [
                    "checkout",
                    "-b",
                    branch_name,
                    "--track",
                    track_branch,
                ]
                self.run_command(cmd_args, cwd=dest)
        else:
            sha = self.get_revision(dest)
            rev_options = rev_options.make_new(sha)
        logger.info("Resolved %s to commit %s", url, rev_options.rev)
        #: repo may contain submodules
        self.update_submodules(dest)
    def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
        # Point origin at the new URL, then check out the requested revision.
        self.run_command(
            make_command("config", "remote.origin.url", url),
            cwd=dest,
        )
        cmd_args = make_command("checkout", "-q", rev_options.to_args())
        self.run_command(cmd_args, cwd=dest)
        self.update_submodules(dest)
    def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
        # First fetch changes from the default remote
        if self.get_git_version() >= (1, 9):
            # fetch tags in addition to everything else
            self.run_command(["fetch", "-q", "--tags"], cwd=dest)
        else:
            self.run_command(["fetch", "-q"], cwd=dest)
        # Then reset to wanted revision (maybe even origin/master)
        rev_options = self.resolve_revision(dest, url, rev_options)
        cmd_args = make_command("reset", "--hard", "-q", rev_options.to_args())
        self.run_command(cmd_args, cwd=dest)
        #: update submodules
        self.update_submodules(dest)
    @classmethod
    def get_remote_url(cls, location: str) -> str:
        """
        Return URL of the first remote encountered.
        Raises RemoteNotFoundError if the repository does not have a remote
        url configured.
        """
        # We need to pass 1 for extra_ok_returncodes since the command
        # exits with return code 1 if there are no matching lines.
        stdout = cls.run_command(
            ["config", "--get-regexp", r"remote\..*\.url"],
            extra_ok_returncodes=(1,),
            show_stdout=False,
            stdout_only=True,
            cwd=location,
        )
        remotes = stdout.splitlines()
        try:
            found_remote = remotes[0]
        except IndexError:
            raise RemoteNotFoundError
        # Prefer the "origin" remote when one exists.
        for remote in remotes:
            if remote.startswith("remote.origin.url "):
                found_remote = remote
                break
        url = found_remote.split(" ")[1]
        return cls._git_remote_to_pip_url(url.strip())
    @staticmethod
    def _git_remote_to_pip_url(url: str) -> str:
        """
        Convert a remote url from what git uses to what pip accepts.
        There are 3 legal forms **url** may take:
            1. A fully qualified url: ssh://git@example.com/foo/bar.git
            2. A local project.git folder: /path/to/bare/repository.git
            3. SCP shorthand for form 1: git@example.com:foo/bar.git
        Form 1 is output as-is. Form 2 must be converted to URI and form 3 must
        be converted to form 1.
        See the corresponding test test_git_remote_url_to_pip() for examples of
        sample inputs/outputs.
        """
        if re.match(r"\w+://", url):
            # This is already valid. Pass it though as-is.
            return url
        if os.path.exists(url):
            # A local bare remote (git clone --mirror).
            # Needs a file:// prefix.
            return pathlib.PurePath(url).as_uri()
        scp_match = SCP_REGEX.match(url)
        if scp_match:
            # Add an ssh:// prefix and replace the ':' with a '/'.
            return scp_match.expand(r"ssh://\1\2/\3")
        # Otherwise, bail out.
        raise RemoteNotValidError(url)
    @classmethod
    def has_commit(cls, location: str, rev: str) -> bool:
        """
        Check if rev is a commit that is available in the local repository.
        """
        try:
            cls.run_command(
                ["rev-parse", "-q", "--verify", "sha^" + rev],
                cwd=location,
                log_failed_cmd=False,
            )
        except InstallationError:
            return False
        else:
            return True
    @classmethod
    def get_revision(cls, location: str, rev: Optional[str] = None) -> str:
        # Resolve rev (default HEAD) to its full commit SHA.
        if rev is None:
            rev = "HEAD"
        current_rev = cls.run_command(
            ["rev-parse", rev],
            show_stdout=False,
            stdout_only=True,
            cwd=location,
        )
        return current_rev.strip()
    @classmethod
    def get_subdirectory(cls, location: str) -> Optional[str]:
        """
        Return the path to Python project root, relative to the repo root.
        Return None if the project root is in the repo root.
        """
        # find the repo root
        git_dir = cls.run_command(
            ["rev-parse", "--git-dir"],
            show_stdout=False,
            stdout_only=True,
            cwd=location,
        ).strip()
        if not os.path.isabs(git_dir):
            git_dir = os.path.join(location, git_dir)
        repo_root = os.path.abspath(os.path.join(git_dir, ".."))
        return find_path_to_project_root_from_repo_root(location, repo_root)
    @classmethod
    def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]:
        """
        Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
        That's required because although they use SSH they sometimes don't
        work with a ssh:// scheme (e.g. GitHub). But we need a scheme for
        parsing. Hence we remove it again afterwards and return it as a stub.
        """
        # Works around an apparent Git bug
        # (see https://article.gmane.org/gmane.comp.version-control.git/146500)
        scheme, netloc, path, query, fragment = urlsplit(url)
        if scheme.endswith("file"):
            initial_slashes = path[: -len(path.lstrip("/"))]
            newpath = initial_slashes + urllib.request.url2pathname(path).replace(
                "\\", "/"
            ).lstrip("/")
            after_plus = scheme.find("+") + 1
            url = scheme[:after_plus] + urlunsplit(
                (scheme[after_plus:], netloc, newpath, query, fragment),
            )
        if "://" not in url:
            assert "file:" not in url
            url = url.replace("git+", "git+ssh://")
            url, rev, user_pass = super().get_url_rev_and_auth(url)
            url = url.replace("ssh://", "")
        else:
            url, rev, user_pass = super().get_url_rev_and_auth(url)
        return url, rev, user_pass
    @classmethod
    def update_submodules(cls, location: str) -> None:
        # No-op for repositories without a .gitmodules file.
        if not os.path.exists(os.path.join(location, ".gitmodules")):
            return
        cls.run_command(
            ["submodule", "update", "--init", "--recursive", "-q"],
            cwd=location,
        )
    @classmethod
    def get_repository_root(cls, location: str) -> Optional[str]:
        loc = super().get_repository_root(location)
        if loc:
            return loc
        try:
            r = cls.run_command(
                ["rev-parse", "--show-toplevel"],
                cwd=location,
                show_stdout=False,
                stdout_only=True,
                on_returncode="raise",
                log_failed_cmd=False,
            )
        except BadCommand:
            logger.debug(
                "could not determine if %s is under git control "
                "because git is not available",
                location,
            )
            return None
        except InstallationError:
            return None
        return os.path.normpath(r.rstrip("\r\n"))
    @staticmethod
    def should_add_vcs_url_prefix(repo_url: str) -> bool:
        """In either https or ssh form, requirements must be prefixed with git+."""
        return True
vcs.register(Git)
| Git |
python | pytorch__pytorch | torch/nn/modules/normalization.py | {
"start": 382,
"end": 2065
} | class ____(Module):
r"""Applies local response normalization over an input signal.
The input signal is composed of several input planes, where channels occupy the second dimension.
Applies normalization across channels.
.. math::
b_{c} = a_{c}\left(k + \frac{\alpha}{n}
\sum_{c'=\max(0, c-n/2)}^{\min(N-1,c+n/2)}a_{c'}^2\right)^{-\beta}
Args:
size: amount of neighbouring channels used for normalization
alpha: multiplicative factor. Default: 0.0001
beta: exponent. Default: 0.75
k: additive factor. Default: 1
Shape:
- Input: :math:`(N, C, *)`
- Output: :math:`(N, C, *)` (same shape as input)
Examples::
>>> lrn = nn.LocalResponseNorm(2)
>>> signal_2d = torch.randn(32, 5, 24, 24)
>>> signal_4d = torch.randn(16, 5, 7, 7, 7, 7)
>>> output_2d = lrn(signal_2d)
>>> output_4d = lrn(signal_4d)
"""
__constants__ = ["size", "alpha", "beta", "k"]
size: int
alpha: float
beta: float
k: float
def __init__(
self, size: int, alpha: float = 1e-4, beta: float = 0.75, k: float = 1.0
) -> None:
super().__init__()
self.size = size
self.alpha = alpha
self.beta = beta
self.k = k
def forward(self, input: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return F.local_response_norm(input, self.size, self.alpha, self.beta, self.k)
def extra_repr(self):
"""
Return the extra representation of the module.
"""
return "{size}, alpha={alpha}, beta={beta}, k={k}".format(**self.__dict__)
| LocalResponseNorm |
python | django__django | tests/app_loading/not_installed/models.py | {
"start": 31,
"end": 124
} | class ____(models.Model):
class Meta:
app_label = "not_installed"
| NotInstalledModel |
python | astropy__astropy | astropy/modeling/bounding_box.py | {
"start": 483,
"end": 4649
} | class ____(_BaseInterval):
"""
A single input's bounding box interval.
Parameters
----------
lower : float
The lower bound of the interval
upper : float
The upper bound of the interval
Methods
-------
validate :
Constructs a valid interval
outside :
Determine which parts of an input array are outside the interval.
domain :
Constructs a discretization of the points inside the interval.
"""
def __repr__(self):
return f"Interval(lower={self.lower}, upper={self.upper})"
def copy(self):
return copy.deepcopy(self)
@staticmethod
def _validate_shape(interval):
"""Validate the shape of an interval representation."""
MESSAGE = """An interval must be some sort of sequence of length 2"""
try:
shape = np.shape(interval)
except TypeError:
try:
# np.shape does not work with lists of Quantities
if len(interval) == 1:
interval = interval[0]
shape = np.shape([b.to_value() for b in interval])
except (ValueError, TypeError, AttributeError):
raise ValueError(MESSAGE)
valid_shape = shape in ((2,), (1, 2), (2, 0))
if not valid_shape:
valid_shape = (
len(shape) > 0
and shape[0] == 2
and all(isinstance(b, np.ndarray) for b in interval)
)
if not np.iterable(interval) or not valid_shape:
raise ValueError(MESSAGE)
@classmethod
def _validate_bounds(cls, lower, upper):
"""Validate the bounds are reasonable and construct an interval from them."""
if (np.asanyarray(lower) > np.asanyarray(upper)).all():
warnings.warn(
f"Invalid interval: upper bound {upper} "
f"is strictly less than lower bound {lower}.",
RuntimeWarning,
)
return cls(lower, upper)
@classmethod
def validate(cls, interval):
"""
Construct and validate an interval.
Parameters
----------
interval : iterable
A representation of the interval.
Returns
-------
A validated interval.
"""
cls._validate_shape(interval)
if len(interval) == 1:
interval = tuple(interval[0])
else:
interval = tuple(interval)
return cls._validate_bounds(interval[0], interval[1])
def outside(self, _input: np.ndarray):
"""
Parameters
----------
_input : np.ndarray
The evaluation input in the form of an array.
Returns
-------
Boolean array indicating which parts of _input are outside the interval:
True -> position outside interval
False -> position inside interval
"""
return np.logical_or(_input < self.lower, _input > self.upper)
def domain(self, resolution):
return np.arange(self.lower, self.upper + resolution, resolution)
# The interval where all ignored inputs can be found.
_ignored_interval = _Interval.validate((-np.inf, np.inf))
def get_index(model, key) -> int:
"""
Get the input index corresponding to the given key.
Can pass in either:
the string name of the input or
the input index itself.
"""
if isinstance(key, str):
if key in model.inputs:
index = model.inputs.index(key)
else:
raise ValueError(f"'{key}' is not one of the inputs: {model.inputs}.")
elif np.issubdtype(type(key), np.integer):
if 0 <= key < len(model.inputs):
index = key
else:
raise IndexError(
f"Integer key: {key} must be non-negative and < {len(model.inputs)}."
)
else:
raise ValueError(f"Key value: {key} must be string or integer.")
return index
def get_name(model, index: int):
"""Get the input name corresponding to the input index."""
return model.inputs[index]
| _Interval |
python | pydata__xarray | xarray/core/missing.py | {
"start": 3828,
"end": 5309
} | class ____(BaseInterpolator):
"""Interpolate a 1-D function using Scipy interp1d
See Also
--------
scipy.interpolate.interp1d
"""
def __init__(
self,
xi,
yi,
method=None,
fill_value=None,
assume_sorted=True,
copy=False,
bounds_error=False,
order=None,
axis=-1,
**kwargs,
):
from scipy.interpolate import interp1d
if method is None:
raise ValueError(
"method is a required argument, please supply a "
"valid scipy.inter1d method (kind)"
)
if method == "polynomial":
if order is None:
raise ValueError("order is required when method=polynomial")
method = order
if method == "quintic":
method = 5
self.method = method
self.cons_kwargs = kwargs
self.call_kwargs = {}
nan = np.nan if yi.dtype.kind != "c" else np.nan + np.nan * 1j
if fill_value is None and method == "linear":
fill_value = nan, nan
elif fill_value is None:
fill_value = nan
self.f = interp1d(
xi,
yi,
kind=self.method,
fill_value=fill_value,
bounds_error=bounds_error,
assume_sorted=assume_sorted,
copy=copy,
axis=axis,
**self.cons_kwargs,
)
| ScipyInterpolator |
python | huggingface__transformers | src/transformers/models/dpt/modeling_dpt.py | {
"start": 8294,
"end": 10485
} | class ____(nn.Module):
"""
Construct the CLS token, position and patch embeddings.
"""
def __init__(self, config):
super().__init__()
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.patch_embeddings = DPTViTPatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.config = config
def _resize_pos_embed(self, posemb, grid_size_height, grid_size_width, start_index=1):
posemb_tok = posemb[:, :start_index]
posemb_grid = posemb[0, start_index:]
old_grid_size = torch_int(posemb_grid.size(0) ** 0.5)
posemb_grid = posemb_grid.reshape(1, old_grid_size, old_grid_size, -1).permute(0, 3, 1, 2)
posemb_grid = nn.functional.interpolate(posemb_grid, size=(grid_size_height, grid_size_width), mode="bilinear")
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, grid_size_height * grid_size_width, -1)
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
return posemb
def forward(self, pixel_values: torch.Tensor) -> BaseModelOutputWithIntermediateActivations:
batch_size, num_channels, height, width = pixel_values.shape
# possibly interpolate position encodings to handle varying image sizes
patch_size = self.config.patch_size
position_embeddings = self._resize_pos_embed(
self.position_embeddings, height // patch_size, width // patch_size
)
embeddings = self.patch_embeddings(pixel_values)
batch_size, seq_len, _ = embeddings.size()
# add the [CLS] token to the embedded patch tokens
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
# add positional encoding to each token
embeddings = embeddings + position_embeddings
embeddings = self.dropout(embeddings)
return BaseModelOutputWithIntermediateActivations(last_hidden_states=embeddings)
| DPTViTEmbeddings |
python | great-expectations__great_expectations | great_expectations/render/components.py | {
"start": 27306,
"end": 28359
} | class ____(RenderedContent):
def __init__(self, content_blocks, section_name=None) -> None:
if not isinstance(content_blocks, list) and all(
isinstance(content_block, RenderedComponentContent) for content_block in content_blocks
):
raise InvalidRenderedContentError( # noqa: TRY003 # FIXME CoP
"Rendered section content requires a list of RenderedComponentContent "
"for content blocks."
)
self.content_blocks = content_blocks
self.section_name = section_name
@override
def to_json_dict(self) -> dict[str, JSONValues]:
"""Returns a JSON-serializable dict representation of this RenderedSectionContent.
Returns:
A JSON-serializable dict representation of this RenderedSectionContent.
"""
d = super().to_json_dict()
d["content_blocks"] = RenderedContent.rendered_content_list_to_json(self.content_blocks)
d["section_name"] = self.section_name
return d
| RenderedSectionContent |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 395428,
"end": 397284
} | class ____(Response):
"""
Response of tasks.update endpoint.
:param updated: Number of tasks updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "tasks"
_action = "update"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, fields: Optional[dict] = None, **kwargs: Any) -> None:
super(UpdateResponse, self).__init__(**kwargs)
self.updated = updated
self.fields = fields
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
| UpdateResponse |
python | PrefectHQ__prefect | src/integrations/prefect-dbt/tests/core/test_tracker.py | {
"start": 9022,
"end": 11662
} | class ____:
"""Test logging functionality."""
@pytest.mark.parametrize(
"flow_context",
[
(None, None),
({"id": "test-id", "name": "test_name"}, None),
(None, Mock(spec=Flow)),
({"id": "test-flow-run-id", "name": "test_flow_run"}, Mock(spec=Flow)),
],
)
def test_get_task_logger_with_various_contexts(
self, sample_node_id, sample_task_run_id, flow_context
):
"""Test that get_task_logger works with various flow context combinations."""
tracker = NodeTaskTracker()
tracker.set_task_run_id(sample_node_id, sample_task_run_id)
tracker.set_task_run_name(sample_node_id, "test_task_run")
flow_run, flow = flow_context
# Configure mock flow if present
if flow is not None:
flow.name = "test_flow"
logger = tracker.get_task_logger(
sample_node_id,
flow_run=flow_run,
flow=flow,
)
assert isinstance(logger, PrefectLogAdapter)
assert logger.extra["task_run_id"] == sample_task_run_id
assert logger.extra["task_run_name"] == "test_task_run"
assert logger.extra["task_name"] == "execute_dbt_node"
# Verify flow context
if flow_run:
assert logger.extra["flow_run_id"] == flow_run["id"]
assert logger.extra["flow_run_name"] == flow_run["name"]
else:
assert logger.extra["flow_run_id"] == "<unknown>"
assert logger.extra["flow_run_name"] == "<unknown>"
if flow:
assert logger.extra["flow_name"] == flow.name
else:
assert logger.extra["flow_name"] == "<unknown>"
def test_get_task_logger_with_additional_kwargs(
self, sample_node_id, sample_task_run_id
):
"""Test that get_task_logger includes additional kwargs in extra data."""
tracker = NodeTaskTracker()
tracker.set_task_run_id(sample_node_id, sample_task_run_id)
logger = tracker.get_task_logger(
sample_node_id,
custom_key="custom_value",
another_key=123,
)
assert logger.extra["custom_key"] == "custom_value"
assert logger.extra["another_key"] == 123
def test_get_task_logger_without_task_run_id(self, sample_node_id):
"""Test that get_task_logger works without task run ID."""
tracker = NodeTaskTracker()
logger = tracker.get_task_logger(sample_node_id)
assert logger.extra["task_run_id"] is None
assert logger.extra["task_run_name"] is None
| TestNodeTaskTrackerLogging |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/orchestrator/orchestrator/models/ci_report.py | {
"start": 303,
"end": 1055
} | class ____(BaseModel):
file_path: str
pipeline_name: Optional[str] = None
run_timestamp: Optional[str] = None
run_duration: Optional[float] = None
success: Optional[bool] = None
failed_steps: Optional[List[str]] = None
successful_steps: Optional[List[str]] = None
skipped_steps: Optional[List[str]] = None
gha_workflow_run_url: Optional[str] = None
pipeline_start_timestamp: Optional[int] = None
pipeline_end_timestamp: Optional[int] = None
pipeline_duration: Optional[int] = None
git_branch: Optional[str] = None
git_revision: Optional[str] = None
ci_context: Optional[str] = None
pull_request_url: Optional[str] = None
class Config:
extra = Extra.allow
| ConnectorNightlyReport |
python | django__django | tests/test_runner/tests.py | {
"start": 23251,
"end": 25078
} | class ____(SimpleTestCase):
# Raise an exception to don't actually run tests.
@mock.patch.object(
multiprocessing, "Pool", side_effect=Exception("multiprocessing.Pool()")
)
def test_no_initialize_suite_test_runner(self, mocked_pool):
class StubTestRunner(DiscoverRunner):
def setup_test_environment(self, **kwargs):
return
def setup_databases(self, **kwargs):
return
def run_checks(self, databases):
return
def teardown_databases(self, old_config, **kwargs):
return
def teardown_test_environment(self, **kwargs):
return
def run_suite(self, suite, **kwargs):
kwargs = self.get_test_runner_kwargs()
runner = self.test_runner(**kwargs)
return runner.run(suite)
runner = StubTestRunner(
verbosity=0, interactive=False, parallel=2, debug_mode=True
)
with self.assertRaisesMessage(Exception, "multiprocessing.Pool()"):
runner.run_tests(
[
"test_runner_apps.sample.tests_sample.TestDjangoTestCase",
"test_runner_apps.simple.tests",
]
)
# Initializer must be a partial function binding _init_worker.
initializer = mocked_pool.call_args.kwargs["initializer"]
self.assertIsInstance(initializer, functools.partial)
self.assertIs(initializer.args[0], _init_worker)
initargs = mocked_pool.call_args.kwargs["initargs"]
self.assertEqual(len(initargs), 7)
self.assertEqual(initargs[5], True) # debug_mode
self.assertEqual(initargs[6], {db.DEFAULT_DB_ALIAS}) # Used database aliases.
| TestRunnerInitializerTests |
python | networkx__networkx | networkx/algorithms/isomorphism/tests/test_ismags.py | {
"start": 7046,
"end": 12067
} | class ____:
def test_isomorphism(self):
g1 = nx.Graph()
nx.add_cycle(g1, range(4))
g2 = nx.Graph()
nx.add_cycle(g2, range(4))
g2.add_edges_from(list(zip(g2, range(4, 8))))
ismags = iso.ISMAGS(g2, g1)
assert list(ismags.subgraph_isomorphisms_iter(symmetry=True)) == [
{n: n for n in g1.nodes}
]
def test_isomorphism2(self):
g1 = nx.Graph()
nx.add_path(g1, range(3))
g2 = g1.copy()
g2.add_edge(1, 3)
ismags = iso.ISMAGS(g2, g1)
matches = ismags.subgraph_isomorphisms_iter(symmetry=True)
expected_symmetric = [
{0: 0, 1: 1, 2: 2},
{0: 0, 1: 1, 3: 2},
{2: 0, 1: 1, 3: 2},
]
assert _matches_to_sets(matches) == _matches_to_sets(expected_symmetric)
matches = ismags.subgraph_isomorphisms_iter(symmetry=False)
expected_asymmetric = [
{0: 2, 1: 1, 2: 0},
{0: 2, 1: 1, 3: 0},
{2: 2, 1: 1, 3: 0},
]
assert _matches_to_sets(matches) == _matches_to_sets(
expected_symmetric + expected_asymmetric
)
def test_labeled_nodes(self):
g1 = nx.Graph()
nx.add_cycle(g1, range(3))
g1.nodes[1]["attr"] = True
g2 = g1.copy()
g2.add_edge(1, 3)
ismags = iso.ISMAGS(g2, g1, node_match=lambda x, y: x == y)
matches = ismags.subgraph_isomorphisms_iter(symmetry=True)
expected_symmetric = [{0: 0, 1: 1, 2: 2}]
assert _matches_to_sets(matches) == _matches_to_sets(expected_symmetric)
matches = ismags.subgraph_isomorphisms_iter(symmetry=False)
expected_asymmetric = [{0: 2, 1: 1, 2: 0}]
assert _matches_to_sets(matches) == _matches_to_sets(
expected_symmetric + expected_asymmetric
)
def test_labeled_edges(self):
g1 = nx.Graph()
nx.add_cycle(g1, range(3))
g1.edges[1, 2]["attr"] = True
g2 = g1.copy()
g2.add_edge(1, 3)
ismags = iso.ISMAGS(g2, g1, edge_match=lambda x, y: x == y)
matches = ismags.subgraph_isomorphisms_iter(symmetry=True)
expected_symmetric = [{0: 0, 1: 1, 2: 2}]
assert _matches_to_sets(matches) == _matches_to_sets(expected_symmetric)
matches = ismags.subgraph_isomorphisms_iter(symmetry=False)
expected_asymmetric = [{1: 2, 0: 0, 2: 1}]
assert _matches_to_sets(matches) == _matches_to_sets(
expected_symmetric + expected_asymmetric
)
def test_exceptions_for_bad_match_functions(self):
def non_transitive_match(attrs1, attrs2):
return abs(attrs1["freq"] - attrs2["freq"]) <= 1
def simple_non_commutative_match(attrs1, attrs2):
return attrs1["freq"] == 1 + attrs2["freq"]
def non_commutative_match(attrs1, attrs2):
# red matches red and green
# green and blue only match themselves
if attrs2["color"] == "red":
return attrs2["color"] in {"red", "green"}
else:
return attrs1["color"] == attrs2["color"]
G1 = nx.Graph()
G1.add_node(0, color="red", freq=0)
G1.add_node(1, color="red", freq=1)
G1.add_node(2, color="blue", freq=2)
G2 = nx.Graph()
G2.add_node("A", color="red", freq=0)
G2.add_node("B", color="green", freq=1)
G2.add_node("C", color="blue", freq=2)
with pytest.raises(nx.NetworkXError, match="\nInvalid partition"):
iso.ISMAGS(G1, G2, node_match=non_transitive_match)
with pytest.raises(nx.NetworkXError, match="\nInvalid partition"):
iso.ISMAGS(G1, G2, node_match=simple_non_commutative_match)
with pytest.raises(nx.NetworkXError, match="\nInvalid partition"):
iso.ISMAGS(G1, G2, node_match=non_commutative_match)
def test_noncomparable_nodes():
node1 = object()
node2 = object()
node3 = object()
# Graph
G = nx.path_graph([node1, node2, node3])
gm = iso.ISMAGS(G, G)
assert gm.is_isomorphic()
# Just testing some cases
assert gm.subgraph_is_isomorphic()
# DiGraph
G = nx.path_graph([node1, node2, node3], create_using=nx.DiGraph)
H = nx.path_graph([node3, node2, node1], create_using=nx.DiGraph)
dgm = iso.ISMAGS(G, H)
assert dgm.is_isomorphic()
assert dgm.is_isomorphic(symmetry=True)
# Just testing some cases
assert dgm.subgraph_is_isomorphic()
@pytest.mark.parametrize("graph_constructor", graph_classes)
def test_selfloop(graph_constructor):
# Simple test for graphs with selfloops
g1 = graph_constructor([(0, 1), (0, 2), (1, 2), (1, 3), (2, 2), (2, 4)])
nodes = range(5)
rng = random.Random(42)
for _ in range(3):
new_nodes = list(nodes)
rng.shuffle(new_nodes)
d = dict(zip(nodes, new_nodes))
g2 = nx.relabel_nodes(g1, d)
assert iso.ISMAGS(g1, g2).is_isomorphic()
| TestSubgraphIsomorphism |
python | getsentry__sentry | tests/sentry/search/test_utils.py | {
"start": 37510,
"end": 38478
} | class ____(TestCase):
def test_simple_user_tag(self) -> None:
assert convert_user_tag_to_query("user", "id:123456") == 'user.id:"123456"'
def test_user_tag_with_quote(self) -> None:
assert convert_user_tag_to_query("user", 'id:123"456') == 'user.id:"123\\"456"'
def test_user_tag_with_space(self) -> None:
assert convert_user_tag_to_query("user", "id:123 456") == 'user.id:"123 456"'
def test_non_user_tag(self) -> None:
assert convert_user_tag_to_query("user", 'fake:123"456') is None
def test_valid_device_class_mapping() -> None:
assert set(DEVICE_CLASS.keys()) == {"low", "medium", "high"}, "Only 3 possible classes"
# should all be integers
device_classes = {key: {int(value) for value in values} for key, values in DEVICE_CLASS.items()}
assert all(
0 not in values for values in device_classes.values()
), "`0` is not a valid classes as it represents unclassified"
| ConvertUserTagTest |
python | getsentry__sentry | tests/sentry/integrations/slack/threads/activity_notifications/test_assigned_activity_notification.py | {
"start": 231,
"end": 653
} | class ____(TestCase):
def setUp(self) -> None:
self.assigned_activity = Activity.objects.create(
group=self.group,
project=self.project,
type=ActivityType.ASSIGNED.value,
user_id=self.user.id,
data={
"assignee": self.user.id,
},
)
self.obj = AssignedActivityNotification(self.assigned_activity)
| _BaseTestCase |
python | sympy__sympy | sympy/polys/domains/domain.py | {
"start": 2175,
"end": 2393
} | class ____(RingElement, Protocol):
"""A field element.
Must support ``/``.
"""
def __truediv__(self, other: Self | int, /) -> Self: ...
def __rtruediv__(self, other: int, /) -> Self: ...
| FieldElement |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 128237,
"end": 129410
} | class ____(BaseModel, extra="forbid"):
"""
Configuration for sparse inverted index.
"""
full_scan_threshold: Optional[int] = Field(
default=None,
description="We prefer a full scan search upto (excluding) this number of vectors. Note: this is number of vectors, not KiloBytes.",
)
on_disk: Optional[bool] = Field(
default=None,
description="Store index on disk. If set to false, the index will be stored in RAM. Default: false",
)
datatype: Optional["Datatype"] = Field(
default=None,
description="Defines which datatype should be used for the index. Choosing different datatypes allows to optimize memory usage and performance vs accuracy. - For `float32` datatype - vectors are stored as single-precision floating point numbers, 4 bytes. - For `float16` datatype - vectors are stored as half-precision floating point numbers, 2 bytes. - For `uint8` datatype - vectors are quantized to unsigned 8-bit integers, 1 byte. Quantization to fit byte range `[0, 255]` happens during indexing automatically, so the actual vector data does not need to conform to this range.",
)
| SparseIndexParams |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/components_tests/integration_tests/integration_test_defs/definitions/backcompat_components/yaml_component/__init__.py | {
"start": 23,
"end": 240
} | class ____(dg.Component):
def build_defs(self, context: dg.ComponentLoadContext) -> dg.Definitions:
return dg.Definitions(
assets=[dg.AssetSpec(key=dg.AssetKey(["foo"]))],
)
| MyYamlComponent |
python | scipy__scipy | scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py | {
"start": 11689,
"end": 15707
} | class ____(TestCase):
def test_2d_box_constraints(self):
# Both constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-2, 2],
[-1, -2], [1, 2], 2,
entire_line=False)
assert_array_almost_equal([ta, tb], [0, 0.5])
assert_equal(intersect, True)
# None of the constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-1, 1],
[-1, -3], [1, 3], 10,
entire_line=False)
assert_array_almost_equal([ta, tb], [0, 1])
assert_equal(intersect, True)
# Box constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
[-1, -3], [1, 3], 10,
entire_line=False)
assert_array_almost_equal([ta, tb], [0, 0.5])
assert_equal(intersect, True)
# Spherical constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
[-1, -3], [1, 3], 2,
entire_line=False)
assert_array_almost_equal([ta, tb], [0, 0.25])
assert_equal(intersect, True)
# Infeasible problems
ta, tb, intersect = box_sphere_intersections([2, 2], [-4, 4],
[-1, -3], [1, 3], 2,
entire_line=False)
assert_equal(intersect, False)
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
[2, 4], [2, 4], 2,
entire_line=False)
assert_equal(intersect, False)
def test_2d_box_constraints_entire_line(self):
# Both constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-2, 2],
[-1, -2], [1, 2], 2,
entire_line=True)
assert_array_almost_equal([ta, tb], [0, 0.5])
assert_equal(intersect, True)
# None of the constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-1, 1],
[-1, -3], [1, 3], 10,
entire_line=True)
assert_array_almost_equal([ta, tb], [0, 2])
assert_equal(intersect, True)
# Box constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
[-1, -3], [1, 3], 10,
entire_line=True)
assert_array_almost_equal([ta, tb], [0, 0.5])
assert_equal(intersect, True)
# Spherical constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
[-1, -3], [1, 3], 2,
entire_line=True)
assert_array_almost_equal([ta, tb], [0, 0.25])
assert_equal(intersect, True)
# Infeasible problems
ta, tb, intersect = box_sphere_intersections([2, 2], [-4, 4],
[-1, -3], [1, 3], 2,
entire_line=True)
assert_equal(intersect, False)
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
[2, 4], [2, 4], 2,
entire_line=True)
assert_equal(intersect, False)
| TestBoxSphereBoundariesIntersections |
python | allegroai__clearml | examples/frameworks/fire/fire_grouping_cmd.py | {
"start": 401,
"end": 556
} | class ____(object):
def run(self, volume=1):
return " ".join(["Burp!"] * volume)
def status(self):
return "Satiated."
| DigestionStage |
python | doocs__leetcode | solution/3200-3299/3284.Sum of Consecutive Subarrays/Solution.py | {
"start": 0,
"end": 630
} | class ____:
def getSum(self, nums: List[int]) -> int:
mod = 10**9 + 7
f = g = 1
s = t = nums[0]
ans = nums[0]
for x, y in pairwise(nums):
if y - x == 1:
f += 1
s += f * y
ans = (ans + s) % mod
else:
f = 1
s = y
if y - x == -1:
g += 1
t += g * y
ans = (ans + t) % mod
else:
g = 1
t = y
if abs(y - x) != 1:
ans = (ans + y) % mod
return ans
| Solution |
python | openai__openai-python | src/openai/types/beta/realtime/realtime_server_event.py | {
"start": 2769,
"end": 3068
} | class ____(BaseModel):
event_id: str
"""The unique ID of the server event."""
item: ConversationItem
"""The item to add to the conversation."""
type: Literal["conversation.item.retrieved"]
"""The event type, must be `conversation.item.retrieved`."""
| ConversationItemRetrieved |
python | sqlalchemy__sqlalchemy | test/orm/test_merge.py | {
"start": 1405,
"end": 53934
} | class ____(_fixtures.FixtureTest):
"""Session.merge() functionality"""
run_inserts = None
def load_tracker(self, cls, canary=None):
if canary is None:
def canary(instance, *args):
canary.called += 1
canary.called = 0
event.listen(cls, "load", canary)
return canary
def test_loader_options(self):
User, Address, addresses, users = (
self.classes.User,
self.classes.Address,
self.tables.addresses,
self.tables.users,
)
self.mapper(
User,
users,
properties={"addresses": relationship(Address, backref="user")},
)
self.mapper(Address, addresses)
s = fixture_session()
u = User(
id=7,
name="fred",
addresses=[Address(id=1, email_address="jack@bean.com")],
)
s.add(u)
s.commit()
s.close()
u = User(id=7, name="fred")
u2 = s.merge(u, options=[selectinload(User.addresses)])
eq_(len(u2.__dict__["addresses"]), 1)
def test_transient_to_pending(self):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
load = self.load_tracker(User)
u = User(id=7, name="fred")
eq_(load.called, 0)
u2 = sess.merge(u)
eq_(load.called, 1)
assert u2 in sess
eq_(u2, User(id=7, name="fred"))
sess.flush()
sess.expunge_all()
eq_(sess.query(User).first(), User(id=7, name="fred"))
def test_transient_to_pending_no_pk(self):
"""test that a transient object with no PK attribute
doesn't trigger a needless load."""
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
u = User(name="fred")
def go():
sess.merge(u)
self.assert_sql_count(testing.db, go, 0)
def test_warn_transient_already_pending_nopk(self):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session(autoflush=False)
u = User(name="fred")
sess.add(u)
with expect_warnings(
"Instance <User.*> is already pending in this Session yet is "
"being merged again; this is probably not what you want to do"
):
sess.merge(u)
def test_warn_transient_already_pending_pk(self):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session(autoflush=False)
u = User(id=1, name="fred")
sess.add(u)
with expect_warnings(
"Instance <User.*> is already pending in this Session yet is "
"being merged again; this is probably not what you want to do"
):
sess.merge(u)
def test_transient_to_pending_collection(self):
User, Address, addresses, users = (
self.classes.User,
self.classes.Address,
self.tables.addresses,
self.tables.users,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address, backref="user", collection_class=set
)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
load = self.load_tracker(User)
self.load_tracker(Address, load)
u = User(
id=7,
name="fred",
addresses={
Address(id=1, email_address="fred1"),
Address(id=2, email_address="fred2"),
},
)
eq_(load.called, 0)
sess = fixture_session()
sess.merge(u)
eq_(load.called, 3)
merged_users = [e for e in sess if isinstance(e, User)]
eq_(len(merged_users), 1)
assert merged_users[0] is not u
sess.flush()
sess.expunge_all()
eq_(
sess.query(User).one(),
User(
id=7,
name="fred",
addresses={
Address(id=1, email_address="fred1"),
Address(id=2, email_address="fred2"),
},
),
)
def test_transient_non_mutated_collection(self):
User, Address, addresses, users = (
self.classes.User,
self.classes.Address,
self.tables.addresses,
self.tables.users,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={"addresses": relationship(Address, backref="user")},
)
self.mapper_registry.map_imperatively(Address, addresses)
s = fixture_session()
u = User(
id=7,
name="fred",
addresses=[Address(id=1, email_address="jack@bean.com")],
)
s.add(u)
s.commit()
s.close()
u = User(id=7, name="fred")
# access address collection to get implicit blank collection
eq_(u.addresses, [])
u2 = s.merge(u)
# collection wasn't emptied
eq_(u2.addresses, [Address()])
def test_transient_to_pending_collection_pk_none(self):
User, Address, addresses, users = (
self.classes.User,
self.classes.Address,
self.tables.addresses,
self.tables.users,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address, backref="user", collection_class=set
)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
load = self.load_tracker(User)
self.load_tracker(Address, load)
u = User(
id=None,
name="fred",
addresses={
Address(id=None, email_address="fred1"),
Address(id=None, email_address="fred2"),
},
)
eq_(load.called, 0)
sess = fixture_session()
sess.merge(u)
eq_(load.called, 3)
merged_users = [e for e in sess if isinstance(e, User)]
eq_(len(merged_users), 1)
assert merged_users[0] is not u
sess.flush()
sess.expunge_all()
eq_(
sess.query(User).one(),
User(
name="fred",
addresses={
Address(email_address="fred1"),
Address(email_address="fred2"),
},
),
)
def test_transient_to_persistent(self):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
load = self.load_tracker(User)
sess = fixture_session()
u = User(id=7, name="fred")
sess.add(u)
sess.flush()
sess.expunge_all()
eq_(load.called, 0)
_u2 = u2 = User(id=7, name="fred jones")
eq_(load.called, 0)
u2 = sess.merge(u2)
assert u2 is not _u2
eq_(load.called, 1)
sess.flush()
sess.expunge_all()
eq_(sess.query(User).first(), User(id=7, name="fred jones"))
eq_(load.called, 2)
def test_transient_to_persistent_collection(self):
User, Address, addresses, users = (
self.classes.User,
self.classes.Address,
self.tables.addresses,
self.tables.users,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address,
backref="user",
collection_class=set,
cascade="all, delete-orphan",
)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
load = self.load_tracker(User)
self.load_tracker(Address, load)
u = User(
id=7,
name="fred",
addresses={
Address(id=1, email_address="fred1"),
Address(id=2, email_address="fred2"),
},
)
sess = fixture_session()
sess.add(u)
sess.flush()
sess.expunge_all()
eq_(load.called, 0)
u = User(
id=7,
name="fred",
addresses={
Address(id=3, email_address="fred3"),
Address(id=4, email_address="fred4"),
},
)
u = sess.merge(u)
# 1. merges User object. updates into session.
# 2.,3. merges Address ids 3 & 4, saves into session.
# 4.,5. loads pre-existing elements in "addresses" collection,
# marks as deleted, Address ids 1 and 2.
eq_(load.called, 5)
eq_(
u,
User(
id=7,
name="fred",
addresses={
Address(id=3, email_address="fred3"),
Address(id=4, email_address="fred4"),
},
),
)
sess.flush()
sess.expunge_all()
eq_(
sess.query(User).one(),
User(
id=7,
name="fred",
addresses={
Address(id=3, email_address="fred3"),
Address(id=4, email_address="fred4"),
},
),
)
def test_detached_to_persistent_collection(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address,
backref="user",
order_by=addresses.c.id,
collection_class=set,
)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
load = self.load_tracker(User)
self.load_tracker(Address, load)
a = Address(id=1, email_address="fred1")
u = User(
id=7,
name="fred",
addresses={a, Address(id=2, email_address="fred2")},
)
sess = fixture_session()
sess.add(u)
sess.flush()
sess.expunge_all()
u.name = "fred jones"
u.addresses.add(Address(id=3, email_address="fred3"))
u.addresses.remove(a)
eq_(load.called, 0)
u = sess.merge(u)
eq_(load.called, 4)
sess.flush()
sess.expunge_all()
eq_(
sess.query(User).first(),
User(
id=7,
name="fred jones",
addresses={
Address(id=2, email_address="fred2"),
Address(id=3, email_address="fred3"),
},
),
)
def test_unsaved_cascade(self):
"""Merge of a transient entity with two child transient
entities, with a bidirectional relationship."""
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
cascade="all",
backref="user",
)
},
)
load = self.load_tracker(User)
self.load_tracker(Address, load)
sess = fixture_session()
u = User(id=7, name="fred")
a1 = Address(email_address="foo@bar.com")
a2 = Address(email_address="hoho@bar.com")
u.addresses.append(a1)
u.addresses.append(a2)
u2 = sess.merge(u)
eq_(load.called, 3)
eq_(
u,
User(
id=7,
name="fred",
addresses=[
Address(email_address="foo@bar.com"),
Address(email_address="hoho@bar.com"),
],
),
)
eq_(
u2,
User(
id=7,
name="fred",
addresses=[
Address(email_address="foo@bar.com"),
Address(email_address="hoho@bar.com"),
],
),
)
sess.flush()
sess.expunge_all()
u2 = sess.get(User, 7)
eq_(
u2,
User(
id=7,
name="fred",
addresses=[
Address(email_address="foo@bar.com"),
Address(email_address="hoho@bar.com"),
],
),
)
eq_(load.called, 6)
def test_merge_empty_attributes(self):
User, dingalings = self.classes.User, self.tables.dingalings
self.mapper_registry.map_imperatively(User, dingalings)
sess = fixture_session(autoflush=False)
# merge empty stuff. goes in as NULL.
# not sure what this was originally trying to
# test.
u1 = sess.merge(User(id=1))
sess.flush()
assert u1.data is None
# save another user with "data"
u2 = User(id=2, data="foo")
sess.add(u2)
sess.flush()
# merge User on u2's pk with
# no "data".
# value isn't whacked from the destination
# dict.
u3 = sess.merge(User(id=2))
eq_(u3.__dict__["data"], "foo")
# make a change.
u3.data = "bar"
# merge another no-"data" user.
# attribute maintains modified state.
# (usually autoflush would have happened
# here anyway).
u4 = sess.merge(User(id=2)) # noqa
eq_(u3.__dict__["data"], "bar")
sess.flush()
# and after the flush.
eq_(u3.data, "bar")
# new row.
u5 = User(id=3, data="foo")
sess.add(u5)
sess.flush()
# blow it away from u5, but don't
# mark as expired. so it would just
# be blank.
del u5.data
# the merge adds expiry to the
# attribute so that it loads.
# not sure if I like this - it currently is needed
# for test_pickled:PickleTest.test_instance_deferred_cols
u6 = sess.merge(User(id=3))
assert "data" not in u6.__dict__
assert u6.data == "foo"
# set it to None. this is actually
# a change so gets preserved.
u6.data = None
u7 = sess.merge(User(id=3)) # noqa
assert u6.__dict__["data"] is None
def test_merge_irregular_collection(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
backref="user",
collection_class=attribute_keyed_dict("email_address"),
)
},
)
u1 = User(id=7, name="fred")
u1.addresses["foo@bar.com"] = Address(email_address="foo@bar.com")
sess = fixture_session()
sess.merge(u1)
sess.flush()
assert list(u1.addresses.keys()) == ["foo@bar.com"]
def test_attribute_cascade(self):
"""Merge of a persistent entity with two child
persistent entities."""
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
backref="user",
)
},
)
load = self.load_tracker(User)
self.load_tracker(Address, load)
with fixture_session(expire_on_commit=False) as sess, sess.begin():
# set up data and save
u = User(
id=7,
name="fred",
addresses=[
Address(email_address="foo@bar.com"),
Address(email_address="hoho@la.com"),
],
)
sess.add(u)
# assert data was saved
sess2 = fixture_session()
u2 = sess2.get(User, 7)
eq_(
u2,
User(
id=7,
name="fred",
addresses=[
Address(email_address="foo@bar.com"),
Address(email_address="hoho@la.com"),
],
),
)
# make local changes to data
u.name = "fred2"
u.addresses[1].email_address = "hoho@lalala.com"
eq_(load.called, 3)
# new session, merge modified data into session
with fixture_session(expire_on_commit=False) as sess3:
u3 = sess3.merge(u)
eq_(load.called, 6)
# ensure local changes are pending
eq_(
u3,
User(
id=7,
name="fred2",
addresses=[
Address(email_address="foo@bar.com"),
Address(email_address="hoho@lalala.com"),
],
),
)
# save merged data
sess3.commit()
# assert modified/merged data was saved
with fixture_session() as sess:
u = sess.get(User, 7)
eq_(
u,
User(
id=7,
name="fred2",
addresses=[
Address(email_address="foo@bar.com"),
Address(email_address="hoho@lalala.com"),
],
),
)
eq_(load.called, 9)
# merge persistent object into another session
with fixture_session(expire_on_commit=False) as sess4:
u = sess4.merge(u)
assert len(u.addresses)
for a in u.addresses:
assert a.user is u
def go():
sess4.flush()
# no changes; therefore flush should do nothing
self.assert_sql_count(testing.db, go, 0)
sess4.commit()
eq_(load.called, 12)
# test with "dontload" merge
with fixture_session(expire_on_commit=False) as sess5:
u = sess5.merge(u, load=False)
assert len(u.addresses)
for a in u.addresses:
assert a.user is u
def go():
sess5.flush()
# no changes; therefore flush should do nothing
# but also, load=False wipes out any difference in committed state,
# so no flush at all
self.assert_sql_count(testing.db, go, 0)
eq_(load.called, 15)
with fixture_session(expire_on_commit=False) as sess4, sess4.begin():
u = sess4.merge(u, load=False)
# post merge change
u.addresses[1].email_address = "afafds"
def go():
sess4.flush()
# afafds change flushes
self.assert_sql_count(testing.db, go, 1)
eq_(load.called, 18)
with fixture_session(expire_on_commit=False) as sess5:
u2 = sess5.get(User, u.id)
eq_(u2.name, "fred2")
eq_(u2.addresses[1].email_address, "afafds")
eq_(load.called, 21)
    def test_dont_send_neverset_to_get(self):
        """merge() of an object with a never-set attribute does not emit
        any SQL (regression test for issue #3647)."""
        # test issue #3647
        CompositePk, composite_pk_table = (
            self.classes.CompositePk,
            self.tables.composite_pk_table,
        )
        self.mapper_registry.map_imperatively(CompositePk, composite_pk_table)
        cp1 = CompositePk(j=1, k=1)
        sess = fixture_session()
        rec = []
        def go():
            rec.append(sess.merge(cp1))
        # zero statements: the never-set "i" attribute must not trigger
        # an identity-map lookup / SELECT during the merge
        self.assert_sql_count(testing.db, go, 0)
        rec[0].i = 5
        sess.commit()
        # the merged copy accepts the later-assigned value
        eq_(rec[0].i, 5)
def test_dont_send_neverset_to_get_w_relationship(self):
# test issue #3647
CompositePk, composite_pk_table = (
self.classes.CompositePk,
self.tables.composite_pk_table,
)
User, users = (self.classes.User, self.tables.users)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"elements": relationship(
CompositePk,
primaryjoin=users.c.id == foreign(composite_pk_table.c.i),
)
},
)
self.mapper_registry.map_imperatively(CompositePk, composite_pk_table)
u1 = User(id=5, name="some user")
cp1 = CompositePk(j=1, k=1)
u1.elements.append(cp1)
sess = fixture_session()
rec = []
def go():
rec.append(sess.merge(u1))
self.assert_sql_count(testing.db, go, 1)
u2 = rec[0]
sess.commit()
eq_(u2.elements[0].i, 5)
eq_(u2.id, 5)
def test_no_relationship_cascade(self):
"""test that merge doesn't interfere with a relationship()
target that specifically doesn't include 'merge' cascade.
"""
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={"user": relationship(User, cascade="save-update")},
)
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
u1 = User(name="fred")
a1 = Address(email_address="asdf", user=u1)
sess.add(a1)
sess.flush()
a2 = Address(id=a1.id, email_address="bar", user=User(name="hoho"))
a2 = sess.merge(a2)
sess.flush()
# no expire of the attribute
assert a2.__dict__["user"] is u1
# merge succeeded
eq_(
sess.query(Address).all(), [Address(id=a1.id, email_address="bar")]
)
# didn't touch user
eq_(sess.query(User).all(), [User(name="fred")])
def test_one_to_many_cascade(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses)
)
},
)
load = self.load_tracker(User)
self.load_tracker(Address, load)
sess = fixture_session(expire_on_commit=False)
u = User(name="fred")
a1 = Address(email_address="foo@bar")
a2 = Address(email_address="foo@quux")
u.addresses.extend([a1, a2])
sess.add(u)
sess.commit()
eq_(load.called, 0)
sess2 = fixture_session()
u2 = sess2.get(User, u.id)
eq_(load.called, 1)
u.addresses[1].email_address = "addr 2 modified"
sess2.merge(u)
eq_(u2.addresses[1].email_address, "addr 2 modified")
eq_(load.called, 3)
sess3 = fixture_session()
u3 = sess3.get(User, u.id)
eq_(load.called, 4)
u.name = "also fred"
sess3.merge(u)
eq_(load.called, 6)
eq_(u3.name, "also fred")
def test_many_to_one_cascade(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
Address, addresses, properties={"user": relationship(User)}
)
self.mapper_registry.map_imperatively(User, users)
u1 = User(id=1, name="u1")
a1 = Address(id=1, email_address="a1", user=u1)
u2 = User(id=2, name="u2")
sess = fixture_session(expire_on_commit=False)
sess.add_all([a1, u2])
sess.commit()
a1.user = u2
with fixture_session(expire_on_commit=False) as sess2:
a2 = sess2.merge(a1)
eq_(attributes.get_history(a2, "user"), ([u2], (), ()))
assert a2 in sess2.dirty
sess.refresh(a1)
with fixture_session(expire_on_commit=False) as sess2:
a2 = sess2.merge(a1, load=False)
eq_(attributes.get_history(a2, "user"), ((), [u1], ()))
assert a2 not in sess2.dirty
def test_many_to_many_cascade(self):
items, Order, orders, order_items, Item = (
self.tables.items,
self.classes.Order,
self.tables.orders,
self.tables.order_items,
self.classes.Item,
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"items": relationship(
self.mapper_registry.map_imperatively(Item, items),
secondary=order_items,
)
},
)
load = self.load_tracker(Order)
self.load_tracker(Item, load)
with fixture_session(expire_on_commit=False) as sess:
i1 = Item()
i1.description = "item 1"
i2 = Item()
i2.description = "item 2"
o = Order()
o.description = "order description"
o.items.append(i1)
o.items.append(i2)
sess.add(o)
sess.commit()
eq_(load.called, 0)
with fixture_session(expire_on_commit=False) as sess2:
o2 = sess2.get(Order, o.id)
eq_(load.called, 1)
o.items[1].description = "item 2 modified"
sess2.merge(o)
eq_(o2.items[1].description, "item 2 modified")
eq_(load.called, 3)
with fixture_session(expire_on_commit=False) as sess3:
o3 = sess3.get(Order, o.id)
eq_(load.called, 4)
o.description = "desc modified"
sess3.merge(o)
eq_(load.called, 6)
eq_(o3.description, "desc modified")
def test_one_to_one_cascade(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"address": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
uselist=False,
)
},
)
load = self.load_tracker(User)
self.load_tracker(Address, load)
sess = fixture_session(expire_on_commit=False)
u = User()
u.id = 7
u.name = "fred"
a1 = Address()
a1.email_address = "foo@bar.com"
u.address = a1
sess.add(u)
sess.commit()
eq_(load.called, 0)
sess2 = fixture_session()
u2 = sess2.get(User, 7)
eq_(load.called, 1)
u2.name = "fred2"
u2.address.email_address = "hoho@lalala.com"
eq_(load.called, 2)
u3 = sess.merge(u2)
eq_(load.called, 2)
assert u3 is u
    def test_value_to_none(self):
        """Merging objects whose attributes are explicitly None overwrites
        the existing scalar and relationship values with None."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties={
                "address": relationship(
                    self.mapper_registry.map_imperatively(Address, addresses),
                    uselist=False,
                    backref="user",
                )
            },
        )
        sess = fixture_session()
        u = User(
            id=7,
            name="fred",
            address=Address(id=1, email_address="foo@bar.com"),
        )
        sess.add(u)
        sess.commit()
        sess.close()
        # merge a transient copy carrying explicit None values
        u2 = User(id=7, name=None, address=None)
        u3 = sess.merge(u2)
        assert u3.name is None
        assert u3.address is None
        sess.close()
        # the same applies to a many-to-one relationship set to None
        a1 = Address(id=1, user=None)
        a2 = sess.merge(a1)
        assert a2.user is None
    def test_transient_no_load(self):
        """merge(load=False) raises InvalidRequestError for a transient
        (never-persisted) instance."""
        users, User = self.tables.users, self.classes.User
        self.mapper_registry.map_imperatively(User, users)
        sess = fixture_session()
        u = User()
        assert_raises_message(
            sa.exc.InvalidRequestError,
            "load=False option does not support",
            sess.merge,
            u,
            load=False,
        )
def test_no_load_with_backrefs(self):
"""load=False populates relationships in both
directions without requiring a load"""
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
backref="user",
)
},
)
u = User(
id=7,
name="fred",
addresses=[
Address(email_address="ad1"),
Address(email_address="ad2"),
],
)
sess = fixture_session()
sess.add(u)
sess.flush()
sess.close()
assert "user" in u.addresses[1].__dict__
sess = fixture_session()
u2 = sess.merge(u, load=False)
assert "user" in u2.addresses[1].__dict__
eq_(u2.addresses[1].user, User(id=7, name="fred"))
sess.expire(u2.addresses[1], ["user"])
assert "user" not in u2.addresses[1].__dict__
sess.close()
sess = fixture_session()
u = sess.merge(u2, load=False)
assert "user" not in u.addresses[1].__dict__
eq_(u.addresses[1].user, User(id=7, name="fred"))
def test_dontload_with_eager(self):
"""
This test illustrates that with load=False, we can't just copy
the committed_state of the merged instance over; since it
references collection objects which themselves are to be merged.
This committed_state would instead need to be piecemeal
'converted' to represent the correct objects. However, at the
moment I'd rather not support this use case; if you are merging
with load=False, you're typically dealing with caching and the
merged objects shouldn't be 'dirty'.
"""
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses)
)
},
)
with fixture_session(expire_on_commit=False) as sess:
u = User()
u.id = 7
u.name = "fred"
a1 = Address()
a1.email_address = "foo@bar.com"
u.addresses.append(a1)
sess.add(u)
sess.commit()
sess2 = fixture_session()
u2 = sess2.get(User, 7, options=[sa.orm.joinedload(User.addresses)])
sess3 = fixture_session()
u3 = sess3.merge(u2, load=False) # noqa
def go():
sess3.flush()
self.assert_sql_count(testing.db, go, 0)
def test_no_load_disallows_dirty(self):
"""load=False doesn't support 'dirty' objects right now
(see test_no_load_with_eager()). Therefore lets assert it.
"""
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
with fixture_session(expire_on_commit=False) as sess:
u = User()
u.id = 7
u.name = "fred"
sess.add(u)
sess.commit()
u.name = "ed"
sess2 = fixture_session()
try:
sess2.merge(u, load=False)
assert False
except sa.exc.InvalidRequestError as e:
assert (
"merge() with load=False option does not support "
"objects marked as 'dirty'. flush() all changes on "
"mapped instances before merging with load=False." in str(e)
)
u2 = sess2.get(User, 7)
sess3 = fixture_session()
u3 = sess3.merge(u2, load=False) # noqa
assert not sess3.dirty
def go():
sess3.flush()
self.assert_sql_count(testing.db, go, 0)
def test_no_load_sets_backrefs(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
backref="user",
)
},
)
sess = fixture_session()
u = User()
u.id = 7
u.name = "fred"
a1 = Address()
a1.email_address = "foo@bar.com"
u.addresses.append(a1)
sess.add(u)
sess.flush()
assert u.addresses[0].user is u
sess2 = fixture_session()
u2 = sess2.merge(u, load=False)
assert not sess2.dirty
def go():
assert u2.addresses[0].user is u2
self.assert_sql_count(testing.db, go, 0)
def test_no_load_preserves_parents(self):
"""Merge with load=False does not trigger a 'delete-orphan'
operation.
merge with load=False sets attributes without using events.
this means the 'hasparent' flag is not propagated to the newly
merged instance. in fact this works out OK, because the
'_state.parents' collection on the newly merged instance is
empty; since the mapper doesn't see an active 'False' setting in
this collection when _is_orphan() is called, it does not count
as an orphan (i.e. this is the 'optimistic' logic in
mapper._is_orphan().)
"""
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
backref="user",
cascade="all, delete-orphan",
)
},
)
with fixture_session(expire_on_commit=False) as sess:
u = User()
u.id = 7
u.name = "fred"
a1 = Address()
a1.email_address = "foo@bar.com"
u.addresses.append(a1)
sess.add(u)
sess.commit()
assert u.addresses[0].user is u
with fixture_session(expire_on_commit=False) as sess2:
u2 = sess2.merge(u, load=False)
assert not sess2.dirty
a2 = u2.addresses[0]
a2.email_address = "somenewaddress"
assert not sa.orm.object_mapper(a2)._is_orphan(
sa.orm.attributes.instance_state(a2)
)
sess2.commit()
with fixture_session() as sess2:
eq_(
sess2.get(User, u2.id).addresses[0].email_address,
"somenewaddress",
)
# this use case is not supported; this is with a pending Address
# on the pre-merged object, and we currently don't support
# 'dirty' objects being merged with load=False. in this case,
# the empty '_state.parents' collection would be an issue, since
# the optimistic flag is False in _is_orphan() for pending
# instances. so if we start supporting 'dirty' with load=False,
# this test will need to pass
sess2 = fixture_session()
sess = fixture_session()
u = sess.get(User, 7)
u.addresses.append(Address())
sess2 = fixture_session()
try:
u2 = sess2.merge(u, load=False)
assert False
# if load=False is changed to support dirty objects, this code
# needs to pass
a2 = u2.addresses[0]
a2.email_address = "somenewaddress"
assert not sa.orm.object_mapper(a2)._is_orphan(
sa.orm.attributes.instance_state(a2)
)
sess2.flush()
sess2.expunge_all()
eq_(
sess2.get(User, u2.id).addresses[0].email_address,
"somenewaddress",
)
except sa.exc.InvalidRequestError as e:
assert "load=False option does not support" in str(e)
@testing.variation("viewonly", ["viewonly", "normal"])
@testing.variation("load", ["load", "noload"])
@testing.variation("lazy", ["select", "raise", "raise_on_sql"])
@testing.variation(
"merge_persistent", ["merge_persistent", "merge_detached"]
)
@testing.variation("detach_original", ["detach", "persistent"])
@testing.variation("direction", ["o2m", "m2o"])
def test_relationship_population_maintained(
self,
viewonly,
load,
lazy,
merge_persistent,
direction,
detach_original,
):
"""test #8862"""
User, Address = self.classes("User", "Address")
users, addresses = self.tables("users", "addresses")
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address,
viewonly=viewonly.viewonly,
lazy=lazy.name,
back_populates="user",
order_by=addresses.c.id,
)
},
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"user": relationship(
User,
viewonly=viewonly.viewonly,
lazy=lazy.name,
back_populates="addresses",
)
},
)
s = fixture_session()
u1 = User(id=1, name="u1")
s.add(u1)
s.flush()
s.add_all(
[Address(user_id=1, email_address="e%d" % i) for i in range(1, 4)]
)
s.commit()
if direction.o2m:
cls_to_merge = User
obj_to_merge = (
s.scalars(select(User).options(joinedload(User.addresses)))
.unique()
.one()
)
attrname = "addresses"
elif direction.m2o:
cls_to_merge = Address
obj_to_merge = (
s.scalars(
select(Address)
.filter_by(email_address="e1")
.options(joinedload(Address.user))
)
.unique()
.one()
)
attrname = "user"
else:
direction.fail()
assert attrname in obj_to_merge.__dict__
s2 = Session(testing.db)
if merge_persistent.merge_persistent:
target_persistent = s2.get(cls_to_merge, obj_to_merge.id) # noqa
if detach_original.detach:
s.expunge(obj_to_merge)
with self.sql_execution_asserter(testing.db) as assert_:
merged_object = s2.merge(obj_to_merge, load=load.load)
assert_.assert_(
CountStatements(
0
if load.noload
else 1 if merge_persistent.merge_persistent else 2
)
)
assert attrname in merged_object.__dict__
with self.sql_execution_asserter(testing.db) as assert_:
if direction.o2m:
eq_(
merged_object.addresses,
[
Address(user_id=1, email_address="e%d" % i)
for i in range(1, 4)
],
)
elif direction.m2o:
eq_(merged_object.user, User(id=1, name="u1"))
assert_.assert_(CountStatements(0))
    def test_synonym(self):
        """A mapping carrying a synonym ("uid" for "id") survives
        expunge + merge of a detached instance without error."""
        users = self.tables.users
        class User:
            def _getValue(self):
                return self._value
            def _setValue(self, value):
                setattr(self, "_value", value)
            # NOTE(review): "value" appears unused by the test body;
            # presumably it exercises mapping a class that carries a
            # plain Python descriptor alongside the synonym — confirm.
            value = property(_getValue, _setValue)
        self.mapper_registry.map_imperatively(
            User, users, properties={"uid": synonym("id")}
        )
        sess = fixture_session()
        u = User()
        u.name = "ed"
        sess.add(u)
        sess.flush()
        sess.expunge(u)
        # merge of the detached instance must not trip over the synonym
        sess.merge(u)
def test_cascade_doesnt_blowaway_manytoone(self):
"""a merge test that was fixed by [ticket:1202]"""
User, Address, addresses, users = (
self.classes.User,
self.classes.Address,
self.tables.addresses,
self.tables.users,
)
s = fixture_session(autoflush=True, future=True)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
backref="user",
)
},
)
a1 = Address(user=s.merge(User(id=1, name="ed")), email_address="x")
s.add(a1)
before_id = id(a1.user)
a2 = Address(user=s.merge(User(id=1, name="jack")), email_address="x")
s.add(a2)
after_id = id(a1.user)
other_id = id(a2.user)
eq_(before_id, other_id)
eq_(after_id, other_id)
eq_(before_id, after_id)
eq_(a1.user, a2.user)
    def test_cascades_dont_autoflush(self):
        """A merge that cascades to pending related objects does not
        autoflush mid-merge: the merged user is still pending afterwards
        and is only persisted by the explicit flush."""
        User, Address, addresses, users = (
            self.classes.User,
            self.classes.Address,
            self.tables.addresses,
            self.tables.users,
        )
        sess = fixture_session(
            autoflush=True,
        )
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties={
                "addresses": relationship(
                    self.mapper_registry.map_imperatively(Address, addresses),
                    backref="user",
                )
            },
        )
        user = User(
            id=8, name="fred", addresses=[Address(email_address="user")]
        )
        merged_user = sess.merge(user)
        # still pending despite autoflush=True on the session
        assert merged_user in sess.new
        sess.flush()
        assert merged_user not in sess.new
def test_cascades_dont_autoflush_2(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address, backref="user", cascade="all, delete-orphan"
)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
u = User(
id=7, name="fred", addresses=[Address(id=1, email_address="fred1")]
)
sess = fixture_session(
autoflush=True,
)
sess.add(u)
sess.commit()
sess.expunge_all()
u = User(
id=7,
name="fred",
addresses=[
Address(id=1, email_address="fred1"),
Address(id=2, email_address="fred2"),
],
)
sess.merge(u)
assert sess.autoflush
sess.commit()
    def test_dont_expire_pending(self):
        """test that pending instances aren't expired during a merge."""
        users, User = self.tables.users, self.classes.User
        self.mapper_registry.map_imperatively(User, users)
        u = User(id=7)
        sess = fixture_session(
            autoflush=True,
        )
        u = sess.merge(u)
        # the merged-in pending instance carries no expired attributes
        assert not bool(attributes.instance_state(u).expired_attributes)
        def go():
            eq_(u.name, None)
        # reading an attribute therefore emits no SQL
        self.assert_sql_count(testing.db, go, 0)
def test_option_state(self):
"""test that the merged takes on the MapperOption characteristics
of that which is merged.
"""
users, User = self.tables.users, self.classes.User
class Option(MapperOption):
propagate_to_loaders = True
opt1, opt2 = Option(), Option()
sess = fixture_session()
umapper = self.mapper_registry.map_imperatively(User, users)
sess.add_all([User(id=1, name="u1"), User(id=2, name="u2")])
sess.commit()
sess2 = fixture_session()
s2_users = sess2.query(User).options(opt2).all()
# test 1. no options are replaced by merge options
sess = fixture_session()
s1_users = sess.query(User).all()
for u in s1_users:
ustate = attributes.instance_state(u)
eq_(ustate.load_path.path, (umapper,))
eq_(ustate.load_options, ())
for u in s2_users:
sess.merge(u)
for u in s1_users:
ustate = attributes.instance_state(u)
eq_(ustate.load_path.path, (umapper,))
eq_(ustate.load_options, (opt2,))
# test 2. present options are replaced by merge options
sess = fixture_session()
s1_users = sess.query(User).options(opt1).all()
for u in s1_users:
ustate = attributes.instance_state(u)
eq_(ustate.load_path.path, (umapper,))
eq_(ustate.load_options, (opt1,))
for u in s2_users:
sess.merge(u)
for u in s1_users:
ustate = attributes.instance_state(u)
eq_(ustate.load_path.path, (umapper,))
eq_(ustate.load_options, (opt2,))
def test_resolve_conflicts_pending_doesnt_interfere_no_ident(self):
User, Address, Order = (
self.classes.User,
self.classes.Address,
self.classes.Order,
)
users, addresses, orders = (
self.tables.users,
self.tables.addresses,
self.tables.orders,
)
self.mapper_registry.map_imperatively(
User, users, properties={"orders": relationship(Order)}
)
self.mapper_registry.map_imperatively(
Order, orders, properties={"address": relationship(Address)}
)
self.mapper_registry.map_imperatively(Address, addresses)
u1 = User(id=7, name="x")
u1.orders = [
Order(description="o1", address=Address(email_address="a")),
Order(description="o2", address=Address(email_address="b")),
Order(description="o3", address=Address(email_address="c")),
]
sess = fixture_session()
sess.merge(u1)
sess.flush()
eq_(
sess.query(Address.email_address)
.order_by(Address.email_address)
.all(),
[("a",), ("b",), ("c",)],
)
    def test_resolve_conflicts_pending(self):
        """When several pending objects in a merged graph share the same
        primary key (three Addresses with id=1), merge resolves them to a
        single pending instance and the last-merged state wins."""
        User, Address, Order = (
            self.classes.User,
            self.classes.Address,
            self.classes.Order,
        )
        users, addresses, orders = (
            self.tables.users,
            self.tables.addresses,
            self.tables.orders,
        )
        self.mapper_registry.map_imperatively(
            User, users, properties={"orders": relationship(Order)}
        )
        self.mapper_registry.map_imperatively(
            Order, orders, properties={"address": relationship(Address)}
        )
        self.mapper_registry.map_imperatively(Address, addresses)
        u1 = User(id=7, name="x")
        # three distinct Address objects deliberately share id=1
        u1.orders = [
            Order(description="o1", address=Address(id=1, email_address="a")),
            Order(description="o2", address=Address(id=1, email_address="b")),
            Order(description="o3", address=Address(id=1, email_address="c")),
        ]
        sess = fixture_session()
        sess.merge(u1)
        sess.flush()
        # only one Address row exists and it holds the last-merged value
        eq_(sess.query(Address).one(), Address(id=1, email_address="c"))
def test_resolve_conflicts_persistent(self):
User, Address, Order = (
self.classes.User,
self.classes.Address,
self.classes.Order,
)
users, addresses, orders = (
self.tables.users,
self.tables.addresses,
self.tables.orders,
)
self.mapper_registry.map_imperatively(
User, users, properties={"orders": relationship(Order)}
)
self.mapper_registry.map_imperatively(
Order, orders, properties={"address": relationship(Address)}
)
self.mapper_registry.map_imperatively(Address, addresses)
sess = fixture_session()
sess.add(Address(id=1, email_address="z"))
sess.commit()
u1 = User(id=7, name="x")
u1.orders = [
Order(description="o1", address=Address(id=1, email_address="a")),
Order(description="o2", address=Address(id=1, email_address="b")),
Order(description="o3", address=Address(id=1, email_address="c")),
]
sess = fixture_session()
sess.merge(u1)
sess.flush()
eq_(sess.query(Address).one(), Address(id=1, email_address="c"))
    def test_merge_all(self):
        """Session.merge_all() merges a list of instances in one call,
        returning the merged copies in input order."""
        User, users = self.classes.User, self.tables.users
        self.mapper_registry.map_imperatively(User, users)
        sess = fixture_session()
        load = self.load_tracker(User)
        ua = User(id=42, name="bob")
        ub = User(id=7, name="fred")
        eq_(load.called, 0)
        uam, ubm = sess.merge_all([ua, ub])
        # one load event per merged instance
        eq_(load.called, 2)
        assert uam in sess
        assert ubm in sess
        eq_(uam, User(id=42, name="bob"))
        eq_(ubm, User(id=7, name="fred"))
        sess.flush()
        sess.expunge_all()
        # both rows were persisted
        eq_(
            sess.query(User).order_by("id").all(),
            [User(id=7, name="fred"), User(id=42, name="bob")],
        )
| MergeTest |
python | huggingface__transformers | src/transformers/models/stablelm/modeling_stablelm.py | {
"start": 20047,
"end": 20408
} | class ____(PreTrainedModel):
config: StableLmConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["StableLmDecoderLayer"]
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = True
@auto_docstring
| StableLmPreTrainedModel |
python | ray-project__ray | python/ray/dashboard/memory_utils.py | {
"start": 1276,
"end": 1361
class GroupByType(Enum):
    """Supported keys for grouping memory-table entries.

    The member value is the string identifier for the grouping mode.
    """

    NODE_ADDRESS = "node"  # group entries by node address
    STACK_TRACE = "stack_trace"  # group entries by creating stack trace
python | realpython__materials | python-getter-setter/label.py | {
"start": 0,
"end": 321
class Label:
    """A text label whose content and font are reached through explicit
    accessor methods (Java-style getters and setters)."""

    def __init__(self, text, font):
        # Internal storage; all external access goes through the accessors.
        self._text = text
        self._font = font

    def get_text(self):
        """Return the label's current text."""
        return self._text

    def get_font(self):
        """Return the label's current font."""
        return self._font

    def set_text(self, value):
        """Replace the label's text with *value*."""
        self._text = value

    def set_font(self, value):
        """Replace the label's font with *value*."""
        self._font = value
python | getsentry__sentry | tests/sentry/tasks/test_delete_pending_groups.py | {
"start": 426,
"end": 6723
} | class ____(TestCase):
def _count_groups_in_deletion_status_and_valid_date_range(self) -> int:
"""Count groups with deletion statuses in the valid date range."""
return Group.objects.filter(
status__in=[GroupStatus.PENDING_DELETION, GroupStatus.DELETION_IN_PROGRESS],
last_seen__gte=self._days_ago(MAX_LAST_SEEN_DAYS),
last_seen__lte=self._hours_ago(MIN_LAST_SEEN_HOURS),
).count()
def _days_ago(self, days: int) -> datetime:
return timezone.now() - timedelta(days=days)
def _hours_ago(self, hours: int) -> datetime:
return timezone.now() - timedelta(hours=hours)
def test_schedules_only_groups_within_valid_date_range(self) -> None:
"""Test that only groups with last_seen between 24h-90d are scheduled for deletion."""
project = self.create_project()
# Too recent - within 4 hours (should NOT be scheduled)
too_recent = self.create_group(
project=project, status=GroupStatus.PENDING_DELETION, last_seen=self._hours_ago(4)
)
# Valid range - should be scheduled
valid_group = self.create_group(
project=project, status=GroupStatus.PENDING_DELETION, last_seen=self._hours_ago(7)
)
# Too old - over 90 days (should NOT be scheduled)
too_old = self.create_group(
project=project, status=GroupStatus.DELETION_IN_PROGRESS, last_seen=self._days_ago(91)
)
# Wrong status - should NOT be scheduled
wrong_status = self.create_group(
project=project,
status=GroupStatus.UNRESOLVED,
substatus=GroupSubStatus.NEW,
last_seen=self._days_ago(5),
)
with patch(
"sentry.api.helpers.group_index.delete.delete_groups_for_project.apply_async"
) as mock_delete_task:
delete_pending_groups()
# Verify only the valid group was scheduled
mock_delete_task.assert_called_once()
call_kwargs = mock_delete_task.call_args.kwargs["kwargs"]
assert call_kwargs["object_ids"] == [valid_group.id]
assert call_kwargs["project_id"] == project.id
assert self._count_groups_in_deletion_status_and_valid_date_range() != 0
with self.tasks():
delete_pending_groups()
assert self._count_groups_in_deletion_status_and_valid_date_range() == 0
assert list(Group.objects.all().values_list("id", flat=True).order_by("id")) == [
too_recent.id,
too_old.id,
wrong_status.id,
]
@patch("sentry.api.helpers.group_index.delete.delete_groups_for_project.apply_async")
def test_groups_by_project(self, mock_delete_task: MagicMock) -> None:
"""Test that groups are properly grouped by project when scheduling deletion."""
project1 = self.create_project()
project2 = self.create_project()
group1 = self.create_group(
project=project1, status=GroupStatus.PENDING_DELETION, last_seen=self._days_ago(2)
)
group2 = self.create_group(
project=project1, status=GroupStatus.PENDING_DELETION, last_seen=self._days_ago(2)
)
group3 = self.create_group(
project=project2, status=GroupStatus.PENDING_DELETION, last_seen=self._days_ago(2)
)
delete_pending_groups()
assert mock_delete_task.call_count == 2
# Verify both projects got their deletion tasks scheduled
all_calls = mock_delete_task.call_args_list
project_ids = {call.kwargs["kwargs"]["project_id"] for call in all_calls}
assert project_ids == {project1.id, project2.id}
# Verify correct groups are in each call
for call in all_calls:
call_kwargs = call.kwargs["kwargs"]
if call_kwargs["project_id"] == project1.id:
assert set(call_kwargs["object_ids"]) == {group1.id, group2.id}
elif call_kwargs["project_id"] == project2.id:
assert set(call_kwargs["object_ids"]) == {group3.id}
@patch("sentry.api.helpers.group_index.delete.GROUP_CHUNK_SIZE", 10)
@patch("sentry.api.helpers.group_index.delete.delete_groups_for_project.apply_async")
@patch("sentry.tasks.delete_pending_groups.metrics.incr")
def test_chunks_large_batches(
self,
mock_metrics_incr: MagicMock,
mock_delete_task: MagicMock,
) -> None:
"""Test that groups are chunked according to GROUP_CHUNK_SIZE when scheduling deletion."""
GROUP_CHUNK_SIZE = 10
GROUPS_MORE_THAN_CHUNK_SIZE = 5
project = self.create_project()
# Create more groups than GROUP_CHUNK_SIZE (10 in this test)
num_groups = GROUPS_MORE_THAN_CHUNK_SIZE + GROUP_CHUNK_SIZE
for _ in range(num_groups):
self.create_group(
project=project, status=GroupStatus.PENDING_DELETION, last_seen=self._days_ago(2)
)
delete_pending_groups()
# Should be called twice: one chunk of 10 and one of 5
assert mock_delete_task.call_count == 2
# Verify first chunk has GROUP_CHUNK_SIZE groups
first_call_kwargs = mock_delete_task.call_args_list[0].kwargs["kwargs"]
assert len(first_call_kwargs["object_ids"]) == GROUP_CHUNK_SIZE
# Verify second chunk has remaining groups
second_call_kwargs = mock_delete_task.call_args_list[1].kwargs["kwargs"]
assert len(second_call_kwargs["object_ids"]) == GROUPS_MORE_THAN_CHUNK_SIZE
# Assert metrics are called with correct totals
incr_calls = mock_metrics_incr.call_args_list
incr_names = [c.args[0] for c in incr_calls]
assert "delete_pending_groups.groups_scheduled" in incr_names
assert "delete_pending_groups.tasks_scheduled" in incr_names
groups_scheduled_call = next(
c for c in incr_calls if c.args[0] == "delete_pending_groups.groups_scheduled"
)
assert groups_scheduled_call.kwargs["amount"] == num_groups
tasks_scheduled_call = next(
c for c in incr_calls if c.args[0] == "delete_pending_groups.tasks_scheduled"
)
assert tasks_scheduled_call.kwargs["amount"] == 2
| DeletePendingGroupsTest |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/browserbase_web/base.py | {
"start": 202,
"end": 1518
} | class ____(BaseReader):
"""
BrowserbaseWebReader.
Load pre-rendered web pages using a headless browser hosted on Browserbase.
Depends on `browserbase` package.
Get your API key from https://browserbase.com
"""
def __init__(
self,
api_key: Optional[str] = None,
project_id: Optional[str] = None,
) -> None:
try:
from browserbase import Browserbase
except ImportError:
raise ImportError(
"`browserbase` package not found, please run `pip install browserbase`"
)
self.browserbase = Browserbase(api_key, project_id)
def lazy_load_data(
self,
urls: Sequence[str],
text_content: bool = False,
session_id: Optional[str] = None,
proxy: Optional[bool] = None,
) -> Iterator[Document]:
"""Load pages from URLs."""
pages = self.browserbase.load_urls(urls, text_content, session_id, proxy)
for i, page in enumerate(pages):
yield Document(
text=page,
metadata={
"url": urls[i],
},
)
if __name__ == "__main__":
reader = BrowserbaseWebReader()
logger.info(reader.load_data(urls=["https://example.com"]))
| BrowserbaseWebReader |
python | keras-team__keras | keras/src/losses/losses.py | {
"start": 20479,
"end": 21958
} | class ____(LossFunctionWrapper):
"""Computes the Poisson loss between `y_true` & `y_pred`.
Formula:
```python
loss = y_pred - y_true * log(y_pred)
```
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
`"float32"` unless set to different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
"""
def __init__(
self, reduction="sum_over_batch_size", name="poisson", dtype=None
):
super().__init__(poisson, name=name, reduction=reduction, dtype=dtype)
def get_config(self):
return Loss.get_config(self)
@keras_export("keras.losses.BinaryCrossentropy")
| Poisson |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/vimeo/tests.py | {
"start": 237,
"end": 1672
} | class ____(OAuthTestsMixin, TestCase):
provider_id = VimeoProvider.id
def get_mocked_response(self):
return [
MockedResponse(
HTTPStatus.OK,
"""
{"generated_in":"0.0137",
"stat":"ok","person":{
"created_on": "2013-04-08 14:24:47",
"id":"17574504",
"is_contact":"0",
"is_plus":"0","is_pro":"0","is_staff":"0","is_subscribed_to":"0",
"username":"user17574504","display_name":"Raymond Penners","location":"",
"url":[""],"bio":"","number_of_contacts":"0","number_of_uploads":"0",
"number_of_likes":"0","number_of_videos":"0",
"number_of_videos_appears_in":"0","number_of_albums":"0",
"number_of_channels":"0","number_of_groups":"0",
"profileurl":"http:\\/\\/vimeo.com\\/user17574504",
"videosurl":"http:\\/\\/vimeo.com\\/user17574504\\/videos",
"portraits":{"portrait":[{"height":"30","width":"30",
"_content":
"http:\\/\\/a.vimeocdn.com\\/images_v6\\/portraits\\/portrait_30_yellow.png"},
{"height":"75","width":"75","_content":
"http:\\/\\/a.vimeocdn.com\\/images_v6\\/portraits\\/portrait_75_yellow.png"},
{"height":"100","width":"100","_content":
"http:\\/\\/a.vimeocdn.com\\/images_v6\\/portraits\\/portrait_100_yellow.png"},
{"height":"300","width":"300","_content":
"http:\\/\\/a.vimeocdn.com\\/images_v6\\/portraits\\/portrait_300_yellow.png"}]}}}
""",
)
]
def get_expected_to_str(self):
return "user17574504"
| VimeoTests |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_base_aws.py | {
"start": 1233,
"end": 1472
} | class ____(AwsBaseHook):
"""Hook for tests, implements thin-wrapper around s3 client."""
def __init__(self, **kwargs):
kwargs.update({"client_type": "s3", "resource_type": None})
super().__init__(**kwargs)
| FakeS3Hook |
python | kamyu104__LeetCode-Solutions | Python/detonate-the-maximum-bombs.py | {
"start": 68,
"end": 1104
} | class ____(object):
def maximumDetonation(self, bombs):
"""
:type bombs: List[List[int]]
:rtype: int
"""
adj = [[] for _ in xrange(len(bombs))]
for i, (xi, yi, ri) in enumerate(bombs):
for j, (xj, yj, _) in enumerate(bombs):
if j == i:
continue
if (xi-xj)**2+(yi-yj)**2 <= ri**2:
adj[i].append(j)
result = 0
for i in xrange(len(bombs)):
q = [i]
lookup = {i}
while q:
new_q = []
for u in q:
for v in adj[u]:
if v in lookup:
continue
lookup.add(v)
new_q.append(v)
q = new_q
result = max(result, len(lookup))
if result == len(bombs):
break
return result
# Time: O(|V|^2 + |V| * |E|)
# Space: O(|V| + |E|)
# dfs solution
| Solution |
python | walkccc__LeetCode | solutions/3304. Find the K-th Character in String Game I/3304.py | {
"start": 0,
"end": 112
} | class ____:
def kthCharacter(self, k: int) -> str:
return string.ascii_lowercase[(k - 1).bit_count()]
| Solution |
python | davidhalter__jedi | test/completion/decorators.py | {
"start": 1504,
"end": 1881
} | class ____():
class_var = 3
@Decorator
def func_without_self(arg, arg2):
return arg, arg2
@Decorator
def func_with_self(self, arg):
return self.class_var
#? int()
MethodDecoratorAsClass().func_without_self('')[0]
#? str()
MethodDecoratorAsClass().func_without_self('')[1]
#?
MethodDecoratorAsClass().func_with_self(1)
| MethodDecoratorAsClass |
python | PyCQA__isort | isort/exceptions.py | {
"start": 1227,
"end": 1603
} | class ____(ISortError):
"""Raised when isort has introduced a syntax error in the process of sorting imports"""
def __init__(self, file_path: str):
super().__init__(
f"isort introduced syntax errors when attempting to sort the imports contained within "
f"{file_path}."
)
self.file_path = file_path
| IntroducedSyntaxErrors |
python | cython__cython | Cython/Debugger/libcython.py | {
"start": 19369,
"end": 19873
} | class ____(gdb.Parameter):
"""
Base class for cython parameters
"""
def __init__(self, name, command_class, parameter_class, default=None):
self.show_doc = self.set_doc = self.__class__.__doc__
super().__init__(name, command_class,
parameter_class)
if default is not None:
self.value = default
def __bool__(self):
return bool(self.value)
__nonzero__ = __bool__ # Python 2
| CythonParameter |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_docs_tests/test_python_ast_rule.py | {
"start": 18941,
"end": 24056
} | class ____:
"""Test edge cases and error scenarios."""
def test_code_block_with_unicode(self):
"""Test code blocks containing Unicode characters."""
docstring = """
Function with Unicode in code.
.. code-block:: python
text = "Hello, 世界!"
print(text)
"""
result = validate_docstring_text(docstring, "test.symbol")
assert result.is_valid()
def test_code_block_with_f_strings(self):
"""Test code blocks with f-string syntax."""
docstring = """
Function with f-strings.
.. code-block:: python
name = "Alice"
greeting = f"Hello, {name}!"
print(f"The greeting is: {greeting}")
"""
result = validate_docstring_text(docstring, "test.symbol")
assert result.is_valid()
def test_code_block_with_type_hints(self):
"""Test code blocks with type hints."""
docstring = """
Function with type hints.
.. code-block:: python
from typing import List, Optional
def process_items(items: List[str]) -> Optional[str]:
if not items:
return None
return items[0].upper()
"""
result = validate_docstring_text(docstring, "test.symbol")
assert result.is_valid()
def test_code_block_with_decorators(self):
"""Test code blocks with decorators."""
docstring = """
Function with decorators.
.. code-block:: python
@property
@lru_cache(maxsize=128)
def expensive_property(self) -> str:
return self._compute_value()
"""
result = validate_docstring_text(docstring, "test.symbol")
assert result.is_valid()
def test_very_long_code_block(self):
"""Test validation of very long code blocks."""
# Generate a long but valid code block
lines = ["def long_function():"]
lines.extend([f" var_{i} = {i}" for i in range(100)])
lines.append(" return sum([" + ", ".join(f"var_{i}" for i in range(100)) + "])")
# Properly indent the code for the docstring
indented_code = "\n".join(f" {line}" if line.strip() else "" for line in lines)
docstring = f"""
Function with long code block.
.. code-block:: python
{indented_code}
"""
result = validate_docstring_text(docstring, "test.symbol")
assert result.is_valid()
def test_deeply_nested_code(self):
"""Test deeply nested but valid code."""
docstring = """
Function with deeply nested code.
.. code-block:: python
for i in range(3):
for j in range(3):
for k in range(3):
if i == j == k:
for m in range(2):
if m > 0:
try:
result = process(i, j, k, m)
if result:
print(result)
except Exception:
continue
"""
result = validate_docstring_text(docstring, "test.symbol")
assert result.is_valid()
def test_code_block_with_triple_quotes(self):
"""Test code blocks containing triple-quoted strings."""
docstring = '''
Function with triple quotes in code.
.. code-block:: python
docstring = """
This is a docstring
with multiple lines.
"""
code = \'\'\'
This is code
with single quotes.
\'\'\'
'''
result = validate_docstring_text(docstring, "test.symbol")
assert result.is_valid()
@pytest.mark.parametrize(
"invalid_syntax",
[
"def func(", # Missing closing paren
"if True\n pass", # Missing colon
"x = 1 +", # Incomplete expression
"def func():\nprint('wrong indent')", # Indentation error
"x = [1, 2,", # Incomplete list
"def func():\n return\n x = 1", # Inconsistent indentation
],
)
def test_various_syntax_errors(self, invalid_syntax):
"""Test detection of various types of syntax errors."""
docstring = f"""
Function with syntax error.
.. code-block:: python
{invalid_syntax}
"""
result = validate_docstring_text(docstring, "test.symbol")
assert not result.is_valid()
assert result.has_errors()
# Should have Python syntax error
python_errors = [e for e in result.errors if "Python code block syntax error" in e]
assert len(python_errors) >= 1
| TestEdgeCasesAndErrorScenarios |
python | encode__django-rest-framework | tests/test_validators.py | {
"start": 36882,
"end": 37064
} | class ____(models.Model):
slug = models.CharField(max_length=100, unique_for_date='published')
published = models.DateTimeField(auto_now_add=True)
| HiddenFieldUniqueForDateModel |
python | huggingface__transformers | src/transformers/models/qwen3_vl/modeling_qwen3_vl.py | {
"start": 15962,
"end": 18326
} | class ____(nn.Module):
def __init__(self, hidden_size, eps: float = 1e-6) -> None:
"""
Qwen3VLTextRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
| Qwen3VLTextRMSNorm |
python | PrefectHQ__prefect | tests/test_flow_engine.py | {
"start": 28087,
"end": 35111
} | class ____:
@pytest.mark.parametrize("interrupt_type", [KeyboardInterrupt, SystemExit])
async def test_interrupt_in_flow_function_crashes_flow(
self, prefect_client, interrupt_type
):
@flow
async def my_flow():
raise interrupt_type()
with pytest.raises(interrupt_type):
await my_flow()
flow_runs = await prefect_client.read_flow_runs()
assert len(flow_runs) == 1
flow_run = flow_runs[0]
assert flow_run.state.is_crashed()
assert flow_run.state.type == StateType.CRASHED
assert "Execution was aborted" in flow_run.state.message
with pytest.raises(CrashedRun, match="Execution was aborted"):
await flow_run.state.result()
@pytest.mark.parametrize("interrupt_type", [KeyboardInterrupt, SystemExit])
async def test_interrupt_in_flow_function_crashes_flow_sync(
self, prefect_client, interrupt_type
):
@flow
def my_flow():
raise interrupt_type()
with pytest.raises(interrupt_type):
my_flow()
flow_runs = await prefect_client.read_flow_runs()
assert len(flow_runs) == 1
flow_run = flow_runs[0]
assert flow_run.state.is_crashed()
assert flow_run.state.type == StateType.CRASHED
assert "Execution was aborted" in flow_run.state.message
with pytest.raises(CrashedRun, match="Execution was aborted"):
await flow_run.state.result()
@pytest.mark.parametrize("interrupt_type", [KeyboardInterrupt, SystemExit])
async def test_interrupt_in_flow_orchestration_crashes_flow(
self, prefect_client, interrupt_type, monkeypatch
):
monkeypatch.setattr(
FlowRunEngine, "begin_run", MagicMock(side_effect=interrupt_type)
)
@flow
def my_flow():
pass
with pytest.raises(interrupt_type):
my_flow()
flow_runs = await prefect_client.read_flow_runs()
assert len(flow_runs) == 1
flow_run = flow_runs[0]
assert flow_run.state.is_crashed()
assert flow_run.state.type == StateType.CRASHED
assert "Execution was aborted" in flow_run.state.message
with pytest.raises(CrashedRun, match="Execution was aborted"):
await flow_run.state.result()
async def test_base_exception_after_user_code_finishes_does_not_crash_sync(
self, prefect_client, monkeypatch, caplog
):
"""
Test that a BaseException raised after user code finishes executing
does not crash the flow run (sync flow).
"""
@flow
def my_flow():
return 42
# Mock the flow run engine to raise a BaseException after handle_success
original_handle_success = FlowRunEngine.handle_success
def handle_success_with_exception(self, result):
original_handle_success(self, result)
# At this point the flow run state is final (Completed)
raise BaseException("Post-execution error")
monkeypatch.setattr(
FlowRunEngine, "handle_success", handle_success_with_exception
)
# The flow should complete successfully and return the result
result = my_flow()
assert result == 42
flow_runs = await prefect_client.read_flow_runs()
assert len(flow_runs) == 1
flow_run = flow_runs[0]
# The flow run should be completed, not crashed
assert flow_run.state.is_completed()
assert not flow_run.state.is_crashed()
# Verify the debug log message was recorded
assert (
"BaseException was raised after user code finished executing" in caplog.text
)
async def test_base_exception_after_user_code_finishes_does_not_crash_async(
self, prefect_client, monkeypatch, caplog
):
"""
Test that a BaseException raised after user code finishes executing
does not crash the flow run (async flow).
"""
@flow
async def my_flow():
return 42
# Mock the flow run engine to raise a BaseException after handle_success
original_handle_success = AsyncFlowRunEngine.handle_success
async def handle_success_with_exception(self, result):
await original_handle_success(self, result)
# At this point the flow run state is final (Completed)
raise BaseException("Post-execution error")
monkeypatch.setattr(
AsyncFlowRunEngine, "handle_success", handle_success_with_exception
)
# The flow should complete successfully and return the result
result = await my_flow()
assert result == 42
flow_runs = await prefect_client.read_flow_runs()
assert len(flow_runs) == 1
flow_run = flow_runs[0]
# The flow run should be completed, not crashed
assert flow_run.state.is_completed()
assert not flow_run.state.is_crashed()
# Verify the debug log message was recorded
assert (
"BaseException was raised after user code finished executing" in caplog.text
)
async def test_base_exception_before_user_code_finishes_crashes_sync(
self, prefect_client, monkeypatch
):
"""
Test that a BaseException raised before user code finishes executing
still crashes the flow run (sync flow).
"""
@flow
def my_flow():
return 42
# Mock the flow run engine to raise a BaseException during begin_run
monkeypatch.setattr(
FlowRunEngine,
"begin_run",
MagicMock(side_effect=BaseException("Pre-execution error")),
)
with pytest.raises(BaseException, match="Pre-execution error"):
my_flow()
flow_runs = await prefect_client.read_flow_runs()
assert len(flow_runs) == 1
flow_run = flow_runs[0]
# The flow run should be crashed
assert flow_run.state.is_crashed()
async def test_base_exception_before_user_code_finishes_crashes_async(
self, prefect_client, monkeypatch
):
"""
Test that a BaseException raised before user code finishes executing
still crashes the flow run (async flow).
"""
@flow
async def my_flow():
return 42
# Mock the flow run engine to raise a BaseException during begin_run
async def begin_run_with_exception(self):
raise BaseException("Pre-execution error")
monkeypatch.setattr(AsyncFlowRunEngine, "begin_run", begin_run_with_exception)
with pytest.raises(BaseException, match="Pre-execution error"):
await my_flow()
flow_runs = await prefect_client.read_flow_runs()
assert len(flow_runs) == 1
flow_run = flow_runs[0]
# The flow run should be crashed
assert flow_run.state.is_crashed()
| TestFlowCrashDetection |
python | sympy__sympy | sympy/physics/control/lti.py | {
"start": 53828,
"end": 61768
} | class ____(TransferFunctionBase):
r"""
A class for representing LTI (Linear, time-invariant) systems that can be
strictly described by ratio of polynomials in the Laplace transform complex variable. The arguments
are ``num``, ``den``, and ``var``, where ``num`` and ``den`` are numerator and
denominator polynomials of the ``TransferFunction`` respectively, and the third argument is
a complex variable of the Laplace transform used by these polynomials of the transfer function.
``num`` and ``den`` can be either polynomials or numbers, whereas ``var``
has to be a :py:class:`~.Symbol`.
See :class:`TransferFunctionBase` for more information.
Parameters
==========
num : Expr, Number
The numerator polynomial of the transfer function.
den : Expr, Number
The denominator polynomial of the transfer function.
var : Symbol
Complex variable of the Laplace transform used by the
polynomials of the transfer function.
Raises
======
TypeError
When ``var`` is not a Symbol or when ``num`` or ``den`` is not a
number or a polynomial.
ValueError
When ``den`` is zero.
Examples
========
>>> from sympy.abc import s, p, a
>>> from sympy.physics.control.lti import TransferFunction
>>> tf1 = TransferFunction(s + a, s**2 + s + 1, s)
>>> tf1
TransferFunction(a + s, s**2 + s + 1, s)
>>> tf1.num
a + s
>>> tf1.den
s**2 + s + 1
>>> tf1.var
s
>>> tf1.args
(a + s, s**2 + s + 1, s)
Any complex variable can be used for ``var``.
>>> tf2 = TransferFunction(a*p**3 - a*p**2 + s*p, p + a**2, p)
>>> tf2
TransferFunction(a*p**3 - a*p**2 + p*s, a**2 + p, p)
>>> tf3 = TransferFunction((p + 3)*(p - 1), (p - 1)*(p + 5), p)
>>> tf3
TransferFunction((p - 1)*(p + 3), (p - 1)*(p + 5), p)
To negate a transfer function the ``-`` operator can be prepended:
>>> tf4 = TransferFunction(-a + s, p**2 + s, p)
>>> -tf4
TransferFunction(a - s, p**2 + s, p)
>>> tf5 = TransferFunction(s**4 - 2*s**3 + 5*s + 4, s + 4, s)
>>> -tf5
TransferFunction(-s**4 + 2*s**3 - 5*s - 4, s + 4, s)
You can use a float or an integer (or other constants) as numerator and denominator:
>>> tf6 = TransferFunction(1/2, 4, s)
>>> tf6.num
0.500000000000000
>>> tf6.den
4
>>> tf6.var
s
>>> tf6.args
(0.5, 4, s)
You can take the integer power of a transfer function using the ``**`` operator:
>>> tf7 = TransferFunction(s + a, s - a, s)
>>> tf7**3
TransferFunction((a + s)**3, (-a + s)**3, s)
>>> tf7**0
TransferFunction(1, 1, s)
>>> tf8 = TransferFunction(p + 4, p - 3, p)
>>> tf8**-1
TransferFunction(p - 3, p + 4, p)
Addition, subtraction, and multiplication of transfer functions can form
unevaluated ``Series`` or ``Parallel`` objects.
>>> tf9 = TransferFunction(s + 1, s**2 + s + 1, s)
>>> tf10 = TransferFunction(s - p, s + 3, s)
>>> tf11 = TransferFunction(4*s**2 + 2*s - 4, s - 1, s)
>>> tf12 = TransferFunction(1 - s, s**2 + 4, s)
>>> tf9 + tf10
Parallel(TransferFunction(s + 1, s**2 + s + 1, s), TransferFunction(-p + s, s + 3, s))
>>> tf10 - tf11
Parallel(TransferFunction(-p + s, s + 3, s), TransferFunction(-4*s**2 - 2*s + 4, s - 1, s))
>>> tf9 * tf10
Series(TransferFunction(s + 1, s**2 + s + 1, s), TransferFunction(-p + s, s + 3, s))
>>> tf10 - (tf9 + tf12)
Parallel(TransferFunction(-p + s, s + 3, s), TransferFunction(-s - 1, s**2 + s + 1, s), TransferFunction(s - 1, s**2 + 4, s))
>>> tf10 - (tf9 * tf12)
Parallel(TransferFunction(-p + s, s + 3, s), Series(TransferFunction(-1, 1, s), TransferFunction(s + 1, s**2 + s + 1, s), TransferFunction(1 - s, s**2 + 4, s)))
>>> tf11 * tf10 * tf9
Series(TransferFunction(4*s**2 + 2*s - 4, s - 1, s), TransferFunction(-p + s, s + 3, s), TransferFunction(s + 1, s**2 + s + 1, s))
>>> tf9 * tf11 + tf10 * tf12
Parallel(Series(TransferFunction(s + 1, s**2 + s + 1, s), TransferFunction(4*s**2 + 2*s - 4, s - 1, s)), Series(TransferFunction(-p + s, s + 3, s), TransferFunction(1 - s, s**2 + 4, s)))
>>> (tf9 + tf12) * (tf10 + tf11)
Series(Parallel(TransferFunction(s + 1, s**2 + s + 1, s), TransferFunction(1 - s, s**2 + 4, s)), Parallel(TransferFunction(-p + s, s + 3, s), TransferFunction(4*s**2 + 2*s - 4, s - 1, s)))
These unevaluated ``Series`` or ``Parallel`` objects can convert into the
resultant transfer function using ``.doit()`` method or by ``.rewrite(TransferFunction)``.
>>> ((tf9 + tf10) * tf12).doit()
TransferFunction((1 - s)*((-p + s)*(s**2 + s + 1) + (s + 1)*(s + 3)), (s + 3)*(s**2 + 4)*(s**2 + s + 1), s)
>>> (tf9 * tf10 - tf11 * tf12).rewrite(TransferFunction)
TransferFunction(-(1 - s)*(s + 3)*(s**2 + s + 1)*(4*s**2 + 2*s - 4) + (-p + s)*(s - 1)*(s + 1)*(s**2 + 4), (s - 1)*(s + 3)*(s**2 + 4)*(s**2 + s + 1), s)
See Also
========
TransferFunctionBase, DiscreteTransferFunction, Feedback, Series, Parallel
"""
def __new__(cls, num, den, var):
return super(TransferFunction, cls).__new__(cls, num, den, var)
@classmethod
def from_rational_expression(cls, expr, var=None):
r"""
See :func:`TransferFunctionBase.from_rational_expression`.
"""
return super().from_rational_expression(expr, var)
@classmethod
def from_coeff_lists(cls, num_list, den_list, var):
r"""
See :func:`TransferFunctionBase.from_coeff_lists`.
"""
return super().from_coeff_lists(num_list, den_list, var)
@classmethod
def from_zpk(cls, zeros, poles, gain, var):
r"""
See :func:`TransferFunctionBase.from_zpk`.
"""
return super().from_zpk(zeros, poles, gain, var)
def dc_gain(self):
r"""
See :func:`TransferFunctionBase.dc_gain`.
"""
m = Mul(self.num, Pow(self.den, -1, evaluate=False), evaluate=False)
return limit(m, self.var, 0)
def get_asymptotic_stability_conditions(
self, cancel_poles_zeros=False, fast=False
) -> list[Boolean]:
r"""
See :func:`TransferFunctionBase.get_asymptotic_stability_conditions`.
"""
standard_form = self.to_standard_form(cancel_poles_zeros)
domain = EXRAW if fast else None
p = Poly(standard_form.den, self.var, domain = domain)
return [c > 0 for c in p.hurwitz_conditions()]
def _eval_rewrite_as_StateSpace(self, *args):
"""
Returns the equivalent space model of the transfer function model.
The state space model will be returned in the controllable canonical
form.
Unlike the space state to transfer function model conversion, the
transfer function to state space model conversion is not unique.
There can be multiple state space representations of a given transfer
function model.
Examples
========
>>> from sympy.abc import s
>>> from sympy.physics.control import TransferFunction, StateSpace
>>> tf = TransferFunction(s**2 + 1, s**3 + 2*s + 10, s)
>>> tf.rewrite(StateSpace)
StateSpace(Matrix([
[ 0, 1, 0],
[ 0, 0, 1],
[-10, -2, 0]]), Matrix([
[0],
[0],
[1]]), Matrix([[1, 0, 1]]), Matrix([[0]]))
"""
A, B, C, D = self._StateSpace_matrices_equivalent()
return StateSpace(A, B, C, D)
def _eval_rewrite_as_DiscreteStateSpace(self, *args):
raise TypeError("""
The continuous transfer function model cannot be rewritten as a
discrete-time state space model.
""")
@property
def sampling_time(self):
"""The sampling time of the transfer function is zero."""
return S.Zero
_is_continuous = True
| TransferFunction |
python | pytorch__pytorch | test/conftest.py | {
"start": 11197,
"end": 13836
} | class ____:
# Modified fromo _pytest/stepwise.py in order to save the currently running
# test instead of the last failed test
def __init__(self, config: Config) -> None:
self.config = config
self.report_status = ""
assert config.cache is not None
self.cache: pytest.Cache = config.cache
directory = f"{STEPCURRENT_CACHE_DIR}/{config.getoption('stepcurrent')}"
self.lastrun_location = f"{directory}/lastrun"
self.lastrun: Optional[str] = self.cache.get(self.lastrun_location, None)
self.initial_val = self.lastrun
self.skip: bool = config.getoption("stepcurrent_skip")
self.run_single: bool = config.getoption("run_single")
self.made_failing_xml_location = f"{directory}/made_failing_xml"
self.cache.set(self.made_failing_xml_location, False)
def pytest_collection_modifyitems(self, config: Config, items: list[Any]) -> None:
if not self.lastrun:
self.report_status = "Cannot find last run test, not skipping"
return
# check all item nodes until we find a match on last run
failed_index = None
for index, item in enumerate(items):
if item.nodeid == self.lastrun:
failed_index = index
if self.skip:
failed_index += 1
break
# If the previously failed test was not found among the test items,
# do not skip any tests.
if failed_index is None:
self.report_status = "previously run test not found, not skipping."
else:
self.report_status = f"skipping {failed_index} already run items."
deselected = items[:failed_index]
del items[:failed_index]
if self.run_single:
self.report_status += f" Running only {items[0].nodeid}"
deselected += items[1:]
del items[1:]
config.hook.pytest_deselected(items=deselected)
def pytest_report_collectionfinish(self) -> Optional[str]:
if self.config.getoption("verbose") >= 0 and self.report_status:
return f"stepcurrent: {self.report_status}"
return None
def pytest_runtest_protocol(self, item, nextitem) -> None:
self.lastrun = item.nodeid
self.cache.set(self.lastrun_location, self.lastrun)
def pytest_sessionfinish(self, session, exitstatus):
if exitstatus == 0:
self.cache.set(self.lastrun_location, self.initial_val)
if exitstatus != 0:
self.cache.set(self.made_failing_xml_location, True)
| StepcurrentPlugin |
python | mlflow__mlflow | mlflow/models/flavor_backend.py | {
"start": 115,
"end": 3490
} | class ____:
"""
Abstract class for Flavor Backend.
This class defines the API interface for local model deployment of MLflow model flavors.
"""
__metaclass__ = ABCMeta
def __init__(self, config, **kwargs):
self._config = config
@abstractmethod
def predict(self, model_uri, input_path, output_path, content_type):
"""
Generate predictions using a saved MLflow model referenced by the given URI.
Input and output are read from and written to a file or stdin / stdout.
Args:
model_uri: URI pointing to the MLflow model to be used for scoring.
input_path: Path to the file with input data. If not specified, data is read from
stdin.
output_path: Path to the file with output predictions. If not specified, data is
written to stdout.
content_type: Specifies the input format. Can be one of {``json``, ``csv``}
"""
@abstractmethod
def serve(
self,
model_uri,
port,
host,
timeout,
enable_mlserver,
synchronous=True,
stdout=None,
stderr=None,
):
"""
Serve the specified MLflow model locally.
Args:
model_uri: URI pointing to the MLflow model to be used for scoring.
port: Port to use for the model deployment.
host: Host to use for the model deployment. Defaults to ``localhost``.
timeout: Timeout in seconds to serve a request. Defaults to 60.
enable_mlserver: Whether to use MLServer or the local scoring server.
synchronous: If True, wait until server process exit and return 0, if process exit
with non-zero return code, raise exception.
If False, return the server process `Popen` instance immediately.
stdout: Redirect server stdout
stderr: Redirect server stderr
"""
def prepare_env(self, model_uri, capture_output=False):
"""
Performs any preparation necessary to predict or serve the model, for example
downloading dependencies or initializing a conda environment. After preparation,
calling predict or serve should be fast.
"""
@abstractmethod
def build_image(
self,
model_uri,
image_name,
install_java=False,
install_mlflow=False,
mlflow_home=None,
enable_mlserver=False,
base_image=None,
): ...
@abstractmethod
def generate_dockerfile(
self,
model_uri,
output_dir,
install_java=False,
install_mlflow=False,
mlflow_home=None,
enable_mlserver=False,
base_image=None,
): ...
@abstractmethod
def can_score_model(self):
"""
Check whether this flavor backend can be deployed in the current environment.
Returns:
True if this flavor backend can be applied in the current environment.
"""
def can_build_image(self):
"""
Returns:
True if this flavor has a `build_image` method defined for building a docker
container capable of serving the model, False otherwise.
"""
return callable(getattr(self.__class__, "build_image", None))
| FlavorBackend |
python | gevent__gevent | src/gevent/tests/test__monkey_ssl_warning3.py | {
"start": 380,
"end": 1330
} | class ____(unittest.TestCase):
@unittest.skipIf(sys.version_info[:2] < (3, 6),
"Only on Python 3.6+")
def test_ssl_subclass_and_module_reference(self):
from gevent import monkey
self.assertFalse(monkey.saved)
with warnings.catch_warnings(record=True) as issued_warnings:
warnings.simplefilter('always')
monkey.patch_all()
monkey.patch_all()
issued_warnings = [x for x in issued_warnings
if isinstance(x.message, monkey.MonkeyPatchWarning)]
self.assertEqual(1, len(issued_warnings))
message = str(issued_warnings[0].message)
self.assertNotIn("Modules that had direct imports", message)
self.assertIn("Subclasses (NOT patched)", message)
# the gevent subclasses should not be in here.
self.assertNotIn('gevent.', message)
if __name__ == '__main__':
unittest.main()
| Test |
python | doocs__leetcode | solution/3300-3399/3386.Button with Longest Push Time/Solution.py | {
"start": 0,
"end": 281
} | class ____:
def buttonWithLongestTime(self, events: List[List[int]]) -> int:
ans, t = events[0]
for (_, t1), (i, t2) in pairwise(events):
d = t2 - t1
if d > t or (d == t and i < ans):
ans, t = i, d
return ans
| Solution |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-zep/llama_index/readers/zep/base.py | {
"start": 170,
"end": 2531
} | class ____(BaseReader):
"""
Zep document vector store reader.
Args:
api_url (str): Zep API URL
api_key (str): Zep API key, optional
"""
def __init__(self, api_url: str, api_key: Optional[str] = None):
"""Initialize with parameters."""
from zep_python import ZepClient
self._api_url = api_url
self._api_key = api_key
self._client = ZepClient(base_url=api_url, api_key=api_key)
def load_data(
self,
collection_name: str,
query: Optional[str] = None,
vector: Optional[List[float]] = None,
metadata: Optional[Dict[str, Any]] = None,
top_k: Optional[int] = 5,
separate_documents: Optional[bool] = True,
include_values: Optional[bool] = True,
) -> List[Document]:
"""
Load data from Zep.
Args:
collection_name (str): Name of the Zep collection.
query (Optional[str]): Query string. Required if vector is None.
vector (Optional[List[float]]): Query vector. Required if query is None.
metadata (Optional[Dict[str, Any]]): Metadata to filter on.
top_k (Optional[int]): Number of results to return. Defaults to 5.
separate_documents (Optional[bool]): Whether to return separate
documents per retrieved entry. Defaults to True.
include_values (Optional[bool]): Whether to include the embedding in
the response. Defaults to True.
Returns:
List[Document]: A list of documents.
"""
if query is None and vector is None:
raise ValueError("Either query or vector must be specified.")
collection = self._client.document.get_collection(name=collection_name)
response = collection.search(
text=query, embedding=vector, limit=top_k, metadata=metadata
)
documents = [
(
Document(text=d.content, embedding=d.embedding)
if include_values
else Document(text=d.content)
)
for d in response
]
if not separate_documents:
text_list = [d.get_text() for d in documents]
text = "\n\n".join(text_list)
documents = [Document(text=text)]
return documents
| ZepReader |
python | django__django | tests/expressions/tests.py | {
"start": 99595,
"end": 100661
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.sday = sday = datetime.date(2010, 6, 25)
cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
cls.ex1 = Experiment.objects.create(
name="Experiment 1",
assigned=sday,
completed=sday + datetime.timedelta(2),
estimated_time=datetime.timedelta(2),
start=stime,
end=stime + datetime.timedelta(2),
)
def test_month_aggregation(self):
self.assertEqual(
Experiment.objects.aggregate(month_count=Count("assigned__month")),
{"month_count": 1},
)
def test_transform_in_values(self):
self.assertSequenceEqual(
Experiment.objects.values("assigned__month"),
[{"assigned__month": 6}],
)
def test_multiple_transforms_in_values(self):
self.assertSequenceEqual(
Experiment.objects.values("end__date__month"),
[{"end__date__month": 6}],
)
| FieldTransformTests |
python | kamyu104__LeetCode-Solutions | Python/long-pressed-name.py | {
"start": 29,
"end": 431
} | class ____(object):
def isLongPressedName(self, name, typed):
"""
:type name: str
:type typed: str
:rtype: bool
"""
i = 0
for j in xrange(len(typed)):
if i < len(name) and name[i] == typed[j]:
i += 1
elif j == 0 or typed[j] != typed[j-1]:
return False
return i == len(name)
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictClosed4.py | {
"start": 1431,
"end": 1670
} | class ____(TypedDict):
name: str
def func2(p1: MovieExtraInt, p2: MovieNotClosed):
# This should generate an error because of a type inconsistency.
extra_int: MovieExtraInt = p2
not_closed: MovieNotClosed = p1
| MovieNotClosed |
python | realpython__materials | python-script-structure/iris_summary.py | {
"start": 838,
"end": 920
} | class ____(StrEnum):
SUMMARY = auto()
METADATA = auto()
@dataclass
| Operation |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_batch_gather_op_test.py | {
"start": 1421,
"end": 25376
} | class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.parameters([
#=========================================================================
# Docstring Example
#=========================================================================
dict(
descr='Docstring example',
params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d'], [],
['e']]),
indices=ragged_factory_ops.constant_value([[1, 2, 0], [], [], [0,
0]]),
expected=ragged_factory_ops.constant_value([[b'b', b'c', b'a'], [],
[], [b'e', b'e']])),
#=========================================================================
# 0 Batch Dimensions
#=========================================================================
dict(
descr='params: [P1], indices: [I], result: [I]',
params=['a', 'b', 'c', 'd'],
indices=[3, 2],
expected=[b'd', b'c']),
dict(
descr='params: [P1, (P2)], indices: [I], result: [I, (P2)]',
params=ragged_factory_ops.constant_value([['a', 'b'], [], ['c'],
['d', 'e']]),
indices=[3, 2],
expected=ragged_factory_ops.constant_value([[b'd', b'e'], [b'c']])),
#=========================================================================
# 1 Batch Dimension
#=========================================================================
dict(
descr='params: [B1, P1], indices: [B1, I], result: [B1, I]',
params=[['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']],
indices=[[2, 0], [0, 1], [1, 0]],
expected=[[b'c', b'a'], [b'd', b'e'], [b'h', b'g']]),
dict(
descr='params: [B1, (P1)], indices: [B1, I], result: [B1, I]',
params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d', 'e'],
['g']]),
indices=[[2, 0], [0, 1], [0, 0]],
expected=[[b'c', b'a'], [b'd', b'e'], [b'g', b'g']]),
dict(
descr='params: [B1, P1], indices: [B1, (I)], result: [B1, (I)]',
params=[['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']],
indices=ragged_factory_ops.constant_value([[2, 0, 2], [0], [1]]),
expected=ragged_factory_ops.constant_value([[b'c', b'a', b'c'],
[b'd'], [b'h']])),
dict(
descr=('params: [B1, (P1), (P2), P3], indices: [B1, I], '
'result: [B1, I, (P2), P3]'),
params=ragged_factory_ops.constant_value(
[[[['a']], [['b'], ['c']]], [[['d'], ['e']], [['f']]], [[['g']]]],
ragged_rank=2),
indices=[[1, 0], [0, 1], [0, 0]],
expected=ragged_factory_ops.constant_value(
[[[[b'b'], [b'c']], [[b'a']]], [[[b'd'], [b'e']], [[b'f']]],
[[[b'g']], [[b'g']]]],
ragged_rank=2)),
#=========================================================================
# 2 Batch Dimensions
#=========================================================================
dict(
descr=('params: [B1, B2, P1], indices: [B1, B2, I], '
'result: [B1, B2, I]'),
params=[[['a', 'b', 'c']], [['d', 'e', 'f']], [['g', 'h', 'i']]],
indices=[[[2, 0]], [[0, 1]], [[1, 0]]],
expected=[[[b'c', b'a']], [[b'd', b'e']], [[b'h', b'g']]]),
dict(
descr=('params: [B1, (B2), P1], indices: [B1, (B2), I], '
'result: [B1, (B2), I]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d', 'e', 'f']], [['g', 'h', 'i']]],
ragged_rank=1),
indices=ragged_factory_ops.constant_value(
[[[2, 0], [0, 1]], [[1, 0]]], ragged_rank=1),
expected=ragged_factory_ops.constant_value(
[[[b'c', b'a'], [b'd', b'e']], [[b'h', b'g']]], ragged_rank=1)),
dict(
descr=('params: [B1, (B2), (P1)], indices: [B1, (B2), I], '
'result: [B1, (B2), I]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d']], [['e', 'f']]], ragged_rank=2),
indices=ragged_factory_ops.constant_value(
[[[2, 0], [0, 0]], [[1, 0]]], ragged_rank=1),
expected=ragged_factory_ops.constant_value(
[[[b'c', b'a'], [b'd', b'd']], [[b'f', b'e']]], ragged_rank=1)),
dict(
descr=('params: [B1, (B2), P1], indices: [B1, (B2), (I)], '
'result: [B1, (B2), (I)]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d', 'e', 'f']], [['g', 'h', 'i']]],
ragged_rank=1),
indices=ragged_factory_ops.constant_value(
[[[2, 1, 0], [0]], [[1, 1]]], ragged_rank=2),
expected=ragged_factory_ops.constant_value(
[[[b'c', b'b', b'a'], [b'd']], [[b'h', b'h']]], ragged_rank=2)),
#=========================================================================
# 3 Batch Dimensions
#=========================================================================
dict(
descr=(
'params: [B1, (B2), (B3), (P1)], indices: [B1, (B2), (B3), I], '
'result: [B1, (B2), (B3), I]'),
params=ragged_factory_ops.constant_value(
[[[['a', 'b', 'c'], ['d']], [['e', 'f']]]], ragged_rank=3),
indices=ragged_factory_ops.constant_value(
[[[[2, 0], [0, 0]], [[1, 0]]]], ragged_rank=2),
expected=ragged_factory_ops.constant_value(
[[[[b'c', b'a'], [b'd', b'd']], [[b'f', b'e']]]], ragged_rank=2)),
])
def testRaggedBatchGather(self, descr, params, indices, expected):
result = ragged_batch_gather_ops.batch_gather(params, indices)
self.assertAllEqual(result, expected)
@parameterized.parameters([
# Docstring example:
dict(
descr='Docstring example',
params=[['a', 'b', 'c'], ['d'], [], ['e']],
indices=[[1, 2, -1], [], [], [0, 10]],
expected=[['b', 'c', 'FOO'], [], [], ['e', 'FOO']],
default_value='FOO',
),
# Dimensions:
# indices: [4]
# params: [2, (d1), (d2)]
dict(
descr='params: [2, (d1), (d2), indices: [4]',
indices=[1, 100, 0, -1],
params=[[['The', 'deal', 'came', 'about', '18', 'months', 'after',
'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-',
'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.'],
['Trumpty', 'Dumpty', 'sat', 'on', 'a', 'wall']],
[["It's", 'always', 'darkest', 'before', 'the', 'dawn']]],
expected=[[["It's", 'always', 'darkest', 'before', 'the', 'dawn']],
[['$NONE^']],
[['The', 'deal', 'came', 'about', '18', 'months', 'after',
'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion',
'-', 'dollar', 'takeover', 'offer', 'from', 'Microsoft',
'.'],
['Trumpty', 'Dumpty', 'sat', 'on', 'a', 'wall']],
[['$NONE^']]],
),
# Dimensions:
# params: [1, (d1)]
# indices: [3]
dict(
descr='params: rank 2, indices: rank 1',
params=[
['Bruce', 'Wayne'],
],
indices=[-1, 0, 1000],
expected=[['$NONE^'], ['Bruce', 'Wayne'], ['$NONE^']]
),
# Dimensions:
# params: [1, (d1)]
# indices: [1, (d2)]
dict(
descr='Test underbound indices of shape [1, (d2)]',
params=[
['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo',
'!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar',
'takeover', 'offer', 'from', 'Microsoft', '.'],
],
indices=[[8, -1]],
expected=[['!', '$NONE^']],
),
dict(
descr='Test underbound indices of shape [2, (d2)]',
params=[
['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo',
'!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar',
'takeover', 'offer', 'from', 'Microsoft', '.'],
['Who', 'let', 'the', 'dogs', 'out', '?'],
],
indices=[[8, -1], [1, 100]],
expected=[['!', '$NONE^'], ['let', '$NONE^']],
),
# Dimensions:
# params: [2, (d1)]
# indices: [2, (d2)]
dict(
descr='Test underbound indices of rank 2',
params=[
['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo',
'!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar',
'takeover', 'offer', 'from', 'Microsoft', '.'],
['He', 'left', 'us', '.', 'Little', 'boys', 'crowded', 'together',
'on', 'long', 'wooden', 'benches', ',', 'and', 'in', 'the',
'center', 'of', 'the', 'room', 'sat', 'the', 'teacher', '.',
'His', 'black', 'beard', 'dripped', 'down', 'over', 'the',
'front', 'of', 'his', 'coat', '.', 'One', 'white', 'hand',
'poised', 'a', 'stick', 'above', 'his', 'desk', '.', 'He',
'turned', 'his', 'surly', ',', 'half', '-', 'closed', 'eyes',
'toward', 'us', ',', 'stared', 'for', 'a', 'second', ',', 'then',
'shouted', 'in', 'Yiddish', ',', '``', 'One', ',', 'two', ',',
'three', "''", '!', '!', 'Rapping', 'the', 'stick', 'against',
'the', 'desk', '.', 'The', 'little', 'boys', 'shrilled', 'out',
'a', 'Yiddish', 'translation', 'or', 'interpretation', 'of',
'the', 'Five', 'Books', 'of', 'Moses', ',', 'which', 'they',
'had', 'previously', 'chanted', 'in', 'Hebrew', '.']],
indices=[[8, -1], [3, 23, 35, 45, 75, 83, -121]],
expected=[['!', '$NONE^'], ['.', '.', '.', '.', '!', '.', '$NONE^']],
),
dict(
descr='Test overbound indices of rank 2',
params=[
['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo',
'!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar',
'takeover', 'offer', 'from', 'Microsoft', '.'],
['He', 'left', 'us', '.', 'Little', 'boys', 'crowded', 'together',
'on', 'long', 'wooden', 'benches', ',', 'and', 'in', 'the',
'center', 'of', 'the', 'room', 'sat', 'the', 'teacher', '.',
'His', 'black', 'beard', 'dripped', 'down', 'over', 'the',
'front', 'of', 'his', 'coat', '.', 'One', 'white', 'hand',
'poised', 'a', 'stick', 'above', 'his', 'desk', '.', 'He',
'turned', 'his', 'surly', ',', 'half', '-', 'closed', 'eyes',
'toward', 'us', ',', 'stared', 'for', 'a', 'second', ',', 'then',
'shouted', 'in', 'Yiddish', ',', '``', 'One', ',', 'two', ',',
'three', "''", '!', '!', 'Rapping', 'the', 'stick', 'against',
'the', 'desk', '.', 'The', 'little', 'boys', 'shrilled', 'out',
'a', 'Yiddish', 'translation', 'or', 'interpretation', 'of',
'the', 'Five', 'Books', 'of', 'Moses', ',', 'which', 'they',
'had', 'previously', 'chanted', 'in', 'Hebrew', '.']],
indices=[[8, 8823], [3, 23, 35, 45, 75, 83, 1234]],
expected=[['!', '$NONE^'], ['.', '.', '.', '.', '!', '.', '$NONE^']],
),
# Dimensions:
# params: [2, (d1), 2]
# indices: [2, (d2)]
dict(
descr='params: rank 3, indices: rank 2',
params=[
[['The', 'deal'], ['takeover', 'offer'], ['from', 'Microsoft']],
[['Who', 'let'], ['the', 'dogs'], ['out', '?']],
],
ragged_rank=1,
indices=[[1, -1, 2, 30], [1, 100]],
indices_ragged_rank=1,
expected=[[['takeover', 'offer'],
['$NONE^', '$NONE^'],
['from', 'Microsoft'],
['$NONE^', '$NONE^']],
[['the', 'dogs'],
['$NONE^', '$NONE^']]],
expected_ragged_rank=1,
default_value=['$NONE^', '$NONE^'],
),
# Dimensions:
# params: [2, (d1), (d2)]
# indices: [2, (d3)]
dict(
descr='params: [2, (d1), (d2)], indices: [2, (d3)]',
params=[
[['The', 'deal', 'came', 'about', '18', 'months', 'after',
'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-',
'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.'],
['Trumpty', 'Dumpty', 'sat', 'on', 'a', 'wall'],
],
[['It\'s', 'always', 'darkest', 'before', 'the', 'dawn']]
],
indices=[[1, 100], [0, -1]],
expected=[[['Trumpty', 'Dumpty', 'sat', 'on', 'a', 'wall'],
['$NONE^']],
[["It's", 'always', 'darkest', 'before', 'the', 'dawn'],
['$NONE^']]]
),
# Dimensions:
# params: [2, (d1), (d2)]
# indices: [2, (d1), (d3)]
dict(
descr='Test overbound indices of rank 3',
params=[
[['The', 'deal', 'came', 'about', '18', 'months', 'after',
'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-',
'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.'],
['Foo', 'bar', 'mar']],
[['He', 'left', 'us', '.', 'Little', 'boys', 'crowded',
'together', 'on', 'long', 'wooden', 'benches', ',', 'and', 'in',
'the', 'center', 'of', 'the', 'room', 'sat', 'the', 'teacher',
'.', 'His', 'black', 'beard', 'dripped', 'down', 'over', 'the',
'front', 'of', 'his', 'coat', '.', 'One', 'white', 'hand',
'poised', 'a', 'stick', 'above', 'his', 'desk', '.', 'He',
'turned', 'his', 'surly', ',', 'half', '-', 'closed', 'eyes',
'toward', 'us', ',', 'stared', 'for', 'a', 'second', ',',
'then', 'shouted', 'in', 'Yiddish', ',', '``', 'One', ',',
'two', ',',
'three', "''", '!', '!', 'Rapping', 'the', 'stick', 'against',
'the', 'desk', '.', 'The', 'little', 'boys', 'shrilled', 'out',
'a', 'Yiddish', 'translation', 'or', 'interpretation', 'of',
'the', 'Five', 'Books', 'of', 'Moses', ',', 'which', 'they',
'had', 'previously', 'chanted', 'in', 'Hebrew', '.'],
['I', 'too', 'was', 'hustled', 'scammed', 'bamboozled', 'hood',
'winked', 'lead', 'astray']]
],
indices=[[[8, 8823], [0, 100]], [[3, 23, 35, 45, 75, 83, 1234], [5]]],
expected=[[['!', '$NONE^'], ['Foo', '$NONE^']],
[['.', '.', '.', '.', '!', '.', '$NONE^'],
['bamboozled']]],
),
# params.shape = [2, (d1), 8]
# indices.shape = [2, (d1), 3]
dict(
descr='params = [2, (2, 1), 8], indices = [2, (2, 1), 3]',
params=[[['h'] * 8, ['w'] * 8], [['b'] * 8]],
ragged_rank=1,
indices=[[[0, 100, 1], [0, 1, 0]], [[1, 0, 0]]],
indices_ragged_rank=1,
expected=[[['h', '$NONE^', 'h'], ['w', 'w', 'w']], [['b', 'b', 'b']]],
expected_ragged_rank=1,
),
])
def testRaggedBatchGatherWithDefault(
self, descr, params, indices, expected, indices_ragged_rank=None,
expected_ragged_rank=None, ragged_rank=None, default_value='$NONE^'):
params = ragged_factory_ops.constant(params, ragged_rank=ragged_rank)
indices = ragged_factory_ops.constant(
indices, ragged_rank=indices_ragged_rank or ragged_rank)
expected = ragged_factory_ops.constant(
expected, ragged_rank=expected_ragged_rank or ragged_rank)
result = ragged_batch_gather_with_default_op.batch_gather_with_default(
params, indices, default_value)
self.assertAllEqual(result, expected)
@parameterized.parameters([
# Dimensions:
# params: dims [2, 5], indices: [2, 2]
dict(
descr='params: dims [2, 5], indices: [2, 2]',
params=[
['The', 'deal', 'came', 'about', '18'],
['He', 'left', 'us', '.', 'Little']],
indices=[[0, -1], [3, 121]],
expected=[['The', '$NONE^'], ['.', '$NONE^']],
default_value='$NONE^',
),
# Dimensions:
# params: dims [2, 2, 5], indices: [2, 2]
dict(
descr='params: dims [2, 2, 5], indices: [2, 2]',
params=[
[['The', 'deal', 'came', 'about', '18'],
['The', 'deal', 'came', 'about', '19'],
],
[['He', 'left', 'us', '.', 'Little'],
['The', 'deal', 'came', 'about', '20'],
]
],
indices=[[0, -1], [0, 121]],
expected=[[['The', 'deal', 'came', 'about', '18'],
['$NONE^', '$NONE^', '$NONE^', '$NONE^', '$NONE^']],
[['He', 'left', 'us', '.', 'Little'],
['$NONE^', '$NONE^', '$NONE^', '$NONE^', '$NONE^']]],
default_value='$NONE^',
),
# Test default_value with shape [5]
dict(
descr='params: dims [2, 2, 5], indices: [2, 2]',
params=[
[['The', 'deal', 'came', 'about', '18'],
['The', 'deal', 'came', 'about', '19'],
],
[['He', 'left', 'us', '.', 'Little'],
['The', 'deal', 'came', 'about', '20'],
]
],
indices=[[0, -1], [0, 121]],
expected=[[['The', 'deal', 'came', 'about', '18'],
[':FOO:', ':FOO:', ':FOO:', ':FOO:', ':FOO:']],
[['He', 'left', 'us', '.', 'Little'],
[':FOO:', ':FOO:', ':FOO:', ':FOO:', ':FOO:']]],
default_value=[':FOO:', ':FOO:', ':FOO:', ':FOO:', ':FOO:'],
),
])
def testRaggedBatchGatherWithDefaultOnTensors(
self, descr, params, indices, expected, default_value):
params = constant_op.constant(params)
indices = constant_op.constant(indices)
expected = constant_op.constant(expected)
result = ragged_batch_gather_with_default_op.batch_gather_with_default(
params, indices, default_value)
self.assertAllEqual(expected, result)
@parameterized.parameters([
dict(
params=[['The', 'deal', 'came', 'about', '18', 'months', 'after',
'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-',
'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.']],
indices=[[[8, -1]]],
# Exception here because different errors are thrown in eager vs
# graph mode.
error=Exception,
default_value='$NONE^',
),
])
def testRankMismatch(
self, params, indices, default_value, error):
params = ragged_factory_ops.constant(params)
indices = ragged_factory_ops.constant(indices)
with self.assertRaises(error):
_ = ragged_batch_gather_with_default_op.batch_gather_with_default(
params, indices, default_value)
@parameterized.parameters([
# Dimensions:
# params: [2, (d1), 2]
# indices: [2, (d2)]
# default_value: []
dict(
descr='params: rank 3, indices: rank 2, default: rank = [], but'
' should be [2]',
params=[
[['The', 'deal'], ['takeover', 'offer'], ['from', 'Microsoft']],
[['Who', 'let'], ['the', 'dogs'], ['out', '?']],
],
ragged_rank=1,
indices=[[1, -1, 2, 30], [1, 100]],
indices_ragged_rank=1,
default_value='$NONE^',
error=Exception,
)
])
def testInvalidDefaultValueRank(
self, descr, params, indices, default_value, error, ragged_rank=None,
indices_ragged_rank=None):
params = ragged_factory_ops.constant(params, ragged_rank=ragged_rank)
indices = ragged_factory_ops.constant(
indices, ragged_rank=indices_ragged_rank)
with self.assertRaises(error):
_ = ragged_batch_gather_with_default_op.batch_gather_with_default(
params, indices, default_value)
def testRaggedBatchGatherUnknownRankError(self):
if context.executing_eagerly():
return
params = [['a', 'b'], ['c', 'd']]
indices = array_ops.placeholder(dtypes.int32, shape=None)
ragged_indices = ragged_tensor.RaggedTensor.from_row_splits(
indices, [0, 2, 4])
with self.assertRaisesRegex(
ValueError, r'batch_dims=-1 may only be negative '
r'if rank\(indices\) is statically known.'):
ragged_batch_gather_ops.batch_gather(params, indices)
with self.assertRaisesRegex(
ValueError, r'batch_dims=-1 may only be negative '
r'if rank\(indices\) is statically known.'):
ragged_batch_gather_ops.batch_gather(params, ragged_indices)
@parameterized.parameters(
[
dict(
params=ragged_factory_ops.constant_value([['a'], ['b'], ['c']]),
indices=ragged_factory_ops.constant_value([[0], [0]]),
message=(r'batch shape from indices .* does not match params')),
dict(
params=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
indices=ragged_factory_ops.constant_value([[[0, 0], [0, 0, 0]],
[[0]]]),
message='batch shape from indices does not match params shape'),
dict( # rank mismatch
params=ragged_factory_ops.constant_value([[[0, 0], [0, 0, 0]],
[[0]]]),
indices=ragged_factory_ops.constant_value([[[0, 0]], [[0, 0, 0]],
[[0]]]),
error=(ValueError, errors.InvalidArgumentError)),
dict(
params=ragged_factory_ops.constant_value([[[0, 0], [0, 0, 0]],
[[0]], [[0]]]),
indices=ragged_factory_ops.constant_value([[[0, 0]], [[0, 0, 0]],
[[0]]]),
error=(ValueError, errors.InvalidArgumentError),
message=(r'batch shape from indices .* does not match '
r'params shape|dimension size mismatch')),
dict(
params=ragged_factory_ops.constant_value(['a', 'b', 'c']),
indices=ragged_factory_ops.constant_value([[0], [0]]),
message=r'batch_dims must be less than rank\(params\)'),
dict(
params=ragged_factory_ops.constant_value([['a']]),
indices=0,
message='batch_dims=-1 out of bounds: expected 0<=batch_dims<0'),
dict(
params=ragged_factory_ops.constant_value([['a']]),
indices=[[[0]]],
message=r'batch_dims must be less than rank\(params\)'),
])
def testRaggedBatchGatherStaticError(self,
params,
indices,
message=None,
error=ValueError):
with self.assertRaisesRegex(error, message):
ragged_batch_gather_ops.batch_gather(params, indices)
if __name__ == '__main__':
googletest.main()
| RaggedBatchGatherOpTest |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/appflow.py | {
"start": 13551,
"end": 16147
} | class ____(AppflowBaseOperator):
"""
Execute an AppFlow run after updating the filters to select only a single day.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AppflowRunDailyOperator`
:param source: The source name (Supported: salesforce)
:param flow_name: The flow name
:param source_field: The field name to apply filters
:param filter_date: The date value (or template) to be used in filters.
:param poll_interval: how often in seconds to check the query status
:param wait_for_completion: whether to wait for the run to end to return
"""
def __init__(
self,
source: str,
flow_name: str,
source_field: str,
filter_date: str,
poll_interval: int = 20,
wait_for_completion: bool = True,
**kwargs,
) -> None:
if not filter_date:
raise ValueError(MANDATORY_FILTER_DATE_MSG.format(entity="AppflowRunDailyOperator"))
if source != "salesforce":
raise ValueError(NOT_SUPPORTED_SOURCE_MSG.format(source=source, entity="AppflowRunDailyOperator"))
super().__init__(
source=source,
flow_name=flow_name,
flow_update=True,
source_field=source_field,
filter_date=filter_date,
poll_interval=poll_interval,
wait_for_completion=wait_for_completion,
**kwargs,
)
def _update_flow(self) -> None:
if not self.filter_date_parsed:
raise ValueError(f"Invalid filter_date argument parser value: {self.filter_date_parsed}")
if not self.source_field:
raise ValueError(f"Invalid source_field argument value: {self.source_field}")
start_filter_date = self.filter_date_parsed - timedelta(milliseconds=1)
end_filter_date = self.filter_date_parsed + timedelta(days=1)
filter_task: TaskTypeDef = {
"taskType": "Filter",
"connectorOperator": {self.connector_type: "BETWEEN"}, # type: ignore
"sourceFields": [self.source_field],
"taskProperties": {
"DATA_TYPE": "datetime",
"LOWER_BOUND": str(datetime_to_epoch_ms(start_filter_date)), # NOT inclusive
"UPPER_BOUND": str(datetime_to_epoch_ms(end_filter_date)), # NOT inclusive
},
}
self.hook.update_flow_filter(
flow_name=self.flow_name, filter_tasks=[filter_task], set_trigger_ondemand=True
)
| AppflowRunDailyOperator |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/integration/cloud/vcenter.py | {
"start": 310,
"end": 833
} | class ____(CloudProvider):
"""VMware vcenter/esx plugin. Sets up cloud resources for tests."""
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args)
self.uses_config = True
def setup(self) -> None:
"""Setup the cloud resource before delegation and register a cleanup callback."""
super().setup()
if not self._use_static_config():
raise ApplicationError('Configuration file does not exist: %s' % self.config_static_path)
| VcenterProvider |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/execution/executors/process.py | {
"start": 211,
"end": 757
} | class ____(object):
def __init__(self):
self.processes = []
self.q = Queue()
def wait_until_finished(self):
for _process in self.processes:
_process.join()
self.q.close()
self.q.join_thread()
def execute(self, fn, *args, **kwargs):
promise = Promise()
self.q.put([promise, fn, args, kwargs], False)
_process = Process(target=queue_process, args=(self.q))
_process.start()
self.processes.append(_process)
return promise
| ProcessExecutor |
python | numpy__numpy | numpy/_core/tests/test_casting_unittests.py | {
"start": 4546,
"end": 5616
} | class ____:
"""
These test cases exercise some behaviour changes
"""
@pytest.mark.parametrize("string", ["S", "U"])
@pytest.mark.parametrize("floating", ["e", "f", "d", "g"])
def test_float_to_string(self, floating, string):
assert np.can_cast(floating, string)
# 100 is long enough to hold any formatted floating
assert np.can_cast(floating, f"{string}100")
def test_to_void(self):
# But in general, we do consider these safe:
assert np.can_cast("d", "V")
assert np.can_cast("S20", "V")
# Do not consider it a safe cast if the void is too smaller:
assert not np.can_cast("d", "V1")
assert not np.can_cast("S20", "V1")
assert not np.can_cast("U1", "V1")
# Structured to unstructured is just like any other:
assert np.can_cast("d,i", "V", casting="same_kind")
# Unstructured void to unstructured is actually no cast at all:
assert np.can_cast("V3", "V", casting="no")
assert np.can_cast("V0", "V", casting="no")
| TestChanges |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/service/multi_device_test.py | {
"start": 1298,
"end": 3681
} | class ____(data_service_test_base.TestBase, parameterized.TestCase):
def setUp(self):
super(MultiDeviceTest, self).setUp()
self._devices = self.configureDevicesForMultiDeviceTest(2)
@combinations.generate(test_base.default_test_combinations())
def testReadDatasetOnDifferentDevices(self):
cluster = data_service_test_base.TestCluster(num_workers=1)
num_elements = 10
with ops.device(self._devices[0]):
dataset = dataset_ops.Dataset.range(num_elements)
element_spec = dataset.element_spec
dataset_id = data_service_ops.register_dataset(
cluster.dispatcher_address(), dataset)
dataset = data_service_ops.from_dataset_id(
processing_mode=data_service_ops.ShardingPolicy.OFF,
service=cluster.dispatcher_address(),
dataset_id=dataset_id,
element_spec=element_spec)
self.assertDatasetProduces(dataset, list(range(num_elements)))
with ops.device(self._devices[1]):
dataset = data_service_ops.from_dataset_id(
processing_mode=data_service_ops.ShardingPolicy.OFF,
service=cluster.dispatcher_address(),
dataset_id=dataset_id,
element_spec=dataset.element_spec)
self.assertDatasetProduces(dataset, list(range(num_elements)))
@combinations.generate(test_base.default_test_combinations())
def testResourceOnWrongDevice(self):
cluster = data_service_test_base.TestCluster(num_workers=1)
with ops.device(self._devices[0]):
initializer = self.lookupTableInitializer("keyvaluetensor", [10, 11])
table = lookup_ops.StaticHashTable(initializer, -1)
self.evaluate(lookup_ops.tables_initializer())
dataset = dataset_ops.Dataset.range(3)
dataset = dataset.map(table.lookup)
dataset = self.make_distributed_dataset(dataset, cluster)
self.assertDatasetProduces(
dataset, [10, 11, -1], requires_initialization=True)
with ops.device(self._devices[1]):
dataset = dataset_ops.Dataset.range(3)
dataset = dataset.map(table.lookup)
with self.assertRaisesRegex(
errors.FailedPreconditionError,
"Serialization error while trying to register a dataset"):
dataset = self.make_distributed_dataset(dataset, cluster)
self.getDatasetOutput(dataset, requires_initialization=True)
if __name__ == "__main__":
test.main()
| MultiDeviceTest |
python | numpy__numpy | numpy/f2py/tests/test_regression.py | {
"start": 4005,
"end": 4427
} | class ____(util.F2PyTest):
# Check that comments are stripped from F90 continuation lines
sources = [util.getpath("tests", "src", "regression", "f90continuation.f90")]
@pytest.mark.slow
def test_gh26148b(self):
x1 = np.array(3, dtype=np.int32)
x2 = np.array(5, dtype=np.int32)
res = self.module.testsub(x1, x2)
assert res[0] == 8
assert res[1] == 15
| TestF90Contiuation |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/nocover/test_build_signature.py | {
"start": 3203,
"end": 3572
} | class ____:
def __init__(x: int = 0, self: bool = True): # noqa
assert not isinstance(x, int)
x.self = self
def test_build_in_from_type_with_self_named_something_else():
find_any(st.from_type(UnconventionalSignature), lambda x: x.self is True)
find_any(st.from_type(UnconventionalSignature), lambda x: x.self is False)
| UnconventionalSignature |
python | huggingface__transformers | src/transformers/models/edgetam/modeling_edgetam.py | {
"start": 23998,
"end": 28587
} | class ____(nn.Module):
def __init__(self, config: EdgeTamPromptEncoderConfig):
super().__init__()
self.shared_embedding = EdgeTamPositionalEmbedding(config)
self.mask_embed = EdgeTamMaskEmbedding(config)
self.no_mask_embed = nn.Embedding(1, config.hidden_size)
self.image_embedding_size = (config.image_size // config.patch_size, config.image_size // config.patch_size)
self.mask_input_size = (4 * config.image_size // config.patch_size, 4 * config.image_size // config.patch_size)
self.input_image_size = config.image_size
self.point_embed = nn.Embedding(config.num_point_embeddings, config.hidden_size)
self.hidden_size = config.hidden_size
self.not_a_point_embed = nn.Embedding(1, config.hidden_size)
def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor:
"""Embeds point prompts."""
points = points + 0.5 # Shift to center of pixel
if pad:
points = torch.nn.functional.pad(points, (0, 0, 0, 1), mode="constant", value=0)
labels = torch.nn.functional.pad(labels, (0, 1), mode="constant", value=-1)
input_shape = (self.input_image_size, self.input_image_size)
point_embedding = self.shared_embedding(points, input_shape)
# torch.where and expanding the labels tensor is required by the ONNX export
point_embedding = torch.where(labels[..., None] == -1, self.not_a_point_embed.weight, point_embedding)
# This is required for the ONNX export. The dtype, device need to be explicitly
# specified as otherwise torch.onnx.export interprets as double
point_embedding = torch.where(
labels[..., None] != -10,
point_embedding,
torch.zeros_like(point_embedding),
)
# Add point embeddings for labels >= 0
point_embedding = point_embedding + self.point_embed(labels.clamp(min=0)) * (labels >= 0).unsqueeze(-1)
return point_embedding
def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
"""Embeds box prompts."""
boxes = boxes + 0.5 # Shift to center of pixel
coords = boxes.view(*boxes.shape[:2], 2, 2)
# add padding point for consistency with the original implementation
coords = torch.nn.functional.pad(coords, (0, 0, 0, 1), mode="constant", value=0)
corner_embedding = self.shared_embedding(coords, (self.input_image_size, self.input_image_size))
corner_embedding[:, :, 0, :] += self.point_embed.weight[2]
corner_embedding[:, :, 1, :] += self.point_embed.weight[3]
corner_embedding[:, :, 2, :] = self.not_a_point_embed.weight.expand_as(corner_embedding[:, :, 2, :])
return corner_embedding
def forward(
self,
input_points: Optional[tuple[torch.Tensor, torch.Tensor]],
input_labels: Optional[torch.Tensor],
input_boxes: Optional[torch.Tensor],
input_masks: Optional[torch.Tensor],
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Embeds different types of prompts, returning both sparse and dense embeddings.
Args:
points (`torch.Tensor`, *optional*):
point coordinates and labels to embed.
boxes (`torch.Tensor`, *optional*):
boxes to embed
masks (`torch.Tensor`, *optional*):
masks to embed
"""
sparse_embeddings = None
batch_size = 1
if input_points is not None:
batch_size = input_points.shape[0]
if input_labels is None:
raise ValueError("If points are provided, labels must also be provided.")
point_embeddings = self._embed_points(input_points, input_labels, pad=(input_boxes is None))
sparse_embeddings = point_embeddings
if input_boxes is not None:
batch_size = input_boxes.shape[0]
box_embeddings = self._embed_boxes(input_boxes)
if sparse_embeddings is None:
sparse_embeddings = box_embeddings
else:
sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=2)
if input_masks is not None:
dense_embeddings = self.mask_embed(input_masks)
else:
dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
batch_size, -1, self.image_embedding_size[0], self.image_embedding_size[1]
)
return sparse_embeddings, dense_embeddings
| EdgeTamPromptEncoder |
python | huggingface__transformers | src/transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py | {
"start": 10323,
"end": 13353
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Ernie4_5_MoeConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = 0.0
self.is_causal = True
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.use_bias)
self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.use_bias)
self.rotary_fn = apply_rotary_pos_emb
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| Ernie4_5_MoeAttention |
python | sympy__sympy | sympy/functions/special/hyper.py | {
"start": 2404,
"end": 3141
} | class ____(DefinedFunction):
""" Base class that takes care of differentiation, when some of
the arguments are actually tuples. """
# This is not deduced automatically since there are Tuples as arguments.
is_commutative = True
def _eval_derivative(self, s):
try:
res = 0
if self.args[0].has(s) or self.args[1].has(s):
for i, p in enumerate(self._diffargs):
m = self._diffargs[i].diff(s)
if m != 0:
res += self.fdiff((1, i))*m
return res + self.fdiff(3)*self.args[2].diff(s)
except (ArgumentIndexError, NotImplementedError):
return Derivative(self, s)
| TupleParametersBase |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linalg_ops_test.py | {
"start": 19390,
"end": 24916
} | class ____(test.TestCase, parameterized.TestCase):
def check_residual(self, matrix, eigvals, eigvectors, atol):
# Test that A*eigvectors is close to eigvectors*diag(eigvals).
l = math_ops.cast(linalg.diag(eigvals), dtype=eigvectors.dtype)
av = math_ops.matmul(matrix, eigvectors)
vl = math_ops.matmul(eigvectors, l)
self.assertAllClose(av, vl, atol=atol)
def check_orthogonality(self, eigvectors, tol):
# Test that eigenvectors are orthogonal.
k = array_ops.shape(eigvectors)[1]
vtv = math_ops.matmul(
eigvectors, eigvectors, adjoint_a=True) - linalg.eye(
k, dtype=eigvectors.dtype)
self.assertAllLess(math_ops.abs(vtv), tol)
def run_test(self, alpha, beta, eigvals_only=True):
n = alpha.shape[0]
matrix = np.diag(alpha) + np.diag(beta, 1) + np.diag(np.conj(beta), -1)
# scipy.linalg.eigh_tridiagonal doesn't support complex inputs, so for
# this we call the slower numpy.linalg.eigh.
if np.issubdtype(alpha.dtype, np.complexfloating):
eigvals_expected, _ = np.linalg.eigh(matrix)
else:
eigvals_expected = scipy.linalg.eigh_tridiagonal(
alpha, beta, eigvals_only=True)
eigvals = linalg.eigh_tridiagonal(alpha, beta, eigvals_only=eigvals_only)
if not eigvals_only:
eigvals, eigvectors = eigvals
eps = np.finfo(alpha.dtype).eps
atol = n * eps * np.amax(np.abs(eigvals_expected))
self.assertAllClose(eigvals_expected, eigvals, atol=atol)
if not eigvals_only:
self.check_orthogonality(eigvectors, 2 * np.sqrt(n) * eps)
self.check_residual(matrix, eigvals, eigvectors, atol)
@parameterized.parameters((np.float32), (np.float64), (np.complex64),
(np.complex128))
def test_small(self, dtype):
for n in [1, 2, 3]:
alpha = np.ones([n], dtype=dtype)
beta = np.ones([n - 1], dtype=dtype)
if np.issubdtype(alpha.dtype, np.complexfloating):
beta += 1j * beta
self.run_test(alpha, beta)
@parameterized.parameters((np.float32), (np.float64), (np.complex64),
(np.complex128))
def test_toeplitz(self, dtype):
n = 8
for a, b in [[2, -1], [1, 0], [0, 1], [-1e10, 1e10], [-1e-10, 1e-10]]:
alpha = a * np.ones([n], dtype=dtype)
beta = b * np.ones([n - 1], dtype=dtype)
if np.issubdtype(alpha.dtype, np.complexfloating):
beta += 1j * beta
self.run_test(alpha, beta)
@parameterized.parameters((np.float32), (np.float64), (np.complex64),
(np.complex128))
def test_random_uniform(self, dtype):
for n in [8, 50]:
alpha = np.random.uniform(size=(n,)).astype(dtype)
beta = np.random.uniform(size=(n - 1,)).astype(dtype)
if np.issubdtype(beta.dtype, np.complexfloating):
beta += 1j * np.random.uniform(size=(n - 1,)).astype(dtype)
self.run_test(alpha, beta)
@parameterized.parameters((np.float32), (np.float64), (np.complex64),
(np.complex128))
def test_select(self, dtype):
n = 4
alpha = np.random.uniform(size=(n,)).astype(dtype)
beta = np.random.uniform(size=(n - 1,)).astype(dtype)
eigvals_all = linalg.eigh_tridiagonal(alpha, beta, select="a")
eps = np.finfo(alpha.dtype).eps
atol = 2 * n * eps
for first in range(n - 1):
for last in range(first + 1, n - 1):
# Check that we get the expected eigenvalues by selecting by
# index range.
eigvals_index = linalg.eigh_tridiagonal(
alpha, beta, select="i", select_range=(first, last))
self.assertAllClose(
eigvals_all[first:(last + 1)], eigvals_index, atol=atol)
# Check that we get the expected eigenvalues by selecting by
# value range.
eigvals_value = linalg.eigh_tridiagonal(
alpha,
beta,
select="v",
select_range=(eigvals_all[first], eigvals_all[last]))
self.assertAllClose(
eigvals_all[first:(last + 1)], eigvals_value, atol=atol)
@parameterized.parameters((np.float32), (np.float64), (np.complex64),
(np.complex128))
def test_extreme_eigenvalues_test(self, dtype):
huge = 0.33 * np.finfo(dtype).max
tiny = 3 * np.finfo(dtype).tiny
for (a, b) in [(tiny, tiny), (huge, np.sqrt(huge))]:
alpha = np.array([-a, -np.sqrt(a), np.sqrt(a), a]).astype(dtype)
beta = b * np.ones([3], dtype=dtype)
if np.issubdtype(alpha.dtype, np.complexfloating):
beta += 1j * beta
@parameterized.parameters((np.float32), (np.float64), (np.complex64),
(np.complex128))
def test_eigenvectors(self, dtype):
if test.is_gpu_available(cuda_only=True) or test_util.is_xla_enabled():
# cuda and XLA do not yet expose the stabilized tridiagonal solver
# needed for inverse iteration.
return
n = 8
alpha = np.random.uniform(size=(n,)).astype(dtype)
beta = np.random.uniform(size=(n - 1,)).astype(dtype)
if np.issubdtype(beta.dtype, np.complexfloating):
beta += 1j * np.random.uniform(size=(n - 1,)).astype(dtype)
self.run_test(alpha, beta, eigvals_only=False)
# Test that we can correctly generate an orthogonal basis for
# a fully degenerate matrix.
eps = np.finfo(dtype).eps
alpha = np.ones(n).astype(dtype)
beta = 0.01 * np.sqrt(eps) * np.ones((n - 1)).astype(dtype)
self.run_test(alpha, beta, eigvals_only=False)
if __name__ == "__main__":
test.main()
| EighTridiagonalTest |
python | dask__dask | dask/array/_array_expr/_reductions.py | {
"start": 10146,
"end": 13664
} | class ____(ArrayExpr):
_parameters = [
"array",
"func",
"split_every",
"keepdims",
"dtype",
"name",
"reduced_meta",
]
_defaults = {
"keepdims": False,
"dtype": None,
"name": None,
"reduced_meta": None,
}
def __dask_tokenize__(self):
if not self._determ_token:
# TODO: Is there an actual need to overwrite this?
self._determ_token = _tokenize_deterministic(
self.func, self.array, self.split_every, self.keepdims, self.dtype
)
return self._determ_token
@cached_property
def _name(self):
return (
(self.operand("name") or funcname(self.func))
+ "-"
+ self.deterministic_token
)
@cached_property
def chunks(self):
chunks = [
(
tuple(1 for p in partition_all(self.split_every[i], c))
if i in self.split_every
else c
)
for (i, c) in enumerate(self.array.chunks)
]
if not self.keepdims:
out_axis = [i for i in range(self.array.ndim) if i not in self.split_every]
getter = lambda k: get(out_axis, k)
chunks = list(getter(chunks))
return tuple(chunks)
def _layer(self):
x = self.array
parts = [
list(partition_all(self.split_every.get(i, 1), range(n)))
for (i, n) in enumerate(x.numblocks)
]
keys = product(*map(range, map(len, parts)))
if not self.keepdims:
out_axis = [i for i in range(x.ndim) if i not in self.split_every]
getter = lambda k: get(out_axis, k)
keys = map(getter, keys)
dsk = {}
for k, p in zip(keys, product(*parts)):
free = {
i: j[0]
for (i, j) in enumerate(p)
if len(j) == 1 and i not in self.split_every
}
dummy = dict(i for i in enumerate(p) if i[0] in self.split_every)
g = lol_tuples((x.name,), range(x.ndim), free, dummy)
dsk[(self._name,) + k] = (self.func, g)
return dsk
@property
def _meta(self):
meta = self.array._meta
if self.reduced_meta is not None:
try:
meta = self.func(self.reduced_meta, computing_meta=True)
# no meta keyword argument exists for func, and it isn't required
except TypeError:
try:
meta = self.func(self.reduced_meta)
except ValueError as e:
# min/max functions have no identity, don't apply function to meta
if "zero-size array to reduction operation" in str(e):
meta = self.reduced_meta
# when no work can be computed on the empty array (e.g., func is a ufunc)
except ValueError:
pass
# some functions can't compute empty arrays (those for which reduced_meta
# fall into the ValueError exception) and we have to rely on reshaping
# the array according to len(out_chunks)
if is_arraylike(meta) and meta.ndim != len(self.chunks):
if len(self.chunks) == 0:
meta = meta.sum()
else:
meta = meta.reshape((0,) * len(self.chunks))
return meta
from dask.array._array_expr._collection import blockwise
| PartialReduce |
python | ray-project__ray | python/ray/serve/multiplex.py | {
"start": 621,
"end": 11015
} | class ____:
"""A wrapper class that wraps the model load function and
provides the LRU caching functionality.
The model multiplexer is a wrapper class that wraps the model load function
and provides the LRU caching functionality, and the model load function should
be a coroutine function that takes the model ID as the first argument and
returns the user-constructed model object.
The model multiplexer will also ensure that the number of models on the current
replica does not exceed the specified limit.
The model will be unloaded in the LRU order, the model multiplexer will call the
model's __del__ attribute if it exists to clean up the model resources eagerly.
"""
_PUSH_MULTIPLEXED_MODEL_IDS_TASK_NAME = "push_multiplexed_model_ids"
def __init__(
self,
model_load_func: Callable[[str], Any],
self_arg: Any,
max_num_models_per_replica: int,
):
"""Initialize the model multiplexer.
Args:
model_load_func: the model load async function.
self_arg: self argument when model_load_func is class method.
max_num_models_per_replica: the maximum number of models to be loaded on the
current replica. If it is -1, there is no limit for the number of models
per replica.
"""
ServeUsageTag.MULTIPLEXED_API_USED.record("1")
self.models = OrderedDict()
self._func: Callable = model_load_func
self.self_arg: Any = self_arg
self.max_num_models_per_replica: int = max_num_models_per_replica
# log MODEL_LOAD_LATENCY_BUCKET_MS
logger.debug(f"MODEL_LOAD_LATENCY_BUCKET_MS: {MODEL_LOAD_LATENCY_BUCKETS_MS}")
self.model_load_latency_ms = metrics.Histogram(
"serve_multiplexed_model_load_latency_ms",
description="The time it takes to load a model.",
boundaries=MODEL_LOAD_LATENCY_BUCKETS_MS,
)
self.model_unload_latency_ms = metrics.Histogram(
"serve_multiplexed_model_unload_latency_ms",
description="The time it takes to unload a model.",
boundaries=MODEL_LOAD_LATENCY_BUCKETS_MS,
)
self.num_models_gauge = metrics.Gauge(
"serve_num_multiplexed_models",
description="The number of models loaded on the current replica.",
)
self.registered_model_gauge = metrics.Gauge(
"serve_registered_multiplexed_model_id",
description="The model id registered on the current replica.",
tag_keys=("model_id",),
)
self.get_model_requests_counter = metrics.Counter(
"serve_multiplexed_get_model_requests_counter",
description="The counter for get model requests on the current replica.",
)
self.models_unload_counter = metrics.Counter(
"serve_multiplexed_models_unload_counter",
description="The counter for unloaded models on the current replica.",
)
self.models_load_counter = metrics.Counter(
"serve_multiplexed_models_load_counter",
description="The counter for loaded models on the current replica.",
)
context = _get_internal_replica_context()
if context is None:
raise RuntimeError(
"`@serve.multiplex` can only be used within a deployment "
"(failed to retrieve Serve replica context)."
)
self._app_name: str = context.app_name
self._deployment_name: str = context.deployment
self._replica_id: ReplicaID = context.replica_id
# Whether to push the multiplexed replica info to the controller.
self._push_multiplexed_replica_info: bool = False
# Model cache lock to ensure that only one model is loading/unloading at a time.
self._model_cache_lock = asyncio.Lock()
# The set of model IDs that are being loaded. This is used to early push
# model ids info to the controller. The tasks will be added when there is cache
# miss, and will be removed when the model is loaded successfully or
# failed to load.
self._model_load_tasks: Set[str] = set()
self.metrics_pusher = MetricsPusher()
self.metrics_pusher.register_or_update_task(
self._PUSH_MULTIPLEXED_MODEL_IDS_TASK_NAME,
self._push_model_ids_info,
PUSH_MULTIPLEXED_MODEL_IDS_INTERVAL_S,
)
self.metrics_pusher.start()
def _get_loading_and_loaded_model_ids(self) -> List[str]:
"""Get the model IDs of the loaded models & loading models in the replica.
This is to push the model id information early to the controller, so that
requests can be routed to the replica.
"""
models_list = set(self.models.keys())
models_list.update(self._model_load_tasks)
return list(models_list)
def _push_model_ids_info(self):
"""Push the multiplexed replica info to the controller."""
try:
self.num_models_gauge.set(len(self.models))
for model_id in self.models:
self.registered_model_gauge.set(1, tags={"model_id": model_id})
if self._push_multiplexed_replica_info:
_get_global_client().record_request_routing_info(
RequestRoutingInfo(
replica_id=self._replica_id,
multiplexed_model_ids=self._get_loading_and_loaded_model_ids(),
)
)
self._push_multiplexed_replica_info = False
except Exception as e:
logger.warning(
"Failed to push the multiplexed replica info "
f"to the controller. Error: {e}"
)
async def shutdown(self):
"""Unload all the models when the model multiplexer is deleted."""
while len(self.models) > 0:
try:
await self.unload_model_lru()
except Exception as e:
logger.exception(
f"Failed to unload model. Error: {e}",
)
async def load_model(self, model_id: str) -> Any:
"""Load the model if it is not loaded yet, and return
the user-constructed model object.
Args:
model_id: the model ID.
Returns:
The user-constructed model object.
"""
if type(model_id) is not str:
raise TypeError("The model ID must be a string.")
if not model_id:
raise ValueError("The model ID cannot be empty.")
self.get_model_requests_counter.inc()
if model_id in self.models:
# Move the model to the end of the OrderedDict to ensure LRU caching.
model = self.models.pop(model_id)
self.models[model_id] = model
return self.models[model_id]
else:
# Set the flag to push the multiplexed replica info to the controller
# before loading the model. This is to make sure we can push the model
# id info to the controller/router early, so that requests can be routed to
# the replica.
self._push_multiplexed_replica_info = True
self._model_load_tasks.add(model_id)
async with self._model_cache_lock:
# Check if the model has been loaded by another request.
if model_id in self.models:
return self.models[model_id]
try:
# If the number of models per replica is specified, check
# if the number of models on the current replica has
# reached the limit.
if (
self.max_num_models_per_replica > 0
and len(self.models) >= self.max_num_models_per_replica
):
# Unload the least recently used model.
await self.unload_model_lru()
self._push_multiplexed_replica_info = True
# Load the model.
logger.info(f"Loading model '{model_id}'.")
self.models_load_counter.inc()
load_start_time = time.time()
if self.self_arg is None:
self.models[model_id] = await self._func(model_id)
else:
self.models[model_id] = await self._func(
self.self_arg, model_id
)
load_latency_ms = (time.time() - load_start_time) * 1000.0
logger.info(
f"Successfully loaded model '{model_id}' in "
f"{load_latency_ms:.1f}ms."
)
self._model_load_tasks.discard(model_id)
self.model_load_latency_ms.observe(load_latency_ms)
return self.models[model_id]
except Exception as e:
logger.error(
f"Failed to load model '{model_id}'. Error: {e}",
)
self._model_load_tasks.discard(model_id)
raise e
async def unload_model_lru(self) -> None:
"""Unload the least recently used model."""
self.models_unload_counter.inc()
unload_start_time = time.time()
model_id, model = self.models.popitem(last=False)
logger.info(f"Unloading model '{model_id}'.")
# If the model has __del__ attribute, call it.
# This is to clean up the model resources eagerly.
if hasattr(model, "__del__"):
if not inspect.iscoroutinefunction(model.__del__):
await asyncio.get_running_loop().run_in_executor(None, model.__del__)
else:
await model.__del__()
model.__del__ = lambda _: None
unload_latency_ms = (time.time() - unload_start_time) * 1000.0
self.model_unload_latency_ms.observe(unload_latency_ms)
logger.info(
f"Successfully unloaded model '{model_id}' in {unload_latency_ms:.1f}ms."
)
self.registered_model_gauge.set(0, tags={"model_id": model_id})
| _ModelMultiplexWrapper |
python | huggingface__transformers | src/transformers/models/clap/modeling_clap.py | {
"start": 50760,
"end": 51626
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.self = ClapTextSelfAttention(config)
self.output = ClapTextSelfOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
**kwargs,
) -> tuple[torch.Tensor]:
self_outputs = self.self(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
**kwargs,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
| ClapTextAttention |
python | apache__airflow | providers/google/tests/unit/google/cloud/sensors/test_dataplex.py | {
"start": 3660,
"end": 6160
} | class ____:
def run_job(self, state: int):
job = mock.Mock()
job.state = state
return job
@mock.patch(DATAPLEX_HOOK)
def test_done(self, mock_hook):
job = self.run_job(DataScanJob.State.SUCCEEDED)
mock_hook.return_value.get_data_scan_job.return_value = job
sensor = DataplexDataQualityJobStatusSensor(
task_id=TASK_ID,
project_id=PROJECT_ID,
job_id=TEST_JOB_ID,
data_scan_id=TEST_DATA_SCAN_ID,
region=REGION,
api_version=API_VERSION,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
timeout=TIMEOUT,
)
result = sensor.poke(context={})
mock_hook.return_value.get_data_scan_job.assert_called_once_with(
project_id=PROJECT_ID,
region=REGION,
job_id=TEST_JOB_ID,
data_scan_id=TEST_DATA_SCAN_ID,
timeout=TIMEOUT,
retry=DEFAULT,
metadata=(),
)
assert result
def test_start_sensor_time(self):
sensor = DataplexDataQualityJobStatusSensor(
task_id=TASK_ID,
project_id=PROJECT_ID,
job_id=TEST_JOB_ID,
data_scan_id=TEST_DATA_SCAN_ID,
region=REGION,
api_version=API_VERSION,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
timeout=TIMEOUT,
)
assert sensor.start_sensor_time is None
duration_1 = sensor._duration()
duration_2 = sensor._duration()
assert sensor.start_sensor_time
assert 0 < duration_1 < duration_2
@mock.patch.object(DataplexDataQualityJobStatusSensor, "_duration")
def test_start_sensor_time_timeout(self, mock_duration):
result_timeout = 100
mock_duration.return_value = result_timeout + 1
sensor = DataplexDataQualityJobStatusSensor(
task_id=TASK_ID,
project_id=PROJECT_ID,
job_id=TEST_JOB_ID,
data_scan_id=TEST_DATA_SCAN_ID,
region=REGION,
api_version=API_VERSION,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
timeout=TIMEOUT,
result_timeout=result_timeout,
)
with pytest.raises(AirflowDataQualityScanResultTimeoutException):
sensor.poke(context={})
| TestDataplexDataQualityJobStatusSensor |
python | openai__gym | gym/wrappers/record_video.py | {
"start": 660,
"end": 8310
} | class ____(gym.Wrapper):
"""This wrapper records videos of rollouts.
Usually, you only want to record episodes intermittently, say every hundredth episode.
To do this, you can specify **either** ``episode_trigger`` **or** ``step_trigger`` (not both).
They should be functions returning a boolean that indicates whether a recording should be started at the
current episode or step, respectively.
If neither :attr:`episode_trigger` nor ``step_trigger`` is passed, a default ``episode_trigger`` will be employed.
By default, the recording will be stopped once a `terminated` or `truncated` signal has been emitted by the environment. However, you can
also create recordings of fixed length (possibly spanning several episodes) by passing a strictly positive value for
``video_length``.
"""
def __init__(
self,
env: gym.Env,
video_folder: str,
episode_trigger: Callable[[int], bool] = None,
step_trigger: Callable[[int], bool] = None,
video_length: int = 0,
name_prefix: str = "rl-video",
):
"""Wrapper records videos of rollouts.
Args:
env: The environment that will be wrapped
video_folder (str): The folder where the recordings will be stored
episode_trigger: Function that accepts an integer and returns ``True`` iff a recording should be started at this episode
step_trigger: Function that accepts an integer and returns ``True`` iff a recording should be started at this step
video_length (int): The length of recorded episodes. If 0, entire episodes are recorded.
Otherwise, snippets of the specified length are captured
name_prefix (str): Will be prepended to the filename of the recordings
"""
super().__init__(env)
if episode_trigger is None and step_trigger is None:
episode_trigger = capped_cubic_video_schedule
trigger_count = sum(x is not None for x in [episode_trigger, step_trigger])
assert trigger_count == 1, "Must specify exactly one trigger"
self.episode_trigger = episode_trigger
self.step_trigger = step_trigger
self.video_recorder: Optional[video_recorder.VideoRecorder] = None
self.video_folder = os.path.abspath(video_folder)
# Create output folder if needed
if os.path.isdir(self.video_folder):
logger.warn(
f"Overwriting existing videos at {self.video_folder} folder "
f"(try specifying a different `video_folder` for the `RecordVideo` wrapper if this is not desired)"
)
os.makedirs(self.video_folder, exist_ok=True)
self.name_prefix = name_prefix
self.step_id = 0
self.video_length = video_length
self.recording = False
self.terminated = False
self.truncated = False
self.recorded_frames = 0
self.is_vector_env = getattr(env, "is_vector_env", False)
self.episode_id = 0
def reset(self, **kwargs):
"""Reset the environment using kwargs and then starts recording if video enabled."""
observations = super().reset(**kwargs)
self.terminated = False
self.truncated = False
if self.recording:
assert self.video_recorder is not None
self.video_recorder.frames = []
self.video_recorder.capture_frame()
self.recorded_frames += 1
if self.video_length > 0:
if self.recorded_frames > self.video_length:
self.close_video_recorder()
elif self._video_enabled():
self.start_video_recorder()
return observations
def start_video_recorder(self):
"""Starts video recorder using :class:`video_recorder.VideoRecorder`."""
self.close_video_recorder()
video_name = f"{self.name_prefix}-step-{self.step_id}"
if self.episode_trigger:
video_name = f"{self.name_prefix}-episode-{self.episode_id}"
base_path = os.path.join(self.video_folder, video_name)
self.video_recorder = video_recorder.VideoRecorder(
env=self.env,
base_path=base_path,
metadata={"step_id": self.step_id, "episode_id": self.episode_id},
)
self.video_recorder.capture_frame()
self.recorded_frames = 1
self.recording = True
def _video_enabled(self):
if self.step_trigger:
return self.step_trigger(self.step_id)
else:
return self.episode_trigger(self.episode_id)
def step(self, action):
"""Steps through the environment using action, recording observations if :attr:`self.recording`."""
(
observations,
rewards,
terminateds,
truncateds,
infos,
) = self.env.step(action)
if not (self.terminated or self.truncated):
# increment steps and episodes
self.step_id += 1
if not self.is_vector_env:
if terminateds or truncateds:
self.episode_id += 1
self.terminated = terminateds
self.truncated = truncateds
elif terminateds[0] or truncateds[0]:
self.episode_id += 1
self.terminated = terminateds[0]
self.truncated = truncateds[0]
if self.recording:
assert self.video_recorder is not None
self.video_recorder.capture_frame()
self.recorded_frames += 1
if self.video_length > 0:
if self.recorded_frames > self.video_length:
self.close_video_recorder()
else:
if not self.is_vector_env:
if terminateds or truncateds:
self.close_video_recorder()
elif terminateds[0] or truncateds[0]:
self.close_video_recorder()
elif self._video_enabled():
self.start_video_recorder()
return observations, rewards, terminateds, truncateds, infos
def close_video_recorder(self):
"""Closes the video recorder if currently recording."""
if self.recording:
assert self.video_recorder is not None
self.video_recorder.close()
self.recording = False
self.recorded_frames = 1
def render(self, *args, **kwargs):
"""Compute the render frames as specified by render_mode attribute during initialization of the environment or as specified in kwargs."""
if self.video_recorder is None or not self.video_recorder.enabled:
return super().render(*args, **kwargs)
if len(self.video_recorder.render_history) > 0:
recorded_frames = [
self.video_recorder.render_history.pop()
for _ in range(len(self.video_recorder.render_history))
]
if self.recording:
return recorded_frames
else:
return recorded_frames + super().render(*args, **kwargs)
else:
if self.recording:
return self.video_recorder.last_frame
else:
return super().render(*args, **kwargs)
def close(self):
"""Closes the wrapper then the video recorder."""
super().close()
self.close_video_recorder()
def __del__(self):
"""Closes the video recorder."""
self.close_video_recorder()
| RecordVideo |
python | tensorflow__tensorflow | tensorflow/python/profiler/profiler_wrapper_test.py | {
"start": 934,
"end": 1690
} | class ____(test_util.TensorFlowTestCase):
def test_xspace_to_tools_data_default_options(self):
# filenames only used for `hlo_proto` tool.
profiler_wrapper_plugin.xspace_to_tools_data([], 'trace_viewer')
def _test_xspace_to_tools_data_options(self, options):
profiler_wrapper_plugin.xspace_to_tools_data([], 'trace_viewer', options)
def test_xspace_to_tools_data_empty_options(self):
self._test_xspace_to_tools_data_options({})
def test_xspace_to_tools_data_int_options(self):
self._test_xspace_to_tools_data_options({'example_option': 0})
def test_xspace_to_tools_data_str_options(self):
self._test_xspace_to_tools_data_options({'example_option': 'example'})
if __name__ == '__main__':
test.main()
| ProfilerSessionTest |
python | sympy__sympy | sympy/polys/domains/gaussiandomains.py | {
"start": 5978,
"end": 7296
} | class ____(GaussianElement[MPZ]):
"""Gaussian integer: domain element for :ref:`ZZ_I`
>>> from sympy import ZZ_I
>>> z = ZZ_I(2, 3)
>>> z
(2 + 3*I)
>>> type(z)
<class 'sympy.polys.domains.gaussiandomains.GaussianInteger'>
"""
base = ZZ
def __truediv__(self, other: GaussianElement | int) -> GaussianRational:
"""Return a Gaussian rational."""
return QQ_I.convert(self)/other
def __divmod__(self, other: GaussianInteger | int) -> tuple[GaussianInteger, GaussianInteger]:
if not other:
raise ZeroDivisionError('divmod({}, 0)'.format(self))
other_conv = self._get_xy(other)
if other_conv is None:
return NotImplemented
x, y = other_conv
# multiply self and other by x - I*y
# self/other == (a + I*b)/c
a, b = self.x*x + self.y*y, -self.x*y + self.y*x
c = x*x + y*y
# find integers qx and qy such that
# |a - qx*c| <= c/2 and |b - qy*c| <= c/2
qx = (2*a + c) // (2*c) # -c <= 2*a - qx*2*c < c
qy = (2*b + c) // (2*c)
q = GaussianInteger(qx, qy)
# |self/other - q| < 1 since
# |a/c - qx|**2 + |b/c - qy|**2 <= 1/4 + 1/4 < 1
return q, self - q*other # |r| < |other|
| GaussianInteger |
python | ray-project__ray | rllib/examples/rl_modules/classes/tiny_atari_cnn_rlm.py | {
"start": 618,
"end": 7812
} | class ____(TorchRLModule, ValueFunctionAPI, TargetNetworkAPI):
"""A tiny CNN stack for fast-learning of Atari envs.
The architecture here is the exact same as the one used by the old API stack as
CNN default ModelV2.
We stack 3 CNN layers based on the config, then a 4th one with linear activation
and n 1x1 filters, where n is the number of actions in the (discrete) action space.
Simple reshaping (no flattening or extra linear layers necessary) lead to the
action logits, which can directly be used inside a distribution or loss.
.. testcode::
import numpy as np
import gymnasium as gym
my_net = TinyAtariCNN(
observation_space=gym.spaces.Box(-1.0, 1.0, (42, 42, 4), np.float32),
action_space=gym.spaces.Discrete(4),
)
B = 10
w = 42
h = 42
c = 4
data = torch.from_numpy(
np.random.random_sample(size=(B, w, h, c)).astype(np.float32)
)
print(my_net.forward_inference({"obs": data}))
print(my_net.forward_exploration({"obs": data}))
print(my_net.forward_train({"obs": data}))
num_all_params = sum(int(np.prod(p.size())) for p in my_net.parameters())
print(f"num params = {num_all_params}")
"""
@override(TorchRLModule)
def setup(self):
"""Use this method to create all the model components that you require.
Feel free to access the following useful properties in this class:
- `self.model_config`: The config dict for this RLModule class,
which should contain flxeible settings, for example: {"hiddens": [256, 256]}.
- `self.observation|action_space`: The observation and action space that
this RLModule is subject to. Note that the observation space might not be the
exact space from your env, but that it might have already gone through
preprocessing through a connector pipeline (for example, flattening,
frame-stacking, mean/std-filtering, etc..).
"""
# Get the CNN stack config from our RLModuleConfig's (self.config)
# `model_config` property:
conv_filters = self.model_config.get("conv_filters")
# Default CNN stack with 3 layers:
if conv_filters is None:
conv_filters = [
[16, 4, 2, "same"], # num filters, kernel wxh, stride wxh, padding type
[32, 4, 2, "same"],
[256, 11, 1, "valid"],
]
# Build the CNN layers.
layers = []
# Add user-specified hidden convolutional layers first
width, height, in_depth = self.observation_space.shape
in_size = [width, height]
for filter_specs in conv_filters:
if len(filter_specs) == 4:
out_depth, kernel_size, strides, padding = filter_specs
else:
out_depth, kernel_size, strides = filter_specs
padding = "same"
# Pad like in tensorflow's SAME mode.
if padding == "same":
padding_size, out_size = same_padding(in_size, kernel_size, strides)
layers.append(nn.ZeroPad2d(padding_size))
# No actual padding is performed for "valid" mode, but we will still
# compute the output size (input for the next layer).
else:
out_size = valid_padding(in_size, kernel_size, strides)
layer = nn.Conv2d(in_depth, out_depth, kernel_size, strides, bias=True)
# Initialize CNN layer kernel and bias.
nn.init.xavier_uniform_(layer.weight)
nn.init.zeros_(layer.bias)
layers.append(layer)
# Activation.
layers.append(nn.ReLU())
in_size = out_size
in_depth = out_depth
self._base_cnn_stack = nn.Sequential(*layers)
# Add the final CNN 1x1 layer with num_filters == num_actions to be reshaped to
# yield the logits (no flattening, no additional linear layers required).
_final_conv = nn.Conv2d(in_depth, self.action_space.n, 1, 1, bias=True)
nn.init.xavier_uniform_(_final_conv.weight)
nn.init.zeros_(_final_conv.bias)
self._logits = nn.Sequential(
nn.ZeroPad2d(same_padding(in_size, 1, 1)[0]), _final_conv
)
self._values = nn.Linear(in_depth, 1)
# Mimick old API stack behavior of initializing the value function with `normc`
# std=0.01.
normc_initializer(0.01)(self._values.weight)
@override(TorchRLModule)
def _forward(self, batch, **kwargs):
# Compute the basic 1D feature tensor (inputs to policy- and value-heads).
_, logits = self._compute_embeddings_and_logits(batch)
# Return features and logits as ACTION_DIST_INPUTS (categorical distribution).
return {
Columns.ACTION_DIST_INPUTS: logits,
}
@override(TorchRLModule)
def _forward_train(self, batch, **kwargs):
# Compute the basic 1D feature tensor (inputs to policy- and value-heads).
embeddings, logits = self._compute_embeddings_and_logits(batch)
# Return features and logits as ACTION_DIST_INPUTS (categorical distribution).
return {
Columns.ACTION_DIST_INPUTS: logits,
Columns.EMBEDDINGS: embeddings,
}
# We implement this RLModule as a TargetNetworkAPI RLModule, so it can be used
# by the APPO algorithm.
@override(TargetNetworkAPI)
def make_target_networks(self) -> None:
self._target_base_cnn_stack = make_target_network(self._base_cnn_stack)
self._target_logits = make_target_network(self._logits)
@override(TargetNetworkAPI)
def get_target_network_pairs(self):
return [
(self._base_cnn_stack, self._target_base_cnn_stack),
(self._logits, self._target_logits),
]
@override(TargetNetworkAPI)
def forward_target(self, batch, **kw):
obs = batch[Columns.OBS].permute(0, 3, 1, 2)
embeddings = self._target_base_cnn_stack(obs)
logits = self._target_logits(embeddings)
return {TARGET_NETWORK_ACTION_DIST_INPUTS: torch.squeeze(logits, dim=[-1, -2])}
# We implement this RLModule as a ValueFunctionAPI RLModule, so it can be used
# by value-based methods like PPO or IMPALA.
@override(ValueFunctionAPI)
def compute_values(
self,
batch: Dict[str, Any],
embeddings: Optional[Any] = None,
) -> TensorType:
# Features not provided -> We need to compute them first.
if embeddings is None:
obs = batch[Columns.OBS]
embeddings = self._base_cnn_stack(obs.permute(0, 3, 1, 2))
embeddings = torch.squeeze(embeddings, dim=[-1, -2])
return self._values(embeddings).squeeze(-1)
def _compute_embeddings_and_logits(self, batch):
obs = batch[Columns.OBS].permute(0, 3, 1, 2)
embeddings = self._base_cnn_stack(obs)
logits = self._logits(embeddings)
return (
torch.squeeze(embeddings, dim=[-1, -2]),
torch.squeeze(logits, dim=[-1, -2]),
)
| TinyAtariCNN |
python | openai__openai-python | src/openai/types/beta/threads/run_submit_tool_outputs_params.py | {
"start": 1222,
"end": 1643
} | class ____(RunSubmitToolOutputsParamsBase):
stream: Required[Literal[True]]
"""
If `true`, returns a stream of events that happen during the Run as server-sent
events, terminating when the Run enters a terminal state with a `data: [DONE]`
message.
"""
RunSubmitToolOutputsParams = Union[RunSubmitToolOutputsParamsNonStreaming, RunSubmitToolOutputsParamsStreaming]
| RunSubmitToolOutputsParamsStreaming |
python | pytorch__pytorch | test/dynamo/test_autograd_function.py | {
"start": 3139,
"end": 3284
} | class ____(torch.nn.Module):
def forward(self, input, weight, bias=None):
return LinearFunction.apply(input, weight, bias)
| ModuleLinear |
python | kamyu104__LeetCode-Solutions | Python/minimum-factorization.py | {
"start": 32,
"end": 449
} | class ____(object):
def smallestFactorization(self, a):
"""
:type a: int
:rtype: int
"""
if a < 2:
return a
result, mul = 0, 1
for i in reversed(xrange(2, 10)):
while a % i == 0:
a /= i
result = mul*i + result
mul *= 10
return result if a == 1 and result < 2**31 else 0
| Solution |
python | keras-team__keras | keras/src/initializers/random_initializers.py | {
"start": 6735,
"end": 11083
} | class ____(RandomInitializer):
"""Initializer that adapts its scale to the shape of its input tensors.
With `distribution="truncated_normal" or "untruncated_normal"`, samples are
drawn from a truncated/untruncated normal distribution with a mean of zero
and a standard deviation (after truncation, if used) `stddev = sqrt(scale /
n)`, where `n` is:
- number of input units in the weight tensor, if `mode="fan_in"`
- number of output units, if `mode="fan_out"`
- average of the numbers of input and output units, if `mode="fan_avg"`
With `distribution="uniform"`, samples are drawn from a uniform distribution
within `[-limit, limit]`, where `limit = sqrt(3 * scale / n)`.
Examples:
>>> # Standalone usage:
>>> initializer = VarianceScaling(
scale=0.1, mode='fan_in', distribution='uniform')
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = VarianceScaling(
scale=0.1, mode='fan_in', distribution='uniform')
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
scale: Scaling factor (positive float).
mode: One of `"fan_in"`, `"fan_out"`, `"fan_avg"`.
distribution: Random distribution to use.
One of `"truncated_normal"`, `"untruncated_normal"`, or `"uniform"`.
seed: A Python integer or instance of
`keras.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.backend.SeedGenerator`.
"""
def __init__(
self,
scale=1.0,
mode="fan_in",
distribution="truncated_normal",
seed=None,
):
if scale <= 0.0:
raise ValueError(
"Argument `scale` must be positive float. "
f"Received: scale={scale}"
)
allowed_modes = {"fan_in", "fan_out", "fan_avg"}
if mode not in allowed_modes:
raise ValueError(
f"Invalid `mode` argument: {mode}. "
f"Please use one of {allowed_modes}"
)
distribution = distribution.lower()
if distribution == "normal":
distribution = "truncated_normal"
allowed_distributions = {
"uniform",
"truncated_normal",
"untruncated_normal",
}
if distribution not in allowed_distributions:
raise ValueError(
f"Invalid `distribution` argument: {distribution}."
f"Please use one of {allowed_distributions}"
)
self.scale = scale
self.mode = mode
self.distribution = distribution
super().__init__(seed=seed)
def __call__(self, shape, dtype=None):
scale = self.scale
fan_in, fan_out = compute_fans(shape)
if self.mode == "fan_in":
scale /= max(1.0, fan_in)
elif self.mode == "fan_out":
scale /= max(1.0, fan_out)
else:
scale /= max(1.0, (fan_in + fan_out) / 2.0)
if self.distribution == "truncated_normal":
stddev = math.sqrt(scale) / 0.87962566103423978
return random.truncated_normal(
shape, mean=0.0, stddev=stddev, dtype=dtype, seed=self.seed
)
elif self.distribution == "untruncated_normal":
stddev = math.sqrt(scale)
return random.normal(
shape, mean=0.0, stddev=stddev, dtype=dtype, seed=self.seed
)
else:
limit = math.sqrt(3.0 * scale)
return random.uniform(
shape, minval=-limit, maxval=limit, dtype=dtype, seed=self.seed
)
def get_config(self):
base_config = super().get_config()
config = {
"scale": self.scale,
"mode": self.mode,
"distribution": self.distribution,
}
return {**base_config, **config}
@keras_export(
[
"keras.initializers.GlorotUniform",
"keras.initializers.glorot_uniform",
]
)
| VarianceScaling |
python | huggingface__transformers | src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py | {
"start": 31760,
"end": 36862
} | class ____(RobertaPreLayerNormPreTrainedModel, GenerationMixin):
_tied_weights_keys = {
"lm_head.decoder.weight": "roberta_prelayernorm.embeddings.word_embeddings.weight",
"lm_head.decoder.bias": "lm_head.bias",
}
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning(
"If you want to use `RobertaPreLayerNormLMHeadModel` as a standalone, add `is_decoder=True.`"
)
self.roberta_prelayernorm = RobertaPreLayerNormModel(config, add_pooling_layer=False)
self.lm_head = RobertaPreLayerNormLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
r"""
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
>= 2. All the value in this tensor should be always < type_vocab_size.
[What are token type IDs?](../glossary#token-type-ids)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Example:
```python
>>> from transformers import AutoTokenizer, RobertaPreLayerNormForCausalLM, AutoConfig
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("andreasmadsen/efficient_mlm_m0.40")
>>> config = AutoConfig.from_pretrained("andreasmadsen/efficient_mlm_m0.40")
>>> config.is_decoder = True
>>> model = RobertaPreLayerNormForCausalLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```"""
if labels is not None:
use_cache = False
outputs: BaseModelOutputWithPoolingAndCrossAttentions = self.roberta_prelayernorm(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
return_dict=True,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
@auto_docstring(
custom_intro="""
RoBERTa-PreLayerNorm Model with a `language modeling` head on top.
"""
)
| RobertaPreLayerNormForCausalLM |
python | Netflix__metaflow | metaflow/_vendor/click/_compat.py | {
"start": 19083,
"end": 24169
} | class ____(object):
def __init__(self, f, tmp_filename, real_filename):
self._f = f
self._tmp_filename = tmp_filename
self._real_filename = real_filename
self.closed = False
@property
def name(self):
return self._real_filename
def close(self, delete=False):
if self.closed:
return
self._f.close()
if not _can_replace:
try:
os.remove(self._real_filename)
except OSError:
pass
_replace(self._tmp_filename, self._real_filename)
self.closed = True
def __getattr__(self, name):
return getattr(self._f, name)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close(delete=exc_type is not None)
def __repr__(self):
return repr(self._f)
auto_wrap_for_ansi = None
colorama = None
get_winterm_size = None
def strip_ansi(value):
return _ansi_re.sub("", value)
def _is_jupyter_kernel_output(stream):
if WIN:
# TODO: Couldn't test on Windows, should't try to support until
# someone tests the details wrt colorama.
return
while isinstance(stream, (_FixupStream, _NonClosingTextIOWrapper)):
stream = stream._stream
return stream.__class__.__module__.startswith("ipykernel.")
def should_strip_ansi(stream=None, color=None):
if color is None:
if stream is None:
stream = sys.stdin
return not isatty(stream) and not _is_jupyter_kernel_output(stream)
return not color
# If we're on Windows, we provide transparent integration through
# colorama. This will make ANSI colors through the echo function
# work automatically.
if WIN:
# Windows has a smaller terminal
DEFAULT_COLUMNS = 79
from ._winconsole import _get_windows_console_stream, _wrap_std_stream
def _get_argv_encoding():
import locale
return locale.getpreferredencoding()
if PY2:
def raw_input(prompt=""):
sys.stderr.flush()
if prompt:
stdout = _default_text_stdout()
stdout.write(prompt)
stdin = _default_text_stdin()
return stdin.readline().rstrip("\r\n")
try:
import colorama
except ImportError:
pass
else:
_ansi_stream_wrappers = WeakKeyDictionary()
def auto_wrap_for_ansi(stream, color=None):
"""This function wraps a stream so that calls through colorama
are issued to the win32 console API to recolor on demand. It
also ensures to reset the colors if a write call is interrupted
to not destroy the console afterwards.
"""
try:
cached = _ansi_stream_wrappers.get(stream)
except Exception:
cached = None
if cached is not None:
return cached
strip = should_strip_ansi(stream, color)
ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip)
rv = ansi_wrapper.stream
_write = rv.write
def _safe_write(s):
try:
return _write(s)
except:
ansi_wrapper.reset_all()
raise
rv.write = _safe_write
try:
_ansi_stream_wrappers[stream] = rv
except Exception:
pass
return rv
def get_winterm_size():
win = colorama.win32.GetConsoleScreenBufferInfo(
colorama.win32.STDOUT
).srWindow
return win.Right - win.Left, win.Bottom - win.Top
else:
def _get_argv_encoding():
return getattr(sys.stdin, "encoding", None) or get_filesystem_encoding()
_get_windows_console_stream = lambda *x: None
_wrap_std_stream = lambda *x: None
def term_len(x):
return len(strip_ansi(x))
def isatty(stream):
try:
return stream.isatty()
except Exception:
return False
def _make_cached_stream_func(src_func, wrapper_func):
cache = WeakKeyDictionary()
def func():
stream = src_func()
try:
rv = cache.get(stream)
except Exception:
rv = None
if rv is not None:
return rv
rv = wrapper_func()
try:
stream = src_func() # In case wrapper_func() modified the stream
cache[stream] = rv
except Exception:
pass
return rv
return func
_default_text_stdin = _make_cached_stream_func(lambda: sys.stdin, get_text_stdin)
_default_text_stdout = _make_cached_stream_func(lambda: sys.stdout, get_text_stdout)
_default_text_stderr = _make_cached_stream_func(lambda: sys.stderr, get_text_stderr)
binary_streams = {
"stdin": get_binary_stdin,
"stdout": get_binary_stdout,
"stderr": get_binary_stderr,
}
text_streams = {
"stdin": get_text_stdin,
"stdout": get_text_stdout,
"stderr": get_text_stderr,
}
| _AtomicFile |
python | fluentpython__example-code | 21-class-metaprog/evalsupport.py | {
"start": 209,
"end": 512
} | class ____(type):
print('<[400]> MetaAleph body')
def __init__(cls, name, bases, dic):
print('<[500]> MetaAleph.__init__')
def inner_2(self):
print('<[600]> MetaAleph.__init__:inner_2')
cls.method_z = inner_2
print('<[700]> evalsupport module end')
| MetaAleph |
python | django__django | django/core/exceptions.py | {
"start": 849,
"end": 971
} | class ____(SuspiciousOperation):
"""A Suspicious filesystem operation was attempted"""
pass
| SuspiciousFileOperation |
python | huggingface__transformers | src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | {
"start": 33393,
"end": 36001
} | class ____(nn.Module):
"""
Class to perform random or forecast masking.
Parameters:
config (`PatchTSMixerConfig`): model config
Returns:
x_mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`)
Masked patched input
mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`)
Bool tensor indicating True on masked points
"""
def __init__(self, config: PatchTSMixerConfig):
super().__init__()
self.random_mask_ratio = config.random_mask_ratio
self.channel_consistent_masking = config.channel_consistent_masking
self.mask_type = config.mask_type
self.num_forecast_mask_patches = config.num_forecast_mask_patches
self.unmasked_channel_indices = config.unmasked_channel_indices
self.mask_value = config.mask_value
if self.unmasked_channel_indices is not None:
self.unmasked_channel_indices = sorted(self.unmasked_channel_indices)
def forward(self, patch_input: torch.Tensor):
"""
Parameters:
patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*):
Patch input
Return:
masked_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`)
Masked patched input
mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`)
Bool tensor indicating True on masked points
"""
if self.mask_type == "random":
masked_input, mask = random_masking(
inputs=patch_input,
mask_ratio=self.random_mask_ratio,
unmasked_channel_indices=self.unmasked_channel_indices,
channel_consistent_masking=self.channel_consistent_masking,
mask_value=self.mask_value,
)
elif self.mask_type == "forecast":
masked_input, mask = forecast_masking(
inputs=patch_input,
num_forecast_mask_patches=self.num_forecast_mask_patches,
unmasked_channel_indices=self.unmasked_channel_indices,
mask_value=self.mask_value,
)
else:
raise ValueError(f"Invalid mask type {self.mask_type}.")
# mask: [bs x num_input_channels x num_patch]
mask = mask.bool()
return masked_input, mask
# Copied from transformers.models.patchtst.modeling_patchtst.PatchTSTStdScaler with PatchTST->PatchTSMixer
| PatchTSMixerMasking |
python | numpy__numpy | numpy/lib/tests/test_recfunctions.py | {
"start": 23191,
"end": 25452
} | class ____:
# Test append_fields
def _create_arrays(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
[('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
return w, x, y, z
def test_append_single(self):
# Test simple case
x = self._create_arrays()[1]
test = append_fields(x, 'A', data=[10, 20, 30])
control = ma.array([(1, 10), (2, 20), (-1, 30)],
mask=[(0, 0), (0, 0), (1, 0)],
dtype=[('f0', int), ('A', int)],)
assert_equal(test, control)
def test_append_double(self):
# Test simple case
x = self._create_arrays()[1]
test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]])
control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)],
mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)],
dtype=[('f0', int), ('A', int), ('B', int)],)
assert_equal(test, control)
def test_append_on_flex(self):
# Test append_fields on flexible type arrays
z = self._create_arrays()[-1]
test = append_fields(z, 'C', data=[10, 20, 30])
control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)],
mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)],
dtype=[('A', '|S3'), ('B', float), ('C', int)],)
assert_equal(test, control)
def test_append_on_nested(self):
# Test append_fields on nested fields
w = self._create_arrays()[0]
test = append_fields(w, 'C', data=[10, 20, 30])
control = ma.array([(1, (2, 3.0), 10),
(4, (5, 6.0), 20),
(-1, (-1, -1.), 30)],
mask=[(
0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)],
dtype=[('a', int),
('b', [('ba', float), ('bb', int)]),
('C', int)],)
assert_equal(test, control)
| TestAppendFields |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDict21.py | {
"start": 635,
"end": 683
} | class ____(TypedDict):
f1: str
f2: str
| TD5 |
python | huggingface__transformers | src/transformers/models/pegasus/modeling_pegasus.py | {
"start": 2534,
"end": 5248
} | class ____(nn.Embedding):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None) -> None:
super().__init__(num_positions, embedding_dim, _freeze=True)
def create_weight(self):
"""
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
"""
n_pos, dim = self.weight.shape
position_enc = np.array(
[[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)
out = torch.empty(n_pos, dim, dtype=self.weight.dtype, requires_grad=False)
sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
return out
@torch.no_grad()
def forward(
self, input_ids_shape: torch.Size, past_key_values_length: int = 0, position_ids: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
if position_ids is None:
bsz, seq_len = input_ids_shape[:2]
position_ids = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
)
return super().forward(position_ids)
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Pegasus
| PegasusSinusoidalPositionalEmbedding |
python | getsentry__sentry | tests/sentry/api/endpoints/test_api_application_details.py | {
"start": 771,
"end": 1352
} | class ____(APITestCase):
def test_simple(self) -> None:
app = ApiApplication.objects.create(owner=self.user, name="a")
self.login_as(self.user)
url = reverse("sentry-api-0-api-application-details", args=[app.client_id])
response = self.client.put(url, data={"name": "foobaz"})
assert response.status_code == 200, (response.status_code, response.content)
assert response.data["id"] == app.client_id
app = ApiApplication.objects.get(id=app.id)
assert app.name == "foobaz"
@control_silo_test
| ApiApplicationUpdateTest |
python | apache__airflow | airflow-core/src/airflow/cli/commands/config_command.py | {
"start": 3052,
"end": 42582
} | class ____:
"""
Class representing the configuration changes in Airflow 3.0.
:param config: The configuration parameter being changed.
:param default_change: If the change is a default value change.
:param old_default: The old default value (valid only if default_change is True).
:param new_default: The new default value for the configuration parameter.
:param suggestion: A suggestion for replacing or handling the removed configuration.
:param renamed_to: The new section and option if the configuration is renamed.
:param was_deprecated: If the config is removed, whether the old config was deprecated.
:param was_removed: If the config is removed.
:param is_invalid_if: If the current config value is invalid in the future.
:param breaking: Mark if this change is known to be breaking and causing errors/ warnings / deprecations.
:param remove_if_equals: For removal rules, remove the option only if its current value equals this value.
"""
config: ConfigParameter
default_change: bool = False
old_default: str | bool | int | float | None = None
new_default: str | bool | int | float | None = None
suggestion: str = ""
renamed_to: ConfigParameter | None = None
was_deprecated: bool = True
was_removed: bool = True
is_invalid_if: Any = None
breaking: bool = False
remove_if_equals: str | bool | int | float | None = None
@property
def message(self) -> str | None:
"""Generate a message for this configuration change."""
if self.default_change:
value = conf.get(self.config.section, self.config.option)
if value != self.new_default:
return (
f"Changed default value of `{self.config.option}` in `{self.config.section}` "
f"from `{self.old_default}` to `{self.new_default}`. "
f"You currently have `{value}` set. {self.suggestion}"
)
if self.renamed_to:
if self.config.section != self.renamed_to.section:
return (
f"`{self.config.option}` configuration parameter moved from `{self.config.section}` section to "
f"`{self.renamed_to.section}` section as `{self.renamed_to.option}`."
)
return (
f"`{self.config.option}` configuration parameter renamed to `{self.renamed_to.option}` "
f"in the `{self.config.section}` section."
)
if self.was_removed and not self.remove_if_equals:
return (
f"Removed{' deprecated' if self.was_deprecated else ''} `{self.config.option}` configuration parameter "
f"from `{self.config.section}` section. "
f"{self.suggestion}"
)
if self.is_invalid_if is not None:
value = conf.get(self.config.section, self.config.option)
if value == self.is_invalid_if:
return (
f"Invalid value `{self.is_invalid_if}` set for `{self.config.option}` configuration parameter "
f"in `{self.config.section}` section. {self.suggestion}"
)
return None
CONFIGS_CHANGES = [
# admin
ConfigChange(
config=ConfigParameter("admin", "hide_sensitive_variable_fields"),
renamed_to=ConfigParameter("core", "hide_sensitive_var_conn_fields"),
),
ConfigChange(
config=ConfigParameter("admin", "sensitive_variable_fields"),
renamed_to=ConfigParameter("core", "sensitive_var_conn_names"),
),
# core
ConfigChange(
config=ConfigParameter("core", "executor"),
default_change=True,
old_default="SequentialExecutor",
new_default="LocalExecutor",
was_removed=False,
),
ConfigChange(
config=ConfigParameter("core", "hostname"),
was_removed=True,
remove_if_equals=":",
),
ConfigChange(
config=ConfigParameter("core", "check_slas"),
suggestion="The SLA feature is removed in Airflow 3.0, to be replaced with Airflow Alerts in future",
),
ConfigChange(
config=ConfigParameter("core", "strict_dataset_uri_validation"),
suggestion="Dataset URI with a defined scheme will now always be validated strictly, "
"raising a hard error on validation failure.",
),
ConfigChange(
config=ConfigParameter("core", "dag_default_view"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("core", "dag_orientation"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("core", "dataset_manager_class"),
renamed_to=ConfigParameter("core", "asset_manager_class"),
),
ConfigChange(
config=ConfigParameter("core", "dataset_manager_kwargs"),
renamed_to=ConfigParameter("core", "asset_manager_kwargs"),
),
ConfigChange(
config=ConfigParameter("core", "worker_precheck"),
renamed_to=ConfigParameter("celery", "worker_precheck"),
),
ConfigChange(
config=ConfigParameter("core", "non_pooled_task_slot_count"),
renamed_to=ConfigParameter("core", "default_pool_task_slot_count"),
),
ConfigChange(
config=ConfigParameter("core", "dag_concurrency"),
renamed_to=ConfigParameter("core", "max_active_tasks_per_dag"),
),
ConfigChange(
config=ConfigParameter("core", "sql_alchemy_conn"),
renamed_to=ConfigParameter("database", "sql_alchemy_conn"),
),
ConfigChange(
config=ConfigParameter("core", "sql_engine_encoding"),
renamed_to=ConfigParameter("database", "sql_engine_encoding"),
),
ConfigChange(
config=ConfigParameter("core", "sql_engine_collation_for_ids"),
renamed_to=ConfigParameter("database", "sql_engine_collation_for_ids"),
),
ConfigChange(
config=ConfigParameter("core", "sql_alchemy_pool_enabled"),
renamed_to=ConfigParameter("database", "sql_alchemy_pool_enabled"),
),
ConfigChange(
config=ConfigParameter("core", "sql_alchemy_pool_size"),
renamed_to=ConfigParameter("database", "sql_alchemy_pool_size"),
),
ConfigChange(
config=ConfigParameter("core", "sql_alchemy_max_overflow"),
renamed_to=ConfigParameter("database", "sql_alchemy_max_overflow"),
),
ConfigChange(
config=ConfigParameter("core", "sql_alchemy_pool_recycle"),
renamed_to=ConfigParameter("database", "sql_alchemy_pool_recycle"),
),
ConfigChange(
config=ConfigParameter("core", "sql_alchemy_pool_pre_ping"),
renamed_to=ConfigParameter("database", "sql_alchemy_pool_pre_ping"),
),
ConfigChange(
config=ConfigParameter("core", "sql_alchemy_schema"),
renamed_to=ConfigParameter("database", "sql_alchemy_schema"),
),
ConfigChange(
config=ConfigParameter("core", "sql_alchemy_connect_args"),
renamed_to=ConfigParameter("database", "sql_alchemy_connect_args"),
),
ConfigChange(
config=ConfigParameter("core", "load_default_connections"),
renamed_to=ConfigParameter("database", "load_default_connections"),
),
ConfigChange(
config=ConfigParameter("core", "max_db_retries"),
renamed_to=ConfigParameter("database", "max_db_retries"),
),
ConfigChange(config=ConfigParameter("core", "task_runner")),
ConfigChange(config=ConfigParameter("core", "enable_xcom_pickling")),
ConfigChange(
config=ConfigParameter("core", "dag_file_processor_timeout"),
renamed_to=ConfigParameter("dag_processor", "dag_file_processor_timeout"),
),
ConfigChange(
config=ConfigParameter("core", "dag_processor_manager_log_location"),
),
ConfigChange(
config=ConfigParameter("core", "log_processor_filename_template"),
),
ConfigChange(
config=ConfigParameter("core", "parallelism"),
was_removed=False,
is_invalid_if="0",
suggestion="Please set the `parallelism` configuration parameter to a value greater than 0.",
),
# api
ConfigChange(
config=ConfigParameter("api", "access_control_allow_origin"),
renamed_to=ConfigParameter("api", "access_control_allow_origins"),
),
ConfigChange(
config=ConfigParameter("api", "auth_backend"),
renamed_to=ConfigParameter("fab", "auth_backends"),
),
ConfigChange(
config=ConfigParameter("api", "auth_backends"),
renamed_to=ConfigParameter("fab", "auth_backends"),
),
# logging
ConfigChange(
config=ConfigParameter("logging", "enable_task_context_logger"),
suggestion="Remove TaskContextLogger: Replaced by the Log table for better handling of task log "
"messages outside the execution context.",
),
ConfigChange(
config=ConfigParameter("logging", "dag_processor_manager_log_location"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("logging", "dag_processor_manager_log_stdout"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("logging", "log_processor_filename_template"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("logging", "log_filename_template"),
was_removed=True,
remove_if_equals="{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log",
breaking=True,
),
ConfigChange(
config=ConfigParameter("logging", "log_filename_template"),
was_removed=True,
remove_if_equals="dag_id={{ ti.dag_id }}/run_id={{ ti.run_id }}/task_id={{ ti.task_id }}/{% if ti.map_index >= 0 %}map_index={{ ti.map_index }}/{% endif %}attempt={{ try_number }}.log",
breaking=True,
),
# metrics
ConfigChange(
config=ConfigParameter("metrics", "metrics_use_pattern_match"),
),
ConfigChange(
config=ConfigParameter("metrics", "timer_unit_consistency"),
suggestion="In Airflow 3.0, the `timer_unit_consistency` setting in the `metrics` section is "
"removed as it is now the default behaviour. This is done to standardize all timer and "
"timing metrics to milliseconds across all metric loggers",
),
ConfigChange(
config=ConfigParameter("metrics", "statsd_allow_list"),
renamed_to=ConfigParameter("metrics", "metrics_allow_list"),
),
ConfigChange(
config=ConfigParameter("metrics", "statsd_block_list"),
renamed_to=ConfigParameter("metrics", "metrics_block_list"),
),
# traces
ConfigChange(
config=ConfigParameter("traces", "otel_task_log_event"),
),
# operators
ConfigChange(
config=ConfigParameter("operators", "allow_illegal_arguments"),
),
# webserver
ConfigChange(
config=ConfigParameter("webserver", "allow_raw_html_descriptions"),
),
ConfigChange(
config=ConfigParameter("webserver", "cookie_samesite"),
renamed_to=ConfigParameter("fab", "cookie_samesite"),
breaking=True,
),
ConfigChange(
config=ConfigParameter("webserver", "audit_view_included_events"),
),
ConfigChange(
config=ConfigParameter("webserver", "audit_view_excluded_events"),
),
ConfigChange(
config=ConfigParameter("webserver", "instance_name"),
renamed_to=ConfigParameter("api", "instance_name"),
),
ConfigChange(
config=ConfigParameter("webserver", "update_fab_perms"),
renamed_to=ConfigParameter("fab", "update_fab_perms"),
),
ConfigChange(
config=ConfigParameter("webserver", "auth_rate_limited"),
renamed_to=ConfigParameter("fab", "auth_rate_limited"),
),
ConfigChange(
config=ConfigParameter("webserver", option="auth_rate_limit"),
renamed_to=ConfigParameter("fab", "auth_rate_limit"),
),
ConfigChange(
config=ConfigParameter("webserver", "config_file"),
renamed_to=ConfigParameter("fab", "config_file"),
),
ConfigChange(
config=ConfigParameter("webserver", "session_backend"),
renamed_to=ConfigParameter("fab", "session_backend"),
),
ConfigChange(
config=ConfigParameter("webserver", "session_lifetime_days"),
renamed_to=ConfigParameter("fab", "session_lifetime_minutes"),
),
ConfigChange(
config=ConfigParameter("webserver", "force_log_out_after"),
renamed_to=ConfigParameter("fab", "session_lifetime_minutes"),
),
ConfigChange(
config=ConfigParameter("webserver", "session_lifetime_minutes"),
renamed_to=ConfigParameter("fab", "session_lifetime_minutes"),
),
ConfigChange(
config=ConfigParameter("webserver", "access_denied_message"),
renamed_to=ConfigParameter("fab", "access_denied_message"),
),
ConfigChange(
config=ConfigParameter("webserver", "expose_hostname"),
renamed_to=ConfigParameter("fab", "expose_hostname"),
),
ConfigChange(
config=ConfigParameter("webserver", "navbar_color"),
renamed_to=ConfigParameter("fab", "navbar_color"),
),
ConfigChange(
config=ConfigParameter("webserver", "navbar_text_color"),
renamed_to=ConfigParameter("fab", "navbar_text_color"),
),
ConfigChange(
config=ConfigParameter("webserver", "navbar_hover_color"),
renamed_to=ConfigParameter("fab", "navbar_hover_color"),
),
ConfigChange(
config=ConfigParameter("webserver", "navbar_text_hover_color"),
renamed_to=ConfigParameter("fab", "navbar_text_hover_color"),
),
ConfigChange(
config=ConfigParameter("webserver", "x_frame_enabled"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "base_url"),
renamed_to=ConfigParameter("api", "base_url"),
),
ConfigChange(
config=ConfigParameter("webserver", "secret_key"),
renamed_to=ConfigParameter("api", "secret_key"),
),
ConfigChange(
config=ConfigParameter("webserver", "web_server_host"),
renamed_to=ConfigParameter("api", "host"),
),
ConfigChange(
config=ConfigParameter("webserver", "web_server_port"),
renamed_to=ConfigParameter("api", "port"),
),
ConfigChange(
config=ConfigParameter("webserver", "workers"),
renamed_to=ConfigParameter("api", "workers"),
),
ConfigChange(
config=ConfigParameter("webserver", "web_server_worker_timeout"),
renamed_to=ConfigParameter("api", "worker_timeout"),
),
ConfigChange(
config=ConfigParameter("webserver", "web_server_ssl_cert"),
renamed_to=ConfigParameter("api", "ssl_cert"),
),
ConfigChange(
config=ConfigParameter("webserver", "web_server_ssl_key"),
renamed_to=ConfigParameter("api", "ssl_key"),
),
ConfigChange(
config=ConfigParameter("webserver", "access_logfile"),
renamed_to=ConfigParameter("api", "access_logfile"),
),
ConfigChange(
config=ConfigParameter("webserver", "grid_view_sorting_order"),
renamed_to=ConfigParameter("api", "grid_view_sorting_order"),
),
ConfigChange(
config=ConfigParameter("webserver", "enable_swagger_ui"),
renamed_to=ConfigParameter("api", "enable_swagger_ui"),
),
ConfigChange(
config=ConfigParameter("webserver", "error_logfile"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "access_logformat"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "web_server_master_timeout"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "worker_refresh_batch_size"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "worker_refresh_interval"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "reload_on_plugin_change"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "worker_class"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "expose_stacktrace"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "log_fetch_delay_sec"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "log_auto_tailing_offset"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "log_animation_speed"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "default_dag_run_display_number"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "log_fetch_timeout_sec"),
renamed_to=ConfigParameter("api", "log_fetch_timeout_sec"),
),
ConfigChange(
config=ConfigParameter("webserver", "hide_paused_dags_by_default"),
renamed_to=ConfigParameter("api", "hide_paused_dags_by_default"),
),
ConfigChange(
config=ConfigParameter("webserver", "page_size"),
renamed_to=ConfigParameter("api", "page_size"),
),
ConfigChange(
config=ConfigParameter("webserver", "default_wrap"),
renamed_to=ConfigParameter("api", "default_wrap"),
),
ConfigChange(
config=ConfigParameter("webserver", "require_confirmation_dag_change"),
renamed_to=ConfigParameter("api", "require_confirmation_dag_change"),
),
ConfigChange(
config=ConfigParameter("webserver", "instance_name_has_markup"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "warn_deployment_exposure"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "auto_refresh_interval"),
renamed_to=ConfigParameter("api", "auto_refresh_interval"),
),
ConfigChange(
config=ConfigParameter("webserver", "enable_proxy_fix"),
renamed_to=ConfigParameter("fab", "enable_proxy_fix"),
),
ConfigChange(
config=ConfigParameter("webserver", "proxy_fix_x_for"),
renamed_to=ConfigParameter("fab", "proxy_fix_x_for"),
),
ConfigChange(
config=ConfigParameter("webserver", "proxy_fix_x_proto"),
renamed_to=ConfigParameter("fab", "proxy_fix_x_proto"),
),
ConfigChange(
config=ConfigParameter("webserver", "proxy_fix_x_host"),
renamed_to=ConfigParameter("fab", "proxy_fix_x_host"),
),
ConfigChange(
config=ConfigParameter("webserver", "proxy_fix_x_port"),
renamed_to=ConfigParameter("fab", "proxy_fix_x_port"),
),
ConfigChange(
config=ConfigParameter("webserver", "proxy_fix_x_prefix"),
renamed_to=ConfigParameter("fab", "proxy_fix_x_prefix"),
),
ConfigChange(
config=ConfigParameter("webserver", "expose_config"),
renamed_to=ConfigParameter("api", "expose_config"),
),
ConfigChange(
config=ConfigParameter("webserver", "cookie_secure"),
renamed_to=ConfigParameter("fab", "cookie_secure"),
breaking=True,
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "analytics_tool"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "analytics_id"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "analytics_url"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "show_recent_stats_for_completed_runs"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "run_internal_api"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "caching_hash_method"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "show_trigger_form_if_no_params"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "num_recent_configurations_for_trigger"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "allowed_payload_size"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "max_form_memory_size"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "max_form_parts"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "default_ui_timezone"),
was_deprecated=False,
),
# policy
ConfigChange(
config=ConfigParameter("policy", "airflow_local_settings"),
renamed_to=ConfigParameter("policy", "task_policy"),
),
ConfigChange(
config=ConfigParameter("webserver", "navbar_logo_text_color"),
was_deprecated=False,
),
# scheduler
ConfigChange(
config=ConfigParameter("scheduler", "dependency_detector"),
),
ConfigChange(
config=ConfigParameter("scheduler", "allow_trigger_in_future"),
),
ConfigChange(
config=ConfigParameter("scheduler", "dag_stale_not_seen_duration"),
),
ConfigChange(
config=ConfigParameter("scheduler", "catchup_by_default"),
default_change=True,
old_default="True",
was_removed=False,
new_default="False",
suggestion="In Airflow 3.0 the default value for `catchup_by_default` is set to `False`. "
"This means that DAGs without explicit definition of the `catchup` parameter will not "
"catchup by default. "
"If your DAGs rely on catchup behavior, not explicitly defined in the DAG definition, "
"set this configuration parameter to `True` in the `scheduler` section of your `airflow.cfg` "
"to enable the behavior from Airflow 2.x.",
),
ConfigChange(
config=ConfigParameter("scheduler", "create_cron_data_intervals"),
default_change=True,
old_default="True",
new_default="False",
was_removed=False,
),
ConfigChange(
config=ConfigParameter("scheduler", "create_delta_data_intervals"),
default_change=True,
old_default="True",
new_default="False",
was_removed=False,
),
ConfigChange(
config=ConfigParameter("scheduler", "processor_poll_interval"),
renamed_to=ConfigParameter("scheduler", "scheduler_idle_sleep_time"),
),
ConfigChange(
config=ConfigParameter("scheduler", "deactivate_stale_dags_interval"),
renamed_to=ConfigParameter("scheduler", "parsing_cleanup_interval"),
),
ConfigChange(
config=ConfigParameter("scheduler", "statsd_on"), renamed_to=ConfigParameter("metrics", "statsd_on")
),
ConfigChange(
config=ConfigParameter("scheduler", "max_threads"),
renamed_to=ConfigParameter("dag_processor", "parsing_processes"),
),
ConfigChange(
config=ConfigParameter("scheduler", "statsd_host"),
renamed_to=ConfigParameter("metrics", "statsd_host"),
),
ConfigChange(
config=ConfigParameter("scheduler", "statsd_port"),
renamed_to=ConfigParameter("metrics", "statsd_port"),
),
ConfigChange(
config=ConfigParameter("scheduler", "statsd_prefix"),
renamed_to=ConfigParameter("metrics", "statsd_prefix"),
),
ConfigChange(
config=ConfigParameter("scheduler", "statsd_allow_list"),
renamed_to=ConfigParameter("metrics", "statsd_allow_list"),
),
ConfigChange(
config=ConfigParameter("scheduler", "stat_name_handler"),
renamed_to=ConfigParameter("metrics", "stat_name_handler"),
),
ConfigChange(
config=ConfigParameter("scheduler", "statsd_datadog_enabled"),
renamed_to=ConfigParameter("metrics", "statsd_datadog_enabled"),
),
ConfigChange(
config=ConfigParameter("scheduler", "statsd_datadog_tags"),
renamed_to=ConfigParameter("metrics", "statsd_datadog_tags"),
),
ConfigChange(
config=ConfigParameter("scheduler", "statsd_datadog_metrics_tags"),
renamed_to=ConfigParameter("metrics", "statsd_datadog_metrics_tags"),
),
ConfigChange(
config=ConfigParameter("scheduler", "statsd_custom_client_path"),
renamed_to=ConfigParameter("metrics", "statsd_custom_client_path"),
),
ConfigChange(
config=ConfigParameter("scheduler", "parsing_processes"),
renamed_to=ConfigParameter("dag_processor", "parsing_processes"),
),
ConfigChange(
config=ConfigParameter("scheduler", "file_parsing_sort_mode"),
renamed_to=ConfigParameter("dag_processor", "file_parsing_sort_mode"),
),
ConfigChange(
config=ConfigParameter("scheduler", "max_callbacks_per_loop"),
renamed_to=ConfigParameter("dag_processor", "max_callbacks_per_loop"),
),
ConfigChange(
config=ConfigParameter("scheduler", "min_file_process_interval"),
renamed_to=ConfigParameter("dag_processor", "min_file_process_interval"),
),
ConfigChange(
config=ConfigParameter("scheduler", "stale_dag_threshold"),
renamed_to=ConfigParameter("dag_processor", "stale_dag_threshold"),
),
ConfigChange(
config=ConfigParameter("scheduler", "print_stats_interval"),
renamed_to=ConfigParameter("dag_processor", "print_stats_interval"),
),
ConfigChange(
config=ConfigParameter("scheduler", "dag_dir_list_interval"),
renamed_to=ConfigParameter("dag_processor", "refresh_interval"),
),
ConfigChange(
config=ConfigParameter("scheduler", "local_task_job_heartbeat_sec"),
renamed_to=ConfigParameter("scheduler", "task_instance_heartbeat_sec"),
),
ConfigChange(
config=ConfigParameter("scheduler", "scheduler_zombie_task_threshold"),
renamed_to=ConfigParameter("scheduler", "task_instance_heartbeat_timeout"),
),
ConfigChange(
config=ConfigParameter("scheduler", "zombie_detection_interval"),
renamed_to=ConfigParameter("scheduler", "task_instance_heartbeat_timeout_detection_interval"),
),
ConfigChange(
config=ConfigParameter("scheduler", "child_process_log_directory"),
renamed_to=ConfigParameter("logging", "dag_processor_child_process_log_directory"),
),
# celery
ConfigChange(
config=ConfigParameter("celery", "stalled_task_timeout"),
renamed_to=ConfigParameter("scheduler", "task_queued_timeout"),
),
ConfigChange(
config=ConfigParameter("celery", "default_queue"),
renamed_to=ConfigParameter("operators", "default_queue"),
),
ConfigChange(
config=ConfigParameter("celery", "task_adoption_timeout"),
renamed_to=ConfigParameter("scheduler", "task_queued_timeout"),
),
# kubernetes_executor
ConfigChange(
config=ConfigParameter("kubernetes_executor", "worker_pods_pending_timeout"),
renamed_to=ConfigParameter("scheduler", "task_queued_timeout"),
),
ConfigChange(
config=ConfigParameter("kubernetes_executor", "worker_pods_pending_timeout_check_interval"),
renamed_to=ConfigParameter("scheduler", "task_queued_timeout_check_interval"),
),
# smtp
ConfigChange(
config=ConfigParameter("smtp", "smtp_user"),
suggestion="Please use the SMTP connection (`smtp_default`).",
),
ConfigChange(
config=ConfigParameter("smtp", "smtp_password"),
suggestion="Please use the SMTP connection (`smtp_default`).",
),
# database
ConfigChange(
config=ConfigParameter("database", "load_default_connections"),
),
# triggerer
ConfigChange(
config=ConfigParameter("triggerer", "default_capacity"),
renamed_to=ConfigParameter("triggerer", "capacity"),
),
# email
ConfigChange(
config=ConfigParameter("email", "email_backend"),
was_removed=True,
remove_if_equals="airflow.contrib.utils.sendgrid.send_email",
),
# elasticsearch
ConfigChange(
config=ConfigParameter("elasticsearch", "log_id_template"),
was_removed=True,
remove_if_equals="{dag_id}-{task_id}-{logical_date}-{try_number}",
breaking=True,
),
]
@providers_configuration_loaded
def lint_config(args) -> None:
"""
Lint the airflow.cfg file for removed, or renamed configurations.
This function scans the Airflow configuration file for parameters that are removed or renamed in
Airflow 3.0. It provides suggestions for alternative parameters or settings where applicable.
CLI Arguments:
--section: str (optional)
The specific section of the configuration to lint.
Example: --section core
--option: str (optional)
The specific option within a section to lint.
Example: --option check_slas
--ignore-section: str (optional)
A section to ignore during linting.
Example: --ignore-section webserver
--ignore-option: str (optional)
An option to ignore during linting.
Example: --ignore-option smtp_user
--verbose: flag (optional)
Enables detailed output, including the list of ignored sections and options.
Example: --verbose
Examples:
1. Lint all sections and options:
airflow config lint
2. Lint a specific section:
airflow config lint --section core,webserver
3. Lint specific sections and options:
airflow config lint --section smtp --option smtp_user
4. Ignore a section:
airflow config lint --ignore-section webserver,api
5. Ignore an options:
airflow config lint --ignore-option smtp_user,session_lifetime_days
6. Enable verbose output:
airflow config lint --verbose
:param args: The CLI arguments for linting configurations.
"""
console = AirflowConsole()
lint_issues = []
section_to_check_if_provided = args.section or []
option_to_check_if_provided = args.option or []
ignore_sections = args.ignore_section or []
ignore_options = args.ignore_option or []
for configuration in CONFIGS_CHANGES:
if section_to_check_if_provided and configuration.config.section not in section_to_check_if_provided:
continue
if option_to_check_if_provided and configuration.config.option not in option_to_check_if_provided:
continue
if configuration.config.section in ignore_sections or configuration.config.option in ignore_options:
continue
if conf.has_option(
configuration.config.section, configuration.config.option, lookup_from_deprecated=False
):
if configuration.message is not None:
lint_issues.append(configuration.message)
if lint_issues:
console.print("[red]Found issues in your airflow.cfg:[/red]")
for issue in lint_issues:
console.print(f" - [yellow]{issue}[/yellow]")
if args.verbose:
console.print("\n[blue]Detailed Information:[/blue]")
console.print(f"Ignored sections: [green]{', '.join(ignore_sections)}[/green]")
console.print(f"Ignored options: [green]{', '.join(ignore_options)}[/green]")
console.print("\n[red]Please update your configuration file accordingly.[/red]")
else:
console.print("[green]No issues found in your airflow.cfg. It is ready for Airflow 3![/green]")
@providers_configuration_loaded
def update_config(args) -> None:
    """
    Update the airflow.cfg file to migrate configuration changes from Airflow 2.x to Airflow 3.

    By default, this command performs a dry run (showing the changes only) and lists only the
    breaking configuration changes, by scanning the current configuration file for parameters
    that have been renamed, removed, or had their default values changed in Airflow 3.0. To see
    or fix all recommended changes, use the --all-recommendations argument. To automatically
    update your airflow.cfg file, use the --fix argument. Applying fixes cleans up the existing
    comments in airflow.cfg but creates a backup of the old airflow.cfg file first.

    CLI Arguments:
        --fix: flag (optional)
            Automatically apply the breaking changes (or all changes if --all-recommendations
            is also specified).
        --all-recommendations: flag (optional)
            Include non-breaking (recommended) changes as well as breaking ones.
        --section: str (optional)
            Comma-separated list of configuration sections to update, e.g. core,database.
        --option: str (optional)
            Comma-separated list of configuration options to update; either a bare option name
            (e.g. sql_alchemy_conn) or a qualified section.option key is accepted.
        --ignore-section: str (optional)
            Comma-separated list of configuration sections to skip.
        --ignore-option: str (optional)
            Comma-separated list of configuration options to skip (bare or qualified).

    :param args: The CLI arguments for updating configuration.
    :raises AirflowConfigException: if the backup of airflow.cfg cannot be created in fix mode.
    """
    console = AirflowConsole()
    changes_applied: list[str] = []
    modifications = ConfigModifications()

    include_all = bool(args.all_recommendations)
    apply_fix = bool(args.fix)
    dry_run = not apply_fix

    # Normalize the section/option filters once; they are invariant across the loop below.
    selected_sections = {s.lower() for s in args.section} if args.section else None
    # Option filters may be given either as "option" or "section.option"; accept both so the
    # documented examples (e.g. `--option sql_alchemy_conn`) work as advertised.
    selected_options = {o.lower() for o in args.option} if args.option else None
    ignored_sections = {s.lower() for s in (args.ignore_section or [])}
    ignored_options = {o.lower() for o in (args.ignore_option or [])}

    # Snapshot of the configuration, tagged with where each value comes from. Only values
    # sourced from airflow.cfg itself are candidates for migration.
    config_dict = conf.as_dict(
        display_source=True,
        include_env=False,
        include_cmds=False,
        include_secret=True,
        display_sensitive=True,
    )

    for change in CONFIGS_CHANGES:
        if not include_all and not change.breaking:
            continue
        conf_section = change.config.section.lower()
        conf_option = change.config.option.lower()
        full_key = f"{conf_section}.{conf_option}"
        if selected_sections is not None and conf_section not in selected_sections:
            continue
        if (
            selected_options is not None
            and full_key not in selected_options
            and conf_option not in selected_options
        ):
            continue
        if (
            conf_section in ignored_sections
            or full_key in ignored_options
            or conf_option in ignored_options
        ):
            continue
        if conf_section not in config_dict or conf_option not in config_dict[conf_section]:
            continue
        value_data = config_dict[conf_section][conf_option]
        # With display_source=True, entries are (value, source) tuples; skip anything not set
        # explicitly in airflow.cfg (env vars, commands, defaults, ...).
        if not (isinstance(value_data, tuple) and value_data[1] == "airflow.cfg"):
            continue
        current_value = value_data[0]
        prefix = "[[red]BREAKING[/red]]" if change.breaking else "[[yellow]Recommended[/yellow]]"

        if change.default_change:
            if str(current_value) != str(change.new_default):
                modifications.add_default_update(conf_section, conf_option, str(change.new_default))
                changes_applied.append(
                    f"{prefix} Updated default value of '{conf_section}/{conf_option}' from "
                    f"'{current_value}' to '{change.new_default}'."
                )
        if change.renamed_to:
            modifications.add_rename(
                conf_section, conf_option, change.renamed_to.section, change.renamed_to.option
            )
            changes_applied.append(
                f"{prefix} Renamed '{conf_section}/{conf_option}' to "
                f"'{change.renamed_to.section.lower()}/{change.renamed_to.option.lower()}'."
            )
        elif change.was_removed:
            # remove_if_equals restricts removal to configs still set to a specific old value;
            # when it is None the option is removed unconditionally.
            if change.remove_if_equals is None or str(current_value) == str(change.remove_if_equals):
                modifications.add_remove(conf_section, conf_option)
                changes_applied.append(
                    f"{prefix} Removed '{conf_section}/{conf_option}' from configuration."
                )

    if dry_run:
        # Dry-run must not touch the filesystem: render the would-be airflow.cfg to memory.
        # (Previously a .bak backup was created even here, contradicting the message below.)
        console.print("[blue]Dry-run mode enabled. No changes will be written to airflow.cfg.[/blue]")
        with StringIO() as config_output:
            conf.write_custom_config(
                file=config_output,
                comment_out_defaults=True,
                include_descriptions=True,
                modifications=modifications,
            )
            new_config = config_output.getvalue()
        console.print(new_config)
    else:
        # Back up the current file before rewriting it in place.
        backup_path = f"{AIRFLOW_CONFIG}.bak"
        try:
            shutil.copy2(AIRFLOW_CONFIG, backup_path)
            console.print(f"Backup saved as '{backup_path}'.")
        except Exception as e:
            console.print(f"Failed to create backup: {e}")
            raise AirflowConfigException(
                "Backup creation failed. Aborting update_config operation."
            ) from e
        with open(AIRFLOW_CONFIG, "w") as config_file:
            conf.write_custom_config(
                file=config_file,
                comment_out_defaults=True,
                include_descriptions=True,
                modifications=modifications,
            )

    if changes_applied:
        console.print("[green]The following are the changes in airflow config:[/green]")
        for change_msg in changes_applied:
            console.print(f" - {change_msg}")
        if dry_run:
            console.print(
                "[blue]Dry-run mode is enabled. To apply above airflow.cfg run the command "
                "with `--fix`.[/blue]"
            )
    else:
        console.print("[green]No updates needed. Your configuration is already up-to-date.[/green]")
    if args.verbose:
        console.print("[blue]Configuration update completed with verbose output enabled.[/blue]")
| ConfigChange |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.