language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/bigquery_dts.py | {
"start": 9561,
"end": 23165
} | class ____(GoogleCloudBaseOperator):
"""
Start manual transfer runs to be executed now with schedule_time equal to current time.
The transfer runs can be created for a time range where the run_time is between
start_time (inclusive) and end_time (exclusive), or for a specific run_time.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryDataTransferServiceStartTransferRunsOperator`
:param transfer_config_id: Id of transfer config to be used.
:param requested_time_range: Time range for the transfer runs that should be started.
If a dict is provided, it must be of the same form as the protobuf
message `~google.cloud.bigquery_datatransfer_v1.types.TimeRange`
:param requested_run_time: Specific run_time for a transfer run to be started. The
requested_run_time must not be in the future. If a dict is provided, it
must be of the same form as the protobuf message
`~google.cloud.bigquery_datatransfer_v1.types.Timestamp`
:param project_id: The BigQuery project id where the transfer configuration should be
created.
:param location: BigQuery Transfer Service location for regional transfers.
:param retry: A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run operator in the deferrable mode.
"""
template_fields: Sequence[str] = (
"transfer_config_id",
"project_id",
"requested_time_range",
"requested_run_time",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (BigQueryDataTransferConfigLink(),)
def __init__(
self,
*,
transfer_config_id: str,
project_id: str = PROVIDE_PROJECT_ID,
location: str | None = None,
requested_time_range: dict | None = None,
requested_run_time: dict | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id="google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.location = location
self.transfer_config_id = transfer_config_id
self.requested_time_range = requested_time_range
self.requested_run_time = requested_run_time
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.deferrable = deferrable
self._transfer_run: dict = {}
@cached_property
def hook(self) -> BiqQueryDataTransferServiceHook:
hook = BiqQueryDataTransferServiceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
location=self.location,
)
return hook
def execute(self, context: Context):
self.log.info("Submitting manual transfer for %s", self.transfer_config_id)
if self.requested_run_time and isinstance(self.requested_run_time.get("seconds"), str):
self.requested_run_time["seconds"] = int(self.requested_run_time["seconds"])
response = self.hook.start_manual_transfer_runs(
transfer_config_id=self.transfer_config_id,
requested_time_range=self.requested_time_range,
requested_run_time=self.requested_run_time,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
transfer_config = _get_transfer_config_details(response.runs[0].name)
BigQueryDataTransferConfigLink.persist(
context=context,
region=transfer_config["region"],
config_id=transfer_config["config_id"],
project_id=transfer_config["project_id"],
)
result = StartManualTransferRunsResponse.to_dict(response)
run_id = get_object_id(result["runs"][0])
context["ti"].xcom_push(key="run_id", value=run_id)
if not self.deferrable:
# Save as attribute for further use by OpenLineage
self._transfer_run = self._wait_for_transfer_to_be_done(
run_id=run_id,
transfer_config_id=transfer_config["config_id"],
)
self.log.info("Transfer run %s submitted successfully.", run_id)
return self._transfer_run
self.defer(
trigger=BigQueryDataTransferRunTrigger(
project_id=self.project_id,
config_id=transfer_config["config_id"],
run_id=run_id,
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
),
method_name="execute_completed",
)
def _wait_for_transfer_to_be_done(self, run_id: str, transfer_config_id: str, interval: int = 10):
if interval <= 0:
raise ValueError("Interval must be > 0")
while True:
transfer_run: TransferRun = self.hook.get_transfer_run(
run_id=run_id,
transfer_config_id=transfer_config_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
state = transfer_run.state
if self._job_is_done(state):
if state in (TransferState.FAILED, TransferState.CANCELLED):
raise AirflowException(f"Transfer run was finished with {state} status.")
result = TransferRun.to_dict(transfer_run)
return result
self.log.info("Transfer run is still working, waiting for %s seconds...", interval)
self.log.info("Transfer run status: %s", state)
time.sleep(interval)
@staticmethod
def _job_is_done(state: TransferState) -> bool:
finished_job_statuses = [
state.SUCCEEDED,
state.CANCELLED,
state.FAILED,
]
return state in finished_job_statuses
def execute_completed(self, context: Context, event: dict):
"""Execute after invoked trigger in defer method finishes its job."""
if event["status"] in ("failed", "cancelled"):
self.log.error("Trigger finished its work with status: %s.", event["status"])
raise AirflowException(event["message"])
transfer_run: TransferRun = self.hook.get_transfer_run(
project_id=self.project_id,
run_id=event["run_id"],
transfer_config_id=event["config_id"],
)
self.log.info(
"%s finished with message: %s",
event["run_id"],
event["message"],
)
# Save as attribute for further use by OpenLineage
self._transfer_run = TransferRun.to_dict(transfer_run)
return self._transfer_run
def get_openlineage_facets_on_complete(self, _):
"""Implement _on_complete as we need a run config to extract information."""
from urllib.parse import urlsplit
from airflow.providers.common.compat.openlineage.facet import Dataset, ErrorMessageRunFacet
from airflow.providers.google.cloud.hooks.gcs import _parse_gcs_url
from airflow.providers.google.cloud.openlineage.utils import (
BIGQUERY_NAMESPACE,
extract_ds_name_from_gcs_path,
)
from airflow.providers.openlineage.extractors import OperatorLineage
from airflow.providers.openlineage.sqlparser import DatabaseInfo, SQLParser
if not self._transfer_run:
self.log.debug("No BigQuery Data Transfer configuration was found by OpenLineage.")
return OperatorLineage()
data_source_id = self._transfer_run["data_source_id"]
dest_dataset_id = self._transfer_run["destination_dataset_id"]
params = self._transfer_run["params"]
input_datasets, output_datasets = [], []
run_facets, job_facets = {}, {}
if data_source_id in ("google_cloud_storage", "amazon_s3", "azure_blob_storage"):
if data_source_id == "google_cloud_storage":
bucket, path = _parse_gcs_url(params["data_path_template"]) # gs://bucket...
namespace = f"gs://{bucket}"
name = extract_ds_name_from_gcs_path(path)
elif data_source_id == "amazon_s3":
parsed_url = urlsplit(params["data_path"]) # s3://bucket...
namespace = f"s3://{parsed_url.netloc}"
name = extract_ds_name_from_gcs_path(parsed_url.path)
else: # azure_blob_storage
storage_account = params["storage_account"]
container = params["container"]
namespace = f"abfss://{container}@{storage_account}.dfs.core.windows.net"
name = extract_ds_name_from_gcs_path(params["data_path"])
input_datasets.append(Dataset(namespace=namespace, name=name))
dest_table_name = params["destination_table_name_template"]
output_datasets.append(
Dataset(
namespace=BIGQUERY_NAMESPACE,
name=f"{self.project_id}.{dest_dataset_id}.{dest_table_name}",
)
)
elif data_source_id in ("postgresql", "oracle", "mysql"):
scheme = data_source_id if data_source_id != "postgresql" else "postgres"
host = params["connector.endpoint.host"]
port = params["connector.endpoint.port"]
for asset in params["assets"]:
# MySQL: db/table; Other: db/schema/table;
table_name = asset.split("/")[-1]
input_datasets.append(
Dataset(namespace=f"{scheme}://{host}:{int(port)}", name=asset.replace("/", "."))
)
output_datasets.append(
Dataset(
namespace=BIGQUERY_NAMESPACE, name=f"{self.project_id}.{dest_dataset_id}.{table_name}"
)
)
elif data_source_id == "scheduled_query":
bq_db_info = DatabaseInfo(
scheme="bigquery",
authority=None,
database=self.project_id,
)
parser_result = SQLParser("bigquery").generate_openlineage_metadata_from_sql(
sql=params["query"],
database_info=bq_db_info,
database=self.project_id,
use_connection=False,
hook=None, # Hook is not used when use_connection=False
sqlalchemy_engine=None,
)
if parser_result.inputs:
input_datasets.extend(parser_result.inputs)
if parser_result.outputs:
output_datasets.extend(parser_result.outputs)
if parser_result.job_facets:
job_facets = {**job_facets, **parser_result.job_facets}
if parser_result.run_facets:
run_facets = {**run_facets, **parser_result.run_facets}
dest_table_name = params.get("destination_table_name_template")
if dest_table_name:
output_datasets.append(
Dataset(
namespace=BIGQUERY_NAMESPACE,
name=f"{self.project_id}.{dest_dataset_id}.{dest_table_name}",
)
)
else:
self.log.debug(
"BigQuery Data Transfer data_source_id `%s` is not supported by OpenLineage.", data_source_id
)
return OperatorLineage()
error_status = self._transfer_run.get("error_status")
if error_status and str(error_status["code"]) != "0":
run_facets["errorMessage"] = ErrorMessageRunFacet(
message=error_status["message"],
programmingLanguage="python",
stackTrace=str(error_status["details"]),
)
return OperatorLineage(
inputs=input_datasets, outputs=output_datasets, job_facets=job_facets, run_facets=run_facets
)
| BigQueryDataTransferServiceStartTransferRunsOperator |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataplex.py | {
"start": 46999,
"end": 55291
} | class ____(GoogleCloudBaseOperator):
"""
Gets a Data Scan Job resource.
:param project_id: Required. The ID of the Google Cloud project that the lake belongs to.
:param region: Required. The ID of the Google Cloud region that the lake belongs to.
:param data_scan_id: Required. Data Quality scan identifier.
:param job_id: Optional. Data Quality scan job identifier.
:param api_version: The version of the api that will be requested for example 'v1'.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param fail_on_dq_failure: If set to true and not all Data Quality scan rules have been passed,
an exception is thrown. If set to false and not all Data Quality scan rules have been passed,
execution will finish with success.
:param wait_for_results: Flag indicating whether to wait for the result of a job execution
or to return the job in its current state.
:param result_timeout: Value in seconds for which operator will wait for the Data Quality scan result
when the flag `wait_for_results = True`.
Throws exception if there is no result found after specified amount of seconds.
:param polling_interval_seconds: time in seconds between polling for job completion.
The value is considered only when running in deferrable mode. Must be greater than 0.
:param deferrable: Run operator in the deferrable mode.
:return: Dict representing DataScanJob.
When the job completes with a successful status, information about the Data Quality result
is available.
"""
template_fields = ("project_id", "data_scan_id", "impersonation_chain", "job_id")
def __init__(
self,
project_id: str,
region: str,
data_scan_id: str,
job_id: str | None = None,
api_version: str = "v1",
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
fail_on_dq_failure: bool = False,
wait_for_results: bool = True,
result_timeout: float = 60.0 * 10,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
polling_interval_seconds: int = 10,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.data_scan_id = data_scan_id
self.job_id = job_id
self.api_version = api_version
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.fail_on_dq_failure = fail_on_dq_failure
self.wait_for_results = wait_for_results
self.result_timeout = result_timeout
self.deferrable = deferrable
self.polling_interval_seconds = polling_interval_seconds
def execute(self, context: Context) -> dict:
hook = DataplexHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
# fetch the last job
if not self.job_id:
jobs = hook.list_data_scan_jobs(
project_id=self.project_id,
region=self.region,
data_scan_id=self.data_scan_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
job_ids = [DataScanJob.to_dict(job) for job in jobs]
if not job_ids:
raise AirflowException("There are no jobs, you should create one before.")
job_id = job_ids[0]["name"]
self.job_id = job_id.split("/")[-1]
if self.wait_for_results:
if self.deferrable:
self.defer(
trigger=DataplexDataQualityJobTrigger(
job_id=self.job_id,
data_scan_id=self.data_scan_id,
project_id=self.project_id,
region=self.region,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
polling_interval_seconds=self.polling_interval_seconds,
),
method_name="execute_complete",
)
else:
job = hook.wait_for_data_scan_job(
job_id=self.job_id,
data_scan_id=self.data_scan_id,
project_id=self.project_id,
region=self.region,
result_timeout=self.result_timeout,
)
else:
job = hook.get_data_scan_job(
project_id=self.project_id,
region=self.region,
job_id=self.job_id,
data_scan_id=self.data_scan_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
if job.state == DataScanJob.State.SUCCEEDED:
if not job.data_quality_result.passed:
if self.fail_on_dq_failure:
raise AirflowDataQualityScanException(
f"Data Quality job {self.job_id} execution failed due to failure of its scanning "
f"rules: {self.data_scan_id}"
)
else:
self.log.info("Data Quality job executed successfully")
else:
self.log.info("Data Quality job execution returned status: %s", job.state)
result = DataScanJob.to_dict(job)
result["state"] = DataScanJob.State(result["state"]).name
return result
def execute_complete(self, context, event=None) -> None:
"""
Act as a callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was
successful.
"""
job_state = event["job_state"]
job_id = event["job_id"]
job = event["job"]
if job_state == DataScanJob.State.FAILED.name: # type: ignore
raise AirflowException(f"Job failed:\n{job_id}")
if job_state == DataScanJob.State.CANCELLED.name: # type: ignore
raise AirflowException(f"Job was cancelled:\n{job_id}")
if job_state == DataScanJob.State.SUCCEEDED.name: # type: ignore
if not job["data_quality_result"]["passed"]:
if self.fail_on_dq_failure:
raise AirflowDataQualityScanException(
f"Data Quality job {self.job_id} execution failed due to failure of its scanning "
f"rules: {self.data_scan_id}"
)
else:
self.log.info("Data Quality job executed successfully")
else:
self.log.info("Data Quality job execution returned status: %s", job_state)
return job
| DataplexGetDataQualityScanResultOperator |
python | huggingface__transformers | src/transformers/models/flex_olmo/modular_flex_olmo.py | {
"start": 10309,
"end": 10373
} | class ____(Olmo2RotaryEmbedding):
pass
| FlexOlmoRotaryEmbedding |
python | doocs__leetcode | solution/3100-3199/3187.Peaks in Array/Solution.py | {
"start": 404,
"end": 1282
} | class ____:
def countOfPeaks(self, nums: List[int], queries: List[List[int]]) -> List[int]:
def update(i: int, val: int):
if i <= 0 or i >= n - 1:
return
if nums[i - 1] < nums[i] and nums[i] > nums[i + 1]:
tree.update(i, val)
n = len(nums)
tree = BinaryIndexedTree(n - 1)
for i in range(1, n - 1):
update(i, 1)
ans = []
for q in queries:
if q[0] == 1:
l, r = q[1] + 1, q[2] - 1
ans.append(0 if l > r else tree.query(r) - tree.query(l - 1))
else:
idx, val = q[1:]
for i in range(idx - 1, idx + 2):
update(i, -1)
nums[idx] = val
for i in range(idx - 1, idx + 2):
update(i, 1)
return ans
| Solution |
python | celery__celery | t/unit/utils/test_collections.py | {
"start": 4208,
"end": 4688
} | class ____:
def test_exception_info(self):
try:
raise LookupError('The quick brown fox jumps...')
except Exception:
einfo = ExceptionInfo()
assert str(einfo) == einfo.traceback
assert isinstance(einfo.exception.exc, LookupError)
assert einfo.exception.exc.args == ('The quick brown fox jumps...',)
assert einfo.traceback
assert repr(einfo)
@t.skip.if_win32
| test_ExceptionInfo |
python | pyinstaller__pyinstaller | PyInstaller/utils/hooks/tcl_tk.py | {
"start": 2678,
"end": 16187
} | class ____:
# Root directory names of Tcl and Tk library/data directories in the frozen application. These directories are
# originally fully versioned (e.g., tcl8.6 and tk8.6); we want to remap them to unversioned variants, so that our
# run-time hook (pyi_rthook__tkinter.py) does not have to determine version numbers when setting `TCL_LIBRARY`
# and `TK_LIBRARY` environment variables.
#
# We also cannot use plain "tk" and "tcl", because on macOS, the Tcl and Tk shared libraries might come from
# framework bundles, and would therefore end up being collected as "Tcl" and "Tk" in the top-level application
# directory, causing clash due to filesystem being case-insensitive by default.
TCL_ROOTNAME = '_tcl_data'
TK_ROOTNAME = '_tk_data'
def __init__(self):
pass
def __repr__(self):
return "TclTkInfo"
# Delay initialization of Tcl/Tk information until until the corresponding attributes are first requested.
def __getattr__(self, name):
if 'available' in self.__dict__:
# Initialization was already done, but requested attribute is not available.
raise AttributeError(name)
# Load Qt library info...
self._load_tcl_tk_info()
# ... and return the requested attribute
return getattr(self, name)
def _load_tcl_tk_info(self):
logger.info("%s: initializing cached Tcl/Tk info...", self)
# Initialize variables so that they might be accessed even if tkinter/Tcl/Tk is unavailable or if initialization
# fails for some reason.
self.available = False
self.tkinter_extension_file = None
self.tcl_version = None
self.tk_version = None
self.tcl_threaded = False
self.tcl_data_dir = None
self.tk_data_dir = None
self.tcl_module_dir = None
self.is_macos_system_framework = False
self.tcl_shared_library = None
self.tk_shared_library = None
self.data_files = []
try:
tcl_tk_info = _get_tcl_tk_info()
except Exception as e:
logger.warning("%s: failed to obtain Tcl/Tk info: %s", self, e)
return
# If tkinter could not be imported, `_get_tcl_tk_info` returns None. In such cases, emit a debug message instead
# of a warning, because this initialization might be triggered by a helper function that is trying to determine
# availability of `tkinter` by inspecting the `available` attribute.
if tcl_tk_info is None:
logger.debug("%s: failed to obtain Tcl/Tk info: tkinter/_tkinter could not be imported.", self)
return
# Copy properties
for key, value in tcl_tk_info.items():
setattr(self, key, value)
# Parse Tcl/Tk version into (major, minor) tuple.
self.tcl_version = tuple((int(x) for x in self.tcl_version.split(".")[:2]))
self.tk_version = tuple((int(x) for x in self.tk_version.split(".")[:2]))
# Determine full path to Tcl and Tk shared libraries against which the `_tkinter` extension module is linked.
# This can only be done when `_tkinter` is in fact an extension, and not a built-in. In the latter case, the
# Tcl/Tk libraries are statically linked into python shared library, so there are no shared libraries for us
# to discover.
if self.tkinter_extension_file:
try:
(
self.tcl_shared_library,
self.tk_shared_library,
) = self._find_tcl_tk_shared_libraries(self.tkinter_extension_file)
except Exception:
logger.warning("%s: failed to determine Tcl and Tk shared library location!", self, exc_info=True)
# macOS: check if _tkinter is linked against system-provided Tcl.framework and Tk.framework. This is the
# case with python3 from XCode tools (and was the case with very old homebrew python builds). In such cases,
# we should not be collecting Tcl/Tk files.
if compat.is_darwin:
self.is_macos_system_framework = self._check_macos_system_framework(self.tcl_shared_library)
# Emit a warning in the unlikely event that we are dealing with Teapot-distributed version of ActiveTcl.
if not self.is_macos_system_framework:
self._warn_if_using_activetcl_or_teapot(self.tcl_data_dir)
# Infer location of Tk library/data directory. Ideally, we could infer this by running
#
# import tkinter
# root = tkinter.Tk()
# tk_data_dir = root.tk.exprstring('$tk_library')
#
# in the isolated subprocess as part of `_get_tcl_tk_info`. However, that is impractical, as it shows the empty
# window, and on some platforms (e.g., linux) requires display server. Therefore, try to guess the location,
# based on the following heuristic:
# - if TK_LIBRARY is defined use it.
# - if Tk is built as macOS framework bundle, look for Scripts sub-directory in Resources directory next to
# the shared library.
# - otherwise, look for: $tcl_root/../tkX.Y, where X and Y are Tk major and minor version.
if "TK_LIBRARY" in os.environ:
self.tk_data_dir = os.environ["TK_LIBRARY"]
elif compat.is_darwin and self.tk_shared_library and (
# is_framework_bundle_lib handles only fully-versioned framework library paths...
(osxutils.is_framework_bundle_lib(self.tk_shared_library)) or
# ... so manually handle top-level-symlinked variant for now.
(self.tk_shared_library).endswith("Tk.framework/Tk")
):
# Fully resolve the library path, in case it is a top-level symlink; for example, resolve
# /Library/Frameworks/Python.framework/Versions/3.13/Frameworks/Tk.framework/Tk
# into
# /Library/Frameworks/Python.framework/Versions/3.13/Frameworks/Tk.framework/Versions/8.6/Tk
tk_lib_realpath = os.path.realpath(self.tk_shared_library)
# Resources/Scripts directory next to the shared library
self.tk_data_dir = os.path.join(os.path.dirname(tk_lib_realpath), "Resources", "Scripts")
else:
self.tk_data_dir = os.path.join(
os.path.dirname(self.tcl_data_dir),
f"tk{self.tk_version[0]}.{self.tk_version[1]}",
)
# Infer location of Tcl module directory. The modules directory is separate from the library/data one, and
# is located at $tcl_root/../tclX, where X is the major Tcl version.
self.tcl_module_dir = os.path.join(
os.path.dirname(self.tcl_data_dir),
f"tcl{self.tcl_version[0]}",
)
# Find all data files
if self.is_macos_system_framework:
logger.info("%s: using macOS system Tcl/Tk framework - not collecting data files.", self)
else:
# Collect Tcl and Tk scripts from their corresponding library/data directories. See comment at the
# definition of TK_ROOTNAME and TK_ROOTNAME variables.
if os.path.isdir(self.tcl_data_dir):
self.data_files += self._collect_files_from_directory(
self.tcl_data_dir,
prefix=self.TCL_ROOTNAME,
excludes=['demos', '*.lib', 'tclConfig.sh'],
)
else:
logger.warning("%s: Tcl library/data directory %r does not exist!", self, self.tcl_data_dir)
if os.path.isdir(self.tk_data_dir):
self.data_files += self._collect_files_from_directory(
self.tk_data_dir,
prefix=self.TK_ROOTNAME,
excludes=['demos', '*.lib', 'tkConfig.sh'],
)
else:
logger.warning("%s: Tk library/data directory %r does not exist!", self, self.tk_data_dir)
# Collect Tcl modules from modules directory
if os.path.isdir(self.tcl_module_dir):
self.data_files += self._collect_files_from_directory(
self.tcl_module_dir,
prefix=os.path.basename(self.tcl_module_dir),
)
else:
logger.warning("%s: Tcl module directory %r does not exist!", self, self.tcl_module_dir)
@staticmethod
def _collect_files_from_directory(root, prefix=None, excludes=None):
"""
A minimal port of PyInstaller.building.datastruct.Tree() functionality, which allows us to avoid using Tree
here. This way, the TclTkInfo data structure can be used without having PyInstaller's config context set up.
"""
excludes = excludes or []
todo = [(root, prefix)]
output = []
while todo:
target_dir, prefix = todo.pop()
for entry in os.listdir(target_dir):
# Basic name-based exclusion
if any((fnmatch.fnmatch(entry, exclude) for exclude in excludes)):
continue
src_path = os.path.join(target_dir, entry)
dest_path = os.path.join(prefix, entry) if prefix else entry
if os.path.isdir(src_path):
todo.append((src_path, dest_path))
else:
# Return 3-element tuples with fully-resolved dest path, since other parts of code depend on that.
output.append((dest_path, src_path, 'DATA'))
return output
@staticmethod
def _find_tcl_tk_shared_libraries(tkinter_ext_file):
"""
Find Tcl and Tk shared libraries against which the _tkinter extension module is linked.
"""
tcl_lib = None
tk_lib = None
for _, lib_path in bindepend.get_imports(tkinter_ext_file): # (name, fullpath) tuple
if lib_path is None:
continue # Skip unresolved entries
# For comparison, take basename of lib_path. On macOS, lib_name returned by get_imports is in fact
# referenced name, which is not necessarily just a basename.
lib_name = os.path.basename(lib_path)
lib_name_lower = lib_name.lower() # lower-case for comparisons
# First check for Tk library, because it is unlikely that 'tk' will appear in the name of the Tcl shared
# library, while 'tcl' could appear in the name of the Tk shared library. For example, Fedora 43 ships
# both Tcl/Tk 8.6 and 9.0, and in the latter, the libraries are named `libtcl9.0.so` and `libtcl9tk9.0.so`.
if 'tk' in lib_name_lower:
tk_lib = lib_path
elif 'tcl' in lib_name_lower:
tcl_lib = lib_path
return tcl_lib, tk_lib
@staticmethod
def _check_macos_system_framework(tcl_shared_lib):
# Starting with macOS 11, system libraries are hidden (unless both Python and PyInstaller's bootloader are built
# against macOS 11.x SDK). Therefore, Tcl shared library might end up unresolved (None); but that implicitly
# indicates that the system framework is used.
if tcl_shared_lib is None:
return True
# Check if the path corresponds to the system framework, i.e., [/System]/Library/Frameworks/Tcl.framework/Tcl
return 'Library/Frameworks/Tcl.framework' in tcl_shared_lib
@staticmethod
def _warn_if_using_activetcl_or_teapot(tcl_root):
"""
Check if Tcl installation is a Teapot-distributed version of ActiveTcl, and log a non-fatal warning that the
resulting frozen application will (likely) fail to run on other systems.
PyInstaller does *not* freeze all ActiveTcl dependencies -- including Teapot, which is typically ignorable.
Since Teapot is *not* ignorable in this case, this function warns of impending failure.
See Also
-------
https://github.com/pyinstaller/pyinstaller/issues/621
"""
if tcl_root is None:
return
# Read the "init.tcl" script and look for mentions of "activetcl" and "teapot"
init_tcl = os.path.join(tcl_root, 'init.tcl')
if not os.path.isfile(init_tcl):
return
mentions_activetcl = False
mentions_teapot = False
# Tcl/Tk reads files using the system encoding (https://www.tcl.tk/doc/howto/i18n.html#system_encoding);
# on macOS, this is UTF-8.
with open(init_tcl, 'r', encoding='utf8') as fp:
for line in fp.readlines():
line = line.strip().lower()
if line.startswith('#'):
continue
if 'activetcl' in line:
mentions_activetcl = True
if 'teapot' in line:
mentions_teapot = True
if mentions_activetcl and mentions_teapot:
break
if mentions_activetcl and mentions_teapot:
logger.warning(
"You appear to be using an ActiveTcl build of Tcl/Tk, which PyInstaller has\n"
"difficulty freezing. To fix this, comment out all references to 'teapot' in\n"
f"{init_tcl!r}\n"
"See https://github.com/pyinstaller/pyinstaller/issues/621 for more information."
)
tcltk_info = TclTkInfo()
| TclTkInfo |
python | sympy__sympy | sympy/tensor/toperators.py | {
"start": 248,
"end": 8840
} | class ____(TensExpr):
"""
Partial derivative for tensor expressions.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, TensorHead
>>> from sympy.tensor.toperators import PartialDerivative
>>> from sympy import symbols
>>> L = TensorIndexType("L")
>>> A = TensorHead("A", [L])
>>> B = TensorHead("B", [L])
>>> i, j, k = symbols("i j k")
>>> expr = PartialDerivative(A(i), A(j))
>>> expr
PartialDerivative(A(i), A(j))
The ``PartialDerivative`` object behaves like a tensorial expression:
>>> expr.get_indices()
[i, -j]
Notice that the deriving variables have opposite valence than the
printed one: ``A(j)`` is printed as covariant, but the index of the
derivative is actually contravariant, i.e. ``-j``.
Indices can be contracted:
>>> expr = PartialDerivative(A(i), A(i))
>>> expr
PartialDerivative(A(L_0), A(L_0))
>>> expr.get_indices()
[L_0, -L_0]
The method ``.get_indices()`` always returns all indices (even the
contracted ones). If only uncontracted indices are needed, call
``.get_free_indices()``:
>>> expr.get_free_indices()
[]
Nested partial derivatives are flattened:
>>> expr = PartialDerivative(PartialDerivative(A(i), A(j)), A(k))
>>> expr
PartialDerivative(A(i), A(j), A(k))
>>> expr.get_indices()
[i, -j, -k]
Replace a derivative with array values:
>>> from sympy.abc import x, y
>>> from sympy import sin, log
>>> compA = [sin(x), log(x)*y**3]
>>> compB = [x, y]
>>> expr = PartialDerivative(A(i), B(j))
>>> expr.replace_with_arrays({A(i): compA, B(i): compB})
[[cos(x), 0], [y**3/x, 3*y**2*log(x)]]
The returned array is indexed by `(i, -j)`.
Be careful that other SymPy modules put the indices of the deriving
variables before the indices of the derivand in the derivative result.
For example:
>>> expr.get_free_indices()
[i, -j]
>>> from sympy import Matrix, Array
>>> Matrix(compA).diff(Matrix(compB)).reshape(2, 2)
[[cos(x), y**3/x], [0, 3*y**2*log(x)]]
>>> Array(compA).diff(Array(compB))
[[cos(x), y**3/x], [0, 3*y**2*log(x)]]
These are the transpose of the result of ``PartialDerivative``,
as the matrix and the array modules put the index `-j` before `i` in the
derivative result. An array read with index order `(-j, i)` is indeed the
transpose of the same array read with index order `(i, -j)`. By specifying
the index order to ``.replace_with_arrays`` one can get a compatible
expression:
>>> expr.replace_with_arrays({A(i): compA, B(i): compB}, [-j, i])
[[cos(x), y**3/x], [0, 3*y**2*log(x)]]
"""
def __new__(cls, expr, *variables):
# Flatten:
if isinstance(expr, PartialDerivative):
variables = expr.variables + variables
expr = expr.expr
args, indices, free, dum = cls._contract_indices_for_derivative(
S(expr), variables)
obj = TensExpr.__new__(cls, *args)
obj._indices = indices
obj._free = free
obj._dum = dum
return obj
@property
def coeff(self):
return S.One
@property
def nocoeff(self):
return self
@classmethod
def _contract_indices_for_derivative(cls, expr, variables):
variables_opposite_valence = []
for i in variables:
if isinstance(i, Tensor):
i_free_indices = i.get_free_indices()
variables_opposite_valence.append(
i.xreplace({k: -k for k in i_free_indices}))
elif isinstance(i, Symbol):
variables_opposite_valence.append(i)
args, indices, free, dum = TensMul._tensMul_contract_indices(
[expr] + variables_opposite_valence, replace_indices=True)
for i in range(1, len(args)):
args_i = args[i]
if isinstance(args_i, Tensor):
i_indices = args[i].get_free_indices()
args[i] = args[i].xreplace({k: -k for k in i_indices})
return args, indices, free, dum
def doit(self, **hints):
args, indices, free, dum = self._contract_indices_for_derivative(self.expr, self.variables)
obj = self.func(*args)
obj._indices = indices
obj._free = free
obj._dum = dum
return obj
def _expand_partial_derivative(self):
args, indices, free, dum = self._contract_indices_for_derivative(self.expr, self.variables)
obj = self.func(*args)
obj._indices = indices
obj._free = free
obj._dum = dum
result = obj
if not args[0].free_symbols:
return S.Zero
elif isinstance(obj.expr, TensAdd):
# take care of sums of multi PDs
result = obj.expr.func(*[
self.func(a, *obj.variables)._expand_partial_derivative()
for a in result.expr.args])
elif isinstance(obj.expr, TensMul):
# take care of products of multi PDs
if len(obj.variables) == 1:
# derivative with respect to single variable
terms = []
mulargs = list(obj.expr.args)
for ind in range(len(mulargs)):
if not isinstance(sympify(mulargs[ind]), Number):
# a number coefficient is not considered for
# expansion of PartialDerivative
d = self.func(mulargs[ind], *obj.variables)._expand_partial_derivative()
terms.append(TensMul(*(mulargs[:ind]
+ [d]
+ mulargs[(ind + 1):])))
result = TensAdd.fromiter(terms)
else:
# derivative with respect to multiple variables
# decompose:
# partial(expr, (u, v))
# = partial(partial(expr, u).doit(), v).doit()
result = obj.expr # init with expr
for v in obj.variables:
result = self.func(result, v)._expand_partial_derivative()
# then throw PD on it
return result
def _perform_derivative(self):
result = self.expr
for v in self.variables:
if isinstance(result, TensExpr):
result = result._eval_partial_derivative(v)
else:
if v._diff_wrt:
result = result._eval_derivative(v)
else:
result = S.Zero
return result
def get_indices(self):
return self._indices
def get_free_indices(self):
free = sorted(self._free, key=lambda x: x[1])
return [i[0] for i in free]
def _replace_indices(self, repl):
expr = self.expr.xreplace(repl)
mirrored = {-k: -v for k, v in repl.items()}
variables = [i.xreplace(mirrored) for i in self.variables]
return self.func(expr, *variables)
@property
def expr(self):
return self.args[0]
@property
def variables(self):
return self.args[1:]
def _extract_data(self, replacement_dict):
from .array import derive_by_array, tensorcontraction
indices, array = self.expr._extract_data(replacement_dict)
for variable in self.variables:
var_indices, var_array = variable._extract_data(replacement_dict)
var_indices = [-i for i in var_indices]
coeff_array, var_array = zip(*[i.as_coeff_Mul() for i in var_array])
dim_before = len(array.shape)
array = derive_by_array(array, var_array)
dim_after = len(array.shape)
dim_increase = dim_after - dim_before
array = permutedims(array, [i + dim_increase for i in range(dim_before)] + list(range(dim_increase)))
array = array.as_mutable()
varindex = var_indices[0]
# Remove coefficients of base vector:
coeff_index = [0] + [slice(None) for i in range(len(indices))]
for i, coeff in enumerate(coeff_array):
coeff_index[0] = i
array[tuple(coeff_index)] /= coeff
if -varindex in indices:
pos = indices.index(-varindex)
array = tensorcontraction(array, (0, pos+1))
indices.pop(pos)
else:
indices.append(varindex)
return indices, array
| PartialDerivative |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_image54.py | {
"start": 315,
"end": 863
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("image54.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image("E9", self.image_dir + "red.png", {"decorative": True})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/tryceratops/TRY300.py | {
"start": 199,
"end": 941
} | class ____(Exception):
pass
def bad():
try:
a = 1
b = process()
return b
except MyException:
logger.exception("process failed")
def good():
try:
a = 1
b = process()
except MyException:
logger.exception("process failed")
else:
return b
def noreturn():
try:
a = 1
b = process()
except MyException:
logger.exception("process failed")
def good_return_with_side_effects():
try:
pass
return process()
except MyException:
logger.exception("process failed")
def good_noexcept():
try:
pass
return process()
finally:
logger.exception("process failed")
| MyException |
python | mlflow__mlflow | mlflow/utils/checkpoint_utils.py | {
"start": 637,
"end": 8591
} | class ____(metaclass=ExceptionSafeAbstractClass):
"""Callback base class for automatic model checkpointing to MLflow.
You must implement "save_checkpoint" method to save the model as the checkpoint file.
and you must call `check_and_save_checkpoint_if_needed` method in relevant
callback events to trigger automatic checkpointing.
Args:
checkpoint_file_suffix: checkpoint file suffix.
monitor: In automatic model checkpointing, the metric name to monitor if
you set `model_checkpoint_save_best_only` to True.
save_best_only: If True, automatic model checkpointing only saves when
the model is considered the "best" model according to the quantity
monitored and previous checkpoint model is overwritten.
mode: one of {"min", "max"}. In automatic model checkpointing,
if save_best_only=True, the decision to overwrite the current save file is made
based on either the maximization or the minimization of the monitored quantity.
save_weights_only: In automatic model checkpointing, if True, then
only the model's weights will be saved. Otherwise, the optimizer states,
lr-scheduler states, etc are added in the checkpoint too.
save_freq: `"epoch"` or integer. When using `"epoch"`, the callback
saves the model after each epoch. When using integer, the callback
saves the model at end of this many batches. Note that if the saving isn't
aligned to epochs, the monitored metric may potentially be less reliable (it
could reflect as little as 1 batch, since the metrics get reset
every epoch). Defaults to `"epoch"`.
"""
def __init__(
self,
checkpoint_file_suffix,
monitor,
mode,
save_best_only,
save_weights_only,
save_freq,
):
self.checkpoint_file_suffix = checkpoint_file_suffix
self.monitor = monitor
self.mode = mode
self.save_best_only = save_best_only
self.save_weights_only = save_weights_only
self.save_freq = save_freq
self.last_monitor_value = None
self.mlflow_tracking_uri = mlflow.get_tracking_uri()
if self.save_best_only:
if self.monitor is None:
raise MlflowException(
"If checkpoint 'save_best_only' config is set to True, you need to set "
"'monitor' config as well."
)
if self.mode not in ["min", "max"]:
raise MlflowException(
"If checkpoint 'save_best_only' config is set to True, you need to set "
"'mode' config and available modes includes 'min' and 'max', but you set "
f"'mode' to '{self.mode}'."
)
def _is_new_checkpoint_better(self, new_monitor_value):
if self.last_monitor_value is None:
return True
if self.mode == "min":
return new_monitor_value < self.last_monitor_value
return new_monitor_value > self.last_monitor_value
def save_checkpoint(self, filepath: str):
raise NotImplementedError()
def check_and_save_checkpoint_if_needed(self, current_epoch, global_step, metric_dict):
# For distributed model training, trainer workers need to use the driver process
# mlflow_tracking_uri.
# Note that `self.mlflow_tracking_uri` value is assigned in the driver process
# then it is pickled to trainer workers.
mlflow.set_tracking_uri(self.mlflow_tracking_uri)
if self.save_best_only:
if self.monitor not in metric_dict:
_logger.warning(
"Checkpoint logging is skipped, because checkpoint 'save_best_only' config is "
"True, it requires to compare the monitored metric value, but the provided "
"monitored metric value is not available."
)
return
new_monitor_value = metric_dict[self.monitor]
if not self._is_new_checkpoint_better(new_monitor_value):
# Current checkpoint is worse than last saved checkpoint,
# so skip checkpointing.
return
self.last_monitor_value = new_monitor_value
suffix = self.checkpoint_file_suffix
if self.save_best_only:
if self.save_weights_only:
checkpoint_model_filename = (
f"{_LATEST_CHECKPOINT_PREFIX}{_CHECKPOINT_MODEL_FILENAME}"
f"{_WEIGHT_ONLY_CHECKPOINT_SUFFIX}{suffix}"
)
else:
checkpoint_model_filename = (
f"{_LATEST_CHECKPOINT_PREFIX}{_CHECKPOINT_MODEL_FILENAME}{suffix}"
)
checkpoint_metrics_filename = (
f"{_LATEST_CHECKPOINT_PREFIX}{_CHECKPOINT_METRIC_FILENAME}"
)
checkpoint_artifact_dir = _CHECKPOINT_DIR
else:
if self.save_freq == "epoch":
sub_dir_name = f"{_CHECKPOINT_EPOCH_PREFIX}{current_epoch}"
else:
sub_dir_name = f"{_CHECKPOINT_GLOBAL_STEP_PREFIX}{global_step}"
if self.save_weights_only:
checkpoint_model_filename = (
f"{_CHECKPOINT_MODEL_FILENAME}{_WEIGHT_ONLY_CHECKPOINT_SUFFIX}{suffix}"
)
else:
checkpoint_model_filename = f"{_CHECKPOINT_MODEL_FILENAME}{suffix}"
checkpoint_metrics_filename = _CHECKPOINT_METRIC_FILENAME
checkpoint_artifact_dir = f"{_CHECKPOINT_DIR}/{sub_dir_name}"
mlflow.set_tag(
LATEST_CHECKPOINT_ARTIFACT_TAG_KEY,
f"{checkpoint_artifact_dir}/{checkpoint_model_filename}",
)
mlflow.log_dict(
{**metric_dict, "epoch": current_epoch, "global_step": global_step},
f"{checkpoint_artifact_dir}/{checkpoint_metrics_filename}",
)
with TempDir() as tmp_dir:
tmp_model_save_path = os.path.join(tmp_dir.path(), checkpoint_model_filename)
self.save_checkpoint(tmp_model_save_path)
mlflow.log_artifact(tmp_model_save_path, checkpoint_artifact_dir)
def download_checkpoint_artifact(run_id=None, epoch=None, global_step=None, dst_path=None):
from mlflow.client import MlflowClient
from mlflow.utils.mlflow_tags import LATEST_CHECKPOINT_ARTIFACT_TAG_KEY
client = MlflowClient()
if run_id is None:
run = mlflow.active_run()
if run is None:
raise MlflowException(
"There is no active run, please provide the 'run_id' argument for "
"'load_checkpoint' invocation."
)
run_id = run.info.run_id
else:
run = client.get_run(run_id)
latest_checkpoint_artifact_path = run.data.tags.get(LATEST_CHECKPOINT_ARTIFACT_TAG_KEY)
if latest_checkpoint_artifact_path is None:
raise MlflowException("There is no logged checkpoint artifact in the current run.")
checkpoint_filename = posixpath.basename(latest_checkpoint_artifact_path)
if epoch is not None and global_step is not None:
raise MlflowException(
"Only one of 'epoch' and 'global_step' can be set for 'load_checkpoint'."
)
elif global_step is not None:
checkpoint_artifact_path = (
f"{_CHECKPOINT_DIR}/{_CHECKPOINT_GLOBAL_STEP_PREFIX}{global_step}/{checkpoint_filename}"
)
elif epoch is not None:
checkpoint_artifact_path = (
f"{_CHECKPOINT_DIR}/{_CHECKPOINT_EPOCH_PREFIX}{epoch}/{checkpoint_filename}"
)
else:
checkpoint_artifact_path = latest_checkpoint_artifact_path
return client.download_artifacts(run_id, checkpoint_artifact_path, dst_path=dst_path)
| MlflowModelCheckpointCallbackBase |
python | ray-project__ray | python/ray/data/tests/unit/test_datatype.py | {
"start": 13786,
"end": 16872
} | class ____:
"""Test pattern-matching DataTypes with _LogicalDataType enum."""
@pytest.mark.parametrize(
"factory_method,logical_dtype_value",
[
(lambda: DataType.list(), "list"),
(lambda: DataType.large_list(), "large_list"),
(lambda: DataType.struct(), "struct"),
(lambda: DataType.map(), "map"),
(lambda: DataType.tensor(), "tensor"),
(lambda: DataType.variable_shaped_tensor(), "tensor"),
(lambda: DataType.temporal(), "temporal"),
],
)
def test_logical_dtype_creation(self, factory_method, logical_dtype_value):
"""Test that logical DataTypes have correct _logical_dtype."""
from ray.data.datatype import _LogicalDataType
dt = factory_method()
assert dt._physical_dtype is None
assert dt._logical_dtype == _LogicalDataType(logical_dtype_value)
assert isinstance(dt._logical_dtype, _LogicalDataType)
@pytest.mark.parametrize(
"factory_method,expected_repr",
[
(lambda: DataType.list(), "DataType(logical_dtype:LIST)"),
(lambda: DataType.large_list(), "DataType(logical_dtype:LARGE_LIST)"),
(lambda: DataType.struct(), "DataType(logical_dtype:STRUCT)"),
(lambda: DataType.map(), "DataType(logical_dtype:MAP)"),
(lambda: DataType.tensor(), "DataType(logical_dtype:TENSOR)"),
(
lambda: DataType.variable_shaped_tensor(),
"DataType(logical_dtype:TENSOR)",
),
(lambda: DataType.temporal(), "DataType(logical_dtype:TEMPORAL)"),
],
)
def test_logical_dtype_repr(self, factory_method, expected_repr):
"""Test __repr__ for logical DataTypes."""
dt = factory_method()
assert repr(dt) == expected_repr
@pytest.mark.parametrize(
"dt1_factory,dt2_factory,should_be_equal",
[
# Same logical DataTypes should be equal (including explicit ANY form)
(lambda: DataType.list(), lambda: DataType.list(DataType.ANY), True),
(lambda: DataType.list(), lambda: DataType.list(), True),
(lambda: DataType.struct(), lambda: DataType.struct(DataType.ANY), True),
(
lambda: DataType.tensor(),
lambda: DataType.variable_shaped_tensor(),
True,
),
# Different logical DataTypes should not be equal
(lambda: DataType.list(), lambda: DataType.large_list(), False),
(lambda: DataType.list(), lambda: DataType.struct(), False),
(lambda: DataType.map(), lambda: DataType.temporal(), False),
],
)
def test_logical_dtype_equality(self, dt1_factory, dt2_factory, should_be_equal):
"""Test equality between logical DataTypes."""
dt1 = dt1_factory()
dt2 = dt2_factory()
if should_be_equal:
assert dt1 == dt2
assert hash(dt1) == hash(dt2)
else:
assert dt1 != dt2
| TestLogicalDataTypes |
python | ray-project__ray | python/ray/experimental/util/types.py | {
"start": 322,
"end": 418
} | class ____(_CollectiveOp):
reduceOp: ReduceOp = ReduceOp.SUM
@PublicAPI
@dataclass
| AllReduceOp |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 113537,
"end": 113732
} | class ____:
xlSlicerSortAscending = 2 # from enum XlSlicerSort
xlSlicerSortDataSourceOrder = 1 # from enum XlSlicerSort
xlSlicerSortDescending = 3 # from enum XlSlicerSort
| SlicerSort |
python | apache__airflow | providers/apache/beam/src/airflow/providers/apache/beam/operators/beam.py | {
"start": 6948,
"end": 13087
} | class ____(BaseOperator, BeamDataflowMixin, ABC):
"""
Abstract base class for Beam Pipeline Operators.
:param runner: Runner on which pipeline will be run. By default, "DirectRunner" is being used.
Other possible options: DataflowRunner, SparkRunner, FlinkRunner, PortableRunner.
See: :class:`~providers.apache.beam.hooks.beam.BeamRunnerType`
See: https://beam.apache.org/documentation/runners/capability-matrix/
:param default_pipeline_options: Map of default pipeline options.
:param pipeline_options: Map of pipeline options.The key must be a dictionary.
The value can contain different types:
* If the value is None, the single option - ``--key`` (without value) will be added.
* If the value is False, this option will be skipped
* If the value is True, the single option - ``--key`` (without value) will be added.
* If the value is list, the many options will be added for each key.
If the value is ``['A', 'B']`` and the key is ``key`` then the ``--key=A --key=B`` options
will be left
* Other value types will be replaced with the Python textual representation.
When defining labels (labels option), you can also provide a dictionary.
:param gcp_conn_id: Optional.
The connection ID to use connecting to Google Cloud Storage if python file is on GCS.
:param dataflow_config: Dataflow's configuration, used when runner type is set to DataflowRunner,
(optional) defaults to None.
"""
def __init__(
self,
*,
runner: str = "DirectRunner",
default_pipeline_options: dict | None = None,
pipeline_options: dict | None = None,
gcp_conn_id: str = "google_cloud_default",
dataflow_config: DataflowConfiguration | dict | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.runner = runner
self.default_pipeline_options = default_pipeline_options or {}
self.pipeline_options = pipeline_options or {}
# ``dataflow_config`` type will resolve into the execute method
self.dataflow_config = dataflow_config or {} # type: ignore[assignment]
self.gcp_conn_id = gcp_conn_id
self.beam_hook: BeamHook
self.dataflow_hook: DataflowHook | None = None
self._dataflow_job_id: str | None = None
self._execute_context: Context | None = None
@property
def dataflow_job_id(self):
return self._dataflow_job_id
@dataflow_job_id.setter
def dataflow_job_id(self, new_value):
if all([new_value, not self._dataflow_job_id, self._execute_context]):
# push job_id as soon as it's ready, to let Sensors work before the job finished
# and job_id pushed as returned value item.
# Use task instance to push XCom (works for both Airflow 2.x and 3.x)
self._execute_context["ti"].xcom_push(key="dataflow_job_id", value=new_value)
self._dataflow_job_id = new_value
def _cast_dataflow_config(self):
if isinstance(self.dataflow_config, dict):
self.dataflow_config = DataflowConfiguration(**self.dataflow_config)
else:
self.dataflow_config = self.dataflow_config or DataflowConfiguration()
if not self.dataflow_config.job_name:
self.dataflow_config.job_name = self.task_id
if self.dataflow_config and self.runner.lower() != BeamRunnerType.DataflowRunner.lower():
self.log.warning(
"dataflow_config is defined but runner is different than DataflowRunner (%s)", self.runner
)
def _init_pipeline_options(
self,
format_pipeline_options: bool = False,
job_name_variable_key: str | None = None,
) -> tuple[bool, str | None, dict, Callable[[str], None] | None, Callable[[], bool] | None]:
self.beam_hook = BeamHook(runner=self.runner)
pipeline_options = self.default_pipeline_options.copy()
process_line_callback: Callable[[str], None] | None = None
is_dataflow_job_id_exist_callback: Callable[[], bool] | None = None
is_dataflow = self.runner.lower() == BeamRunnerType.DataflowRunner.lower()
dataflow_job_name: str | None = None
if is_dataflow:
(
dataflow_job_name,
pipeline_options,
process_line_callback,
is_dataflow_job_id_exist_callback,
) = self._set_dataflow(
pipeline_options=pipeline_options,
job_name_variable_key=job_name_variable_key,
)
self.log.info(pipeline_options)
pipeline_options.update(self.pipeline_options)
if format_pipeline_options:
snake_case_pipeline_options = {
convert_camel_to_snake(key): pipeline_options[key] for key in pipeline_options
}
return (
is_dataflow,
dataflow_job_name,
snake_case_pipeline_options,
process_line_callback,
is_dataflow_job_id_exist_callback,
)
return (
is_dataflow,
dataflow_job_name,
pipeline_options,
process_line_callback,
is_dataflow_job_id_exist_callback,
)
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"project_id": self.dataflow_config.project_id,
"region": self.dataflow_config.location,
"job_id": self.dataflow_job_id,
}
def execute_complete(self, context: Context, event: dict[str, Any]):
"""
Execute when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was
successful.
"""
if event["status"] == "error":
raise AirflowException(event["message"])
self.log.info(
"%s completed with response %s ",
self.task_id,
event["message"],
)
| BeamBasePipelineOperator |
python | Pylons__pyramid | src/pyramid/httpexceptions.py | {
"start": 23755,
"end": 24598
} | class ____(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the server did not find anything matching the
Request-URI.
code: 404, title: Not Found
Raise this exception within :term:`view` code to immediately
return the :term:`Not Found View` to the invoking user. Usually
this is a basic ``404`` page, but the Not Found View can be
customized as necessary. See :ref:`changing_the_notfound_view`.
This exception's constructor accepts a ``detail`` argument
(the first argument), which should be a string. The value of this
string will be available as the ``message`` attribute of this exception,
for availability to the :term:`Not Found View`.
"""
code = 404
title = 'Not Found'
explanation = 'The resource could not be found.'
| HTTPNotFound |
python | bokeh__bokeh | tests/unit/bokeh/util/test_version.py | {
"start": 1464,
"end": 2365
} | class ____:
def test_actual(self) -> None:
assert buv.is_full_release() == bool(VERSION_PAT.match(buv.__version__))
def test_mock_full(self, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr(buv, '__version__', "1.5.0")
assert buv.is_full_release()
@pytest.mark.parametrize('v', ("1.2.3.dev2", "1.4.5.rc3", "junk"))
def test_mock_not_full(self, monkeypatch: pytest.MonkeyPatch, v: str) -> None:
monkeypatch.setattr(buv, '__version__', v)
assert not buv.is_full_release()
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
| Test_is_full_release |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/multiple_sources.py | {
"start": 2684,
"end": 5694
} | class ____:
def multi_sink(self, user_controlled, permissive_context):
pass
def muliple_main_issues_1(a_or_b: Union[A, B]):
# Due to multiple potential callables at the same call site
a_or_b.multi_sink(user_controlled_input(), permissive_context())
def muliple_main_issues_2():
# Due to joining the issue handles from multiple call sites
vc = permissive_context()
multiple_triggered_context(vc)
def multiple_triggered_context(vc):
id1 = user_controlled_input()
Node.get(id1).send(vc)
id2 = user_controlled_input()
Node.get(id2).send(vc)
def wrapper_node_get_send(id, vc):
Node.get(id).send(vc)
def issue_with_wrapper_node_get_send():
id = user_controlled_input()
vc = permissive_context()
wrapper_node_get_send(id, vc)
def wrapper_node_get_send_triggered_context(vc):
id = user_controlled_input()
# We should see a triggered partial sink here
wrapper_node_get_send(id, vc)
def issue_with_wrapper_node_get_send_triggered_context():
vc = permissive_context()
wrapper_node_get_send_triggered_context(vc)
def wrapper_node_send(id, vc):
# Expect no partial sink, because we need two partial sinks on different
# parameters to file an issue
id = 0
Node.get(id).send(vc)
def wrapper_combined_node_get_send(combined):
# Expect no partial sink, because we need two partial sinks on different
# parameters to file an issue
if 1 == 1:
id = combined
else:
vc = combined
Node.get(id).send(vc)
def wrapper_mismatched_partial_sinks(id, vc):
# Expect no partial sink, because the two partial sinks are from different
# call sites.
Node.get(id).send(0)
Node.get(0).send(vc)
# Share both partial sink kinds in multiple rules
def c_source(): ...
def d_source(): ...
def e_source(): ...
def multi_sink_share_both_sinks(x, y): ...
def demonstrate_triggered_c_from_d_and_e(x):
multi_sink_share_both_sinks(x, d_source())
multi_sink_share_both_sinks(x, e_source())
def issue_with_triggered_c_from_d_and_e():
# Should see two issues
demonstrate_triggered_c_from_d_and_e(c_source())
def demonstrate_triggered_c_from_d(x):
multi_sink_share_both_sinks(x, d_source())
def issue_with_triggered_c_from_d():
# Should see one issue
demonstrate_triggered_c_from_d(c_source())
def demonstrate_triggered_c_from_d_or_e(x):
if 1 == 1:
multi_sink_share_both_sinks(x, d_source())
else:
multi_sink_share_both_sinks(x, e_source())
def issue_with_triggered_c_from_d_or_e():
# Should see two issues
demonstrate_triggered_c_from_d_or_e(c_source())
def demonstrate_triggered_d_and_e(y):
multi_sink_share_both_sinks(c_source(), y)
def issue_with_triggered_d_and_e():
demonstrate_triggered_d_and_e(d_source())
demonstrate_triggered_d_and_e(e_source())
def combine_c_d_and_c_e():
multi_sink_share_both_sinks(c_source(), d_source())
multi_sink_share_both_sinks(c_source(), e_source())
| B |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/hashability2.py | {
"start": 513,
"end": 552
} | class ____:
def __hash__(self): ...
| E |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/supervised_learning/gradient_boosting.py | {
"start": 3690,
"end": 4281
} | class ____(GradientBoosting):
def __init__(self, n_estimators=200, learning_rate=.5, min_samples_split=2,
min_info_gain=1e-7, max_depth=2, debug=False):
super(GradientBoostingClassifier, self).__init__(n_estimators=n_estimators,
learning_rate=learning_rate,
min_samples_split=min_samples_split,
min_impurity=min_info_gain,
max_depth=max_depth,
regression=False)
def fit(self, X, y):
y = to_categorical(y)
super(GradientBoostingClassifier, self).fit(X, y)
| GradientBoostingClassifier |
python | spack__spack | lib/spack/spack/repo.py | {
"start": 6195,
"end": 10127
} | class ____:
# Wrapper around Executable for git to set working directory for all
# invocations.
#
# Not using -C as that is not supported for git < 1.8.5.
def __init__(self, packages_path: str):
self._git_cmd = spack.util.git.git(required=True)
self.packages_dir = packages_path
def __call__(self, *args, **kwargs) -> str:
with working_dir(self.packages_dir):
return self._git_cmd(*args, **kwargs, output=str)
def list_packages(rev: str, repo: "Repo") -> List[str]:
"""List all packages associated with the given revision"""
git = GitExe(repo.packages_path)
# git ls-tree does not support ... merge-base syntax, so do it manually
if rev.endswith("..."):
ref = rev.replace("...", "")
rev = git("merge-base", ref, "HEAD").strip()
output = git("ls-tree", "-r", "--name-only", rev)
# recursively list the packages directory
package_paths = [
line.split(os.sep) for line in output.split("\n") if line.endswith("package.py")
]
# take the directory names with one-level-deep package files
package_names = [
nm.pkg_dir_to_pkg_name(line[0], repo.package_api)
for line in package_paths
if len(line) == 2
]
return sorted(set(package_names))
def diff_packages(rev1: str, rev2: str, repo: "Repo") -> Tuple[Set[str], Set[str]]:
"""Compute packages lists for the two revisions and return a tuple
containing all the packages in rev1 but not in rev2 and all the
packages in rev2 but not in rev1."""
p1 = set(list_packages(rev1, repo))
p2 = set(list_packages(rev2, repo))
return p1.difference(p2), p2.difference(p1)
def get_all_package_diffs(type: str, repo: "Repo", rev1="HEAD^1", rev2="HEAD") -> Set[str]:
"""Get packages changed, added, or removed (or any combination of those) since a commit.
Arguments:
type: String containing one or more of ``A``, ``R``, ``C``.
rev1: Revision to compare against, default is ``"HEAD^"``
rev2: Revision to compare to rev1, default is ``"HEAD"``
"""
lower_type = type.lower()
if not re.match("^[arc]*$", lower_type):
tty.die(
f"Invalid change type: '{type}'. "
"Can contain only A (added), R (removed), or C (changed)"
)
removed, added = diff_packages(rev1, rev2, repo)
git = GitExe(repo.packages_path)
out = git("diff", "--relative", "--name-only", rev1, rev2).strip()
lines = [] if not out else re.split(r"\s+", out)
changed: Set[str] = set()
for path in lines:
dir_name, _, _ = path.partition("/")
if not nm.valid_module_name(dir_name, repo.package_api):
continue
pkg_name = nm.pkg_dir_to_pkg_name(dir_name, repo.package_api)
if pkg_name not in added and pkg_name not in removed:
changed.add(pkg_name)
packages: Set[str] = set()
if "a" in lower_type:
packages |= added
if "r" in lower_type:
packages |= removed
if "c" in lower_type:
packages |= changed
return packages
def add_package_to_git_stage(packages: List[str], repo: "Repo") -> None:
"""add a package to the git stage with ``git add``"""
git = GitExe(repo.packages_path)
for pkg_name in packages:
filename = PATH.filename_for_package_name(pkg_name)
if not os.path.isfile(filename):
tty.die(f"No such package: {pkg_name}. Path does not exist:", filename)
git("add", filename)
def autospec(function):
"""Decorator that automatically converts the first argument of a
function to a Spec.
"""
@functools.wraps(function)
def converter(self, spec_like, *args, **kwargs):
from spack.spec import Spec
if not isinstance(spec_like, Spec):
spec_like = Spec(spec_like)
return function(self, spec_like, *args, **kwargs)
return converter
| GitExe |
python | google__pytype | pytype/abstract/_pytd_function.py | {
"start": 1699,
"end": 3584
} | class ____:
"""Function call matches."""
def __init__(self, args: function.Args, can_match_multiple: bool) -> None:
self._args_vars = set(args.get_variables())
self._can_match_multiple = can_match_multiple
self._data: "list[list[tuple[PyTDSignature, dict[str, cfg.Variable], matcher.GoodMatch]]]" = ([])
self._cur_data: "list[list[tuple[PyTDSignature, dict[str, cfg.Variable], matcher.GoodMatch]]] | None" = (None)
self._sig: PyTDSignature | None = None
def __bool__(self) -> bool:
return bool(self._data)
@contextlib.contextmanager
def with_signature(self, sig: "PyTDSignature") -> Generator[None, None, None]:
"""Sets the signature that we are collecting matches for."""
assert self._sig is self._cur_data is None
self._sig = sig
# We collect data for the current signature separately and merge it in at
# the end so that add() does not wastefully iterate over the new data.
self._cur_data = []
try:
yield
finally:
self._data.extend(self._cur_data)
self._sig = None
self._cur_data = None
def add(
self, arg_dict: dict[str, cfg.Variable], match: "matcher.GoodMatch"
) -> None:
"""Adds a new match."""
for sigs in self._data:
if sigs[-1][0] == self._sig:
continue
new_view = match.view.accessed_subset
old_view = sigs[0][2].view.accessed_subset
if all(new_view[k] == old_view[k] for k in new_view if k in old_view):
if self._can_match_multiple:
sigs.append((self._sig, arg_dict, match))
break
else:
assert self._cur_data is not None
self._cur_data.append([(self._sig, arg_dict, match)])
def get(
self,
) -> "list[list[tuple[PyTDSignature, dict[str, cfg.Variable], matcher.GoodMatch]]]":
"""Gets the matches."""
return self._data # pytype: disable=bad-return-type
| _MatchedSignatures |
python | coleifer__peewee | tests/manytomany.py | {
"start": 19302,
"end": 19477
} | class ____(TestModel):
name = CharField()
accounts = ManyToManyField(Account, backref='lists')
AccountListThrough = AccountList.accounts.get_through_model()
| AccountList |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_functions.py | {
"start": 1283,
"end": 4685
} | class ____:
def setup_method(self):
with mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_no_default_project_id,
):
self.gcf_function_hook_no_project_id = CloudFunctionsHook(gcp_conn_id="test", api_version="v1")
@mock.patch("airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook._authorize")
@mock.patch("airflow.providers.google.cloud.hooks.functions.build")
def test_gcf_client_creation(self, mock_build, mock_authorize):
result = self.gcf_function_hook_no_project_id.get_conn()
mock_build.assert_called_once_with(
"cloudfunctions", "v1", http=mock_authorize.return_value, cache_discovery=False
)
assert mock_build.return_value == result
assert self.gcf_function_hook_no_project_id._conn == result
@mock.patch("airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook.get_conn")
@mock.patch(
"airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook._wait_for_operation_to_complete"
)
def test_create_new_function_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
create_method = (
get_conn.return_value.projects.return_value.locations.return_value.functions.return_value.create
)
execute_method = create_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gcf_function_hook_no_project_id.create_new_function(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST, location=GCF_LOCATION, body={}
)
assert res is None
create_method.assert_called_once_with(body={}, location="projects/example-project/locations/location")
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(operation_name="operation_id")
@mock.patch("requests.put")
@mock.patch("airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook.get_conn")
def test_upload_function_zip_overridden_project_id(self, get_conn, requests_put):
mck, open_module = get_open_mock()
with mock.patch(f"{open_module}.open", mck):
generate_upload_url_method = get_conn.return_value.projects.return_value.locations.return_value.functions.return_value.generateUploadUrl
execute_method = generate_upload_url_method.return_value.execute
execute_method.return_value = {"uploadUrl": "http://uploadHere"}
requests_put.return_value = None
res = self.gcf_function_hook_no_project_id.upload_function_zip(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST, location=GCF_LOCATION, zip_path="/tmp/path.zip"
)
assert res == "http://uploadHere"
generate_upload_url_method.assert_called_once_with(
parent="projects/example-project/locations/location"
)
execute_method.assert_called_once_with(num_retries=5)
requests_put.assert_called_once_with(
data=mock.ANY,
headers={"Content-type": "application/zip", "x-goog-content-length-range": "0,104857600"},
url="http://uploadHere",
)
| TestFunctionHookNoDefaultProjectId |
python | spack__spack | lib/spack/spack/multimethod.py | {
"start": 1368,
"end": 2008
} | class ____(type):
"""This allows us to track the class's dict during instantiation."""
#: saved dictionary of attrs on the class being constructed
_locals = None
@classmethod
def __prepare__(cls, name, bases, **kwargs):
"""Save the dictionary that will be used for the class namespace."""
MultiMethodMeta._locals = dict()
return MultiMethodMeta._locals
def __init__(cls, name, bases, attr_dict):
"""Clear out the cached locals dict once the class is built."""
MultiMethodMeta._locals = None
super(MultiMethodMeta, cls).__init__(name, bases, attr_dict)
| MultiMethodMeta |
python | pytorch__pytorch | torch/nn/modules/loss.py | {
"start": 77061,
"end": 81864
} | class ____(_Loss):
r"""Creates a criterion that measures the triplet loss given an input
tensors :math:`x1`, :math:`x2`, :math:`x3` and a margin with a value greater than :math:`0`.
This is used for measuring a relative similarity between samples. A triplet
is composed by `a`, `p` and `n` (i.e., `anchor`, `positive examples` and `negative
examples` respectively). The shapes of all input tensors should be
:math:`(N, D)`.
The distance swap is described in detail in the paper `Learning shallow
convolutional feature descriptors with triplet losses`_ by
V. Balntas, E. Riba et al.
The loss function for each sample in the mini-batch is:
.. math::
L(a, p, n) = \max \{d(a_i, p_i) - d(a_i, n_i) + {\rm margin}, 0\}
where
.. math::
d(x_i, y_i) = \left\lVert {\bf x}_i - {\bf y}_i \right\rVert_p
The norm is calculated using the specified p value and a small constant :math:`\varepsilon` is
added for numerical stability.
See also :class:`~torch.nn.TripletMarginWithDistanceLoss`, which computes the
triplet margin loss for input tensors using a custom distance function.
Args:
margin (float, optional): Default: :math:`1`.
p (int, optional): The norm degree for pairwise distance. Default: :math:`2`.
eps (float, optional): Small constant for numerical stability. Default: :math:`1e-6`.
swap (bool, optional): The distance swap is described in detail in the paper
`Learning shallow convolutional feature descriptors with triplet losses` by
V. Balntas, E. Riba et al. Default: ``False``.
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
Shape:
- Input: :math:`(N, D)` or :math:`(D)` where :math:`D` is the vector dimension.
- Output: A Tensor of shape :math:`(N)` if :attr:`reduction` is ``'none'`` and
input shape is :math:`(N, D)`; a scalar otherwise.
Examples:
>>> triplet_loss = nn.TripletMarginLoss(margin=1.0, p=2, eps=1e-7)
>>> anchor = torch.randn(100, 128, requires_grad=True)
>>> positive = torch.randn(100, 128, requires_grad=True)
>>> negative = torch.randn(100, 128, requires_grad=True)
>>> output = triplet_loss(anchor, positive, negative)
>>> output.backward()
.. _Learning shallow convolutional feature descriptors with triplet losses:
https://bmva-archive.org.uk/bmvc/2016/papers/paper119/index.html
"""
__constants__ = ["margin", "p", "eps", "swap", "reduction"]
margin: float
p: float
eps: float
swap: bool
def __init__(
self,
margin: float = 1.0,
p: float = 2.0,
eps: float = 1e-6,
swap: bool = False,
size_average=None,
reduce=None,
reduction: str = "mean",
) -> None:
super().__init__(size_average, reduce, reduction)
if margin <= 0:
raise ValueError(
f"TripletMarginLoss: expected margin to be greater than 0, got {margin} instead"
)
self.margin = margin
self.p = p
self.eps = eps
self.swap = swap
def forward(self, anchor: Tensor, positive: Tensor, negative: Tensor) -> Tensor:
"""Runs the forward pass."""
return F.triplet_margin_loss(
anchor,
positive,
negative,
margin=self.margin,
p=self.p,
eps=self.eps,
swap=self.swap,
reduction=self.reduction,
)
| TripletMarginLoss |
python | pytest-dev__pytest-asyncio | pytest_asyncio/plugin.py | {
"start": 16111,
"end": 16909
} | class ____(PytestAsyncioFunction):
"""Pytest item created by an asynchronous generator"""
@staticmethod
def _can_substitute(item: Function) -> bool:
func = item.obj
return inspect.isasyncgenfunction(func)
@classmethod
def _from_function(cls, function: Function, /) -> Function:
async_gen_item = super()._from_function(function)
unsupported_item_type_message = (
f"Tests based on asynchronous generators are not supported. "
f"{function.name} will be ignored."
)
async_gen_item.warn(PytestCollectionWarning(unsupported_item_type_message))
async_gen_item.add_marker(
pytest.mark.xfail(run=False, reason=unsupported_item_type_message)
)
return async_gen_item
| AsyncGenerator |
python | pytorch__pytorch | test/dynamo/test_flat_apply.py | {
"start": 3664,
"end": 5374
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[10]", L_y_: "f32[10]"):
l_x_ = L_x_
l_y_ = L_y_
t: "f32[10]" = l_x_ + l_y_
trace_point_tensor_spec : torch.utils._pytree.TreeSpec = self.trace_point_tensor_spec
trace_point_tensor_input_spec : torch.utils._pytree.TreeSpec = self.trace_point_tensor_input_spec
res: "f32[10]" = torch.ops.higher_order.flat_apply(trace_point_tensor_spec, trace_point_tensor_input_spec, l_x_, l_y_, t); trace_point_tensor_spec = trace_point_tensor_input_spec = l_x_ = l_y_ = t = None
return (res,)
""", # NOQA: B950
)
def test_nonstrict_trace_captured_tensor_post_aot_graph(self):
cst = torch.ones(1)
@torch._dynamo.nonstrict_trace
def trace_me(x, y):
torch._dynamo.graph_break()
return x * y + cst
backend = AotEagerAndRecordGraphs()
@torch.compile(fullgraph=True, backend=backend)
def fn(x, y):
return trace_me(x, y)
fn(torch.randn(10), torch.randn(10))
self.assertExpectedInline(
normalize_gm(backend.fw_graphs[0].print_readable(print_output=False)),
"""\
class <lambda>(torch.nn.Module):
def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"):
mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None
_tensor_constant0: "f32[1]" = self._tensor_constant0
add: "f32[10]" = torch.ops.aten.add.Tensor(mul, _tensor_constant0); mul = _tensor_constant0 = None
return (add,)
""", # NOQA: B950
)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| GraphModule |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_trends.py | {
"start": 16329,
"end": 18100
} | class ____(OrganizationEventsTrendsEndpointBase):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def build_result_handler(
self,
request,
organization,
snuba_params,
trend_function,
selected_columns,
orderby,
query,
):
def on_results(events_results):
def get_event_stats(
query_columns, query, snuba_params, rollup, zerofill_results, _=None
):
return discover.top_events_timeseries(
query_columns,
selected_columns,
query,
snuba_params,
orderby,
rollup,
min(5, len(events_results["data"])),
organization,
top_events=events_results,
referrer="api.trends.get-event-stats",
zerofill_results=zerofill_results,
)
stats_results = (
self.get_event_stats_data(
request,
organization,
get_event_stats,
top_events=True,
query_column=trend_function,
snuba_params=snuba_params,
query=query,
)
if len(events_results["data"]) > 0
else {}
)
return {
"events": self.handle_results_with_meta(
request, organization, snuba_params.project_ids, events_results
),
"stats": stats_results,
}
return on_results
@region_silo_endpoint
| OrganizationEventsTrendsStatsEndpoint |
python | pytorch__pytorch | torch/_inductor/config.py | {
"start": 74083,
"end": 74294
} | class ____:
# dynamic_linkage=False
# link_libtorch=False
# package_cpp_only=True
# embed_kernel_binary=True
# emit_multi_arch_kernel=True
compile_standalone: bool = False
| aot_inductor_mode |
python | MongoEngine__mongoengine | mongoengine/errors.py | {
"start": 525,
"end": 571
} | class ____(AttributeError):
pass
| LookUpError |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/nested_specificity.py | {
"start": 512,
"end": 796
} | class ____(BaseTester):
DEFAULT_CSS = """
NestedCSS {
width: 1fr;
height: 1fr;
&:focus {
background: green 20%;
border: round green;
}
background: green 10%;
border: blank;
}
"""
| NestedCSS |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/scalarint.py | {
"start": 2876,
"end": 3117
} | class ____(ScalarInt):
def __new__(cls, value, width=None, underscore=None, anchor=None):
# type: (Any, Any, Any, Any) -> Any
return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor)
| BinaryInt |
python | sympy__sympy | sympy/liealgebras/type_e.py | {
"start": 140,
"end": 8280
} | class ____(Standard_Cartan):
def __new__(cls, n):
if n < 6 or n > 8:
raise ValueError("Invalid value of n")
return Standard_Cartan.__new__(cls, "E", n)
def dimension(self):
"""Dimension of the vector space V underlying the Lie algebra
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("E6")
>>> c.dimension()
8
"""
return 8
def basic_root(self, i, j):
"""
This is a method just to generate roots
with a -1 in the ith position and a 1
in the jth position.
"""
root = [0]*8
root[i] = -1
root[j] = 1
return root
def simple_root(self, i):
"""
Every Lie algebra has a unique root system.
Given a root system Q, there is a subset of the
roots such that an element of Q is called a
simple root if it cannot be written as the sum
of two elements in Q. If we let D denote the
set of simple roots, then it is clear that every
element of Q can be written as a linear combination
of elements of D with all coefficients non-negative.
This method returns the ith simple root for E_n.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("E6")
>>> c.simple_root(2)
[1, 1, 0, 0, 0, 0, 0, 0]
"""
n = self.n
if i == 1:
root = [-0.5]*8
root[0] = 0.5
root[7] = 0.5
return root
elif i == 2:
root = [0]*8
root[1] = 1
root[0] = 1
return root
else:
if i in (7, 8) and n == 6:
raise ValueError("E6 only has six simple roots!")
if i == 8 and n == 7:
raise ValueError("E7 only has seven simple roots!")
return self.basic_root(i - 3, i - 2)
def positive_roots(self):
"""
This method generates all the positive roots of
A_n. This is half of all of the roots of E_n;
by multiplying all the positive roots by -1 we
get the negative roots.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("A3")
>>> c.positive_roots()
{1: [1, -1, 0, 0], 2: [1, 0, -1, 0], 3: [1, 0, 0, -1], 4: [0, 1, -1, 0],
5: [0, 1, 0, -1], 6: [0, 0, 1, -1]}
"""
n = self.n
neghalf = Rational(-1, 2)
poshalf = S.Half
if n == 6:
posroots = {}
k = 0
for i in range(n-1):
for j in range(i+1, n-1):
k += 1
root = self.basic_root(i, j)
posroots[k] = root
k += 1
root = self.basic_root(i, j)
root[i] = 1
posroots[k] = root
root = [poshalf, poshalf, poshalf, poshalf, poshalf,
neghalf, neghalf, poshalf]
for a, b, c, d, e in itertools.product(
range(2), range(2), range(2), range(2), range(2)):
if (a + b + c + d + e)%2 == 0:
k += 1
if a == 1:
root[0] = neghalf
if b == 1:
root[1] = neghalf
if c == 1:
root[2] = neghalf
if d == 1:
root[3] = neghalf
if e == 1:
root[4] = neghalf
posroots[k] = root[:]
return posroots
if n == 7:
posroots = {}
k = 0
for i in range(n-1):
for j in range(i+1, n-1):
k += 1
root = self.basic_root(i, j)
posroots[k] = root
k += 1
root = self.basic_root(i, j)
root[i] = 1
posroots[k] = root
k += 1
posroots[k] = [0, 0, 0, 0, 0, 1, 1, 0]
root = [poshalf, poshalf, poshalf, poshalf, poshalf,
neghalf, neghalf, poshalf]
for a, b, c, d, e, f in itertools.product(
range(2), range(2), range(2), range(2), range(2), range(2)):
if (a + b + c + d + e + f)%2 == 0:
k += 1
if a == 1:
root[0] = neghalf
if b == 1:
root[1] = neghalf
if c == 1:
root[2] = neghalf
if d == 1:
root[3] = neghalf
if e == 1:
root[4] = neghalf
if f == 1:
root[5] = poshalf
posroots[k] = root[:]
return posroots
if n == 8:
posroots = {}
k = 0
for i in range(n):
for j in range(i+1, n):
k += 1
root = self.basic_root(i, j)
posroots[k] = root
k += 1
root = self.basic_root(i, j)
root[i] = 1
posroots[k] = root
root = [poshalf, poshalf, poshalf, poshalf, poshalf,
neghalf, neghalf, poshalf]
for a, b, c, d, e, f, g in itertools.product(
range(2), range(2), range(2), range(2), range(2),
range(2), range(2)):
if (a + b + c + d + e + f + g)%2 == 0:
k += 1
if a == 1:
root[0] = neghalf
if b == 1:
root[1] = neghalf
if c == 1:
root[2] = neghalf
if d == 1:
root[3] = neghalf
if e == 1:
root[4] = neghalf
if f == 1:
root[5] = poshalf
if g == 1:
root[6] = poshalf
posroots[k] = root[:]
return posroots
def roots(self):
"""
Returns the total number of roots of E_n
"""
n = self.n
if n == 6:
return 72
if n == 7:
return 126
if n == 8:
return 240
def cartan_matrix(self):
"""
Returns the Cartan matrix for G_2
The Cartan matrix matrix for a Lie algebra is
generated by assigning an ordering to the simple
roots, (alpha[1], ...., alpha[l]). Then the ijth
entry of the Cartan matrix is (<alpha[i],alpha[j]>).
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType('A4')
>>> c.cartan_matrix()
Matrix([
[ 2, -1, 0, 0],
[-1, 2, -1, 0],
[ 0, -1, 2, -1],
[ 0, 0, -1, 2]])
"""
n = self.n
m = 2*eye(n)
for i in range(3, n - 1):
m[i, i+1] = -1
m[i, i-1] = -1
m[0, 2] = m[2, 0] = -1
m[1, 3] = m[3, 1] = -1
m[2, 3] = -1
m[n-1, n-2] = -1
return m
def basis(self):
"""
Returns the number of independent generators of E_n
"""
n = self.n
if n == 6:
return 78
if n == 7:
return 133
if n == 8:
return 248
def dynkin_diagram(self):
n = self.n
diag = " "*8 + str(2) + "\n"
diag += " "*8 + "0\n"
diag += " "*8 + "|\n"
diag += " "*8 + "|\n"
diag += "---".join("0" for i in range(1, n)) + "\n"
diag += "1 " + " ".join(str(i) for i in range(3, n+1))
return diag
| TypeE |
python | tensorflow__tensorflow | tensorflow/dtensor/python/tests/conv_test.py | {
"start": 8840,
"end": 12494
} | class ____(test_util.DTensorBaseTest):
def setUp(self):
super().setUp()
# TODO(b/261485237): Enable CPU testing once CollectivePermute is supported
# on CPU's.
if not test_util.is_tpu_present():
self.skipTest('This test only runs on TPUs.')
def _create_mesh(self, mesh_dims, topology):
global_ids = test_util.create_device_ids_array(topology)
local_ids = np.ravel(global_ids).tolist()
mesh_dict = {}
for device in ('CPU', 'GPU', 'TPU'):
mesh_dict[device] = Mesh(
mesh_dims,
global_ids,
local_ids,
test_util.create_device_list(topology, device),
)
return self.configTestMesh(mesh_dict)
@parameterized.named_parameters(
test_util.product(*SPATIALLY_PARTITIONED_CONV_TEST_CASES))
def testConv(self, input_shape, kernel_shape, padding, sharding_specs,
topology):
mesh_dims = [spec for spec in sharding_specs if spec != UNSHARDED]
mesh = self._create_mesh(mesh_dims, topology)
x_in = constant_op.constant(
np.random.random(size=input_shape), dtype=dtypes.float32
)
kernel_in = constant_op.constant(
np.random.random(size=kernel_shape), dtype=dtypes.float32
)
expected_output = nn_ops.conv2d_v2(
x_in, kernel_in, strides=[1, 1, 1, 1], padding=padding
)
input_layout = Layout(sharding_specs, mesh)
kernel_layout = Layout.replicated(mesh, 4)
d_x_in = numpy_util.pack_numpy(x_in, input_layout)
d_kernel_in = numpy_util.pack_numpy(kernel_in, kernel_layout)
d_output = nn_ops.conv2d_v2(
d_x_in, d_kernel_in, strides=[1, 1, 1, 1], padding=padding
)
self.assertDTensorEqual(expected_output, input_layout, d_output)
@parameterized.named_parameters(
test_util.product(*SPATIALLY_PARTITIONED_CONV_TEST_CASES))
def testConvWithGradient(self, input_shape, kernel_shape, padding,
sharding_specs, topology):
# TODO(b/208700444): add support for SPMD expansion of spatially partitioned
# conv backprop.
self.skipTest(
'b/208700444: Spatially partitioned conv backprop not implemented.')
mesh_dims = [spec for spec in sharding_specs if spec != UNSHARDED]
mesh = self._create_mesh(mesh_dims, topology)
x_in = constant_op.constant(
np.random.random(size=input_shape), dtype=dtypes.float32
)
kernel_in = constant_op.constant(
np.random.random(size=kernel_shape), dtype=dtypes.float32
)
@polymorphic_function.function
def conv_fn(inputs, img_kernel, padding):
with backprop.GradientTape() as tape:
tape.watch([inputs, img_kernel])
output = nn_ops.conv2d_v2(
inputs, img_kernel, strides=[1, 1, 1, 1], padding=padding
)
inputs_grad, kernel_grad = tape.gradient(output, [inputs, img_kernel])
return output, inputs_grad, kernel_grad
expected_output, expected_inputs_grad, expected_kernel_grad = conv_fn(
x_in, kernel_in, padding)
input_layout = Layout(sharding_specs, mesh)
kernel_layout = Layout.replicated(mesh, 4)
d_x_in = numpy_util.pack_numpy(x_in, input_layout)
d_kernel_in = numpy_util.pack_numpy(kernel_in, kernel_layout)
d_output, d_inputs_grad, d_kernel_grad = conv_fn(d_x_in, d_kernel_in,
padding)
self.assertDTensorEqual(expected_output, input_layout, d_output)
self.assertDTensorEqual(expected_inputs_grad, input_layout, d_inputs_grad)
self.assertDTensorEqual(expected_kernel_grad, kernel_layout, d_kernel_grad)
if __name__ == '__main__':
test.main()
| SpatiallyPartitionedConvOpTest |
python | huggingface__transformers | src/transformers/models/maskformer/modeling_maskformer.py | {
"start": 3695,
"end": 5483
} | class ____(ModelOutput):
r"""
encoder_last_hidden_state (`torch.FloatTensor` of shape`(batch_size, num_channels, height, width)`):
Last hidden states (final feature map) of the last stage of the encoder.
decoder_last_hidden_state (`torch.FloatTensor` of shape`(batch_size, num_channels, height, width)`):
Last hidden states (final feature map) of the last stage of the decoder.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at
the output of each stage.
decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at
the output of each stage.
"""
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
decoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
MaskFormer's pixel decoder module output, practically a Feature Pyramid Network. It returns the last hidden state
and (optionally) the hidden states.
"""
)
| MaskFormerPixelLevelModuleOutput |
python | wandb__wandb | tests/system_tests/test_functional/dspy/dspy_callback_completions.py | {
"start": 259,
"end": 1559
} | class ____:
"""Minimal stand-in for dspy.Completions to exercise .items() branch."""
def __init__(self, data):
self._data = data
def items(self):
return list(self._data.items())
def _build_results_stub():
ex = dspy.Example(question="What is 10-3?", answer="7")
# Ensure isinstance(pred, dspy.Completions) is True by monkeypatching
dspy.Completions = DummyCompletions # type: ignore[attr-defined]
pred = dspy.Completions({"answer": "7"}) # type: ignore[call-arg]
return [(ex, pred, True)]
def main() -> None:
from wandb.integration.dspy import WandbDSPyCallback
with wandb.init(project="dspy-system-test-completions") as run:
cb = WandbDSPyCallback(log_results=True, run=run)
class FakeEvaluate:
def __init__(self) -> None:
self.devset = []
self.num_threads = 1
self.auto = "light"
program = MinimalProgram()
cb.on_evaluate_start(
call_id="c1", instance=FakeEvaluate(), inputs={"program": program}
)
results = _build_results_stub()
out = EvaluationResult(score=0.8, results=results)
cb.on_evaluate_end(call_id="c1", outputs=out, exception=None)
if __name__ == "__main__":
main()
| DummyCompletions |
python | jazzband__django-oauth-toolkit | tests/test_generator.py | {
"start": 310,
"end": 1021
} | class ____(TestCase):
def test_generate_client_id(self):
g = self.oauth2_settings.CLIENT_ID_GENERATOR_CLASS()
self.assertEqual(len(g.hash()), 40)
self.oauth2_settings.CLIENT_ID_GENERATOR_CLASS = MockHashGenerator
self.assertEqual(generate_client_id(), 42)
def test_generate_secret_id(self):
g = self.oauth2_settings.CLIENT_SECRET_GENERATOR_CLASS()
self.assertEqual(len(g.hash()), 128)
self.oauth2_settings.CLIENT_SECRET_GENERATOR_CLASS = MockHashGenerator
self.assertEqual(generate_client_secret(), 42)
def test_basegen_misuse(self):
g = BaseHashGenerator()
self.assertRaises(NotImplementedError, g.hash)
| TestGenerators |
python | django-extensions__django-extensions | django_extensions/management/commands/clean_pyc.py | {
"start": 244,
"end": 1647
} | class ____(BaseCommand):
help = "Removes all python bytecode compiled files from the project."
requires_system_checks: List[str] = []
def add_arguments(self, parser):
parser.add_argument(
"--optimize",
"-o",
"-O",
action="store_true",
dest="optimize",
default=False,
help="Remove optimized python bytecode files",
)
parser.add_argument(
"--path",
"-p",
action="store",
dest="path",
help="Specify path to recurse into",
)
@signalcommand
def handle(self, *args, **options):
project_root = options.get("path", getattr(settings, "BASE_DIR", None))
if not project_root:
project_root = getattr(settings, "BASE_DIR", None)
verbosity = options["verbosity"]
if not project_root:
raise CommandError(
"No --path specified and settings.py does not contain BASE_DIR"
)
exts = options["optimize"] and "*.py[co]" or "*.pyc"
for root, dirs, filenames in os.walk(project_root):
for filename in fnmatch.filter(filenames, exts):
full_path = _j(root, filename)
if verbosity > 1:
self.stdout.write("%s\n" % full_path)
os.remove(full_path)
| Command |
python | walkccc__LeetCode | solutions/1575. Count All Possible Routes/1575-2.py | {
"start": 0,
"end": 711
} | class ____:
def countRoutes(
self,
locations: list[int],
start: int,
finish: int,
fuel: int,
) -> int:
MOD = 1_000_000_007
n = len(locations)
# dp[i][j] := the number of ways to reach the `finish` city from the i-th
# city with `j` fuel
dp = [[0] * (fuel + 1) for _ in range(n)]
for f in range(fuel + 1):
dp[finish][f] = 1
for f in range(fuel + 1):
for i in range(n):
for j in range(n):
if i == j:
continue
requiredFuel = abs(locations[i] - locations[j])
if requiredFuel <= f:
dp[i][f] += dp[j][f - requiredFuel]
dp[i][f] %= MOD
return dp[start][fuel]
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictClosed3.py | {
"start": 2639,
"end": 2703
} | class ____(ParentNonOpen6, closed=False):
pass
| ChildNotClosed6 |
python | pytorch__pytorch | torch/fx/experimental/unification/multipledispatch/variadic.py | {
"start": 1477,
"end": 2276
} | class ____(type):
"""A metaclass that overrides ``__getitem__`` on the class. This is used to
generate a new type for Variadic signatures. See the Variadic class for
examples of how this behaves.
"""
def __getitem__(cls, variadic_type):
if not (isinstance(variadic_type, (type, tuple)) or type(variadic_type)):
raise ValueError(
"Variadic types must be type or tuple of types"
" (Variadic[int] or Variadic[(int, float)]"
)
if not isinstance(variadic_type, tuple):
variadic_type = (variadic_type,)
return VariadicSignatureType(
f"Variadic[{typename(variadic_type)}]",
(),
dict(variadic_type=variadic_type, __slots__=()),
)
| VariadicSignatureMeta |
python | getsentry__sentry | src/sentry/incidents/endpoints/serializers/query_subscription.py | {
"start": 1350,
"end": 2263
} | class ____(Serializer):
def get_attrs(
self, item_list: Sequence[QuerySubscription], user, **kwargs
) -> MutableMapping[QuerySubscription, dict[str, Any]]:
attrs: dict[QuerySubscription, dict[str, Any]] = defaultdict(dict)
prefetch_related_objects(item_list, "snuba_query", "snuba_query__snubaqueryeventtype_set")
snuba_queries = [item.snuba_query for item in item_list]
for qs, serialized_sq in zip(item_list, serialize(snuba_queries, user=user)):
attrs[qs]["snuba_query"] = serialized_sq
return attrs
def serialize(
self, obj: QuerySubscription, attrs: Mapping[str, Any], user, **kwargs
) -> dict[str, Any]:
return {
"id": str(obj.id),
"status": obj.status,
"subscription": obj.subscription_id,
"snubaQuery": attrs.get("snuba_query"),
}
| QuerySubscriptionSerializer |
python | ray-project__ray | rllib/examples/rl_modules/classes/custom_action_distribution_rlm.py | {
"start": 1992,
"end": 6716
} | class ____(TorchRLModule, ValueFunctionAPI):
"""A simple TorchRLModule with its own custom action distribution.
The distribution differs from the default one by an additional temperature
parameter applied on top of the Categorical base distribution. See the above
`TorchCategoricalWithTemp` class for details.
.. testcode::
import numpy as np
import gymnasium as gym
my_net = CustomActionDistributionRLModule(
observation_space=gym.spaces.Box(-1.0, 1.0, (4,), np.float32),
action_space=gym.spaces.Discrete(4),
model_config={"action_dist_temperature": 5.0},
)
B = 10
data = torch.from_numpy(
np.random.random_sample(size=(B, 4)).astype(np.float32)
)
# Expect a relatively high-temperature distribution.
# Set "action_dist_temperature" to small values << 1.0 to approximate greedy
# behavior (even when stochastically sampling from the distribution).
print(my_net.forward_exploration({"obs": data}))
"""
@override(TorchRLModule)
def setup(self):
"""Use this method to create all the model components that you require.
Feel free to access the following useful properties in this class:
- `self.model_config`: The config dict for this RLModule class,
which should contain flexible settings, for example: {"hiddens": [256, 256]}.
- `self.observation|action_space`: The observation and action space that
this RLModule is subject to. Note that the observation space might not be the
exact space from your env, but that it might have already gone through
preprocessing through a connector pipeline (for example, flattening,
frame-stacking, mean/std-filtering, etc..).
- `self.inference_only`: If True, this model should be built only for inference
purposes, in which case you may want to exclude any components that are not used
for computing actions, for example a value function branch.
"""
input_dim = self.observation_space.shape[0]
hidden_dim = self.model_config.get("hidden_dim", 256)
output_dim = self.action_space.n
# Define simple encoder, and policy- and vf heads.
self._encoder = torch.nn.Sequential(
torch.nn.Linear(input_dim, hidden_dim),
torch.nn.ReLU(),
)
self._policy_net = torch.nn.Linear(hidden_dim, output_dim)
self._vf = nn.Linear(hidden_dim, 1)
# Plug in a custom action dist class.
# NOTE: If you need more granularity as to which distribution class is used by
# which forward method (`forward_inference`, `forward_exploration`,
# `forward_train`), override the RLModule methods
# `get_inference_action_dist_cls`, `get_exploration_action_dist_cls`, and
# `get_train_action_dist_cls`, and return
# your custom class(es) from these. In this case, leave `self.action_dist_cls`
# set to None, its default value.
self.action_dist_cls = _make_categorical_with_temperature(
self.model_config["action_dist_temperature"]
)
@override(TorchRLModule)
def _forward(self, batch, **kwargs):
# Compute the basic 1D feature tensor (inputs to policy- and value-heads).
_, logits = self._compute_embeddings_and_logits(batch)
# Return features and logits as ACTION_DIST_INPUTS (categorical distribution).
return {
Columns.ACTION_DIST_INPUTS: logits,
}
@override(TorchRLModule)
def _forward_train(self, batch, **kwargs):
# Compute the basic 1D feature tensor (inputs to policy- and value-heads).
embeddings, logits = self._compute_embeddings_and_logits(batch)
# Return features and logits as ACTION_DIST_INPUTS (categorical distribution).
return {
Columns.ACTION_DIST_INPUTS: logits,
Columns.EMBEDDINGS: embeddings,
}
# We implement this RLModule as a ValueFunctionAPI RLModule, so it can be used
# by value-based methods like PPO or IMPALA.
@override(ValueFunctionAPI)
def compute_values(
self,
batch: Dict[str, Any],
embeddings: Optional[Any] = None,
) -> TensorType:
# Features not provided -> We need to compute them first.
if embeddings is None:
embeddings = self._encoder(batch[Columns.OBS])
return self._vf(embeddings).squeeze(-1)
def _compute_embeddings_and_logits(self, batch):
embeddings = self._encoder(batch[Columns.OBS])
logits = self._policy_net(embeddings)
return embeddings, logits
| CustomActionDistributionRLModule |
python | google__jax | tests/shard_map_test.py | {
"start": 158053,
"end": 167530
} | class ____(NamedTuple):
name: str
num_inputs: int
fun: Callable
out_rep: Callable
valid_types: Callable | None = None
fun_specs = [
FunSpec('id', 1, lambda x: x, lambda r: r),
FunSpec('flip', 2, lambda x, y: (y, x), lambda r_x, r_y: (r_y, r_x)),
FunSpec('transpose', 1, lambda x: x.T, lambda r: r),
FunSpec('ravel', 1, lambda x: x.ravel(), lambda r: r),
FunSpec(
'dot', 2, jnp.dot, lambda r1, r2: r1 & r2,
lambda x1, x2: (x1.shape and x2.shape and
x1.shape[-1] == x2.shape[-2 if x2.ndim > 1 else 0]),
),
FunSpec(
'sin_dot_sin', 2,
lambda x1, x2: jnp.sin(jnp.dot(jnp.sin(x1), x2)),
lambda r1, r2: r1 & r2,
lambda x1, x2: (x1.shape and x2.shape and
x1.shape[-1] == x2.shape[-2 if x2.ndim > 1 else 0])),
FunSpec('relu', 1, lambda x: jax.nn.relu(x + 1) - 1, lambda r: r),
]
input_shapes = [
jax.ShapeDtypeStruct(shape, jnp.dtype('float32'))
# TODO(mattjj): 0 axis sizes lead to XLA sigfpe, file bug!
for k in range(1, 4) for shape in it.permutations(range(1, 4), k)
if not shape or len(set(shape)) > 1 # skip all-equal shapes, boring!
]
mesh_shapes = [
(1,),
(1, 1),
(1, 2),
(2, 2),
(2, 4),
(4, 2),
]
# Reference implementation of shard_map.
ShapeDtypeDuck = Any # has shape and dtype attributes
Specs = Any # pytree of PartitionSpec
def shmap_reference(
body_in_types: Sequence[ShapeDtypeDuck],
body_out_types: Sequence[ShapeDtypeDuck],
out_types: Sequence[ShapeDtypeDuck],
f: Callable, mesh: Mesh, in_specs: Specs, out_specs: Specs
) -> Callable:
def f_shmapped(*args):
outs = jax.tree.map(lambda y: jnp.zeros(y.shape, y.dtype), out_types)
getters = [make_indexer(mesh, s, x) for s, x in zip(in_specs, args)]
putters = jax.tree.map(partial(make_indexer, mesh), out_specs, outs)
for idx in it.product(*map(range, mesh.shape.values())):
args_shards = [x[indexer(idx)] for x, indexer in zip(args, getters)]
assert all(x.shape == r.shape for x, r in zip(args_shards, body_in_types))
out_shards = f(*args_shards)
assert jax.tree.all(jax.tree.map(lambda y, r: y.shape == r.shape,
out_shards, body_out_types))
outs = jax.tree.map(lambda y, out, indexer: out.at[indexer(idx)].set(y),
out_shards, outs, putters)
return outs
return f_shmapped
def make_indexer(mesh: Mesh, spec: P, x: Any
) -> Callable[[tuple[int, ...]], tuple[slice, ...]]:
block_shape = [d // math.prod(mesh.shape[ax] for ax in (elt or ()))
for d, elt in zip(x.shape, spec)]
def indexer(idx):
starts = [0 if el is None else
idx[list(mesh.shape).index(el)] if type(el) is not tuple else
sum(idx[list(mesh.shape).index(el[i])]
* math.prod(mesh.shape[e] for e in el[i+1:]) for i in range(len(el)))
for el in spec]
return tuple(slice(start * size, (start + 1) * size)
for start, size in zip(starts, block_shape))
return indexer
# The code below is similar to named_cases_from_sampler in test_util.py, but it
# uses generators instead of passing a "select" function around.
# To sample test cases efficiently, we construct a generator which yields to the
# caller to choose one of an iterable's options. That is, we can read 'yield' in
# this code as 'choose one'. To call functions which themselves need to make
# choices, we use 'yield from'. That is, we can read 'yield from' in this code
# as 'call this choice-making function'.
Option = Any
CaseSpec = tuple # first element is a string test name
Chooser = Generator[Iterable[Option], Option, CaseSpec]
def sample_shmap() -> Chooser:
spec = yield fun_specs
mesh_shape = yield mesh_shapes
axis_names = ('i', 'j', 'k', 'l')[:len(mesh_shape)]
mesh = SimpleNamespace(shape=dict(zip(axis_names, mesh_shape)),
axis_names=axis_names)
in_types = (tys for tys in it.product(input_shapes, repeat=spec.num_inputs)
if not spec.valid_types or spec.valid_types(*tys))
body_in_types = yield in_types
body_out_types = jax.eval_shape(spec.fun, *body_in_types)
in_types, in_specs = yield from make_in_specs(mesh, body_in_types)
args = [np.arange(ty.size, dtype=ty.dtype).reshape(ty.shape) / ty.size
for ty in in_types]
out_reps = spec.out_rep(*map(partial(unmentioned, mesh), in_specs))
out_specs = yield from make_out_specs(mesh, body_out_types, out_reps)
out_types = jax.tree.map(partial(dilate, mesh), out_specs, body_out_types)
ref = partial(shmap_reference, body_in_types, body_out_types, out_types)
in_str = '(' + ','.join(jax.core.ShapedArray(t.shape, t.dtype).str_short()
for t in in_types) + ')'
jit = yield [True, False]
name = f'{spec.name}_{mesh.shape}_jit={jit}_{in_specs}_{out_specs}_{in_str}'
return name, spec.fun, mesh.shape, jit, in_specs, out_specs, args, ref
def unmentioned(mesh: Mesh, pspec: P) -> set[core.AxisName]:
return set(mesh.axis_names) - {n for ns in pspec if ns is not None
for n in (ns if type(ns) is tuple else [ns])}
# To drive the sampler, we have `sample` function which just runs a loop.
def sample(num: int, make_gen: Callable[[], Chooser]) -> Iterator[CaseSpec]:
rng = np.random.RandomState(0)
seen: set[str] = set()
while len(seen) < num:
name, *case = sample_one(rng, make_gen())
if name not in seen:
seen.add(name)
yield case
# To sample one test spec, we run the generator, getting back sequences of
# options from it and sending in our choices from those options until finally a
# test case spec is produced.
def sample_one(rng: np.random.RandomState, gen: Chooser) -> CaseSpec:
lst = list(next(gen))
try:
while True:
choice = lst[rng.randint(len(lst))]
lst = list(gen.send(choice))
except StopIteration as e:
return e.value
# Next are some choice-making functions for shard_map test specifications.
MeshDuck = Any # same attributes as a Mesh
def make_in_specs(mesh: MeshDuck, in_types: Sequence[ShapeDtypeDuck]
) -> Chooser:
pairs = []
for ty in in_types:
pair = yield from make_in_spec(mesh, ty)
pairs.append(pair)
return tuple(zip(*pairs))
def make_in_spec(mesh: Mesh, in_type_base: ShapeDtypeDuck) -> Chooser:
assert len(list(powerset(mesh.shape)))
subset = yield powerset(mesh.shape)
elts = yield partitions(subset, len(in_type_base.shape))
partition_spec = P(*(tuple(e) if e else None for e in elts))
new_type = dilate(mesh, partition_spec, in_type_base)
return new_type, partition_spec
def dilate(mesh: Mesh, spec: P, shape: ShapeDtypeDuck) -> ShapeDtypeDuck:
new_shape = tuple(d * math.prod(mesh.shape[ax] for ax in (elt or ()))
for d, elt in zip(shape.shape, spec))
return jax.ShapeDtypeStruct(new_shape, shape.dtype)
def make_out_specs(
mesh: MeshDuck, out_types: ShapeDtypeDuck | Sequence[ShapeDtypeDuck],
out_reps: set[core.AxisName] | Sequence[set[core.AxisName]]
) -> Chooser:
if type(out_types) is not tuple:
out_spec = yield from make_out_spec(mesh, out_types, out_reps) # type: ignore
return out_spec
else:
out_specs = []
for ty, rep in zip(out_types, out_reps):
out_spec = yield from make_out_spec(mesh, ty, rep) # type: ignore
out_specs.append(out_spec)
return tuple(out_specs)
def make_out_spec(
mesh: Mesh, out_type: ShapeDtypeDuck, out_rep: set[core.AxisName]
) -> Chooser:
subset = yield (s for s in powerset(mesh.shape)
if out_rep | set(s) == set(mesh.shape))
elts = yield partitions(subset, len(out_type.shape))
return P(*(tuple(e) if e else None for e in elts))
# Combinatorial helper functions
T = TypeVar('T')
def partitions(s: Sequence[T], k: int) -> Iterator[list[list[T]]]:
for indices in it.product(range(k), repeat=len(s)):
outs: list[list[T]] = [[] for _ in range(k)]
for i, elt in zip(indices, s):
outs[i].append(elt)
yield outs
def powerset(s: Iterable[T]) -> Iterator[Sequence[T]]:
s = list(s)
return it.chain.from_iterable(it.combinations(s, r) for r in range(len(s)+1))
# Vmap test helpers
Arr = Any
def sample_shmap_batched(bdim_size: int) -> Chooser:
name, *shmap_specs, args, ref = yield from sample_shmap()
bdims = yield all_bdims(*map(op.attrgetter('shape'), args))
batch_args = map(partial(batchify_arg, bdim_size), bdims, args)
return name + f'_vmap_{bdims}', bdims, *shmap_specs, batch_args, ref
def all_bdims(*shapes: tuple[int, ...]
) -> Iterator[Sequence[int | None]]:
bdims = ((None, *range(len(shape) + 1)) for shape in shapes)
return (t for t in it.product(*bdims) if not all(e is None for e in t))
def batchify_arg(size: int, bdim: int | None, x: Arr) -> Arr:
if bdim is None:
return x
else:
iota = np.arange(1, size + 1, dtype=x.dtype).reshape(
[1 if i != bdim else -1 for i in range(len(x.shape) + 1)])
return np.expand_dims(x, bdim) * iota
def args_slicer(args: Sequence[Arr], bdims: Sequence[int | None]
) -> Callable[[int], Sequence[Arr]]:
def slicer(x, bdim):
if bdim is None:
return lambda _: x
else:
return lambda i: x.take(indices=i, axis=bdim)
slicers = map(slicer, args, bdims)
return lambda i: [sl(i) for sl in slicers]
| FunSpec |
python | django__django | tests/admin_views/models.py | {
"start": 11114,
"end": 11292
} | class ____(models.Model):
code = models.AutoField(primary_key=True)
owner = models.ForeignKey(Collector, models.CASCADE)
name = models.CharField(max_length=100)
| Grommet |
python | scikit-learn__scikit-learn | sklearn/utils/tests/test_estimator_checks.py | {
"start": 14210,
"end": 14873
} | class ____(SGDClassifier):
# Toy classifier that only supports binary classification, will fail tests.
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
super().fit(X, y, coef_init, intercept_init, sample_weight)
if len(self.classes_) > 2:
raise ValueError("Only 2 classes are supported")
return self
def partial_fit(self, X, y, classes=None, sample_weight=None):
super().partial_fit(X=X, y=y, classes=classes, sample_weight=sample_weight)
if len(self.classes_) > 2:
raise ValueError("Only 2 classes are supported")
return self
| UntaggedBinaryClassifier |
python | getsentry__sentry | src/sentry/issues/endpoints/project_grouping_configs.py | {
"start": 432,
"end": 958
} | class ____(ProjectEndpoint):
owner = ApiOwner.ISSUES
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
"""Retrieve available grouping configs with project-specific information
See GroupingConfigsEndpoint
"""
def get(self, request: Request, project) -> Response:
configs = [
config.as_dict()
for config in sorted(GROUPING_CONFIG_CLASSES.values(), key=lambda x: str(x.id))
]
return Response(serialize(configs))
| ProjectGroupingConfigsEndpoint |
python | allegroai__clearml | examples/frameworks/pytorch/pytorch_matplotlib.py | {
"start": 12096,
"end": 19642
} | class ____(nn.Module):
def __init__(self, mean, std):
super(Normalization, self).__init__()
# .view the mean and std to make them [C x 1 x 1] so that they can
# directly work with image Tensor of shape [B x C x H x W].
# B is batch size. C is number of channels. H is height and W is width.
self.mean = torch.tensor(mean).view(-1, 1, 1)
self.std = torch.tensor(std).view(-1, 1, 1)
def forward(self, img):
# normalize img
return (img - self.mean) / self.std
######################################################################
# A ``Sequential`` module contains an ordered list of child modules. For
# instance, ``vgg19.features`` contains a sequence (Conv2d, ReLU, MaxPool2d,
# Conv2d, ReLU...) aligned in the right order of depth. We need to add our
# content loss and style loss layers immediately after the convolution
# layer they are detecting. To do this we must create a new ``Sequential``
# module that has content loss and style loss modules correctly inserted.
#
# desired depth layers to compute style/content losses :
content_layers_default = ['conv_4']
style_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']
def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
style_img, content_img,
content_layers=content_layers_default,
style_layers=style_layers_default):
cnn = copy.deepcopy(cnn)
# normalization module
normalization = Normalization(normalization_mean, normalization_std).to(device)
# just in order to have an iterable access to or list of content/syle
# losses
content_losses = []
style_losses = []
# assuming that cnn is a nn.Sequential, so we make a new nn.Sequential
# to put in modules that are supposed to be activated sequentially
model = nn.Sequential(normalization)
i = 0 # increment every time we see a conv
for layer in cnn.children():
if isinstance(layer, nn.Conv2d):
i += 1
name = 'conv_{}'.format(i)
elif isinstance(layer, nn.ReLU):
name = 'relu_{}'.format(i)
# The in-place version doesn't play very nicely with the ContentLoss
# and StyleLoss we insert below. So we replace with out-of-place
# ones here.
layer = nn.ReLU(inplace=False)
elif isinstance(layer, nn.MaxPool2d):
name = 'pool_{}'.format(i)
elif isinstance(layer, nn.BatchNorm2d):
name = 'bn_{}'.format(i)
else:
raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__))
model.add_module(name, layer)
if name in content_layers:
# add content loss:
target = model(content_img).detach()
content_loss = ContentLoss(target)
model.add_module("content_loss_{}".format(i), content_loss)
content_losses.append(content_loss)
if name in style_layers:
# add style loss:
target_feature = model(style_img).detach()
style_loss = StyleLoss(target_feature)
model.add_module("style_loss_{}".format(i), style_loss)
style_losses.append(style_loss)
# now we trim off the layers after the last content and style losses
for i in range(len(model) - 1, -1, -1):
if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss):
break
model = model[:(i + 1)]
return model, style_losses, content_losses
######################################################################
# Next, we select the input image. You can use a copy of the content image
# or white noise.
#
input_img = content_img.clone()
# if you want to use white noise instead uncomment the below line:
# input_img = torch.randn(content_img.data.size(), device=device)
# add the original input image to the figure:
plt.figure()
imshow(input_img, title='Input Image')
######################################################################
# Gradient Descent
# ----------------
#
# As Leon Gatys, the author of the algorithm, suggested `here <https://discuss.pytorch.org/t/pytorch-tutorial-for-neural-transfert-of-artistic-style/336/20?u=alexis-jacq>`__, we will use
# L-BFGS algorithm to run our gradient descent. Unlike training a network,
# we want to train the input image in order to minimise the content/style
# losses. We will create a PyTorch L-BFGS optimizer ``optim.LBFGS`` and pass
# our image to it as the tensor to optimize.
#
def get_input_optimizer(input_img):
# this line to show that input is a parameter that requires a gradient
optimizer = optim.LBFGS([input_img.requires_grad_()])
return optimizer
######################################################################
# Finally, we must define a function that performs the neural transfer. For
# each iteration of the networks, it is fed an updated input and computes
# new losses. We will run the ``backward`` methods of each loss module to
# dynamicaly compute their gradients. The optimizer requires a "closure"
# function, which reevaluates the modul and returns the loss.
#
# We still have one final constraint to address. The network may try to
# optimize the input with values that exceed the 0 to 1 tensor range for
# the image. We can address this by correcting the input values to be
# between 0 to 1 each time the network is run.
#
def run_style_transfer(cnn, normalization_mean, normalization_std,
content_img, style_img, input_img, num_steps=300,
style_weight=1000000, content_weight=1):
"""Run the style transfer."""
print('Building the style transfer model..')
model, style_losses, content_losses = get_style_model_and_losses(cnn,
normalization_mean, normalization_std, style_img,
content_img)
optimizer = get_input_optimizer(input_img)
print('Optimizing..')
run = [0]
while run[0] <= num_steps:
def closure():
# correct the values of updated input image
input_img.data.clamp_(0, 1)
optimizer.zero_grad()
model(input_img)
style_score = 0
content_score = 0
for sl in style_losses:
style_score += sl.loss
for cl in content_losses:
content_score += cl.loss
style_score *= style_weight
content_score *= content_weight
loss = style_score + content_score
loss.backward()
run[0] += 1
if run[0] % 50 == 0:
print("run {}:".format(run))
print('Style Loss : {:4f} Content Loss: {:4f}'.format(
style_score.item(), content_score.item()))
print()
return style_score + content_score
optimizer.step(closure)
# a last correction...
input_img.data.clamp_(0, 1)
return input_img
######################################################################
# Finally, we can run the algorithm.
#
output = run_style_transfer(cnn, cnn_normalization_mean, cnn_normalization_std,
content_img, style_img, input_img)
plt.figure()
imshow(output, title='Output Image')
# sphinx_gallery_thumbnail_number = 4
plt.ioff()
plt.show()
| Normalization |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/sensors/rds.py | {
"start": 1299,
"end": 1709
} | class ____(AwsBaseSensor[RdsHook]):
"""Base operator that implements common functions for all sensors."""
aws_hook_class = RdsHook
ui_color = "#ddbb77"
ui_fgcolor = "#ffffff"
def __init__(self, *args, hook_params: dict | None = None, **kwargs):
self.hook_params = hook_params or {}
self.target_statuses: list[str] = []
super().__init__(*args, **kwargs)
| RdsBaseSensor |
python | pytorch__pytorch | test/test_fx_passes.py | {
"start": 17132,
"end": 17722
} | class ____:
@staticmethod
def forward(x):
x = torch.sigmoid(x)
x = torch.sigmoid(x)
x = torch.sigmoid(x)
return torch.sigmoid(x)
@staticmethod
def pattern(x):
return torch.sigmoid(torch.sigmoid(x))
test_cases = [
# match_output, match_placeholder, num_matches
TestCase(False, False, 3, remove_overlapping_matches=False),
TestCase(False, False, 2, remove_overlapping_matches=True),
TestCase(True, False, 1),
TestCase(False, True, 1),
TestCase(True, True, 0)
]
| ChainRepeatedPattern |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/rnn_cell_test.py | {
"start": 125314,
"end": 136585
} | class ____(test.TestCase, parameterized.TestCase):
def _testDropoutWrapper(self,
batch_size=None,
time_steps=None,
parallel_iterations=None,
wrapper_type=None,
scope="root",
**kwargs):
if batch_size is None and time_steps is None:
# 2 time steps, batch size 1, depth 3
batch_size = 1
time_steps = 2
x = constant_op.constant(
[[[2., 2., 2.]], [[1., 1., 1.]]], dtype=dtypes.float32)
m = rnn_cell_impl.LSTMStateTuple(
*[constant_op.constant([[0.1, 0.1, 0.1]], dtype=dtypes.float32)] * 2)
else:
x = constant_op.constant(
np.random.randn(time_steps, batch_size, 3).astype(np.float32))
m = rnn_cell_impl.LSTMStateTuple(*[
constant_op.
constant([[0.1, 0.1, 0.1]] * batch_size, dtype=dtypes.float32)] * 2)
outputs, final_state = rnn.dynamic_rnn(
cell=wrapper_type(
rnn_cell_impl.LSTMCell(
3, initializer=init_ops.constant_initializer(0.5)),
dtype=x.dtype, **kwargs),
time_major=True,
parallel_iterations=parallel_iterations,
inputs=x,
initial_state=m,
scope=scope)
self.evaluate([variables_lib.global_variables_initializer()])
res = self.evaluate([outputs, final_state])
self.assertEqual(res[0].shape, (time_steps, batch_size, 3))
self.assertEqual(res[1].c.shape, (batch_size, 3))
self.assertEqual(res[1].h.shape, (batch_size, 3))
return res
def testDropoutWrapperProperties(self):
wrapper_type = rnn_cell_impl.DropoutWrapper
cell = rnn_cell_impl.BasicRNNCell(10)
wrapper = wrapper_type(cell)
# Github issue 15810
self.assertEqual(wrapper.wrapped_cell, cell)
self.assertEqual(wrapper.state_size, 10)
self.assertEqual(wrapper.output_size, 10)
def testDropoutWrapperZeroState(self):
wrapper_type = rnn_cell_impl.DropoutWrapper
class _Cell(rnn_cell_impl.BasicRNNCell):
def zero_state(self, batch_size=None, dtype=None):
return "wrapped_cell_zero_state"
wrapper = wrapper_type(_Cell(10))
self.assertEqual(wrapper.zero_state(10, dtypes.float32),
"wrapped_cell_zero_state")
def testDropoutWrapperKeepAllConstantInput(self):
wrapper_type = rnn_cell_impl.DropoutWrapper
keep = array_ops.ones([])
res = self._testDropoutWrapper(
input_keep_prob=keep, output_keep_prob=keep, state_keep_prob=keep,
wrapper_type=wrapper_type)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(true_full_output, res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperKeepAll(self):
wrapper_type = rnn_cell_impl.DropoutWrapper
keep = variable_scope.get_variable("all", initializer=1.0)
res = self._testDropoutWrapper(
input_keep_prob=keep, output_keep_prob=keep, state_keep_prob=keep,
wrapper_type=wrapper_type)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(true_full_output, res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperWithSeed(self):
wrapper_type = rnn_cell_impl.DropoutWrapper
keep_some = 0.5
random_seed.set_random_seed(2)
## Use parallel_iterations = 1 in both calls to
## _testDropoutWrapper to ensure the (per-time step) dropout is
## consistent across both calls. Otherwise the seed may not end
## up being munged consistently across both graphs.
res_standard_1 = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
seed=10,
parallel_iterations=1,
wrapper_type=wrapper_type,
scope="root_1")
random_seed.set_random_seed(2)
res_standard_2 = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
seed=10,
parallel_iterations=1,
wrapper_type=wrapper_type,
scope="root_2")
self.assertAllClose(res_standard_1[0], res_standard_2[0])
self.assertAllClose(res_standard_1[1].c, res_standard_2[1].c)
self.assertAllClose(res_standard_1[1].h, res_standard_2[1].h)
def testDropoutWrapperKeepNoOutput(self):
wrapper_type = rnn_cell_impl.DropoutWrapper
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-6)
res = self._testDropoutWrapper(
input_keep_prob=keep_all,
output_keep_prob=keep_none,
state_keep_prob=keep_all,
wrapper_type=wrapper_type)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(np.zeros(res[0].shape), res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperKeepNoStateExceptLSTMCellMemory(self):
wrapper_type = rnn_cell_impl.DropoutWrapper
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-6)
# Even though we dropout state, by default DropoutWrapper never
# drops out the memory ("c") term of an LSTMStateTuple.
res = self._testDropoutWrapper(
input_keep_prob=keep_all,
output_keep_prob=keep_all,
state_keep_prob=keep_none,
wrapper_type=wrapper_type)
true_c_state = np.array([[1.713925, 1.713925, 1.713925]], dtype=np.float32)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
self.assertAllClose(true_full_output[0], res[0][0])
# Second output is modified by zero input state
self.assertGreater(np.linalg.norm(true_full_output[1] - res[0][1]), 1e-4)
# h state has been set to zero
self.assertAllClose(np.zeros(res[1].h.shape), res[1].h)
# c state of an LSTMStateTuple is NEVER modified.
self.assertAllClose(true_c_state, res[1].c)
def testDropoutWrapperKeepNoInput(self):
wrapper_type = rnn_cell_impl.DropoutWrapper
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-6)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
# All outputs are different because inputs are zeroed out
res = self._testDropoutWrapper(
input_keep_prob=keep_none,
output_keep_prob=keep_all,
state_keep_prob=keep_all,
wrapper_type=wrapper_type)
self.assertGreater(np.linalg.norm(res[0] - true_full_output), 1e-4)
self.assertGreater(np.linalg.norm(res[1].h - true_full_output[1]), 1e-4)
self.assertGreater(np.linalg.norm(res[1].c - true_full_final_c), 1e-4)
def testDropoutWrapperRecurrentOutput(self):
wrapper_type = rnn_cell_impl.DropoutWrapper
keep_some = 0.8
keep_all = variable_scope.get_variable("all", initializer=1.0)
res = self._testDropoutWrapper(
input_keep_prob=keep_all,
output_keep_prob=keep_some,
state_keep_prob=keep_all,
variational_recurrent=True,
wrapper_type=wrapper_type,
input_size=3,
batch_size=5,
time_steps=7)
# Ensure the same dropout pattern for all time steps
output_mask = np.abs(res[0]) > 1e-6
for m in output_mask[1:]:
self.assertAllClose(output_mask[0], m)
def testDropoutWrapperRecurrentStateInputAndOutput(self):
wrapper_type = rnn_cell_impl.DropoutWrapper
keep_some = 0.9
res = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
variational_recurrent=True,
wrapper_type=wrapper_type,
input_size=3,
batch_size=5,
time_steps=7)
# Smoke test for the state/input masks.
output_mask = np.abs(res[0]) > 1e-6
for time_step in output_mask:
# Ensure the same dropout output pattern for all time steps
self.assertAllClose(output_mask[0], time_step)
for batch_entry in time_step:
# Assert all batch entries get the same mask
self.assertAllClose(batch_entry, time_step[0])
# For state, ensure all batch entries have the same mask
state_c_mask = np.abs(res[1].c) > 1e-6
state_h_mask = np.abs(res[1].h) > 1e-6
for batch_entry in state_c_mask:
self.assertAllClose(batch_entry, state_c_mask[0])
for batch_entry in state_h_mask:
self.assertAllClose(batch_entry, state_h_mask[0])
def testDropoutWrapperRecurrentStateInputAndOutputWithSeed(self):
wrapper_type = rnn_cell_impl.DropoutWrapper
keep_some = 0.9
random_seed.set_random_seed(2347)
np.random.seed(23487)
res0 = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
variational_recurrent=True,
wrapper_type=wrapper_type,
input_size=3,
batch_size=5,
time_steps=7,
seed=-234987,
scope="root_0")
random_seed.set_random_seed(2347)
np.random.seed(23487)
res1 = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
variational_recurrent=True,
wrapper_type=wrapper_type,
input_size=3,
batch_size=5,
time_steps=7,
seed=-234987,
scope="root_1")
output_mask = np.abs(res0[0]) > 1e-6
for time_step in output_mask:
# Ensure the same dropout output pattern for all time steps
self.assertAllClose(output_mask[0], time_step)
for batch_entry in time_step:
# Assert all batch entries get the same mask
self.assertAllClose(batch_entry, time_step[0])
# For state, ensure all batch entries have the same mask
state_c_mask = np.abs(res0[1].c) > 1e-6
state_h_mask = np.abs(res0[1].h) > 1e-6
for batch_entry in state_c_mask:
self.assertAllClose(batch_entry, state_c_mask[0])
for batch_entry in state_h_mask:
self.assertAllClose(batch_entry, state_h_mask[0])
# Ensure seeded calculation is identical.
self.assertAllClose(res0[0], res1[0])
self.assertAllClose(res0[1].c, res1[1].c)
self.assertAllClose(res0[1].h, res1[1].h)
if __name__ == "__main__":
test.main()
| DropoutWrapperTest |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/shuffle_test.py | {
"start": 2350,
"end": 20367
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testBasic(self):
components = (
np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
np.array([9.0, 10.0, 11.0, 12.0])
)
def dataset_fn(count=5, buffer_size=None, seed=0):
repeat_dataset = (
dataset_ops.Dataset.from_tensor_slices(components).repeat(count))
if buffer_size:
shuffle_dataset = repeat_dataset.shuffle(buffer_size, seed)
self.assertEqual(
tuple([c.shape[1:] for c in components]),
dataset_ops.get_legacy_output_shapes(shuffle_dataset))
return shuffle_dataset
else:
return repeat_dataset
# First run without shuffling to collect the "ground truth".
get_next = self.getNext(dataset_fn())
unshuffled_elements = []
for _ in range(20):
unshuffled_elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Assert that the shuffled dataset has the same elements as the
# "ground truth".
get_next = self.getNext(dataset_fn(buffer_size=100, seed=37))
shuffled_elements = []
for _ in range(20):
shuffled_elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertAllEqual(sorted(unshuffled_elements), sorted(shuffled_elements))
# Assert that shuffling twice with the same seeds gives the same sequence.
get_next = self.getNext(dataset_fn(buffer_size=100, seed=37))
reshuffled_elements_same_seed = []
for _ in range(20):
reshuffled_elements_same_seed.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(shuffled_elements, reshuffled_elements_same_seed)
# Assert that shuffling twice with a different seed gives a different
# permutation of the same elements.
get_next = self.getNext(dataset_fn(
buffer_size=100, seed=constant_op.constant(137, dtype=dtypes.int64)))
reshuffled_elements_different_seed = []
for _ in range(20):
reshuffled_elements_different_seed.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertNotEqual(shuffled_elements, reshuffled_elements_different_seed)
self.assertAllEqual(
sorted(shuffled_elements), sorted(reshuffled_elements_different_seed))
# Assert that the shuffled dataset has the same elements as the
# "ground truth" when the buffer size is smaller than the input
# dataset.
get_next = self.getNext(dataset_fn(buffer_size=2, seed=37))
reshuffled_elements_small_buffer = []
for _ in range(20):
reshuffled_elements_small_buffer.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertAllEqual(
sorted(unshuffled_elements), sorted(reshuffled_elements_small_buffer))
# Test the case of shuffling an empty dataset.
get_next = self.getNext(dataset_fn(count=0, buffer_size=100, seed=37))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(combinations.combine(tf_api_version=1, mode="graph"))
def testSeedZero(self):
"""Test for same behavior when the seed is a Python or Tensor zero."""
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.range(10).shuffle(10, seed=0))
get_next = iterator.get_next()
elems = []
with self.cached_session() as sess:
for _ in range(10):
elems.append(sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
seed_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10).shuffle(10, seed=seed_placeholder))
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(iterator.initializer, feed_dict={seed_placeholder: 0})
for elem in elems:
self.assertEqual(elem, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(test_base.default_test_combinations())
def testDefaultArguments(self):
components = [0, 1, 2, 3, 4]
dataset = (
dataset_ops.Dataset.from_tensor_slices(components).shuffle(5).repeat()
)
get_next = self.getNext(dataset)
counts = collections.defaultdict(lambda: 0)
for _ in range(10):
for _ in range(5):
counts[self.evaluate(get_next())] += 1
for i in range(5):
self.assertEqual(10, counts[i])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
dataset_range=[100],
buffer_size=[None, 10, 200],
seed=[None, 42],
use_tensor_input=[True, False])))
def testTensorInput(self, dataset_range, buffer_size, seed, use_tensor_input):
dataset = dataset_ops.Dataset.range(dataset_range)
unshuffled_output = self.getDatasetOutput(dataset)
if buffer_size:
buffer_size = (
constant_op.constant(buffer_size, dtype=dtypes.int64)
if use_tensor_input else buffer_size)
else:
buffer_size = dataset.cardinality()
seed = (constant_op.constant(seed, dtype=dtypes.int64)
if seed and use_tensor_input else seed)
shuffled_dataset = dataset.shuffle(buffer_size, seed=seed)
shuffled_output = self.getDatasetOutput(shuffled_dataset)
self.assertEqual(unshuffled_output, list(range(dataset_range)))
self.assertCountEqual(shuffled_output, unshuffled_output)
self.assertNotEqual(shuffled_output, unshuffled_output)
@combinations.generate(test_base.default_test_combinations())
def testUnknownCardinality(self):
components = [0, 1, 2, 3, 4]
dataset = dataset_ops.Dataset.from_tensor_slices(components).shuffle(
dataset_ops.UNKNOWN
)
get_next = self.getNext(dataset)
counts = collections.defaultdict(lambda: 0)
for _ in range(1):
for _ in range(5):
counts[self.evaluate(get_next())] += 1
for i in range(5):
self.assertEqual(1, counts[i])
@combinations.generate(test_base.default_test_combinations())
def testUnknownCardinalityWithRepeatedShuffle(self):
components = [0, 1, 2, 3, 4]
dataset = (
dataset_ops.Dataset.from_tensor_slices(components)
.shuffle(dataset_ops.UNKNOWN)
.repeat()
)
get_next = self.getNext(dataset)
counts = collections.defaultdict(lambda: 0)
for _ in range(10):
for _ in range(5):
counts[self.evaluate(get_next())] += 1
for i in range(5):
self.assertEqual(10, counts[i])
@combinations.generate(test_base.default_test_combinations())
def testUnknownCardinalityWithIncreasingBufferSize(self):
epoch_1 = list(range(5))
epoch_2 = list(range(10, 17))
epoch_3 = list(range(20, 28))
ds = make_variable_size_dataset([epoch_1, epoch_2, epoch_3])
ds = ds.shuffle(dataset_ops.UNKNOWN).repeat(3)
expected = epoch_1 + epoch_2 + epoch_3
self.assertDatasetProduces(ds, expected, assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testUnknownCardinalityWithVariableBufferSize(self):
epoch_1 = list(range(5))
epoch_2 = list(range(10, 13))
epoch_3 = list(range(20, 27))
ds = make_variable_size_dataset([epoch_1, epoch_2, epoch_3])
ds = ds.shuffle(dataset_ops.UNKNOWN).repeat(3)
expected = epoch_1 + epoch_2 + epoch_3
self.assertDatasetProduces(ds, expected, assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testInputInitializations(self):
num_rounds = 3
def compute_orders(dataset):
orders = []
for _ in range(num_rounds):
orders.append(self.getDatasetOutput(dataset))
return orders
dataset = dataset_ops.Dataset.range(10).shuffle(10, seed=1)
first_orders = compute_orders(dataset)
dataset = dataset_ops.Dataset.range(10)
# Adding shuffle(1) should not change the order.
dataset = dataset_ops.Dataset.range(10).shuffle(10, seed=1).shuffle(1)
second_orders = compute_orders(dataset)
self.assertEqual(first_orders, second_orders)
@combinations.generate(
combinations.times(
test_base.graph_only_combinations(),
combinations.combine(reshuffle=[True, False]),
combinations.combine(graph_seed=38, op_seed=None) +
combinations.combine(graph_seed=None, op_seed=42) +
combinations.combine(graph_seed=38, op_seed=42)))
def testShuffleSeed(self, reshuffle, graph_seed, op_seed):
results = []
for _ in range(2):
with ops.Graph().as_default() as g:
random_seed.set_random_seed(graph_seed)
dataset = dataset_ops.Dataset.range(10).shuffle(
10, seed=op_seed, reshuffle_each_iteration=reshuffle).repeat(3)
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
run_results = []
with self.session(graph=g) as sess:
for _ in range(30):
run_results.append(sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
results.append(run_results)
self.assertAllEqual(results[0], results[1])
# TODO(b/117581999): enable this test for eager-mode.
@combinations.generate(
combinations.times(
test_base.graph_only_combinations(),
combinations.combine(
reshuffle=[True, False], initializable=[True, False])))
def testMultipleIterators(self, reshuffle, initializable):
with ops.Graph().as_default() as g:
dataset = dataset_ops.Dataset.range(100).shuffle(
10, reshuffle_each_iteration=reshuffle).repeat(3)
if initializable:
iterators = [dataset_ops.make_initializable_iterator(dataset)
for _ in range(2)]
else:
iterators = [dataset_ops.make_one_shot_iterator(dataset)
for _ in range(2)]
results = []
with self.session(graph=g) as sess:
for iterator in iterators:
if initializable:
sess.run(iterator.initializer)
next_element = iterator.get_next()
run_results = []
for _ in range(300):
run_results.append(sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
results.append(run_results)
self.assertNotEqual(results[0], results[1])
@combinations.generate(test_base.default_test_combinations())
def testShuffleManyEmptyEpochs(self):
sizes = [0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0]
sizes_iter = iter(sizes)
def gen():
for i in range(next(sizes_iter)):
yield i
dataset = dataset_ops.Dataset.from_generator(
gen, output_signature=tensor_spec.TensorSpec((), dtypes.int64))
dataset = dataset.shuffle(10).repeat(len(sizes)).take(3)
self.assertDatasetProduces(dataset, [0, 0, 1], assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testShuffleInfiniteRepeatNonemptyFollowedByEmpty(self):
sizes = [1, 0, 2, 10]
sizes_iter = iter(sizes)
def gen():
for i in range(next(sizes_iter)):
yield i
dataset = dataset_ops.Dataset.from_generator(
gen, output_signature=tensor_spec.TensorSpec((), dtypes.int64))
dataset = dataset.shuffle(10).repeat().take(3)
self.assertDatasetProduces(dataset, [0, 0, 1], assert_items_equal=True)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(reshuffle=[True, False], seed=[None, 42])))
def testReshuffleRepeatEpochs(self, reshuffle, seed):
dataset = dataset_ops.Dataset.range(10).shuffle(
10, seed=seed, reshuffle_each_iteration=reshuffle).repeat(2)
next_element = self.getNext(dataset)
first_epoch = []
for _ in range(10):
first_epoch.append(self.evaluate(next_element()))
second_epoch = []
for _ in range(10):
second_epoch.append(self.evaluate(next_element()))
self.assertEqual(first_epoch == second_epoch, not reshuffle)
@combinations.generate(
combinations.times(
combinations.combine(tf_api_version=2, mode="eager"),
combinations.combine(reshuffle=[True, False], seed=[None, 42])))
def testReshuffleIterationEpochs(self, reshuffle, seed):
# TensorFlow unit tests set the global graph seed. We unset it here so that
# we can control determinism via the `seed` parameter.
random_seed.set_random_seed(None)
dataset = dataset_ops.Dataset.range(10).shuffle(
10, seed=seed, reshuffle_each_iteration=reshuffle)
first_epoch = self.getDatasetOutput(dataset)
second_epoch = self.getDatasetOutput(dataset)
self.assertEqual(first_epoch == second_epoch, not reshuffle)
@combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
def testShuffleV2ResourceCapture(self):
def make_dataset():
ids = dataset_ops.Dataset.range(10)
ids = ids.shuffle(1)
def interleave_fn(dataset, _):
return dataset
dataset = dataset_ops.Dataset.range(1)
dataset = dataset.interleave(functools.partial(interleave_fn, ids))
return dataset
results = []
for elem in make_dataset():
results.append(elem.numpy())
self.assertAllEqual(results, range(10))
@combinations.generate(
combinations.times(
test_base.eager_only_combinations(),
combinations.combine(reshuffle=[True, False], seed=[None, 42])))
def testReshuffleSeparateTransformations(self, reshuffle, seed):
dataset = dataset_ops.Dataset.range(10)
first_epoch = []
for elem in dataset.shuffle(
10, seed=seed, reshuffle_each_iteration=reshuffle):
first_epoch.append(elem.numpy())
second_epoch = []
for elem in dataset.shuffle(
10, seed=seed, reshuffle_each_iteration=reshuffle):
second_epoch.append(elem.numpy())
self.assertEqual(first_epoch != second_epoch, seed is None)
@combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
def testShuffleV2InFunction(self):
counter_var = variables.Variable(0)
@def_function.function
def consume():
ds = dataset_ops.Dataset.range(10)
ds = ds.shuffle(1)
for _ in ds:
counter_var.assign(counter_var + 1)
consume()
self.assertAllEqual(self.evaluate(counter_var), 10)
@combinations.generate(test_base.default_test_combinations())
def testEmptyDataset(self):
dataset = dataset_ops.Dataset.from_tensors(1)
def map_fn(x):
with ops.control_dependencies([check_ops.assert_equal(x, 0)]):
return x
dataset = dataset.map(map_fn)
dataset = dataset.cache()
dataset = dataset.shuffle(buffer_size=10).repeat()
get_next = self.getNext(dataset)
# First time around, we get an error for the failed assertion.
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
# Second time around, we get an EOF because the cached dataset is empty.
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(reshuffle=[True, False])))
def testDontRerandomizeOnReplicate(self, reshuffle):
random_seed.set_random_seed(None)
# Since the seed generator configuration is preserved across serialization
# of the dataset, each instantiation of the shuffle dataset
# should preserve the shuffle order if reshuffle=False. To preserve the
# shuffle order, the original dataset must be kept alive, since if the
# original dataset was destroyed, its seeds would also be destroyed.
num_elements = 100
dataset_1 = dataset_ops.Dataset.range(num_elements)
dataset_2 = dataset_1.shuffle(
num_elements, reshuffle_each_iteration=reshuffle)
shuffle_1 = self.getDatasetOutput(dataset_2)
dataset_3 = self.graphRoundTrip(dataset_2, allow_stateful=True)
shuffle_2 = self.getDatasetOutput(dataset_3)
self.assertCountEqual(shuffle_1, shuffle_2)
if reshuffle:
self.assertNotEqual(shuffle_1, shuffle_2)
@combinations.generate(test_base.eager_only_combinations())
def testCheckpointLargeBuffer(self):
if (pywrap_sanitizers.is_asan_enabled() or
pywrap_sanitizers.is_tsan_enabled() or
pywrap_sanitizers.is_msan_enabled()):
self.skipTest("Skip to avoid OOM when using sanitizers.")
if sys.platform == "darwin":
self.skipTest("Skip to avoid memory issues on mac.")
dataset = dataset_ops.Dataset.range(12).batch(2)
dataset = dataset.map(
# Create tensors of size 512M.
lambda seed: stateless_random_ops.stateless_random_uniform(
(128, 1024, 1024), seed, dtype=dtypes.float32
)
)
dataset = dataset.shuffle(buffer_size=6)
iterator = iter(dataset)
next(iterator) # Request an element to fill the shuffle buffer
ckpt = trackable_utils.Checkpoint(iterator=iterator)
manager = checkpoint_management.CheckpointManager(
ckpt, self.get_temp_dir(), max_to_keep=1)
manager.save()
del dataset
del iterator
manager.restore_or_initialize()
@combinations.generate(test_base.default_test_combinations())
def testName(self):
dataset = dataset_ops.Dataset.from_tensors(42).shuffle(1, name="shuffle")
self.assertDatasetProduces(dataset, [42])
| ShuffleTest |
python | pennersr__django-allauth | allauth/socialaccount/providers/dingtalk/views.py | {
"start": 223,
"end": 1421
} | class ____(OAuth2Adapter):
provider_id = "dingtalk"
access_token_url = "https://api.dingtalk.com/v1.0/oauth2/userAccessToken" # nosec
authorize_url = "https://login.dingtalk.com/oauth2/auth"
profile_url = "https://api.dingtalk.com/v1.0/contact/users/me"
client_class = DingTalkOAuth2Client
def __init__(self, request):
# dingtalk set "authCode" instead of "code" in callback url
if "authCode" in request.GET:
request.GET._mutable = True
request.GET["code"] = request.GET["authCode"]
request.GET._mutable = False
super(DingTalkOAuth2Adapter, self).__init__(request)
def complete_login(self, request, app, token, **kwargs):
headers = {"x-acs-dingtalk-access-token": token.token}
resp = (
get_adapter().get_requests_session().get(self.profile_url, headers=headers)
)
resp.raise_for_status()
extra_data = resp.json()
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(DingTalkOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(DingTalkOAuth2Adapter)
| DingTalkOAuth2Adapter |
python | pytorch__pytorch | test/distributed/fsdp/test_shard_utils.py | {
"start": 569,
"end": 1519
} | class ____(FSDPTest):
@property
def world_size(self):
return 2
def _create_tensor(self, *size):
# Keep everything deterministic.
torch.manual_seed(0)
return torch.rand(*size).to(device=device_type)
@skip_if_lt_x_gpu(2)
def test_create_chunk_sharded_tensor(self):
for size in ((1,), (1, 6), (12,), (12, 6), (25,), (25, 6)):
tensor = self._create_tensor(*size)
sharded_tensor = _create_chunk_sharded_tensor(
tensor,
self.rank,
self.world_size,
torch.accelerator.device_count(),
_get_default_group(),
)
output = (
torch.empty(*size).to(device=device_type) if self.rank == 0 else None
)
sharded_tensor.gather(0, output)
if self.rank == 0:
self.assertEqual(tensor, output)
| TestShardUtilsDistributed |
python | scipy__scipy | scipy/stats/tests/test_mstats_basic.py | {
"start": 20539,
"end": 26116
} | class ____:
def test_trim(self):
a = ma.arange(10)
assert_equal(mstats.trim(a), [0,1,2,3,4,5,6,7,8,9])
a = ma.arange(10)
assert_equal(mstats.trim(a,(2,8)), [None,None,2,3,4,5,6,7,8,None])
a = ma.arange(10)
assert_equal(mstats.trim(a,limits=(2,8),inclusive=(False,False)),
[None,None,None,3,4,5,6,7,None,None])
a = ma.arange(10)
assert_equal(mstats.trim(a,limits=(0.1,0.2),relative=True),
[None,1,2,3,4,5,6,7,None,None])
a = ma.arange(12)
a[[0,-1]] = a[5] = masked
assert_equal(mstats.trim(a, (2,8)),
[None, None, 2, 3, 4, None, 6, 7, 8, None, None, None])
x = ma.arange(100).reshape(10, 10)
expected = [1]*10 + [0]*70 + [1]*20
trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)
assert_equal(trimx._mask.ravel(), expected)
trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)
assert_equal(trimx._mask.ravel(), expected)
trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=-1)
assert_equal(trimx._mask.T.ravel(), expected)
# same as above, but with an extra masked row inserted
x = ma.arange(110).reshape(11, 10)
x[1] = masked
expected = [1]*20 + [0]*70 + [1]*20
trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)
assert_equal(trimx._mask.ravel(), expected)
trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)
assert_equal(trimx._mask.ravel(), expected)
trimx = mstats.trim(x.T, (0.1,0.2), relative=True, axis=-1)
assert_equal(trimx.T._mask.ravel(), expected)
def test_trim_old(self):
x = ma.arange(100)
assert_equal(mstats.trimboth(x).count(), 60)
assert_equal(mstats.trimtail(x,tail='r').count(), 80)
x[50:70] = masked
trimx = mstats.trimboth(x)
assert_equal(trimx.count(), 48)
assert_equal(trimx._mask, [1]*16 + [0]*34 + [1]*20 + [0]*14 + [1]*16)
x._mask = nomask
x = x.reshape((10,10))
assert_equal(mstats.trimboth(x).count(), 60)
assert_equal(mstats.trimtail(x).count(), 80)
def test_trimr(self):
x = ma.arange(10)
result = mstats.trimr(x, limits=(0.15, 0.14), inclusive=(False, False))
expected = ma.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
mask=[1, 1, 0, 0, 0, 0, 0, 0, 0, 1])
assert_equal(result, expected)
assert_equal(result.mask, expected.mask)
def test_trimmedmean(self):
data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
296,299,306,376,428,515,666,1310,2611])
assert_almost_equal(mstats.trimmed_mean(data,0.1), 343, 0)
assert_almost_equal(mstats.trimmed_mean(data,(0.1,0.1)), 343, 0)
assert_almost_equal(mstats.trimmed_mean(data,(0.2,0.2)), 283, 0)
def test_trimmedvar(self):
# Basic test. Additional tests of all arguments, edge cases,
# input validation, and proper treatment of masked arrays are needed.
rng = np.random.default_rng(3262323289434724460)
data_orig = rng.random(size=20)
data = np.sort(data_orig)
data = ma.array(data, mask=[1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 1])
assert_allclose(mstats.trimmed_var(data_orig, 0.1), data.var())
def test_trimmedstd(self):
# Basic test. Additional tests of all arguments, edge cases,
# input validation, and proper treatment of masked arrays are needed.
rng = np.random.default_rng(7121029245207162780)
data_orig = rng.random(size=20)
data = np.sort(data_orig)
data = ma.array(data, mask=[1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 1])
assert_allclose(mstats.trimmed_std(data_orig, 0.1), data.std())
def test_trimmed_stde(self):
data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
296,299,306,376,428,515,666,1310,2611])
assert_almost_equal(mstats.trimmed_stde(data,(0.2,0.2)), 56.13193, 5)
assert_almost_equal(mstats.trimmed_stde(data,0.2), 56.13193, 5)
def test_winsorization(self):
data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
296,299,306,376,428,515,666,1310,2611])
assert_almost_equal(mstats.winsorize(data,(0.2,0.2)).var(ddof=1),
21551.4, 1)
assert_almost_equal(
mstats.winsorize(data, (0.2,0.2),(False,False)).var(ddof=1),
11887.3, 1)
data[5] = masked
winsorized = mstats.winsorize(data)
assert_equal(winsorized.mask, data.mask)
def test_winsorization_nan(self):
data = ma.array([np.nan, np.nan, 0, 1, 2])
assert_raises(ValueError, mstats.winsorize, data, (0.05, 0.05),
nan_policy='raise')
# Testing propagate (default behavior)
assert_equal(mstats.winsorize(data, (0.4, 0.4)),
ma.array([2, 2, 2, 2, 2]))
assert_equal(mstats.winsorize(data, (0.8, 0.8)),
ma.array([np.nan, np.nan, np.nan, np.nan, np.nan]))
assert_equal(mstats.winsorize(data, (0.4, 0.4), nan_policy='omit'),
ma.array([np.nan, np.nan, 2, 2, 2]))
assert_equal(mstats.winsorize(data, (0.8, 0.8), nan_policy='omit'),
ma.array([np.nan, np.nan, 2, 2, 2]))
@skip_xp_invalid_arg
| TestTrimming |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/ruff/docstring.py | {
"start": 1011,
"end": 1392
} | class ____:
def doc_string_without_linebreak_after_colon(self): """ This is somewhat strange
a
b
We format this a is the docstring had started properly indented on the next
line if the target indentation. This may we incorrect since source and target
indentation can be incorrect, but this is also an edge case.
"""
| IndentMeSome |
python | sympy__sympy | sympy/solvers/ode/single.py | {
"start": 89400,
"end": 93185
} | class ____(SingleODESolver):
r"""
Solves an `n`\th order linear non homogeneous Cauchy-Euler equidimensional
ordinary differential equation using variation of parameters.
This is an equation with form `g(x) = a_0 f(x) + a_1 x f'(x) + a_2 x^2 f''(x)
\cdots`.
This method works by assuming that the particular solution takes the form
.. math:: \sum_{x=1}^{n} c_i(x) y_i(x) {a_n} {x^n} \text{, }
where `y_i` is the `i`\th solution to the homogeneous equation. The
solution is then solved using Wronskian's and Cramer's Rule. The
particular solution is given by multiplying eq given below with `a_n x^{n}`
.. math:: \sum_{x=1}^n \left( \int \frac{W_i(x)}{W(x)} \, dx
\right) y_i(x) \text{, }
where `W(x)` is the Wronskian of the fundamental system (the system of `n`
linearly independent solutions to the homogeneous equation), and `W_i(x)`
is the Wronskian of the fundamental system with the `i`\th column replaced
with `[0, 0, \cdots, 0, \frac{x^{- n}}{a_n} g{\left(x \right)}]`.
This method is general enough to solve any `n`\th order inhomogeneous
linear differential equation, but sometimes SymPy cannot simplify the
Wronskian well enough to integrate it. If this method hangs, try using the
``nth_linear_constant_coeff_variation_of_parameters_Integral`` hint and
simplifying the integrals manually. Also, prefer using
``nth_linear_constant_coeff_undetermined_coefficients`` when it
applies, because it does not use integration, making it faster and more
reliable.
Warning, using simplify=False with
'nth_linear_constant_coeff_variation_of_parameters' in
:py:meth:`~sympy.solvers.ode.dsolve` may cause it to hang, because it will
not attempt to simplify the Wronskian before integrating. It is
recommended that you only use simplify=False with
'nth_linear_constant_coeff_variation_of_parameters_Integral' for this
method, especially if the solution to the homogeneous equation has
trigonometric functions in it.
Examples
========
>>> from sympy import Function, dsolve, Derivative
>>> from sympy.abc import x
>>> f = Function('f')
>>> eq = x**2*Derivative(f(x), x, x) - 2*x*Derivative(f(x), x) + 2*f(x) - x**4
>>> dsolve(eq, f(x),
... hint='nth_linear_euler_eq_nonhomogeneous_variation_of_parameters').expand()
Eq(f(x), C1*x + C2*x**2 + x**4/6)
"""
hint = "nth_linear_euler_eq_nonhomogeneous_variation_of_parameters"
has_integral = True
def _matches(self):
eq = self.ode_problem.eq_preprocessed
f = self.ode_problem.func.func
order = self.ode_problem.order
x = self.ode_problem.sym
match = self.ode_problem.get_linear_coefficients(eq, f(x), order)
self.r = None
does_match = False
if order and match:
coeff = match[order]
factor = x**order / coeff
self.r = {i: factor*match[i] for i in match}
if self.r and all(_test_term(self.r[i], f(x), i) for i in
self.r if i >= 0):
if self.r[-1]:
does_match = True
return does_match
def _get_general_solution(self, *, simplify_flag: bool = True):
eq = self.ode_problem.eq
f = self.ode_problem.func.func
x = self.ode_problem.sym
order = self.ode_problem.order
homogen_sol, roots = _get_euler_characteristic_eq_sols(eq, f(x), self.r)
self.r[-1] = self.r[-1]/self.r[order]
sol = _solve_variation_of_parameters(eq, f(x), roots, homogen_sol, order, self.r, simplify_flag)
return [Eq(f(x), homogen_sol.rhs + (sol.rhs - homogen_sol.rhs)*self.r[order])]
| NthLinearEulerEqNonhomogeneousVariationOfParameters |
python | getsentry__sentry | tests/sentry/releases/endpoints/test_project_releases.py | {
"start": 1212,
"end": 4775
} | class ____(APITestCase):
def test_simple(self) -> None:
self.login_as(user=self.user)
team = self.create_team()
project1 = self.create_project(teams=[team], name="foo")
project2 = self.create_project(teams=[team], name="bar")
release1 = Release.objects.create(
organization_id=project1.organization_id,
version="1",
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC),
)
release1.add_project(project1)
ReleaseProject.objects.filter(project=project1, release=release1).update(new_groups=5)
release2 = Release.objects.create(
organization_id=project1.organization_id,
version="2",
date_added=datetime(2013, 8, 14, 3, 8, 24, 880386, tzinfo=UTC),
)
release2.add_project(project1)
release3 = Release.objects.create(
organization_id=project1.organization_id,
version="3",
date_added=datetime(2013, 8, 12, 3, 8, 24, 880386, tzinfo=UTC),
date_released=datetime(2013, 8, 15, 3, 8, 24, 880386, tzinfo=UTC),
user_agent="my_agent",
)
release3.add_project(project1)
release4 = Release.objects.create(organization_id=project2.organization_id, version="4")
release4.add_project(project2)
url = reverse(
"sentry-api-0-project-releases",
kwargs={
"organization_id_or_slug": project1.organization.slug,
"project_id_or_slug": project1.slug,
},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert len(response.data) == 3
assert response.data[0]["version"] == release3.version
assert response.data[0]["userAgent"] == "my_agent"
assert response.data[1]["version"] == release2.version
assert response.data[2]["version"] == release1.version
assert response.data[2]["newGroups"] == 5
def test_query_filter(self) -> None:
self.login_as(user=self.user)
team = self.create_team()
project = self.create_project(teams=[team], name="foo")
release = Release.objects.create(
organization_id=project.organization_id,
version="foobar",
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC),
)
release.add_project(project)
url = reverse(
"sentry-api-0-project-releases",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
response = self.client.get(url + "?query=foo", format="json")
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]["version"] == release.version
response = self.client.get(url + "?query=baz", format="json")
assert response.status_code == 200, response.content
assert len(response.data) == 0
release = Release.objects.create(
organization_id=project.organization_id,
version="foo.bar-1.0.0",
date_added=datetime(2013, 8, 14, 3, 8, 24, 880386, tzinfo=UTC),
)
release.add_project(project)
response = self.client.get(url + "?query=1", format="json")
assert response.status_code == 200, response.content
assert len(response.data) == 1
| ProjectReleaseListTest |
python | PrefectHQ__prefect | src/integrations/prefect-azure/tests/deployments/test_steps.py | {
"start": 1383,
"end": 8826
} | class ____:
@pytest.mark.usefixtures("mock_azure_blob_storage")
def test_push_to_azure_blob_storage_with_connection_string(
self, tmp_files: Path, container_client_mock: MagicMock
):
container = "test-container"
folder = "test-folder"
credentials = {"connection_string": "fake_connection_string"}
os.chdir(tmp_files)
push_to_azure_blob_storage(container, folder, credentials)
container_client_mock.from_connection_string.assert_called_once_with(
credentials["connection_string"], container_name=container
)
upload_blob_mock = (
container_client_mock.from_connection_string.return_value.__enter__.return_value.upload_blob # noqa
)
upload_blob_mock.assert_has_calls(
[
call(
f"{folder}/testfile1.txt",
ANY,
overwrite=True,
),
call(
f"{folder}/testfile2.txt",
ANY,
overwrite=True,
),
call(
f"{folder}/testfile3.txt",
ANY,
overwrite=True,
),
call(
f"{folder}/testdir2/testfile5.txt",
ANY,
overwrite=True,
),
],
any_order=True,
)
assert all(
[
open(call[1][1].name).read() == "Sample text"
for call in upload_blob_mock.mock_calls
]
)
@pytest.mark.usefixtures("mock_azure_blob_storage")
def test_push_to_azure_blob_storage_with_account_url(
self, tmp_files: Path, container_client_mock: MagicMock
):
container = "test-container"
folder = "test-folder"
credentials = {"account_url": "https://fake_account_url.blob.core.windows.net/"}
os.chdir(tmp_files)
push_to_azure_blob_storage(container, folder, credentials)
container_client_mock.assert_called_once_with(
account_url=credentials["account_url"],
container_name=container,
credential=ANY,
)
upload_blob_mock = (
container_client_mock.return_value.__enter__.return_value.upload_blob
)
upload_blob_mock.assert_has_calls(
[
call(
f"{folder}/testfile1.txt",
ANY,
overwrite=True,
),
call(
f"{folder}/testfile2.txt",
ANY,
overwrite=True,
),
call(
f"{folder}/testfile3.txt",
ANY,
overwrite=True,
),
call(
f"{folder}/testdir2/testfile5.txt",
ANY,
overwrite=True,
),
],
any_order=True,
)
assert all(
[
open(call[1][1].name).read() == "Sample text"
for call in upload_blob_mock.mock_calls
]
)
@pytest.mark.usefixtures("mock_azure_blob_storage")
def test_push_to_azure_blob_storage_missing_credentials(self, tmp_files: Path):
container = "test-container"
folder = "test-folder"
credentials = {}
os.chdir(tmp_files)
with pytest.raises(
ValueError,
match="Credentials must contain either connection_string or account_url",
):
push_to_azure_blob_storage(container, folder, credentials)
@pytest.mark.usefixtures("mock_azure_blob_storage")
def test_push_to_azure_blob_storage_both_credentials_provided(
self, tmp_files: Path, container_client_mock: MagicMock
):
"""connection_string should take precedence over account_url"""
container = "test-container"
folder = "test-folder"
credentials = {
"account_url": "https://fake_account_url.blob.core.windows.net/",
"connection_string": "fake_connection_string",
}
os.chdir(tmp_files)
push_to_azure_blob_storage(container, folder, credentials)
container_client_mock.from_connection_string.assert_called_once_with(
credentials["connection_string"], container_name=container
)
upload_blob_mock = (
container_client_mock.from_connection_string.return_value.__enter__.return_value.upload_blob # noqa
)
upload_blob_mock.assert_has_calls(
[
call(
f"{folder}/testfile1.txt",
ANY,
overwrite=True,
),
call(
f"{folder}/testfile2.txt",
ANY,
overwrite=True,
),
call(
f"{folder}/testfile3.txt",
ANY,
overwrite=True,
),
call(
f"{folder}/testdir2/testfile5.txt",
ANY,
overwrite=True,
),
],
any_order=True,
)
assert all(
[
open(call[1][1].name).read() == "Sample text"
for call in upload_blob_mock.mock_calls
]
)
@pytest.mark.usefixtures("mock_azure_blob_storage")
def test_push_to_azure_blob_storage_trailing_slash_in_folder(
self, tmp_files: Path, container_client_mock: MagicMock
):
container = "test-container"
folder = "test-folder/"
credentials = {"connection_string": "fake_connection_string"}
os.chdir(tmp_files)
push_to_azure_blob_storage(container, folder, credentials)
upload_blob_mock = (
container_client_mock.from_connection_string.return_value.__enter__.return_value.upload_blob # noqa
)
# Assert that the trailing slash is properly handled
upload_blob_mock.assert_has_calls(
[
call(
"test-folder/testfile1.txt",
ANY,
overwrite=True,
),
# ... repeat for other files
],
any_order=True,
)
@pytest.mark.usefixtures("mock_azure_blob_storage")
def test_push_to_azure_blob_storage_no_folder_provided(
self, tmp_files: Path, container_client_mock: MagicMock
):
container = "test-container"
credentials = {"connection_string": "fake_connection_string"}
os.chdir(tmp_files)
push_to_azure_blob_storage(container, "", credentials)
upload_blob_mock = (
container_client_mock.from_connection_string.return_value.__enter__.return_value.upload_blob # noqa
)
# Assert that the files are uploaded to the root of the container
upload_blob_mock.assert_has_calls(
[
call(
"testfile1.txt",
ANY,
overwrite=True,
),
# ... repeat for other files
],
any_order=True,
)
| TestPush |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/dataset.py | {
"start": 8199,
"end": 11056
} | class ____(GoogleCloudBaseOperator):
"""
Deletes a Dataset.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param region: Required. The Cloud Dataproc region in which to handle the request.
:param dataset_id: Required. The ID of the Dataset to delete.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "dataset_id", "project_id", "impersonation_chain")
def __init__(
self,
*,
region: str,
project_id: str,
dataset_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.dataset_id = dataset_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DatasetHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
self.log.info("Deleting dataset: %s", self.dataset_id)
operation = hook.delete_dataset(
project_id=self.project_id,
region=self.region,
dataset=self.dataset_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Dataset was deleted.")
except NotFound:
self.log.info("The Dataset ID %s does not exist.", self.dataset_id)
| DeleteDatasetOperator |
python | django__django | django/db/backends/ddl_references.py | {
"start": 1876,
"end": 2439
} | class ____(Table):
"""Base class for references to multiple columns of a table."""
def __init__(self, table, columns):
self.table = table
self.columns = columns
def references_column(self, table, column):
return self.table == table and column in self.columns
def rename_column_references(self, table, old_column, new_column):
if self.table == table:
for index, column in enumerate(self.columns):
if column == old_column:
self.columns[index] = new_column
| TableColumns |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/sensors/sagemaker.py | {
"start": 6968,
"end": 10156
} | class ____(SageMakerBaseSensor):
"""
Poll the training job until it reaches a terminal state; raise AirflowException with the failure reason.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:SageMakerTrainingSensor`
:param job_name: Name of the training job to watch.
:param print_log: Prints the cloudwatch log if True; Defaults to True.
"""
template_fields: Sequence[str] = aws_template_fields(
"job_name",
)
template_ext: Sequence[str] = ()
def __init__(self, *, job_name, print_log=True, **kwargs):
super().__init__(**kwargs)
self.job_name = job_name
self.print_log = print_log
self.positions = {}
self.stream_names = []
self.instance_count: int | None = None
self.state: int | None = None
self.last_description = None
self.last_describe_job_call = None
self.log_resource_inited = False
def init_log_resource(self, hook: SageMakerHook) -> None:
"""Set tailing LogState for associated training job."""
description = hook.describe_training_job(self.job_name)
self.instance_count = description["ResourceConfig"]["InstanceCount"]
status = description["TrainingJobStatus"]
job_already_completed = status not in self.non_terminal_states()
self.state = LogState.COMPLETE if job_already_completed else LogState.TAILING
self.last_description = description
self.last_describe_job_call = time.monotonic()
self.log_resource_inited = True
def non_terminal_states(self):
return SageMakerHook.non_terminal_states
def failed_states(self):
return SageMakerHook.training_failed_states
def get_sagemaker_response(self):
if self.print_log:
if not self.log_resource_inited:
self.init_log_resource(self.hook)
(
self.state,
self.last_description,
self.last_describe_job_call,
) = self.hook.describe_training_job_with_log(
self.job_name,
self.positions,
self.stream_names,
self.instance_count,
self.state,
self.last_description,
self.last_describe_job_call,
)
else:
self.last_description = self.hook.describe_training_job(self.job_name)
status = self.state_from_response(self.last_description)
if (status not in self.non_terminal_states()) and (status not in self.failed_states()):
billable_time = (
self.last_description["TrainingEndTime"] - self.last_description["TrainingStartTime"]
) * self.last_description["ResourceConfig"]["InstanceCount"]
self.log.info("Billable seconds: %s", (int(billable_time.total_seconds()) + 1))
return self.last_description
def get_failed_reason_from_response(self, response):
return response["FailureReason"]
def state_from_response(self, response):
return response["TrainingJobStatus"]
| SageMakerTrainingSensor |
python | wandb__wandb | wandb/sdk/data_types/table.py | {
"start": 43667,
"end": 48860
} | class ____(Media):
"""Join two tables for visualization in the Artifact UI.
Args:
table1 (str, wandb.Table, ArtifactManifestEntry):
the path to a wandb.Table in an artifact, the table object, or ArtifactManifestEntry
table2 (str, wandb.Table):
the path to a wandb.Table in an artifact, the table object, or ArtifactManifestEntry
join_key (str, [str, str]):
key or keys to perform the join
"""
_log_type = "joined-table"
def __init__(self, table1, table2, join_key):
super().__init__()
if not isinstance(join_key, str) and (
not isinstance(join_key, list) or len(join_key) != 2
):
raise ValueError(
"JoinedTable join_key should be a string or a list of two strings"
)
if not self._validate_table_input(table1):
raise ValueError(
"JoinedTable table1 should be an artifact path to a table or wandb.Table object"
)
if not self._validate_table_input(table2):
raise ValueError(
"JoinedTable table2 should be an artifact path to a table or wandb.Table object"
)
self._table1 = table1
self._table2 = table2
self._join_key = join_key
@classmethod
def from_json(cls, json_obj, source_artifact):
t1 = source_artifact.get(json_obj["table1"])
if t1 is None:
t1 = json_obj["table1"]
t2 = source_artifact.get(json_obj["table2"])
if t2 is None:
t2 = json_obj["table2"]
return cls(
t1,
t2,
json_obj["join_key"],
)
@staticmethod
def _validate_table_input(table):
"""Helper method to validate that the table input is one of the 3 supported types."""
return (
(isinstance(table, str) and table.endswith(".table.json"))
or isinstance(table, Table)
or isinstance(table, PartitionedTable)
or (hasattr(table, "ref_url") and table.ref_url().endswith(".table.json"))
)
def _ensure_table_in_artifact(self, table, artifact, table_ndx):
"""Helper method to add the table to the incoming artifact. Returns the path."""
if isinstance(table, Table) or isinstance(table, PartitionedTable):
table_name = f"t{table_ndx}_{str(id(self))}"
if (
table._artifact_source is not None
and table._artifact_source.name is not None
):
table_name = os.path.basename(table._artifact_source.name)
entry = artifact.add(table, table_name)
table = entry.path
# Check if this is an ArtifactManifestEntry
elif hasattr(table, "ref_url"):
# Give the new object a unique, yet deterministic name
name = binascii.hexlify(base64.standard_b64decode(table.digest)).decode(
"ascii"
)[:20]
entry = artifact.add_reference(
table.ref_url(), "{}.{}.json".format(name, table.name.split(".")[-2])
)[0]
table = entry.path
err_str = "JoinedTable table:{} not found in artifact. Add a table to the artifact using Artifact#add(<table>, {}) before adding this JoinedTable"
if table not in artifact._manifest.entries:
raise ValueError(err_str.format(table, table))
return table
def to_json(self, artifact_or_run):
json_obj = {
"_type": JoinedTable._log_type,
}
if isinstance(artifact_or_run, wandb.Run):
artifact_entry_url = self._get_artifact_entry_ref_url()
if artifact_entry_url is None:
raise ValueError(
"JoinedTables must first be added to an Artifact before logging to a Run"
)
json_obj["artifact_path"] = artifact_entry_url
else:
table1 = self._ensure_table_in_artifact(self._table1, artifact_or_run, 1)
table2 = self._ensure_table_in_artifact(self._table2, artifact_or_run, 2)
json_obj.update(
{
"table1": table1,
"table2": table2,
"join_key": self._join_key,
}
)
return json_obj
def __ne__(self, other):
return not self.__eq__(other)
def _eq_debug(self, other, should_assert=False):
eq = isinstance(other, JoinedTable)
assert not should_assert or eq, (
f"Found type {other.__class__}, expected {JoinedTable}"
)
eq = eq and self._join_key == other._join_key
assert not should_assert or eq, (
f"Found {other._join_key} join key, expected {self._join_key}"
)
eq = eq and self._table1._eq_debug(other._table1, should_assert)
eq = eq and self._table2._eq_debug(other._table2, should_assert)
return eq
def __eq__(self, other):
return self._eq_debug(other, False)
def bind_to_run(self, *args, **kwargs):
raise ValueError("JoinedTables cannot be bound to runs")
| JoinedTable |
python | Textualize__textual | docs/examples/styles/height_comparison.py | {
"start": 131,
"end": 254
} | class ____(Static):
def compose(self):
ruler_text = "·\n·\n·\n·\n•\n" * 100
yield Label(ruler_text)
| Ruler |
python | sqlalchemy__sqlalchemy | test/dialect/oracle/test_dialect.py | {
"start": 8633,
"end": 9983
} | class ____(fixtures.TestBase):
__backend__ = True
__only_on__ = "oracle"
def test_default_name_is_the_user(self):
default_schema_name = testing.db.dialect.default_schema_name
with testing.db.connect() as conn:
oracles_known_default_schema_name = (
testing.db.dialect.normalize_name(
conn.exec_driver_sql("SELECT USER FROM DUAL").scalar()
)
)
eq_(oracles_known_default_schema_name, default_schema_name)
def test_default_schema_detected(self):
default_schema_name = testing.db.dialect.default_schema_name
eng = engines.testing_engine()
with eng.connect() as conn:
trans = conn.begin()
eq_(
testing.db.dialect._get_default_schema_name(conn),
default_schema_name,
)
conn.exec_driver_sql(
"ALTER SESSION SET CURRENT_SCHEMA=%s" % config.test_schema
)
eq_(
testing.db.dialect._get_default_schema_name(conn),
config.test_schema,
)
conn.invalidate()
trans.rollback()
eq_(
testing.db.dialect._get_default_schema_name(conn),
default_schema_name,
)
| DefaultSchemaNameTest |
python | getsentry__sentry | src/sentry/integrations/vsts/handlers/azure_devops_handler.py | {
"start": 410,
"end": 571
} | class ____(TicketingActionHandler):
group = ActionHandler.Group.TICKET_CREATION
provider_slug = IntegrationProviderSlug.AZURE_DEVOPS
| AzureDevopsActionHandler |
python | kamyu104__LeetCode-Solutions | Python/apply-operations-to-maximize-frequency-score.py | {
"start": 70,
"end": 726
} | class ____(object):
def maxFrequencyScore(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
nums.sort()
result = left = curr = 0
for right in xrange(len(nums)):
# "-+ " => "-0+ "
# "-0+ " => "--++"
curr += nums[right]-nums[(left+right)//2]
if not curr <= k:
# "--++" => " -0+"
# " -0+" => " -+"
curr -= nums[((left+1)+right)//2]-nums[left]
left += 1
return right-left+1
# Time: O(nlogn)
# Space: O(1)
# sort, two pointers, sliding window
| Solution |
python | docker__docker-py | docker/types/services.py | {
"start": 13409,
"end": 15835
} | class ____(dict):
"""
Configures resource allocation for containers when made part of a
:py:class:`~docker.types.ContainerSpec`.
Args:
cpu_limit (int): CPU limit in units of 10^9 CPU shares.
mem_limit (int): Memory limit in Bytes.
cpu_reservation (int): CPU reservation in units of 10^9 CPU shares.
mem_reservation (int): Memory reservation in Bytes.
generic_resources (dict or :py:class:`list`): Node level generic
resources, for example a GPU, using the following format:
``{ resource_name: resource_value }``. Alternatively, a list of
of resource specifications as defined by the Engine API.
"""
def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None,
mem_reservation=None, generic_resources=None):
limits = {}
reservation = {}
if cpu_limit is not None:
limits['NanoCPUs'] = cpu_limit
if mem_limit is not None:
limits['MemoryBytes'] = mem_limit
if cpu_reservation is not None:
reservation['NanoCPUs'] = cpu_reservation
if mem_reservation is not None:
reservation['MemoryBytes'] = mem_reservation
if generic_resources is not None:
reservation['GenericResources'] = (
_convert_generic_resources_dict(generic_resources)
)
if limits:
self['Limits'] = limits
if reservation:
self['Reservations'] = reservation
def _convert_generic_resources_dict(generic_resources):
if isinstance(generic_resources, list):
return generic_resources
if not isinstance(generic_resources, dict):
raise errors.InvalidArgument(
'generic_resources must be a dict or a list '
f'(found {type(generic_resources)})'
)
resources = []
for kind, value in generic_resources.items():
resource_type = None
if isinstance(value, int):
resource_type = 'DiscreteResourceSpec'
elif isinstance(value, str):
resource_type = 'NamedResourceSpec'
else:
kv = {kind: value}
raise errors.InvalidArgument(
f'Unsupported generic resource reservation type: {kv}'
)
resources.append({
resource_type: {'Kind': kind, 'Value': value}
})
return resources
| Resources |
python | ansible__ansible | test/lib/ansible_test/_internal/test.py | {
"start": 5497,
"end": 5778
} | class ____(TestResult):
"""Test success."""
def write_junit(self, args: TestConfig) -> None:
"""Write results to a junit XML file."""
test_case = junit_xml.TestCase(classname=self.command, name=self.name)
self.save_junit(args, test_case)
| TestSuccess |
python | getsentry__sentry | tests/sentry/notifications/api/endpoints/test_notification_actions_index.py | {
"start": 1768,
"end": 17574
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-notification-actions"
@patch.dict(NotificationAction._registry, {})
def setUp(self) -> None:
self.user = self.create_user("thepaleking@hk.com")
self.organization = self.create_organization(name="hallownest", owner=self.user)
self.other_organization = self.create_organization(name="pharloom", owner=self.user)
self.team = self.create_team(
name="pale beings", organization=self.organization, members=[self.user]
)
self.projects = [
self.create_project(name="greenpath", organization=self.organization),
self.create_project(name="dirtmouth", organization=self.organization),
]
self.base_data: MutableMapping[str, Any] = {
"serviceType": "email",
"triggerType": "audit-log",
"targetType": "specific",
"targetDisplay": "@hollowknight",
"targetIdentifier": "THK",
}
self.login_as(user=self.user)
def test_get_simple(self) -> None:
notif_actions = [
self.create_notification_action(organization=self.organization),
self.create_notification_action(organization=self.organization),
]
other_notif_action = self.create_notification_action(organization=self.other_organization)
response = self.get_success_response(
self.organization.slug,
status_code=status.HTTP_200_OK,
)
assert len(response.data) == len(notif_actions)
assert serialize(other_notif_action) not in response.data
for action in notif_actions:
assert serialize(action) in response.data
@patch.object(
NotificationAction,
"get_trigger_types",
return_value=[(0, "teacher"), (1, "watcher"), (2, "beast")],
)
def test_get_with_queries(self, mock_trigger_types: MagicMock) -> None:
project = self.create_project(name="deepnest", organization=self.organization)
no_team_project = self.create_project(
name="waterways", organization=self.organization, teams=[]
)
na1 = self.create_notification_action(
organization=self.organization,
projects=self.projects,
trigger_type=0,
)
na2 = self.create_notification_action(
organization=self.organization,
projects=[project],
trigger_type=0,
)
na3 = self.create_notification_action(
organization=self.organization,
projects=[project, *self.projects],
trigger_type=1,
)
na4 = self.create_notification_action(
organization=self.organization,
trigger_type=0,
)
na5 = self.create_notification_action(
organization=self.organization,
projects=[no_team_project],
trigger_type=1,
)
query_data: dict[str, _QueryResult] = {
"checks projects by default": {"query": {}, "result": {na1, na2, na3, na4}},
"regular project": {
"query": {"project": project.id},
"result": {na2, na3},
},
"regular trigger": {
"query": {"triggerType": "teacher"},
"result": {na1, na2, na4},
},
"using both": {
"query": {"project": project.id, "triggerType": "teacher"},
"result": {na2},
},
"empty result": {
"query": {"triggerType": "beast"},
"result": set(),
},
"not member": {"query": {"triggerType": "watcher"}, "result": {na3}},
"not member but has access": {
"query": {"project": -1, "triggerType": "watcher"},
"result": {na3, na5},
},
}
for data in query_data.values():
response = self.get_success_response(
self.organization.slug,
status_code=status.HTTP_200_OK,
qs_params=data["query"],
)
assert len(response.data) == len(data["result"])
for action in data["result"]:
assert serialize(action) in response.data
def test_post_missing_fields(self) -> None:
required_fields = ["serviceType", "triggerType"]
response = self.get_error_response(
self.organization.slug,
status_code=status.HTTP_400_BAD_REQUEST,
method="POST",
)
for field in required_fields:
assert field in response.data
def test_post_invalid_types(self) -> None:
invalid_types = {
"serviceType": "stag",
"triggerType": "ascension",
"targetType": "shade",
}
for type_key, invalid_value in invalid_types.items():
data = {**self.base_data}
data[type_key] = invalid_value
response = self.get_error_response(
self.organization.slug,
status_code=status.HTTP_400_BAD_REQUEST,
method="POST",
**data,
)
assert type_key in response.data
def test_post_invalid_integration(self) -> None:
data = {**self.base_data}
# Unknown integration
data["integrationId"] = -1
response = self.get_error_response(
self.organization.slug,
status_code=status.HTTP_400_BAD_REQUEST,
method="POST",
**data,
)
assert "integrationId" in response.data
# Integration from another organization
integration = self.create_integration(
organization=self.other_organization, external_id="sp1d3r"
)
data["integrationId"] = integration.id
response = self.get_error_response(
self.organization.slug,
status_code=status.HTTP_400_BAD_REQUEST,
method="POST",
**data,
)
assert "integrationId" in response.data
def test_post_invalid_projects(self) -> None:
data = {**self.base_data}
# Unknown project
data["projects"] = ["deep nest"]
response = self.get_error_response(
self.organization.slug,
status_code=status.HTTP_400_BAD_REQUEST,
method="POST",
**data,
)
assert "projects" in response.data
# Project from another organization
project = self.create_project(name="citadel", organization=self.other_organization)
data["projects"] = [project.slug]
response = self.get_error_response(
self.organization.slug,
status_code=status.HTTP_400_BAD_REQUEST,
method="POST",
**data,
)
assert "projects" in response.data
def test_post_no_project_access(self) -> None:
user = self.create_user("hornet@hk.com")
self.create_member(user=user, organization=self.organization)
self.login_as(user)
data = {
**self.base_data,
"projects": [p.slug for p in self.projects],
}
self.get_error_response(
self.organization.slug,
status_code=status.HTTP_403_FORBIDDEN,
method="POST",
**data,
)
def test_post_org_member(self) -> None:
user = self.create_user("hornet@hk.com")
self.create_member(user=user, organization=self.organization, teams=[self.team])
self.login_as(user)
data = {
**self.base_data,
"projects": [p.slug for p in self.projects],
}
self.get_error_response(
self.organization.slug,
status_code=status.HTTP_403_FORBIDDEN,
method="POST",
**data,
)
@patch.dict(NotificationAction._registry, {})
def test_post_raises_validation_from_registry(self) -> None:
error_message = "oops-idea-installed"
class MockActionRegistration(ActionRegistration):
validate_action = MagicMock(side_effect=serializers.ValidationError(error_message))
def fire(self, data: Any) -> None:
raise NotImplementedError
_mock_register(self.base_data)(MockActionRegistration)
response = self.get_error_response(
self.organization.slug,
status_code=status.HTTP_400_BAD_REQUEST,
method="POST",
**self.base_data,
)
assert error_message in str(response.data)
@patch.dict(NotificationAction._registry, {})
@responses.activate
@mock_slack_response(
"chat_scheduleMessage",
body={"ok": True, "channel": "CABC123", "scheduled_message_id": "Q1298393284"},
)
@mock_slack_response("chat_deleteScheduledMessage", body={"ok": True})
def test_post_with_slack_validation(self, mock_delete, mock_schedule) -> None:
class MockActionRegistration(ActionRegistration):
def fire(self, data: Any) -> None:
raise NotImplementedError
channel_name = "journal"
channel_id = "CABC123"
integration = install_slack(organization=self.organization)
data = {
"triggerType": "audit-log",
"targetType": "specific",
"serviceType": "slack",
"integrationId": integration.id,
"targetDisplay": f"#{channel_name}",
}
_mock_register(data)(MockActionRegistration)
response = self.get_success_response(
self.organization.slug,
status_code=status.HTTP_201_CREATED,
method="POST",
**data,
)
assert response.data["targetIdentifier"] == channel_id
@patch.dict(NotificationAction._registry, {})
def test_post_with_pagerduty_validation(self) -> None:
class MockActionRegistration(ActionRegistration):
def fire(self, data: Any) -> None:
raise NotImplementedError
service_name = "palace"
integration = self.create_integration(
organization=self.organization, external_id="pd-id", provider="pagerduty", name="dream"
)
second_integration = self.create_integration(
organization=self.organization, external_id="pd-id-2", provider="pagerduty", name="nail"
)
data = {
"triggerType": "audit-log",
"targetType": "specific",
"serviceType": "pagerduty",
"integrationId": integration.id,
"targetDisplay": "incorrect_service_name",
}
_mock_register(data)(MockActionRegistration)
# Didn't provide a targetIdentifier key
response = self.get_error_response(
self.organization.slug,
status_code=status.HTTP_400_BAD_REQUEST,
method="POST",
**data,
)
assert "Did not recieve PagerDuty service id" in str(response.data["targetIdentifier"])
with assume_test_silo_mode(SiloMode.CONTROL):
org_integration = second_integration.organizationintegration_set.first()
assert org_integration is not None, "org integration needs to exist!"
service = add_service(
org_integration,
service_name=service_name,
integration_key="abc",
)
data["targetIdentifier"] = service["id"]
response = self.get_error_response(
self.organization.slug,
status_code=status.HTTP_400_BAD_REQUEST,
method="POST",
**data,
)
assert "ensure Sentry has access" in str(response.data["targetIdentifier"])
with assume_test_silo_mode(SiloMode.CONTROL):
org_integration = integration.organizationintegration_set.first()
assert org_integration is not None, "org integration needs to exist!"
service = add_service(
org_integration,
service_name=service_name,
integration_key="def",
)
data["targetIdentifier"] = service["id"]
response = self.get_success_response(
self.organization.slug,
status_code=status.HTTP_201_CREATED,
method="POST",
**data,
)
assert response.data["targetIdentifier"] == service["id"]
assert response.data["targetDisplay"] == service["service_name"]
@patch.dict(NotificationAction._registry, {})
def test_post_simple(self) -> None:
class MockActionRegistration(ActionRegistration):
validate_action = MagicMock()
def fire(self, data: Any) -> None:
raise NotImplementedError
registration = MockActionRegistration
_mock_register(self.base_data)(registration)
data = {
**self.base_data,
"projects": [p.slug for p in self.projects],
}
registration.validate_action.assert_not_called()
response = self.get_success_response(
self.organization.slug,
status_code=status.HTTP_201_CREATED,
method="POST",
**data,
)
# Database reflects changes
registration.validate_action.assert_called()
notif_action = NotificationAction.objects.get(id=response.data.get("id"))
assert response.data == serialize(notif_action)
# Relation table has been updated
notif_action_projects = NotificationActionProject.objects.filter(action_id=notif_action.id)
assert len(notif_action_projects) == len(self.projects)
@patch.dict(NotificationAction._registry, {})
def test_post_org_admin(self) -> None:
user = self.create_user()
self.create_member(organization=self.organization, user=user, role="admin")
self.login_as(user)
self.test_post_simple()
@patch.dict(NotificationAction._registry, {})
def test_post_team_admin__success(self) -> None:
user = self.create_user()
member = self.create_member(organization=self.organization, user=user, role="member")
OrganizationMemberTeam.objects.create(
team=self.team, organizationmember=member, role="admin"
)
self.login_as(user)
self.test_post_simple()
@patch.dict(NotificationAction._registry, {})
def test_post_team_admin__missing_access(self) -> None:
user = self.create_user()
member = self.create_member(organization=self.organization, user=user, role="member")
OrganizationMemberTeam.objects.create(
team=self.team, organizationmember=member, role="admin"
)
self.login_as(user)
non_admin_project = self.create_project(
organization=self.organization, teams=[self.create_team()]
)
class MockActionRegistration(ActionRegistration):
validate_action = MagicMock()
def fire(self, data: Any) -> None:
raise NotImplementedError
registration = MockActionRegistration
_mock_register(self.base_data)(registration)
data = {
**self.base_data,
"projects": [p.slug for p in self.projects] + [non_admin_project.slug],
}
assert not registration.validate_action.called
response = self.get_error_response(
self.organization.slug,
status_code=status.HTTP_403_FORBIDDEN,
method="POST",
**data,
)
assert (
"You do not have permission to create notification actions for projects"
in response.data["detail"]
)
| NotificationActionsIndexEndpointTest |
python | django__django | tests/migrations/test_migrations_squashed_complex/3_squashed_5.py | {
"start": 35,
"end": 314
} | class ____(migrations.Migration):
replaces = [
("migrations", "3_auto"),
("migrations", "4_auto"),
("migrations", "5_auto"),
]
dependencies = [("migrations", "2_auto")]
operations = [migrations.RunPython(migrations.RunPython.noop)]
| Migration |
python | astropy__astropy | astropy/utils/masked/tests/test_masked.py | {
"start": 18046,
"end": 20083
} | class ____(MaskedArraySetup):
def test_copy(self):
ma_copy = self.ma.copy()
assert type(ma_copy) is type(self.ma)
assert_array_equal(ma_copy.unmasked, self.ma.unmasked)
assert_array_equal(ma_copy.mask, self.ma.mask)
assert not np.may_share_memory(ma_copy.unmasked, self.ma.unmasked)
assert not np.may_share_memory(ma_copy.mask, self.ma.mask)
@pytest.mark.parametrize("fill_value", (0, 1))
def test_filled(self, fill_value):
fill_value = fill_value * getattr(self.a, "unit", 1)
expected = self.a.copy()
expected[self.ma.mask] = fill_value
result = self.ma.filled(fill_value)
assert_array_equal(expected, result)
def test_filled_no_fill_value(self):
with pytest.raises(TypeError, match="missing 1 required"):
self.ma.filled()
@pytest.mark.parametrize("fill_value", [(0, 1), (-1, -1)])
def test_filled_structured(self, fill_value):
fill_value = np.array(fill_value, dtype=self.sdt)
if hasattr(self.sa, "unit"):
fill_value = fill_value << self.sa.unit
expected = self.sa.copy()
expected["a"][self.msa.mask["a"]] = fill_value["a"]
expected["b"][self.msa.mask["b"]] = fill_value["b"]
result = self.msa.filled(fill_value)
assert_array_equal(expected, result)
def test_flat(self):
ma_copy = self.ma.copy()
ma_flat = ma_copy.flat
# Check that single item keeps class and mask
ma_flat1 = ma_flat[1]
assert ma_flat1.unmasked == self.a.flat[1]
assert ma_flat1.mask == self.mask_a.flat[1]
# As well as getting items via iteration.
assert all(
(ma.unmasked == a and ma.mask == m)
for (ma, a, m) in zip(self.ma.flat, self.a.flat, self.mask_a.flat)
)
# check that flat works like a view of the real array
ma_flat[1] = self.b[1]
assert ma_flat[1] == self.b[1]
assert ma_copy[0, 1] == self.b[1]
| TestMaskedArrayCopyFilled |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/io_manager.py | {
"start": 4814,
"end": 9524
} | class ____(InputManager, OutputManager):
"""Base class for user-provided IO managers.
IOManagers are used to store op outputs and load them as inputs to downstream ops.
Extend this class to handle how objects are loaded and stored. Users should implement
``handle_output`` to store an object and ``load_input`` to retrieve an object.
"""
@public
@abstractmethod
def load_input(self, context: "InputContext") -> Any:
"""User-defined method that loads an input to an op.
Args:
context (InputContext): The input context, which describes the input that's being loaded
and the upstream output that's being loaded from.
Returns:
Any: The data object.
"""
@public
@abstractmethod
def handle_output(self, context: "OutputContext", obj: Any) -> None:
"""User-defined method that stores an output of an op.
Args:
context (OutputContext): The context of the step output that produces this object.
obj (Any): The object, returned by the op, to be stored.
"""
@overload
def io_manager(config_schema: IOManagerFunction) -> IOManagerDefinition: ...
@overload
def io_manager(
config_schema: CoercableToConfigSchema = None,
description: Optional[str] = None,
output_config_schema: CoercableToConfigSchema = None,
input_config_schema: CoercableToConfigSchema = None,
required_resource_keys: Optional[set[str]] = None,
version: Optional[str] = None,
) -> Callable[[IOManagerFunction], IOManagerDefinition]: ...
@public
def io_manager(
config_schema: Union[IOManagerFunction, CoercableToConfigSchema] = None,
description: Optional[str] = None,
output_config_schema: CoercableToConfigSchema = None,
input_config_schema: CoercableToConfigSchema = None,
required_resource_keys: Optional[set[str]] = None,
version: Optional[str] = None,
) -> Union[
IOManagerDefinition,
Callable[[IOManagerFunction], IOManagerDefinition],
]:
"""Define an IO manager.
IOManagers are used to store op outputs and load them as inputs to downstream ops.
The decorated function should accept an :py:class:`InitResourceContext` and return an
:py:class:`IOManager`.
Args:
config_schema (Optional[ConfigSchema]): The schema for the resource config. Configuration
data available in `init_context.resource_config`. If not set, Dagster will accept any
config provided.
description(Optional[str]): A human-readable description of the resource.
output_config_schema (Optional[ConfigSchema]): The schema for per-output config. If not set,
no per-output configuration will be allowed.
input_config_schema (Optional[ConfigSchema]): The schema for per-input config. If not set,
Dagster will accept any config provided.
required_resource_keys (Optional[Set[str]]): Keys for the resources required by the object
manager.
version (Optional[str]): The version of a resource function. Two wrapped
resource functions should only have the same version if they produce the same resource
definition when provided with the same inputs.
**Examples:**
.. code-block:: python
class MyIOManager(IOManager):
def handle_output(self, context, obj):
write_csv("some/path")
def load_input(self, context):
return read_csv("some/path")
@io_manager
def my_io_manager(init_context):
return MyIOManager()
@op(out=Out(io_manager_key="my_io_manager_key"))
def my_op(_):
return do_stuff()
@job(resource_defs={"my_io_manager_key": my_io_manager})
def my_job():
my_op()
"""
if callable(config_schema) and not is_callable_valid_config_arg(config_schema):
config_schema = cast("IOManagerFunction", config_schema)
return _IOManagerDecoratorCallable()(config_schema)
def _wrap(resource_fn: IOManagerFunction) -> IOManagerDefinition:
return _IOManagerDecoratorCallable(
config_schema=cast("Optional[UserConfigSchema]", config_schema),
description=description,
required_resource_keys=required_resource_keys,
version=version,
output_config_schema=output_config_schema,
input_config_schema=input_config_schema,
)(resource_fn)
return _wrap
def dagster_maintained_io_manager(io_manager_def: IOManagerDefinition) -> IOManagerDefinition:
io_manager_def._dagster_maintained = True # noqa: SLF001
return io_manager_def
| IOManager |
python | catalyst-team__catalyst | catalyst/core/callback.py | {
"start": 5230,
"end": 7791
} | class ____(Callback):
"""Enable/disable callback execution.
Args:
base_callback: callback to wrap
enable_callback: indicator to enable/disable
callback, if ``True`` then callback will be enabled,
default ``True``
"""
def __init__(self, base_callback: Callback, enable_callback: bool = True):
"""Init."""
if base_callback is None or not isinstance(base_callback, Callback):
raise ValueError(f"Expected callback but got - {type(base_callback)}!")
super().__init__(order=base_callback.order)
self.callback = base_callback
self._is_enabled = enable_callback
def on_experiment_start(self, runner: "IRunner") -> None:
"""Event handler for experiment start."""
if self._is_enabled:
self.callback.on_experiment_start(runner)
def on_epoch_start(self, runner: "IRunner") -> None:
"""Event handler for epoch start."""
if self._is_enabled:
self.callback.on_epoch_start(runner)
def on_loader_start(self, runner: "IRunner") -> None:
"""Event handler for loader start."""
if self._is_enabled:
self.callback.on_loader_start(runner)
def on_batch_start(self, runner: "IRunner") -> None:
"""Event handler for batch start."""
if self._is_enabled:
self.callback.on_batch_start(runner)
def on_batch_end(self, runner: "IRunner") -> None:
"""Event handler for batch end."""
if self._is_enabled:
self.callback.on_batch_end(runner)
def on_loader_end(self, runner: "IRunner") -> None:
"""Event handler for loader end."""
if self._is_enabled:
self.callback.on_loader_end(runner)
def on_epoch_end(self, runner: "IRunner") -> None:
"""Event handler for epoch end."""
if self._is_enabled:
self.callback.on_epoch_end(runner)
def on_experiment_end(self, runner: "IRunner") -> None:
"""Event handler for experiment end."""
if self._is_enabled:
self.callback.on_experiment_end(runner)
def on_exception(self, runner: "IRunner") -> None:
"""Event handler for exception case."""
if self._is_enabled:
self.callback.on_exception(runner)
__all__ = [
"ICallback",
"Callback",
"CallbackOrder",
"IMetricCallback",
"ICriterionCallback",
"IBackwardCallback",
"IOptimizerCallback",
"ISchedulerCallback",
"ICheckpointCallback",
"CallbackWrapper",
]
| CallbackWrapper |
python | pydantic__pydantic | tests/mypy/modules/plugin_fail.py | {
"start": 1093,
"end": 1206
} | class ____(BaseModel):
model_config = ConfigDict(from_attributes={}) # type: ignore[typeddict-item]
| BadConfig1 |
python | openai__openai-python | src/openai/types/responses/response_image_gen_call_completed_event.py | {
"start": 211,
"end": 671
} | class ____(BaseModel):
item_id: str
"""The unique identifier of the image generation item being processed."""
output_index: int
"""The index of the output item in the response's output array."""
sequence_number: int
"""The sequence number of this event."""
type: Literal["response.image_generation_call.completed"]
"""The type of the event. Always 'response.image_generation_call.completed'."""
| ResponseImageGenCallCompletedEvent |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 25091,
"end": 25280
} | class ____(BaseModel, extra="forbid"):
field_name: str = Field(..., description="")
field_schema: Optional["PayloadFieldSchema"] = Field(default=None, description="")
| CreateFieldIndex |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0156_project_search_indexing_enabled.py | {
"start": 149,
"end": 1073
} | class ____(migrations.Migration):
safe = Safe.before_deploy()
dependencies = [
("projects", "0155_custom_git_checkout_step"),
]
operations = [
migrations.AddField(
model_name="historicalproject",
name="search_indexing_enabled",
field=models.BooleanField(
db_default=True,
default=True,
help_text="Enable/disable search indexing for this project",
verbose_name="Enable search indexing",
),
),
migrations.AddField(
model_name="project",
name="search_indexing_enabled",
field=models.BooleanField(
db_default=True,
default=True,
help_text="Enable/disable search indexing for this project",
verbose_name="Enable search indexing",
),
),
]
| Migration |
python | tensorflow__tensorflow | tensorflow/lite/tools/optimize/debugging/python/debugger.py | {
"start": 5270,
"end": 23908
} | class ____:
"""Debugger for Quantized TensorFlow Lite debug mode models.
This can run the TensorFlow Lite converted models equipped with debug ops and
collect debug information. This debugger calculates statistics from
user-defined post-processing functions as well as default ones.
"""
def __init__(self,
quant_debug_model_path: Optional[str] = None,
quant_debug_model_content: Optional[bytes] = None,
float_model_path: Optional[str] = None,
float_model_content: Optional[bytes] = None,
debug_dataset: Optional[Callable[
[], Iterable[Sequence[np.ndarray]]]] = None,
debug_options: Optional[QuantizationDebugOptions] = None,
converter: Optional[TFLiteConverter] = None) -> None:
"""Runs the TFLite debugging model with given debug options.
Args:
quant_debug_model_path: Path to the quantized debug TFLite model file.
quant_debug_model_content: Content of the quantized debug TFLite model.
float_model_path: Path to float TFLite model file.
float_model_content: Content of the float TFLite model.
debug_dataset: a factory function that returns dataset generator which is
used to generate input samples (list of np.ndarray) for the model. The
generated elements must have same types and shape as inputs to the
model.
debug_options: Debug options to debug the given model.
converter: Optional, use converter instead of quantized model.
Raises:
ValueError: If the debugger was unable to be created.
Attributes:
layer_statistics: results of error metrics for each NumericVerify op
results. in {layer_name: {metric_name: metric}} format.
model_statistics: results of error metrics for difference between float
and quantized models. in {metric_name: metric} format.
"""
self._data_gen = debug_dataset
self._debug_options = debug_options or QuantizationDebugOptions()
self.converter = None
self.calibrated_model = None
self.float_model = None
self._float_interpreter = None
if converter is not None:
if self._debug_options.model_debug_metrics:
old_optimizations = converter.optimizations
self.converter = self._set_converter_options_for_float(converter)
self.float_model = self.converter.convert()
converter.optimizations = old_optimizations
self.converter = self._set_converter_options_for_calibration(converter)
self.calibrated_model = self.converter.convert()
# Converter should be already set up with all options
self._init_from_converter(
self._debug_options,
self.converter,
self.calibrated_model,
float_model=self.float_model)
else:
self._quant_interpreter = _interpreter.Interpreter(
quant_debug_model_path,
quant_debug_model_content,
experimental_preserve_all_tensors=(
self._debug_options.layer_direct_compare_metrics is not None))
if self._debug_options.model_debug_metrics:
self._float_interpreter = _interpreter.Interpreter(
float_model_path, float_model_content)
self._initialize_stats()
@property
def options(self) -> QuantizationDebugOptions:
return self._debug_options
@options.setter
def options(self, options: QuantizationDebugOptions) -> None:
self._debug_options = options
if not self.converter or not self.calibrated_model:
return
self._init_from_converter(
self._debug_options,
self.converter,
self.calibrated_model,
float_model=self.float_model)
self._initialize_stats()
def _initialize_stats(self):
"""Helper function initializes stats."""
# TODO(b/177749613) : Fix the dependency on tf.lite._get_ops_details()
# Following code is needed to get op's name from the output tensor index,
# since NumericVerify op only provides its quantized input tensor index.
self._defining_op = dict()
for op_info in self._quant_interpreter._get_ops_details(): # pylint: disable=protected-access
self._defining_op.update(
{tensor_idx: op_info['index'] for tensor_idx in op_info['outputs']})
self._numeric_verify_tensor_details = None
self._numeric_verify_op_details = None
if not self._get_numeric_verify_tensor_details():
raise ValueError('Please check if the quantized model is in debug mode')
self._layer_debug_metrics = _DEFAULT_LAYER_DEBUG_METRICS.copy()
if self._debug_options.layer_debug_metrics:
self._layer_debug_metrics.update(self._debug_options.layer_debug_metrics)
self.layer_statistics = None
self.model_statistics = None
self._metrics = metrics_stub.TFLiteMetrics()
self._metrics.increase_counter_debugger_creation()
def _get_quantized_model(self, is_debug: bool) -> bytes:
if not self.converter:
raise ValueError('No converter found, use this function with the '
'converter option in the constructor.')
return convert.mlir_quantize(
self.calibrated_model,
disable_per_channel=self.converter._experimental_disable_per_channel, # pylint: disable=protected-access
fully_quantize=self._debug_options.fully_quantize,
enable_numeric_verify=is_debug,
denylisted_ops=self._debug_options.denylisted_ops,
denylisted_nodes=self._debug_options.denylisted_nodes)
def get_nondebug_quantized_model(self) -> bytes:
"""Returns a non-instrumented quantized model.
Convert the quantized model with the initialized converter and
return bytes for nondebug model. The model will not be instrumented with
numeric verification operations.
Returns:
Model bytes corresponding to the model.
Raises:
ValueError: if converter is not passed to the debugger.
"""
return self._get_quantized_model(is_debug=False)
def get_debug_quantized_model(self) -> bytes:
"""Returns an instrumented quantized model.
Convert the quantized model with the initialized converter and
return bytes for model. The model will be instrumented with numeric
verification operations and should only be used for debugging.
Returns:
Model bytes corresponding to the model.
Raises:
ValueError: if converter is not passed to the debugger.
"""
return self._get_quantized_model(is_debug=True)
def _init_from_converter(self,
options: QuantizationDebugOptions,
converter: TFLiteConverter,
calibrated_model: Optional[bytes] = None,
float_model: Optional[bytes] = None) -> None:
"""Convert the model and apply options.
Converts the quantized model and initializes a quantized model interpreter
with the quantized model. Returns a float model interpreter if float model
is provided.
Args:
options: a QuantizationDebugOptions object.
converter: an initialized tf.lite.TFLiteConverter.
calibrated_model: Calibrated model bytes.
float_model: Float model bytes.
"""
self.quant_model = convert.mlir_quantize(
calibrated_model,
disable_per_channel=converter._experimental_disable_per_channel, # pylint: disable=protected-access
fully_quantize=options.fully_quantize,
enable_numeric_verify=True,
denylisted_ops=options.denylisted_ops,
denylisted_nodes=options.denylisted_nodes)
self._quant_interpreter = _interpreter.Interpreter(
model_content=self.quant_model)
self._float_interpreter = None
if float_model is not None:
self._float_interpreter = _interpreter.Interpreter(
model_content=float_model)
def _set_converter_options_for_float(
self, converter: TFLiteConverter) -> TFLiteConverter:
"""Verify converter options and set required experimental options."""
if converter.optimizations:
converter.optimizations = []
return converter
def _set_converter_options_for_calibration(
self, converter: TFLiteConverter) -> TFLiteConverter:
"""Verify converter options and set required experimental options."""
if not converter.optimizations:
raise ValueError(
'converter object must set optimizations to lite.Optimize.DEFAULT')
if not converter.representative_dataset:
raise ValueError('converter object must set representative_dataset')
converter.experimental_mlir_quantizer = True
converter._experimental_calibrate_only = True # pylint: disable=protected-access
return converter
def run(self) -> None:
"""Runs models and gets metrics."""
self.layer_statistics = self._collect_layer_statistics()
if self._debug_options.model_debug_metrics:
self.model_statistics = self._collect_model_statistics()
def _collect_layer_statistics(self) -> Dict[str, Dict[str, float]]:
"""Collects layer statistics by applying layer debug metrics.
For all data from the given RepresentativeDataset, collect statistics per
example by getting the NumericVerify op results in _quant_interpreter
and calculating layer debug metrics on the results.
Returns:
aggregated per-layer statistics of NumericVerify results.
{layer_name: {metric_name: metric}}
"""
layer_statistics = collections.defaultdict(
lambda: collections.defaultdict(list))
initialize = True
for tensor_data in self._data_gen():
self._set_input_tensors(self._quant_interpreter, tensor_data, initialize)
initialize = False
# Run the model.
self._quant_interpreter.invoke()
# Collect the statistics of this invoke result.
for tensor_detail in self._get_numeric_verify_tensor_details():
tensor_name = tensor_detail['name'] # pytype: disable=unsupported-operands # dynamic-method-lookup
diffs = self._quant_interpreter.get_tensor(tensor_detail['index']) # pytype: disable=unsupported-operands # dynamic-method-lookup
for metric_name, metric_fn in self._layer_debug_metrics.items():
layer_statistics[tensor_name][metric_name].append(metric_fn(diffs))
if self._debug_options.layer_direct_compare_metrics is not None:
for tensor_detail in self._get_numeric_verify_tensor_details():
tensor_name = tensor_detail['name'] # pytype: disable=unsupported-operands # dynamic-method-lookup
op_idx = self._defining_op[tensor_detail['index']] # pytype: disable=unsupported-operands # dynamic-method-lookup
op_detail = self._quant_interpreter._get_op_details(op_idx) # pylint: disable=protected-access
q_idx, f_idx = op_detail['inputs']
quant_input_detail = self._quant_interpreter._get_tensor_details( # pylint: disable=protected-access
q_idx, subgraph_index=0)
for (metric_name, metric_fn
) in self._debug_options.layer_direct_compare_metrics.items():
layer_statistics[tensor_name][metric_name].append(
metric_fn(
self._quant_interpreter.get_tensor(f_idx),
self._quant_interpreter.get_tensor(q_idx),
quant_input_detail['quantization_parameters']['scales'][0],
quant_input_detail['quantization_parameters']['zero_points']
[0]))
# Calculate final aggregated metrics for each layer.
for metrics in layer_statistics.values():
for metric_name in metrics:
metrics[metric_name] = np.nanmean(metrics[metric_name])
return layer_statistics
def _collect_model_statistics(self) -> Dict[str, float]:
"""Collects model output metrics.
For all data from the given RepresentativeDataset, collect all model output
results from float model & quantized debug model, and calculate metrics
by using model output functions. As a result, self.model_results is filled,
where self.model_results[model_output_function_name] = `aggregated model
output function value` (a scalar).
Returns:
aggregated per-model output discrepancy metrics.
{metric_name: aggregated_metric}
"""
model_statistics = collections.defaultdict(list)
initialize = True
for tensor_data in self._data_gen():
# Run quantized debug model and collect output results.
self._set_input_tensors(self._quant_interpreter, tensor_data, initialize)
self._quant_interpreter.invoke()
quant_tensor_data = self._get_output_tensors(self._quant_interpreter)
# Run float model if it's initialized.
float_tensor_data = []
if self._float_interpreter:
self._set_input_tensors(
self._float_interpreter, tensor_data, initialize)
self._float_interpreter.invoke()
float_tensor_data = self._get_output_tensors(self._float_interpreter)
initialize = False
# Calculate the metrics.
for (metric_name,
metric_fn) in self._debug_options.model_debug_metrics.items():
model_statistics[metric_name].append(
metric_fn(float_tensor_data, quant_tensor_data))
# Calculate final aggregated metrics for each outputs.
return {
metric_name: np.mean(metric)
for metric_name, metric in model_statistics.items()
}
def _set_input_tensors(self, interpreter: _interpreter.Interpreter,
tensor_data: Sequence[np.ndarray],
initialize: bool) -> None:
"""Sets input tensors into TFLite model Interpreter.
Args:
interpreter: a tf.lite.Interpreter object with allocated tensors.
tensor_data: a list of Numpy array data.
initialize: set to true when input is first set for the interpreter, to
set input shapes and allocate tensors.
Raises:
ValueError: when inputs can't be set, or size of provided inputs does not
match size of model inputs.
"""
input_details = interpreter.get_input_details()
if len(input_details) != len(tensor_data):
raise ValueError(
'Number of inputs provided ({}) does not match number of inputs to '
'the model ({})'.format(len(tensor_data), len(input_details)))
if initialize:
for input_detail, tensor in zip(input_details, tensor_data):
interpreter.resize_tensor_input(input_detail['index'], tensor.shape)
interpreter.allocate_tensors()
for input_detail, tensor in zip(input_details, tensor_data):
if tensor.dtype == np.float32 and input_detail['dtype'] == np.int8:
quant_params = _get_quant_params(input_detail)
if quant_params:
scale, zero_point = quant_params
tensor = np.round((tensor / scale) + zero_point).astype(np.int8)
interpreter.set_tensor(input_detail['index'], tensor)
def _get_output_tensors(
self, interpreter: _interpreter.Interpreter) -> List[np.ndarray]:
"""Returns output tensors of given TFLite model Interpreter.
Args:
interpreter: a tf.lite.Interpreter object with allocated tensors.
Returns:
a list of numpy arrays representing output tensor results.
"""
outputs = []
for output_detail in interpreter.get_output_details():
tensor = interpreter.get_tensor(output_detail['index'])
if output_detail['dtype'] == np.int8:
quant_params = _get_quant_params(output_detail)
if quant_params:
scale, zero_point = quant_params
tensor = ((tensor.astype(np.float32) - zero_point) * scale).astype(
np.float32)
outputs.append(tensor)
return outputs
def _get_numeric_verify_tensor_details(self) -> List[str]:
"""Returns all names of all tensors from NumericVerify op."""
# pylint: disable=protected-access
if not self._numeric_verify_tensor_details:
self._numeric_verify_tensor_details = []
self._numeric_verify_op_details = {}
for op_info in self._quant_interpreter._get_ops_details():
if op_info['op_name'] == _NUMERIC_VERIFY_OP_NAME:
self._numeric_verify_tensor_details.append(
self._quant_interpreter._get_tensor_details(
op_info['outputs'][0], subgraph_index=0))
tensor_name = self._numeric_verify_tensor_details[-1]['name']
self._numeric_verify_op_details[tensor_name] = op_info
# pylint: enable=protected-access
return self._numeric_verify_tensor_details
def _get_operand_name_and_index(self,
numeric_verify_name: str) -> Tuple[str, int]:
"""Gets the index and name of NumericVerify Op's quantized input tensor.
Args:
numeric_verify_name: name of the NumericVerify op's output tensor. It has
format of `NumericVerify/{quantized_tensor_name}:{quantized_tensor_idx}`
Returns:
Tuple of (tensor_name, tensor_idx) for quantized op's output tensor.
"""
tensor_name, tensor_idx = numeric_verify_name.rsplit(':', 1)
float_tensor_name = tensor_name[len(_NUMERIC_VERIFY_OP_NAME) + 1:]
if re.match(r'\d', float_tensor_name[-1]):
float_tensor_name = float_tensor_name[:-1]
return (float_tensor_name, int(tensor_idx))
def layer_statistics_dump(self, file: IO[str]) -> None:
"""Dumps layer statistics into file, in csv format.
Args:
file: file, or file-like object to write.
"""
# order of `fields` is the order of fields in csv.
fields = ['op_name', 'tensor_idx'] + list(self._layer_debug_metrics.keys())
if self._debug_options.layer_direct_compare_metrics is not None:
fields += list(self._debug_options.layer_direct_compare_metrics.keys())
fields += ['scale', 'zero_point', 'tensor_name']
writer = csv.DictWriter(file, fields)
writer.writeheader()
if self.layer_statistics:
for name, metrics in self.layer_statistics.items():
data = metrics.copy()
(data['tensor_name'], _) = self._get_operand_name_and_index(name)
data['tensor_idx'] = self._numeric_verify_op_details[name]['inputs'][0]
data['op_name'] = self._quant_interpreter._get_op_details( # pylint: disable=protected-access
self._defining_op[data['tensor_idx']])['op_name']
details = self._quant_interpreter._get_tensor_details( # pylint: disable=protected-access
data['tensor_idx'], subgraph_index=0)
data['scale'], data['zero_point'] = (
details['quantization_parameters']['scales'][0],
details['quantization_parameters']['zero_points'][0])
writer.writerow(data)
| QuantizationDebugger |
python | langchain-ai__langchain | libs/core/langchain_core/callbacks/manager.py | {
"start": 15625,
"end": 17392
} | class ____(RunManagerMixin):
"""Base class for run manager (a bound callback manager)."""
def __init__(
self,
*,
run_id: UUID,
handlers: list[BaseCallbackHandler],
inheritable_handlers: list[BaseCallbackHandler],
parent_run_id: UUID | None = None,
tags: list[str] | None = None,
inheritable_tags: list[str] | None = None,
metadata: dict[str, Any] | None = None,
inheritable_metadata: dict[str, Any] | None = None,
) -> None:
"""Initialize the run manager.
Args:
run_id: The ID of the run.
handlers: The list of handlers.
inheritable_handlers: The list of inheritable handlers.
parent_run_id: The ID of the parent run.
tags: The list of tags.
inheritable_tags: The list of inheritable tags.
metadata: The metadata.
inheritable_metadata: The inheritable metadata.
"""
self.run_id = run_id
self.handlers = handlers
self.inheritable_handlers = inheritable_handlers
self.parent_run_id = parent_run_id
self.tags = tags or []
self.inheritable_tags = inheritable_tags or []
self.metadata = metadata or {}
self.inheritable_metadata = inheritable_metadata or {}
@classmethod
def get_noop_manager(cls) -> Self:
"""Return a manager that doesn't perform any operations.
Returns:
The noop manager.
"""
return cls(
run_id=uuid.uuid4(),
handlers=[],
inheritable_handlers=[],
tags=[],
inheritable_tags=[],
metadata={},
inheritable_metadata={},
)
| BaseRunManager |
python | pytorch__pytorch | torch/export/dynamic_shapes.py | {
"start": 887,
"end": 1229
} | class ____(Enum):
"""
Enum for dynamic shape hints.
- AUTO means automatic inference of shape (static or dynamic).
- STATIC means static shape (always specialized).
- DYNAMIC means dynamic, will error out if specialized.
"""
AUTO = auto()
STATIC = auto()
DYNAMIC = auto()
@dataclasses.dataclass
| _DimHintType |
python | getsentry__sentry | src/sentry/auth_v2/endpoints/auth_merge_user_accounts.py | {
"start": 1044,
"end": 4271
} | class ____(AuthV2Endpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
"POST": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.ENTERPRISE
"""
List and merge user accounts with the same primary email address.
"""
def get(self, request: Request) -> Response:
user = request.user
if not user.is_authenticated:
return Response(status=401)
shared_email = user.email
if not shared_email:
return Response(
status=400,
data={"error": "Shared email is empty or null"},
)
queryset = User.objects.filter(email=shared_email).order_by("last_active")
return self.paginate(
request=request,
queryset=queryset,
on_results=lambda x: serialize(x, user, UserSerializerWithOrgMemberships()),
paginator_cls=OffsetPaginator,
)
def post(self, request: Request) -> Response:
if not request.user.is_authenticated:
return Response(status=401)
validator = AuthMergeUserAccountsValidator(data=request.data)
if not validator.is_valid():
return Response(validator.errors, status=400)
result = validator.validated_data
primary_user = User.objects.get(id=request.user.id)
verification_code = UserMergeVerificationCode.objects.filter(
user_id=primary_user.id
).first()
if verification_code is None or verification_code.token != result["verification_code"]:
return Response(
status=403,
data={"error": "Incorrect verification code"},
)
ids_to_merge = result["ids_to_merge"]
ids_to_delete = result["ids_to_delete"]
if not set(ids_to_merge).isdisjoint(set(ids_to_delete)):
return Response(
status=400,
data={
"error": "The set of IDs to merge and the set of IDs to delete must be disjoint"
},
)
if primary_user.id in ids_to_merge or primary_user.id in ids_to_delete:
return Response(
status=400,
data={"error": "You may not merge the user attached to your current session"},
)
shared_email = primary_user.email
affected_user_emails = User.objects.filter(
id__in=(ids_to_delete + ids_to_merge)
).values_list("email", flat=True)
if any(email != shared_email for email in affected_user_emails):
return Response(
status=403,
data={
"error": "One or more of the accounts in your request does not share your primary email address"
},
)
users_to_delete = User.objects.filter(id__in=ids_to_delete)
for user in users_to_delete:
user.delete()
users_to_merge = User.objects.filter(id__in=ids_to_merge)
for user in users_to_merge:
user.merge_to(primary_user)
user.delete()
return Response(serialize([primary_user], request.user, UserSerializerWithOrgMemberships()))
| AuthMergeUserAccountsEndpoint |
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared_tests/test_match.py | {
"start": 223,
"end": 1952
} | class ____(NamedTuple):
x: int
y: str
@pytest.mark.parametrize(
"obj,type_,expected",
[
# Concrete types
(5, int, True),
("hello", str, True),
(None, type(None), True),
(5.5, int, False),
(5.5, Any, True),
("abc", Any, True),
([1], Any, True),
# Union
(5, Union[int, str], True),
("hi", Union[int, str], True),
(5.0, Union[int, str], False),
# Literal
("hello", Literal["hello", "world"], True),
("nope", Literal["hello", "world"], False),
(3, Literal[3, 5], True),
(4, Literal[3, 5], False),
# list[]
([1, 2, 3], list[int], True),
([1, "x"], list[int], False),
([], list[int], True),
# Sequence[] support
([1, 2], Sequence[int], True),
((1, 2), Sequence[int], True),
({1, 2}, Sequence[int], False),
# tuple[...] fixed-length
((1, "x"), tuple[int, str], True),
((1, 2), tuple[int, str], False),
((1,), tuple[int, str], False),
# tuple[...] variable-length
((1, 2, 3), tuple[int, ...], True),
((1, "x"), tuple[int, ...], False),
# dict[]
({"a": 1, "b": 2}, dict[str, int], True),
({"a": 1, 2: "b"}, dict[str, int], False),
({}, dict[str, int], True),
# NamedTuple
(MyTuple(1, "a"), NamedTuple, True),
((1, "a"), NamedTuple, False),
# tuple of types
("abc", (int, str), True),
("abc", (int, Literal["abc", "def"]), True),
("ghi", (int, Literal["abc", "def"]), False),
],
)
def test_match_type(obj, type_, expected):
assert match_type(obj, type_) == expected
| MyTuple |
python | kamyu104__LeetCode-Solutions | Python/reverse-linked-list.py | {
"start": 255,
"end": 573
} | class ____(object):
# @param {ListNode} head
# @return {ListNode}
def reverseList(self, head):
dummy = ListNode(float("-inf"))
while head:
dummy.next, head.next, head = head, dummy.next, head.next
return dummy.next
# Time: O(n)
# Space: O(n)
# Recursive solution.
| Solution |
python | wandb__wandb | tests/system_tests/backend_fixtures.py | {
"start": 1535,
"end": 1639
} | class ____:
path: ClassVar[str] # e.g. "db/user"
command: str
@dataclass(frozen=True)
| FixtureCmd |
python | jmcnamara__XlsxWriter | xlsxwriter/test/utility/test_xl_range.py | {
"start": 304,
"end": 2551
} | class ____(unittest.TestCase):
"""
Test xl_range() utility function.
"""
def test_xl_range(self):
"""Test xl_range()"""
tests = [
# first_row, first_col, last_row, last_col, Range
(0, 0, 9, 0, "A1:A10"),
(1, 2, 8, 2, "C2:C9"),
(0, 0, 3, 4, "A1:E4"),
(0, 0, 0, 0, "A1"),
(0, 0, 0, 1, "A1:B1"),
(0, 2, 0, 9, "C1:J1"),
(1, 0, 2, 0, "A2:A3"),
(9, 0, 1, 24, "A10:Y2"),
(7, 25, 9, 26, "Z8:AA10"),
(1, 254, 1, 255, "IU2:IV2"),
(1, 256, 0, 16383, "IW2:XFD1"),
(0, 0, 1048576, 16384, "A1:XFE1048577"),
(-1, 0, 0, 0, ""),
(0, -1, 0, 0, ""),
(0, 0, -1, 0, ""),
(0, 0, 0, -1, ""),
]
for first_row, first_col, last_row, last_col, cell_range in tests:
exp = cell_range
got = xl_range(first_row, first_col, last_row, last_col)
# Ignore the warnings for negative values.
warnings.filterwarnings("ignore")
self.assertEqual(exp, got)
def test_xl_range_abs(self):
"""Test xl_range_abs()"""
tests = [
# first_row, first_col, last_row, last_col, Range
(0, 0, 9, 0, "$A$1:$A$10"),
(1, 2, 8, 2, "$C$2:$C$9"),
(0, 0, 3, 4, "$A$1:$E$4"),
(0, 0, 0, 0, "$A$1"),
(0, 0, 0, 1, "$A$1:$B$1"),
(0, 2, 0, 9, "$C$1:$J$1"),
(1, 0, 2, 0, "$A$2:$A$3"),
(9, 0, 1, 24, "$A$10:$Y$2"),
(7, 25, 9, 26, "$Z$8:$AA$10"),
(1, 254, 1, 255, "$IU$2:$IV$2"),
(1, 256, 0, 16383, "$IW$2:$XFD$1"),
(0, 0, 1048576, 16384, "$A$1:$XFE$1048577"),
(-1, 0, 0, 0, ""),
(0, -1, 0, 0, ""),
(0, 0, -1, 0, ""),
(0, 0, 0, -1, ""),
]
for first_row, first_col, last_row, last_col, cell_range in tests:
exp = cell_range
got = xl_range_abs(first_row, first_col, last_row, last_col)
# Ignore the warnings for negative values.
warnings.filterwarnings("ignore")
self.assertEqual(exp, got)
| TestUtility |
python | sympy__sympy | sympy/solvers/solveset.py | {
"start": 3341,
"end": 30991
} | class ____(ValueError):
"""Raised when unexpectedly encountering nonlinear equations"""
pass
def _masked(f, *atoms):
"""Return ``f``, with all objects given by ``atoms`` replaced with
Dummy symbols, ``d``, and the list of replacements, ``(d, e)``,
where ``e`` is an object of type given by ``atoms`` in which
any other instances of atoms have been recursively replaced with
Dummy symbols, too. The tuples are ordered so that if they are
applied in sequence, the origin ``f`` will be restored.
Examples
========
>>> from sympy import cos
>>> from sympy.abc import x
>>> from sympy.solvers.solveset import _masked
>>> f = cos(cos(x) + 1)
>>> f, reps = _masked(cos(1 + cos(x)), cos)
>>> f
_a1
>>> reps
[(_a1, cos(_a0 + 1)), (_a0, cos(x))]
>>> for d, e in reps:
... f = f.xreplace({d: e})
>>> f
cos(cos(x) + 1)
"""
sym = numbered_symbols('a', cls=Dummy, real=True)
mask = []
for a in ordered(f.atoms(*atoms)):
for i in mask:
a = a.replace(*i)
mask.append((a, next(sym)))
for i, (o, n) in enumerate(mask):
f = f.replace(o, n)
mask[i] = (n, o)
mask = list(reversed(mask))
return f, mask
def _invert(f_x, y, x, domain=S.Complexes):
r"""
Reduce the complex valued equation $f(x) = y$ to a set of equations
$$\left\{g(x) = h_1(y),\ g(x) = h_2(y),\ \dots,\ g(x) = h_n(y) \right\}$$
where $g(x)$ is a simpler function than $f(x)$. The return value is a tuple
$(g(x), \mathrm{set}_h)$, where $g(x)$ is a function of $x$ and $\mathrm{set}_h$ is
the set of function $\left\{h_1(y), h_2(y), \dots, h_n(y)\right\}$.
Here, $y$ is not necessarily a symbol.
$\mathrm{set}_h$ contains the functions, along with the information
about the domain in which they are valid, through set
operations. For instance, if :math:`y = |x| - n` is inverted
in the real domain, then $\mathrm{set}_h$ is not simply
$\{-n, n\}$ as the nature of `n` is unknown; rather, it is:
$$ \left(\left[0, \infty\right) \cap \left\{n\right\}\right) \cup
\left(\left(-\infty, 0\right] \cap \left\{- n\right\}\right)$$
By default, the complex domain is used which means that inverting even
seemingly simple functions like $\exp(x)$ will give very different
results from those obtained in the real domain.
(In the case of $\exp(x)$, the inversion via $\log$ is multi-valued
in the complex domain, having infinitely many branches.)
If you are working with real values only (or you are not sure which
function to use) you should probably set the domain to
``S.Reals`` (or use ``invert_real`` which does that automatically).
Examples
========
>>> from sympy.solvers.solveset import invert_complex, invert_real
>>> from sympy.abc import x, y
>>> from sympy import exp
When does exp(x) == y?
>>> invert_complex(exp(x), y, x)
(x, ImageSet(Lambda(_n, I*(2*_n*pi + arg(y)) + log(Abs(y))), Integers))
>>> invert_real(exp(x), y, x)
(x, Intersection({log(y)}, Reals))
When does exp(x) == 1?
>>> invert_complex(exp(x), 1, x)
(x, ImageSet(Lambda(_n, 2*_n*I*pi), Integers))
>>> invert_real(exp(x), 1, x)
(x, {0})
See Also
========
invert_real, invert_complex
"""
x = sympify(x)
if not x.is_Symbol:
raise ValueError("x must be a symbol")
f_x = sympify(f_x)
if x not in f_x.free_symbols:
raise ValueError("Inverse of constant function doesn't exist")
y = sympify(y)
if x in y.free_symbols:
raise ValueError("y should be independent of x ")
if domain.is_subset(S.Reals):
x1, s = _invert_real(f_x, FiniteSet(y), x)
else:
x1, s = _invert_complex(f_x, FiniteSet(y), x)
# f couldn't be inverted completely; return unmodified.
if x1 != x:
return x1, s
# Avoid adding gratuitous intersections with S.Complexes. Actual
# conditions should be handled by the respective inverters.
if domain is S.Complexes:
return x1, s
if isinstance(s, FiniteSet):
return x1, s.intersect(domain)
# "Fancier" solution sets like those obtained by inversion of trigonometric
# functions already include general validity conditions (i.e. conditions on
# the domain of the respective inverse functions), so we should avoid adding
# blanket intersections with S.Reals. But subsets of R (or C) must still be
# accounted for.
if domain is S.Reals:
return x1, s
else:
return x1, s.intersect(domain)
invert_complex = _invert
def invert_real(f_x, y, x):
"""
Inverts a real-valued function. Same as :func:`invert_complex`, but sets
the domain to ``S.Reals`` before inverting.
"""
return _invert(f_x, y, x, S.Reals)
def _invert_real(f, g_ys, symbol):
    """Helper function for _invert.

    Recursively peel the outermost operation off ``f`` and push ``g_ys``
    through the corresponding real inverse until either ``symbol`` is
    isolated or no rule applies.  Returns ``(g, s)`` meaning ``g(symbol)``
    must lie in ``s``; on full success ``g`` is ``symbol`` itself.
    """
    # Fully inverted, or nothing to solve for: report as-is.
    if f == symbol or g_ys is S.EmptySet:
        return (symbol, g_ys)

    n = Dummy('n', real=True)

    # exp(w) in g_ys  <=>  w in log(g_ys) (over the reals).
    if isinstance(f, exp) or (f.is_Pow and f.base == S.Exp1):
        return _invert_real(f.exp,
                            imageset(Lambda(n, log(n)), g_ys),
                            symbol)

    # Generic single-argument functions with a known inverse.  Trig and
    # hyperbolic functions are excluded: they need the multi-branch
    # treatment in _invert_trig_hyp_real below.
    if hasattr(f, 'inverse') and f.inverse() is not None and not isinstance(f, (
            TrigonometricFunction,
            HyperbolicFunction,
            )):
        if len(f.args) > 1:
            raise ValueError("Only functions with one argument are supported.")
        return _invert_real(f.args[0],
                            imageset(Lambda(n, f.inverse()(n)), g_ys),
                            symbol)

    if isinstance(f, Abs):
        return _invert_abs(f.args[0], g_ys, symbol)

    if f.is_Add:
        # f = g + h with g independent of symbol: shift g_ys by -g.
        g, h = f.as_independent(symbol)
        if g is not S.Zero:
            return _invert_real(h, imageset(Lambda(n, n - g), g_ys), symbol)

    if f.is_Mul:
        # f = g*h with g independent of symbol: scale g_ys by 1/g.
        g, h = f.as_independent(symbol)
        if g is not S.One:
            return _invert_real(h, imageset(Lambda(n, n/g), g_ys), symbol)

    if f.is_Pow:
        base, expo = f.args
        base_has_sym = base.has(symbol)
        expo_has_sym = expo.has(symbol)

        # Case 1: base(x)**expo = y with a constant exponent.
        if not expo_has_sym:
            if expo.is_rational:
                num, den = expo.as_numer_denom()

                if den % 2 == 0 and num % 2 == 1 and den.is_zero is False:
                    # Here we have f(x)**(num/den) = y
                    # where den is nonzero and even and y is an element
                    # of the set g_ys.
                    # den is even, so we are only interested in the cases
                    # where both f(x) and y are positive.
                    # Restricting y to be positive (using the set g_ys_pos)
                    # means that y**(den/num) is always positive.
                    # Therefore it isn't necessary to also constrain f(x)
                    # to be positive because we are only going to
                    # find solutions of f(x) = y**(d/n)
                    # where the rhs is already required to be positive.
                    root = Lambda(n, real_root(n, expo))
                    g_ys_pos = g_ys & Interval(0, oo)
                    res = imageset(root, g_ys_pos)
                    _inv, _set = _invert_real(base, res, symbol)
                    return (_inv, _set)

                if den % 2 == 1:
                    root = Lambda(n, real_root(n, expo))
                    res = imageset(root, g_ys)
                    if num % 2 == 0:
                        # Even numerator: f is even in base, so both the
                        # positive and the negated root are preimages.
                        neg_res = imageset(Lambda(n, -n), res)
                        return _invert_real(base, res + neg_res, symbol)
                    if num % 2 == 1:
                        return _invert_real(base, res, symbol)

            elif expo.is_irrational:
                # An irrational power is real only for nonnegative base,
                # so restrict the target values to [0, oo).
                root = Lambda(n, real_root(n, expo))
                g_ys_pos = g_ys & Interval(0, oo)
                res = imageset(root, g_ys_pos)
                return _invert_real(base, res, symbol)

            else:
                # indeterminate exponent, e.g. Float or parity of
                # num, den of rational could not be determined
                pass  # use default return

        # Case 2: b**g(x) = y with a constant base.
        if not base_has_sym:
            rhs = g_ys.args[0]
            if base.is_positive:
                return _invert_real(expo,
                    imageset(Lambda(n, log(n, base, evaluate=False)), g_ys), symbol)
            elif base.is_negative:
                # Only exact integer powers of a negative base can match.
                s, b = integer_log(rhs, base)
                if b:
                    return _invert_real(expo, FiniteSet(s), symbol)
                else:
                    return (expo, S.EmptySet)
            elif base.is_zero:
                one = Eq(rhs, 1)
                if one == S.true:
                    # special case: 0**x - 1
                    return _invert_real(expo, FiniteSet(0), symbol)
                elif one == S.false:
                    return (expo, S.EmptySet)

    if isinstance(f, (TrigonometricFunction, HyperbolicFunction)):
        return _invert_trig_hyp_real(f, g_ys, symbol)

    # No inversion rule matched; return unmodified.
    return (f, g_ys)
# Module-level caches for the inverse-function lookup tables used by the
# trig/hyperbolic inversion helpers below; built lazily on first use.
_trig_inverses = None
_hyp_inverses = None
def _invert_trig_hyp_real(f, g_ys, symbol):
    """Helper function for inverting trigonometric and hyperbolic functions.

    This helper only handles inversion over the reals.

    For trigonometric functions only finite `g_ys` sets are implemented.

    For hyperbolic functions the set `g_ys` is checked against the domain of the
    respective inverse functions. Infinite `g_ys` sets are also supported.
    """
    if isinstance(f, HyperbolicFunction):
        n = Dummy('n', real=True)
        if isinstance(f, sinh):
            # asinh is defined over R.
            return _invert_real(f.args[0], imageset(n, asinh(n), g_ys), symbol)

        if isinstance(f, cosh):
            # Range of cosh on R is [1, oo); restrict targets accordingly.
            g_ys_dom = g_ys.intersect(Interval(1, oo))
            if isinstance(g_ys_dom, Intersection):
                # could not properly resolve domain check
                if isinstance(g_ys, FiniteSet):
                    # If g_ys is a `FiniteSet`` it should be sufficient to just
                    # let the calling `_invert_real()` add an intersection with
                    # `S.Reals` (or a subset `domain`) to ensure that only valid
                    # (real) solutions are returned.
                    # This avoids adding "too many" Intersections or
                    # ConditionSets in the returned set.
                    g_ys_dom = g_ys
                else:
                    return (f, g_ys)
            # cosh is even, so both +acosh(y) and -acosh(y) are preimages.
            return _invert_real(f.args[0], Union(
                imageset(n, acosh(n), g_ys_dom),
                imageset(n, -acosh(n), g_ys_dom)), symbol)

        if isinstance(f, sech):
            # Range of sech on R is (0, 1].
            g_ys_dom = g_ys.intersect(Interval.Lopen(0, 1))
            if isinstance(g_ys_dom, Intersection):
                # Unresolved domain check: same fallback strategy as cosh.
                if isinstance(g_ys, FiniteSet):
                    g_ys_dom = g_ys
                else:
                    return (f, g_ys)
            # sech is even: two symmetric preimage branches.
            return _invert_real(f.args[0], Union(
                imageset(n, asech(n), g_ys_dom),
                imageset(n, -asech(n), g_ys_dom)), symbol)

        if isinstance(f, tanh):
            # Range of tanh on R is (-1, 1).
            g_ys_dom = g_ys.intersect(Interval.open(-1, 1))
            if isinstance(g_ys_dom, Intersection):
                if isinstance(g_ys, FiniteSet):
                    g_ys_dom = g_ys
                else:
                    return (f, g_ys)
            return _invert_real(f.args[0],
                imageset(n, atanh(n), g_ys_dom), symbol)

        if isinstance(f, coth):
            # Range of coth on R excludes [-1, 1].
            g_ys_dom = g_ys - Interval(-1, 1)
            if isinstance(g_ys_dom, Complement):
                if isinstance(g_ys, FiniteSet):
                    g_ys_dom = g_ys
                else:
                    return (f, g_ys)
            return _invert_real(f.args[0],
                imageset(n, acoth(n), g_ys_dom), symbol)

        if isinstance(f, csch):
            # csch never attains 0.
            g_ys_dom = g_ys - FiniteSet(0)
            if isinstance(g_ys_dom, Complement):
                if isinstance(g_ys, FiniteSet):
                    g_ys_dom = g_ys
                else:
                    return (f, g_ys)
            return _invert_real(f.args[0],
                imageset(n, acsch(n), g_ys_dom), symbol)

    elif isinstance(f, TrigonometricFunction) and isinstance(g_ys, FiniteSet):
        def _get_trig_inverses(func):
            # Lazily build and cache, per trig function, a tuple of
            # (inverse branches, period, real range).
            global _trig_inverses
            if _trig_inverses is None:
                _trig_inverses = {
                    sin : ((asin, lambda y: pi-asin(y)), 2*pi, Interval(-1, 1)),
                    cos : ((acos, lambda y: -acos(y)), 2*pi, Interval(-1, 1)),
                    tan : ((atan,), pi, S.Reals),
                    cot : ((acot,), pi, S.Reals),
                    sec : ((asec, lambda y: -asec(y)), 2*pi,
                           Union(Interval(-oo, -1), Interval(1, oo))),
                    csc : ((acsc, lambda y: pi-acsc(y)), 2*pi,
                           Union(Interval(-oo, -1), Interval(1, oo)))}
            return _trig_inverses[func]

        invs, period, rng = _get_trig_inverses(f.func)
        n = Dummy('n', integer=True)

        def create_return_set(g):
            # returns ConditionSet that will be part of the final (x, set) tuple
            invsimg = Union(*[
                imageset(n, period*n + inv(g), S.Integers) for inv in invs])
            inv_f, inv_g_ys = _invert_real(f.args[0], invsimg, symbol)
            if inv_f == symbol:  # inversion successful
                # Guard the solutions with the requirement that g lies in
                # the real range of f.
                conds = rng.contains(g)
                return ConditionSet(symbol, conds, inv_g_ys)
            else:
                return ConditionSet(symbol, Eq(f, g), S.Reals)

        retset = Union(*[create_return_set(g) for g in g_ys])
        return (symbol, retset)

    else:
        # Unsupported combination (e.g. trig function with infinite g_ys).
        return (f, g_ys)
def _invert_trig_hyp_complex(f, g_ys, symbol):
    """Helper function for inverting trigonometric and hyperbolic functions.

    This helper only handles inversion over the complex numbers.
    Only finite `g_ys` sets are implemented.

    Handling of singularities is only implemented for hyperbolic equations.
    In case of a symbolic element g in g_ys a ConditionSet may be returned.
    """
    if isinstance(f, TrigonometricFunction) and isinstance(g_ys, FiniteSet):
        def inv(trig):
            # Return the family of inverse branches of ``trig``,
            # parametrized by the integer Dummy ``n`` from the enclosing
            # scope.
            if isinstance(trig, (sin, csc)):
                F = asin if isinstance(trig, sin) else acsc
                return (
                    lambda a: 2*n*pi + F(a),
                    lambda a: 2*n*pi + pi - F(a))
            if isinstance(trig, (cos, sec)):
                F = acos if isinstance(trig, cos) else asec
                return (
                    lambda a: 2*n*pi + F(a),
                    lambda a: 2*n*pi - F(a))
            if isinstance(trig, (tan, cot)):
                # tan/cot are pi-periodic with a single inverse branch.
                return (lambda a: n*pi + trig.inverse()(a),)

        n = Dummy('n', integer=True)
        invs = S.EmptySet
        # Collect the images of every target value under every branch.
        for L in inv(f):
            invs += Union(*[imageset(Lambda(n, L(g)), S.Integers) for g in g_ys])
        return _invert_complex(f.args[0], invs, symbol)

    elif isinstance(f, HyperbolicFunction) and isinstance(g_ys, FiniteSet):
        # There are two main options regarding singularities / domain checking
        # for symbolic elements in g_ys:
        # 1. Add a "catch-all" intersection with S.Complexes.
        # 2. ConditionSets.
        # At present ConditionSets seem to work better and have the additional
        # benefit of representing the precise conditions that must be satisfied.
        # The conditions are also rather straightforward. (At most two isolated
        # points.)
        def _get_hyp_inverses(func):
            # Lazily build and cache, per hyperbolic function, a tuple of
            # (inverse branches, period, excluded singular values).
            global _hyp_inverses
            if _hyp_inverses is None:
                _hyp_inverses = {
                    sinh : ((asinh, lambda y: I*pi-asinh(y)), 2*I*pi, ()),
                    cosh : ((acosh, lambda y: -acosh(y)), 2*I*pi, ()),
                    tanh : ((atanh,), I*pi, (-1, 1)),
                    coth : ((acoth,), I*pi, (-1, 1)),
                    sech : ((asech, lambda y: -asech(y)), 2*I*pi, (0, )),
                    csch : ((acsch, lambda y: I*pi-acsch(y)), 2*I*pi, (0, ))}
            return _hyp_inverses[func]

        # invs: iterable of main inverses, e.g. (acosh, -acosh).
        # excl: iterable of singularities to be checked for.
        invs, period, excl = _get_hyp_inverses(f.func)
        n = Dummy('n', integer=True)

        def create_return_set(g):
            # returns ConditionSet that will be part of the final (x, set) tuple
            invsimg = Union(*[
                imageset(n, period*n + inv(g), S.Integers) for inv in invs])
            inv_f, inv_g_ys = _invert_complex(f.args[0], invsimg, symbol)
            if inv_f == symbol:  # inversion successful
                # Require g to avoid the singular values of the inverses.
                conds = And(*[Ne(g, e) for e in excl])
                return ConditionSet(symbol, conds, inv_g_ys)
            else:
                return ConditionSet(symbol, Eq(f, g), S.Complexes)

        retset = Union(*[create_return_set(g) for g in g_ys])
        return (symbol, retset)

    else:
        # Unsupported combination (e.g. infinite g_ys).
        return (f, g_ys)
def _invert_complex(f, g_ys, symbol):
    """Helper function for _invert.

    Complex-domain analogue of ``_invert_real``: recursively peel the
    outermost operation off ``f`` and map ``g_ys`` through the
    corresponding complex inverse.
    """
    # Fully inverted, or nothing to solve for: report as-is.
    if f == symbol or g_ys is S.EmptySet:
        return (symbol, g_ys)

    n = Dummy('n')

    if f.is_Add:
        # f = g + h with g independent of symbol: shift g_ys by -g.
        g, h = f.as_independent(symbol)
        if g is not S.Zero:
            return _invert_complex(h, imageset(Lambda(n, n - g), g_ys), symbol)

    if f.is_Mul:
        # f = g*h with g independent of symbol: scale g_ys by 1/g.
        g, h = f.as_independent(symbol)
        if g is not S.One:
            if g in {S.NegativeInfinity, S.ComplexInfinity, S.Infinity}:
                # An infinite coefficient admits no finite solution.
                return (h, S.EmptySet)
            return _invert_complex(h, imageset(Lambda(n, n/g), g_ys), symbol)

    if f.is_Pow:
        base, expo = f.args
        # special case: g**r = 0
        # Could be improved like `_invert_real` to handle more general cases.
        if expo.is_Rational and g_ys == FiniteSet(0):
            if expo.is_positive:
                return _invert_complex(base, g_ys, symbol)

    # Generic single-argument functions with a known inverse; exp and the
    # trig/hyperbolic families are excluded and handled separately below.
    if hasattr(f, 'inverse') and f.inverse() is not None and \
       not isinstance(f, TrigonometricFunction) and \
       not isinstance(f, HyperbolicFunction) and \
       not isinstance(f, exp):
        if len(f.args) > 1:
            raise ValueError("Only functions with one argument are supported.")
        return _invert_complex(f.args[0],
                               imageset(Lambda(n, f.inverse()(n)), g_ys), symbol)

    if isinstance(f, exp) or (f.is_Pow and f.base == S.Exp1):
        if isinstance(g_ys, ImageSet):
            # can solve up to `(d*exp(exp(...(exp(a*x + b))...) + c)` format.
            # Further can be improved to `(d*exp(exp(...(exp(a*x**n + b*x**(n-1) + ... + f))...) + c)`.
            g_ys_expr = g_ys.lamda.expr
            g_ys_vars = g_ys.lamda.variables
            # Fresh integer parameter for the additional branch index.
            k = Dummy('k{}'.format(len(g_ys_vars)))
            g_ys_vars_1 = (k,) + g_ys_vars
            exp_invs = Union(*[imageset(Lambda((g_ys_vars_1,), (I*(2*k*pi + arg(g_ys_expr))
                                       + log(Abs(g_ys_expr)))), S.Integers**(len(g_ys_vars_1)))])
            return _invert_complex(f.exp, exp_invs, symbol)

        elif isinstance(g_ys, FiniteSet):
            # exp(w) = y  <=>  w = log|y| + I*(2*n*pi + arg(y)); y == 0 has
            # no preimage, so zero targets are dropped.
            exp_invs = Union(*[imageset(Lambda(n, I*(2*n*pi + arg(g_y)) +
                                               log(Abs(g_y))), S.Integers)
                               for g_y in g_ys if g_y != 0])
            return _invert_complex(f.exp, exp_invs, symbol)

    if isinstance(f, (TrigonometricFunction, HyperbolicFunction)):
        return _invert_trig_hyp_complex(f, g_ys, symbol)

    # No inversion rule matched; return unmodified.
    return (f, g_ys)
def _invert_abs(f, g_ys, symbol):
    """Helper function for inverting absolute value functions.

    Returns the complete result of inverting an absolute value
    function along with the conditions which must also be satisfied.

    If it is certain that all these conditions are met, a :class:`~.FiniteSet`
    of all possible solutions is returned. If any condition cannot be
    satisfied, an :class:`~.EmptySet` is returned. Otherwise, a
    :class:`~.ConditionSet` of the solutions, with all the required conditions
    specified, is returned.
    """
    if not g_ys.is_FiniteSet:
        # this could be used for FiniteSet, but the
        # results are more compact if they aren't, e.g.
        # ConditionSet(x, Contains(n, Interval(0, oo)), {-n, n}) vs
        # Union(Intersection(Interval(0, oo), {n}), Intersection(Interval(-oo, 0), {-n}))
        # for the solution of abs(x) - n
        pos = Intersection(g_ys, Interval(0, S.Infinity))
        # Solve both |f| branches (+f and -f) against the nonnegative targets.
        parg = _invert_real(f, pos, symbol)
        narg = _invert_real(-f, pos, symbol)
        if parg[0] != narg[0]:
            raise NotImplementedError
        return parg[0], Union(narg[1], parg[1])

    # check conditions: all these must be true. If any are unknown
    # then return them as conditions which must be satisfied
    unknown = []
    for a in g_ys.args:
        ok = a.is_nonnegative if a.is_Number else a.is_positive
        if ok is None:
            unknown.append(a)
        elif not ok:
            # |f| can never equal a negative value.
            return symbol, S.EmptySet
    if unknown:
        conditions = And(*[Contains(i, Interval(0, oo))
                           for i in unknown])
    else:
        conditions = True
    n = Dummy('n', real=True)
    # this is slightly different than above: instead of solving
    # +/-f on positive values, here we solve for f on +/- g_ys
    g_x, values = _invert_real(f, Union(
        imageset(Lambda(n, n), g_ys),
        imageset(Lambda(n, -n), g_ys)), symbol)
    return g_x, ConditionSet(g_x, conditions, values)
def domain_check(f, symbol, p):
    """Returns False if point p is infinite or any subexpression of f
    is infinite or becomes so after replacing symbol with p. If none of
    these conditions is met then True will be returned.

    Examples
    ========

    >>> from sympy import Mul, oo
    >>> from sympy.abc import x
    >>> from sympy.solvers.solveset import domain_check
    >>> g = 1/(1 + (1/(x + 1))**2)
    >>> domain_check(g, x, -1)
    False
    >>> domain_check(x**2, x, 0)
    True
    >>> domain_check(1/x, x, oo)
    False

    * The function relies on the assumption that the original form
      of the equation has not been changed by automatic simplification.

    >>> domain_check(x/x, x, 0) # x/x is automatically simplified to 1
    True

    * To deal with automatic evaluations use evaluate=False:

    >>> domain_check(Mul(x, 1/x, evaluate=False), x, 0)
    False
    """
    f = sympify(f)
    p = sympify(p)
    # An infinite point can never be in the domain; otherwise recurse
    # through the expression tree.
    return False if p.is_infinite else _domain_check(f, symbol, p)
def _domain_check(f, symbol, p):
    # Recursive helper for ``domain_check``: a truthy result means no
    # visited subexpression of ``f`` is infinite at ``symbol = p``.
    if f.is_Atom and f.is_finite:
        return True
    if f.subs(symbol, p).is_infinite:
        return False
    if isinstance(f, Piecewise):
        # Walk the cases in order: skip cases whose condition is known to
        # be false at p, recurse into the first known-true case, and give
        # an undecidable case the benefit of the doubt (we cannot tell
        # which branch applies, so we let the candidate solution pass;
        # ideally a symbolic validity condition would be returned here).
        for expr, cond in f.args:
            holds = cond.subs(symbol, p)
            if holds is S.false:
                continue
            if holds is S.true:
                return _domain_check(expr, symbol, p)
            return True
    else:
        # TODO : We should not blindly recurse through all args of
        # arbitrary expressions like this
        return all(_domain_check(sub, symbol, p) for sub in f.args)
def _is_finite_with_finite_vars(f, domain=S.Complexes):
    """
    Return True if the given expression is finite. For symbols that
    do not assign a value for `complex` and/or `real`, the domain will
    be used to assign a value; symbols that do not assign a value
    for `finite` will be made finite. All other assumptions are
    left unmodified.
    """
    def assumptions(s):
        # Copy of the symbol's assumptions, augmented so the finiteness
        # query below can resolve.
        A = s.assumptions0
        # Previously written as A.setdefault('finite', A.get('finite', True)),
        # but setdefault already leaves an existing key untouched, so the
        # inner get() was redundant.
        A.setdefault('finite', True)
        if domain.is_subset(S.Reals):
            # if this gets set it will make complex=True, too
            A.setdefault('real', True)
        else:
            # don't change 'real' because being complex implies
            # nothing about being real
            A.setdefault('complex', True)
        return A

    # Replace every free symbol by a Dummy carrying the augmented
    # assumptions, then ask the rewritten expression whether it is finite.
    reps = {s: Dummy(**assumptions(s)) for s in f.free_symbols}
    return f.xreplace(reps).is_finite
def _is_function_class_equation(func_class, f, symbol):
    """ Tests whether the equation is an equation of the given function class.

    The given equation belongs to the given function class if it is
    comprised of functions of the function class which are multiplied by
    or added to expressions independent of the symbol. In addition, the
    arguments of all such functions must be linear in the symbol as well.

    Examples
    ========

    >>> from sympy.solvers.solveset import _is_function_class_equation
    >>> from sympy import tan, sin, tanh, sinh, exp
    >>> from sympy.abc import x
    >>> from sympy.functions.elementary.trigonometric import TrigonometricFunction
    >>> from sympy.functions.elementary.hyperbolic import HyperbolicFunction
    >>> _is_function_class_equation(TrigonometricFunction, exp(x) + tan(x), x)
    False
    >>> _is_function_class_equation(TrigonometricFunction, tan(x) + sin(x), x)
    True
    >>> _is_function_class_equation(TrigonometricFunction, tan(x**2), x)
    False
    >>> _is_function_class_equation(TrigonometricFunction, tan(x + 2), x)
    True
    >>> _is_function_class_equation(HyperbolicFunction, tanh(x) + sinh(x), x)
    True
    """
    # Sums and products qualify only if every term does.
    if f.is_Mul or f.is_Add:
        return all(_is_function_class_equation(func_class, term, symbol)
                   for term in f.args)

    # Powers qualify when the exponent is symbol-free and the base does.
    if f.is_Pow:
        if f.exp.has(symbol):
            return False
        return _is_function_class_equation(func_class, f.base, symbol)

    # Symbol-free expressions are trivially acceptable.
    if not f.has(symbol):
        return True

    # Otherwise f itself must be a function of the class, with an
    # argument that is linear in the symbol.
    if not isinstance(f, func_class):
        return False
    try:
        return Poly(f.args[0], symbol).degree() <= 1
    except PolynomialError:
        return False
def _solve_as_rational(f, symbol, domain):
    """ solve rational functions

    Put ``f`` over a common denominator ``g/h``; if ``h`` is free of
    ``symbol`` the problem reduces to the polynomial ``g``, otherwise
    the solutions are the zeros of the numerator minus the poles
    (zeros of the denominator).
    """
    f = together(_mexpand(f, recursive=True), deep=True)
    g, h = fraction(f)
    if not h.has(symbol):
        try:
            return _solve_as_poly(g, symbol, domain)
        except NotImplementedError:
            # The polynomial formed from g could end up having
            # coefficients in a ring over which finding roots
            # isn't implemented yet, e.g. ZZ[a] for some symbol a
            return ConditionSet(symbol, Eq(f, 0), domain)
        except CoercionFailed:
            # contained oo, zoo or nan
            return S.EmptySet
    else:
        # Genuinely rational: solve the numerator, then discard any
        # candidate that also makes the denominator vanish.
        valid_solns = _solveset(g, symbol, domain)
        invalid_solns = _solveset(h, symbol, domain)
        return valid_solns - invalid_solns
| NonlinearError |
python | scikit-image__scikit-image | src/skimage/util/_backends.py | {
"start": 4750,
"end": 5067
} | class ____:
"""Information about a backend
A backend that wants to provide additional information about itself
should return an instance of this from its information entry point.
"""
def __init__(self, supported_functions):
self.supported_functions = supported_functions
| BackendInformation |
python | rq__rq | rq/cli/helpers.py | {
"start": 11527,
"end": 15244
} | class ____:
"""A helper class to be used with click commands, to handle shared options"""
def __init__(
self,
url=None,
config=None,
worker_class=DEFAULT_WORKER_CLASS,
job_class=DEFAULT_JOB_CLASS,
death_penalty_class=DEFAULT_DEATH_PENALTY_CLASS,
queue_class=DEFAULT_QUEUE_CLASS,
connection_class=DEFAULT_CONNECTION_CLASS,
path=None,
*args,
**kwargs,
) -> None:
self._connection = None
self.url = url
self.config = config
if path:
for pth in path:
sys.path.append(pth)
try:
self.worker_class = import_worker_class(worker_class)
except (ImportError, AttributeError) as exc:
raise click.BadParameter(str(exc), param_hint='--worker-class')
try:
self.job_class = import_job_class(job_class)
except (ImportError, AttributeError) as exc:
raise click.BadParameter(str(exc), param_hint='--job-class')
try:
self.death_penalty_class = import_attribute(death_penalty_class)
except (ImportError, AttributeError) as exc:
raise click.BadParameter(str(exc), param_hint='--death-penalty-class')
try:
self.queue_class = import_attribute(queue_class)
except (ImportError, AttributeError) as exc:
raise click.BadParameter(str(exc), param_hint='--queue-class')
try:
self.connection_class: Type[Redis] = cast(Type[Redis], import_attribute(connection_class))
except (ImportError, AttributeError) as exc:
raise click.BadParameter(str(exc), param_hint='--connection-class')
@property
def connection(self):
if self._connection is None:
if self.url:
self._connection = self.connection_class.from_url(self.url)
elif self.config:
settings = read_config_file(self.config) if self.config else {}
self._connection = get_redis_from_config(settings, self.connection_class)
else:
self._connection = get_redis_from_config(os.environ, self.connection_class)
return self._connection
shared_options = [
click.option('--url', '-u', envvar='RQ_REDIS_URL', help='URL describing Redis connection details.'),
click.option('--config', '-c', envvar='RQ_CONFIG', help='Module containing RQ settings.'),
click.option(
'--worker-class', '-w', envvar='RQ_WORKER_CLASS', default=DEFAULT_WORKER_CLASS, help='RQ Worker class to use'
),
click.option('--job-class', '-j', envvar='RQ_JOB_CLASS', default=DEFAULT_JOB_CLASS, help='RQ Job class to use'),
click.option('--queue-class', envvar='RQ_QUEUE_CLASS', default=DEFAULT_QUEUE_CLASS, help='RQ Queue class to use'),
click.option(
'--connection-class',
envvar='RQ_CONNECTION_CLASS',
default=DEFAULT_CONNECTION_CLASS,
help='Redis client class to use',
),
click.option('--path', '-P', default=['.'], help='Specify the import path.', multiple=True),
click.option(
'--serializer',
'-S',
default=DEFAULT_SERIALIZER_CLASS,
help='Path to serializer, defaults to rq.serializers.DefaultSerializer',
),
]
def pass_cli_config(func):
# add all the shared options to the command
for option in shared_options:
func = option(func)
# pass the cli config object into the command
def wrapper(*args, **kwargs):
ctx = click.get_current_context()
cli_config = CliConfig(**kwargs)
return ctx.invoke(func, cli_config, *args[1:], **kwargs)
return update_wrapper(wrapper, func)
| CliConfig |
python | streamlit__streamlit | lib/tests/streamlit/delta_generator_test.py | {
"start": 16067,
"end": 16247
} | class ____(DeltaGeneratorTestCase):
def test_nested_expanders_allowed(self):
level1 = st.expander("level 1")
level1.expander("level 2")
| DeltaGeneratorExpanderTest |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 947,
"end": 1077
} | class ____(ASTBaseBase):
pass
# Names
################################################################################
| ASTBase |
python | pytest-dev__pytest | testing/test_assertrewrite.py | {
"start": 46209,
"end": 47237
} | class ____:
def test_simple_case(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
def test_ternary_display():
assert (False == False) == False
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*E*assert (False == False) == False"])
def test_long_case(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
def test_ternary_display():
assert False == (False == True) == True
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*E*assert (False == True) == True"])
def test_many_brackets(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
def test_ternary_display():
assert True == ((False == True) == True)
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*E*assert True == ((False == True) == True)"])
| TestIssue925 |
python | redis__redis-py | redis/auth/token_manager.py | {
"start": 358,
"end": 1113
} | class ____:
"""
Listeners that will be notified on events related to credentials.
Accepts callbacks and awaitable callbacks.
"""
def __init__(self):
self._on_next = None
self._on_error = None
@property
def on_next(self) -> Union[Callable[[Any], None], Awaitable]:
return self._on_next
@on_next.setter
def on_next(self, callback: Union[Callable[[Any], None], Awaitable]) -> None:
self._on_next = callback
@property
def on_error(self) -> Union[Callable[[Exception], None], Awaitable]:
return self._on_error
@on_error.setter
def on_error(self, callback: Union[Callable[[Exception], None], Awaitable]) -> None:
self._on_error = callback
| CredentialsListener |
python | protocolbuffers__protobuf | python/google/protobuf/internal/numpy/numpy_test.py | {
"start": 7718,
"end": 21839
} | class ____(parameterized.TestCase):
@parameterized.product(
message_module=[unittest_pb2, unittest_proto3_arena_pb2],
field_name=[
'repeated_int32',
'repeated_int64',
'repeated_uint32',
'repeated_uint64',
'repeated_sint32',
'repeated_sint64',
'repeated_fixed32',
'repeated_fixed64',
'repeated_sfixed32',
'repeated_sfixed64',
'repeated_float',
'repeated_double',
],
)
def test_simple_np_array_from_repeated(self, message_module, field_name):
m = message_module.TestAllTypes()
field = getattr(m, field_name)
field.append(42)
field.append(127)
arr = np.asarray(field)
self.assertIsInstance(arr, np.ndarray)
np.testing.assert_equal(arr, np.array([42, 127]))
@parameterized.named_parameters(
('_proto2', unittest_pb2), ('_proto3', unittest_proto3_arena_pb2)
)
def test_simple_np_array_from_repeated_continue(self, message_module):
m = message_module.TestAllTypes()
m.repeated_nested_enum.extend([1, 2, 3])
arr = np.asarray(m.repeated_nested_enum)
self.assertIsInstance(arr, np.ndarray)
np.testing.assert_equal(arr, np.array([1, 2, 3]))
m.repeated_bool.append(False)
m.repeated_bool.append(True)
arr = np.asarray(m.repeated_bool)
self.assertIsInstance(arr, np.ndarray)
np.testing.assert_equal(arr, np.array([False, True]))
m.repeated_string.extend([
'One',
'Two',
'Three',
])
arr = np.asarray(m.repeated_string)
self.assertIsInstance(arr, np.ndarray)
np.testing.assert_equal(arr, np.array(['One', 'Two', 'Three']))
m.repeated_bytes.append(b'1')
m.repeated_bytes.append(b'2')
m.repeated_bytes.append(b'3')
arr = np.asarray(m.repeated_bytes)
self.assertIsInstance(arr, np.ndarray)
np.testing.assert_equal(arr, np.array([b'1', b'2', b'3']))
@parameterized.named_parameters(
('_proto2', unittest_pb2), ('_proto3', unittest_proto3_arena_pb2)
)
def test_simple_n_array_from_repeated_message(self, message_module):
m = message_module.TestAllTypes(
repeated_nested_message=(
message_module.TestAllTypes.NestedMessage(bb=9),
message_module.TestAllTypes.NestedMessage(bb=8),
)
)
arr = np.array(m.repeated_nested_message)
self.assertIsInstance(arr, np.ndarray)
self.assertEqual(arr[0].bb, 9)
self.assertEqual(arr[1].bb, 8)
@parameterized.product(
message_module=[unittest_pb2, unittest_proto3_arena_pb2],
field_name=[
'repeated_int32',
'repeated_int64',
'repeated_sint32',
'repeated_sint64',
'repeated_sfixed32',
'repeated_sfixed64',
'repeated_float',
'repeated_double',
],
)
def test_numpy_signed_arrays_from_repeated(self, message_module, field_name):
m = message_module.TestAllTypes()
field = getattr(m, field_name)
field.append(-42)
field.append(0)
field.append(127)
arr = np.asarray(field)
self.assertIsInstance(arr, np.ndarray)
np.testing.assert_equal(arr, np.array([-42, 0, 127]))
@parameterized.product(
message_module=[unittest_pb2, unittest_proto3_arena_pb2],
field_name=[
'repeated_int32',
'repeated_int64',
'repeated_uint32',
'repeated_uint64',
'repeated_sint32',
'repeated_sint64',
'repeated_fixed32',
'repeated_fixed64',
'repeated_sfixed32',
'repeated_sfixed64',
'repeated_float',
'repeated_double',
'repeated_nested_enum',
],
)
def test_numpy_empty_repeated(self, message_module, field_name):
m = message_module.TestAllTypes()
field = getattr(m, field_name)
arr = np.array(field)
arr2 = np.array(field, dtype=np.int8)
self.assertIsInstance(arr, np.ndarray)
self.assertIsInstance(arr2, np.ndarray)
np.testing.assert_equal(arr, np.array([]))
np.testing.assert_equal(arr2, np.array([], dtype=np.int8))
@parameterized.product(
message_module=[unittest_pb2, unittest_proto3_arena_pb2],
field_name=['packed_sint32', 'packed_sint64'],
)
def test_numpy_signed_packed_arrays_from_repeated(
self, message_module, field_name
):
m = message_module.TestPackedTypes()
field = getattr(m, field_name)
field.append(-42)
field.append(0)
field.append(127)
arr = np.asarray(field)
self.assertIsInstance(arr, np.ndarray)
np.testing.assert_equal(arr, np.array([-42, 0, 127]))
@parameterized.product(
message_module=[unittest_pb2, unittest_proto3_arena_pb2],
dtype=[
'int8',
'int16',
'int32',
'int64',
'float16',
'float32',
'float64',
'str',
'bool',
'object',
],
)
def test_repeated_bytes_to_all_types(self, message_module, dtype):
m = message_module.TestAllTypes()
m.repeated_bytes.extend([b'11', b'12'])
arr = np.asarray(m.repeated_bytes, dtype=dtype)
self.assertIsInstance(arr, np.ndarray)
self.assertTrue(arr.flags.contiguous)
@parameterized.named_parameters(
('_proto2', unittest_pb2), ('_proto3', unittest_proto3_arena_pb2)
)
def test_repeated_string_tobytes(self, message_module):
m = message_module.TestAllTypes(repeated_string=['12'])
arr = np.array(m.repeated_string)
self.assertEqual(arr.tobytes(), b'1\x00\x00\x002\x00\x00\x00')
@parameterized.named_parameters(
('_proto2', unittest_pb2), ('_proto3', unittest_proto3_arena_pb2)
)
def test_repeated_bytes_tobytes(self, message_module):
m = message_module.TestAllTypes(repeated_bytes=[b'11', b'12', b'13'])
arr = np.array(m.repeated_bytes)
np.testing.assert_array_equal(
arr, np.asarray([b'11', b'12', b'13'], dtype=bytes)
)
self.assertEqual(arr.tobytes(), b'111213')
@parameterized.named_parameters(
('_proto2', unittest_pb2), ('_proto3', unittest_proto3_arena_pb2)
)
def test_repeated_string_none_dtype(self, message_module):
m = message_module.TestAllTypes()
m.repeated_string.extend(['12', '2321'])
arr = np.asarray(m.repeated_string, dtype=None)
self.assertIsInstance(arr, np.ndarray)
np.testing.assert_array_equal(arr, np.asarray(['12', '2321'], dtype=str))
@parameterized.named_parameters(
('_proto2', unittest_pb2), ('_proto3', unittest_proto3_arena_pb2)
)
def test_repeated_string_int8_dtype(self, message_module):
m = message_module.TestAllTypes()
m.repeated_string.extend(['123', '-15'])
arr = np.asarray(m.repeated_string, dtype=np.int8)
self.assertIsInstance(arr, np.ndarray)
self.assertEqual(arr.dtype, np.int8)
np.testing.assert_array_equal(arr, np.asarray([123, -15], dtype=np.int8))
@parameterized.named_parameters(
('_proto2', unittest_pb2), ('_proto3', unittest_proto3_arena_pb2)
)
def test_repeated_bytes_none_dtype(self, message_module):
m = message_module.TestAllTypes()
m.repeated_bytes.append(bytes([122, 124]))
m.repeated_bytes.append(bytes([13]))
arr = np.asarray(m.repeated_bytes, dtype=None)
self.assertIsInstance(arr, np.ndarray)
expected = np.asarray([b'\x7A\x7C', b'\x0d'])
np.testing.assert_array_equal(arr, expected)
@parameterized.named_parameters(
('_proto2', unittest_pb2), ('_proto3', unittest_proto3_arena_pb2)
)
def test_repeated_bytes_object_dtype(self, message_module):
m = message_module.TestAllTypes()
t = np.array([b'932', b'124\x00'], dtype=object)
m.repeated_bytes.extend(t)
ss = m.SerializeToString()
m2 = message_module.TestAllTypes.FromString(ss)
arr = np.asarray(m2.repeated_bytes, dtype=object)
self.assertIsInstance(arr, np.ndarray)
np.testing.assert_array_equal(arr, t)
@parameterized.named_parameters(
('_proto2', unittest_pb2), ('_proto3', unittest_proto3_arena_pb2)
)
def test_empty_list_object_dtype(self, message_module):
m = message_module.TestAllTypes()
t = np.array([], dtype=object)
m.repeated_bytes.extend(t)
ss = m.SerializeToString()
m2 = message_module.TestAllTypes.FromString(ss)
arr = np.asarray(m2.repeated_bytes, dtype=object)
self.assertIsInstance(arr, np.ndarray)
np.testing.assert_array_equal(arr, t)
@parameterized.named_parameters(
('_proto2', unittest_pb2), ('_proto3', unittest_proto3_arena_pb2)
)
def test_default_dtype(self, message_module):
m = message_module.TestAllTypes(
repeated_int32=[1, 2],
repeated_uint32=[2],
repeated_int64=[1],
repeated_uint64=[1],
repeated_float=[1],
repeated_double=[0.1],
repeated_string=['1'],
repeated_bytes=[b'123'],
repeated_bool=[True],
)
self.assertEqual(np.array(m.repeated_int32).dtype, np.int32)
self.assertEqual(np.array(m.repeated_uint32).dtype, np.uint32)
self.assertEqual(np.array(m.repeated_int64).dtype, np.int64)
self.assertEqual(np.array(m.repeated_uint64).dtype, np.uint64)
self.assertEqual(np.array(m.repeated_float).dtype, np.float32)
self.assertEqual(np.array(m.repeated_double).dtype, np.float64)
self.assertEqual(np.array(m.repeated_string).dtype, np.dtype('<U1'))
self.assertEqual(np.array(m.repeated_bytes).dtype, np.dtype('S3'))
self.assertEqual(np.array(m.repeated_bool).dtype, np.dtype('bool'))
message = json_format_pb2.TestRepeatedEnum(
repeated_enum=[json_format_pb2.BUFFER]
)
self.assertEqual(np.array(message.repeated_enum).dtype, np.int32)
@parameterized.named_parameters(
('_proto2', unittest_pb2), ('_proto3', unittest_proto3_arena_pb2)
)
def test_set_dtype(self, message_module):
m = message_module.TestAllTypes(
repeated_int32=[1, 2],
repeated_uint32=[2],
repeated_int64=[1],
repeated_uint64=[1],
repeated_float=[1.2, 1],
repeated_double=[0.1],
repeated_string=['1'],
repeated_bytes=[b'123'],
repeated_bool=[True],
)
arr = np.array(m.repeated_float)
self.assertEqual(np.array(m.repeated_int32, dtype=np.int32).dtype, np.int32)
self.assertEqual(
np.array(m.repeated_uint32, dtype=np.int32).dtype, np.int32
)
self.assertEqual(
np.array(m.repeated_int64, dtype=np.uint32).dtype, np.uint32
)
self.assertEqual(
np.array(m.repeated_uint64, dtype=np.uint32).dtype, np.uint32
)
self.assertEqual(
np.array(m.repeated_float, dtype=np.float32).dtype, np.float32
)
self.assertEqual(
np.array(m.repeated_double, dtype=np.float32).dtype, np.float32
)
self.assertEqual(
np.array(m.repeated_string, dtype=object).dtype, np.dtype('O')
)
self.assertEqual(
np.array(m.repeated_bytes, dtype=object).dtype, np.dtype('O')
)
self.assertEqual(np.array(m.repeated_bool, dtype=np.int32).dtype, np.int32)
@parameterized.named_parameters(
('_proto2', unittest_pb2), ('_proto3', unittest_proto3_arena_pb2)
)
def test_empty_repeated_default_dtype(self, message_module):
m = message_module.TestAllTypes()
self.assertEqual(np.array(m.repeated_int32).dtype, np.int32)
self.assertEqual(np.array(m.repeated_uint32).dtype, np.uint32)
self.assertEqual(np.array(m.repeated_int64).dtype, np.int64)
self.assertEqual(np.array(m.repeated_uint64).dtype, np.uint64)
self.assertEqual(np.array(m.repeated_float).dtype, np.float32)
self.assertEqual(np.array(m.repeated_double).dtype, np.float64)
self.assertEqual(np.array(m.repeated_string).dtype, np.dtype('<U1'))
self.assertEqual(np.array(m.repeated_bytes).dtype, np.dtype('S1'))
@parameterized.product(
message_module=[unittest_pb2, unittest_proto3_arena_pb2],
field_name=[
'repeated_int32',
'repeated_int64',
'repeated_uint32',
'repeated_uint64',
'repeated_sint32',
'repeated_sint64',
'repeated_fixed32',
'repeated_fixed64',
'repeated_sfixed32',
'repeated_sfixed64',
'repeated_float',
'repeated_double',
'repeated_nested_enum',
],
)
def test_empty_repeated_set_dtype(self, message_module, field_name):
m = message_module.TestAllTypes()
field = getattr(m, field_name)
self.assertEqual(np.array(field, dtype=np.int32).dtype, np.int32)
@parameterized.named_parameters(
('_proto2', unittest_pb2), ('_proto3', unittest_proto3_arena_pb2)
)
def test_nested_message(self, message_module):
m = message_module.NestedTestAllTypes()
arr = np.array(m.child.payload.repeated_float)
self.assertEqual(arr.dtype, np.float32)
@parameterized.named_parameters(
('_proto2', unittest_pb2), ('_proto3', unittest_proto3_arena_pb2)
)
def test_float_compare(self, message_module):
m = message_module.TestAllTypes()
expected = [87.5011, 1.1]
m.repeated_float.extend(expected)
np.testing.assert_equal(
np.array(m.repeated_float), np.array(expected, np.float32)
)
m.repeated_double.extend(expected)
np.testing.assert_equal(np.array(m.repeated_double), np.array(expected))
@parameterized.named_parameters(
('_proto2', unittest_pb2), ('_proto3', unittest_proto3_arena_pb2)
)
def test_nparray_modify(self, message_module):
m = message_module.TestAllTypes()
size = 10
expected = np.full(size, 123, dtype=np.int32)
m.repeated_int32.extend(expected)
arr = np.array(m.repeated_int32, np.int32)
arr[2] = 111
self.assertEqual(arr[2], 111)
self.assertEqual(arr[3], 123)
self.assertEqual(m.repeated_int32[2], 123)
@parameterized.named_parameters(
('_proto2', unittest_pb2), ('_proto3', unittest_proto3_arena_pb2)
)
def test_nparray_order(self, message_module):
m = message_module.TestAllTypes(repeated_int32=[1, 2, 3])
arr = np.array(m.repeated_int32, order='F')
np.testing.assert_equal(arr, np.array([1, 2, 3]))
if __name__ == '__main__':
unittest.main()
| NumpyBindingTest |
python | pypa__pipenv | pipenv/vendor/click/_winconsole.py | {
"start": 5095,
"end": 7859
} | class ____:
def __init__(self, text_stream: t.TextIO, byte_stream: t.BinaryIO) -> None:
self._text_stream = text_stream
self.buffer = byte_stream
@property
def name(self) -> str:
return self.buffer.name
def write(self, x: t.AnyStr) -> int:
if isinstance(x, str):
return self._text_stream.write(x)
try:
self.flush()
except Exception:
pass
return self.buffer.write(x)
def writelines(self, lines: t.Iterable[t.AnyStr]) -> None:
for line in lines:
self.write(line)
def __getattr__(self, name: str) -> t.Any:
return getattr(self._text_stream, name)
def isatty(self) -> bool:
return self.buffer.isatty()
def __repr__(self):
return f"<ConsoleStream name={self.name!r} encoding={self.encoding!r}>"
def _get_text_stdin(buffer_stream: t.BinaryIO) -> t.TextIO:
text_stream = _NonClosingTextIOWrapper(
io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)),
"utf-16-le",
"strict",
line_buffering=True,
)
return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream))
def _get_text_stdout(buffer_stream: t.BinaryIO) -> t.TextIO:
text_stream = _NonClosingTextIOWrapper(
io.BufferedWriter(_WindowsConsoleWriter(STDOUT_HANDLE)),
"utf-16-le",
"strict",
line_buffering=True,
)
return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream))
def _get_text_stderr(buffer_stream: t.BinaryIO) -> t.TextIO:
text_stream = _NonClosingTextIOWrapper(
io.BufferedWriter(_WindowsConsoleWriter(STDERR_HANDLE)),
"utf-16-le",
"strict",
line_buffering=True,
)
return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream))
_stream_factories: t.Mapping[int, t.Callable[[t.BinaryIO], t.TextIO]] = {
0: _get_text_stdin,
1: _get_text_stdout,
2: _get_text_stderr,
}
def _is_console(f: t.TextIO) -> bool:
if not hasattr(f, "fileno"):
return False
try:
fileno = f.fileno()
except (OSError, io.UnsupportedOperation):
return False
handle = msvcrt.get_osfhandle(fileno)
return bool(GetConsoleMode(handle, byref(DWORD())))
def _get_windows_console_stream(
f: t.TextIO, encoding: t.Optional[str], errors: t.Optional[str]
) -> t.Optional[t.TextIO]:
if (
get_buffer is not None
and encoding in {"utf-16-le", None}
and errors in {"strict", None}
and _is_console(f)
):
func = _stream_factories.get(f.fileno())
if func is not None:
b = getattr(f, "buffer", None)
if b is None:
return None
return func(b)
| ConsoleStream |
python | PrefectHQ__prefect | src/integrations/prefect-aws/prefect_aws/client_parameters.py | {
"start": 304,
"end": 6457
} | class ____(BaseModel):
"""
Model used to manage extra parameters that you can pass when you initialize
the Client. If you want to find more information, see
[boto3 docs](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html)
for more info about the possible client configurations.
Attributes:
api_version: The API version to use. By default, botocore will
use the latest API version when creating a client. You only need
to specify this parameter if you want to use a previous API version
of the client.
use_ssl: Whether or not to use SSL. By default, SSL is used.
Note that not all services support non-ssl connections.
verify: Whether or not to verify SSL certificates. By default
SSL certificates are verified. If False, SSL will still be used
(unless use_ssl is False), but SSL certificates
will not be verified. Passing a file path to this is deprecated.
verify_cert_path: A filename of the CA cert bundle to
use. You can specify this argument if you want to use a
different CA cert bundle than the one used by botocore.
endpoint_url: The complete URL to use for the constructed
client. Normally, botocore will automatically construct the
appropriate URL to use when communicating with a service. You
can specify a complete URL (including the "http/https" scheme)
to override this behavior. If this value is provided,
then ``use_ssl`` is ignored.
config: Advanced configuration for Botocore clients. See
[botocore docs](https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html)
for more details.
""" # noqa E501
api_version: Optional[str] = Field(
default=None, description="The API version to use.", title="API Version"
)
use_ssl: bool = Field(
default=True, description="Whether or not to use SSL.", title="Use SSL"
)
verify: Union[bool, FilePath, None] = Field(
default=None, description="Whether or not to verify SSL certificates."
)
verify_cert_path: Optional[FilePath] = Field(
default=None,
description="Path to the CA cert bundle to use.",
title="Certificate Authority Bundle File Path",
)
endpoint_url: Optional[str] = Field(
default=None,
description="The complete URL to use for the constructed client.",
title="Endpoint URL",
)
config: Optional[Dict[str, Any]] = Field(
default=None,
description="Advanced configuration for Botocore clients.",
title="Botocore Config",
)
def __hash__(self):
return hash(
(
self.api_version,
self.use_ssl,
self.verify,
self.verify_cert_path,
self.endpoint_url,
hash_collection(self.config),
)
)
@field_validator("config", mode="before")
@classmethod
def instantiate_config(cls, value: Union[Config, Dict[str, Any]]) -> Dict[str, Any]:
"""
Casts lists to Config instances.
"""
if isinstance(value, Config):
return value.__dict__["_user_provided_options"]
return value
@model_validator(mode="before")
@classmethod
def deprecated_verify_cert_path(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""
If verify is not a bool, raise a warning.
"""
verify = values.get("verify")
# deprecate using verify in favor of verify_cert_path
# so the UI looks nicer
if verify is not None and not isinstance(verify, bool):
warnings.warn(
(
"verify should be a boolean. "
"If you want to use a CA cert bundle, use verify_cert_path instead."
),
DeprecationWarning,
)
return values
@model_validator(mode="before")
@classmethod
def verify_cert_path_and_verify(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""
If verify_cert_path is set but verify is False, raise a warning.
"""
verify = values.get("verify", True)
verify_cert_path = values.get("verify_cert_path")
if not verify and verify_cert_path:
warnings.warn(
"verify_cert_path is set but verify is False. "
"verify_cert_path will be ignored."
)
values["verify_cert_path"] = None
elif not isinstance(verify, bool) and verify_cert_path:
warnings.warn(
"verify_cert_path is set but verify is also set as a file path. "
"verify_cert_path will take precedence."
)
values["verify"] = True
return values
def get_params_override(self) -> Dict[str, Any]:
"""
Return the dictionary of the parameters to override.
The parameters to override are the one which are not None.
"""
params = self.model_dump()
if params.get("verify_cert_path"):
# to ensure that verify doesn't re-overwrite verify_cert_path
params.pop("verify")
params_override = {}
for key, value in params.items():
if value is None:
continue
elif key == "config":
params_override[key] = Config(**value)
# botocore UNSIGNED is an instance while actual signers can
# be fetched as strings
if params_override[key].signature_version == "unsigned":
params_override[key].signature_version = UNSIGNED
elif key == "verify_cert_path":
params_override["verify"] = value
elif key == "verify":
if value is not None:
params_override[key] = value
else:
params_override[key] = value
return params_override
| AwsClientParameters |
python | getsentry__sentry | fixtures/safe_migrations_apps/good_flow_delete_model_state_app/migrations/0002_delete_model_state.py | {
"start": 236,
"end": 478
} | class ____(CheckedMigration):
dependencies = [
("good_flow_delete_model_state_app", "0001_initial"),
]
operations = [
SafeDeleteModel(name="TestTable", deletion_action=DeletionAction.MOVE_TO_PENDING),
]
| Migration |
python | django__django | tests/validators/tests.py | {
"start": 31805,
"end": 37939
} | class ____(TestCase):
"""
Validators have valid equality operators (#21638)
"""
def test_regex_equality(self):
self.assertEqual(
RegexValidator(r"^(?:[a-z0-9.-]*)://"),
RegexValidator(r"^(?:[a-z0-9.-]*)://"),
)
self.assertNotEqual(
RegexValidator(r"^(?:[a-z0-9.-]*)://"),
RegexValidator(r"^(?:[0-9.-]*)://"),
)
self.assertEqual(
RegexValidator(r"^(?:[a-z0-9.-]*)://", "oh noes", "invalid"),
RegexValidator(r"^(?:[a-z0-9.-]*)://", "oh noes", "invalid"),
)
self.assertNotEqual(
RegexValidator(r"^(?:[a-z0-9.-]*)://", "oh", "invalid"),
RegexValidator(r"^(?:[a-z0-9.-]*)://", "oh noes", "invalid"),
)
self.assertNotEqual(
RegexValidator(r"^(?:[a-z0-9.-]*)://", "oh noes", "invalid"),
RegexValidator(r"^(?:[a-z0-9.-]*)://"),
)
self.assertNotEqual(
RegexValidator("", flags=re.IGNORECASE),
RegexValidator(""),
)
self.assertNotEqual(
RegexValidator(""),
RegexValidator("", inverse_match=True),
)
def test_regex_equality_nocache(self):
pattern = r"^(?:[a-z0-9.-]*)://"
left = RegexValidator(pattern)
re.purge()
right = RegexValidator(pattern)
self.assertEqual(
left,
right,
)
def test_regex_equality_blank(self):
self.assertEqual(
RegexValidator(),
RegexValidator(),
)
def test_email_equality(self):
self.assertEqual(
EmailValidator(),
EmailValidator(),
)
self.assertNotEqual(
EmailValidator(message="BAD EMAIL"),
EmailValidator(),
)
self.assertEqual(
EmailValidator(message="BAD EMAIL", code="bad"),
EmailValidator(message="BAD EMAIL", code="bad"),
)
self.assertEqual(
EmailValidator(allowlist=["127.0.0.1", "localhost"]),
EmailValidator(allowlist=["localhost", "127.0.0.1"]),
)
def test_basic_equality(self):
self.assertEqual(
MaxValueValidator(44),
MaxValueValidator(44),
)
self.assertEqual(MaxValueValidator(44), mock.ANY)
self.assertEqual(
StepValueValidator(0.003),
StepValueValidator(0.003),
)
self.assertNotEqual(
MaxValueValidator(44),
MinValueValidator(44),
)
self.assertNotEqual(
MinValueValidator(45),
MinValueValidator(11),
)
self.assertNotEqual(
StepValueValidator(3),
StepValueValidator(2),
)
def test_decimal_equality(self):
self.assertEqual(
DecimalValidator(1, 2),
DecimalValidator(1, 2),
)
self.assertNotEqual(
DecimalValidator(1, 2),
DecimalValidator(1, 1),
)
self.assertNotEqual(
DecimalValidator(1, 2),
DecimalValidator(2, 2),
)
self.assertNotEqual(
DecimalValidator(1, 2),
MinValueValidator(11),
)
def test_file_extension_equality(self):
self.assertEqual(FileExtensionValidator(), FileExtensionValidator())
self.assertEqual(
FileExtensionValidator(["txt"]), FileExtensionValidator(["txt"])
)
self.assertEqual(
FileExtensionValidator(["TXT"]), FileExtensionValidator(["txt"])
)
self.assertEqual(
FileExtensionValidator(["TXT", "png"]),
FileExtensionValidator(["txt", "png"]),
)
self.assertEqual(
FileExtensionValidator(["jpg", "png", "txt"]),
FileExtensionValidator(["txt", "jpg", "png"]),
)
self.assertEqual(
FileExtensionValidator(["txt"]),
FileExtensionValidator(["txt"], code="invalid_extension"),
)
self.assertNotEqual(
FileExtensionValidator(["txt"]), FileExtensionValidator(["png"])
)
self.assertNotEqual(
FileExtensionValidator(["txt"]), FileExtensionValidator(["png", "jpg"])
)
self.assertNotEqual(
FileExtensionValidator(["txt"]),
FileExtensionValidator(["txt"], code="custom_code"),
)
self.assertNotEqual(
FileExtensionValidator(["txt"]),
FileExtensionValidator(["txt"], message="custom error message"),
)
def test_prohibit_null_characters_validator_equality(self):
self.assertEqual(
ProhibitNullCharactersValidator(message="message", code="code"),
ProhibitNullCharactersValidator(message="message", code="code"),
)
self.assertEqual(
ProhibitNullCharactersValidator(), ProhibitNullCharactersValidator()
)
self.assertNotEqual(
ProhibitNullCharactersValidator(message="message1", code="code"),
ProhibitNullCharactersValidator(message="message2", code="code"),
)
self.assertNotEqual(
ProhibitNullCharactersValidator(message="message", code="code1"),
ProhibitNullCharactersValidator(message="message", code="code2"),
)
def test_domain_name_equality(self):
self.assertEqual(
DomainNameValidator(),
DomainNameValidator(),
)
self.assertNotEqual(
DomainNameValidator(),
EmailValidator(),
)
self.assertNotEqual(
DomainNameValidator(),
DomainNameValidator(code="custom_code"),
)
self.assertEqual(
DomainNameValidator(message="custom error message"),
DomainNameValidator(message="custom error message"),
)
self.assertNotEqual(
DomainNameValidator(message="custom error message"),
DomainNameValidator(message="custom error message", code="custom_code"),
)
| TestValidatorEquality |
python | dask__dask | dask/_task_spec.py | {
"start": 27243,
"end": 29362
} | class ____(NestedContainer, Mapping):
klass = dict
def __init__(self, /, *args: Any, **kwargs: Any):
if args:
assert not kwargs
if len(args) == 1:
args = args[0]
if isinstance(args, dict): # type: ignore[unreachable]
args = tuple(itertools.chain(*args.items())) # type: ignore[unreachable]
elif isinstance(args, (list, tuple)):
if all(
len(el) == 2 if isinstance(el, (list, tuple)) else False
for el in args
):
args = tuple(itertools.chain(*args))
else:
raise ValueError("Invalid argument provided")
if len(args) % 2 != 0:
raise ValueError("Invalid number of arguments provided")
elif kwargs:
assert not args
args = tuple(itertools.chain(*kwargs.items()))
super().__init__(*args)
def __repr__(self):
values = ", ".join(f"{k}: {v}" for k, v in batched(self.args, 2, strict=True))
return f"Dict({values})"
def substitute(
self, subs: dict[KeyType, KeyType | GraphNode], key: KeyType | None = None
) -> Dict:
subs_filtered = {
k: v for k, v in subs.items() if k in self.dependencies and k != v
}
if not subs_filtered:
return self
new_args = []
for arg in self.args:
new_arg = (
arg.substitute(subs_filtered)
if isinstance(arg, (GraphNode, TaskRef))
else arg
)
new_args.append(new_arg)
return type(self)(new_args)
def __iter__(self):
yield from self.args[::2]
def __len__(self):
return len(self.args) // 2
def __getitem__(self, key):
for k, v in batched(self.args, 2, strict=True):
if k == key:
return v
raise KeyError(key)
@staticmethod
def constructor(args):
return dict(batched(args, 2, strict=True))
| Dict |
python | matplotlib__matplotlib | lib/matplotlib/testing/jpl_units/StrConverter.py | {
"start": 141,
"end": 2865
} | class ____(units.ConversionInterface):
"""
A Matplotlib converter class for string data values.
Valid units for string are:
- 'indexed' : Values are indexed as they are specified for plotting.
- 'sorted' : Values are sorted alphanumerically.
- 'inverted' : Values are inverted so that the first value is on top.
- 'sorted-inverted' : A combination of 'sorted' and 'inverted'
"""
@staticmethod
def axisinfo(unit, axis):
# docstring inherited
return None
@staticmethod
def convert(value, unit, axis):
# docstring inherited
if value == []:
return []
# we delay loading to make matplotlib happy
ax = axis.axes
if axis is ax.xaxis:
isXAxis = True
else:
isXAxis = False
axis.get_major_ticks()
ticks = axis.get_ticklocs()
labels = axis.get_ticklabels()
labels = [l.get_text() for l in labels if l.get_text()]
if not labels:
ticks = []
labels = []
if not np.iterable(value):
value = [value]
newValues = []
for v in value:
if v not in labels and v not in newValues:
newValues.append(v)
labels.extend(newValues)
# DISABLED: This is disabled because matplotlib bar plots do not
# DISABLED: recalculate the unit conversion of the data values
# DISABLED: this is due to design and is not really a bug.
# DISABLED: If this gets changed, then we can activate the following
# DISABLED: block of code. Note that this works for line plots.
# DISABLED if unit:
# DISABLED if unit.find("sorted") > -1:
# DISABLED labels.sort()
# DISABLED if unit.find("inverted") > -1:
# DISABLED labels = labels[::-1]
# add padding (so they do not appear on the axes themselves)
labels = [''] + labels + ['']
ticks = list(range(len(labels)))
ticks[0] = 0.5
ticks[-1] = ticks[-1] - 0.5
axis.set_ticks(ticks)
axis.set_ticklabels(labels)
# we have to do the following lines to make ax.autoscale_view work
loc = axis.get_major_locator()
loc.set_bounds(ticks[0], ticks[-1])
if isXAxis:
ax.set_xlim(ticks[0], ticks[-1])
else:
ax.set_ylim(ticks[0], ticks[-1])
result = [ticks[labels.index(v)] for v in value]
ax.viewLim.ignore(-1)
return result
@staticmethod
def default_units(value, axis):
# docstring inherited
# The default behavior for string indexing.
return "indexed"
| StrConverter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.