language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | davidhalter__parso | test/fuzz_diff_parser.py | {
"start": 7799,
"end": 11338
} | class ____:
def __init__(self, file_path, test_count, change_count):
self._path = file_path
with open(file_path, errors='replace') as f:
code = f.read()
self._code_lines = split_lines(code, keepends=True)
self._test_count = test_count
self._code_lines = self._code_lines
self._change_count = change_count
self._file_modifications = []
def _run(self, grammar, file_modifications, debugger, print_code=False):
try:
for i, fm in enumerate(file_modifications, 1):
fm.run(grammar, self._code_lines, print_code=print_code)
print('.', end='')
sys.stdout.flush()
print()
except Exception:
print("Issue in file: %s" % self._path)
if debugger:
einfo = sys.exc_info()
pdb = __import__(debugger)
pdb.post_mortem(einfo[2])
raise
def redo(self, grammar, debugger, only_last, print_code):
mods = self._file_modifications
if only_last is not None:
mods = mods[-only_last:]
self._run(grammar, mods, debugger, print_code=print_code)
def run(self, grammar, debugger):
def iterate():
fm = None
for _ in range(self._test_count):
fm = FileModification.generate(
self._code_lines, self._change_count,
previous_file_modification=fm
)
self._file_modifications.append(fm)
yield fm
self._run(grammar, iterate(), debugger)
def main(arguments):
debugger = 'pdb' if arguments['--pdb'] else \
'ipdb' if arguments['--ipdb'] else None
redo_file = os.path.join(os.path.dirname(__file__), 'fuzz-redo.pickle')
if arguments['--logging']:
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
root.addHandler(ch)
grammar = parso.load_grammar()
parso.python.diff.DEBUG_DIFF_PARSER = True
if arguments['redo']:
with open(redo_file, 'rb') as f:
file_tests_obj = pickle.load(f)
only_last = arguments['--only-last'] and int(arguments['--only-last'])
file_tests_obj.redo(
grammar,
debugger,
only_last=only_last,
print_code=arguments['--print-code']
)
elif arguments['random']:
# A random file is used to do diff parser checks if no file is given.
# This helps us to find errors in a lot of different files.
file_paths = list(find_python_files_in_tree(arguments['<path>'] or '.'))
max_tries = int(arguments['--maxtries'])
tries = 0
try:
while tries < max_tries:
path = random.choice(file_paths)
print("Checking %s: %s tries" % (path, tries))
now_tries = min(1000, max_tries - tries)
file_tests_obj = FileTests(path, now_tries, int(arguments['--changes']))
file_tests_obj.run(grammar, debugger)
tries += now_tries
except Exception:
with open(redo_file, 'wb') as f:
pickle.dump(file_tests_obj, f)
raise
else:
raise NotImplementedError('Command is not implemented')
if __name__ == '__main__':
from docopt import docopt
arguments = docopt(__doc__)
main(arguments)
| FileTests |
python | matplotlib__matplotlib | lib/matplotlib/backend_bases.py | {
"start": 43368,
"end": 43450
} | class ____(Event):
"""An event triggered by a figure being closed."""
| CloseEvent |
python | weaviate__weaviate-python-client | weaviate/backup/executor.py | {
"start": 867,
"end": 24321
} | class ____(Generic[ConnectionType]):
def __init__(self, connection: Connection):
self._connection = connection
def create(
self,
backup_id: str,
backend: BackupStorage,
include_collections: Union[List[str], str, None] = None,
exclude_collections: Union[List[str], str, None] = None,
wait_for_completion: bool = False,
config: Optional[BackupConfigCreate] = None,
backup_location: Optional[BackupLocationType] = None,
) -> executor.Result[BackupReturn]:
"""Create a backup of all/per collection Weaviate objects.
Args:
backup_id: The identifier name of the backup. NOTE: Case insensitive.
backend: The backend storage where to create the backup.
include_collections: The collection/list of collections to be included in the backup. If not specified all
collections will be included. Either `include_collections` or `exclude_collections` can be set. By default None.
exclude_collections: The collection/list of collections to be excluded in the backup.
Either `include_collections` or `exclude_collections` can be set. By default None.
wait_for_completion: Whether to wait until the backup is done. By default False.
config: The configuration of the backup creation. By default None.
backup_location: The dynamic location of a backup. By default None.
Returns:
A `_BackupReturn` object that contains the backup creation response.
Raises:
requests.ConnectionError: If the network connection to weaviate fails.
weaviate.exceptions.UnexpectedStatusCodeError: If weaviate reports a none OK status.
TypeError: One of the arguments have a wrong type.
"""
(
backup_id,
backend,
include_collections,
exclude_collections,
) = _get_and_validate_create_restore_arguments(
backup_id=backup_id,
backend=backend, # can be removed when we remove the old backup class
include_classes=include_collections,
exclude_classes=exclude_collections,
wait_for_completion=wait_for_completion,
)
payload: dict = {
"id": backup_id,
"include": include_collections,
"exclude": exclude_collections,
}
if config is not None:
payload["config"] = config._to_dict()
if backup_location is not None:
if self._connection._weaviate_version.is_lower_than(1, 27, 2):
raise WeaviateUnsupportedFeatureError(
"BackupConfigCreate dynamic backup location",
str(self._connection._weaviate_version),
"1.27.2",
)
if "config" not in payload:
payload["config"] = {}
payload["config"].update(backup_location._to_dict())
path = f"/backups/{backend.value}"
if isinstance(self._connection, ConnectionAsync):
async def _execute() -> BackupReturn:
res = await executor.aresult(
self._connection.post(
path=path,
weaviate_object=payload,
error_msg="Backup creation failed due to connection error.",
)
)
create_status = _decode_json_response_dict(res, "Backup creation")
assert create_status is not None
if wait_for_completion:
while True:
status = await executor.aresult(
self.get_create_status(
backup_id=backup_id,
backend=backend,
backup_location=backup_location,
)
)
create_status["status"] = status.status
if status.status == BackupStatus.SUCCESS:
break
if status.status == BackupStatus.FAILED:
raise BackupFailedException(
f"Backup failed: {create_status} with error: {status.error}"
)
if status.status == BackupStatus.CANCELED:
raise BackupCanceledError(
f"Backup was canceled: {create_status} with error: {status.error}"
)
await asyncio.sleep(1)
return BackupReturn(**create_status)
return _execute()
res = executor.result(
self._connection.post(
path=path,
weaviate_object=payload,
error_msg="Backup creation failed due to connection error.",
)
)
create_status = _decode_json_response_dict(res, "Backup creation")
assert create_status is not None
if wait_for_completion:
while True:
status = executor.result(
self.get_create_status(
backup_id=backup_id,
backend=backend,
backup_location=backup_location,
)
)
create_status["status"] = status.status
if status.status == BackupStatus.SUCCESS:
break
if status.status == BackupStatus.FAILED:
raise BackupFailedException(
f"Backup failed: {create_status} with error: {status.error}"
)
if status.status == BackupStatus.CANCELED:
raise BackupCanceledError(
f"Backup was canceled: {create_status} with error: {status.error}"
)
time.sleep(1)
return BackupReturn(**create_status)
def get_create_status(
self,
backup_id: str,
backend: BackupStorage,
backup_location: Optional[BackupLocationType] = None,
) -> executor.Result[BackupStatusReturn]:
"""Checks if a started backup job has completed.
Args:
backup_id: The identifier name of the backup. NOTE: Case insensitive.
backend: The backend storage where the backup was created.
backup_location: The dynamic location of a backup. By default None.
Returns:
A `BackupStatusReturn` object that contains the backup creation status response.
"""
backup_id, backend = _get_and_validate_get_status(
backup_id=backup_id,
backend=backend, # this check can be removed when we remove the old backup class
)
path = f"/backups/{backend.value}/{backup_id}"
params: Dict[str, str] = {}
if backup_location is not None:
if self._connection._weaviate_version.is_lower_than(1, 27, 2):
raise WeaviateUnsupportedFeatureError(
"BackupConfigCreateStatus dynamic backup location",
str(self._connection._weaviate_version),
"1.27.2",
)
params.update(backup_location._to_dict())
def resp(res: Response) -> BackupStatusReturn:
typed_response = _decode_json_response_dict(res, "Backup status check")
if typed_response is None:
raise EmptyResponseException()
typed_response["id"] = backup_id
return BackupStatusReturn(**typed_response)
return executor.execute(
response_callback=resp,
method=self._connection.get,
path=path,
params=params,
error_msg="Backup creation status failed due to connection error.",
)
def restore(
self,
backup_id: str,
backend: BackupStorage,
include_collections: Union[List[str], str, None] = None,
exclude_collections: Union[List[str], str, None] = None,
roles_restore: Optional[Literal["noRestore", "all"]] = None,
users_restore: Optional[Literal["noRestore", "all"]] = None,
wait_for_completion: bool = False,
config: Optional[BackupConfigRestore] = None,
backup_location: Optional[BackupLocationType] = None,
overwrite_alias: bool = False,
) -> executor.Result[BackupReturn]:
"""Restore a backup of all/per collection Weaviate objects.
Args:
backup_id: The identifier name of the backup. NOTE: Case insensitive.
backend: The backend storage from where to restore the backup.
include_collections: The collection/list of collections to be included in the backup restore. If not specified all
collections will be included (that were backup-ed). Either `include_collections` or
`exclude_collections` can be set. By default None.
exclude_collections: The collection/list of collections to be excluded in the backup restore.
Either `include_collections` or `exclude_collections` can be set. By default None.
wait_for_completion: Whether to wait until the backup restore is done.
config: The configuration of the backup restoration. By default None.
backup_location: The dynamic location of a backup. By default None.
overwrite_alias: Allows ovewriting the collection alias if there is a conflict.
Returns:
A `BackupReturn` object that contains the backup restore response.
Raises:
requests.ConnectionError: If the network connection to weaviate fails.
weaviate.exceptions.UnexpectedStatusCodeError: If weaviate reports a none OK status.
"""
(
backup_id,
backend,
include_collections,
exclude_collections,
) = _get_and_validate_create_restore_arguments(
backup_id=backup_id,
backend=backend,
include_classes=include_collections,
exclude_classes=exclude_collections,
wait_for_completion=wait_for_completion,
)
payload: dict = {
"include": include_collections,
"exclude": exclude_collections,
"overwriteAlias": overwrite_alias,
}
configPayload = {}
if config is not None:
configPayload = config._to_dict()
if backup_location is not None:
if self._connection._weaviate_version.is_lower_than(1, 27, 2):
raise WeaviateUnsupportedFeatureError(
"BackupConfigRestore dynamic backup location",
str(self._connection._weaviate_version),
"1.27.2",
)
if "config" not in payload:
payload["config"] = {}
configPayload.update(backup_location._to_dict())
if roles_restore is not None:
configPayload["rolesOptions"] = roles_restore
if users_restore is not None:
configPayload["usersOptions"] = users_restore
if len(configPayload) > 0:
payload["config"] = configPayload
path = f"/backups/{backend.value}/{backup_id}/restore"
if isinstance(self._connection, ConnectionAsync):
async def _execute() -> BackupReturn:
response = await executor.aresult(
self._connection.post(
path=path,
weaviate_object=payload,
error_msg="Backup restore failed due to connection error.",
)
)
restore_status = _decode_json_response_dict(response, "Backup restore")
assert restore_status is not None
if wait_for_completion:
while True:
status = await executor.aresult(
self.get_restore_status(
backup_id=backup_id,
backend=backend,
backup_location=backup_location,
)
)
restore_status["status"] = status.status
if status.status == BackupStatus.SUCCESS:
break
if status.status == BackupStatus.FAILED:
raise BackupFailedException(
f"Backup restore failed: {restore_status} with error: {status.error}"
)
if status.status == BackupStatus.CANCELED:
raise BackupCanceledError(
f"Backup restore canceled: {restore_status} with error: {status.error}"
)
await asyncio.sleep(1)
return BackupReturn(**restore_status)
return _execute()
response = executor.result(
self._connection.post(
path=path,
weaviate_object=payload,
error_msg="Backup restore failed due to connection error.",
)
)
restore_status = _decode_json_response_dict(response, "Backup restore")
assert restore_status is not None
if wait_for_completion:
while True:
status = executor.result(
self.get_restore_status(
backup_id=backup_id,
backend=backend,
backup_location=backup_location,
)
)
restore_status["status"] = status.status
if status.status == BackupStatus.SUCCESS:
break
if status.status == BackupStatus.FAILED:
raise BackupFailedException(
f"Backup restore failed: {restore_status} with error: {status.error}"
)
if status.status == BackupStatus.CANCELED:
raise BackupCanceledError(
f"Backup restore canceled: {restore_status} with error: {status.error}"
)
time.sleep(1)
return BackupReturn(**restore_status)
def get_restore_status(
self,
backup_id: str,
backend: BackupStorage,
backup_location: Optional[BackupLocationType] = None,
) -> executor.Result[BackupStatusReturn]:
"""Checks if a started restore job has completed.
Args:
backup_id: The identifier name of the backup. NOTE: Case insensitive.
backend: The backend storage where to create the backup.
backup_location: The dynamic location of a backup. By default None.
Returns:
A `BackupStatusReturn` object that contains the backup restore status response.
"""
backup_id, backend = _get_and_validate_get_status(
backup_id=backup_id,
backend=backend,
)
path = f"/backups/{backend.value}/{backup_id}/restore"
params: Dict[str, str] = {}
if backup_location is not None:
if self._connection._weaviate_version.is_lower_than(1, 27, 2):
raise WeaviateUnsupportedFeatureError(
"BackupConfigRestore status dynamic backup location",
str(self._connection._weaviate_version),
"1.27.2",
)
params.update(backup_location._to_dict())
def resp(res: Response) -> BackupStatusReturn:
typed_response = _decode_json_response_dict(res, "Backup restore status check")
if typed_response is None:
raise EmptyResponseException()
typed_response["id"] = backup_id
return BackupStatusReturn(**typed_response)
return executor.execute(
response_callback=resp,
method=self._connection.get,
path=path,
params=params,
error_msg="Backup restore status failed due to connection error.",
)
def cancel(
self,
backup_id: str,
backend: BackupStorage,
backup_location: Optional[BackupLocationType] = None,
) -> executor.Result[bool]:
"""Cancels a running backup.
Args:
backup_id: The identifier name of the backup. NOTE: Case insensitive.
backend: The backend storage where to create the backup.
backup_location: The dynamic location of a backup. By default None.
Raises:
weaviate.exceptions.UnexpectedStatusCodeError: If weaviate reports a none OK status.
Returns:
A bool indicating if the cancellation was successful.
"""
backup_id, backend = _get_and_validate_get_status(
backup_id=backup_id,
backend=backend,
)
path = f"/backups/{backend.value}/{backup_id}"
params: Dict[str, str] = {}
if backup_location is not None:
if self._connection._weaviate_version.is_lower_than(1, 27, 2):
raise WeaviateUnsupportedFeatureError(
"BackupConfigCancel dynamic backup location",
str(self._connection._weaviate_version),
"1.27.2",
)
params.update(backup_location._to_dict())
def resp(res: Response) -> bool:
if res.status_code == 204:
return True
typed_response = _decode_json_response_dict(res, "Backup cancel")
if typed_response is None:
raise EmptyResponseException()
return False
return executor.execute(
response_callback=resp,
method=self._connection.delete,
path=path,
params=params,
error_msg="Backup cancel failed due to connection error.",
status_codes=_ExpectedStatusCodes(ok_in=[204, 404], error="cancel backup"),
)
def list_backups(
self, backend: BackupStorage, sort_by_starting_time_asc: Optional[bool] = None
) -> executor.Result[List[BackupListReturn]]:
_, backend = _get_and_validate_get_status(backend=backend, backup_id="dummy")
path = f"/backups/{backend.value}"
params = {}
if sort_by_starting_time_asc:
params["order"] = "asc"
def resp(res: Response) -> List[BackupListReturn]:
typed_response = _decode_json_response_list(res, "Backup list")
if typed_response is None:
raise EmptyResponseException()
return [BackupListReturn(**entry) for entry in typed_response]
return executor.execute(
response_callback=resp,
method=self._connection.get,
params=params,
path=path,
error_msg="Backup listing failed due to connection error.",
status_codes=_ExpectedStatusCodes(ok_in=[200], error="list backup"),
)
def _get_and_validate_create_restore_arguments(
backup_id: str,
backend: Union[str, BackupStorage],
include_classes: Union[List[str], str, None],
exclude_classes: Union[List[str], str, None],
wait_for_completion: bool,
) -> Tuple[str, BackupStorage, List[str], List[str]]:
"""Validate and return the Backup.create/Backup.restore arguments.
Args:
backup_id: The identifier name of the backup.
backend: The backend storage. Currently available options are: "filesystem", "s3", "gcs" and "azure".
include_classes: The class/list of classes to be included in the backup. If not specified all classes
will be included. Either `include_classes` or `exclude_classes` can be set.
exclude_classes: The class/list of classes to be excluded from the backup.
Either `include_classes` or `exclude_classes` can be set.
wait_for_completion: Whether to wait until the backup restore is done.
Returns:
Validated and processed (backup_id, backend, include_classes, exclude_classes).
Raises:
TypeError: If one of the arguments have a wrong type.
ValueError: If 'backend' does not have an accepted value.
"""
if not isinstance(backup_id, str):
raise TypeError(f"'backup_id' must be of type str. Given type: {type(backup_id)}.")
if isinstance(backend, str):
try:
backend = BackupStorage(backend.lower())
except KeyError:
raise ValueError(
f"'backend' must have one of these values: {STORAGE_NAMES}. Given value: {backend}."
)
if not isinstance(wait_for_completion, bool):
raise TypeError(
f"'wait_for_completion' must be of type bool. Given type: {type(wait_for_completion)}."
)
if include_classes is not None:
if isinstance(include_classes, str):
include_classes = [include_classes]
elif not isinstance(include_classes, list):
raise TypeError(
"'include_classes' must be of type str, list of str or None. "
f"Given type: {type(include_classes)}."
)
else:
include_classes = []
if exclude_classes is not None:
if isinstance(exclude_classes, str):
exclude_classes = [exclude_classes]
elif not isinstance(exclude_classes, list):
raise TypeError(
"'exclude_classes' must be of type str, list of str or None. "
f"Given type: {type(exclude_classes)}."
)
else:
exclude_classes = []
if include_classes and exclude_classes:
raise TypeError("Either 'include_classes' OR 'exclude_classes' can be set, not both.")
include_classes = [_capitalize_first_letter(cls) for cls in include_classes]
exclude_classes = [_capitalize_first_letter(cls) for cls in exclude_classes]
return (backup_id.lower(), backend, include_classes, exclude_classes)
def _get_and_validate_get_status(
backup_id: str, backend: Union[str, BackupStorage]
) -> Tuple[str, BackupStorage]:
"""Checks if a started classification job has completed.
Args:
backup_id: The identifier name of the backup. NOTE: Case insensitive.
backend: The backend storage where to create the backup. Currently available options are:
"filesystem", "s3", "gcs" and "azure".
Returns:
Validated and processed (backup_id, backend, include_classes, exclude_classes).
Raises:
TypeError: One of the arguments is of a wrong type.
"""
if not isinstance(backup_id, str):
raise TypeError(f"'backup_id' must be of type str. Given type: {type(backup_id)}.")
if isinstance(backend, str):
try:
backend = BackupStorage(backend.lower())
except KeyError:
raise ValueError(
f"'backend' must have one of these values: {STORAGE_NAMES}. Given value: {backend}."
)
return (backup_id.lower(), backend)
| _BackupExecutor |
python | great-expectations__great_expectations | great_expectations/render/renderer_configuration.py | {
"start": 1338,
"end": 2299
} | class ____(str, Enum):
"""Type used in renderer param json schema dictionary."""
ARRAY = "array"
BOOLEAN = "boolean"
DATETIME = "datetime"
NUMBER = "number"
OBJECT = "object"
STRING = "string"
@classmethod
def from_value(cls, value: Any) -> RendererValueType: # noqa: PLR0911
if value is None:
return RendererValueType.STRING
if isinstance(value, list):
return RendererValueType.ARRAY
elif isinstance(value, bool):
return RendererValueType.BOOLEAN
elif isinstance(value, (date, datetime)):
return RendererValueType.DATETIME
elif isinstance(value, Number):
return RendererValueType.NUMBER
elif isinstance(value, dict):
return RendererValueType.OBJECT
elif isinstance(value, (str, uuid.UUID)):
return RendererValueType.STRING
else:
raise TypeError
| RendererValueType |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-cleanlab/llama_index/llms/cleanlab/base.py | {
"start": 799,
"end": 4138
} | class ____(CustomLLM):
"""
Cleanlab TLM.
Examples:
`pip install llama-index-llms-cleanlab`
```python
from llama_index.llms.cleanlab import CleanlabTLM
llm = CleanlabTLM(api_key=api_key, quality_preset="best", options={"log": ["explanation"]})
resp = llm.complete("Who is Paul Graham?")
print(resp)
```
Arguments:
`quality_preset` and `options` are configuration settings you can optionally specify to improve latency or accuracy.
More information can be found here:
https://help.cleanlab.ai/tlm/
"""
model: str = Field(
default=DEFAULT_MODEL,
description="Base LLM to use with TLM.",
)
max_tokens: int = Field(
default=DEFAULT_MAX_TOKENS,
description="The maximum number of tokens to generate in TLM response.",
)
_client: Any = PrivateAttr()
def __init__(
self,
api_key: Optional[str] = None,
quality_preset: Optional[str] = DEFAULT_QUALITY_PRESET,
options: Optional[Dict] = None,
callback_manager: Optional[CallbackManager] = None,
additional_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
super().__init__(
additional_kwargs=additional_kwargs or {},
callback_manager=callback_manager,
)
self.max_tokens = (
options.get("max_tokens")
if options and "max_tokens" in options
else DEFAULT_MAX_TOKENS
)
api_key = get_from_param_or_env("api_key", api_key, "CLEANLAB_API_KEY")
self._client = TLM(
api_key=api_key, quality_preset=quality_preset, options=options
)
self.model = self._client.get_model_name()
@classmethod
def class_name(cls) -> str:
return "CleanlabTLM"
@property
def metadata(self) -> LLMMetadata:
"""Get LLM metadata."""
return LLMMetadata(
context_window=get_default_context_limit(),
num_output=self.max_tokens,
model_name=self.model,
)
def _parse_response(self, response: Dict) -> CompletionResponse:
"""Parse the response from TLM and return a CompletionResponse object."""
try:
text = response["response"]
trust_score = response["trustworthiness_score"]
except KeyError as e:
raise ValueError(f"Missing expected key in response: {e}")
additional_data = {"trustworthiness_score": trust_score}
if "log" in response and "explanation" in response["log"]:
additional_data["explanation"] = response["log"]["explanation"]
return CompletionResponse(text=text, additional_kwargs=additional_data)
@llm_completion_callback()
def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
response = self._client.prompt(prompt)
return self._parse_response(response)
@llm_completion_callback()
def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
# Raise implementation error since TLM doesn't support native streaming
raise NotImplementedError(
"Streaming is not supported in TLM. Instead stream in the response from the LLM and subsequently use TLM to score its trustworthiness."
)
| CleanlabTLM |
python | numpy__numpy | numpy/_core/tests/test_casting_unittests.py | {
"start": 5616,
"end": 40941
} | class ____:
size = 1500 # Best larger than NPY_LOWLEVEL_BUFFER_BLOCKSIZE * itemsize
def get_data(self, dtype1, dtype2):
if dtype2 is None or dtype1.itemsize >= dtype2.itemsize:
length = self.size // dtype1.itemsize
else:
length = self.size // dtype2.itemsize
# Assume that the base array is well enough aligned for all inputs.
arr1 = np.empty(length, dtype=dtype1)
assert arr1.flags.c_contiguous
assert arr1.flags.aligned
values = [random.randrange(-128, 128) for _ in range(length)]
for i, value in enumerate(values):
# Use item assignment to ensure this is not using casting:
if value < 0 and dtype1.kind == "u":
# Manually rollover unsigned integers (-1 -> int.max)
value = value + np.iinfo(dtype1).max + 1
arr1[i] = value
if dtype2 is None:
if dtype1.char == "?":
values = [bool(v) for v in values]
return arr1, values
if dtype2.char == "?":
values = [bool(v) for v in values]
arr2 = np.empty(length, dtype=dtype2)
assert arr2.flags.c_contiguous
assert arr2.flags.aligned
for i, value in enumerate(values):
# Use item assignment to ensure this is not using casting:
if value < 0 and dtype2.kind == "u":
# Manually rollover unsigned integers (-1 -> int.max)
value = value + np.iinfo(dtype2).max + 1
arr2[i] = value
return arr1, arr2, values
def get_data_variation(self, arr1, arr2, aligned=True, contig=True):
"""
Returns a copy of arr1 that may be non-contiguous or unaligned, and a
matching array for arr2 (although not a copy).
"""
if contig:
stride1 = arr1.dtype.itemsize
stride2 = arr2.dtype.itemsize
elif aligned:
stride1 = 2 * arr1.dtype.itemsize
stride2 = 2 * arr2.dtype.itemsize
else:
stride1 = arr1.dtype.itemsize + 1
stride2 = arr2.dtype.itemsize + 1
max_size1 = len(arr1) * 3 * arr1.dtype.itemsize + 1
max_size2 = len(arr2) * 3 * arr2.dtype.itemsize + 1
from_bytes = np.zeros(max_size1, dtype=np.uint8)
to_bytes = np.zeros(max_size2, dtype=np.uint8)
# Sanity check that the above is large enough:
assert stride1 * len(arr1) <= from_bytes.nbytes
assert stride2 * len(arr2) <= to_bytes.nbytes
if aligned:
new1 = as_strided(from_bytes[:-1].view(arr1.dtype),
arr1.shape, (stride1,))
new2 = as_strided(to_bytes[:-1].view(arr2.dtype),
arr2.shape, (stride2,))
else:
new1 = as_strided(from_bytes[1:].view(arr1.dtype),
arr1.shape, (stride1,))
new2 = as_strided(to_bytes[1:].view(arr2.dtype),
arr2.shape, (stride2,))
new1[...] = arr1
if not contig:
# Ensure we did not overwrite bytes that should not be written:
offset = arr1.dtype.itemsize if aligned else 0
buf = from_bytes[offset::stride1].tobytes()
assert buf.count(b"\0") == len(buf)
if contig:
assert new1.flags.c_contiguous
assert new2.flags.c_contiguous
else:
assert not new1.flags.c_contiguous
assert not new2.flags.c_contiguous
if aligned:
assert new1.flags.aligned
assert new2.flags.aligned
else:
assert not new1.flags.aligned or new1.dtype.alignment == 1
assert not new2.flags.aligned or new2.dtype.alignment == 1
return new1, new2
@pytest.mark.parametrize("from_Dt", simple_dtypes)
def test_simple_cancast(self, from_Dt):
for to_Dt in simple_dtypes:
cast = get_castingimpl(from_Dt, to_Dt)
for from_dt in [from_Dt(), from_Dt().newbyteorder()]:
default = cast._resolve_descriptors((from_dt, None))[1][1]
assert default == to_Dt()
del default
for to_dt in [to_Dt(), to_Dt().newbyteorder()]:
casting, (from_res, to_res), view_off = (
cast._resolve_descriptors((from_dt, to_dt)))
assert type(from_res) == from_Dt
assert type(to_res) == to_Dt
if view_off is not None:
# If a view is acceptable, this is "no" casting
# and byte order must be matching.
assert casting == Casting.no | Casting.same_value
# The above table lists this as "equivalent", perhaps
# with "same_value"
v = CAST_TABLE[from_Dt][to_Dt] & ~Casting.same_value
assert Casting.equiv == v
# Note that to_res may not be the same as from_dt
assert from_res.isnative == to_res.isnative
else:
if from_Dt == to_Dt:
# Note that to_res may not be the same as from_dt
assert from_res.isnative != to_res.isnative
assert casting == CAST_TABLE[from_Dt][to_Dt]
if from_Dt is to_Dt:
assert from_dt is from_res
assert to_dt is to_res
@pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning")
@pytest.mark.parametrize("from_dt", simple_dtype_instances())
def test_simple_direct_casts(self, from_dt):
"""
This test checks numeric direct casts for dtypes supported also by the
struct module (plus complex). It tries to be test a wide range of
inputs, but skips over possibly undefined behaviour (e.g. int rollover).
Longdouble and CLongdouble are tested, but only using double precision.
If this test creates issues, it should possibly just be simplified
or even removed (checking whether unaligned/non-contiguous casts give
the same results is useful, though).
"""
for to_dt in simple_dtype_instances():
to_dt = to_dt.values[0]
cast = get_castingimpl(type(from_dt), type(to_dt))
# print("from_dt", from_dt, "to_dt", to_dt)
casting, (from_res, to_res), view_off = cast._resolve_descriptors(
(from_dt, to_dt))
if from_res is not from_dt or to_res is not to_dt:
# Do not test this case, it is handled in multiple steps,
# each of which should is tested individually.
return
safe = casting <= Casting.safe
del from_res, to_res, casting
arr1, arr2, values = self.get_data(from_dt, to_dt)
# print("2", arr1, arr2, cast)
cast._simple_strided_call((arr1, arr2))
# print("3")
# Check via python list
assert arr2.tolist() == values
# Check that the same results are achieved for strided loops
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
# Check if alignment makes a difference, but only if supported
# and only if the alignment can be wrong
if ((from_dt.alignment == 1 and to_dt.alignment == 1) or
not cast._supports_unaligned):
return
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, True)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, False)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
del arr1_o, arr2_o, cast
@pytest.mark.parametrize("from_Dt", simple_dtypes)
def test_numeric_to_times(self, from_Dt):
# We currently only implement contiguous loops, so only need to
# test those.
from_dt = from_Dt()
time_dtypes = [np.dtype("M8"), np.dtype("M8[ms]"), np.dtype("M8[4D]"),
np.dtype("m8"), np.dtype("m8[ms]"), np.dtype("m8[4D]")]
for time_dt in time_dtypes:
cast = get_castingimpl(type(from_dt), type(time_dt))
casting, (from_res, to_res), view_off = cast._resolve_descriptors(
(from_dt, time_dt))
assert from_res is from_dt
assert to_res is time_dt
del from_res, to_res
assert casting & CAST_TABLE[from_Dt][type(time_dt)]
assert view_off is None
int64_dt = np.dtype(np.int64)
arr1, arr2, values = self.get_data(from_dt, int64_dt)
arr2 = arr2.view(time_dt)
arr2[...] = np.datetime64("NaT")
if time_dt == np.dtype("M8"):
# This is a bit of a strange path, and could probably be removed
arr1[-1] = 0 # ensure at least one value is not NaT
# The cast currently succeeds, but the values are invalid:
cast._simple_strided_call((arr1, arr2))
with pytest.raises(ValueError):
str(arr2[-1]) # e.g. conversion to string fails
return
cast._simple_strided_call((arr1, arr2))
assert [int(v) for v in arr2.tolist()] == values
# Check that the same results are achieved for strided loops
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
@pytest.mark.parametrize(
        ["from_dt", "to_dt", "expected_casting", "expected_view_off",
         "nom", "denom"],
        [("M8[ns]", None, Casting.no, 0, 1, 1),
         (str(np.dtype("M8[ns]").newbyteorder()), None,
          Casting.equiv, None, 1, 1),
         ("M8", "M8[ms]", Casting.safe, 0, 1, 1),
         # should be invalid cast:
         ("M8[ms]", "M8", Casting.unsafe, None, 1, 1),
         ("M8[5ms]", "M8[5ms]", Casting.no, 0, 1, 1),
         ("M8[ns]", "M8[ms]", Casting.same_kind, None, 1, 10**6),
         ("M8[ms]", "M8[ns]", Casting.safe, None, 10**6, 1),
         ("M8[ms]", "M8[7ms]", Casting.same_kind, None, 1, 7),
         ("M8[4D]", "M8[1M]", Casting.same_kind, None, None,
          # give full values based on NumPy 1.19.x
          [-2**63, 0, -1, 1314, -1315, 564442610]),
         ("m8[ns]", None, Casting.no, 0, 1, 1),
         (str(np.dtype("m8[ns]").newbyteorder()), None,
          Casting.equiv, None, 1, 1),
         ("m8", "m8[ms]", Casting.safe, 0, 1, 1),
         # should be invalid cast:
         ("m8[ms]", "m8", Casting.unsafe, None, 1, 1),
         ("m8[5ms]", "m8[5ms]", Casting.no, 0, 1, 1),
         ("m8[ns]", "m8[ms]", Casting.same_kind, None, 1, 10**6),
         ("m8[ms]", "m8[ns]", Casting.safe, None, 10**6, 1),
         ("m8[ms]", "m8[7ms]", Casting.same_kind, None, 1, 7),
         ("m8[4D]", "m8[1M]", Casting.unsafe, None, None,
          # give full values based on NumPy 1.19.x
          [-2**63, 0, 0, 1314, -1315, 564442610])])
def test_time_to_time(self, from_dt, to_dt,
                      expected_casting, expected_view_off,
                      nom, denom):
    """Datetime/timedelta unit conversions: resolution and strided results.

    ``nom``/``denom`` give the integer conversion factor; when ``nom`` is
    None, ``denom`` holds the full expected output values instead.
    """
    from_dt = np.dtype(from_dt)
    if to_dt is not None:
        to_dt = np.dtype(to_dt)

    # Test a few values for casting (results generated with NumPy 1.19)
    values = np.array([-2**63, 1, 2**63 - 1, 10000, -10000, 2**32])
    values = values.astype(np.dtype("int64").newbyteorder(from_dt.byteorder))
    assert values.dtype.byteorder == from_dt.byteorder
    assert np.isnat(values.view(from_dt)[0])

    # All parametrized cases stay within one DType class (M8->M8, m8->m8).
    DType = type(from_dt)
    cast = get_castingimpl(DType, DType)
    casting, (from_res, to_res), view_off = cast._resolve_descriptors(
        (from_dt, to_dt))
    assert from_res is from_dt
    assert to_res is to_dt or to_dt is None
    assert casting == expected_casting
    assert view_off == expected_view_off

    if nom is not None:
        expected_out = (values * nom // denom).view(to_res)
        expected_out[0] = "NaT"
    else:
        expected_out = np.empty_like(values)
        expected_out[...] = denom
        expected_out = expected_out.view(to_dt)

    orig_arr = values.view(from_dt)
    orig_out = np.empty_like(expected_out)

    if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"):  # noqa: PLR1714
        # Casting from non-generic to generic units is an error and should
        # probably be reported as an invalid cast earlier.
        with pytest.raises(ValueError):
            cast._simple_strided_call((orig_arr, orig_out))
        return

    # BUG FIX: both loops previously iterated `[True, True]`, so the
    # aligned/contiguous variation was tested twice and the unaligned and
    # strided variations never.  (Assumes the time-to-time cast supports
    # unaligned data, as the int64-based time loops generally do.)
    for aligned in [True, False]:
        for contig in [True, False]:
            arr, out = self.get_data_variation(
                orig_arr, orig_out, aligned, contig)
            out[...] = 0
            cast._simple_strided_call((arr, out))
            assert_array_equal(out.view("int64"), expected_out.view("int64"))
def string_with_modified_length(self, dtype, change_length):
    """Return *dtype* with its character length changed by *change_length*.

    Works for both bytes ("S", 1 byte/char) and unicode ("U", 4 bytes/char)
    dtypes; byte order is preserved.
    """
    bytes_per_char = 1 if dtype.char == "S" else 4
    new_length = dtype.itemsize // bytes_per_char + change_length
    return np.dtype(f"{dtype.byteorder}{dtype.char}{new_length}")
@pytest.mark.parametrize("other_DT", simple_dtypes)
@pytest.mark.parametrize("string_char", ["S", "U"])
def test_string_cancast(self, other_DT, string_char):
fact = 1 if string_char == "S" else 4
string_DT = type(np.dtype(string_char))
cast = get_castingimpl(other_DT, string_DT)
other_dt = other_DT()
expected_length = get_expected_stringlength(other_dt)
string_dt = np.dtype(f"{string_char}{expected_length}")
safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors(
(other_dt, None))
assert res_dt.itemsize == expected_length * fact
assert safety == Casting.safe # we consider to string casts "safe"
assert view_off is None
assert isinstance(res_dt, string_DT)
# These casts currently implement changing the string length, so
# check the cast-safety for too long/fixed string lengths:
for change_length in [-1, 0, 1]:
if change_length >= 0:
expected_safety = Casting.safe
else:
expected_safety = Casting.same_kind
to_dt = self.string_with_modified_length(string_dt, change_length)
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(other_dt, to_dt))
assert res_dt is to_dt
assert safety == expected_safety
assert view_off is None
# The opposite direction is always considered unsafe:
cast = get_castingimpl(string_DT, other_DT)
safety, _, view_off = cast._resolve_descriptors((string_dt, other_dt))
assert safety == Casting.unsafe
assert view_off is None
cast = get_castingimpl(string_DT, other_DT)
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(string_dt, None))
assert safety == Casting.unsafe
assert view_off is None
assert other_dt is res_dt # returns the singleton for simple dtypes
@pytest.mark.parametrize("string_char", ["S", "U"])
@pytest.mark.parametrize("other_dt", simple_dtype_instances())
def test_simple_string_casts_roundtrip(self, other_dt, string_char):
"""
Tests casts from and to string by checking the roundtripping property.
The test also covers some string to string casts (but not all).
If this test creates issues, it should possibly just be simplified
or even removed (checking whether unaligned/non-contiguous casts give
the same results is useful, though).
"""
string_DT = type(np.dtype(string_char))
cast = get_castingimpl(type(other_dt), string_DT)
cast_back = get_castingimpl(string_DT, type(other_dt))
_, (res_other_dt, string_dt), _ = cast._resolve_descriptors(
(other_dt, None))
if res_other_dt is not other_dt:
# do not support non-native byteorder, skip test in that case
assert other_dt.byteorder != res_other_dt.byteorder
return
orig_arr, values = self.get_data(other_dt, None)
str_arr = np.zeros(len(orig_arr), dtype=string_dt)
string_dt_short = self.string_with_modified_length(string_dt, -1)
str_arr_short = np.zeros(len(orig_arr), dtype=string_dt_short)
string_dt_long = self.string_with_modified_length(string_dt, 1)
str_arr_long = np.zeros(len(orig_arr), dtype=string_dt_long)
assert not cast._supports_unaligned # if support is added, should test
assert not cast_back._supports_unaligned
for contig in [True, False]:
other_arr, str_arr = self.get_data_variation(
orig_arr, str_arr, True, contig)
_, str_arr_short = self.get_data_variation(
orig_arr, str_arr_short.copy(), True, contig)
_, str_arr_long = self.get_data_variation(
orig_arr, str_arr_long, True, contig)
cast._simple_strided_call((other_arr, str_arr))
cast._simple_strided_call((other_arr, str_arr_short))
assert_array_equal(str_arr.astype(string_dt_short), str_arr_short)
cast._simple_strided_call((other_arr, str_arr_long))
assert_array_equal(str_arr, str_arr_long)
if other_dt.kind == "b":
# Booleans do not roundtrip
continue
other_arr[...] = 0
cast_back._simple_strided_call((str_arr, other_arr))
assert_array_equal(orig_arr, other_arr)
other_arr[...] = 0
cast_back._simple_strided_call((str_arr_long, other_arr))
assert_array_equal(orig_arr, other_arr)
@pytest.mark.parametrize("other_dt", ["S8", "<U8", ">U8"])
@pytest.mark.parametrize("string_char", ["S", "U"])
def test_string_to_string_cancast(self, other_dt, string_char):
other_dt = np.dtype(other_dt)
fact = 1 if string_char == "S" else 4
div = 1 if other_dt.char == "S" else 4
string_DT = type(np.dtype(string_char))
cast = get_castingimpl(type(other_dt), string_DT)
expected_length = other_dt.itemsize // div
string_dt = np.dtype(f"{string_char}{expected_length}")
safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors(
(other_dt, None))
assert res_dt.itemsize == expected_length * fact
assert isinstance(res_dt, string_DT)
expected_view_off = None
if other_dt.char == string_char:
if other_dt.isnative:
expected_safety = Casting.no
expected_view_off = 0
else:
expected_safety = Casting.equiv
elif string_char == "U":
expected_safety = Casting.safe
else:
expected_safety = Casting.unsafe
assert view_off == expected_view_off
assert expected_safety == safety
for change_length in [-1, 0, 1]:
to_dt = self.string_with_modified_length(string_dt, change_length)
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(other_dt, to_dt))
assert res_dt is to_dt
if change_length <= 0:
assert view_off == expected_view_off
else:
assert view_off is None
if expected_safety == Casting.unsafe:
assert safety == expected_safety
elif change_length < 0:
assert safety == Casting.same_kind
elif change_length == 0:
assert safety == expected_safety
elif change_length > 0:
assert safety == Casting.safe
@pytest.mark.parametrize("order1", [">", "<"])
@pytest.mark.parametrize("order2", [">", "<"])
def test_unicode_byteswapped_cast(self, order1, order2):
# Very specific tests (not using the castingimpl directly)
# that tests unicode bytedwaps including for unaligned array data.
dtype1 = np.dtype(f"{order1}U30")
dtype2 = np.dtype(f"{order2}U30")
data1 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype1)
data2 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype2)
if dtype1.alignment != 1:
# alignment should always be >1, but skip the check if not
assert not data1.flags.aligned
assert not data2.flags.aligned
element = "this is a ünicode string‽"
data1[()] = element
# Test both `data1` and `data1.copy()` (which should be aligned)
for data in [data1, data1.copy()]:
data2[...] = data1
assert data2[()] == element
assert data2.copy()[()] == element
def test_void_to_string_special_case(self):
# Cover a small special case in void to string casting that could
# probably just as well be turned into an error (compare
# `test_object_to_parametric_internal_error` below).
assert np.array([], dtype="V5").astype("S").dtype.itemsize == 5
assert np.array([], dtype="V5").astype("U").dtype.itemsize == 4 * 5
def test_object_to_parametric_internal_error(self):
    """Object -> parametric dtype without a concrete instance must raise."""
    # We reject casting from object to a parametric type, without
    # figuring out the correct instance first.
    object_dtype = type(np.dtype(object))
    other_dtype = type(np.dtype(str))
    cast = get_castingimpl(object_dtype, other_dtype)
    with pytest.raises(TypeError,
                       match="casting from object to the parametric DType"):
        cast._resolve_descriptors((np.dtype("O"), None))
@pytest.mark.parametrize("dtype", simple_dtype_instances())
def test_object_and_simple_resolution(self, dtype):
# Simple test to exercise the cast when no instance is specified
object_dtype = type(np.dtype(object))
cast = get_castingimpl(object_dtype, type(dtype))
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(np.dtype("O"), dtype))
assert safety == Casting.unsafe
assert view_off is None
assert res_dt is dtype
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(np.dtype("O"), None))
assert safety == Casting.unsafe
assert view_off is None
assert res_dt == dtype.newbyteorder("=")
@pytest.mark.parametrize("dtype", simple_dtype_instances())
def test_simple_to_object_resolution(self, dtype):
# Simple test to exercise the cast when no instance is specified
object_dtype = type(np.dtype(object))
cast = get_castingimpl(type(dtype), object_dtype)
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(dtype, None))
assert safety == Casting.safe
assert view_off is None
assert res_dt is np.dtype("O")
@pytest.mark.parametrize("casting", ["no", "unsafe"])
def test_void_and_structured_with_subarray(self, casting):
# test case corresponding to gh-19325
dtype = np.dtype([("foo", "<f4", (3, 2))])
expected = casting == "unsafe"
assert np.can_cast("V4", dtype, casting=casting) == expected
assert np.can_cast(dtype, "V4", casting=casting) == expected
@pytest.mark.parametrize(["to_dt", "expected_off"],
[ # Same as `from_dt` but with both fields shifted:
(np.dtype({"names": ["a", "b"], "formats": ["i4", "f4"],
"offsets": [0, 4]}), 2),
# Additional change of the names
(np.dtype({"names": ["b", "a"], "formats": ["i4", "f4"],
"offsets": [0, 4]}), 2),
# Incompatible field offset change
(np.dtype({"names": ["b", "a"], "formats": ["i4", "f4"],
"offsets": [0, 6]}), None)])
def test_structured_field_offsets(self, to_dt, expected_off):
# This checks the cast-safety and view offset for swapped and "shifted"
# fields which are viewable
from_dt = np.dtype({"names": ["a", "b"],
"formats": ["i4", "f4"],
"offsets": [2, 6]})
cast = get_castingimpl(type(from_dt), type(to_dt))
safety, _, view_off = cast._resolve_descriptors((from_dt, to_dt))
if from_dt.names == to_dt.names:
assert safety == Casting.equiv
else:
assert safety == Casting.safe
# Shifting the original data pointer by -2 will align both by
# effectively adding 2 bytes of spacing before `from_dt`.
assert view_off == expected_off
@pytest.mark.parametrize(("from_dt", "to_dt", "expected_off"), [
# Subarray cases:
("i", "(1,1)i", 0),
("(1,1)i", "i", 0),
("(2,1)i", "(2,1)i", 0),
# field cases (field to field is tested explicitly also):
# Not considered viewable, because a negative offset would allow
# may structured dtype to indirectly access invalid memory.
("i", {"names": ["a"], "formats": ["i"], "offsets": [2]}, None),
({"names": ["a"], "formats": ["i"], "offsets": [2]}, "i", 2),
# Currently considered not viewable, due to multiple fields
# even though they overlap (maybe we should not allow that?)
("i", {"names": ["a", "b"], "formats": ["i", "i"], "offsets": [2, 2]},
None),
# different number of fields can't work, should probably just fail
# so it never reports "viewable":
("i,i", "i,i,i", None),
# Unstructured void cases:
("i4", "V3", 0), # void smaller or equal
("i4", "V4", 0), # void smaller or equal
("i4", "V10", None), # void is larger (no view)
("O", "V4", None), # currently reject objects for view here.
("O", "V8", None), # currently reject objects for view here.
("V4", "V3", 0),
("V4", "V4", 0),
("V3", "V4", None),
# Note that currently void-to-other cast goes via byte-strings
# and is not a "view" based cast like the opposite direction:
("V4", "i4", None),
# completely invalid/impossible cast:
("i,i", "i,i,i", None),
])
def test_structured_view_offsets_parametric(
self, from_dt, to_dt, expected_off):
# TODO: While this test is fairly thorough, right now, it does not
# really test some paths that may have nonzero offsets (they don't
# really exists).
from_dt = np.dtype(from_dt)
to_dt = np.dtype(to_dt)
cast = get_castingimpl(type(from_dt), type(to_dt))
_, _, view_off = cast._resolve_descriptors((from_dt, to_dt))
assert view_off == expected_off
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_object_casts_NULL_None_equivalence(self, dtype):
# None to <other> casts may succeed or fail, but a NULL'ed array must
# behave the same as one filled with None's.
arr_normal = np.array([None] * 5)
arr_NULLs = np.empty_like(arr_normal)
ctypes.memset(arr_NULLs.ctypes.data, 0, arr_NULLs.nbytes)
# If the check fails (maybe it should) the test would lose its purpose:
assert arr_NULLs.tobytes() == b"\x00" * arr_NULLs.nbytes
try:
expected = arr_normal.astype(dtype)
except TypeError:
with pytest.raises(TypeError):
arr_NULLs.astype(dtype)
else:
assert_array_equal(expected, arr_NULLs.astype(dtype))
@pytest.mark.parametrize("dtype",
np.typecodes["AllInteger"] + np.typecodes["AllFloat"])
def test_nonstandard_bool_to_other(self, dtype):
# simple test for casting bool_ to numeric types, which should not
# expose the detail that NumPy bools can sometimes take values other
# than 0 and 1. See also gh-19514.
nonstandard_bools = np.array([0, 3, -7], dtype=np.int8).view(bool)
res = nonstandard_bools.astype(dtype)
expected = [0, 1, 1]
assert_array_equal(res, expected)
@pytest.mark.parametrize("to_dtype",
np.typecodes["AllInteger"] + np.typecodes["AllFloat"])
@pytest.mark.parametrize("from_dtype",
np.typecodes["AllInteger"] + np.typecodes["AllFloat"])
@pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning")
def test_same_value_overflow(self, from_dtype, to_dtype):
if from_dtype == to_dtype:
return
top1 = 0
top2 = 0
try:
top1 = np.iinfo(from_dtype).max
except ValueError:
top1 = np.finfo(from_dtype).max
try:
top2 = np.iinfo(to_dtype).max
except ValueError:
top2 = np.finfo(to_dtype).max
# No need to test if top2 > top1, since the test will also do the
# reverse dtype matching. Catch then warning if the comparison warns,
# i.e. np.int16(65535) < np.float16(6.55e4)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always", RuntimeWarning)
if top2 >= top1:
# will be tested when the dtypes are reversed
return
# Happy path
arr1 = np.array([0] * 10, dtype=from_dtype)
arr2 = np.array([0] * 10, dtype=to_dtype)
arr1_astype = arr1.astype(to_dtype, casting='same_value')
assert_equal(arr1_astype, arr2, strict=True)
# Make it overflow, both aligned and unaligned
arr1[0] = top1
aligned = np.empty(arr1.itemsize * arr1.size + 1, 'uint8')
unaligned = aligned[1:].view(arr1.dtype)
unaligned[:] = arr1
with pytest.raises(ValueError):
# Casting float to float with overflow should raise
# RuntimeWarning (fperror)
# Casting float to int with overflow sometimes raises
# RuntimeWarning (fperror)
# Casting with overflow and 'same_value', should raise ValueError
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", RuntimeWarning)
arr1.astype(to_dtype, casting='same_value')
assert len(w) < 2
with pytest.raises(ValueError):
# again, unaligned
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", RuntimeWarning)
unaligned.astype(to_dtype, casting='same_value')
assert len(w) < 2
@pytest.mark.parametrize("to_dtype",
np.typecodes["AllInteger"])
@pytest.mark.parametrize("from_dtype",
np.typecodes["AllFloat"])
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_same_value_float_to_int(self, from_dtype, to_dtype):
# Should not raise, since the values can round trip
arr1 = np.arange(10, dtype=from_dtype)
aligned = np.empty(arr1.itemsize * arr1.size + 1, 'uint8')
unaligned = aligned[1:].view(arr1.dtype)
unaligned[:] = arr1
arr2 = np.arange(10, dtype=to_dtype)
assert_array_equal(arr1.astype(to_dtype, casting='same_value'), arr2)
assert_array_equal(unaligned.astype(to_dtype, casting='same_value'), arr2)
# Should raise, since values cannot round trip. Might warn too about
# FPE errors
arr1_66 = arr1 + 0.666
unaligned_66 = unaligned + 0.66
with pytest.raises(ValueError):
arr1_66.astype(to_dtype, casting='same_value')
with pytest.raises(ValueError):
unaligned_66.astype(to_dtype, casting='same_value')
@pytest.mark.parametrize("to_dtype",
np.typecodes["AllInteger"])
@pytest.mark.parametrize("from_dtype",
np.typecodes["AllFloat"])
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_same_value_float_to_int_scalar(self, from_dtype, to_dtype):
# Should not raise, since the values can round trip
s1 = np.array(10, dtype=from_dtype)
assert s1.astype(to_dtype, casting='same_value') == 10
# Should raise, since values cannot round trip
s1_66 = s1 + 0.666
with pytest.raises(ValueError):
s1_66.astype(to_dtype, casting='same_value')
@pytest.mark.parametrize("value", [np.nan, np.inf, -np.inf])
@pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning")
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_same_value_naninf(self, value):
# These work, but may trigger FPE warnings on macOS
np.array([value], dtype=np.half).astype(np.cdouble, casting='same_value')
np.array([value], dtype=np.half).astype(np.double, casting='same_value')
np.array([value], dtype=np.float32).astype(np.cdouble, casting='same_value')
np.array([value], dtype=np.float32).astype(np.double, casting='same_value')
np.array([value], dtype=np.float32).astype(np.half, casting='same_value')
np.array([value], dtype=np.complex64).astype(np.half, casting='same_value')
# These fail
with pytest.raises(ValueError):
np.array([value], dtype=np.half).astype(np.int64, casting='same_value')
with pytest.raises(ValueError):
np.array([value], dtype=np.complex64).astype(np.int64, casting='same_value')
with pytest.raises(ValueError):
np.array([value], dtype=np.float32).astype(np.int64, casting='same_value')
@pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning")
def test_same_value_complex(self):
    """Complex 'same_value' casts: precision loss OK, imag part loss not."""
    arr = np.array([complex(1, 1)], dtype=np.cdouble)
    # This works
    arr.astype(np.complex64, casting='same_value')

    # Casting with a non-zero imag part fails
    with pytest.raises(ValueError):
        arr.astype(np.float32, casting='same_value')
def test_same_value_scalar(self):
    """Int <-> float 'same_value' casts of 0-d arrays preserve the value."""
    i = np.array(123, dtype=np.int64)
    f = np.array(123, dtype=np.float64)
    assert i.astype(np.float64, casting='same_value') == f
    # NOTE(review): comparing the int result against `f` (not `i`) -- works
    # because 123 == 123.0, but `i` may have been intended; confirm.
    assert f.astype(np.int64, casting='same_value') == f
| TestCasting |
python | doocs__leetcode | solution/2700-2799/2786.Visit Array Positions to Maximize Score/Solution.py | {
"start": 0,
"end": 236
} | class ____:
def maxScore(self, nums: List[int], x: int) -> int:
f = [-inf] * 2
f[nums[0] & 1] = nums[0]
for v in nums[1:]:
f[v & 1] = max(f[v & 1], f[v & 1 ^ 1] - x) + v
return max(f)
| Solution |
python | walkccc__LeetCode | solutions/200. Number of Islands/200.py | {
"start": 0,
"end": 720
} | class ____:
def numIslands(self, grid: list[list[str]]) -> int:
DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
m = len(grid)
n = len(grid[0])
def bfs(r, c):
q = collections.deque([(r, c)])
grid[r][c] = '2' # Mark '2' as visited.
while q:
i, j = q.popleft()
for dx, dy in DIRS:
x = i + dx
y = j + dy
if x < 0 or x == m or y < 0 or y == n:
continue
if grid[x][y] != '1':
continue
q.append((x, y))
grid[x][y] = '2' # Mark '2' as visited.
ans = 0
for i in range(m):
for j in range(n):
if grid[i][j] == '1':
bfs(i, j)
ans += 1
return ans
| Solution |
python | pandas-dev__pandas | pandas/io/pytables.py | {
"start": 68951,
"end": 79633
class ____:
    """
    an index column description class

    Parameters
    ----------
    axis : axis which I reference
    values : the ndarray like converted values
    kind : a string description of this type
    typ : the pytables type
    pos : the position in the pytables

    """

    # An index column is always indexable and data-indexable in the table.
    is_an_indexable: bool = True
    is_data_indexable: bool = True
    # Keys of `info` that this column reads/writes in update_info/set_info.
    _info_fields = ["freq", "tz", "index_name"]

    def __init__(
        self,
        name: str,
        values=None,
        kind=None,
        typ=None,
        cname: str | None = None,
        axis=None,
        pos=None,
        freq=None,
        tz=None,
        index_name=None,
        ordered=None,
        table=None,
        meta=None,
        metadata=None,
    ) -> None:
        if not isinstance(name, str):
            raise ValueError("`name` must be a str.")

        self.values = values
        self.kind = kind
        self.typ = typ
        self.name = name
        self.cname = cname or name  # column name defaults to the index name
        self.axis = axis
        self.pos = pos
        self.freq = freq
        self.tz = tz
        self.index_name = index_name
        self.ordered = ordered
        self.table = table
        self.meta = meta
        self.metadata = metadata

        if pos is not None:
            self.set_pos(pos)

        # These are ensured as long as the passed arguments match the
        # constructor annotations.
        assert isinstance(self.name, str)
        assert isinstance(self.cname, str)

    @property
    def itemsize(self) -> int:
        # Assumes self.typ has already been initialized
        return self.typ.itemsize

    @property
    def kind_attr(self) -> str:
        # Name of the HDF5 attribute storing this column's kind.
        return f"{self.name}_kind"

    def set_pos(self, pos: int) -> None:
        """set the position of this column in the Table"""
        self.pos = pos
        if pos is not None and self.typ is not None:
            self.typ._v_pos = pos

    def __repr__(self) -> str:
        temp = tuple(
            map(pprint_thing, (self.name, self.cname, self.axis, self.pos, self.kind))
        )
        return ",".join(
            [
                f"{key}->{value}"
                for key, value in zip(
                    ["name", "cname", "axis", "pos", "kind"], temp, strict=True
                )
            ]
        )

    def __eq__(self, other: object) -> bool:
        """compare 2 col items"""
        # NOTE: intentionally tolerant -- compares attribute-by-attribute
        # via getattr with a None default, so `other` need not be the
        # same class.
        return all(
            getattr(self, a, None) == getattr(other, a, None)
            for a in ["name", "cname", "axis", "pos"]
        )

    def __ne__(self, other) -> bool:
        return not self.__eq__(other)

    @property
    def is_indexed(self) -> bool:
        """return whether I am an indexed column"""
        if not hasattr(self.table, "cols"):
            # e.g. if infer hasn't been called yet, self.table will be None.
            return False
        return getattr(self.table.cols, self.cname).is_indexed

    def convert(
        self, values: np.ndarray, nan_rep, encoding: str, errors: str
    ) -> tuple[np.ndarray, np.ndarray] | tuple[Index, Index]:
        """
        Convert the data from this selection to the appropriate pandas type.
        """
        assert isinstance(values, np.ndarray), type(values)

        # values is a recarray
        if values.dtype.fields is not None:
            # Copy, otherwise values will be a view
            # preventing the original recarry from being free'ed
            values = values[self.cname].copy()

        val_kind = self.kind
        values = _maybe_convert(values, val_kind, encoding, errors)
        kwargs = {}
        kwargs["name"] = self.index_name

        if self.freq is not None:
            kwargs["freq"] = self.freq

        factory: type[Index | DatetimeIndex] = Index
        if lib.is_np_dtype(values.dtype, "M") or isinstance(
            values.dtype, DatetimeTZDtype
        ):
            factory = DatetimeIndex
        elif values.dtype == "i8" and "freq" in kwargs:
            # PeriodIndex data is stored as i8
            # error: Incompatible types in assignment (expression has type
            # "Callable[[Any, KwArg(Any)], PeriodIndex]", variable has type
            # "Union[Type[Index], Type[DatetimeIndex]]")
            factory = lambda x, **kwds: PeriodIndex.from_ordinals(  # type: ignore[assignment]
                x, freq=kwds.get("freq", None)
            )._rename(kwds["name"])

        # making an Index instance could throw a number of different errors
        try:
            new_pd_index = factory(values, **kwargs)
        except UnicodeEncodeError as err:
            # Fall back to the python string dtype when surrogates cannot
            # be encoded and the caller opted into "surrogatepass".
            if (
                errors == "surrogatepass"
                and using_string_dtype()
                and str(err).endswith("surrogates not allowed")
                and HAS_PYARROW
            ):
                new_pd_index = factory(
                    values,
                    dtype=StringDtype(storage="python", na_value=np.nan),
                    **kwargs,
                )
            else:
                raise
        except ValueError:
            # if the output freq is different that what we recorded,
            # it should be None (see also 'doc example part 2')
            if "freq" in kwargs:
                kwargs["freq"] = None
            new_pd_index = factory(values, **kwargs)

        final_pd_index: Index
        if self.tz is not None and isinstance(new_pd_index, DatetimeIndex):
            # Stored values are UTC; localize then convert to the saved tz.
            final_pd_index = new_pd_index.tz_localize("UTC").tz_convert(self.tz)
        else:
            final_pd_index = new_pd_index
        return final_pd_index, final_pd_index

    def take_data(self):
        """return the values"""
        return self.values

    @property
    def attrs(self):
        return self.table._v_attrs

    @property
    def description(self):
        return self.table.description

    @property
    def col(self):
        """return my current col description"""
        return getattr(self.description, self.cname, None)

    @property
    def cvalues(self):
        """return my cython values"""
        return self.values

    def __iter__(self) -> Iterator:
        return iter(self.values)

    def maybe_set_size(self, min_itemsize=None) -> None:
        """
        maybe set a string col itemsize:
        min_itemsize can be an integer or a dict with this columns name
        with an integer size
        """
        if self.kind == "string":
            if isinstance(min_itemsize, dict):
                min_itemsize = min_itemsize.get(self.name)

            if min_itemsize is not None and self.typ.itemsize < min_itemsize:
                self.typ = _tables().StringCol(itemsize=min_itemsize, pos=self.pos)

    def validate_names(self) -> None:
        # No name constraints for a plain index column; DataCol overrides.
        pass

    def validate_and_set(self, handler: AppendableTable, append: bool) -> None:
        """Run all validations against *handler* and persist attributes."""
        self.table = handler.table
        self.validate_col()
        self.validate_attr(append)
        self.validate_metadata(handler)
        self.write_metadata(handler)
        self.set_attr()

    def validate_col(self, itemsize=None):
        """validate this column: return the compared against itemsize"""
        # validate this column for string truncation (or reset to the max size)
        if self.kind == "string":
            c = self.col
            if c is not None:
                if itemsize is None:
                    itemsize = self.itemsize
                if c.itemsize < itemsize:
                    raise ValueError(
                        f"Trying to store a string with len [{itemsize}] in "
                        f"[{self.cname}] column but\nthis column has a limit of "
                        f"[{c.itemsize}]!\nConsider using min_itemsize to "
                        "preset the sizes on these columns"
                    )
                return c.itemsize

        return None

    def validate_attr(self, append: bool) -> None:
        # check for backwards incompatibility
        if append:
            existing_kind = getattr(self.attrs, self.kind_attr, None)
            if existing_kind is not None and existing_kind != self.kind:
                raise TypeError(
                    f"incompatible kind in col [{existing_kind} - {self.kind}]"
                )

    def update_info(self, info) -> None:
        """
        set/update the info for this indexable with the key/value
        if there is a conflict raise/warn as needed
        """
        for key in self._info_fields:
            value = getattr(self, key, None)
            idx = info.setdefault(self.name, {})

            existing_value = idx.get(key)
            if key in idx and value is not None and existing_value != value:
                # frequency/name just warn
                if key in ["freq", "index_name"]:
                    ws = attribute_conflict_doc % (key, existing_value, value)
                    warnings.warn(
                        ws, AttributeConflictWarning, stacklevel=find_stack_level()
                    )

                    # reset
                    idx[key] = None
                    setattr(self, key, None)

                else:
                    raise ValueError(
                        f"invalid info for [{self.name}] for [{key}], "
                        f"existing_value [{existing_value}] conflicts with "
                        f"new value [{value}]"
                    )
            elif value is not None or existing_value is not None:
                idx[key] = value

    def set_info(self, info) -> None:
        """set my state from the passed info"""
        idx = info.get(self.name)
        if idx is not None:
            self.__dict__.update(idx)

    def set_attr(self) -> None:
        """set the kind for this column"""
        setattr(self.attrs, self.kind_attr, self.kind)

    def validate_metadata(self, handler: AppendableTable) -> None:
        """validate that kind=category does not change the categories"""
        if self.meta == "category":
            new_metadata = self.metadata
            cur_metadata = handler.read_metadata(self.cname)
            if (
                new_metadata is not None
                and cur_metadata is not None
                and not array_equivalent(
                    new_metadata, cur_metadata, strict_nan=True, dtype_equal=True
                )
            ):
                raise ValueError(
                    "cannot append a categorical with "
                    "different categories to the existing"
                )

    def write_metadata(self, handler: AppendableTable) -> None:
        """set the meta data"""
        if self.metadata is not None:
            handler.write_metadata(self.cname, self.metadata)
| IndexCol |
python | sympy__sympy | sympy/physics/continuum_mechanics/arch.py | {
"start": 574,
"end": 39243
} | class ____:
"""
This class is used to solve problems related to a three hinged arch(determinate) structure.\n
An arch is a curved vertical structure spanning an open space underneath it.\n
Arches can be used to reduce the bending moments in long-span structures.\n
Arches are used in structural engineering(over windows, door and even bridges)\n
because they can support a very large mass placed on top of them.
Example
========
>>> from sympy.physics.continuum_mechanics.arch import Arch
>>> a = Arch((0,0),(10,0),crown_x=5,crown_y=5)
>>> a.get_shape_eqn
5 - (x - 5)**2/5
>>> from sympy.physics.continuum_mechanics.arch import Arch
>>> a = Arch((0,0),(10,1),crown_x=6)
>>> a.get_shape_eqn
9/5 - (x - 6)**2/20
"""
def __init__(self,left_support,right_support,**kwargs):
self._shape_eqn = None
self._left_support = (sympify(left_support[0]),sympify(left_support[1]))
self._right_support = (sympify(right_support[0]),sympify(right_support[1]))
self._crown_x = None
self._crown_y = None
if 'crown_x' in kwargs:
self._crown_x = sympify(kwargs['crown_x'])
if 'crown_y' in kwargs:
self._crown_y = sympify(kwargs['crown_y'])
self._shape_eqn = self.get_shape_eqn
self._conc_loads = {}
self._distributed_loads = {}
self._loads = {'concentrated': self._conc_loads, 'distributed':self._distributed_loads}
self._loads_applied = {}
self._supports = {'left':'hinge', 'right':'hinge'}
self._member = None
self._member_force = None
self._reaction_force = {Symbol('R_A_x'):0, Symbol('R_A_y'):0, Symbol('R_B_x'):0, Symbol('R_B_y'):0}
self._points_disc_x = set()
self._points_disc_y = set()
self._moment_x = {}
self._moment_y = {}
self._load_x = {}
self._load_y = {}
self._moment_x_func = Piecewise((0,True))
self._moment_y_func = Piecewise((0,True))
self._load_x_func = Piecewise((0,True))
self._load_y_func = Piecewise((0,True))
self._bending_moment = None
self._shear_force = None
self._axial_force = None
# self._crown = (sympify(crown[0]),sympify(crown[1]))
@property
def get_shape_eqn(self):
"returns the equation of the shape of arch developed"
if self._shape_eqn:
return self._shape_eqn
x,y,c = symbols('x y c')
a = Symbol('a',positive=False)
if self._crown_x and self._crown_y:
x0 = self._crown_x
y0 = self._crown_y
parabola_eqn = a*(x-x0)**2 + y0 - y
eq1 = parabola_eqn.subs({x:self._left_support[0], y:self._left_support[1]})
solution = solve((eq1),(a))
parabola_eqn = solution[0]*(x-x0)**2 + y0
if(parabola_eqn.subs({x:self._right_support[0]}) != self._right_support[1]):
raise ValueError("provided coordinates of crown and supports are not consistent with parabolic arch")
elif self._crown_x:
x0 = self._crown_x
parabola_eqn = a*(x-x0)**2 + c - y
eq1 = parabola_eqn.subs({x:self._left_support[0], y:self._left_support[1]})
eq2 = parabola_eqn.subs({x:self._right_support[0], y:self._right_support[1]})
solution = solve((eq1,eq2),(a,c))
if len(solution) <2 or solution[a] == 0:
raise ValueError("parabolic arch cannot be constructed with the provided coordinates, try providing crown_y")
parabola_eqn = solution[a]*(x-x0)**2+ solution[c]
self._crown_y = solution[c]
else:
raise KeyError("please provide crown_x to construct arch")
return parabola_eqn
@property
def get_loads(self):
    """
    Return all applied loads grouped by kind.

    The dict maps ``'concentrated'`` and ``'distributed'`` to the internal
    per-label load dicts (position/angle for concentrated loads, start/end
    and magnitude for distributed loads). The live internal dicts are
    returned, not copies.
    """
    return self._loads
@property
def supports(self):
    """
    Return the support types as a dict with keys ``'left'`` and ``'right'``,
    each mapped to ``'hinge'`` or ``'roller'``.
    """
    return self._supports
@property
def left_support(self):
    """
    Return the ``(x, y)`` position of the left support.
    """
    return self._left_support
@property
def right_support(self):
    """
    Return the ``(x, y)`` position of the right support.
    """
    return self._right_support
@property
def reaction_force(self):
    """
    Return the reaction-force components as a dict keyed by the symbols
    ``R_A_x``, ``R_A_y``, ``R_B_x``, ``R_B_y``.

    Values are 0 until ``solve()`` has been called.
    """
    return self._reaction_force
def apply_load(self,order,label,start,mag,end=None,angle=None):
    """
    This method adds load to the Arch.

    Parameters
    ==========

    order : Integer
        Order of the applied load.

        - For point/concentrated loads, order = -1
        - For distributed load, order = 0

    label : String or Symbol
        The label of the load

        - should not use 'A' or 'B' as it is used for supports.

    start : Float

        - For concentrated/point loads, start is the x coordinate
        - For distributed loads, start is the starting position of distributed load

    mag : Sympifyable
        Magnitude of the applied load. Must be positive

    end : Float
        Required for distributed loads

        - For concentrated/point load , end is None(may not be given)
        - For distributed loads, end is the end position of distributed load

    angle: Sympifyable
        The angle in degrees, the load vector makes with the horizontal
        in the counter-clockwise direction.
        Required for concentrated loads.

    Raises
    ======
    ValueError
        If ``label`` is already in use, is a reserved support label, or
        ``order`` is neither -1 nor 0.
    KeyError
        If a distributed load is given without ``end``, or ``end < start``.
    TypeError
        If a concentrated load is given without ``angle``.

    Examples
    ========

    For applying distributed load

    >>> from sympy.physics.continuum_mechanics.arch import Arch
    >>> a = Arch((0,0),(10,0),crown_x=5,crown_y=5)
    >>> a.apply_load(0,'C',start=3,end=5,mag=-10)

    For applying point/concentrated_loads

    >>> from sympy.physics.continuum_mechanics.arch import Arch
    >>> a = Arch((0,0),(10,0),crown_x=5,crown_y=5)
    >>> a.apply_load(-1,'C',start=2,mag=15,angle=45)
    """
    y = Symbol('y')
    x = Symbol('x')
    x0 = Symbol('x0')
    order= sympify(order)
    mag = sympify(mag)
    angle = sympify(angle)
    if label in self._loads_applied:
        raise ValueError("load with the given label already exists")
    if label in ['A','B']:
        raise ValueError("cannot use the given label, reserved for supports")
    if order == 0:
        if end is None or end<start:
            raise KeyError("provide end greater than start")
        self._distributed_loads[label] = {'start':start, 'end':end, 'f_y': mag}
        self._points_disc_y.add(start)
        # Accumulate the vertical load and its moment contribution that
        # become active for x >= start; Min(x, end) caps the loaded span.
        if start in self._moment_y:
            self._moment_y[start] -= mag*(Min(x,end)-start)*(x0-(start+(Min(x,end)))/2)
            self._load_y[start] += mag*(Min(end,x)-start)
        else:
            self._moment_y[start] = -mag*(Min(x,end)-start)*(x0-(start+(Min(x,end)))/2)
            self._load_y[start] = mag*(Min(end,x)-start)
        self._loads_applied[label] = 'distributed'
    elif order == -1:
        if angle is None:
            raise TypeError("please provide direction of force")
        # A concentrated load acts on the arch itself, at the shape height.
        height = self._shape_eqn.subs({'x':start})
        self._conc_loads[label] = {'x':start, 'y':height, 'f_x':mag*cos(rad(angle)), 'f_y': mag*sin(rad(angle)), 'mag':mag, 'angle':angle}
        self._points_disc_x.add(start)
        self._points_disc_y.add(start)
        # Horizontal component: force and its moment about a section at
        # height y, active for x >= start.
        if start in self._moment_x:
            self._moment_x[start] += self._conc_loads[label]['f_x']*(y-self._conc_loads[label]['y'])
            self._load_x[start] += self._conc_loads[label]['f_x']
        else:
            self._moment_x[start] = self._conc_loads[label]['f_x']*(y-self._conc_loads[label]['y'])
            self._load_x[start] = self._conc_loads[label]['f_x']
        # Vertical component: force and its moment about a section at x0.
        if start in self._moment_y:
            self._moment_y[start] -= self._conc_loads[label]['f_y']*(x0-start)
            self._load_y[start] += self._conc_loads[label]['f_y']
        else:
            self._moment_y[start] = -self._conc_loads[label]['f_y']*(x0-start)
            self._load_y[start] = self._conc_loads[label]['f_y']
        self._loads_applied[label] = 'concentrated'
    else:
        # Previously any other order was silently ignored, leaving the user
        # with no load applied and no diagnostic; fail loudly instead.
        raise ValueError("order must be -1 (concentrated) or 0 (distributed)")
def remove_load(self,label):
    """
    This methods removes the load applied to the arch

    Parameters
    ==========

    label : String or Symbol
        The label of the applied load

    Raises
    ======
    ValueError
        If no load with the given label exists.

    Examples
    ========

    >>> from sympy.physics.continuum_mechanics.arch import Arch
    >>> a = Arch((0,0),(10,0),crown_x=5,crown_y=5)
    >>> a.apply_load(0,'C',start=3,end=5,mag=-10)
    >>> a.remove_load('C')
    removed load C: {'start': 3, 'end': 5, 'f_y': -10}
    """
    y = Symbol('y')
    x = Symbol('x')
    x0 = Symbol('x0')
    if label in self._distributed_loads :
        self._loads_applied.pop(label)
        start = self._distributed_loads[label]['start']
        end = self._distributed_loads[label]['end']
        mag = self._distributed_loads[label]['f_y']
        # Subtract this load's contribution from the accumulated terms.
        self._load_y[start] -= mag*(Min(x,end)-start)
        self._moment_y[start] += mag*(Min(x,end)-start)*(x0-(start+(Min(x,end)))/2)
        val = self._distributed_loads.pop(label)
        # Only drop the discontinuity point when no remaining load still
        # starts/acts there; unconditional removal made solve() skip the
        # surviving load's contribution at a shared position.
        if not (any(v['start'] == start for v in self._distributed_loads.values())
                or any(v['x'] == start for v in self._conc_loads.values())):
            self._points_disc_y.discard(start)
        print(f"removed load {label}: {val}")
    elif label in self._conc_loads :
        self._loads_applied.pop(label)
        start = self._conc_loads[label]['x']
        # Subtract this load's contribution from the accumulated terms.
        self._moment_y[start] += self._conc_loads[label]['f_y']*(x0-start)
        self._moment_x[start] -= self._conc_loads[label]['f_x']*(y-self._conc_loads[label]['y'])
        self._load_x[start] -= self._conc_loads[label]['f_x']
        self._load_y[start] -= self._conc_loads[label]['f_y']
        val = self._conc_loads.pop(label)
        # See note above: keep shared discontinuity points alive.
        if not any(v['x'] == start for v in self._conc_loads.values()):
            self._points_disc_x.discard(start)
            if not any(v['start'] == start for v in self._distributed_loads.values()):
                self._points_disc_y.discard(start)
        print(f"removed load {label}: {val}")
    else :
        raise ValueError("label not found")
def change_support_position(self, left_support=None, right_support=None):
    """
    Relocate one or both supports of the arch.

    Any argument left as ``None`` keeps the corresponding support at its
    current position. The cached shape equation is rebuilt afterwards.

    Parameters
    ==========

    left_support: tuple (x, y)
        x: float
            x-coordinate value of the left_support
        y: float
            y-coordinate value of the left_support

    right_support: tuple (x, y)
        x: float
            x-coordinate value of the right_support
        y: float
            y-coordinate value of the right_support
    """
    for attr_name, new_pos in (('_left_support', left_support),
                               ('_right_support', right_support)):
        if new_pos is not None:
            setattr(self, attr_name, (new_pos[0], new_pos[1]))
    # Invalidate the cached shape so the property recomputes it for the
    # new geometry.
    self._shape_eqn = None
    self._shape_eqn = self.get_shape_eqn
def change_crown_position(self,crown_x=None,crown_y=None):
    """
    Change the position of the crown/hinge of the arch

    Parameters
    ==========

    crown_x: Float
        The x coordinate of the position of the hinge

        - if not provided, defaults to old value

    crown_y: Float
        The y coordinate of the position of the hinge

        - if not provided defaults to None
    """
    # Only overwrite crown_x when a new value is supplied, as documented;
    # previously a call without crown_x silently discarded the old value,
    # making the subsequent shape rebuild raise.
    if crown_x is not None:
        self._crown_x = crown_x
    # crown_y deliberately resets to None when omitted, so the shape fit
    # re-derives the crown height from the supports.
    self._crown_y = crown_y
    # Invalidate and rebuild the cached shape equation.
    self._shape_eqn = None
    self._shape_eqn = self.get_shape_eqn
def change_support_type(self,left_support=None,right_support=None):
    """
    Set the support type at either end of the arch.

    Each end accepts either a roller or a hinge support; an end whose
    argument is omitted keeps its current type.

    Parameters
    ==========

    left_support, right_support : string
        Type of support at respective end

        - For roller support , left_support/right_support = "roller"
        - For hinged support, left_support/right_support = "hinge"
        - defaults to hinge if value not provided

    Examples
    ========

    For applying roller support at right end

    >>> from sympy.physics.continuum_mechanics.arch import Arch
    >>> a = Arch((0,0),(10,0),crown_x=5,crown_y=5)
    >>> a.change_support_type(right_support="roller")
    """
    valid_types = ('roller','hinge')
    # Validate and assign each end in turn; an invalid value raises before
    # the remaining end is touched.
    for side, support_kind in (('left', left_support), ('right', right_support)):
        if support_kind:
            if support_kind not in valid_types:
                raise ValueError("supports must only be roller or hinge")
            self._supports[side] = support_kind
def add_member(self,y):
    """
    This method adds a member/rod at a particular height y.

    A rod is used for stability of the structure in case of a roller support.
    The rod spans horizontally between the two points where the arch reaches
    height ``y``; it is stored as ``self._member = (x_right, x_left, y)``.

    Raises
    ======
    ValueError
        If ``y`` lies above the crown or below the lower support.
    """
    if y>self._crown_y or y<min(self._left_support[1], self._right_support[1]):
        raise ValueError(f"position of support must be between y={min(self._left_support[1], self._right_support[1])} and y={self._crown_y}")
    x = Symbol('x')
    # For the parabola a*(x-x0)**2 + y0 the derivative is 2*a*(x-x0);
    # evaluating it at crown_x + 1 yields 2*a, so halving recovers the
    # leading coefficient `a` without re-fitting the curve.
    a = diff(self._shape_eqn,x).subs(x,self._crown_x+1)/2
    # Solve y = a*x_diff**2 + crown_y for the horizontal half-span of the rod.
    x_diff = sqrt((y - self._crown_y)/a)
    x1 = self._crown_x + x_diff
    x2 = self._crown_x - x_diff
    self._member = (x1,x2,y)
def shear_force_at(self, pos = None, **kwargs):
    """
    Return the shear force at the given x-coordinate.

    When ``pos`` is None the symbolic shear-force expression is returned
    instead. A ``dir`` keyword selects a one-sided limit at discontinuities
    (forwarded to ``limit``; presumably ``'+'``/``'-'`` as in SymPy).
    """
    if pos is None:
        return self._shear_force
    x = Symbol('x')
    if 'dir' in kwargs:
        return limit(self._shear_force, x, pos, dir=kwargs['dir'])
    return self._shear_force.subs(x, pos)
def bending_moment_at(self, pos = None, **kwargs):
    """
    Return the bending moment at the given x-coordinate.

    When ``pos`` is None the symbolic bending-moment expression (in the
    section variable ``x0``) is returned instead. A ``dir`` keyword selects
    a one-sided limit at discontinuities (forwarded to ``limit``).
    """
    if pos is None:
        return self._bending_moment
    x0 = Symbol('x0')
    if 'dir' in kwargs:
        return limit(self._bending_moment, x0, pos, dir=kwargs['dir'])
    return self._bending_moment.subs(x0, pos)
def axial_force_at(self,pos = None, **kwargs):
    """
    Return the axial/normal force generated at the given x-coordinate.

    When ``pos`` is None the symbolic axial-force expression is returned
    instead. A ``dir`` keyword selects a one-sided limit at discontinuities
    (forwarded to ``limit``).
    """
    if pos is None:
        return self._axial_force
    x = Symbol('x')
    if 'dir' in kwargs:
        return limit(self._axial_force, x, pos, dir=kwargs['dir'])
    return self._axial_force.subs(x, pos)
def solve(self):
    """
    Solve for the reaction forces generated at the supports, the bending
    moment, shear force and axial force in the arch, and the tension
    produced in the member if one is used.

    Results are stored on the instance: ``reaction_force`` is updated and
    the expressions used by ``bending_moment_at``, ``shear_force_at`` and
    ``axial_force_at`` are (re)computed.

    Examples
    ========
    >>> from sympy.physics.continuum_mechanics.arch import Arch
    >>> a = Arch((0,0),(10,0),crown_x=5,crown_y=5)
    >>> a.apply_load(0,'C',start=3,end=5,mag=-10)
    >>> a.solve()
    >>> a.reaction_force
    {R_A_x: 8, R_A_y: 12, R_B_x: -8, R_B_y: 8}
    >>> from sympy import Symbol
    >>> t = Symbol('t')
    >>> from sympy.physics.continuum_mechanics.arch import Arch
    >>> a = Arch((0,0),(16,0),crown_x=8,crown_y=5)
    >>> a.apply_load(0,'C',start=3,end=5,mag=t)
    >>> a.solve()
    >>> a.reaction_force
    {R_A_x: -4*t/5, R_A_y: -3*t/2, R_B_x: 4*t/5, R_B_y: -t/2}
    >>> a.bending_moment_at(4)
    -5*t/2
    """
    y = Symbol('y')
    x = Symbol('x')
    x0 = Symbol('x0')
    discontinuity_points_x = sorted(self._points_disc_x)
    discontinuity_points_y = sorted(self._points_disc_y)
    # Reset the cumulative piecewise functions so repeated solve() calls do
    # not stack pieces left over from a previous run.
    self._moment_x_func = Piecewise((0,True))
    self._moment_y_func = Piecewise((0,True))
    self._load_x_func = Piecewise((0,True))
    self._load_y_func = Piecewise((0,True))
    accumulated_x_moment = 0
    accumulated_y_moment = 0
    accumulated_x_load = 0
    accumulated_y_load = 0
    # Build running totals of load/moment contributions, switching value at
    # every discontinuity point (piecewise in x).
    for point in discontinuity_points_x:
        cond = (x >= point)
        accumulated_x_load += self._load_x[point]
        accumulated_x_moment += self._moment_x[point]
        self._load_x_func = Piecewise((accumulated_x_load,cond),(self._load_x_func,True))
        self._moment_x_func = Piecewise((accumulated_x_moment,cond),(self._moment_x_func,True))
    for point in discontinuity_points_y:
        cond = (x >= point)
        accumulated_y_moment += self._moment_y[point]
        accumulated_y_load += self._load_y[point]
        self._load_y_func = Piecewise((accumulated_y_load,cond),(self._load_y_func,True))
        self._moment_y_func = Piecewise((accumulated_y_moment,cond),(self._moment_y_func,True))
    # Net moment of all applied loads about the left support A, and the net
    # moment of each half of the arch about the crown hinge.
    moment_A = self._moment_y_func.subs(x,self._right_support[0]).subs(x0,self._left_support[0]) +\
        self._moment_x_func.subs(x,self._right_support[0]).subs(y,self._left_support[1])
    moment_hinge_left = self._moment_y_func.subs(x,self._crown_x).subs(x0,self._crown_x) +\
        self._moment_x_func.subs(x,self._crown_x).subs(y,self._crown_y)
    moment_hinge_right = self._moment_y_func.subs(x,self._right_support[0]).subs(x0,self._crown_x)- \
        self._moment_y_func.subs(x,self._crown_x).subs(x0,self._crown_x) +\
        self._moment_x_func.subs(x,self._right_support[0]).subs(y,self._crown_y) -\
        self._moment_x_func.subs(x,self._crown_x).subs(y,self._crown_y)
    # Net applied force components over the whole span.
    net_x = self._load_x_func.subs(x,self._right_support[0])
    net_y = self._load_y_func.subs(x,self._right_support[0])
    if (self._supports['left']=='roller' or self._supports['right']=='roller') and not self._member:
        print("member must be added if any of the supports is roller")
        return
    R_A_x, R_A_y, R_B_x, R_B_y, T = symbols('R_A_x R_A_y R_B_x R_B_y T')
    # NOTE(review): within each roller branch, if the member height matches
    # none of the elif conditions, `solution` stays unbound and a NameError
    # follows -- consider an explicit error for that geometry.
    if self._supports['left'] == 'roller' and self._supports['right'] == 'roller':
        # Both supports roll: horizontal reactions vanish; the member
        # tension T carries the horizontal equilibrium/moment terms.
        if self._member[2]>=max(self._left_support[1],self._right_support[1]):
            if net_x!=0:
                raise ValueError("net force in x direction not possible under the specified conditions")
            else:
                eq1 = Eq(R_A_x ,0)
                eq2 = Eq(R_B_x, 0)
                eq3 = Eq(R_A_y + R_B_y + net_y,0)
                eq4 = Eq(R_B_y*(self._right_support[0]-self._left_support[0])-\
                    R_B_x*(self._right_support[1]-self._left_support[1])+moment_A,0)
                eq5 = Eq(moment_hinge_right + R_B_y*(self._right_support[0]-self._crown_x) +\
                    T*(self._member[2]-self._crown_y),0)
                solution = solve((eq1,eq2,eq3,eq4,eq5),(R_A_x,R_A_y,R_B_x,R_B_y,T))
        elif self._member[2]>=self._left_support[1]:
            eq1 = Eq(R_A_x ,0)
            eq2 = Eq(R_B_x, 0)
            eq3 = Eq(R_A_y + R_B_y + net_y,0)
            eq4 = Eq(R_B_y*(self._right_support[0]-self._left_support[0])-\
                T*(self._member[2]-self._left_support[1])+moment_A,0)
            eq5 = Eq(T+net_x,0)
            solution = solve((eq1,eq2,eq3,eq4,eq5),(R_A_x,R_A_y,R_B_x,R_B_y,T))
        elif self._member[2]>=self._right_support[1]:
            eq1 = Eq(R_A_x ,0)
            eq2 = Eq(R_B_x, 0)
            eq3 = Eq(R_A_y + R_B_y + net_y,0)
            eq4 = Eq(R_B_y*(self._right_support[0]-self._left_support[0])+\
                T*(self._member[2]-self._left_support[1])+moment_A,0)
            eq5 = Eq(T-net_x,0)
            solution = solve((eq1,eq2,eq3,eq4,eq5),(R_A_x,R_A_y,R_B_x,R_B_y,T))
    elif self._supports['left'] == 'roller':
        # Left support rolls: R_A_x = 0; the hinge at B takes horizontal load.
        if self._member[2]>=max(self._left_support[1], self._right_support[1]):
            eq1 = Eq(R_A_x ,0)
            eq2 = Eq(R_B_x+net_x,0)
            eq3 = Eq(R_A_y + R_B_y + net_y,0)
            eq4 = Eq(R_B_y*(self._right_support[0]-self._left_support[0])-\
                R_B_x*(self._right_support[1]-self._left_support[1])+moment_A,0)
            eq5 = Eq(moment_hinge_left + R_A_y*(self._left_support[0]-self._crown_x) -\
                T*(self._member[2]-self._crown_y),0)
            solution = solve((eq1,eq2,eq3,eq4,eq5),(R_A_x,R_A_y,R_B_x,R_B_y,T))
        elif self._member[2]>=self._left_support[1]:
            eq1 = Eq(R_A_x ,0)
            eq2 = Eq(R_B_x+ T +net_x,0)
            eq3 = Eq(R_A_y + R_B_y + net_y,0)
            # FIX: the moment arm of the horizontal tension T about A is the
            # vertical distance member_y - A_y; this used the x-coordinate
            # (_left_support[0]) where every parallel branch uses [1].
            eq4 = Eq(R_B_y*(self._right_support[0]-self._left_support[0])-\
                R_B_x*(self._right_support[1]-self._left_support[1])-\
                T*(self._member[2]-self._left_support[1])+moment_A,0)
            eq5 = Eq(moment_hinge_left + R_A_y*(self._left_support[0]-self._crown_x)-\
                T*(self._member[2]-self._crown_y),0)
            solution = solve((eq1,eq2,eq3,eq4,eq5),(R_A_x,R_A_y,R_B_x,R_B_y,T))
        # FIX: compare the member height with the right support's
        # y-coordinate; this used [0] (its x-coordinate) where the sibling
        # branches compare against [1].
        elif self._member[2]>=self._right_support[1]:
            eq1 = Eq(R_A_x,0)
            eq2 = Eq(R_B_x- T +net_x,0)
            eq3 = Eq(R_A_y + R_B_y + net_y,0)
            eq4 = Eq(moment_hinge_left+R_A_y*(self._left_support[0]-self._crown_x),0)
            eq5 = Eq(moment_A+R_B_y*(self._right_support[0]-self._left_support[0])-\
                R_B_x*(self._right_support[1]-self._left_support[1])+\
                T*(self._member[2]-self._left_support[1]),0)
            solution = solve((eq1,eq2,eq3,eq4,eq5),(R_A_x,R_A_y,R_B_x,R_B_y,T))
    elif self._supports['right'] == 'roller':
        # Right support rolls: R_B_x = 0; the hinge at A takes horizontal load.
        if self._member[2]>=max(self._left_support[1], self._right_support[1]):
            eq1 = Eq(R_B_x,0)
            eq2 = Eq(R_A_x+net_x,0)
            eq3 = Eq(R_A_y+R_B_y+net_y,0)
            eq4 = Eq(moment_hinge_right+R_B_y*(self._right_support[0]-self._crown_x)+\
                T*(self._member[2]-self._crown_y),0)
            eq5 = Eq(moment_A+R_B_y*(self._right_support[0]-self._left_support[0]),0)
            solution = solve((eq1,eq2,eq3,eq4,eq5),(R_A_x,R_A_y,R_B_x,R_B_y,T))
        elif self._member[2]>=self._left_support[1]:
            eq1 = Eq(R_B_x,0)
            eq2 = Eq(R_A_x+T+net_x,0)
            eq3 = Eq(R_A_y+R_B_y+net_y,0)
            eq4 = Eq(moment_hinge_right+R_B_y*(self._right_support[0]-self._crown_x),0)
            eq5 = Eq(moment_A-T*(self._member[2]-self._left_support[1])+\
                R_B_y*(self._right_support[0]-self._left_support[0]),0)
            solution = solve((eq1,eq2,eq3,eq4,eq5),(R_A_x,R_A_y,R_B_x,R_B_y,T))
        elif self._member[2]>=self._right_support[1]:
            eq1 = Eq(R_B_x,0)
            eq2 = Eq(R_A_x-T+net_x,0)
            eq3 = Eq(R_A_y+R_B_y+net_y,0)
            eq4 = Eq(moment_hinge_right+R_B_y*(self._right_support[0]-self._crown_x)+\
                T*(self._member[2]-self._crown_y),0)
            # FIX: the explicit `, 0` rhs was missing here (every other
            # equation has it); single-argument Eq is not supported by
            # modern SymPy.
            eq5 = Eq(moment_A+T*(self._member[2]-self._left_support[1])+\
                R_B_y*(self._right_support[0]-self._left_support[0]),0)
            solution = solve((eq1,eq2,eq3,eq4,eq5),(R_A_x,R_A_y,R_B_x,R_B_y,T))
    else:
        # Both supports hinged: four reaction unknowns, resolved by the
        # three equilibrium equations plus the zero-moment crown hinge.
        eq1 = Eq(R_A_x + R_B_x + net_x,0)
        eq2 = Eq(R_A_y + R_B_y + net_y,0)
        eq3 = Eq(R_B_y*(self._right_support[0]-self._left_support[0])-\
            R_B_x*(self._right_support[1]-self._left_support[1])+moment_A,0)
        eq4 = Eq(moment_hinge_right + R_B_y*(self._right_support[0]-self._crown_x) -\
            R_B_x*(self._right_support[1]-self._crown_y),0)
        solution = solve((eq1,eq2,eq3,eq4),(R_A_x,R_A_y,R_B_x,R_B_y))
    for symb in self._reaction_force:
        self._reaction_force[symb] = solution[symb]
    # Internal bending moment as a function of the section position x0.
    self._bending_moment = - (self._moment_x_func.subs(x,x0) + self._moment_y_func.subs(x,x0) -\
        solution[R_A_y]*(x0-self._left_support[0]) +\
        solution[R_A_x]*(self._shape_eqn.subs({x:x0})-self._left_support[1]))
    # Resolve the net internal force along/normal to the local tangent of
    # the arch to obtain axial and shear components.
    angle = atan(diff(self._shape_eqn,x))
    fx = (self._load_x_func+solution[R_A_x])
    fy = (self._load_y_func+solution[R_A_y])
    axial_force = fx*cos(angle) + fy*sin(angle)
    shear_force = -fx*sin(angle) + fy*cos(angle)
    self._axial_force = axial_force
    self._shear_force = shear_force
@doctest_depends_on(modules=('numpy',))
def draw(self):
    """
    This method returns a plot object containing the diagram of the specified arch along with the supports
    and forces applied to the structure.

    Examples
    ========

    >>> from sympy import Symbol
    >>> t = Symbol('t')
    >>> from sympy.physics.continuum_mechanics.arch import Arch
    >>> a = Arch((0,0),(40,0),crown_x=20,crown_y=12)
    >>> a.apply_load(-1,'C',8,150,angle=270)
    >>> a.apply_load(0,'D',start=20,end=40,mag=-4)
    >>> a.apply_load(-1,'E',10,t,angle=300)
    >>> p = a.draw()
    >>> p # doctest: +ELLIPSIS
    Plot object containing:
    [0]: cartesian line: 11.325 - 3*(x - 20)**2/100 for x over (0.0, 40.0)
    [1]: cartesian line: 12 - 3*(x - 20)**2/100 for x over (0.0, 40.0)
    ...
    >>> p.show()
    """
    x = Symbol('x')
    markers = []
    annotations = self._draw_loads()
    rectangles = []
    supports = self._draw_supports()
    markers+=supports
    xmax = self._right_support[0]
    xmin = self._left_support[0]
    ymin = min(self._left_support[1],self._right_support[1])
    ymax = self._crown_y
    # lim: characteristic plot dimension (larger of padded width/height),
    # used to scale marker offsets and the arch thickness.
    lim = max(xmax*1.1-xmin*0.8+1, ymax*1.1-ymin*0.8+1)
    rectangles = self._draw_rectangles()
    filler = self._draw_filler()
    rectangles+=filler
    if self._member is not None:
        # Small hollow circles mark the pinned ends of the member where it
        # meets the arch (only ends above the respective support height).
        if(self._member[2]>=self._right_support[1]):
            markers.append(
                {
                    'args':[[self._member[1]+0.005*lim],[self._member[2]]],
                    'marker':'o',
                    'markersize': 4,
                    'color': 'white',
                    'markerfacecolor':'none'
                }
            )
        if(self._member[2]>=self._left_support[1]):
            markers.append(
                {
                    'args':[[self._member[0]-0.005*lim],[self._member[2]]],
                    'marker':'o',
                    'markersize': 4,
                    'color': 'white',
                    'markerfacecolor':'none'
                }
            )
    # Hollow circle marking the crown hinge.
    markers.append({
        'args':[[self._crown_x],[self._crown_y-0.005*lim]],
        'marker':'o',
        'markersize': 5,
        'color':'white',
        'markerfacecolor':'none',
    })
    # Two curves are plotted: the inner edge (shape shifted down by the arch
    # thickness) and the centreline itself. Axis limits follow whichever
    # dimension produced `lim` so the diagram keeps a square aspect.
    # NOTE(review): the first branch uses xmin/xmax for ylim as well --
    # presumably intentional for the square aspect, but worth confirming.
    if lim==xmax*1.1-xmin*0.8+1:
        sing_plot = plot(self._shape_eqn-0.015*lim,
            self._shape_eqn,
            (x, self._left_support[0], self._right_support[0]),
            markers=markers,
            show=False,
            annotations=annotations,
            rectangles = rectangles,
            xlim=(xmin-0.05*lim, xmax*1.1),
            ylim=(xmin-0.05*lim, xmax*1.1),
            axis=False,
            line_color='brown')
    else:
        sing_plot = plot(self._shape_eqn-0.015*lim,
            self._shape_eqn,
            (x, self._left_support[0], self._right_support[0]),
            markers=markers,
            show=False,
            annotations=annotations,
            rectangles = rectangles,
            xlim=(ymin-0.05*lim, ymax*1.1),
            ylim=(ymin-0.05*lim, ymax*1.1),
            axis=False,
            line_color='brown')
    return sing_plot
def _draw_supports(self):
    """
    Build the matplotlib marker specs depicting both supports.

    Rollers are drawn as hollow circles, hinges as upward triangles
    (marker code 6); each support also gets a short ground line ('_').
    Offsets are scaled by ``max_diff``, the dominant padded plot dimension.
    """
    support_markers = []
    xmax = self._right_support[0]
    xmin = self._left_support[0]
    ymin = min(self._left_support[1],self._right_support[1])
    ymax = self._crown_y
    # Same characteristic dimension as used by draw()/other helpers.
    if abs(1.1*xmax-0.8*xmin)>abs(1.1*ymax-0.8*ymin):
        max_diff = 1.1*xmax-0.8*xmin
    else:
        max_diff = 1.1*ymax-0.8*ymin
    if self._supports['left']=='roller':
        support_markers.append(
            {
                'args':[
                    [self._left_support[0]],
                    [self._left_support[1]-0.02*max_diff]
                ],
                'marker':'o',
                'markersize':11,
                'color':'black',
                'markerfacecolor':'none'
            }
        )
    else:
        support_markers.append(
            {
                'args':[
                    [self._left_support[0]],
                    [self._left_support[1]-0.007*max_diff]
                ],
                'marker':6,
                'markersize':15,
                'color':'black',
                'markerfacecolor':'none'
            }
        )
    if self._supports['right']=='roller':
        support_markers.append(
            {
                'args':[
                    [self._right_support[0]],
                    [self._right_support[1]-0.02*max_diff]
                ],
                'marker':'o',
                'markersize':11,
                'color':'black',
                'markerfacecolor':'none'
            }
        )
    else:
        support_markers.append(
            {
                'args':[
                    [self._right_support[0]],
                    [self._right_support[1]-0.007*max_diff]
                ],
                'marker':6,
                'markersize':15,
                'color':'black',
                'markerfacecolor':'none'
            }
        )
    # Ground lines under each support.
    support_markers.append(
        {
            'args':[
                [self._right_support[0]],
                [self._right_support[1]-0.036*max_diff]
            ],
            'marker':'_',
            'markersize':15,
            'color':'black',
            'markerfacecolor':'none'
        }
    )
    support_markers.append(
        {
            'args':[
                [self._left_support[0]],
                [self._left_support[1]-0.036*max_diff]
            ],
            'marker':'_',
            'markersize':15,
            'color':'black',
            'markerfacecolor':'none'
        }
    )
    return support_markers
def _draw_rectangles(self):
    """
    Build rectangle specs for the member rod and the distributed-load bars.

    The member is drawn as a thin horizontal bar at its height; each
    distributed load gets an orange bar above the crown from which its
    arrows hang (see ``_draw_loads``). Sizes are scaled by ``max_diff``,
    the dominant padded plot dimension.
    """
    member = []
    xmax = self._right_support[0]
    xmin = self._left_support[0]
    ymin = min(self._left_support[1],self._right_support[1])
    ymax = self._crown_y
    # Same characteristic dimension as used by draw()/other helpers.
    if abs(1.1*xmax-0.8*xmin)>abs(1.1*ymax-0.8*ymin):
        max_diff = 1.1*xmax-0.8*xmin
    else:
        max_diff = 1.1*ymax-0.8*ymin
    if self._member is not None:
        # Member above both supports: spans between its two arch
        # intersections. Otherwise it is anchored at one support side.
        if self._member[2]>= max(self._left_support[1],self._right_support[1]):
            member.append(
                {
                    'xy':(self._member[0],self._member[2]-0.005*max_diff),
                    'width':self._member[1]-self._member[0],
                    'height': 0.01*max_diff,
                    'angle': 0,
                    'color':'brown',
                }
            )
        elif self._member[2]>=self._left_support[1]:
            member.append(
                {
                    'xy':(self._member[0],self._member[2]-0.005*max_diff),
                    'width':self._right_support[0]-self._member[0],
                    'height': 0.01*max_diff,
                    'angle': 0,
                    'color':'brown',
                }
            )
        else:
            member.append(
                {
                    'xy':(self._member[1],self._member[2]-0.005*max_diff),
                    'width':abs(self._left_support[0]-self._member[1]),
                    'height': 0.01*max_diff,
                    'angle': 180,
                    'color':'brown',
                }
            )
    if self._distributed_loads:
        for loads in self._distributed_loads:
            start = self._distributed_loads[loads]['start']
            end = self._distributed_loads[loads]['end']
            member.append(
                {
                    'xy':(start,self._crown_y+max_diff*0.15),
                    'width': (end-start),
                    'height': max_diff*0.01,
                    'color': 'orange'
                }
            )
    return member
def _draw_loads(self):
    """
    Build annotation specs (arrows and text labels) for all applied loads.

    Concentrated loads are drawn as a single blue arrow at the point of
    application, pointing along the load direction, with a "label: mag N"
    text. Distributed loads are drawn as a row of orange arrows hanging
    from the bar created by ``_draw_rectangles``, with a "label: mag N/m"
    text; arrow direction follows the sign of the magnitude.
    """
    load_annotations = []
    xmax = self._right_support[0]
    xmin = self._left_support[0]
    ymin = min(self._left_support[1],self._right_support[1])
    ymax = self._crown_y
    # Same characteristic dimension as used by draw()/other helpers.
    if abs(1.1*xmax-0.8*xmin)>abs(1.1*ymax-0.8*ymin):
        max_diff = 1.1*xmax-0.8*xmin
    else:
        max_diff = 1.1*ymax-0.8*ymin
    for load in self._conc_loads:
        x = self._conc_loads[load]['x']
        y = self._conc_loads[load]['y']
        angle = self._conc_loads[load]['angle']
        mag = self._conc_loads[load]['mag']
        # Arrow from the application point along the load direction.
        load_annotations.append(
            {
                'text':'',
                'xy':(
                    x+cos(rad(angle))*max_diff*0.08,
                    y+sin(rad(angle))*max_diff*0.08
                ),
                'xytext':(x,y),
                'fontsize':10,
                'fontweight': 'bold',
                'arrowprops':{'width':1.5, 'headlength':5, 'headwidth':5, 'facecolor':'blue','edgecolor':'blue'}
            }
        )
        # Text label placed slightly beyond the arrow head.
        load_annotations.append(
            {
                'text':f'{load}: {mag} N',
                'fontsize':10,
                'fontweight': 'bold',
                'xy': (x+cos(rad(angle))*max_diff*0.12,y+sin(rad(angle))*max_diff*0.12)
            }
        )
    for load in self._distributed_loads:
        start = self._distributed_loads[load]['start']
        end = self._distributed_loads[load]['end']
        mag = self._distributed_loads[load]['f_y']
        # Evenly spaced arrow positions over the loaded span; density scales
        # inversely with the plot dimension. The end point is always included.
        x_points = numpy.arange(start,end,(end-start)/(max_diff*0.25))
        x_points = numpy.append(x_points,end)
        for point in x_points:
            if(mag<0):
                # Downward load: arrows point down toward the arch.
                load_annotations.append(
                    {
                        'text':'',
                        'xy':(point,self._crown_y+max_diff*0.05),
                        'xytext': (point,self._crown_y+max_diff*0.15),
                        'arrowprops':{'width':1.5, 'headlength':5, 'headwidth':5, 'facecolor':'orange','edgecolor':'orange'}
                    }
                )
            else:
                # Upward load: arrows point up away from the bar.
                load_annotations.append(
                    {
                        'text':'',
                        'xy':(point,self._crown_y+max_diff*0.2),
                        'xytext': (point,self._crown_y+max_diff*0.15),
                        'arrowprops':{'width':1.5, 'headlength':5, 'headwidth':5, 'facecolor':'orange','edgecolor':'orange'}
                    }
                )
        # One text label per load, centred over the span, placed on the side
        # opposite the arrows.
        if(mag<0):
            load_annotations.append(
                {
                    'text':f'{load}: {abs(mag)} N/m',
                    'fontsize':10,
                    'fontweight': 'bold',
                    'xy':((start+end)/2,self._crown_y+max_diff*0.175)
                }
            )
        else:
            load_annotations.append(
                {
                    'text':f'{load}: {abs(mag)} N/m',
                    'fontsize':10,
                    'fontweight': 'bold',
                    'xy':((start+end)/2,self._crown_y+max_diff*0.125)
                }
            )
    return load_annotations
def _draw_filler(self):
    """
    Build rectangle specs that fill the gap between the arch centreline and
    the inner edge drawn by ``draw``, giving the arch visible thickness.

    One small rectangle is placed per sample point along the span; the
    sample count grows with ``max_diff`` (step shrinks as max_diff**2).
    """
    x = Symbol('x')
    filler = []
    xmax = self._right_support[0]
    xmin = self._left_support[0]
    ymin = min(self._left_support[1],self._right_support[1])
    ymax = self._crown_y
    # Same characteristic dimension as used by draw()/other helpers.
    if abs(1.1*xmax-0.8*xmin)>abs(1.1*ymax-0.8*ymin):
        max_diff = 1.1*xmax-0.8*xmin
    else:
        max_diff = 1.1*ymax-0.8*ymin
    x_points = numpy.arange(self._left_support[0],self._right_support[0],(self._right_support[0]-self._left_support[0])/(max_diff*max_diff))
    for point in x_points:
        # Each filler rectangle hangs 0.015*max_diff below the centreline,
        # matching the offset of the inner-edge curve in draw().
        filler.append(
            {
                'xy':(point,self._shape_eqn.subs(x,point)-max_diff*0.015),
                'width': (self._right_support[0]-self._left_support[0])/(max_diff*max_diff),
                'height': max_diff*0.015,
                'color': 'brown'
            }
        )
    return filler
| Arch |
python | kamyu104__LeetCode-Solutions | Python/minimum-operations-to-convert-all-elements-to-zero.py | {
"start": 50,
"end": 416
} | class ____(object):
def minOperations(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = 0
stk = [0]
for x in nums:
while stk and stk[-1] > x:
stk.pop()
if stk[-1] < x:
result += 1
stk.append(x)
return result
| Solution |
python | pennersr__django-allauth | tests/apps/account/test_auth_backends.py | {
"start": 284,
"end": 3601
} | class ____(TestCase):
def setUp(self):
user = get_user_model().objects.create(
is_active=True, email="john@example.com", username="john"
)
user.set_password(user.username)
user.save()
self.user = user
@override_settings(
ACCOUNT_LOGIN_METHODS={app_settings.LoginMethod.USERNAME}
) # noqa
def test_auth_by_username(self):
user = self.user
backend = AuthenticationBackend()
self.assertEqual(
backend.authenticate(
request=None, username=user.username, password=user.username
).pk,
user.pk,
)
self.assertEqual(
backend.authenticate(
request=None, username=user.email, password=user.username
),
None,
)
@override_settings(ACCOUNT_LOGIN_METHODS={app_settings.LoginMethod.EMAIL}) # noqa
def test_auth_by_email(self):
user = self.user
backend = AuthenticationBackend()
self.assertEqual(
backend.authenticate(
request=None, username=user.email, password=user.username
).pk,
user.pk,
)
self.assertEqual(
backend.authenticate(
request=None, username=user.username, password=user.username
),
None,
)
@override_settings(
ACCOUNT_LOGIN_METHODS={
app_settings.LoginMethod.EMAIL,
app_settings.LoginMethod.USERNAME,
}
) # noqa
def test_auth_by_username_or_email(self):
user = self.user
backend = AuthenticationBackend()
self.assertEqual(
backend.authenticate(
request=None, username=user.email, password=user.username
).pk,
user.pk,
)
self.assertEqual(
backend.authenticate(
request=None, username=user.username, password=user.username
).pk,
user.pk,
)
@pytest.mark.parametrize(
"login_methods",
[
{app_settings.LoginMethod.EMAIL},
{app_settings.LoginMethod.USERNAME},
{app_settings.LoginMethod.USERNAME, app_settings.LoginMethod.EMAIL},
],
)
def test_account_enumeration_timing_attack(user, db, rf, settings, login_methods):
settings.ACCOUNT_LOGIN_METHODS = login_methods
with patch("django.contrib.auth.models.User.set_password") as set_password_mock:
with patch(
"django.contrib.auth.models.User.check_password", new=set_password_mock
):
backend = AuthenticationBackend()
backend.authenticate(
rf.get("/"),
email="not@known.org",
username="not-known",
password="secret",
)
set_password_mock.assert_called_once()
set_password_mock.reset_mock()
backend.authenticate(rf.get("/"), username=user.username, password="secret")
set_password_mock.assert_called_once()
set_password_mock.reset_mock()
backend.authenticate(
rf.get("/"), email=user.email, username="not-known", password="secret"
)
set_password_mock.assert_called_once()
| AuthenticationBackendTests |
python | kamyu104__LeetCode-Solutions | Python/max-chunks-to-make-sorted-ii.py | {
"start": 543,
"end": 1021
} | class ____(object):
def maxChunksToSorted(self, arr):
"""
:type arr: List[int]
:rtype: int
"""
def compare(i1, i2):
return arr[i1]-arr[i2] if arr[i1] != arr[i2] else i1-i2
idxs = [i for i in xrange(len(arr))]
result, max_i = 0, 0
for i, v in enumerate(sorted(idxs, cmp=compare)):
max_i = max(max_i, v)
if max_i == i:
result += 1
return result
| Solution2 |
python | kamyu104__LeetCode-Solutions | Python/min-cost-climbing-stairs.py | {
"start": 29,
"end": 333
} | class ____(object):
def minCostClimbingStairs(self, cost):
"""
:type cost: List[int]
:rtype: int
"""
dp = [0] * 3
for i in reversed(xrange(len(cost))):
dp[i%3] = cost[i] + min(dp[(i+1)%3], dp[(i+2)%3])
return min(dp[0], dp[1])
| Solution |
python | optuna__optuna | optuna/_gp/acqf.py | {
"start": 6881,
"end": 7305
} | class ____(BaseAcquisitionFunc):
def __init__(
self,
gpr: GPRegressor,
search_space: SearchSpace,
beta: float,
) -> None:
self._gpr = gpr
self._beta = beta
super().__init__(gpr.length_scales, search_space)
def eval_acqf(self, x: torch.Tensor) -> torch.Tensor:
mean, var = self._gpr.posterior(x)
return mean - torch.sqrt(self._beta * var)
| LCB |
python | keras-team__keras | keras/src/optimizers/adafactor.py | {
"start": 193,
"end": 8418
} | class ____(optimizer.Optimizer):
"""Optimizer that implements the Adafactor algorithm.
Adafactor is commonly used in NLP tasks, and has the advantage
of taking less memory because it only saves partial information of previous
gradients.
The default argument setup is based on the original paper (see reference).
When gradients are of dimension > 2, Adafactor optimizer will delete the
last 2 dimensions separately in its accumulator variables.
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
beta_2_decay: float, defaults to -0.8. The decay rate of `beta_2`.
epsilon_1: float, defaults to 1e-30. A small offset to keep denominator
away from 0.
epsilon_2: float, defaults to 1e-3. A small offset to avoid learning
rate becoming too small by time.
clip_threshold: float, defaults to 1.0. Clipping threshold. This is a
part of Adafactor algorithm, independent from `clipnorm`,
`clipvalue`, and `global_clipnorm`.
relative_step: bool, defaults to `True`. If `learning_rate` is a
constant and `relative_step=True`, learning rate will be adjusted
based on current iterations. This is a default learning rate decay
in Adafactor.
{{base_optimizer_keyword_args}}
Reference:
- [Shazeer, Noam et al., 2018](https://arxiv.org/abs/1804.04235).
"""
def __init__(
self,
learning_rate=0.001,
beta_2_decay=-0.8,
epsilon_1=1e-30,
epsilon_2=1e-3,
clip_threshold=1.0,
relative_step=True,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="adafactor",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
**kwargs,
)
self.beta_2_decay = beta_2_decay
self.epsilon_1 = epsilon_1
self.epsilon_2 = epsilon_2
self.clip_threshold = clip_threshold
self.relative_step = relative_step
def build(self, var_list):
"""Initialize optimizer variables.
Adam optimizer has 3 types of variables: momentums, velocities and
velocity_hat (only set when amsgrad is applied),
Args:
var_list: list of model variables to build Adam variables on.
"""
if self.built:
return
super().build(var_list)
self._r = []
self._c = []
self._v = []
for var in var_list:
if len(var.shape) < 2:
# Don't factor if variable is of dimension < 2, but we still
# need to create dummy variables as placeholder.
self._r.append(
backend.Variable(0, name=var.name, trainable=False)
)
self._c.append(
backend.Variable(0, name=var.name, trainable=False)
)
elif self._overwrite_variable_with_gradient(var):
self._r.append(None)
self._c.append(None)
else:
# Always factor the last 2 dimensions.
r_shape = var.shape[:-1]
c_shape = var.shape[:-2] + (var.shape[-1],)
self._r.append(
self.add_variable(
shape=r_shape,
dtype=var.dtype,
name=var.name,
)
)
self._c.append(
self.add_variable(
shape=c_shape,
dtype=var.dtype,
name=var.name,
)
)
if self._overwrite_variable_with_gradient(var):
self._v.append(None)
else:
self._v.append(
self.add_variable_from_reference(
reference_variable=var, name="velocity"
)
)
def _rms(self, x):
return ops.sqrt(ops.mean(ops.square(x)))
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
epsilon_2 = ops.cast(self.epsilon_2, variable.dtype)
one = ops.cast(1.0, variable.dtype)
local_step = ops.cast(self.iterations + 1, variable.dtype)
if not callable(self._learning_rate) and self.relative_step:
lr = ops.minimum(lr, 1 / ops.sqrt(local_step))
r = self._r[self._get_variable_index(variable)]
c = self._c[self._get_variable_index(variable)]
v = self._v[self._get_variable_index(variable)]
rho_t = ops.minimum(lr, 1 / ops.sqrt(local_step))
alpha_t = ops.maximum(epsilon_2, self._rms(variable)) * rho_t
regulated_grad_square = ops.add(ops.square(gradient), self.epsilon_1)
beta_2_t = ops.subtract(1, ops.power(local_step, self.beta_2_decay))
if len(variable.shape) >= 2:
# `r` deletes the last dimension of gradient, so it is of shape
# `gradient.shape[:-1]`.
self.assign(
r,
ops.add(
ops.multiply(beta_2_t, r),
ops.multiply(
ops.subtract(1, beta_2_t),
ops.mean(regulated_grad_square, axis=-1),
),
),
)
# `c` deletes the second last dimension of gradient, so it is of
# shape `gradient.shape[:-2] + gradient.shape[-1]`.
self.assign(
c,
ops.add(
ops.multiply(beta_2_t, c),
ops.multiply(
ops.subtract(1, beta_2_t),
ops.mean(regulated_grad_square, axis=-2),
),
),
)
self.assign(
v,
ops.multiply(
ops.expand_dims(
ops.divide(r, ops.mean(r, axis=-1, keepdims=True)),
axis=-1,
),
ops.expand_dims(c, -2),
),
)
else:
self.assign(
v,
ops.add(
ops.multiply(beta_2_t, v),
ops.multiply(
ops.subtract(1, beta_2_t), regulated_grad_square
),
),
)
u_t = ops.divide(gradient, ops.sqrt(v))
u_t_hat = ops.divide(
u_t,
ops.maximum(one, ops.divide(self._rms(u_t), self.clip_threshold)),
)
self.assign_sub(variable, ops.multiply(alpha_t, u_t_hat))
def get_config(self):
config = super().get_config()
config.update(
{
"beta_2_decay": self.beta_2_decay,
"epsilon_1": self.epsilon_1,
"epsilon_2": self.epsilon_2,
"clip_threshold": self.clip_threshold,
"relative_step": self.relative_step,
}
)
return config
Adafactor.__doc__ = Adafactor.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
| Adafactor |
python | apache__airflow | airflow-core/src/airflow/lineage/hook.py | {
"start": 2501,
"end": 3022
} | class ____:
"""
Holds lineage collected by HookLineageCollector.
This class represents the lineage information collected by the `HookLineageCollector`. It stores
the input and output assets, each with an associated count indicating how many times the asset
has been encountered during the hook execution.
"""
inputs: list[AssetLineageInfo] = attr.ib(factory=list)
outputs: list[AssetLineageInfo] = attr.ib(factory=list)
extra: list[ExtraLineageInfo] = attr.ib(factory=list)
| HookLineage |
python | Netflix__metaflow | metaflow/runner/deployer.py | {
"start": 16522,
"end": 17138
} | class ____(metaclass=DeployedFlowMeta):
"""
DeployedFlow class represents a flow that has been deployed.
This class is not meant to be instantiated directly. Instead, it is returned from
methods of `Deployer`.
"""
# This should match the TYPE value in DeployerImpl for proper stub generation
TYPE: ClassVar[Optional[str]] = None
def __init__(self, deployer: "metaflow.runner.deployer_impl.DeployerImpl"):
self.deployer = deployer
self.name = self.deployer.name
self.flow_name = self.deployer.flow_name
self.metadata = self.deployer.metadata
| DeployedFlow |
python | Netflix__metaflow | metaflow/plugins/azure/azure_secret_manager_secrets_provider.py | {
"start": 762,
"end": 895
} | class ____(MetaflowException):
"""Raised when the secret name does not match expected pattern"""
| MetaflowAzureKeyVaultBadSecretName |
python | pytorch__pytorch | test/inductor/test_lookup_table.py | {
"start": 2822,
"end": 5862
} | class ____(TestCase):
"""Base class for lookup table tests with common setup and utilities"""
def setUp(self):
super().setUp()
self.original_table = inductor_config.lookup_table.table
self.original_max_autotune = getattr(inductor_config, "max_autotune", False)
inductor_config.max_autotune = True
# Set the lookup table choices handler
V.set_choices_handler(LookupTableChoices())
def tearDown(self):
inductor_config.lookup_table.table = self.original_table
inductor_config.max_autotune = self.original_max_autotune
# Restore original choices handler
V.set_choices_handler(InductorChoices())
super().tearDown()
def create_mock_mm_kernel_inputs(
self,
shapes: Optional[list[tuple[int, ...]]] = None,
device: torch.device = torch.device("cuda"),
dtype: torch.dtype = torch.float32,
scalars: Optional[dict[str, Union[float, int]]] = None,
) -> MockMMKernelInputs:
"""Create MockMMKernelInputs with real tensors"""
if shapes is None:
shapes = [(128, 128), (128, 128)] # Default MM shapes
tensors = []
for shape in shapes:
# Create a real tensor with the specified shape, device, and dtype
tensor = torch.randn(shape, device=device, dtype=dtype)
tensors.append(tensor)
return MockMMKernelInputs(tensors, scalars)
def create_lookup_key(self, method, kernel_inputs):
"""Create a lookup key using LookupTableChoices"""
choices = LookupTableChoices()
return choices.make_lookup_key(kernel_inputs, method)
def create_config(self, template_id, **kwargs):
"""Create a backend configuration with template_id field"""
config = {"template_id": template_id}
# Add minimal defaults based on template type
if template_id == "triton":
config.update(
{
"BLOCK_M": 128,
"BLOCK_N": 128,
"BLOCK_K": 64,
"num_stages": 2,
"num_warps": 2,
"EVEN_K": True,
"USE_FAST_ACCUM": False,
"ACC_TYPE": "tl.float32",
"GROUP_M": 8,
}
)
elif template_id == "tma":
config.update(
{
"BLOCK_M": 256,
"BLOCK_N": 128,
"BLOCK_K": 64,
"num_stages": 4,
"num_warps": 8,
"EVEN_K": True,
"USE_FAST_ACCUM": False,
"ACC_TYPE": "tl.float32",
"GROUP_M": 8,
}
)
elif template_id == "decompose_k":
config.update({"k": 4})
config.update(kwargs)
return config
@unittest.skipIf(not HAS_CUDA_AND_TRITON, "CUDA not available")
@instantiate_parametrized_tests
| BaseLookupTableTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_axis20.py | {
"start": 315,
"end": 1455
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis20.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [43572224, 43812352]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_x_axis({"label_position": "next_to"})
chart.set_y_axis({"label_position": "none"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pyca__cryptography | tests/hazmat/primitives/test_aead.py | {
"start": 26979,
"end": 36292
} | class ____:
@pytest.mark.skipif(
sys.platform not in {"linux", "darwin"} or sys.maxsize < 2**31,
reason="mmap and 64-bit platform required",
)
def test_data_too_large(self):
key = AESOCB3.generate_key(128)
aesocb3 = AESOCB3(key)
nonce = b"0" * 12
large_data = large_mmap()
with pytest.raises(OverflowError):
aesocb3.encrypt(nonce, large_data, b"")
with pytest.raises(OverflowError):
aesocb3.encrypt(nonce, b"", large_data)
def test_vectors(self, backend, subtests):
vectors = []
for f in [
"rfc7253.txt",
"openssl.txt",
"test-vector-1-nonce104.txt",
"test-vector-1-nonce112.txt",
"test-vector-1-nonce120.txt",
]:
vectors.extend(
load_vectors_from_file(
os.path.join("ciphers", "AES", "OCB3", f),
load_nist_vectors,
)
)
for vector in vectors:
with subtests.test():
nonce = binascii.unhexlify(vector["nonce"])
key = binascii.unhexlify(vector["key"])
aad = binascii.unhexlify(vector["aad"])
ct = binascii.unhexlify(vector["ciphertext"])
pt = binascii.unhexlify(vector.get("plaintext", b""))
aesocb3 = AESOCB3(key)
computed_ct = aesocb3.encrypt(nonce, pt, aad)
assert computed_ct == ct
computed_pt = aesocb3.decrypt(nonce, ct, aad)
assert computed_pt == pt
def test_vectors_invalid(self, backend, subtests):
vectors = load_vectors_from_file(
os.path.join("ciphers", "AES", "OCB3", "rfc7253.txt"),
load_nist_vectors,
)
for vector in vectors:
with subtests.test():
nonce = binascii.unhexlify(vector["nonce"])
key = binascii.unhexlify(vector["key"])
aad = binascii.unhexlify(vector["aad"])
ct = binascii.unhexlify(vector["ciphertext"])
aesocb3 = AESOCB3(key)
with pytest.raises(InvalidTag):
badkey = AESOCB3(AESOCB3.generate_key(128))
badkey.decrypt(nonce, ct, aad)
with pytest.raises(InvalidTag):
aesocb3.decrypt(nonce, b"nonsense", aad)
with pytest.raises(InvalidTag):
aesocb3.decrypt(b"\x00" * 12, ct, aad)
with pytest.raises(InvalidTag):
aesocb3.decrypt(nonce, ct, b"nonsense")
@pytest.mark.parametrize(
("key_len", "expected"),
[
(128, b"g\xe9D\xd22V\xc5\xe0\xb6\xc6\x1f\xa2/\xdf\x1e\xa2"),
(192, b"\xf6s\xf2\xc3\xe7\x17J\xae{\xae\x98l\xa9\xf2\x9e\x17"),
(256, b"\xd9\x0e\xb8\xe9\xc9w\xc8\x8by\xddy=\x7f\xfa\x16\x1c"),
],
)
def test_rfc7253(self, backend, key_len, expected):
# This is derived from page 18 of RFC 7253, with a tag length of
# 128 bits.
k = AESOCB3(b"\x00" * ((key_len - 8) // 8) + b"\x80")
c = b""
for i in range(0, 128):
s = b"\x00" * i
n = (3 * i + 1).to_bytes(12, "big")
c += k.encrypt(n, s, s)
n = (3 * i + 2).to_bytes(12, "big")
c += k.encrypt(n, s, b"")
n = (3 * i + 3).to_bytes(12, "big")
c += k.encrypt(n, b"", s)
assert len(c) == 22400
n = (385).to_bytes(12, "big")
output = k.encrypt(n, b"", c)
assert output == expected
@pytest.mark.parametrize(
("nonce", "data", "associated_data"),
[
[object(), b"data", b""],
[b"0" * 12, object(), b""],
[b"0" * 12, b"data", object()],
],
)
def test_params_not_bytes(self, nonce, data, associated_data, backend):
key = AESOCB3.generate_key(128)
aesocb3 = AESOCB3(key)
with pytest.raises(TypeError):
aesocb3.encrypt(nonce, data, associated_data)
with pytest.raises(TypeError):
aesocb3.decrypt(nonce, data, associated_data)
def test_invalid_nonce_length(self, backend):
key = AESOCB3.generate_key(128)
aesocb3 = AESOCB3(key)
with pytest.raises(ValueError):
aesocb3.encrypt(b"\x00" * 11, b"hi", None)
with pytest.raises(ValueError):
aesocb3.encrypt(b"\x00" * 16, b"hi", None)
with pytest.raises(ValueError):
buf = bytearray(18)
aesocb3.encrypt_into(b"\x00" * 11, b"hi", None, buf)
with pytest.raises(ValueError):
buf = bytearray(18)
aesocb3.encrypt_into(b"\x00" * 16, b"hi", None, buf)
with pytest.raises(ValueError):
aesocb3.decrypt(b"\x00" * 11, b"hi", None)
with pytest.raises(ValueError):
aesocb3.decrypt(b"\x00" * 16, b"hi", None)
with pytest.raises(ValueError):
buf = bytearray(16)
aesocb3.decrypt_into(b"\x00" * 11, b"x" * 20, None, buf)
with pytest.raises(ValueError):
buf = bytearray(16)
aesocb3.decrypt_into(b"\x00" * 16, b"x" * 20, None, buf)
def test_bad_key(self, backend):
with pytest.raises(TypeError):
AESOCB3(object()) # type:ignore[arg-type]
with pytest.raises(ValueError):
AESOCB3(b"0" * 31)
def test_bad_generate_key(self, backend):
with pytest.raises(TypeError):
AESOCB3.generate_key(object()) # type:ignore[arg-type]
with pytest.raises(ValueError):
AESOCB3.generate_key(129)
def test_associated_data_none_equal_to_empty_bytestring(self, backend):
key = AESOCB3.generate_key(128)
aesocb3 = AESOCB3(key)
nonce = os.urandom(12)
ct1 = aesocb3.encrypt(nonce, b"some_data", None)
ct2 = aesocb3.encrypt(nonce, b"some_data", b"")
assert ct1 == ct2
pt1 = aesocb3.decrypt(nonce, ct1, None)
pt2 = aesocb3.decrypt(nonce, ct2, b"")
assert pt1 == pt2
def test_buffer_protocol(self, backend):
key = AESOCB3.generate_key(128)
aesocb3 = AESOCB3(key)
pt = b"encrypt me"
ad = b"additional"
nonce = os.urandom(12)
ct = aesocb3.encrypt(nonce, pt, ad)
computed_pt = aesocb3.decrypt(nonce, ct, ad)
assert computed_pt == pt
aesocb3_ = AESOCB3(bytearray(key))
ct2 = aesocb3_.encrypt(bytearray(nonce), pt, ad)
assert ct2 == ct
computed_pt2 = aesocb3_.decrypt(bytearray(nonce), ct2, ad)
assert computed_pt2 == pt
def test_encrypt_into(self, backend):
key = AESOCB3.generate_key(128)
aesocb3 = AESOCB3(key)
nonce = os.urandom(12)
pt = b"encrypt me"
ad = b"additional"
buf = bytearray(len(pt) + 16)
n = aesocb3.encrypt_into(nonce, pt, ad, buf)
assert n == len(pt) + 16
ct = aesocb3.encrypt(nonce, pt, ad)
assert buf == ct
@pytest.mark.parametrize(
("ptlen", "buflen"), [(10, 25), (10, 27), (15, 30), (20, 37)]
)
def test_encrypt_into_buffer_incorrect_size(self, ptlen, buflen, backend):
key = AESOCB3.generate_key(128)
aesocb3 = AESOCB3(key)
nonce = os.urandom(12)
pt = b"x" * ptlen
buf = bytearray(buflen)
with pytest.raises(ValueError, match="buffer must be"):
aesocb3.encrypt_into(nonce, pt, None, buf)
def test_decrypt_into(self, backend):
key = AESOCB3.generate_key(128)
aesocb3 = AESOCB3(key)
nonce = os.urandom(12)
pt = b"decrypt me"
ad = b"additional"
ct = aesocb3.encrypt(nonce, pt, ad)
buf = bytearray(len(pt))
n = aesocb3.decrypt_into(nonce, ct, ad, buf)
assert n == len(pt)
assert buf == pt
@pytest.mark.parametrize(
("ctlen", "buflen"), [(26, 9), (26, 11), (31, 14), (36, 21)]
)
def test_decrypt_into_buffer_incorrect_size(self, ctlen, buflen, backend):
key = AESOCB3.generate_key(128)
aesocb3 = AESOCB3(key)
nonce = os.urandom(12)
ct = b"x" * ctlen
buf = bytearray(buflen)
with pytest.raises(ValueError, match="buffer must be"):
aesocb3.decrypt_into(nonce, ct, None, buf)
def test_decrypt_into_invalid_tag(self, backend):
key = AESOCB3.generate_key(128)
aesocb3 = AESOCB3(key)
nonce = os.urandom(12)
pt = b"some data"
ad = b"additional"
ct = aesocb3.encrypt(nonce, pt, ad)
# Corrupt the ciphertext
corrupted_ct = bytearray(ct)
corrupted_ct[0] ^= 1
buf = bytearray(len(pt))
with pytest.raises(InvalidTag):
aesocb3.decrypt_into(nonce, bytes(corrupted_ct), ad, buf)
def test_decrypt_into_data_too_short(self, backend):
key = AESOCB3.generate_key(128)
aesocb3 = AESOCB3(key)
nonce = os.urandom(12)
buf = bytearray(16)
with pytest.raises(InvalidTag):
aesocb3.decrypt_into(nonce, b"short", None, buf)
@pytest.mark.skipif(
not _aead_supported(AESSIV),
reason="Does not support AESSIV",
)
| TestAESOCB3 |
python | allegroai__clearml | clearml/backend_api/services/v2_9/projects.py | {
"start": 79858,
"end": 81004
} | class ____(Response):
"""
Response of projects.make_private endpoint.
:param updated: Number of projects updated
:type updated: int
"""
_service = "projects"
_action = "make_private"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"updated": {
"description": "Number of projects updated",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None:
super(MakePrivateResponse, self).__init__(**kwargs)
self.updated = updated
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
| MakePrivateResponse |
python | doocs__leetcode | solution/2700-2799/2733.Neither Minimum nor Maximum/Solution.py | {
"start": 0,
"end": 175
} | class ____:
def findNonMinOrMax(self, nums: List[int]) -> int:
mi, mx = min(nums), max(nums)
return next((x for x in nums if x != mi and x != mx), -1)
| Solution |
python | facebook__pyre-check | client/commands/commands.py | {
"start": 338,
"end": 938
} | class ____(enum.IntEnum):
SUCCESS = 0
FOUND_ERRORS = 1
FAILURE = 2
BUCK_INTERNAL_ERROR = 3
SERVER_NOT_FOUND = 4
INCONSISTENT_SERVER = 5
CONFIGURATION_ERROR = 6
BUCK_USER_ERROR = 7
WATCHMAN_ERROR = 8
TAINT_CONFIGURATION_ERROR = 9
MODEL_VERIFICATION_ERROR = 10
UNSUPPORTED_PLATFORM = 11
CLICK_EXCEPTION = 12
# Exit should only happen after a shutdown message
LANGUAGE_SERVER_EXIT = 13
PYREFLY_FILE_FORMAT_ERROR = 14
# If the process exited due to a signal, this will be the negative signal number.
SIGSEGV = -signal.SIGSEGV
| ExitCode |
python | cython__cython | Demos/benchmarks/bm_raytrace.py | {
"start": 3922,
"end": 4368
} | class ____(object):
def __init__(self, point, normal):
self.point = point
self.normal = normal.normalized()
def __repr__(self):
return 'Halfspace(%s,%s)' % (repr(self.point), repr(self.normal))
def intersectionTime(self, ray):
v = ray.vector.dot(self.normal)
if v:
return 1 / -v
else:
return None
def normalAt(self, p):
return self.normal
| Halfspace |
python | milvus-io__pymilvus | pymilvus/client/prepare.py | {
"start": 1545,
"end": 96866
} | class ____:
@classmethod
def create_collection_request(
cls,
collection_name: str,
fields: Union[Dict[str, Iterable], CollectionSchema],
**kwargs,
) -> milvus_types.CreateCollectionRequest:
"""
Args:
fields (Union(Dict[str, Iterable], CollectionSchema)).
{"fields": [
{"name": "A", "type": DataType.INT32}
{"name": "B", "type": DataType.INT64, "auto_id": True, "is_primary": True},
{"name": "C", "type": DataType.FLOAT},
{"name": "Vec", "type": DataType.FLOAT_VECTOR, "params": {"dim": 128}}]
}
Returns:
milvus_types.CreateCollectionRequest
"""
if isinstance(fields, CollectionSchema):
schema = cls.get_schema_from_collection_schema(collection_name, fields)
else:
schema = cls.get_schema(collection_name, fields, **kwargs)
consistency_level = get_consistency_level(
kwargs.get("consistency_level", DEFAULT_CONSISTENCY_LEVEL)
)
req = milvus_types.CreateCollectionRequest(
collection_name=collection_name,
schema=bytes(schema.SerializeToString()),
consistency_level=consistency_level,
)
properties = kwargs.get("properties")
if is_legal_collection_properties(properties):
properties = [
common_types.KeyValuePair(key=str(k), value=str(v)) for k, v in properties.items()
]
req.properties.extend(properties)
same_key = set(kwargs.keys()).intersection({"num_shards", "shards_num"})
if len(same_key) > 0:
if len(same_key) > 1:
msg = "got both num_shards and shards_num in kwargs, expected only one of them"
raise ParamError(message=msg)
num_shards = kwargs[next(iter(same_key))]
if not isinstance(num_shards, int):
msg = f"invalid num_shards type, got {type(num_shards)}, expected int"
raise ParamError(message=msg)
req.shards_num = num_shards
num_partitions = kwargs.get("num_partitions")
if num_partitions is not None:
if not isinstance(num_partitions, int) or isinstance(num_partitions, bool):
msg = f"invalid num_partitions type, got {type(num_partitions)}, expected int"
raise ParamError(message=msg)
if num_partitions < 1:
msg = f"The specified num_partitions should be greater than or equal to 1, got {num_partitions}"
raise ParamError(message=msg)
req.num_partitions = num_partitions
return req
@classmethod
def get_schema_from_collection_schema(
cls,
collection_name: str,
fields: CollectionSchema,
) -> schema_types.CollectionSchema:
coll_description = fields.description
if not isinstance(coll_description, (str, bytes)):
msg = (
f"description [{coll_description}] has type {type(coll_description).__name__}, "
"but expected one of: bytes, str"
)
raise ParamError(message=msg)
schema = schema_types.CollectionSchema(
name=collection_name,
autoID=fields.auto_id,
description=coll_description,
enable_dynamic_field=fields.enable_dynamic_field,
)
for f in fields.fields:
field_schema = schema_types.FieldSchema(
name=f.name,
data_type=f.dtype,
description=f.description,
is_primary_key=f.is_primary,
default_value=f.default_value,
nullable=f.nullable,
autoID=f.auto_id,
is_partition_key=f.is_partition_key,
is_dynamic=f.is_dynamic,
element_type=f.element_type,
is_clustering_key=f.is_clustering_key,
is_function_output=f.is_function_output,
)
for k, v in f.params.items():
kv_pair = common_types.KeyValuePair(
key=str(k) if k != "mmap_enabled" else "mmap.enabled",
value=(
orjson.dumps(v).decode(Config.EncodeProtocol)
if not isinstance(v, str)
else str(v)
),
)
field_schema.type_params.append(kv_pair)
schema.fields.append(field_schema)
for struct in fields.struct_fields:
# Validate that max_capacity is set
if struct.max_capacity is None:
raise ParamError(message=f"max_capacity not set for struct field: {struct.name}")
struct_schema = schema_types.StructArrayFieldSchema(
name=struct.name,
fields=[],
description=struct.description,
)
if struct.params:
for k, v in struct.params.items():
kv_pair = common_types.KeyValuePair(
key=str(k) if k != "mmap_enabled" else "mmap.enabled",
value=(
orjson.dumps(v).decode(Config.EncodeProtocol)
if not isinstance(v, str)
else str(v)
),
)
struct_schema.type_params.append(kv_pair)
for f in struct.fields:
# Convert struct field types to backend representation
# As struct itself only support array type, so all it's fields are array type
# internally
# So we need to convert the fields to array types
actual_dtype = f.dtype
actual_element_type = None
# Convert to appropriate array type
if isVectorDataType(f.dtype):
actual_dtype = DataType._ARRAY_OF_VECTOR
actual_element_type = f.dtype
else:
actual_dtype = DataType.ARRAY
actual_element_type = f.dtype
field_schema = schema_types.FieldSchema(
name=f.name,
data_type=actual_dtype,
description=f.description,
is_primary_key=f.is_primary,
default_value=f.default_value,
nullable=f.nullable,
autoID=f.auto_id,
is_partition_key=f.is_partition_key,
is_dynamic=f.is_dynamic,
element_type=actual_element_type,
is_clustering_key=f.is_clustering_key,
is_function_output=f.is_function_output,
)
# Copy field params and add max_capacity from struct_schema
field_params = dict(f.params) if f.params else {}
# max_capacity is required for struct fields
field_params["max_capacity"] = struct.max_capacity
for k, v in field_params.items():
kv_pair = common_types.KeyValuePair(
key=str(k) if k != "mmap_enabled" else "mmap.enabled", value=json.dumps(v)
)
field_schema.type_params.append(kv_pair)
struct_schema.fields.append(field_schema)
schema.struct_array_fields.append(struct_schema)
for f in fields.functions:
function_schema = schema_types.FunctionSchema(
name=f.name,
description=f.description,
type=f.type,
input_field_names=f.input_field_names,
output_field_names=f.output_field_names,
)
for k, v in f.params.items():
kv_pair = common_types.KeyValuePair(key=str(k), value=str(v))
function_schema.params.append(kv_pair)
schema.functions.append(function_schema)
return schema
@staticmethod
def get_field_schema(
field: Dict,
primary_field: Optional[str] = None,
auto_id_field: Optional[str] = None,
) -> (schema_types.FieldSchema, Optional[str], Optional[str]):
field_name = field.get("name")
if field_name is None:
raise ParamError(message="You should specify the name of field!")
data_type = field.get("type")
if data_type is None:
raise ParamError(message="You should specify the data type of field!")
if not isinstance(data_type, (int, DataType)):
raise ParamError(message="Field type must be of DataType!")
is_primary = field.get("is_primary", False)
if not isinstance(is_primary, bool):
raise ParamError(message="is_primary must be boolean")
if is_primary:
if primary_field is not None:
raise ParamError(message="A collection should only have one primary field")
if DataType(data_type) not in [DataType.INT64, DataType.VARCHAR]:
msg = "int64 and varChar are the only supported types of primary key"
raise ParamError(message=msg)
primary_field = field_name
nullable = field.get("nullable", False)
if not isinstance(nullable, bool):
raise ParamError(message="nullable must be boolean")
auto_id = field.get("auto_id", False)
if not isinstance(auto_id, bool):
raise ParamError(message="auto_id must be boolean")
if auto_id:
if auto_id_field is not None:
raise ParamError(message="A collection should only have one autoID field")
if DataType(data_type) != DataType.INT64:
msg = "int64 is the only supported type of automatic generated id"
raise ParamError(message=msg)
auto_id_field = field_name
field_schema = schema_types.FieldSchema(
name=field_name,
data_type=data_type,
description=field.get("description", ""),
is_primary_key=is_primary,
autoID=auto_id,
is_partition_key=field.get("is_partition_key", False),
is_clustering_key=field.get("is_clustering_key", False),
nullable=nullable,
default_value=field.get("default_value"),
element_type=field.get("element_type"),
)
type_params = field.get("params", {})
if not isinstance(type_params, dict):
raise ParamError(message="params should be dictionary type")
kvs = [
common_types.KeyValuePair(
key=str(k) if k != "mmap_enabled" else "mmap.enabled",
value=str(v),
)
for k, v in type_params.items()
]
field_schema.type_params.extend(kvs)
return field_schema, primary_field, auto_id_field
@classmethod
def get_schema(
cls,
collection_name: str,
fields: Dict[str, Iterable],
**kwargs,
) -> schema_types.CollectionSchema:
if not isinstance(fields, dict):
raise ParamError(message="Param fields must be a dict")
all_fields = fields.get("fields")
if all_fields is None:
raise ParamError(message="Param fields must contain key 'fields'")
if len(all_fields) == 0:
raise ParamError(message="Param fields value cannot be empty")
enable_dynamic_field = kwargs.get("enable_dynamic_field", False)
if "enable_dynamic_field" in fields:
enable_dynamic_field = fields["enable_dynamic_field"]
schema = schema_types.CollectionSchema(
name=collection_name,
autoID=False,
description=fields.get("description", ""),
enable_dynamic_field=enable_dynamic_field,
)
primary_field, auto_id_field = None, None
for field in all_fields:
(field_schema, primary_field, auto_id_field) = cls.get_field_schema(
field, primary_field, auto_id_field
)
schema.fields.append(field_schema)
return schema
@classmethod
def drop_collection_request(cls, collection_name: str) -> milvus_types.DropCollectionRequest:
return milvus_types.DropCollectionRequest(collection_name=collection_name)
@classmethod
def add_collection_field_request(
cls,
collection_name: str,
field_schema: FieldSchema,
) -> milvus_types.AddCollectionFieldRequest:
(field_schema_proto, _, _) = cls.get_field_schema(field=field_schema.to_dict())
return milvus_types.AddCollectionFieldRequest(
collection_name=collection_name,
schema=bytes(field_schema_proto.SerializeToString()),
)
@classmethod
def describe_collection_request(
cls,
collection_name: str,
) -> milvus_types.DescribeCollectionRequest:
return milvus_types.DescribeCollectionRequest(collection_name=collection_name)
@classmethod
def alter_collection_request(
cls,
collection_name: str,
properties: Optional[Dict] = None,
delete_keys: Optional[List[str]] = None,
) -> milvus_types.AlterCollectionRequest:
kvs = []
if properties:
kvs = [common_types.KeyValuePair(key=k, value=str(v)) for k, v in properties.items()]
return milvus_types.AlterCollectionRequest(
collection_name=collection_name, properties=kvs, delete_keys=delete_keys
)
@classmethod
def alter_collection_field_request(
cls, collection_name: str, field_name: str, field_param: Dict
) -> milvus_types.AlterCollectionFieldRequest:
kvs = []
if field_param:
kvs = [common_types.KeyValuePair(key=k, value=str(v)) for k, v in field_param.items()]
return milvus_types.AlterCollectionFieldRequest(
collection_name=collection_name, field_name=field_name, properties=kvs
)
@classmethod
def collection_stats_request(cls, collection_name: str):
return milvus_types.CollectionStatsRequest(collection_name=collection_name)
@classmethod
def show_collections_request(cls, collection_names: Optional[List[str]] = None):
req = milvus_types.ShowCollectionsRequest()
if collection_names:
if not isinstance(collection_names, (list,)):
msg = f"collection_names must be a list of strings, but got: {collection_names}"
raise ParamError(message=msg)
for collection_name in collection_names:
check_pass_param(collection_name=collection_name)
req.collection_names.extend(collection_names)
req.type = milvus_types.ShowType.InMemory
return req
@classmethod
def rename_collections_request(cls, old_name: str, new_name: str, new_db_name: str):
return milvus_types.RenameCollectionRequest(
oldName=old_name, newName=new_name, newDBName=new_db_name
)
@classmethod
def create_partition_request(cls, collection_name: str, partition_name: str):
return milvus_types.CreatePartitionRequest(
collection_name=collection_name, partition_name=partition_name
)
@classmethod
def drop_partition_request(cls, collection_name: str, partition_name: str):
return milvus_types.DropPartitionRequest(
collection_name=collection_name, partition_name=partition_name
)
@classmethod
def has_partition_request(cls, collection_name: str, partition_name: str):
return milvus_types.HasPartitionRequest(
collection_name=collection_name, partition_name=partition_name
)
@classmethod
def partition_stats_request(cls, collection_name: str, partition_name: str):
return milvus_types.PartitionStatsRequest(
collection_name=collection_name, partition_name=partition_name
)
@classmethod
def show_partitions_request(
cls,
collection_name: str,
partition_names: Optional[List[str]] = None,
type_in_memory: bool = False,
):
check_pass_param(collection_name=collection_name, partition_name_array=partition_names)
req = milvus_types.ShowPartitionsRequest(collection_name=collection_name)
if partition_names:
if not isinstance(partition_names, (list,)):
msg = f"partition_names must be a list of strings, but got: {partition_names}"
raise ParamError(message=msg)
for partition_name in partition_names:
check_pass_param(partition_name=partition_name)
req.partition_names.extend(partition_names)
if type_in_memory is False:
req.type = milvus_types.ShowType.All
else:
req.type = milvus_types.ShowType.InMemory
return req
@classmethod
def get_loading_progress(
cls, collection_name: str, partition_names: Optional[List[str]] = None
):
check_pass_param(collection_name=collection_name, partition_name_array=partition_names)
req = milvus_types.GetLoadingProgressRequest(collection_name=collection_name)
if partition_names:
req.partition_names.extend(partition_names)
return req
@classmethod
def get_load_state(cls, collection_name: str, partition_names: Optional[List[str]] = None):
check_pass_param(collection_name=collection_name, partition_name_array=partition_names)
req = milvus_types.GetLoadStateRequest(collection_name=collection_name)
if partition_names:
req.partition_names.extend(partition_names)
return req
@classmethod
def empty(cls):
msg = "no empty request later"
raise DeprecationWarning(msg)
@classmethod
def register_link_request(cls):
return milvus_types.RegisterLinkRequest()
@classmethod
def partition_name(cls, collection_name: str, partition_name: str):
if not isinstance(collection_name, str):
raise ParamError(message="collection_name must be of str type")
if not isinstance(partition_name, str):
raise ParamError(message="partition_name must be of str type")
return milvus_types.PartitionName(collection_name=collection_name, tag=partition_name)
@staticmethod
def _is_input_field(field: Dict, is_upsert: bool):
return (not field.get("auto_id", False) or is_upsert) and not field.get(
"is_function_output", False
)
@staticmethod
def _function_output_field_names(fields_info: List[Dict]):
return [field["name"] for field in fields_info if field.get("is_function_output", False)]
@staticmethod
def _num_input_fields(fields_info: List[Dict], is_upsert: bool):
return len([field for field in fields_info if Prepare._is_input_field(field, is_upsert)])
@staticmethod
def _process_struct_field(
field_name: str,
values: Any,
struct_info: Dict,
struct_sub_field_info: Dict,
struct_sub_fields_data: Dict,
):
"""Process a single struct field's data.
Args:
field_name: Name of the struct field
values: List of struct values
struct_info: Info about the struct field
struct_sub_field_info: Two-level dict [struct_name][field_name] -> field info
struct_sub_fields_data: Two-level dict [struct_name][field_name] -> FieldData
"""
# Convert numpy ndarray to list if needed
if isinstance(values, np.ndarray):
values = values.tolist()
if not isinstance(values, list):
msg = f"Field '{field_name}': Expected list, got {type(values).__name__}"
raise TypeError(msg)
# Get expected fields for this specific struct
expected_fields = {field["name"] for field in struct_info["fields"]}
# Handle empty array - create empty data structures
if not values:
# Get relevant field info and data for this struct
relevant_field_info = struct_sub_field_info[field_name]
relevant_fields_data = struct_sub_fields_data[field_name]
Prepare._add_empty_struct_data(relevant_field_info, relevant_fields_data)
return
# Validate and collect values
field_values = Prepare._validate_and_collect_struct_values(
values, expected_fields, field_name
)
# Process collected values using the struct-specific sub-dictionaries
relevant_field_info = struct_sub_field_info[field_name]
relevant_fields_data = struct_sub_fields_data[field_name]
Prepare._process_struct_values(field_values, relevant_field_info, relevant_fields_data)
@staticmethod
def _add_empty_struct_data(struct_field_info: Dict, struct_sub_fields_data: Dict):
"""Add empty data for struct fields."""
for field_name, field_info in struct_field_info.items():
field_data = struct_sub_fields_data[field_name]
if field_info["type"] == DataType.ARRAY:
field_data.scalars.array_data.data.append(convert_to_array([], field_info))
elif field_info["type"] == DataType._ARRAY_OF_VECTOR:
field_data.vectors.vector_array.dim = Prepare._get_dim_value(field_info)
field_data.vectors.vector_array.data.append(
convert_to_array_of_vector([], field_info)
)
@staticmethod
def _validate_and_collect_struct_values(
values: List, expected_fields: set, struct_field_name: str = ""
) -> Dict[str, List]:
"""Validate struct items and collect field values."""
field_values = {field: [] for field in expected_fields}
field_prefix = f"Field '{struct_field_name}': " if struct_field_name else ""
for idx, struct_item in enumerate(values):
if not isinstance(struct_item, dict):
msg = f"{field_prefix}Element at index {idx} must be dict, got {type(struct_item).__name__}"
raise TypeError(msg)
# Validate fields
actual_fields = set(struct_item.keys())
missing_fields = expected_fields - actual_fields
extra_fields = actual_fields - expected_fields
if missing_fields:
msg = f"{field_prefix}Element at index {idx} missing required fields: {missing_fields}"
raise ValueError(msg)
if extra_fields:
msg = f"{field_prefix}Element at index {idx} has unexpected fields: {extra_fields}"
raise ValueError(msg)
# Collect values
for field_name in expected_fields:
value = struct_item[field_name]
if value is None:
msg = f"{field_prefix}Field '{field_name}' in element at index {idx} cannot be None"
raise ValueError(msg)
field_values[field_name].append(value)
return field_values
@staticmethod
def _process_struct_values(
field_values: Dict[str, List], struct_field_info: Dict, struct_sub_fields_data: Dict
):
"""Process collected struct field values."""
for field_name, values in field_values.items():
field_data = struct_sub_fields_data[field_name]
field_info = struct_field_info[field_name]
if field_info["type"] == DataType.ARRAY:
field_data.scalars.array_data.data.append(convert_to_array(values, field_info))
elif field_info["type"] == DataType._ARRAY_OF_VECTOR:
field_data.vectors.vector_array.dim = Prepare._get_dim_value(field_info)
field_data.vectors.vector_array.data.append(
convert_to_array_of_vector(values, field_info)
)
else:
raise ParamError(message=f"Unsupported data type: {field_info['type']}")
@staticmethod
def _get_dim_value(field_info: Dict) -> int:
"""Extract dimension value from field info."""
dim_value = field_info.get("params", {}).get("dim", 0)
return int(dim_value) if isinstance(dim_value, str) else dim_value
@staticmethod
def _setup_struct_data_structures(struct_fields_info: Optional[List[Dict]]):
"""Setup common data structures for struct field processing.
Returns:
Tuple containing:
- struct_fields_data: Dict of FieldData for struct fields
- struct_info_map: Dict mapping struct field names to their info
- struct_sub_fields_data: Two-level Dict of FieldData for
sub-fields [struct_name][field_name]
- struct_sub_field_info: Two-level Dict mapping sub-field names
to their info [struct_name][field_name]
- input_struct_field_info: List of struct fields info
"""
struct_fields_data = {}
struct_info_map = {}
struct_sub_fields_data = {}
struct_sub_field_info = {}
input_struct_field_info = []
if struct_fields_info:
struct_fields_data = {
field["name"]: schema_types.FieldData(field_name=field["name"], type=field["type"])
for field in struct_fields_info
}
input_struct_field_info = list(struct_fields_info)
struct_info_map = {struct["name"]: struct for struct in struct_fields_info}
# Use two-level maps to avoid overwrite when different structs have fields
# with same name
# First level: struct name, Second level: field name
for struct_field_info in struct_fields_info:
struct_name = struct_field_info["name"]
struct_sub_fields_data[struct_name] = {}
struct_sub_field_info[struct_name] = {}
for field in struct_field_info["fields"]:
field_name = field["name"]
field_data = schema_types.FieldData(field_name=field_name, type=field["type"])
# Set dim for ARRAY_OF_VECTOR types
if field["type"] == DataType._ARRAY_OF_VECTOR:
field_data.vectors.dim = Prepare._get_dim_value(field)
struct_sub_fields_data[struct_name][field_name] = field_data
struct_sub_field_info[struct_name][field_name] = field
return (
struct_fields_data,
struct_info_map,
struct_sub_fields_data,
struct_sub_field_info,
input_struct_field_info,
)
    @staticmethod
    def _parse_row_request(
        request: Union[milvus_types.InsertRequest, milvus_types.UpsertRequest],
        fields_info: List[Dict],
        struct_fields_info: List[Dict],
        enable_dynamic: bool,
        entities: List,
    ):
        """Pack row-oriented ``entities`` into ``request.fields_data`` (insert path).

        Args:
            request: Insert/Upsert request message, mutated in place.
            fields_info: Schema info dicts for the collection's regular fields.
            struct_fields_info: Schema info dicts for struct-array fields.
            enable_dynamic: Whether unknown keys go into the dynamic JSON field.
            entities: One dict per row, mapping field name to value.

        Returns:
            The same ``request`` with ``fields_data`` populated.

        Raises:
            DataNotMatchException: Unexpected/missing fields or type mismatch.
            ParamError: Packed field count disagrees with the schema.
        """
        input_fields_info = [
            field for field in fields_info if Prepare._is_input_field(field, is_upsert=False)
        ]
        # check if pk exists in entities: an auto-id primary key is normally
        # server-generated, but when the caller supplied it in the first row,
        # accept it as a regular input field
        primary_field_info = next(
            (field for field in fields_info if field.get("is_primary", False)), None
        )
        if (
            primary_field_info
            and primary_field_info.get("auto_id", False)
            and entities
            and primary_field_info["name"] in entities[0]
        ):
            input_fields_info.append(primary_field_info)
        function_output_field_names = Prepare._function_output_field_names(fields_info)
        # one FieldData message per regular input field, keyed by field name
        fields_data = {
            field["name"]: schema_types.FieldData(field_name=field["name"], type=field["type"])
            for field in input_fields_info
        }
        field_info_map = {field["name"]: field for field in input_fields_info}
        (
            struct_fields_data,
            struct_info_map,
            struct_sub_fields_data,
            struct_sub_field_info,
            input_struct_field_info,
        ) = Prepare._setup_struct_data_structures(struct_fields_info)
        if enable_dynamic:
            d_field = schema_types.FieldData(
                field_name=DYNAMIC_FIELD_NAME, is_dynamic=True, type=DataType.JSON
            )
            fields_data[d_field.field_name] = d_field
            # NOTE(review): this stores a FieldData message (not a schema dict)
            # in field_info_map; presumably the dynamic field name never appears
            # as an entity key, so the dict-style ``.get`` lookups below are not
            # hit for it — confirm
            field_info_map[d_field.field_name] = d_field
        try:
            for entity in entities:
                if not isinstance(entity, Dict):
                    msg = f"expected Dict, got '{type(entity).__name__}'"
                    raise TypeError(msg)
                for k, v in entity.items():
                    # keys matching neither a regular nor a struct field are
                    # only legal when dynamic fields are enabled
                    if k not in fields_data and k not in struct_fields_data:
                        if k in function_output_field_names:
                            raise DataNotMatchException(
                                message=ExceptionsMessage.InsertUnexpectedFunctionOutputField % k
                            )
                        if not enable_dynamic:
                            raise DataNotMatchException(
                                message=ExceptionsMessage.InsertUnexpectedField % k
                            )
                    if k in fields_data:
                        field_info, field_data = field_info_map[k], fields_data[k]
                        # nullable/defaulted fields track per-row validity flags
                        if field_info.get("nullable", False) or field_info.get(
                            "default_value", None
                        ):
                            field_data.valid_data.append(v is not None)
                        entity_helper.pack_field_value_to_field_data(v, field_data, field_info)
                    elif k in struct_fields_data:
                        # Array of structs format
                        try:
                            Prepare._process_struct_field(
                                k,
                                v,
                                struct_info_map[k],
                                struct_sub_field_info,
                                struct_sub_fields_data,
                            )
                        except (TypeError, ValueError) as e:
                            raise DataNotMatchException(
                                message=f"{ExceptionsMessage.FieldDataInconsistent % (k, 'struct array', type(v))} Detail: {e!s}"
                            ) from e
                # fields absent from this row: pack None for nullable/defaulted
                # fields, otherwise reject the row
                for field in input_fields_info:
                    key = field["name"]
                    if key in entity:
                        continue
                    field_info, field_data = field_info_map[key], fields_data[key]
                    if field_info.get("nullable", False) or field_info.get("default_value", None):
                        field_data.valid_data.append(False)
                        entity_helper.pack_field_value_to_field_data(None, field_data, field_info)
                    else:
                        raise DataNotMatchException(
                            message=ExceptionsMessage.InsertMissedField % key
                        )
                # leftover keys belong to the dynamic JSON field
                json_dict = {
                    k: v
                    for k, v in entity.items()
                    if k not in fields_data and k not in struct_fields_data and enable_dynamic
                }
                if enable_dynamic:
                    json_value = entity_helper.convert_to_json(json_dict)
                    d_field.scalars.json_data.data.append(json_value)
        except (TypeError, ValueError) as e:
            raise DataNotMatchException(message=ExceptionsMessage.DataTypeInConsistent) from e
        # reconstruct the struct array fields data
        for struct in input_struct_field_info:
            struct_name = struct["name"]
            struct_field_data = struct_fields_data[struct_name]
            for field_info in struct["fields"]:
                # Use two-level map to get the correct sub-field data
                field_name = field_info["name"]
                struct_field_data.struct_arrays.fields.append(
                    struct_sub_fields_data[struct_name][field_name]
                )
        request.fields_data.extend(fields_data.values())
        request.fields_data.extend(struct_fields_data.values())
        # every input field, every struct field, plus the dynamic field if any
        expected_num_input_fields = (
            len(input_fields_info) + len(input_struct_field_info) + (1 if enable_dynamic else 0)
        )
        if len(request.fields_data) != expected_num_input_fields:
            msg = f"{ExceptionsMessage.FieldsNumInconsistent}, expected {expected_num_input_fields} fields, got {len(request.fields_data)}"
            raise ParamError(message=msg)
        return request
    @staticmethod
    def _parse_upsert_row_request(
        request: Union[milvus_types.InsertRequest, milvus_types.UpsertRequest],
        fields_info: List[Dict],
        struct_fields_info: List[Dict],
        enable_dynamic: bool,
        entities: List,
        partial_update: bool = False,
    ):
        """Pack row-oriented ``entities`` into ``request.fields_data`` (upsert path).

        Like ``_parse_row_request`` but upsert-specific: the primary key is
        always an input field, per-field row counts are tracked so a partial
        update may omit fields entirely (as long as every provided field has a
        consistent row count), and struct fields are rejected for partial
        updates.

        Raises:
            ParamError: Struct fields with ``partial_update``, dynamic-field
                misuse, or a field-count mismatch.
            DataNotMatchException: Unexpected/missing fields, inconsistent
                per-field row counts, or type mismatch.
        """
        # For partial update, struct fields are not supported
        if partial_update and struct_fields_info:
            raise ParamError(message="Struct fields are not supported in partial update")
        input_fields_info = [
            field for field in fields_info if Prepare._is_input_field(field, is_upsert=True)
        ]
        function_output_field_names = Prepare._function_output_field_names(fields_info)
        fields_data = {
            field["name"]: schema_types.FieldData(field_name=field["name"], type=field["type"])
            for field in input_fields_info
        }
        field_info_map = {field["name"]: field for field in input_fields_info}
        # per-field row counters; used to validate/prune partial updates below
        field_len = {field["name"]: 0 for field in input_fields_info}
        # Use common struct data setup (only if not partial update)
        if partial_update:
            struct_fields_data = {}
            struct_info_map = {}
            struct_sub_fields_data = {}
            struct_sub_field_info = {}
            input_struct_field_info = []
        else:
            (
                struct_fields_data,
                struct_info_map,
                struct_sub_fields_data,
                struct_sub_field_info,
                input_struct_field_info,
            ) = Prepare._setup_struct_data_structures(struct_fields_info)
        if enable_dynamic:
            d_field = schema_types.FieldData(
                field_name=DYNAMIC_FIELD_NAME, is_dynamic=True, type=DataType.JSON
            )
            fields_data[d_field.field_name] = d_field
            # NOTE(review): stores a FieldData message (not a schema dict) in
            # field_info_map; presumably the dynamic field name never appears
            # as an entity key, so the dict-style lookups below skip it — confirm
            field_info_map[d_field.field_name] = d_field
            field_len[DYNAMIC_FIELD_NAME] = 0
        try:
            for entity in entities:
                if not isinstance(entity, Dict):
                    msg = f"expected Dict, got '{type(entity).__name__}'"
                    raise TypeError(msg)
                for k, v in entity.items():
                    # keys matching neither a regular nor a struct field are
                    # only legal when dynamic fields are enabled
                    if k not in fields_data and k not in struct_fields_data:
                        if k in function_output_field_names:
                            raise DataNotMatchException(
                                message=ExceptionsMessage.InsertUnexpectedFunctionOutputField % k
                            )
                        if not enable_dynamic:
                            raise DataNotMatchException(
                                message=ExceptionsMessage.InsertUnexpectedField % k
                            )
                    if k in fields_data:
                        field_info, field_data = field_info_map[k], fields_data[k]
                        # nullable/defaulted fields track per-row validity flags
                        if field_info.get("nullable", False) or field_info.get(
                            "default_value", None
                        ):
                            field_data.valid_data.append(v is not None)
                        entity_helper.pack_field_value_to_field_data(v, field_data, field_info)
                        field_len[k] += 1
                    elif k in struct_fields_data:
                        # Handle struct field (array of structs)
                        try:
                            Prepare._process_struct_field(
                                k,
                                v,
                                struct_info_map[k],
                                struct_sub_field_info,
                                struct_sub_fields_data,
                            )
                        except (TypeError, ValueError) as e:
                            raise DataNotMatchException(
                                message=f"{ExceptionsMessage.FieldDataInconsistent % (k, 'struct array', type(v))} Detail: {e!s}"
                            ) from e
                for field in input_fields_info:
                    key = field["name"]
                    if key in entity:
                        continue
                    # Skip missing field validation for partial updates
                    # Also skip set null value or default value for partial updates,
                    # in case of field is updated to null
                    if partial_update:
                        continue
                    field_info, field_data = field_info_map[key], fields_data[key]
                    if field_info.get("nullable", False) or field_info.get("default_value", None):
                        field_data.valid_data.append(False)
                        field_len[key] += 1
                        entity_helper.pack_field_value_to_field_data(None, field_data, field_info)
                    else:
                        raise DataNotMatchException(
                            message=ExceptionsMessage.InsertMissedField % key
                        )
                # leftover keys belong to the dynamic JSON field
                json_dict = {
                    k: v
                    for k, v in entity.items()
                    if k not in fields_data and k not in struct_fields_data and enable_dynamic
                }
                if enable_dynamic:
                    json_value = entity_helper.convert_to_json(json_dict)
                    d_field.scalars.json_data.data.append(json_value)
                    field_len[DYNAMIC_FIELD_NAME] += 1
        except (TypeError, ValueError) as e:
            raise DataNotMatchException(message=ExceptionsMessage.DataTypeInconsistent) from e
        if partial_update:
            # cause partial_update won't set null for missing fields,
            # so the field_len must be the same
            row_counts = {v for v in field_len.values() if v > 0}
            if len(row_counts) > 1:
                counts = list(row_counts)
                raise DataNotMatchException(
                    message=ExceptionsMessage.InsertFieldsLenInconsistent % (counts[0], counts[1])
                )
            # drop fields the caller never provided
            fields_data = {k: v for k, v in fields_data.items() if field_len[k] > 0}
        request.fields_data.extend(fields_data.values())
        if struct_fields_data:
            # reconstruct the struct array fields data (same as in insert)
            for struct in input_struct_field_info:
                struct_name = struct["name"]
                struct_field_data = struct_fields_data[struct_name]
                for field_info in struct["fields"]:
                    # Use two-level map to get the correct sub-field data
                    field_name = field_info["name"]
                    struct_field_data.struct_arrays.fields.append(
                        struct_sub_fields_data[struct_name][field_name]
                    )
            request.fields_data.extend(struct_fields_data.values())
        # dynamic mode forbids explicitly-named dynamic fields in entities
        for _, field in enumerate(input_fields_info):
            is_dynamic = False
            field_name = field["name"]
            if field.get("is_dynamic", False):
                is_dynamic = True
            for j, entity in enumerate(entities):
                if is_dynamic and field_name in entity:
                    raise ParamError(
                        message=f"dynamic field enabled, {field_name} shouldn't in entities[{j}]"
                    )
        # Include struct fields in expected count (if not partial update)
        struct_field_count = len(input_struct_field_info) if not partial_update else 0
        expected_num_input_fields = (
            len(input_fields_info) + struct_field_count + (1 if enable_dynamic else 0)
        )
        if not partial_update and len(request.fields_data) != expected_num_input_fields:
            msg = f"{ExceptionsMessage.FieldsNumInconsistent}, expected {expected_num_input_fields} fields, got {len(request.fields_data)}"
            raise ParamError(message=msg)
        return request
@classmethod
def row_insert_param(
cls,
collection_name: str,
entities: List,
partition_name: str,
fields_info: Dict,
struct_fields_info: Optional[Dict] = None,
schema_timestamp: int = 0,
enable_dynamic: bool = False,
):
if not fields_info:
raise ParamError(message="Missing collection meta to validate entities")
# insert_request.hash_keys won't be filled in client.
p_name = partition_name if isinstance(partition_name, str) else ""
request = milvus_types.InsertRequest(
collection_name=collection_name,
partition_name=p_name,
num_rows=len(entities),
schema_timestamp=schema_timestamp,
)
return cls._parse_row_request(
request, fields_info, struct_fields_info, enable_dynamic, entities
)
@classmethod
def row_upsert_param(
cls,
collection_name: str,
entities: List,
partition_name: str,
fields_info: Any,
struct_fields_info: Any = None,
enable_dynamic: bool = False,
schema_timestamp: int = 0,
partial_update: bool = False,
):
if not fields_info:
raise ParamError(message="Missing collection meta to validate entities")
# upsert_request.hash_keys won't be filled in client.
p_name = partition_name if isinstance(partition_name, str) else ""
request = milvus_types.UpsertRequest(
collection_name=collection_name,
partition_name=p_name,
num_rows=len(entities),
schema_timestamp=schema_timestamp,
partial_update=partial_update,
)
return cls._parse_upsert_row_request(
request, fields_info, struct_fields_info, enable_dynamic, entities, partial_update
)
@staticmethod
def _pre_insert_batch_check(
entities: List,
fields_info: Any,
):
for entity in entities:
if (
entity.get("name") is None
or entity.get("values") is None
or entity.get("type") is None
):
raise ParamError(
message="Missing param in entities, a field must have type, name and values"
)
if not fields_info:
raise ParamError(message="Missing collection meta to validate entities")
location, primary_key_loc, _ = traverse_info(fields_info)
# though impossible from sdk
if primary_key_loc is None:
raise ParamError(message="primary key not found")
expected_num_input_fields = Prepare._num_input_fields(fields_info, is_upsert=False)
if len(entities) != expected_num_input_fields:
msg = f"expected number of fields: {expected_num_input_fields}, actual number of fields in entities: {len(entities)}"
raise ParamError(message=msg)
return location
@staticmethod
def _pre_upsert_batch_check(
entities: List,
fields_info: Any,
partial_update: bool = False,
):
for entity in entities:
if (
entity.get("name") is None
or entity.get("values") is None
or entity.get("type") is None
):
raise ParamError(
message="Missing param in entities, a field must have type, name and values"
)
if not fields_info:
raise ParamError(message="Missing collection meta to validate entities")
location, primary_key_loc = traverse_upsert_info(fields_info)
# though impossible from sdk
if primary_key_loc is None:
raise ParamError(message="primary key not found")
# Skip field count validation for partial updates
if not partial_update:
expected_num_input_fields = Prepare._num_input_fields(fields_info, is_upsert=True)
if len(entities) != expected_num_input_fields:
msg = f"expected number of fields: {expected_num_input_fields}, actual number of fields in entities: {len(entities)}"
raise ParamError(message=msg)
return location
@staticmethod
def _parse_batch_request(
request: Union[milvus_types.InsertRequest, milvus_types.UpsertRequest],
entities: List,
fields_info: Any,
location: Dict,
):
pre_field_size = 0
try:
for entity in entities:
latest_field_size = entity_helper.get_input_num_rows(entity.get("values"))
if latest_field_size != 0:
if pre_field_size not in (0, latest_field_size):
raise ParamError(
message=(
f"Field data size misaligned for field [{entity.get('name')}] ",
f"got size=[{latest_field_size}] ",
f"alignment size=[{pre_field_size}]",
)
)
pre_field_size = latest_field_size
if pre_field_size == 0:
raise ParamError(message=ExceptionsMessage.NumberRowsInvalid)
request.num_rows = pre_field_size
for entity in entities:
field_name = entity.get("name")
field_data = entity_helper.entity_to_field_data(
entity, fields_info[location[field_name]], request.num_rows
)
request.fields_data.append(field_data)
except (TypeError, ValueError) as e:
raise DataNotMatchException(message=ExceptionsMessage.DataTypeInconsistent) from e
if pre_field_size == 0:
raise ParamError(message=ExceptionsMessage.NumberRowsInvalid)
request.num_rows = pre_field_size
return request
@classmethod
def batch_insert_param(
cls,
collection_name: str,
entities: List,
partition_name: str,
fields_info: Any,
):
location = cls._pre_insert_batch_check(entities, fields_info)
tag = partition_name if isinstance(partition_name, str) else ""
request = milvus_types.InsertRequest(collection_name=collection_name, partition_name=tag)
return cls._parse_batch_request(
request,
entities,
fields_info,
location,
)
@classmethod
def batch_upsert_param(
cls,
collection_name: str,
entities: List,
partition_name: str,
fields_info: Any,
partial_update: bool = False,
):
location = cls._pre_upsert_batch_check(entities, fields_info, partial_update)
tag = partition_name if isinstance(partition_name, str) else ""
request = milvus_types.UpsertRequest(
collection_name=collection_name,
partition_name=tag,
partial_update=partial_update,
)
return cls._parse_batch_request(request, entities, fields_info, location)
@classmethod
def delete_request(
cls,
collection_name: str,
filter: str,
partition_name: Optional[str] = None,
consistency_level: Optional[Union[int, str]] = None,
**kwargs,
):
check.validate_strs(
collection_name=collection_name,
filter=filter,
)
check.validate_nullable_strs(partition_name=partition_name)
return milvus_types.DeleteRequest(
collection_name=collection_name,
partition_name=partition_name,
expr=filter,
consistency_level=get_consistency_level(consistency_level),
expr_template_values=cls.prepare_expression_template(kwargs.get("expr_params", {})),
)
    @classmethod
    def _prepare_placeholder_str(cls, data: Any, is_embedding_list: bool = False):
        """Serialize query vectors into a PlaceholderGroup byte string.

        The placeholder type is chosen from the data's shape: sparse matrices,
        numpy arrays (dispatched on dtype), raw bytes (binary vectors), strings
        (text search), or plain Python float lists (dense float vectors).
        ``is_embedding_list`` switches numpy dtypes to their EmbList variants.
        """
        # sparse vector
        if entity_helper.entity_is_sparse_matrix(data):
            pl_type = PlaceholderType.SparseFloatVector
            pl_values = entity_helper.sparse_rows_to_proto(data).contents
        elif isinstance(data[0], np.ndarray):
            dtype = data[0].dtype
            if dtype == "bfloat16":
                pl_type = (
                    PlaceholderType.BFLOAT16_VECTOR
                    if not is_embedding_list
                    else PlaceholderType.EmbListBFloat16Vector
                )
                pl_values = (array.tobytes() for array in data)
            elif dtype == "float16":
                pl_type = (
                    PlaceholderType.FLOAT16_VECTOR
                    if not is_embedding_list
                    else PlaceholderType.EmbListFloat16Vector
                )
                pl_values = (array.tobytes() for array in data)
            elif dtype in ("float32", "float64"):
                # float64 queries are narrowed to float32 wire format
                pl_type = (
                    PlaceholderType.FloatVector
                    if not is_embedding_list
                    else PlaceholderType.EmbListFloatVector
                )
                pl_values = (blob.vector_float_to_bytes(entity) for entity in data)
            elif dtype == "int8":
                pl_type = (
                    PlaceholderType.Int8Vector
                    if not is_embedding_list
                    else PlaceholderType.EmbListInt8Vector
                )
                pl_values = (array.tobytes() for array in data)
            elif dtype == "byte":
                # NOTE(review): numpy aliases dtype "byte" to int8, which the
                # branch above already matches — this branch looks unreachable;
                # confirm before relying on it
                pl_type = PlaceholderType.BinaryVector
                pl_values = data
            else:
                err_msg = f"unsupported data type: {dtype}"
                raise ParamError(message=err_msg)
        elif isinstance(data[0], bytes):
            pl_type = PlaceholderType.BinaryVector
            pl_values = data  # data is already a list of bytes
        elif isinstance(data[0], str):
            pl_type = PlaceholderType.VARCHAR
            pl_values = (value.encode("utf-8") for value in data)
        else:
            # fallback: assume a list of float lists (dense float vectors)
            pl_type = PlaceholderType.FloatVector
            pl_values = (blob.vector_float_to_bytes(entity) for entity in data)
        pl = common_types.PlaceholderValue(tag="$0", type=pl_type, values=pl_values)
        return common_types.PlaceholderGroup.SerializeToString(
            common_types.PlaceholderGroup(placeholders=[pl])
        )
    @classmethod
    def prepare_expression_template(cls, values: Dict) -> Any:
        """Convert expression template params into TemplateValue messages.

        Scalars map to the matching TemplateValue oneof; lists become
        TemplateArrayValue, typed by their first element (mixed-type lists are
        treated as JSON). Raises ParamError on unsupported types.
        """
        def all_elements_same_type(lst: List):
            # homogeneity check against the first element's type
            return all(isinstance(item, type(lst[0])) for item in lst)
        def add_array_data(v: List) -> schema_types.TemplateArrayValue:
            """Pack a Python list into a TemplateArrayValue (recursive for nesting)."""
            data = schema_types.TemplateArrayValue()
            if len(v) == 0:
                return data
            element_type = (
                infer_dtype_by_scalar_data(v[0]) if all_elements_same_type(v) else schema_types.JSON
            )
            if element_type in (schema_types.Bool,):
                data.bool_data.data.extend(v)
                return data
            if element_type in (
                schema_types.Int8,
                schema_types.Int16,
                schema_types.Int32,
                schema_types.Int64,
            ):
                data.long_data.data.extend(v)
                return data
            if element_type in (schema_types.Float, schema_types.Double):
                data.double_data.data.extend(v)
                return data
            if element_type in (schema_types.VarChar, schema_types.String):
                data.string_data.data.extend(v)
                return data
            if element_type in (schema_types.Array,):
                # nested lists recurse one level per element
                for e in v:
                    data.array_data.data.append(add_array_data(e))
                return data
            if element_type in (schema_types.JSON,):
                for e in v:
                    data.json_data.data.append(entity_helper.convert_to_json(e))
                return data
            raise ParamError(message=f"Unsupported element type: {element_type}")
        def add_data(v: Any) -> schema_types.TemplateValue:
            """Pack a single scalar or list value into a TemplateValue."""
            dtype = infer_dtype_by_scalar_data(v)
            data = schema_types.TemplateValue()
            if dtype in (schema_types.Bool,):
                data.bool_val = v
                return data
            if dtype in (
                schema_types.Int8,
                schema_types.Int16,
                schema_types.Int32,
                schema_types.Int64,
            ):
                data.int64_val = v
                return data
            if dtype in (schema_types.Float, schema_types.Double):
                data.float_val = v
                return data
            if dtype in (schema_types.VarChar, schema_types.String):
                data.string_val = v
                return data
            if dtype in (schema_types.Array,):
                data.array_val.CopyFrom(add_array_data(v))
                return data
            raise ParamError(message=f"Unsupported element type: {dtype}")
        expression_template_values = {}
        for k, v in values.items():
            expression_template_values[k] = add_data(v)
        return expression_template_values
    @classmethod
    def search_requests_with_expr(
        cls,
        collection_name: str,
        data: Union[List, utils.SparseMatrixInputType],
        anns_field: str,
        param: Dict,
        limit: int,
        expr: Optional[str] = None,
        partition_names: Optional[List[str]] = None,
        output_fields: Optional[List[str]] = None,
        round_decimal: int = -1,
        ranker: Optional[Union[Function, FunctionScore]] = None,
        **kwargs,
    ) -> milvus_types.SearchRequest:
        """Build a SearchRequest from query vectors and search options.

        Search options arrive through both ``param`` and ``kwargs``; options
        that may appear in either (offset, page_retain_order) must appear in
        only one. Returns the assembled request; raises ParamError on invalid
        or duplicated options.
        """
        use_default_consistency = ts_utils.construct_guarantee_ts(collection_name, kwargs)
        ignore_growing = param.get("ignore_growing", False) or kwargs.get("ignore_growing", False)
        params = param.get("params", {})
        if not isinstance(params, dict):
            raise ParamError(message=f"Search params must be a dict, got {type(params)}")
        # page_retain_order may come from either source, but not both
        if PAGE_RETAIN_ORDER_FIELD in kwargs and PAGE_RETAIN_ORDER_FIELD in param:
            raise ParamError(
                message="Provide page_retain_order both in kwargs and param, expect just one"
            )
        page_retain_order = kwargs.get(PAGE_RETAIN_ORDER_FIELD) or param.get(
            PAGE_RETAIN_ORDER_FIELD
        )
        if page_retain_order is not None:
            if not isinstance(page_retain_order, bool):
                raise ParamError(
                    message=f"wrong type for page_retain_order, expect bool, got {type(page_retain_order)}"
                )
            params[PAGE_RETAIN_ORDER_FIELD] = page_retain_order
        search_params = {
            "topk": limit,
            "round_decimal": round_decimal,
            "ignore_growing": ignore_growing,
        }
        # parse offset
        if "offset" in kwargs and "offset" in param:
            raise ParamError(message="Provide offset both in kwargs and param, expect just one")
        offset = kwargs.get("offset") or param.get("offset")
        if offset is not None:
            if not isinstance(offset, int):
                raise ParamError(message=f"wrong type for offset, expect int, got {type(offset)}")
            search_params["offset"] = offset
        # iterator bookkeeping (v1 and v2 search iterators)
        is_iterator = kwargs.get(ITERATOR_FIELD)
        if is_iterator is not None:
            search_params[ITERATOR_FIELD] = is_iterator
        collection_id = kwargs.get(COLLECTION_ID)
        if collection_id is not None:
            search_params[COLLECTION_ID] = str(collection_id)
        is_search_iter_v2 = kwargs.get(ITER_SEARCH_V2_KEY)
        if is_search_iter_v2 is not None:
            search_params[ITER_SEARCH_V2_KEY] = is_search_iter_v2
        search_iter_batch_size = kwargs.get(ITER_SEARCH_BATCH_SIZE_KEY)
        if search_iter_batch_size is not None:
            search_params[ITER_SEARCH_BATCH_SIZE_KEY] = search_iter_batch_size
        search_iter_last_bound = kwargs.get(ITER_SEARCH_LAST_BOUND_KEY)
        if search_iter_last_bound is not None:
            search_params[ITER_SEARCH_LAST_BOUND_KEY] = search_iter_last_bound
        search_iter_id = kwargs.get(ITER_SEARCH_ID_KEY)
        if search_iter_id is not None:
            search_params[ITER_SEARCH_ID_KEY] = search_iter_id
        # grouping options
        group_by_field = kwargs.get(GROUP_BY_FIELD)
        if group_by_field is not None:
            search_params[GROUP_BY_FIELD] = group_by_field
        group_size = kwargs.get(GROUP_SIZE)
        if group_size is not None:
            search_params[GROUP_SIZE] = group_size
        strict_group_size = kwargs.get(STRICT_GROUP_SIZE)
        if strict_group_size is not None:
            search_params[STRICT_GROUP_SIZE] = strict_group_size
        # JSON path indexing / cast options
        json_path = kwargs.get(JSON_PATH)
        if json_path is not None:
            search_params[JSON_PATH] = json_path
        json_type = kwargs.get(JSON_TYPE)
        if json_type is not None:
            # map the DataType enum to the server-side cast-type name
            if json_type == DataType.INT8:
                search_params[JSON_TYPE] = "Int8"
            elif json_type == DataType.INT16:
                search_params[JSON_TYPE] = "Int16"
            elif json_type == DataType.INT32:
                search_params[JSON_TYPE] = "Int32"
            elif json_type == DataType.INT64:
                search_params[JSON_TYPE] = "Int64"
            elif json_type == DataType.BOOL:
                search_params[JSON_TYPE] = "Bool"
            elif json_type in (DataType.VARCHAR, DataType.STRING):
                search_params[JSON_TYPE] = "VarChar"
            else:
                raise ParamError(message=f"Unsupported json cast type: {json_type}")
        strict_cast = kwargs.get(STRICT_CAST)
        if strict_cast is not None:
            search_params[STRICT_CAST] = strict_cast
        # options sourced from param
        if param.get("metric_type") is not None:
            search_params["metric_type"] = param["metric_type"]
        if anns_field:
            search_params["anns_field"] = anns_field
        if param.get(HINTS) is not None:
            search_params[HINTS] = param[HINTS]
        if param.get("analyzer_name") is not None:
            search_params["analyzer_name"] = param["analyzer_name"]
        if kwargs.get("timezone") is not None:
            search_params["timezone"] = kwargs["timezone"]
        if kwargs.get("time_fields") is not None:
            search_params["time_fields"] = kwargs["time_fields"]
        search_params["params"] = get_params(param)
        # flatten into the KeyValuePair list the server expects
        req_params = [
            common_types.KeyValuePair(key=str(key), value=utils.dumps(value))
            for key, value in search_params.items()
        ]
        is_embedding_list = kwargs.get(IS_EMBEDDING_LIST, False)
        nq = entity_helper.get_input_num_rows(data)
        plg_str = cls._prepare_placeholder_str(data, is_embedding_list)
        request = milvus_types.SearchRequest(
            collection_name=collection_name,
            partition_names=partition_names,
            output_fields=output_fields,
            guarantee_timestamp=kwargs.get("guarantee_timestamp", 0),
            use_default_consistency=use_default_consistency,
            consistency_level=kwargs.get("consistency_level", 0),
            nq=nq,
            placeholder_group=plg_str,
            dsl_type=common_types.DslType.BoolExprV1,
            search_params=req_params,
            expr_template_values=cls.prepare_expression_template(
                {} if kwargs.get("expr_params") is None else kwargs.get("expr_params")
            ),
        )
        if expr is not None:
            request.dsl = expr
        # optional reranker: single Function or full FunctionScore
        if isinstance(ranker, Function):
            request.function_score.CopyFrom(Prepare.ranker_to_function_score(ranker))
        elif isinstance(ranker, FunctionScore):
            request.function_score.CopyFrom(Prepare.function_score_schema(ranker))
        elif ranker is not None:
            raise ParamError(message="The search ranker must be a Function or FunctionScore.")
        return request
    @classmethod
    def hybrid_search_request_with_ranker(
        cls,
        collection_name: str,
        reqs: List,
        rerank: Union[BaseRanker, Function],
        limit: int,
        partition_names: Optional[List[str]] = None,
        output_fields: Optional[List[str]] = None,
        round_decimal: int = -1,
        **kwargs,
    ) -> milvus_types.HybridSearchRequest:
        """Build a HybridSearchRequest combining sub-requests with a reranker.

        ``rerank`` may be a legacy BaseRanker (serialized into rank_params) or
        a Function (attached as a function_score). Raises ParamError for any
        other rerank type.
        """
        use_default_consistency = ts_utils.construct_guarantee_ts(collection_name, kwargs)
        if rerank is not None and not isinstance(rerank, (Function, BaseRanker)):
            raise ParamError(message="The hybrid search rerank must be a Function or a Ranker.")
        # BaseRanker-style rerank is carried via rank_params key/value pairs
        rerank_param = {}
        if isinstance(rerank, BaseRanker):
            rerank_param = rerank.dict()
        rerank_param["limit"] = limit
        rerank_param["round_decimal"] = round_decimal
        rerank_param["offset"] = kwargs.get("offset", 0)
        request = milvus_types.HybridSearchRequest(
            collection_name=collection_name,
            partition_names=partition_names,
            requests=reqs,
            output_fields=output_fields,
            guarantee_timestamp=kwargs.get("guarantee_timestamp", 0),
            use_default_consistency=use_default_consistency,
            consistency_level=kwargs.get("consistency_level", 0),
        )
        request.rank_params.extend(
            [
                common_types.KeyValuePair(key=str(key), value=utils.dumps(value))
                for key, value in rerank_param.items()
            ]
        )
        # optional grouping options are appended individually when present;
        # note RANK_GROUP_SCORER is passed raw, the others JSON-dumped
        if kwargs.get(RANK_GROUP_SCORER) is not None:
            request.rank_params.extend(
                [
                    common_types.KeyValuePair(
                        key=RANK_GROUP_SCORER, value=kwargs.get(RANK_GROUP_SCORER)
                    )
                ]
            )
        if kwargs.get(GROUP_BY_FIELD) is not None:
            request.rank_params.extend(
                [
                    common_types.KeyValuePair(
                        key=GROUP_BY_FIELD, value=utils.dumps(kwargs.get(GROUP_BY_FIELD))
                    )
                ]
            )
        if kwargs.get(GROUP_SIZE) is not None:
            request.rank_params.extend(
                [
                    common_types.KeyValuePair(
                        key=GROUP_SIZE, value=utils.dumps(kwargs.get(GROUP_SIZE))
                    )
                ]
            )
        if kwargs.get(STRICT_GROUP_SIZE) is not None:
            request.rank_params.extend(
                [
                    common_types.KeyValuePair(
                        key=STRICT_GROUP_SIZE, value=utils.dumps(kwargs.get(STRICT_GROUP_SIZE))
                    )
                ]
            )
        # Function-style rerank rides as a function_score instead of rank_params
        if isinstance(rerank, Function):
            request.function_score.CopyFrom(Prepare.ranker_to_function_score(rerank))
        return request
@staticmethod
def common_kv_value(v: Any) -> str:
if isinstance(v, (dict, list)):
return json.dumps(v)
return str(v)
@staticmethod
def function_score_schema(function_score: FunctionScore) -> schema_types.FunctionScore:
functions = [
schema_types.FunctionSchema(
name=ranker.name,
type=ranker.type,
description=ranker.description,
input_field_names=ranker.input_field_names,
params=[
common_types.KeyValuePair(key=str(k), value=Prepare.common_kv_value(v))
for k, v in ranker.params.items()
],
)
for ranker in function_score.functions
]
return schema_types.FunctionScore(
functions=functions,
params=[
common_types.KeyValuePair(key=str(k), value=Prepare.common_kv_value(v))
for k, v in function_score.params.items()
],
)
@staticmethod
def ranker_to_function_score(ranker: Function) -> schema_types.FunctionScore:
function_score = schema_types.FunctionScore(
functions=[
schema_types.FunctionSchema(
name=ranker.name,
type=ranker.type,
description=ranker.description,
input_field_names=ranker.input_field_names,
)
],
)
for k, v in ranker.params.items():
if isinstance(v, (dict, list)):
kv_pair = common_types.KeyValuePair(key=str(k), value=json.dumps(v))
else:
kv_pair = common_types.KeyValuePair(key=str(k), value=str(v))
function_score.functions[0].params.append(kv_pair)
return function_score
@classmethod
def create_alias_request(cls, collection_name: str, alias: str):
return milvus_types.CreateAliasRequest(collection_name=collection_name, alias=alias)
@classmethod
def drop_alias_request(cls, alias: str):
return milvus_types.DropAliasRequest(alias=alias)
@classmethod
def alter_alias_request(cls, collection_name: str, alias: str):
return milvus_types.AlterAliasRequest(collection_name=collection_name, alias=alias)
@classmethod
def describe_alias_request(cls, alias: str):
return milvus_types.DescribeAliasRequest(alias=alias)
@classmethod
def list_aliases_request(cls, collection_name: str, db_name: str = ""):
return milvus_types.ListAliasesRequest(collection_name=collection_name, db_name=db_name)
    @classmethod
    def create_index_request(cls, collection_name: str, field_name: str, params: Dict, **kwargs):
        """Build a CreateIndexRequest for ``field_name`` on ``collection_name``.

        Each entry of ``params`` is serialized into an ``extra_params``
        KeyValuePair; an optional ``index_name`` kwarg names the index.

        Raises:
            ParamError: if a ``dim`` param is present but not a truthy int.
        """
        index_params = milvus_types.CreateIndexRequest(
            collection_name=collection_name,
            field_name=field_name,
            index_name=kwargs.get("index_name", ""),
        )
        # Non-dict params are silently ignored (no extra_params are attached).
        if isinstance(params, dict):
            for tk, tv in params.items():
                # "dim" must be an int; falsy values (including 0) are rejected.
                if tk == "dim" and (not tv or not isinstance(tv, int)):
                    raise ParamError(message="dim must be of int!")
                # NOTE(review): any falsy value (0, "", False, None) is silently
                # dropped from extra_params — confirm this is intentional.
                if tv:
                    kv_pair = common_types.KeyValuePair(key=str(tk), value=utils.dumps(tv))
                    index_params.extra_params.append(kv_pair)
        return index_params
@classmethod
def alter_index_properties_request(
cls, collection_name: str, index_name: str, properties: dict
):
params = []
for k, v in properties.items():
params.append(common_types.KeyValuePair(key=str(k), value=utils.dumps(v)))
return milvus_types.AlterIndexRequest(
collection_name=collection_name, index_name=index_name, extra_params=params
)
@classmethod
def drop_index_properties_request(
cls, collection_name: str, index_name: str, delete_keys: List[str]
):
return milvus_types.AlterIndexRequest(
collection_name=collection_name, index_name=index_name, delete_keys=delete_keys
)
@classmethod
def describe_index_request(
cls, collection_name: str, index_name: str, timestamp: Optional[int] = None
):
return milvus_types.DescribeIndexRequest(
collection_name=collection_name, index_name=index_name, timestamp=timestamp
)
@classmethod
def get_index_build_progress(cls, collection_name: str, index_name: str):
return milvus_types.GetIndexBuildProgressRequest(
collection_name=collection_name, index_name=index_name
)
@classmethod
def get_index_state_request(cls, collection_name: str, index_name: str):
return milvus_types.GetIndexStateRequest(
collection_name=collection_name, index_name=index_name
)
    @classmethod
    def load_collection(
        cls,
        collection_name: str,
        replica_number: Optional[int] = None,
        **kwargs,
    ):
        """Build a LoadCollectionRequest.

        Optional kwargs, each also accepted under a legacy underscore-prefixed
        alias: ``refresh``, ``resource_groups``, ``load_fields``,
        ``skip_load_dynamic_field``. A ``priority`` kwarg is forwarded as the
        ``load_priority`` load param. The non-underscore key wins when both
        spellings are present.
        """
        check_pass_param(collection_name=collection_name)
        req = milvus_types.LoadCollectionRequest(
            collection_name=collection_name,
        )
        # NOTE(review): a falsy replica_number (0 or None) is silently skipped —
        # confirm 0 is never a meaningful replica count.
        if replica_number:
            check_pass_param(replica_number=replica_number)
            req.replica_number = replica_number
        # Keep underscore key for backward compatibility
        if "refresh" in kwargs or "_refresh" in kwargs:
            refresh = kwargs.get("refresh", kwargs.get("_refresh", False))
            req.refresh = refresh
        if "resource_groups" in kwargs or "_resource_groups" in kwargs:
            resource_groups = kwargs.get("resource_groups", kwargs.get("_resource_groups"))
            req.resource_groups.extend(resource_groups)
        if "load_fields" in kwargs or "_load_fields" in kwargs:
            load_fields = kwargs.get("load_fields", kwargs.get("_load_fields"))
            req.load_fields.extend(load_fields)
        if "skip_load_dynamic_field" in kwargs or "_skip_load_dynamic_field" in kwargs:
            skip_load_dynamic_field = kwargs.get(
                "skip_load_dynamic_field", kwargs.get("_skip_load_dynamic_field", False)
            )
            req.skip_load_dynamic_field = skip_load_dynamic_field
        if "priority" in kwargs:
            priority = kwargs.get("priority")
            req.load_params["load_priority"] = priority
        return req
@classmethod
def release_collection(cls, db_name: str, collection_name: str):
return milvus_types.ReleaseCollectionRequest(
db_name=db_name, collection_name=collection_name
)
    @classmethod
    def load_partitions(
        cls,
        collection_name: str,
        partition_names: List[str],
        replica_number: Optional[int] = None,
        **kwargs,
    ):
        """Build a LoadPartitionsRequest.

        Mirrors :meth:`load_collection`: the same optional kwargs are accepted,
        each also under a legacy underscore-prefixed alias (``refresh``,
        ``resource_groups``, ``load_fields``, ``skip_load_dynamic_field``),
        plus ``priority`` which becomes the ``load_priority`` load param.
        """
        check_pass_param(collection_name=collection_name)
        req = milvus_types.LoadPartitionsRequest(
            collection_name=collection_name,
        )
        # Empty/None partition list is tolerated and simply omitted.
        if partition_names:
            check_pass_param(partition_name_array=partition_names)
            req.partition_names.extend(partition_names)
        # NOTE(review): a falsy replica_number (0 or None) is silently skipped —
        # confirm 0 is never a meaningful replica count.
        if replica_number:
            check_pass_param(replica_number=replica_number)
            req.replica_number = replica_number
        # Keep underscore key for backward compatibility
        if "refresh" in kwargs or "_refresh" in kwargs:
            refresh = kwargs.get("refresh", kwargs.get("_refresh", False))
            req.refresh = refresh
        if "resource_groups" in kwargs or "_resource_groups" in kwargs:
            resource_groups = kwargs.get("resource_groups", kwargs.get("_resource_groups"))
            req.resource_groups.extend(resource_groups)
        if "load_fields" in kwargs or "_load_fields" in kwargs:
            load_fields = kwargs.get("load_fields", kwargs.get("_load_fields"))
            req.load_fields.extend(load_fields)
        if "skip_load_dynamic_field" in kwargs or "_skip_load_dynamic_field" in kwargs:
            skip_load_dynamic_field = kwargs.get(
                "skip_load_dynamic_field", kwargs.get("_skip_load_dynamic_field", False)
            )
            req.skip_load_dynamic_field = skip_load_dynamic_field
        if "priority" in kwargs:
            priority = kwargs.get("priority")
            req.load_params["load_priority"] = priority
        return req
@classmethod
def release_partitions(cls, db_name: str, collection_name: str, partition_names: List[str]):
return milvus_types.ReleasePartitionsRequest(
db_name=db_name, collection_name=collection_name, partition_names=partition_names
)
@classmethod
def get_collection_stats_request(cls, collection_name: str):
return milvus_types.GetCollectionStatisticsRequest(collection_name=collection_name)
@classmethod
def get_persistent_segment_info_request(cls, collection_name: str):
return milvus_types.GetPersistentSegmentInfoRequest(collectionName=collection_name)
@classmethod
def get_flush_state_request(cls, segment_ids: List[int], collection_name: str, flush_ts: int):
return milvus_types.GetFlushStateRequest(
segmentIDs=segment_ids, collection_name=collection_name, flush_ts=flush_ts
)
@classmethod
def get_query_segment_info_request(cls, collection_name: str):
return milvus_types.GetQuerySegmentInfoRequest(collectionName=collection_name)
@classmethod
def flush_param(cls, collection_names: List[str]):
return milvus_types.FlushRequest(collection_names=collection_names)
@classmethod
def drop_index_request(cls, collection_name: str, field_name: str, index_name: str):
return milvus_types.DropIndexRequest(
db_name="",
collection_name=collection_name,
field_name=field_name,
index_name=index_name,
)
@classmethod
def get_partition_stats_request(cls, collection_name: str, partition_name: str):
return milvus_types.GetPartitionStatisticsRequest(
db_name="", collection_name=collection_name, partition_name=partition_name
)
@classmethod
def dummy_request(cls, request_type: Any):
return milvus_types.DummyRequest(request_type=request_type)
    @classmethod
    def retrieve_request(
        cls,
        collection_name: str,
        ids: List[int],
        output_fields: List[str],
        partition_names: List[str],
    ):
        """Build a RetrieveRequest fetching entities by primary key.

        ``ids`` are packed into a ``LongArray`` (int64), so they must be
        integer primary keys — the previous ``List[str]`` annotation did not
        match the actual usage.
        """
        # Wrap the raw keys in the protobuf IDs message (int64 variant).
        ids = schema_types.IDs(int_id=schema_types.LongArray(data=ids))
        return milvus_types.RetrieveRequest(
            db_name="",
            collection_name=collection_name,
            ids=ids,
            output_fields=output_fields,
            partition_names=partition_names,
        )
@classmethod
def query_request(
cls,
collection_name: str,
expr: str,
output_fields: List[str],
partition_names: List[str],
**kwargs,
):
use_default_consistency = ts_utils.construct_guarantee_ts(collection_name, kwargs)
req = milvus_types.QueryRequest(
db_name="",
collection_name=collection_name,
expr=expr,
output_fields=output_fields,
partition_names=partition_names,
guarantee_timestamp=kwargs.get("guarantee_timestamp", 0),
use_default_consistency=use_default_consistency,
consistency_level=kwargs.get("consistency_level", 0),
expr_template_values=cls.prepare_expression_template(kwargs.get("expr_params", {})),
)
collection_id = kwargs.get(COLLECTION_ID)
if collection_id is not None:
req.query_params.append(
common_types.KeyValuePair(key=COLLECTION_ID, value=str(collection_id))
)
limit = kwargs.get("limit")
if limit is not None:
req.query_params.append(common_types.KeyValuePair(key="limit", value=str(limit)))
offset = kwargs.get("offset")
if offset is not None:
req.query_params.append(common_types.KeyValuePair(key="offset", value=str(offset)))
timezone = kwargs.get("timezone")
if timezone is not None:
req.query_params.append(common_types.KeyValuePair(key="timezone", value=timezone))
timefileds = kwargs.get("time_fields")
if timefileds is not None:
req.query_params.append(common_types.KeyValuePair(key="time_fields", value=timefileds))
ignore_growing = kwargs.get("ignore_growing", False)
stop_reduce_for_best = kwargs.get(REDUCE_STOP_FOR_BEST, False)
is_iterator = kwargs.get(ITERATOR_FIELD)
if is_iterator is not None:
req.query_params.append(
common_types.KeyValuePair(key=ITERATOR_FIELD, value=is_iterator)
)
req.query_params.append(
common_types.KeyValuePair(key="ignore_growing", value=str(ignore_growing))
)
req.query_params.append(
common_types.KeyValuePair(key=REDUCE_STOP_FOR_BEST, value=str(stop_reduce_for_best))
)
return req
@classmethod
def load_balance_request(
cls,
collection_name: str,
src_node_id: int,
dst_node_ids: List[int],
sealed_segment_ids: List[int],
):
return milvus_types.LoadBalanceRequest(
collectionName=collection_name,
src_nodeID=src_node_id,
dst_nodeIDs=dst_node_ids,
sealed_segmentIDs=sealed_segment_ids,
)
@classmethod
def manual_compaction(
cls,
collection_name: str,
is_clustering: bool,
is_l0: bool,
collection_id: Optional[int] = None,
target_size: Optional[int] = None,
):
if is_clustering is None or not isinstance(is_clustering, bool):
raise ParamError(message=f"is_clustering value {is_clustering} is illegal")
if is_l0 is None or not isinstance(is_l0, bool):
raise ParamError(message=f"is_l0 value {is_l0} is illegal")
request = milvus_types.ManualCompactionRequest(
collection_name=collection_name,
majorCompaction=is_clustering,
l0Compaction=is_l0,
target_size=target_size,
)
if collection_id is not None:
request.collectionID = collection_id
return request
@classmethod
def get_compaction_state(cls, compaction_id: int):
if compaction_id is None or not isinstance(compaction_id, int):
raise ParamError(message=f"compaction_id value {compaction_id} is illegal")
request = milvus_types.GetCompactionStateRequest()
request.compactionID = compaction_id
return request
@classmethod
def get_compaction_state_with_plans(cls, compaction_id: int):
if compaction_id is None or not isinstance(compaction_id, int):
raise ParamError(message=f"compaction_id value {compaction_id} is illegal")
request = milvus_types.GetCompactionPlansRequest()
request.compactionID = compaction_id
return request
@classmethod
def get_replicas(cls, collection_id: int):
if collection_id is None or not isinstance(collection_id, int):
raise ParamError(message=f"collection_id value {collection_id} is illegal")
return milvus_types.GetReplicasRequest(
collectionID=collection_id,
with_shard_nodes=True,
)
@classmethod
def do_bulk_insert(cls, collection_name: str, partition_name: str, files: list, **kwargs):
channel_names = kwargs.get("channel_names")
req = milvus_types.ImportRequest(
collection_name=collection_name,
partition_name=partition_name,
files=files,
)
if channel_names is not None:
req.channel_names.extend(channel_names)
for k, v in kwargs.items():
if k in ("bucket", "backup", "sep", "nullkey"):
kv_pair = common_types.KeyValuePair(key=str(k), value=str(v))
req.options.append(kv_pair)
return req
@classmethod
def get_bulk_insert_state(cls, task_id: int):
if task_id is None or not isinstance(task_id, int):
msg = f"task_id value {task_id} is not an integer"
raise ParamError(message=msg)
return milvus_types.GetImportStateRequest(task=task_id)
@classmethod
def list_bulk_insert_tasks(cls, limit: int, collection_name: str):
if limit is None or not isinstance(limit, int):
msg = f"limit value {limit} is not an integer"
raise ParamError(message=msg)
return milvus_types.ListImportTasksRequest(
collection_name=collection_name,
limit=limit,
)
@classmethod
def create_user_request(cls, user: str, password: str):
check_pass_param(user=user, password=password)
return milvus_types.CreateCredentialRequest(
username=user, password=base64.b64encode(password.encode("utf-8"))
)
@classmethod
def update_password_request(cls, user: str, old_password: str, new_password: str):
check_pass_param(user=user)
check_pass_param(password=old_password)
check_pass_param(password=new_password)
return milvus_types.UpdateCredentialRequest(
username=user,
oldPassword=base64.b64encode(old_password.encode("utf-8")),
newPassword=base64.b64encode(new_password.encode("utf-8")),
)
@classmethod
def delete_user_request(cls, user: str):
if not isinstance(user, str):
raise ParamError(message=f"invalid user {user}")
return milvus_types.DeleteCredentialRequest(username=user)
@classmethod
def list_usernames_request(cls):
return milvus_types.ListCredUsersRequest()
@classmethod
def create_role_request(cls, role_name: str):
check_pass_param(role_name=role_name)
return milvus_types.CreateRoleRequest(entity=milvus_types.RoleEntity(name=role_name))
@classmethod
def drop_role_request(cls, role_name: str, force_drop: bool = False):
check_pass_param(role_name=role_name)
return milvus_types.DropRoleRequest(role_name=role_name, force_drop=force_drop)
@classmethod
def operate_user_role_request(cls, username: str, role_name: str, operate_user_role_type: Any):
check_pass_param(user=username)
check_pass_param(role_name=role_name)
check_pass_param(operate_user_role_type=operate_user_role_type)
return milvus_types.OperateUserRoleRequest(
username=username, role_name=role_name, type=operate_user_role_type
)
@classmethod
def select_role_request(cls, role_name: str, include_user_info: bool):
if role_name:
check_pass_param(role_name=role_name)
check_pass_param(include_user_info=include_user_info)
return milvus_types.SelectRoleRequest(
role=milvus_types.RoleEntity(name=role_name) if role_name else None,
include_user_info=include_user_info,
)
@classmethod
def select_user_request(cls, username: str, include_role_info: bool):
if username:
check_pass_param(user=username)
check_pass_param(include_role_info=include_role_info)
return milvus_types.SelectUserRequest(
user=milvus_types.UserEntity(name=username) if username else None,
include_role_info=include_role_info,
)
    @classmethod
    def operate_privilege_request(
        cls,
        role_name: str,
        object: Any,
        object_name: str,
        privilege: str,
        db_name: str,
        operate_privilege_type: Any,
    ):
        """Build an OperatePrivilegeRequest (grant/revoke, v1 RBAC API).

        Note: the ``object`` parameter shadows the builtin of the same name,
        but it is part of the public signature and cannot be renamed.
        """
        check_pass_param(role_name=role_name)
        check_pass_param(object=object)
        check_pass_param(object_name=object_name)
        check_pass_param(privilege=privilege)
        check_pass_param(operate_privilege_type=operate_privilege_type)
        return milvus_types.OperatePrivilegeRequest(
            entity=milvus_types.GrantEntity(
                role=milvus_types.RoleEntity(name=role_name),
                object=milvus_types.ObjectEntity(name=object),
                object_name=object_name,
                db_name=db_name,
                grantor=milvus_types.GrantorEntity(
                    privilege=milvus_types.PrivilegeEntity(name=privilege)
                ),
            ),
            type=operate_privilege_type,
        )
@classmethod
def operate_privilege_v2_request(
cls,
role_name: str,
privilege: str,
operate_privilege_type: Any,
db_name: str,
collection_name: str,
):
check_pass_param(
role_name=role_name,
privilege=privilege,
collection_name=collection_name,
operate_privilege_type=operate_privilege_type,
)
if db_name:
check_pass_param(db_name=db_name)
return milvus_types.OperatePrivilegeV2Request(
role=milvus_types.RoleEntity(name=role_name),
grantor=milvus_types.GrantorEntity(
privilege=milvus_types.PrivilegeEntity(name=privilege)
),
type=operate_privilege_type,
db_name=db_name,
collection_name=collection_name,
)
@classmethod
def select_grant_request(cls, role_name: str, object: str, object_name: str, db_name: str):
check_pass_param(role_name=role_name)
if object:
check_pass_param(object=object)
if object_name:
check_pass_param(object_name=object_name)
return milvus_types.SelectGrantRequest(
entity=milvus_types.GrantEntity(
role=milvus_types.RoleEntity(name=role_name),
object=milvus_types.ObjectEntity(name=object) if object else None,
object_name=object_name if object_name else None,
db_name=db_name,
),
)
@classmethod
def get_server_version(cls):
return milvus_types.GetVersionRequest()
@classmethod
def create_resource_group(cls, name: str, **kwargs):
check_pass_param(resource_group_name=name)
return milvus_types.CreateResourceGroupRequest(
resource_group=name,
config=kwargs.get("config"),
)
@classmethod
def update_resource_groups(cls, configs: Mapping[str, ResourceGroupConfig]):
return milvus_types.UpdateResourceGroupsRequest(
resource_groups=configs,
)
@classmethod
def drop_resource_group(cls, name: str):
check_pass_param(resource_group_name=name)
return milvus_types.DropResourceGroupRequest(resource_group=name)
@classmethod
def list_resource_groups(cls):
return milvus_types.ListResourceGroupsRequest()
@classmethod
def describe_resource_group(cls, name: str):
check_pass_param(resource_group_name=name)
return milvus_types.DescribeResourceGroupRequest(resource_group=name)
@classmethod
def transfer_node(cls, source: str, target: str, num_node: int):
check_pass_param(resource_group_name=source)
check_pass_param(resource_group_name=target)
return milvus_types.TransferNodeRequest(
source_resource_group=source, target_resource_group=target, num_node=num_node
)
@classmethod
def transfer_replica(cls, source: str, target: str, collection_name: str, num_replica: int):
check_pass_param(resource_group_name=source)
check_pass_param(resource_group_name=target)
return milvus_types.TransferReplicaRequest(
source_resource_group=source,
target_resource_group=target,
collection_name=collection_name,
num_replica=num_replica,
)
@classmethod
def flush_all_request(cls, db_name: str):
return milvus_types.FlushAllRequest(db_name=db_name)
@classmethod
def get_flush_all_state_request(cls, flush_all_ts: int, db_name: str):
return milvus_types.GetFlushAllStateRequest(flush_all_ts=flush_all_ts, db_name=db_name)
@classmethod
def register_request(cls, user: str, host: str, **kwargs):
reserved = {}
for k, v in kwargs.items():
reserved[k] = v
now = datetime.datetime.now()
this = common_types.ClientInfo(
sdk_type="Python",
sdk_version=__version__,
local_time=now.__str__(),
reserved=reserved,
)
if user is not None:
this.user = user
if host is not None:
this.host = host
return milvus_types.ConnectRequest(
client_info=this,
)
@classmethod
def create_database_req(cls, db_name: str, properties: Optional[dict] = None):
req = milvus_types.CreateDatabaseRequest(db_name=db_name)
if is_legal_collection_properties(properties):
properties = [
common_types.KeyValuePair(key=str(k), value=str(v)) for k, v in properties.items()
]
req.properties.extend(properties)
return req
@classmethod
def drop_database_req(cls, db_name: str):
check_pass_param(db_name=db_name)
return milvus_types.DropDatabaseRequest(db_name=db_name)
@classmethod
def list_database_req(cls):
return milvus_types.ListDatabasesRequest()
@classmethod
def alter_database_properties_req(cls, db_name: str, properties: Dict):
check_pass_param(db_name=db_name)
kvs = [common_types.KeyValuePair(key=k, value=str(v)) for k, v in properties.items()]
return milvus_types.AlterDatabaseRequest(db_name=db_name, properties=kvs)
@classmethod
def drop_database_properties_req(cls, db_name: str, property_keys: List[str]):
check_pass_param(db_name=db_name)
return milvus_types.AlterDatabaseRequest(db_name=db_name, delete_keys=property_keys)
@classmethod
def describe_database_req(cls, db_name: str):
check_pass_param(db_name=db_name)
return milvus_types.DescribeDatabaseRequest(db_name=db_name)
@classmethod
def create_privilege_group_req(cls, privilege_group: str):
check_pass_param(privilege_group=privilege_group)
return milvus_types.CreatePrivilegeGroupRequest(group_name=privilege_group)
@classmethod
def drop_privilege_group_req(cls, privilege_group: str):
check_pass_param(privilege_group=privilege_group)
return milvus_types.DropPrivilegeGroupRequest(group_name=privilege_group)
@classmethod
def list_privilege_groups_req(cls):
return milvus_types.ListPrivilegeGroupsRequest()
@classmethod
def operate_privilege_group_req(
cls, privilege_group: str, privileges: List[str], operate_privilege_group_type: Any
):
check_pass_param(privilege_group=privilege_group)
check_pass_param(privileges=privileges)
check_pass_param(operate_privilege_group_type=operate_privilege_group_type)
return milvus_types.OperatePrivilegeGroupRequest(
group_name=privilege_group,
privileges=[milvus_types.PrivilegeEntity(name=p) for p in privileges],
type=operate_privilege_group_type,
)
    @classmethod
    def run_analyzer(
        cls,
        texts: Union[str, List[str]],
        analyzer_params: Optional[Union[str, Dict]] = None,
        with_hash: bool = False,
        with_detail: bool = False,
        collection_name: Optional[str] = None,
        field_name: Optional[str] = None,
        analyzer_names: Optional[Union[str, List[str]]] = None,
    ):
        """Build a RunAnalyzerRequest tokenizing one or more input texts.

        Args:
            texts: a single string or a list of strings to analyze.
            analyzer_params: analyzer configuration; a dict is JSON-encoded
                before sending, a string is forwarded as-is.
            with_hash: also return token hashes.
            with_detail: also return token details.
            collection_name / field_name: resolve the analyzer from an existing
                collection field instead of explicit params.
            analyzer_names: one or more named analyzers to run.
        """
        req = milvus_types.RunAnalyzerRequest(with_hash=with_hash, with_detail=with_detail)
        # Each input text is sent as UTF-8 bytes in the placeholder list.
        if isinstance(texts, str):
            req.placeholder.append(texts.encode("utf-8"))
        else:
            req.placeholder.extend([text.encode("utf-8") for text in texts])
        if analyzer_params is not None:
            if isinstance(analyzer_params, dict):
                # NOTE(review): Config.EncodeProtocol is presumably "utf-8" — confirm.
                req.analyzer_params = orjson.dumps(analyzer_params).decode(Config.EncodeProtocol)
            else:
                req.analyzer_params = analyzer_params
        if collection_name is not None:
            req.collection_name = collection_name
        if field_name is not None:
            req.field_name = field_name
        # A single name is normalized into a one-element list.
        if analyzer_names is not None:
            if isinstance(analyzer_names, str):
                req.analyzer_names.extend([analyzer_names])
            else:
                req.analyzer_names.extend(analyzer_names)
        return req
@classmethod
def update_replicate_configuration_request(
cls,
clusters: Optional[List[Dict]] = None,
cross_cluster_topology: Optional[List[Dict]] = None,
):
# Validate input parameters
if clusters is None and cross_cluster_topology is None:
msg = "Either 'clusters' or 'cross_cluster_topology' must be provided"
raise ParamError(message=msg)
# Build ReplicateConfiguration from simplified parameters
replicate_configuration = common_pb2.ReplicateConfiguration()
# Add clusters
if clusters is not None:
for cluster_config in clusters:
cluster = common_pb2.MilvusCluster()
if "cluster_id" not in cluster_config:
msg = "cluster_id is required for each cluster"
raise ParamError(message=msg)
cluster.cluster_id = cluster_config["cluster_id"]
if "connection_param" not in cluster_config:
msg = "connection_param is required for each cluster"
raise ParamError(message=msg)
conn_param = cluster_config["connection_param"]
if "uri" not in conn_param:
msg = "uri is required in connection_param"
raise ParamError(message=msg)
cluster.connection_param.uri = conn_param["uri"]
cluster.connection_param.token = conn_param.get("token", "")
if "pchannels" in cluster_config:
cluster.pchannels.extend(cluster_config["pchannels"])
replicate_configuration.clusters.append(cluster)
# Add cross-cluster topology
if cross_cluster_topology is not None:
for topology_config in cross_cluster_topology:
topology = common_pb2.CrossClusterTopology()
if "source_cluster_id" not in topology_config:
msg = "source_cluster_id is required for each topology"
raise ParamError(message=msg)
topology.source_cluster_id = topology_config["source_cluster_id"]
if "target_cluster_id" not in topology_config:
msg = "target_cluster_id is required for each topology"
raise ParamError(message=msg)
topology.target_cluster_id = topology_config["target_cluster_id"]
replicate_configuration.cross_cluster_topology.append(topology)
return milvus_types.UpdateReplicateConfigurationRequest(
replicate_configuration=replicate_configuration
)
| Prepare |
python | numba__llvmlite | llvmlite/tests/test_binding.py | {
"start": 22177,
"end": 24691
} | class ____(BaseTest):
"""
Test calling convention of floating point arguments of RISC-V
using different ABI.
"""
triple = "riscv32-unknown-linux"
def setUp(self):
super().setUp()
llvm.initialize_all_targets()
llvm.initialize_all_asmprinters()
def check_riscv_target(self):
try:
llvm.Target.from_triple(self.triple)
except RuntimeError as e:
if "No available targets are compatible with triple" in str(e):
self.skipTest("RISCV target unsupported by linked LLVM.")
else:
raise e
def riscv_target_machine(self, **kwarg):
lltarget = llvm.Target.from_triple(self.triple)
return lltarget.create_target_machine(**kwarg)
def fpadd_ll_module(self):
f64 = ir.DoubleType()
f32 = ir.FloatType()
fnty = ir.FunctionType(f64, (f32, f64))
module = ir.Module()
func = ir.Function(module, fnty, name="fpadd")
block = func.append_basic_block()
builder = ir.IRBuilder(block)
a, b = func.args
arg0 = builder.fpext(a, f64)
result = builder.fadd(arg0, b)
builder.ret(result)
llmod = llvm.parse_assembly(str(module))
llmod.verify()
return llmod
def break_up_asm(self, asm):
asm_list = []
for line in asm.splitlines():
s_line = line.strip()
if not (s_line.startswith(".") or s_line.startswith("fpadd")
or s_line == ""):
asm_list.append(s_line)
return asm_list
def test_rv32d_ilp32(self):
self.check_riscv_target()
llmod = self.fpadd_ll_module()
target = self.riscv_target_machine(features="+f,+d", abiname="ilp32")
self.assertEqual(self.break_up_asm(target.emit_assembly(llmod)),
riscv_asm_ilp32)
def test_rv32d_ilp32f(self):
self.check_riscv_target()
llmod = self.fpadd_ll_module()
target = self.riscv_target_machine(features="+f,+d", abiname="ilp32f")
self.assertEqual(self.break_up_asm(target.emit_assembly(llmod)),
riscv_asm_ilp32f)
def test_rv32d_ilp32d(self):
self.check_riscv_target()
llmod = self.fpadd_ll_module()
target = self.riscv_target_machine(features="+f,+d", abiname="ilp32d")
self.assertEqual(self.break_up_asm(target.emit_assembly(llmod)),
riscv_asm_ilp32d)
| TestRISCVABI |
python | dask__distributed | distributed/client.py | {
"start": 21129,
"end": 21214
} | class ____(Exception):
"""Custom exception class to exit All(...) early."""
| AllExit |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 13959,
"end": 15501
} | class ____(rv_continuous):
r"""An alpha continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `alpha` ([1]_, [2]_) is:
.. math::
f(x, a) = \frac{1}{x^2 \Phi(a) \sqrt{2\pi}} *
\exp(-\frac{1}{2} (a-1/x)^2)
where :math:`\Phi` is the normal CDF, :math:`x > 0`, and :math:`a > 0`.
`alpha` takes ``a`` as a shape parameter.
%(after_notes)s
References
----------
.. [1] Johnson, Kotz, and Balakrishnan, "Continuous Univariate
Distributions, Volume 1", Second Edition, John Wiley and Sons,
p. 173 (1994).
.. [2] Anthony A. Salvia, "Reliability applications of the Alpha
Distribution", IEEE Transactions on Reliability, Vol. R-34,
No. 3, pp. 251-252 (1985).
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _shape_info(self):
return [_ShapeInfo("a", False, (0, np.inf), (False, False))]
def _pdf(self, x, a):
# alpha.pdf(x, a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2)
return 1.0/(x**2)/_norm_cdf(a)*_norm_pdf(a-1.0/x)
def _logpdf(self, x, a):
return -2*np.log(x) + _norm_logpdf(a-1.0/x) - np.log(_norm_cdf(a))
def _cdf(self, x, a):
return _norm_cdf(a-1.0/x) / _norm_cdf(a)
def _ppf(self, q, a):
return 1.0/np.asarray(a - _norm_ppf(q*_norm_cdf(a)))
def _stats(self, a):
return [np.inf]*2 + [np.nan]*2
alpha = alpha_gen(a=0.0, name='alpha')
| alpha_gen |
python | getsentry__sentry | src/sentry/workflow_engine/endpoints/serializers/action_serializer.py | {
"start": 303,
"end": 470
} | class ____(TypedDict):
id: str
type: str
integrationId: str | None
data: dict
config: dict
status: str
@register(Action)
| ActionSerializerResponse |
python | pytorch__pytorch | test/distributed/test_c10d_ucc.py | {
"start": 4054,
"end": 11405
} | class ____(MultiProcessTestCase):
def _create_process_group_ucc(self):
store = c10d.FileStore(self.file_name, self.world_size)
return c10d.ProcessGroupUCC(store, self.rank, self.world_size)
def setUp(self):
super().setUp()
self._spawn_processes()
def tearDown(self):
super().tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
@requires_ucc()
def test_empty_tensors(self):
pg = self._create_process_group_ucc()
xs = [torch.FloatTensor([])]
fut = pg.broadcast(xs).get_future()
fut.wait()
output = fut.value()
self.assertEqual(0, output[0].numel())
self.assertEqual(xs[0], output[0], exact_dtype=False)
# TODO: add error check testing
def _test_broadcast_basics(self, fn):
pg = self._create_process_group_ucc()
def broadcast(xs, rootRank, rootTensor):
opts = c10d.BroadcastOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
fut = pg.broadcast(xs, opts).get_future()
fut.wait()
return fut.value()
# Every rank is root once
for i in range(self.world_size):
# Run with 1 input tensor
x = fn(torch.tensor([self.rank]))
output = broadcast([x], i, 0)
self.assertEqual(torch.tensor([i]), output[0], exact_dtype=False)
# TODO: UCC currently does not support multi tensor input
# Test overloaded convenience function
x = torch.tensor([self.rank + 1.0])
fut = pg.broadcast(x, root=0).get_future()
fut.wait()
result = fut.value()
self.assertEqual(torch.tensor([1.0]), result[0])
@requires_ucc()
def test_broadcast_basics(self):
self._test_broadcast_basics(lambda t: t.clone())
# TODO: test_broadcast_basics_cuda times out locally
def _test_allreduce_basics(self, fn):
pg = self._create_process_group_ucc()
# Single input tests
tests = simple_reduce_tests(self.rank, self.world_size)
for op, input, expected in tests:
opts = c10d.AllreduceOptions()
opts.reduceOp = op
tensor = fn(input)
fut = pg.allreduce([tensor], opts).get_future()
fut.wait()
result = fut.value()
self.assertEqual(expected, result[0], exact_dtype=False)
# TODO: UCC currently does not support multi tensor input
# Test overloaded convenience function (defaults to using sum)
x = fn(torch.tensor([self.rank + 1.0]))
fut = pg.allreduce(x).get_future()
fut.wait()
result = fut.value()
self.assertEqual(
torch.tensor([float(self.world_size * (self.world_size + 1) / 2)]),
result[0],
)
@requires_ucc()
def test_allreduce_basics(self):
self._test_allreduce_basics(lambda t: t.clone())
# TODO: test_allreduce_basics_cuda times out locally
def _test_allgather_basics(self, fn):
pg = self._create_process_group_ucc()
# TODO: Run with N input tensor per rank; for now, UCC only supports single tensor input so N=1
for n in [1]:
input = [fn(torch.tensor([n * self.rank + i])) for i in range(n)]
output = [
[fn(torch.tensor([-1])) for _ in range(n * self.world_size)]
for _ in range(n)
]
expected_output = [
[fn(torch.tensor([i])) for i in range(n * self.world_size)]
for _ in range(n)
]
fut = pg.allgather(output, input).get_future()
fut.wait()
result = fut.value()
if n == 1:
result = [result]
self.assertEqual(expected_output, result)
def test_allgather_basics(self):
self._test_allgather_basics(lambda t: t.clone())
def _test_reduce_basics(self, fn):
pg = self._create_process_group_ucc()
for op, input, output in simple_reduce_tests(self.rank, self.world_size):
for root in range(self.world_size):
opts = c10d.ReduceOptions()
opts.reduceOp = op
opts.rootRank = root
tmp = fn(input)
fut = pg.reduce([tmp], opts).get_future()
fut.wait()
result = fut.value()
if root == self.rank:
self.assertEqual(output, result[0], exact_dtype=False)
@requires_ucc()
def test_reduce_basics(self):
self._test_reduce_basics(lambda t: t.clone())
# TODO: test_reduce_basics_cuda times out locally
@requires_ucc()
def test_send_recv_all_to_all(self):
pg = self._create_process_group_ucc()
# Preallocate tensors for input/output
inputs = [torch.tensor([self.rank]) for _ in range(self.world_size)]
outputs = [torch.tensor([-1]) for _ in range(self.world_size)]
# Issue sends
send_work = []
for i in range(self.world_size):
if i == self.rank:
continue
send_work.append(pg.send([inputs[i]], i, 0))
# Issue recvs
recv_work = []
for i in range(self.world_size):
if i == self.rank:
continue
recv_work.append(pg.recv([outputs[i]], i, 0))
# Wait for sends to complete
for work in send_work:
work.wait()
self.assertTrue(work.is_completed())
# Wait for recvs to complete
for work in recv_work:
work.wait()
self.assertTrue(work.is_completed())
# Test that every output other than our own contains the respective rank
for i in range(self.world_size):
if i == self.rank:
continue
self.assertEqual(torch.tensor([i]), outputs[i])
# TODO: test_barrier_implies_wait fails with numerical mismatch, will investigate later
@skip_but_pass_in_sandcastle("fails with numerical mismatch, skip for now")
@requires_ucc()
def test_barrier_implies_wait(self):
pg = self._create_process_group_ucc()
# Kick off allreduce operations
size = (100, 100)
num = 16
tensors = [torch.full(size, float(i)) for i in range(num)]
for tensor in tensors:
# Note: leak the returned work handle
pg.allreduce(tensor)
# Barrier should ensure all previous work has completed
pg.barrier().get_future().wait()
for i, tensor in enumerate(tensors):
self.assertEqual(torch.full(size, float(i * self.world_size)), tensor)
@requires_ucc()
def _test_reduce_scatter_base_basics(self, fn):
pg = self._create_process_group_ucc()
n = self.world_size
input = fn(torch.ones(n, n, 10) * (self.rank + 1.0))
output = fn(torch.zeros(10))
expected_output = fn(torch.ones(10) * (n + 1) * n / 2)
fut = pg._reduce_scatter_base(output, input).get_future()
fut.wait()
result = fut.value()
self.assertEqual(result[0], expected_output)
def test_reduce_scatter_base_basics(self):
self._test_reduce_scatter_base_basics(lambda t: t.clone())
| ProcessGroupUCCTest |
python | py-pdf__pypdf | pypdf/annotations/_non_markup_annotations.py | {
"start": 2692,
"end": 3649
} | class ____(AnnotationDictionary):
def __init__(
self,
*,
rect: Union[RectangleObject, tuple[float, float, float, float]],
parent: Optional[DictionaryObject] = None,
open: bool = False,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.update(
{
NameObject("/Subtype"): NameObject("/Popup"),
NameObject("/Rect"): RectangleObject(rect),
NameObject("/Open"): BooleanObject(open),
}
)
if parent:
# This needs to be an indirect object
try:
self[NameObject("/Parent")] = parent.indirect_reference
except AttributeError:
from .._utils import logger_warning # noqa: PLC0415
logger_warning(
"Unregistered Parent object : No Parent field set",
__name__,
)
| Popup |
python | spack__spack | lib/spack/spack/llnl/util/filesystem.py | {
"start": 76581,
"end": 95079
} | class ____(FileList):
"""Sequence of absolute paths to libraries
Provides a few convenience methods to manipulate library paths and get
commonly used compiler flags or names
"""
@property
def libraries(self) -> List[str]:
"""Stable de-duplication of library files.
Returns:
A list of library files
"""
return self.files
@property
def names(self) -> List[str]:
"""Stable de-duplication of library names in the list
>>> l = LibraryList(["/dir1/liba.a", "/dir2/libb.a", "/dir3/liba.so"])
>>> l.names
["a", "b"]
Returns:
A list of library names
"""
names = []
for x in self.basenames:
name = x
if x.startswith("lib"):
name = x[3:]
# Valid extensions include: ['.dylib', '.so', '.a']
# on non Windows platform
# Windows valid library extensions are:
# ['.dll', '.lib']
valid_exts = [".dll", ".lib"] if sys.platform == "win32" else [".dylib", ".so", ".a"]
for ext in valid_exts:
i = name.rfind(ext)
if i != -1:
names.append(name[:i])
break
else:
# No valid extension, should we still include it?
names.append(name)
return list(dedupe(names))
@property
def search_flags(self) -> str:
"""Search flags for the libraries
>>> l = LibraryList(["/dir1/liba.a", "/dir2/libb.a", "/dir1/liba.so"])
>>> l.search_flags
"-L/dir1 -L/dir2"
Returns:
A joined list of search flags
"""
return " ".join(["-L" + x for x in self.directories])
@property
def link_flags(self) -> str:
"""Link flags for the libraries
>>> l = LibraryList(["/dir1/liba.a", "/dir2/libb.a", "/dir1/liba.so"])
>>> l.link_flags
"-la -lb"
Returns:
A joined list of link flags
"""
return " ".join(["-l" + name for name in self.names])
@property
def ld_flags(self) -> str:
"""Search flags + link flags
>>> l = LibraryList(["/dir1/liba.a", "/dir2/libb.a", "/dir1/liba.so"])
>>> l.ld_flags
"-L/dir1 -L/dir2 -la -lb"
Returns:
A joined list of search flags and link flags
"""
return self.search_flags + " " + self.link_flags
def find_system_libraries(libraries: Union[str, List[str]], shared: bool = True) -> LibraryList:
"""Searches the usual system library locations for ``libraries``.
Search order is as follows:
1. ``/lib64``
2. ``/lib``
3. ``/usr/lib64``
4. ``/usr/lib``
5. ``/usr/local/lib64``
6. ``/usr/local/lib``
Accepts any glob characters accepted by :py:func:`fnmatch.fnmatch`:
========== ====================================
Pattern Meaning
========== ====================================
``*`` matches one or more characters
``?`` matches any single character
``[seq]`` matches any character in ``seq``
``[!seq]`` matches any character not in ``seq``
========== ====================================
Parameters:
libraries: Library name(s) to search for
shared: if :data:`True` searches for shared libraries,
otherwise for static. Defaults to :data:`True`.
Returns:
The libraries that have been found
"""
if isinstance(libraries, str):
libraries = [libraries]
elif not isinstance(libraries, collections.abc.Sequence):
message = "{0} expects a string or sequence of strings as the "
message += "first argument [got {1} instead]"
message = message.format(find_system_libraries.__name__, type(libraries))
raise TypeError(message)
libraries_found = LibraryList([])
search_locations = [
"/lib64",
"/lib",
"/usr/lib64",
"/usr/lib",
"/usr/local/lib64",
"/usr/local/lib",
]
for library in libraries:
for root in search_locations:
result = find_libraries(library, root, shared, recursive=True)
if result:
libraries_found += result
break
return libraries_found
def find_libraries(
libraries: Union[str, List[str]],
root: str,
shared: bool = True,
recursive: bool = False,
runtime: bool = True,
max_depth: Optional[int] = None,
) -> LibraryList:
"""Returns an iterable of full paths to libraries found in a root dir.
Accepts any glob characters accepted by :py:func:`fnmatch.fnmatch`:
========== ====================================
Pattern Meaning
========== ====================================
``*`` matches one or more characters
``?`` matches any single character
``[seq]`` matches any character in ``seq``
``[!seq]`` matches any character not in ``seq``
========== ====================================
Parameters:
libraries: Library name(s) to search for
root: The root directory to start searching from
shared: if :data:`True` searches for shared libraries,
otherwise for static. Defaults to :data:`True`.
recursive: if :data:`False` search only root folder,
if :data:`True` descends top-down from the root. Defaults to :data:`False`.
max_depth: if set, don't search below this depth. Cannot be set
if recursive is :data:`False`
runtime: Windows only option, no-op elsewhere. If :data:`True`,
search for runtime shared libs (``.DLL``), otherwise, search
for ``.Lib`` files. If ``shared`` is :data:`False`, this has no meaning.
Defaults to :data:`True`.
Returns:
The libraries that have been found
"""
if isinstance(libraries, str):
libraries = [libraries]
elif not isinstance(libraries, collections.abc.Sequence):
message = "{0} expects a string or sequence of strings as the "
message += "first argument [got {1} instead]"
message = message.format(find_libraries.__name__, type(libraries))
raise TypeError(message)
if sys.platform == "win32":
static_ext = "lib"
# For linking (runtime=False) you need the .lib files regardless of
# whether you are doing a shared or static link
shared_ext = "dll" if runtime else "lib"
else:
# Used on both Linux and macOS
static_ext = "a"
shared_ext = "so"
# Construct the right suffix for the library
if shared:
# Used on both Linux and macOS
suffixes = [shared_ext]
if sys.platform == "darwin":
# Only used on macOS
suffixes.append("dylib")
else:
suffixes = [static_ext]
# List of libraries we are searching with suffixes
libraries = ["{0}.{1}".format(lib, suffix) for lib in libraries for suffix in suffixes]
if not recursive:
if max_depth:
raise ValueError(f"max_depth ({max_depth}) cannot be set if recursive is False")
# If not recursive, look for the libraries directly in root
return LibraryList(find(root, libraries, recursive=False))
# To speedup the search for external packages configured e.g. in /usr,
# perform first non-recursive search in root/lib then in root/lib64 and
# finally search all of root recursively. The search stops when the first
# match is found.
common_lib_dirs = ["lib", "lib64"]
if sys.platform == "win32":
common_lib_dirs.extend(["bin", "Lib"])
for subdir in common_lib_dirs:
dirname = join_path(root, subdir)
if not os.path.isdir(dirname):
continue
found_libs = find(dirname, libraries, False)
if found_libs:
break
else:
found_libs = find(root, libraries, recursive=True, max_depth=max_depth)
return LibraryList(found_libs)
def find_all_shared_libraries(
root: str, recursive: bool = False, runtime: bool = True
) -> LibraryList:
"""Convenience function that returns the list of all shared libraries found
in the directory passed as argument.
See documentation for :py:func:`find_libraries` for more information
"""
return find_libraries("*", root=root, shared=True, recursive=recursive, runtime=runtime)
def find_all_static_libraries(root: str, recursive: bool = False) -> LibraryList:
"""Convenience function that returns the list of all static libraries found
in the directory passed as argument.
See documentation for :py:func:`find_libraries` for more information
"""
return find_libraries("*", root=root, shared=False, recursive=recursive)
def find_all_libraries(root: str, recursive: bool = False) -> LibraryList:
"""Convenience function that returns the list of all libraries found
in the directory passed as argument.
See documentation for :py:func:`find_libraries` for more information
"""
return find_all_shared_libraries(root, recursive=recursive) + find_all_static_libraries(
root, recursive=recursive
)
@system_path_filter
@memoized
def can_access_dir(path):
"""Returns True if the argument is an accessible directory.
Args:
path: path to be tested
Returns:
True if ``path`` is an accessible directory, else False
"""
return os.path.isdir(path) and os.access(path, os.R_OK | os.X_OK)
@system_path_filter
@memoized
def can_write_to_dir(path):
"""Return True if the argument is a directory in which we can write.
Args:
path: path to be tested
Returns:
True if ``path`` is an writeable directory, else False
"""
return os.path.isdir(path) and os.access(path, os.R_OK | os.X_OK | os.W_OK)
@system_path_filter
@memoized
def files_in(*search_paths):
"""Returns all the files in paths passed as arguments.
Caller must ensure that each path in ``search_paths`` is a directory.
Args:
*search_paths: directories to be searched
Returns:
List of (file, full_path) tuples with all the files found.
"""
files = []
for d in filter(can_access_dir, search_paths):
files.extend(
filter(
lambda x: os.path.isfile(x[1]), [(f, os.path.join(d, f)) for f in os.listdir(d)]
)
)
return files
def is_readable_file(file_path):
"""Return True if the path passed as argument is readable"""
return os.path.isfile(file_path) and os.access(file_path, os.R_OK)
@system_path_filter
def search_paths_for_executables(*path_hints):
"""Given a list of path hints returns a list of paths where
to search for an executable.
Args:
*path_hints (list of paths): list of paths taken into
consideration for a search
Returns:
A list containing the real path of every existing directory
in `path_hints` and its `bin` subdirectory if it exists.
"""
executable_paths = []
for path in path_hints:
if not os.path.isdir(path):
continue
path = os.path.abspath(path)
executable_paths.append(path)
bin_dir = os.path.join(path, "bin")
if os.path.isdir(bin_dir):
executable_paths.append(bin_dir)
return executable_paths
@system_path_filter
def search_paths_for_libraries(*path_hints):
"""Given a list of path hints returns a list of paths where
to search for a shared library.
Args:
*path_hints (list of paths): list of paths taken into
consideration for a search
Returns:
A list containing the real path of every existing directory
in `path_hints` and its `lib` and `lib64` subdirectory if it exists.
"""
library_paths = []
for path in path_hints:
if not os.path.isdir(path):
continue
path = os.path.abspath(path)
library_paths.append(path)
lib_dir = os.path.join(path, "lib")
if os.path.isdir(lib_dir):
library_paths.append(lib_dir)
lib64_dir = os.path.join(path, "lib64")
if os.path.isdir(lib64_dir):
library_paths.append(lib64_dir)
return library_paths
@system_path_filter
def partition_path(path, entry=None):
"""
Split the prefixes of the path at the first occurrence of entry and
return a 3-tuple containing a list of the prefixes before the entry, a
string of the prefix ending with the entry, and a list of the prefixes
after the entry.
If the entry is not a node in the path, the result will be the prefix list
followed by an empty string and an empty list.
"""
paths = prefixes(path)
if entry is not None:
# Derive the index of entry within paths, which will correspond to
# the location of the entry in within the path.
try:
sep = os.sep
entries = path.split(sep)
if entries[0].endswith(":"):
# Handle drive letters e.g. C:/ on Windows
entries[0] = entries[0] + sep
i = entries.index(entry)
if "" in entries:
i -= 1
return paths[:i], paths[i], paths[i + 1 :]
except ValueError:
pass
return paths, "", []
@system_path_filter
def prefixes(path):
"""
Returns a list containing the path and its ancestors, top-to-bottom.
The list for an absolute path will not include an ``os.sep`` entry.
For example, assuming ``os.sep`` is ``/``, given path ``/ab/cd/efg``
the resulting paths will be, in order: ``/ab``, ``/ab/cd``, and
``/ab/cd/efg``
The list for a relative path starting ``./`` will not include ``.``.
For example, path ``./hi/jkl/mn`` results in a list with the following
paths, in order: ``./hi``, ``./hi/jkl``, and ``./hi/jkl/mn``.
On Windows, paths will be normalized to use ``/`` and ``/`` will always
be used as the separator instead of ``os.sep``.
Parameters:
path (str): the string used to derive ancestor paths
Returns:
A list containing ancestor paths in order and ending with the path
"""
if not path:
return []
sep = os.sep
parts = path.strip(sep).split(sep)
if path.startswith(sep):
parts.insert(0, sep)
elif parts[0].endswith(":"):
# Handle drive letters e.g. C:/ on Windows
parts[0] = parts[0] + sep
paths = [os.path.join(*parts[: i + 1]) for i in range(len(parts))]
try:
paths.remove(sep)
except ValueError:
pass
try:
paths.remove(".")
except ValueError:
pass
return paths
@system_path_filter
def remove_directory_contents(dir):
"""Remove all contents of a directory."""
if os.path.exists(dir):
for entry in [os.path.join(dir, entry) for entry in os.listdir(dir)]:
if os.path.isfile(entry) or islink(entry):
os.unlink(entry)
else:
shutil.rmtree(entry)
@contextmanager
@system_path_filter
def keep_modification_time(*filenames: str) -> Generator[None, None, None]:
"""
Context manager to keep the modification timestamps of the input files.
Tolerates and has no effect on non-existent files and files that are
deleted by the nested code.
Example::
with keep_modification_time("file1.txt", "file2.txt"):
# do something that modifies file1.txt and file2.txt
Parameters:
*filenames: one or more files that must have their modification
timestamps unchanged
"""
mtimes = {}
for f in filenames:
if os.path.exists(f):
mtimes[f] = os.path.getmtime(f)
yield
for f, mtime in mtimes.items():
if os.path.exists(f):
os.utime(f, (os.path.getatime(f), mtime))
@contextmanager
def temporary_file_position(stream):
orig_pos = stream.tell()
yield
stream.seek(orig_pos)
@contextmanager
def current_file_position(stream: IO, loc: int, relative_to=io.SEEK_CUR):
with temporary_file_position(stream):
stream.seek(loc, relative_to)
yield
@contextmanager
def temporary_dir(
suffix: Optional[str] = None, prefix: Optional[str] = None, dir: Optional[str] = None
):
"""Create a temporary directory and cd's into it. Delete the directory
on exit.
Takes the same arguments as tempfile.mkdtemp()
"""
tmp_dir = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
try:
with working_dir(tmp_dir):
yield tmp_dir
finally:
remove_directory_contents(tmp_dir)
@contextmanager
def edit_in_place_through_temporary_file(file_path: str) -> Generator[str, None, None]:
"""Context manager for modifying ``file_path`` in place, preserving its inode and hardlinks,
for functions or external tools that do not support in-place editing. Notice that this function
is unsafe in that it works with paths instead of a file descriptors, but this is by design,
since we assume the call site will create a new inode at the same path."""
tmp_fd, tmp_path = tempfile.mkstemp(
dir=os.path.dirname(file_path), prefix=f"{os.path.basename(file_path)}."
)
# windows cannot replace a file with open fds, so close since the call site needs to replace.
os.close(tmp_fd)
try:
shutil.copyfile(file_path, tmp_path, follow_symlinks=True)
yield tmp_path
shutil.copyfile(tmp_path, file_path, follow_symlinks=True)
finally:
os.unlink(tmp_path)
def filesummary(path, print_bytes=16) -> Tuple[int, bytes]:
"""Create a small summary of the given file. Does not error
when file does not exist.
Args:
print_bytes (int): Number of bytes to print from start/end of file
Returns:
Tuple of size and byte string containing first n .. last n bytes.
Size is 0 if file cannot be read."""
try:
n = print_bytes
with open(path, "rb") as f:
size = os.fstat(f.fileno()).st_size
if size <= 2 * n:
short_contents = f.read(2 * n)
else:
short_contents = f.read(n)
f.seek(-n, 2)
short_contents += b"..." + f.read(n)
return size, short_contents
except OSError:
return 0, b""
| LibraryList |
python | google__jax | tests/stax_test.py | {
"start": 1696,
"end": 8198
} | class ____(jtu.JaxTestCase):
@jtu.sample_product(shape=[(2, 3), (5,)])
def testRandnInitShape(self, shape):
key = random.PRNGKey(0)
out = stax.randn()(key, shape)
self.assertEqual(out.shape, shape)
@jtu.sample_product(shape=[(2, 3), (2, 3, 4)])
def testGlorotInitShape(self, shape):
key = random.PRNGKey(0)
out = stax.glorot()(key, shape)
self.assertEqual(out.shape, shape)
@jtu.sample_product(
channels=[2, 3],
filter_shape=[(1, 1), (2, 3)],
padding=["SAME", "VALID"],
strides=[None, (2, 1)],
input_shape=[(2, 10, 11, 1)],
)
def testConvShape(self, channels, filter_shape, padding, strides,
input_shape):
init_fun, apply_fun = stax.Conv(channels, filter_shape, strides=strides,
padding=padding)
_CheckShapeAgreement(self, init_fun, apply_fun, input_shape)
@jtu.sample_product(
channels=[2, 3],
filter_shape=[(1, 1), (2, 3), (3, 3)],
padding=["SAME", "VALID"],
strides=[None, (2, 1), (2, 2)],
input_shape=[(2, 10, 11, 1)],
)
def testConvTransposeShape(self, channels, filter_shape, padding, strides,
input_shape):
init_fun, apply_fun = stax.ConvTranspose(channels, filter_shape, # 2D
strides=strides, padding=padding)
_CheckShapeAgreement(self, init_fun, apply_fun, input_shape)
@jtu.sample_product(
channels=[2, 3],
filter_shape=[(1,), (2,), (3,)],
padding=["SAME", "VALID"],
strides=[None, (1,), (2,)],
input_shape=[(2, 10, 1)],
)
def testConv1DTransposeShape(self, channels, filter_shape, padding, strides,
input_shape):
init_fun, apply_fun = stax.Conv1DTranspose(channels, filter_shape,
strides=strides, padding=padding)
_CheckShapeAgreement(self, init_fun, apply_fun, input_shape)
@jtu.sample_product(
out_dim=[3, 4],
input_shape=[(2, 3), (3, 4)],
)
def testDenseShape(self, out_dim, input_shape):
init_fun, apply_fun = stax.Dense(out_dim)
_CheckShapeAgreement(self, init_fun, apply_fun, input_shape)
@jtu.sample_product(
input_shape=[(2, 3), (2, 3, 4)],
nonlinear=["Relu", "Sigmoid", "Elu", "LeakyRelu"],
)
def testNonlinearShape(self, input_shape, nonlinear):
init_fun, apply_fun = getattr(stax, nonlinear)
_CheckShapeAgreement(self, init_fun, apply_fun, input_shape)
@jtu.sample_product(
window_shape=[(1, 1), (2, 3)],
padding=["VALID"],
strides=[None, (2, 1)],
input_shape=[(2, 5, 6, 4)],
max_pool=[False, True],
spec=["NHWC", "NCHW", "WHNC", "WHCN"],
)
def testPoolingShape(self, window_shape, padding, strides, input_shape,
max_pool, spec):
layer = stax.MaxPool if max_pool else stax.AvgPool
init_fun, apply_fun = layer(window_shape, padding=padding, strides=strides,
spec=spec)
_CheckShapeAgreement(self, init_fun, apply_fun, input_shape)
@jtu.sample_product(input_shape=[(2, 3), (2, 3, 4)])
def testFlattenShape(self, input_shape):
init_fun, apply_fun = stax.Flatten
_CheckShapeAgreement(self, init_fun, apply_fun, input_shape)
@jtu.sample_product(
input_shape=[(2, 5, 6, 1)],
spec=[
[stax.Conv(3, (2, 2))],
[stax.Conv(3, (2, 2)), stax.Flatten, stax.Dense(4)],
],
)
def testSerialComposeLayersShape(self, input_shape, spec):
init_fun, apply_fun = stax.serial(*spec)
_CheckShapeAgreement(self, init_fun, apply_fun, input_shape)
@jtu.sample_product(input_shape=[(3, 4), (2, 5, 6, 1)])
def testDropoutShape(self, input_shape):
init_fun, apply_fun = stax.Dropout(0.9)
_CheckShapeAgreement(self, init_fun, apply_fun, input_shape)
@jtu.sample_product(input_shape=[(3, 4), (2, 5, 6, 1)])
def testFanInSum(self, input_shape):
init_fun, apply_fun = stax.FanInSum
_CheckShapeAgreement(self, init_fun, apply_fun, [input_shape, input_shape])
@jtu.sample_product(
[dict(input_shapes=input_shapes, axis=axis)
for input_shapes, axis in [
([(2, 3), (2, 1)], 1),
([(2, 3), (2, 1)], -1),
([(1, 2, 4), (1, 1, 4)], 1),
]
],
)
def testFanInConcat(self, input_shapes, axis):
init_fun, apply_fun = stax.FanInConcat(axis)
_CheckShapeAgreement(self, init_fun, apply_fun, input_shapes)
def testIssue182(self):
key = random.PRNGKey(0)
init_fun, apply_fun = stax.Softmax
input_shape = (10, 3)
inputs = np.arange(30.).astype("float32").reshape(input_shape)
out_shape, params = init_fun(key, input_shape)
out = apply_fun(params, inputs)
assert out_shape == out.shape
assert np.allclose(np.sum(np.asarray(out), -1), 1.)
def testBatchNormNoScaleOrCenter(self):
key = random.PRNGKey(0)
axes = (0, 1, 2)
init_fun, apply_fun = stax.BatchNorm(axis=axes, center=False, scale=False)
input_shape = (4, 5, 6, 7)
inputs = random_inputs(self.rng(), input_shape)
out_shape, params = init_fun(key, input_shape)
out = apply_fun(params, inputs)
means = np.mean(out, axis=(0, 1, 2))
std_devs = np.std(out, axis=(0, 1, 2))
assert np.allclose(means, np.zeros_like(means), atol=1e-4)
assert np.allclose(std_devs, np.ones_like(std_devs), atol=1e-4)
def testBatchNormShapeNHWC(self):
key = random.PRNGKey(0)
init_fun, apply_fun = stax.BatchNorm(axis=(0, 1, 2))
input_shape = (4, 5, 6, 7)
out_shape, params = init_fun(key, input_shape)
inputs = random_inputs(self.rng(), input_shape).astype(params[0].dtype)
out = apply_fun(params, inputs)
self.assertEqual(out_shape, input_shape)
beta, gamma = params
self.assertEqual(beta.shape, (7,))
self.assertEqual(gamma.shape, (7,))
self.assertEqual(out_shape, out.shape)
def testBatchNormShapeNCHW(self):
key = random.PRNGKey(0)
# Regression test for https://github.com/jax-ml/jax/issues/461
init_fun, apply_fun = stax.BatchNorm(axis=(0, 2, 3))
input_shape = (4, 5, 6, 7)
out_shape, params = init_fun(key, input_shape)
inputs = random_inputs(self.rng(), input_shape).astype(params[0].dtype)
out = apply_fun(params, inputs)
self.assertEqual(out_shape, input_shape)
beta, gamma = params
self.assertEqual(beta.shape, (5,))
self.assertEqual(gamma.shape, (5,))
self.assertEqual(out_shape, out.shape)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| StaxTest |
python | django__django | tests/foreign_object/models/article.py | {
"start": 2836,
"end": 3057
} | class ____(models.Model):
article = models.ForeignKey(
Article,
models.CASCADE,
related_name="tags",
related_query_name="tag",
)
name = models.CharField(max_length=255)
| ArticleTag |
python | lxml__lxml | src/lxml/tests/test_xpathevaluator.py | {
"start": 19703,
"end": 20829
} | class ____(HelperTestCase):
"Tests for the EXSLT support in XPath (requires libxslt 1.1.25+)"
NSMAP = dict(
date = "http://exslt.org/dates-and-times",
math = "http://exslt.org/math",
set = "http://exslt.org/sets",
str = "http://exslt.org/strings",
)
def test_xpath_exslt_functions_date(self):
tree = self.parse('<a><b>2009-11-12</b><b>2008-12-11</b></a>')
match_dates = tree.xpath('//b[date:year(string()) = 2009]',
namespaces=self.NSMAP)
self.assertTrue(match_dates, str(match_dates))
self.assertEqual(len(match_dates), 1, str(match_dates))
self.assertEqual(match_dates[0].text, '2009-11-12')
def test_xpath_exslt_functions_strings(self):
tree = self.parse('<a><b>2009-11-12</b><b>2008-12-11</b></a>')
aligned_date = tree.xpath(
'str:align(string(//b[1]), "%s", "center")' % ('-'*20),
namespaces=self.NSMAP)
self.assertTrue(aligned_date, str(aligned_date))
self.assertEqual(aligned_date, '-----2009-11-12-----')
| ETreeXPathExsltTestCase |
python | weaviate__weaviate-python-client | weaviate/collections/backups/async_.py | {
"start": 188,
"end": 271
} | class ____(_CollectionBackupExecutor[ConnectionAsync]):
pass
| _CollectionBackupAsync |
python | huggingface__transformers | src/transformers/generation/logits_process.py | {
"start": 118387,
"end": 120514
} | class ____(LogitsProcessor):
r"""This processor ensures that the EOS token is selected if its probability is greater than the `min_eos_p`.
<Tip warning={true}>
This logits processor is exclusively compatible with
[Bark](https://huggingface.co/docs/transformers/en/model_doc/bark). See the model documentation for examples.
</Tip>
Args:
eos_token_id (`Union[int, list[int], torch.Tensor]`):
The id(s) of the *end-of-sequence* token.
min_eos_p (`float`, *optional*):
Minimum end of speech threshold.
"""
def __init__(self, eos_token_id: int | list[int] | torch.Tensor, min_eos_p: float, device: str = "cpu"):
if not isinstance(eos_token_id, torch.Tensor):
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
eos_token_id = torch.tensor(eos_token_id, device=device)
self.eos_token_id = eos_token_id
if torch.is_floating_point(eos_token_id) or (eos_token_id < 0).any():
raise ValueError(f"`eos_token_id` has to be a list of positive integers, but is {eos_token_id}")
if min_eos_p is not None and min_eos_p <= 0:
raise ValueError(f"`min_eos_p` has to be a positive float, but is {min_eos_p}")
self.min_eos_p = min_eos_p
@add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
scores_processed = scores
if self.min_eos_p:
probs = torch.nn.functional.softmax(scores.float(), dim=-1)
# create scores full of -inf except for the eos_token_id
early_stop_scores = torch.ones_like(scores) * -float("inf")
early_stop_scores[:, self.eos_token_id] = scores[:, self.eos_token_id]
do_early_stop = probs[:, self.eos_token_id] > self.min_eos_p
do_early_stop = torch.any(do_early_stop, dim=1, keepdim=True)
scores_processed = torch.where(do_early_stop, early_stop_scores, scores)
return scores_processed
| BarkEosPrioritizerLogitsProcessor |
python | pytorch__pytorch | torch/_dynamo/variables/misc.py | {
"start": 28606,
"end": 37751
} | class ____(VariableTracker):
"""represents a torch.autograd.Function subclass"""
_nonvar_fields = {
"fn_cls",
*VariableTracker._nonvar_fields,
}
def __init__(self, fn_cls, **kwargs) -> None:
super().__init__(**kwargs)
self.fn_cls = fn_cls
def call_apply(self, tx: "InstructionTranslator", args, kwargs):
requires_grad = False
def visit(vt):
nonlocal requires_grad
if isinstance(vt, variables.TensorVariable):
if vt.requires_grad is not False:
requires_grad = True
if isinstance(vt, variables.NNModuleVariable):
if vt.is_training(tx):
requires_grad = True
VariableTracker.visit(visit, (args, kwargs))
if requires_grad and torch.is_grad_enabled():
if config.capture_autograd_function is False:
warnings.warn(
"The config.capture_autograd_function flag is deprecated, it's now always true."
)
from torch._functorch.autograd_function import (
autograd_function_forward_rewritten,
)
from torch.autograd.function import _is_setup_context_defined
forward_fn = self.fn_cls.forward
is_setup_ctx_defined = _is_setup_context_defined(self.fn_cls.setup_context)
if is_setup_ctx_defined:
# If setup_context is defined, we generate a new forward function which includes
# the original forward and setup_context function, and trace the new forward function.
forward_fn = autograd_function_forward_rewritten(
self.fn_cls.forward, self.fn_cls.setup_context
)
vjp_fn = self.fn_cls.vjp # type: ignore[attr-defined]
if vjp_fn is not torch.autograd.Function.vjp:
unimplemented(
gb_type="Unsupported custom vjp",
context=f"call_apply {self} {args} {kwargs}",
explanation="Dynamo does not support tracing "
"`torch.autograd.Function` subclasses that define "
"a custom `vjp` method.",
hints=[
"Remove the custom `vjp` method if possible.",
"Use standard `backward` instead if applicable.",
*graph_break_hints.SUPPORTABLE,
],
)
jvp_fn = self.fn_cls.jvp # type: ignore[attr-defined]
if jvp_fn is not torch.autograd.Function.jvp:
unimplemented(
gb_type="Unsupported custom jvp",
context=f"call_apply {self} {args} {kwargs}",
explanation="Dynamo does not support tracing "
"`torch.autograd.Function` subclasses that define "
"a custom `jvp` method.",
hints=[
"Remove the custom `jvp` method if possible.",
*graph_break_hints.SUPPORTABLE,
],
)
from .higher_order_ops import AutogradFunctionApplyVariable
source = self.source
if source is None:
source = AttrSource(
tx.import_source(self.fn_cls.__module__), self.fn_cls.__name__
)
val = AutogradFunctionApplyVariable(
forward_fn,
self.fn_cls.backward,
source,
source=AttrSource(source, member="apply"),
).call_function(tx, args, kwargs)
# Inside of AutogradFunctionApplyVariable.call_function, we use sourceless variable wrapping
# the forward function, as we don't want to generate guards for new_forward.__closure__
# if forward is rewritten by autograd_function_forward_rewritten.
# But we still need to generate correct guards for the original forward and setup_context
# functions, so we have to add guards manually.
if self.source:
fwd_src = AttrSource(self.source, "forward")
install_guard(fwd_src.make_guard(GuardBuilder.CLOSURE_MATCH))
if is_setup_ctx_defined:
setup_ctx_src = AttrSource(self.source, "setup_context")
install_guard(setup_ctx_src.make_guard(GuardBuilder.CLOSURE_MATCH))
return val
if self.source:
source = AttrSource(self.source, "forward")
else:
source = None
fn = self.fn_cls.forward
ctx = AutogradFunctionContextVariable.create(tx, args, kwargs)
args = [ctx, *args]
if isinstance(fn, types.FunctionType):
sig = inspect.signature(fn)
if len(args) - 1 == len(sig._parameters):
args = args[1:] # Don't use context
fn_vt = VariableTracker.build(tx, fn, source=source)
return fn_vt.call_function(tx, args, kwargs)
elif isinstance(fn, types.MethodType):
return variables.UserMethodVariable(
fn.__func__,
variables.UserDefinedClassVariable(self.fn_cls),
source=source,
).call_function(tx, args, kwargs)
else:
unimplemented(
gb_type="Non-function or method in subclass of torch.autograd.Function",
context=f"call_apply {self} {args} {kwargs}",
explanation="Dynamo requires the `forward` attribute of a "
"`torch.autograd.Function` subclass to be a standard Python "
f"function or method. Found type `{type(fn).__name__}` instead.",
hints=[
"Ensure the `forward` method is defined as a regular "
"function or instance method."
],
)
def call_backward(self, tx: "InstructionTranslator", args, kwargs):
fn = self.fn_cls.backward
assert type(args[0].value) is torch._dynamo.external_utils.FakeBackwardCFunction
assert isinstance(fn, types.FunctionType)
fn_source = AttrSource(self.source, "backward")
fn_vt = VariableTracker.build(tx, fn, source=fn_source)
return fn_vt.call_function(tx, args, kwargs)
def call_function(self, tx: "InstructionTranslator", args, kwargs):
return AutogradFunctionVariable(self.fn_cls)
def call_method(
self,
tx: "InstructionTranslator",
name,
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
):
from .builder import wrap_fx_proxy
if name == "apply":
if trace_rules.is_callable_allowed(self.fn_cls):
trampoline_autograd_apply = produce_trampoline_autograd_apply(
self.fn_cls
)
return wrap_fx_proxy(
tx=tx,
proxy=tx.output.create_proxy(
"call_function",
trampoline_autograd_apply,
*proxy_args_kwargs(args, kwargs),
),
)
else:
return self.call_apply(tx, args, kwargs)
elif name == "backward":
return self.call_backward(tx, args, kwargs)
else:
source = AttrSource(self.source, name) if self.source is not None else None
try:
obj = inspect.getattr_static(self.fn_cls, name)
except AttributeError:
obj = None
if isinstance(obj, staticmethod):
func = obj.__get__(self.fn_cls)
if source is not None:
return (
trace_rules.lookup(func)
.create_with_source(func, source=source)
.call_function(tx, args, kwargs)
)
else:
return trace_rules.lookup(func)(func).call_function(
tx, args, kwargs
)
elif isinstance(obj, classmethod):
return variables.UserMethodVariable(
obj.__func__, self, source=source
).call_function(tx, args, kwargs)
else:
unimplemented(
gb_type="Unsupported autograd.Function method",
context=f"call_method {self} {name}",
explanation="Dynamo does not support calling the method "
f"`{name}` directly on the `torch.autograd.Function` "
"instance. Supported methods include `apply`, `backward`, "
"static methods, and class methods.",
hints=[
"Ensure the method is decorated with `@staticmethod` "
"or `@classmethod` if it's meant to be called on the class.",
],
)
@dataclasses.dataclass
| AutogradFunctionVariable |
python | pytest-dev__pytest | src/_pytest/_code/code.py | {
"start": 48240,
"end": 51344
} | class ____(TerminalRepr):
lines: Sequence[str]
reprfuncargs: ReprFuncArgs | None
reprlocals: ReprLocals | None
reprfileloc: ReprFileLocation | None
style: TracebackStyle
def _write_entry_lines(self, tw: TerminalWriter) -> None:
"""Write the source code portions of a list of traceback entries with syntax highlighting.
Usually entries are lines like these:
" x = 1"
"> assert x == 2"
"E assert 1 == 2"
This function takes care of rendering the "source" portions of it (the lines without
the "E" prefix) using syntax highlighting, taking care to not highlighting the ">"
character, as doing so might break line continuations.
"""
if not self.lines:
return
if self.style == "value":
# Using tw.write instead of tw.line for testing purposes due to TWMock implementation;
# lines written with TWMock.line and TWMock._write_source cannot be distinguished
# from each other, whereas lines written with TWMock.write are marked with TWMock.WRITE
for line in self.lines:
tw.write(line)
tw.write("\n")
return
# separate indents and source lines that are not failures: we want to
# highlight the code but not the indentation, which may contain markers
# such as "> assert 0"
fail_marker = f"{FormattedExcinfo.fail_marker} "
indent_size = len(fail_marker)
indents: list[str] = []
source_lines: list[str] = []
failure_lines: list[str] = []
for index, line in enumerate(self.lines):
is_failure_line = line.startswith(fail_marker)
if is_failure_line:
# from this point on all lines are considered part of the failure
failure_lines.extend(self.lines[index:])
break
else:
indents.append(line[:indent_size])
source_lines.append(line[indent_size:])
tw._write_source(source_lines, indents)
# failure lines are always completely red and bold
for line in failure_lines:
tw.line(line, bold=True, red=True)
def toterminal(self, tw: TerminalWriter) -> None:
if self.style == "short":
if self.reprfileloc:
self.reprfileloc.toterminal(tw)
self._write_entry_lines(tw)
if self.reprlocals:
self.reprlocals.toterminal(tw, indent=" " * 8)
return
if self.reprfuncargs:
self.reprfuncargs.toterminal(tw)
self._write_entry_lines(tw)
if self.reprlocals:
tw.line("")
self.reprlocals.toterminal(tw)
if self.reprfileloc:
if self.lines:
tw.line("")
self.reprfileloc.toterminal(tw)
def __str__(self) -> str:
return "{}\n{}\n{}".format(
"\n".join(self.lines), self.reprlocals, self.reprfileloc
)
@dataclasses.dataclass(eq=False)
| ReprEntry |
python | rushter__MLAlgorithms | mla/svm/kernerls.py | {
"start": 74,
"end": 214
} | class ____(object):
def __call__(self, x, y):
return np.dot(x, y.T)
def __repr__(self):
return "Linear kernel"
| Linear |
python | openai__openai-python | src/openai/types/beta/realtime/realtime_connect_params.py | {
"start": 212,
"end": 290
} | class ____(TypedDict, total=False):
model: Required[str]
| RealtimeConnectParams |
python | conda__conda | conda/exceptions.py | {
"start": 6406,
"end": 7579
} | class ____(ClobberError):
def __init__(
self,
target_path: PathType,
colliding_dist_being_linked: PackageRecord | str,
colliding_linked_dist: PackageRecord | str,
context: Context,
):
message = dals(
"""
The package '%(colliding_dist_being_linked)s' cannot be installed due to a
path collision for '%(target_path)s'.
This path already exists in the target prefix, and it won't be removed by
an uninstall action in this transaction. The path appears to be coming from
the package '%(colliding_linked_dist)s', which is already installed in the prefix.
"""
)
if context.path_conflict == PathConflict.prevent:
message += (
"If you'd like to proceed anyway, re-run the command with "
"the `--clobber` flag.\n."
)
super().__init__(
message,
context.path_conflict,
target_path=target_path,
colliding_dist_being_linked=colliding_dist_being_linked,
colliding_linked_dist=colliding_linked_dist,
)
| KnownPackageClobberError |
python | pandas-dev__pandas | asv_bench/benchmarks/io/hdf.py | {
"start": 154,
"end": 3302
} | class ____(BaseIO):
def setup(self):
N = 25000
index = Index([f"i-{i}" for i in range(N)], dtype=object)
self.df = DataFrame(
{"float1": np.random.randn(N), "float2": np.random.randn(N)}, index=index
)
self.df_mixed = DataFrame(
{
"float1": np.random.randn(N),
"float2": np.random.randn(N),
"string1": ["foo"] * N,
"bool1": [True] * N,
"int1": np.random.randint(0, N, size=N),
},
index=index,
)
self.df_wide = DataFrame(np.random.randn(N, 100))
self.start_wide = self.df_wide.index[10000]
self.stop_wide = self.df_wide.index[15000]
self.df2 = DataFrame(
{"float1": np.random.randn(N), "float2": np.random.randn(N)},
index=date_range("1/1/2000", periods=N),
)
self.start = self.df2.index[10000]
self.stop = self.df2.index[15000]
self.df_wide2 = DataFrame(
np.random.randn(N, 100), index=date_range("1/1/2000", periods=N)
)
self.df_dc = DataFrame(
np.random.randn(N, 10), columns=[f"C{i:03d}" for i in range(10)]
)
self.fname = "__test__.h5"
self.store = HDFStore(self.fname)
self.store.put("fixed", self.df)
self.store.put("fixed_mixed", self.df_mixed)
self.store.append("table", self.df2)
self.store.append("table_mixed", self.df_mixed)
self.store.append("table_wide", self.df_wide)
self.store.append("table_wide2", self.df_wide2)
def teardown(self):
self.store.close()
self.remove(self.fname)
def time_read_store(self):
self.store.get("fixed")
def time_read_store_mixed(self):
self.store.get("fixed_mixed")
def time_write_store(self):
self.store.put("fixed_write", self.df)
def time_write_store_mixed(self):
self.store.put("fixed_mixed_write", self.df_mixed)
def time_read_store_table_mixed(self):
self.store.select("table_mixed")
def time_write_store_table_mixed(self):
self.store.append("table_mixed_write", self.df_mixed)
def time_read_store_table(self):
self.store.select("table")
def time_write_store_table(self):
self.store.append("table_write", self.df)
def time_read_store_table_wide(self):
self.store.select("table_wide")
def time_write_store_table_wide(self):
self.store.append("table_wide_write", self.df_wide)
def time_write_store_table_dc(self):
self.store.append("table_dc_write", self.df_dc, data_columns=True)
def time_query_store_table_wide(self):
self.store.select(
"table_wide", where="index > self.start_wide and index < self.stop_wide"
)
def time_query_store_table(self):
self.store.select("table", where="index > self.start and index < self.stop")
def time_store_repr(self):
repr(self.store)
def time_store_str(self):
str(self.store)
def time_store_info(self):
self.store.info()
| HDFStoreDataFrame |
python | coleifer__peewee | playhouse/postgres_ext.py | {
"start": 3098,
"end": 3342
} | class ____(_JsonLookupBase):
def __sql__(self, ctx):
return (ctx
.sql(self.node)
.literal('#>' if self._as_json else '#>>')
.sql(Value('{%s}' % ','.join(map(str, self.parts)))))
| JsonPath |
python | PrefectHQ__prefect | tests/utilities/test_hashing.py | {
"start": 2011,
"end": 2928
} | class ____:
def test_hash_objects_handles_unhashable_objects_gracefully(self):
"""Test that unhashable objects return None by default"""
lock = threading.Lock()
result = hash_objects({"data": "hello", "lock": lock})
assert result is None
def test_hash_objects_raises_with_helpful_message(self):
"""Test that unhashable objects raise HashError when raise_on_failure=True"""
lock = threading.Lock()
mock_file = MagicMock()
mock_file.__str__ = lambda _: "<file object>"
with pytest.raises(HashError) as exc:
hash_objects(
{"data": "hello", "lock": lock, "file": mock_file},
raise_on_failure=True,
)
error_msg = str(exc.value)
assert "Unable to create hash" in error_msg
assert "JSON error" in error_msg
assert "Pickle error" in error_msg
| TestHashObjects |
python | django__django | tests/template_tests/test_origin.py | {
"start": 111,
"end": 1111
} | class ____(TestCase):
def setUp(self):
self.engine = Engine(dirs=[TEMPLATE_DIR])
def test_origin_compares_equal(self):
a = self.engine.get_template("index.html")
b = self.engine.get_template("index.html")
self.assertEqual(a.origin, b.origin)
# Use assertIs() to test __eq__/__ne__.
self.assertIs(a.origin == b.origin, True)
self.assertIs(a.origin != b.origin, False)
def test_origin_compares_not_equal(self):
a = self.engine.get_template("first/test.html")
b = self.engine.get_template("second/test.html")
self.assertNotEqual(a.origin, b.origin)
# Use assertIs() to test __eq__/__ne__.
self.assertIs(a.origin == b.origin, False)
self.assertIs(a.origin != b.origin, True)
def test_repr(self):
a = self.engine.get_template("index.html")
name = os.path.join(TEMPLATE_DIR, "index.html")
self.assertEqual(repr(a.origin), "<Origin name=%r>" % name)
| OriginTestCase |
python | pennersr__django-allauth | allauth/socialaccount/providers/saml/views.py | {
"start": 6329,
"end": 7147
} | class ____(SAMLViewMixin, View):
def dispatch(self, request, organization_slug):
provider = self.get_provider(organization_slug)
config = build_saml_config(
self.request, provider.app.settings, organization_slug
)
saml_settings = OneLogin_Saml2_Settings(
settings=config, sp_validation_only=True
)
metadata = saml_settings.get_sp_metadata()
errors = saml_settings.validate_metadata(metadata)
if len(errors) > 0:
resp = JsonResponse({"errors": errors})
resp.status_code = HTTPStatus.INTERNAL_SERVER_ERROR
return resp
return HttpResponse(content=metadata, content_type="text/xml")
metadata = MetadataView.as_view()
@method_decorator(login_not_required, name="dispatch")
| MetadataView |
python | pypa__pip | src/pip/_vendor/idna/codec.py | {
"start": 2880,
"end": 2939
} | class ____(Codec, codecs.StreamWriter):
pass
| StreamWriter |
python | getsentry__sentry | src/sentry/utils/circuit_breaker2.py | {
"start": 1158,
"end": 2319
} | class ____(TypedDict):
# The number of errors within the given time period necessary to trip the breaker
error_limit: int
# The time period, in seconds, over which we're tracking errors
error_limit_window: int
# How long, in seconds, to stay in the BROKEN state (blocking all requests) before entering the
# RECOVERY phase
broken_state_duration: int
# The number of errors within the given time period necessary to trip the breaker while in
# RECOVERY. Will be set automatically to 10% of `error_limit` if not provided.
recovery_error_limit: NotRequired[int]
# The length, in seconds, of each time bucket ("granule") used by the underlying rate limiter -
# effectively the resolution of the time window. Will be set automatically based on
# `error_limit_window` if not provided.
error_limit_window_granularity: NotRequired[int]
# How long, in seconds, to stay in the RECOVERY state (allowing requests but with a stricter
# error limit) before returning to normal operation. Will be set to twice `error_limit_window`
# if not provided.
recovery_duration: NotRequired[int]
| CircuitBreakerConfig |
python | ray-project__ray | rllib/examples/_old_api_stack/models/action_mask_model.py | {
"start": 484,
"end": 2416
} | class ____(TFModelV2):
"""Model that handles simple discrete action masking.
This assumes the outputs are logits for a single Categorical action dist.
Getting this to work with a more complex output (e.g., if the action space
is a tuple of several distributions) is also possible but left as an
exercise to the reader.
"""
def __init__(
self, obs_space, action_space, num_outputs, model_config, name, **kwargs
):
orig_space = getattr(obs_space, "original_space", obs_space)
assert (
isinstance(orig_space, Dict)
and "action_mask" in orig_space.spaces
and "observations" in orig_space.spaces
)
super().__init__(obs_space, action_space, num_outputs, model_config, name)
self.internal_model = FullyConnectedNetwork(
orig_space["observations"],
action_space,
num_outputs,
model_config,
name + "_internal",
)
# disable action masking --> will likely lead to invalid actions
self.no_masking = model_config["custom_model_config"].get("no_masking", False)
def forward(self, input_dict, state, seq_lens):
# Extract the available actions tensor from the observation.
action_mask = input_dict["obs"]["action_mask"]
# Compute the unmasked logits.
logits, _ = self.internal_model({"obs": input_dict["obs"]["observations"]})
# If action masking is disabled, directly return unmasked logits
if self.no_masking:
return logits, state
# Convert action_mask into a [0.0 || -inf]-type mask.
inf_mask = tf.maximum(tf.math.log(action_mask), tf.float32.min)
masked_logits = logits + inf_mask
# Return masked logits.
return masked_logits, state
def value_function(self):
return self.internal_model.value_function()
| ActionMaskModel |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/mapped_collection.py | {
"start": 11817,
"end": 19727
} | class ____(Dict[_KT, _VT]):
"""Base for ORM mapped dictionary classes.
Extends the ``dict`` type with additional methods needed by SQLAlchemy ORM
collection classes. Use of :class:`_orm.KeyFuncDict` is most directly
by using the :func:`.attribute_keyed_dict` or
:func:`.column_keyed_dict` class factories.
:class:`_orm.KeyFuncDict` may also serve as the base for user-defined
custom dictionary classes.
.. versionchanged:: 2.0 Renamed :class:`.MappedCollection` to
:class:`.KeyFuncDict`.
.. seealso::
:func:`_orm.attribute_keyed_dict`
:func:`_orm.column_keyed_dict`
:ref:`orm_dictionary_collection`
:ref:`orm_custom_collection`
"""
def __init__(
self,
keyfunc: Callable[[Any], Any],
*dict_args: Any,
ignore_unpopulated_attribute: bool = False,
) -> None:
"""Create a new collection with keying provided by keyfunc.
keyfunc may be any callable that takes an object and returns an object
for use as a dictionary key.
The keyfunc will be called every time the ORM needs to add a member by
value-only (such as when loading instances from the database) or
remove a member. The usual cautions about dictionary keying apply-
``keyfunc(object)`` should return the same output for the life of the
collection. Keying based on mutable properties can result in
unreachable instances "lost" in the collection.
"""
self.keyfunc = keyfunc
self.ignore_unpopulated_attribute = ignore_unpopulated_attribute
super().__init__(*dict_args)
@classmethod
def _unreduce(
cls,
keyfunc: Callable[[Any], Any],
values: Dict[_KT, _KT],
adapter: Optional[CollectionAdapter] = None,
) -> "KeyFuncDict[_KT, _KT]":
mp: KeyFuncDict[_KT, _KT] = KeyFuncDict(keyfunc)
mp.update(values)
# note that the adapter sets itself up onto this collection
# when its `__setstate__` method is called
return mp
def __reduce__(
self,
) -> Tuple[
Callable[[_KT, _KT], KeyFuncDict[_KT, _KT]],
Tuple[Any, Union[Dict[_KT, _KT], Dict[_KT, _KT]], CollectionAdapter],
]:
return (
KeyFuncDict._unreduce,
(
self.keyfunc,
dict(self),
collection_adapter(self),
),
)
@util.preload_module("sqlalchemy.orm.attributes")
def _raise_for_unpopulated(
self,
value: _KT,
initiator: Union[AttributeEventToken, Literal[None, False]] = None,
*,
warn_only: bool,
) -> None:
mapper = base.instance_state(value).mapper
attributes = util.preloaded.orm_attributes
if not isinstance(initiator, attributes.AttributeEventToken):
relationship = "unknown relationship"
elif initiator.key in mapper.attrs:
relationship = f"{mapper.attrs[initiator.key]}"
else:
relationship = initiator.key
if warn_only:
util.warn(
f"Attribute keyed dictionary value for "
f"attribute '{relationship}' was None; this will raise "
"in a future release. "
f"To skip this assignment entirely, "
f'Set the "ignore_unpopulated_attribute=True" '
f"parameter on the mapped collection factory."
)
else:
raise sa_exc.InvalidRequestError(
"In event triggered from population of "
f"attribute '{relationship}' "
"(potentially from a backref), "
f"can't populate value in KeyFuncDict; "
"dictionary key "
f"derived from {base.instance_str(value)} is not "
f"populated. Ensure appropriate state is set up on "
f"the {base.instance_str(value)} object "
f"before assigning to the {relationship} attribute. "
f"To skip this assignment entirely, "
f'Set the "ignore_unpopulated_attribute=True" '
f"parameter on the mapped collection factory."
)
@collection.appender # type: ignore[untyped-decorator]
@collection.internally_instrumented # type: ignore[untyped-decorator]
def set(
self,
value: _KT,
_sa_initiator: Union[AttributeEventToken, Literal[None, False]] = None,
) -> None:
"""Add an item by value, consulting the keyfunc for the key."""
key = self.keyfunc(value)
if key is base.NO_VALUE:
if not self.ignore_unpopulated_attribute:
self._raise_for_unpopulated(
value, _sa_initiator, warn_only=False
)
else:
return
elif key is Missing:
if not self.ignore_unpopulated_attribute:
self._raise_for_unpopulated(
value, _sa_initiator, warn_only=True
)
key = None
else:
return
self.__setitem__(key, value, _sa_initiator) # type: ignore[call-arg]
@collection.remover # type: ignore[untyped-decorator]
@collection.internally_instrumented # type: ignore[untyped-decorator]
def remove(
self,
value: _KT,
_sa_initiator: Union[AttributeEventToken, Literal[None, False]] = None,
) -> None:
"""Remove an item by value, consulting the keyfunc for the key."""
key = self.keyfunc(value)
if key is base.NO_VALUE:
if not self.ignore_unpopulated_attribute:
self._raise_for_unpopulated(
value, _sa_initiator, warn_only=False
)
return
elif key is Missing:
if not self.ignore_unpopulated_attribute:
self._raise_for_unpopulated(
value, _sa_initiator, warn_only=True
)
key = None
else:
return
# Let self[key] raise if key is not in this collection
# testlib.pragma exempt:__ne__
if self[key] != value:
raise sa_exc.InvalidRequestError(
"Can not remove '%s': collection holds '%s' for key '%s'. "
"Possible cause: is the KeyFuncDict key function "
"based on mutable properties or properties that only obtain "
"values after flush?" % (value, self[key], key)
)
self.__delitem__(key, _sa_initiator) # type: ignore[call-arg]
def _mapped_collection_cls(
keyfunc: Callable[[Any], Any], ignore_unpopulated_attribute: bool
) -> Type[KeyFuncDict[_KT, _KT]]:
class _MKeyfuncMapped(KeyFuncDict[_KT, _KT]):
def __init__(self, *dict_args: Any) -> None:
super().__init__(
keyfunc,
*dict_args,
ignore_unpopulated_attribute=ignore_unpopulated_attribute,
)
return _MKeyfuncMapped
MappedCollection = KeyFuncDict
"""A synonym for :class:`.KeyFuncDict`.
.. versionchanged:: 2.0 Renamed :class:`.MappedCollection` to
:class:`.KeyFuncDict`.
"""
mapped_collection = keyfunc_mapping
"""A synonym for :func:`_orm.keyfunc_mapping`.
.. versionchanged:: 2.0 Renamed :data:`.mapped_collection` to
:func:`_orm.keyfunc_mapping`
"""
attribute_mapped_collection = attribute_keyed_dict
"""A synonym for :func:`_orm.attribute_keyed_dict`.
.. versionchanged:: 2.0 Renamed :data:`.attribute_mapped_collection` to
:func:`_orm.attribute_keyed_dict`
"""
column_mapped_collection = column_keyed_dict
"""A synonym for :func:`_orm.column_keyed_dict.
.. versionchanged:: 2.0 Renamed :func:`.column_mapped_collection` to
:func:`_orm.column_keyed_dict`
"""
| KeyFuncDict |
python | pytorch__pytorch | benchmarks/dynamo/genai_layers/kernels.py | {
"start": 9062,
"end": 11426
} | class ____(BenchmarkKernel):
def __init__(self, script_args):
super().__init__(script_args)
self.available_backends = ["eager", "compiled", "quack", "liger"]
def get_shapes(self) -> tuple[tuple[int, ...], ...]:
return (
(32768, 256),
(32768, 512),
(32768, 1024),
(32768, 2048),
(32768, 4096),
(32768, 8192),
(32768, 16384),
(32768, 32768),
(32768, 65536),
(16384, 131072),
(8192, 262144),
)
def get_memory_bytes(self, args, kwargs) -> int:
# Memory: read dy and y, write ax backward
x, dy = args
M, N = x.shape
return 3 * M * N * x.dtype.itemsize
def eager(self, args, kwargs=None) -> Any:
assert kwargs is None
x, dy = args
y = F.softmax(x, dim=-1)
return lambda: torch.autograd.grad(y, x, grad_outputs=dy, retain_graph=True)
def compiled(self, args, kwargs=None) -> Any:
assert kwargs is None
x, dy = args
compiled_softmax = torch.compile(
lambda x: F.softmax(x, dim=-1), mode=self.compile_mode, fullgraph=True
)
y = compiled_softmax(x)
return lambda: torch.autograd.grad(y, x, grad_outputs=dy, retain_graph=True)
def quack(self, args, kwargs=None) -> Any:
from quack.softmax import softmax
assert kwargs is None
x, dy = args
y = softmax(x)
return lambda: torch.autograd.grad(y, x, grad_outputs=dy, retain_graph=True)
def liger(self, args, kwargs=None) -> Any:
from liger_kernel.transformers.softmax import LigerSoftmax
assert kwargs is None
x, dy = args
softmax = LigerSoftmax().to("cuda")
y = softmax(x)
return lambda: torch.autograd.grad(y, x, grad_outputs=dy, retain_graph=True)
def benchmark(self):
for M, N in self.get_shapes():
print(f"Tensor dimensions: [{M}, {N}]")
torch_dtype = cutlass_torch.dtype(cutlass.BFloat16)
x = 0.1 * torch.randn(
M, N, device="cuda", dtype=torch_dtype, requires_grad=True
)
dy = torch.randn(M, N, device="cuda", dtype=torch_dtype)
self.benchmark_single_shape((x, dy), setting=f"shape: [{M}, {N}]")
| SoftmaxBackward |
python | google__jax | tests/pallas/mosaic_gpu_test.py | {
"start": 213455,
"end": 213559
} | class ____(
ExamplesTest, lowering_semantics=plgpu.LoweringSemantics.Warpgroup
):
...
| ExamplesWGTest |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/execution_api/versions/head/test_task_instances.py | {
"start": 95465,
"end": 97273
} | class ____:
def setup_method(self):
clear_db_runs()
def teardown_method(self):
clear_db_runs()
def test_ti_patch_rendered_map_index(self, client, session, create_task_instance):
"""Test updating rendered_map_index for a task instance."""
ti = create_task_instance(
task_id="test_ti_patch_rendered_map_index",
state=State.RUNNING,
session=session,
)
session.commit()
rendered_map_index = "custom_label_123"
response = client.patch(
f"/execution/task-instances/{ti.id}/rendered-map-index",
json=rendered_map_index,
)
assert response.status_code == 204
assert response.text == ""
session.expire_all()
ti = session.get(TaskInstance, ti.id)
assert ti.rendered_map_index == rendered_map_index
def test_ti_patch_rendered_map_index_not_found(self, client, session):
"""Test 404 error when task instance does not exist."""
fake_id = str(uuid4())
response = client.patch(
f"/execution/task-instances/{fake_id}/rendered-map-index",
json="test",
)
assert response.status_code == 404
def test_ti_patch_rendered_map_index_empty_string(self, client, session, create_task_instance):
"""Test that empty string is accepted (clears the rendered_map_index)."""
ti = create_task_instance(
task_id="test_ti_patch_rendered_map_index_empty",
state=State.RUNNING,
session=session,
)
session.commit()
response = client.patch(
f"/execution/task-instances/{ti.id}/rendered-map-index",
json="",
)
assert response.status_code == 422
| TestTIPatchRenderedMapIndex |
python | tornadoweb__tornado | tornado/test/gen_test.py | {
"start": 22437,
"end": 28117
} | class ____(AsyncTestCase):
@gen_test
def test_empty_iterator(self):
g = gen.WaitIterator()
self.assertTrue(g.done(), "empty generator iterated")
with self.assertRaises(ValueError):
g = gen.WaitIterator(Future(), bar=Future())
self.assertIsNone(g.current_index, "bad nil current index")
self.assertIsNone(g.current_future, "bad nil current future")
@gen_test
def test_already_done(self):
f1 = Future() # type: Future[int]
f2 = Future() # type: Future[int]
f3 = Future() # type: Future[int]
f1.set_result(24)
f2.set_result(42)
f3.set_result(84)
g = gen.WaitIterator(f1, f2, f3)
i = 0
while not g.done():
r = yield g.next()
# Order is not guaranteed, but the current implementation
# preserves ordering of already-done Futures.
if i == 0:
self.assertEqual(g.current_index, 0)
self.assertIs(g.current_future, f1)
self.assertEqual(r, 24)
elif i == 1:
self.assertEqual(g.current_index, 1)
self.assertIs(g.current_future, f2)
self.assertEqual(r, 42)
elif i == 2:
self.assertEqual(g.current_index, 2)
self.assertIs(g.current_future, f3)
self.assertEqual(r, 84)
i += 1
self.assertIsNone(g.current_index, "bad nil current index")
self.assertIsNone(g.current_future, "bad nil current future")
dg = gen.WaitIterator(f1=f1, f2=f2)
while not dg.done():
dr = yield dg.next()
if dg.current_index == "f1":
self.assertTrue(
dg.current_future == f1 and dr == 24,
"WaitIterator dict status incorrect",
)
elif dg.current_index == "f2":
self.assertTrue(
dg.current_future == f2 and dr == 42,
"WaitIterator dict status incorrect",
)
else:
self.fail(f"got bad WaitIterator index {dg.current_index}")
i += 1
self.assertIsNone(g.current_index, "bad nil current index")
self.assertIsNone(g.current_future, "bad nil current future")
def finish_coroutines(self, iteration, futures):
if iteration == 3:
futures[2].set_result(24)
elif iteration == 5:
futures[0].set_exception(ZeroDivisionError())
elif iteration == 8:
futures[1].set_result(42)
futures[3].set_result(84)
if iteration < 8:
self.io_loop.add_callback(self.finish_coroutines, iteration + 1, futures)
@gen_test
def test_iterator(self):
futures = [Future(), Future(), Future(), Future()] # type: List[Future[int]]
self.finish_coroutines(0, futures)
g = gen.WaitIterator(*futures)
i = 0
while not g.done():
try:
r = yield g.next()
except ZeroDivisionError:
self.assertIs(g.current_future, futures[0], "exception future invalid")
else:
if i == 0:
self.assertEqual(r, 24, "iterator value incorrect")
self.assertEqual(g.current_index, 2, "wrong index")
elif i == 2:
self.assertEqual(r, 42, "iterator value incorrect")
self.assertEqual(g.current_index, 1, "wrong index")
elif i == 3:
self.assertEqual(r, 84, "iterator value incorrect")
self.assertEqual(g.current_index, 3, "wrong index")
i += 1
@gen_test
def test_iterator_async_await(self):
# Recreate the previous test with py35 syntax. It's a little clunky
# because of the way the previous test handles an exception on
# a single iteration.
futures = [Future(), Future(), Future(), Future()] # type: List[Future[int]]
self.finish_coroutines(0, futures)
self.finished = False
async def f():
i = 0
g = gen.WaitIterator(*futures)
try:
async for r in g:
if i == 0:
self.assertEqual(r, 24, "iterator value incorrect")
self.assertEqual(g.current_index, 2, "wrong index")
else:
raise Exception("expected exception on iteration 1")
i += 1
except ZeroDivisionError:
i += 1
async for r in g:
if i == 2:
self.assertEqual(r, 42, "iterator value incorrect")
self.assertEqual(g.current_index, 1, "wrong index")
elif i == 3:
self.assertEqual(r, 84, "iterator value incorrect")
self.assertEqual(g.current_index, 3, "wrong index")
else:
raise Exception("didn't expect iteration %d" % i)
i += 1
self.finished = True
yield f()
self.assertTrue(self.finished)
@gen_test
def test_no_ref(self):
# In this usage, there is no direct hard reference to the
# WaitIterator itself, only the Future it returns. Since
# WaitIterator uses weak references internally to improve GC
# performance, this used to cause problems.
yield gen.with_timeout(
datetime.timedelta(seconds=0.1), gen.WaitIterator(gen.sleep(0)).next()
)
| WaitIteratorTest |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/supervisor.py | {
"start": 60661,
"end": 81972
} | class ____(ActivitySubprocess):
"""A supervisor that runs tasks in-process for easier testing."""
comms: InProcessSupervisorComms = attrs.field(init=False)
stdin: socket = attrs.field(init=False)
class _Client(Client):
def request(self, *args, **kwargs):
# Bypass the tenacity retries!
return super().request.__wrapped__(self, *args, **kwargs) # type: ignore[attr-defined]
def _check_subprocess_exit(
self, raise_on_timeout: bool = False, expect_signal: None | int = None
) -> int | None:
# InProcessSupervisor has no subprocess, so we don't need to poll anything. This is called from
# _handle_socket_comms, so we need to override it
return None
def _handle_socket_comms(self):
while self._open_sockets:
self._service_subprocess(1.0)
@contextlib.contextmanager
def _setup_subprocess_socket(self):
thread = threading.Thread(target=self._handle_socket_comms, daemon=True)
requests, child_sock = socketpair()
self._open_sockets[requests] = "requests"
self.stdin = requests
self.selector.register(
requests,
selectors.EVENT_READ,
length_prefixed_frame_reader(self.handle_requests(log), on_close=self._on_socket_closed),
)
os.set_inheritable(child_sock.fileno(), True)
os.environ["__AIRFLOW_SUPERVISOR_FD"] = str(child_sock.fileno())
try:
thread.start()
yield child_sock
finally:
requests.close()
child_sock.close()
self._on_socket_closed(requests)
thread.join(0)
os.environ.pop("__AIRFLOW_SUPERVISOR_FD", None)
@classmethod
def start( # type: ignore[override]
cls,
*,
what: TaskInstance,
task,
logger: FilteringBoundLogger | None = None,
**kwargs,
) -> TaskRunResult:
"""
Run a task in-process without spawning a new child process.
This bypasses the standard `ActivitySubprocess.start()` behavior, which expects
to launch a subprocess and communicate via stdin/stdout. Instead, it constructs
the `RuntimeTaskInstance` directly — useful in contexts like `dag.test()` where the
Dag is already parsed in memory.
Supervisor state and communications are simulated in-memory via `InProcessSupervisorComms`.
"""
# Create supervisor instance
supervisor = cls(
id=what.id,
pid=os.getpid(), # Use current process
process=psutil.Process(), # Current process
process_log=logger or structlog.get_logger(logger_name="task").bind(),
client=cls._api_client(task.dag),
**kwargs,
)
from airflow.sdk.execution_time.task_runner import RuntimeTaskInstance, finalize, run
supervisor.comms = InProcessSupervisorComms(supervisor=supervisor)
with set_supervisor_comms(supervisor.comms):
supervisor.ti = what # type: ignore[assignment]
# We avoid calling `task_runner.startup()` because we are already inside a
# parsed Dag file (e.g. via dag.test()).
# In normal execution, `startup()` parses the Dag based on info in a `StartupDetails` message.
# By directly constructing the `RuntimeTaskInstance`,
# we skip re-parsing (`task_runner.parse()`) and avoid needing to set Dag Bundle config
# and run the task in-process.
start_date = datetime.now(tz=timezone.utc)
ti_context = supervisor.client.task_instances.start(supervisor.id, supervisor.pid, start_date)
ti = RuntimeTaskInstance.model_construct(
**what.model_dump(exclude_unset=True),
task=task,
_ti_context_from_server=ti_context,
max_tries=ti_context.max_tries,
start_date=start_date,
state=TaskInstanceState.RUNNING,
)
# Create a socketpair preemptively, in case the task process runs VirtualEnv operator or run_as_user
with supervisor._setup_subprocess_socket():
context = ti.get_template_context()
log = structlog.get_logger(logger_name="task")
state, msg, error = run(ti, context, log)
finalize(ti, state, context, log, error)
# In the normal subprocess model, the task runner calls this before exiting.
# Since we're running in-process, we manually notify the API server that
# the task has finished—unless the terminal state was already sent explicitly.
supervisor.update_task_state_if_needed()
return TaskRunResult(ti=ti, state=state, msg=msg, error=error)
@staticmethod
def _api_client(dag=None):
api = in_process_api_server()
if dag is not None:
from airflow.api_fastapi.common.dagbag import dag_bag_from_app
from airflow.models.dagbag import DBDagBag
# This is needed since the Execution API server uses the DBDagBag in its "state".
# This `app.state.dag_bag` is used to get some Dag properties like `fail_fast`.
dag_bag = DBDagBag()
api.app.dependency_overrides[dag_bag_from_app] = lambda: dag_bag
client = InProcessTestSupervisor._Client(
base_url=None, token="", dry_run=True, transport=api.transport
)
# Mypy is wrong -- the setter accepts a string on the property setter! `URLType = URL | str`
client.base_url = "http://in-process.invalid./"
return client
def send_msg(
self, msg: BaseModel | None, request_id: int, error: ErrorResponse | None = None, **dump_opts
):
"""Override to use in-process comms."""
self.comms.messages.append(msg)
@classmethod
def run_trigger_in_process(cls, *, trigger, ti):
"""
Run a trigger in-process for testing, similar to how we run tasks.
This creates a minimal supervisor instance specifically for trigger execution
and ensures the trigger has access to SUPERVISOR_COMMS for connection access.
"""
# Create a minimal supervisor instance for trigger execution
supervisor = cls(
id=ti.id,
pid=os.getpid(), # Use current process
process=psutil.Process(), # Current process - note the underscore prefix
process_log=structlog.get_logger(logger_name="task").bind(),
client=cls._api_client(),
)
supervisor.comms = InProcessSupervisorComms(supervisor=supervisor)
# Run the trigger with supervisor comms available
with set_supervisor_comms(supervisor.comms):
# Run the trigger's async generator and get the first event
import asyncio
async def _run_trigger():
return await anext(trigger.run(), None)
return asyncio.run(_run_trigger())
@property
def final_state(self):
"""Override to use in-process comms."""
# Since we're running in-process, we don't have a final state until the task has finished.
# We also don't have a process exit code to determine success/failure.
return self._terminal_state
@contextmanager
def set_supervisor_comms(temp_comms):
"""
Temporarily override `SUPERVISOR_COMMS` in the `task_runner` module.
This is used to simulate task-runner ↔ supervisor communication in-process,
by injecting a test Comms implementation (e.g. `InProcessSupervisorComms`)
in place of the real inter-process communication layer.
Some parts of the code (e.g. models.Variable.get) check for the presence
of `task_runner.SUPERVISOR_COMMS` to determine if the code is running in a Task SDK execution context.
This override ensures those code paths behave correctly during in-process tests.
"""
from airflow.sdk.execution_time import task_runner
sentinel = object()
old = getattr(task_runner, "SUPERVISOR_COMMS", sentinel)
if temp_comms is not None:
task_runner.SUPERVISOR_COMMS = temp_comms
elif old is not sentinel:
delattr(task_runner, "SUPERVISOR_COMMS")
try:
yield
finally:
if old is sentinel:
if hasattr(task_runner, "SUPERVISOR_COMMS"):
delattr(task_runner, "SUPERVISOR_COMMS")
else:
task_runner.SUPERVISOR_COMMS = old
def run_task_in_process(ti: TaskInstance, task) -> TaskRunResult:
"""Run a task in-process for testing."""
# Run the task
return InProcessTestSupervisor.start(what=ti, task=task)
# Sockets, even the `.makefile()` function don't correctly do line buffering on reading. If a chunk is read
# and it doesn't contain a new line character, `.readline()` will just return the chunk as is.
#
# This returns a callback suitable for attaching to a `selector` that reads in to a buffer, and yields lines
# to a (sync) generator
def make_buffered_socket_reader(
gen: Generator[None, bytes | bytearray, None],
on_close: Callable[[socket], None],
buffer_size: int = 4096,
):
buffer = bytearray() # This will hold our accumulated binary data
read_buffer = bytearray(buffer_size) # Temporary buffer for each read
# We need to start up the generator to get it to the point it's at waiting on the yield
next(gen)
def cb(sock: socket):
nonlocal buffer, read_buffer
# Read up to `buffer_size` bytes of data from the socket
n_received = sock.recv_into(read_buffer)
if not n_received:
# If no data is returned, the connection is closed. Return whatever is left in the buffer
if len(buffer):
with suppress(StopIteration):
gen.send(buffer)
return False
buffer.extend(read_buffer[:n_received])
# We could have read multiple lines in one go, yield them all
while (newline_pos := buffer.find(b"\n")) != -1:
line = buffer[: newline_pos + 1]
try:
gen.send(line)
except StopIteration:
return False
buffer = buffer[newline_pos + 1 :] # Update the buffer with remaining data
return True
return cb, on_close
def length_prefixed_frame_reader(
gen: Generator[None, _RequestFrame, None], on_close: Callable[[socket], None]
):
length_needed: int | None = None
# This will hold our accumulated/partial binary frame if it doesn't come in a single read
buffer: memoryview | None = None
# position in the buffer to store next read
pos = 0
decoder = msgspec.msgpack.Decoder[_RequestFrame](_RequestFrame)
# We need to start up the generator to get it to the point it's at waiting on the yield
next(gen)
def cb(sock: socket):
nonlocal buffer, length_needed, pos
if length_needed is None:
# Read the 32bit length of the frame
bytes = sock.recv(4)
if bytes == b"":
return False
length_needed = int.from_bytes(bytes, byteorder="big")
buffer = memoryview(bytearray(length_needed))
if length_needed and buffer:
n = sock.recv_into(buffer[pos:])
if n == 0:
# EOF
return False
pos += n
if pos >= length_needed:
request = decoder.decode(buffer)
buffer = None
pos = 0
length_needed = None
try:
gen.send(request)
except StopIteration:
return False
return True
return cb, on_close
def process_log_messages_from_subprocess(
loggers: tuple[FilteringBoundLogger, ...],
) -> Generator[None, bytes | bytearray, None]:
from structlog.stdlib import NAME_TO_LEVEL
loggers = tuple(
reconfigure_logger(
log,
structlog.processors.CallsiteParameterAdder,
# We need these logger to print _everything_ they are given. The subprocess itself does the level
# filtering.
level_override=logging.NOTSET,
)
for log in loggers
)
while True:
# Generator receive syntax, values are "sent" in by the `make_buffered_socket_reader` and returned to
# the yield.
line = yield
try:
event = msgspec.json.decode(line)
except Exception:
log.exception("Malformed json log line", line=line)
continue
if ts := event.get("timestamp"):
# We use msgspec to decode the timestamp as it does it orders of magnitude quicker than
# datetime.strptime cn
event["timestamp"] = msgspec.json.decode(f'"{ts}"', type=datetime)
if exc := event.pop("exception", None):
# TODO: convert the dict back to a pretty stack trace
event["error_detail"] = exc
level = NAME_TO_LEVEL[event.pop("level")]
msg = event.pop("event", None)
for target in loggers:
target.log(level, msg, **event)
def forward_to_log(
target_loggers: tuple[FilteringBoundLogger, ...], logger: str, level: int
) -> Generator[None, bytes | bytearray, None]:
while True:
line = yield
# Strip off new line
line = line.rstrip()
try:
msg = line.decode("utf-8", errors="replace")
except UnicodeDecodeError:
msg = line.decode("ascii", errors="replace")
for log in target_loggers:
log.log(level, msg, logger=logger)
def ensure_secrets_backend_loaded() -> list[BaseSecretsBackend]:
"""
Initialize secrets backend with auto-detected context.
Detection strategy:
1. SUPERVISOR_COMMS exists and is set → client chain (ExecutionAPISecretsBackend)
2. _AIRFLOW_PROCESS_CONTEXT=server env var → server chain (MetastoreBackend)
3. Neither → fallback chain (only env vars + external backends, no MetastoreBackend)
Client contexts: task runner in worker (has SUPERVISOR_COMMS)
Server contexts: API server, scheduler (set _AIRFLOW_PROCESS_CONTEXT=server)
Fallback contexts: supervisor, unknown contexts (no SUPERVISOR_COMMS, no env var)
The fallback chain ensures supervisor can use external secrets (AWS Secrets Manager,
Vault, etc.) while falling back to API client, without trying MetastoreBackend.
"""
import os
from airflow.sdk.configuration import ensure_secrets_loaded
from airflow.sdk.execution_time.secrets import DEFAULT_SECRETS_SEARCH_PATH_WORKERS
# 1. Check for client context (SUPERVISOR_COMMS)
try:
from airflow.sdk.execution_time import task_runner
if hasattr(task_runner, "SUPERVISOR_COMMS") and task_runner.SUPERVISOR_COMMS is not None:
# Client context: task runner with SUPERVISOR_COMMS
return ensure_secrets_loaded(default_backends=DEFAULT_SECRETS_SEARCH_PATH_WORKERS)
except (ImportError, AttributeError):
pass
# 2. Check for explicit server context
if os.environ.get("_AIRFLOW_PROCESS_CONTEXT") == "server":
# Server context: API server, scheduler
# uses the default server list
return ensure_secrets_loaded()
# 3. Fallback for unknown contexts (supervisor, etc.)
# Only env vars + external backends from config, no MetastoreBackend, no ExecutionAPISecretsBackend
fallback_backends = [
"airflow.secrets.environment_variables.EnvironmentVariablesBackend",
]
return ensure_secrets_loaded(default_backends=fallback_backends)
def _configure_logging(log_path: str, client: Client) -> tuple[FilteringBoundLogger, BinaryIO | TextIO]:
# If we are told to write logs to a file, redirect the task logger to it. Make sure we append to the
# file though, otherwise when we resume we would lose the logs from the start->deferral segment if it
# lands on the same node as before.
from airflow.sdk.log import init_log_file, logging_processors
log_file_descriptor: BinaryIO | TextIO | None = None
log_file = init_log_file(log_path)
json_logs = True
if json_logs:
log_file_descriptor = log_file.open("ab")
underlying_logger: WrappedLogger = structlog.BytesLogger(cast("BinaryIO", log_file_descriptor))
else:
log_file_descriptor = log_file.open("a", buffering=1)
underlying_logger = structlog.WriteLogger(cast("TextIO", log_file_descriptor))
with _remote_logging_conn(client):
processors = logging_processors(json_output=json_logs)
logger = structlog.wrap_logger(underlying_logger, processors=processors, logger_name="task").bind()
return logger, log_file_descriptor
def supervise(
*,
ti: TaskInstance,
bundle_info: BundleInfo,
dag_rel_path: str | os.PathLike[str],
token: str,
server: str | None = None,
dry_run: bool = False,
log_path: str | None = None,
subprocess_logs_to_stdout: bool = False,
client: Client | None = None,
sentry_integration: str = "",
) -> int:
"""
Run a single task execution to completion.
:param ti: The task instance to run.
:param bundle_info: Information of the Dag bundle to use for this task instance.
:param dag_rel_path: The file path to the Dag.
:param token: Authentication token for the API client.
:param server: Base URL of the API server.
:param dry_run: If True, execute without actual task execution (simulate run).
:param log_path: Path to write logs, if required.
:param subprocess_logs_to_stdout: Should task logs also be sent to stdout via the main logger.
:param client: Optional preconfigured client for communication with the server (Mostly for tests).
:param sentry_integration: If the executor has a Sentry integration, import
path to a callable to initialize it (empty means no integration).
:return: Exit code of the process.
:raises ValueError: If server URL is empty or invalid.
"""
# One or the other
from airflow.sdk._shared.secrets_masker import reset_secrets_masker
if not client:
if dry_run and server:
raise ValueError(f"Can only specify one of {server=} or {dry_run=}")
if not dry_run:
if not server:
raise ValueError(
"Invalid execution API server URL. Please ensure that a valid URL is configured."
)
try:
parsed_url = urlparse(server)
except Exception as e:
raise ValueError(
f"Invalid execution API server URL '{server}': {e}. "
"Please ensure that a valid URL is configured."
) from e
if parsed_url.scheme not in ("http", "https"):
raise ValueError(
f"Invalid execution API server URL '{server}': "
"URL must use http:// or https:// scheme. "
"Please ensure that a valid URL is configured."
)
if not parsed_url.netloc:
raise ValueError(
f"Invalid execution API server URL '{server}': "
"URL must include a valid host. "
"Please ensure that a valid URL is configured."
)
if not dag_rel_path:
raise ValueError("dag_path is required")
close_client = False
if not client:
limits = httpx.Limits(max_keepalive_connections=1, max_connections=10)
client = Client(base_url=server or "", limits=limits, dry_run=dry_run, token=token)
close_client = True
log.debug("Connecting to execution API server", server=server)
start = time.monotonic()
# TODO: Use logging providers to handle the chunked upload for us etc.
logger: FilteringBoundLogger | None = None
log_file_descriptor: BinaryIO | TextIO | None = None
if log_path:
logger, log_file_descriptor = _configure_logging(log_path, client)
backends = ensure_secrets_backend_loaded()
log.info(
"Secrets backends loaded for worker",
count=len(backends),
backend_classes=[type(b).__name__ for b in backends],
)
reset_secrets_masker()
try:
process = ActivitySubprocess.start(
dag_rel_path=dag_rel_path,
what=ti,
client=client,
logger=logger,
bundle_info=bundle_info,
subprocess_logs_to_stdout=subprocess_logs_to_stdout,
sentry_integration=sentry_integration,
)
exit_code = process.wait()
end = time.monotonic()
log.info(
"Task finished",
task_instance_id=str(ti.id),
exit_code=exit_code,
duration=end - start,
final_state=process.final_state,
)
return exit_code
finally:
if log_path and log_file_descriptor:
log_file_descriptor.close()
if close_client and client:
with suppress(Exception):
client.close()
| InProcessTestSupervisor |
python | conda__conda | conda/exceptions.py | {
"start": 14730,
"end": 14888
} | class ____(CondaError, OSError):
def __init__(self, message: str, **kwargs):
msg = f"{message}"
super().__init__(msg, **kwargs)
| CondaOSError |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 285176,
"end": 285491
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("Discussion", graphql_name="node")
| DiscussionEdge |
python | eth-brownie__brownie | tests/test_expansion.py | {
"start": 80,
"end": 2505
} | class ____(unittest.TestCase):
def setUp(self):
self.v = str(uuid.uuid4())
self.input = {
"non": "b",
"simple": "${FOO}",
"partial": "the ${FOO}",
"number": 1,
"bool": True,
"nested": {
"one": "nest ${FOO}",
"super_nested": {"two": "real ${FOO}", "three": "not"},
},
"${A}": "abc",
"default_envvar_present": "${FOO:-xyz}",
"default_envvar_missing": "${ABC:-bar}",
"default_int_present": "${NUM:-42}",
"default_int_missing": "${ABC:-42}",
"arr": [{"a": False, "b": False}, {"a": True, "b": "${FOO}"}],
}
variables = {"FOO": self.v, "NUM": 314}
self.res = expand_posix_vars(
self.input,
variables,
)
def test_basic_string(self):
assert self.res["non"] == "b"
def test_simple_expansion(self):
assert self.res["simple"] == self.v
def test_partial_string_expansion(self):
assert self.res["partial"] == f"the {self.v}"
def test_number(self):
assert self.res["number"] == 1
def test_bool(self):
assert self.res["bool"] == True # noqa: E712
def test_nested_partial_string(self):
assert self.res["nested"]["one"] == f"nest {self.v}"
def test_double_nested_partial_string(self):
assert self.res["nested"]["super_nested"]["two"] == f"real {self.v}"
def test_double_nested_plain(self):
assert self.res["nested"]["super_nested"]["three"] == "not"
def test_variable_name_not_expanded(self):
assert self.res["${A}"] == "abc"
def test_list_basic(self):
assert self.res["arr"][0]["a"] == False # noqa: E712
def test_list_bool(self):
assert self.res["arr"][1]["a"] == True # noqa: E712
def test_arr_expanded(self):
assert self.res["arr"][1]["b"] == self.v
def test_envvar_with_default_value_present(self):
assert self.res["default_envvar_present"] == self.v
def test_envvar_with_default_value_missing(self):
assert self.res["default_envvar_missing"] == "bar"
def test_envvar_with_default_int_value_present(self):
assert self.res["default_int_present"] == 314
def test_envvar_with_default_int_value_missing(self):
assert self.res["default_int_missing"] == 42
| TestExpandDict |
python | apache__airflow | airflow-core/tests/unit/models/test_xcom.py | {
"start": 17882,
"end": 19988
} | class ____:
@pytest.mark.parametrize(
("value", "expected_value"),
[
pytest.param(1, 1, id="int"),
pytest.param(1.0, 1.0, id="float"),
pytest.param("string", "string", id="str"),
pytest.param(True, True, id="bool"),
pytest.param({"key": "value"}, {"key": "value"}, id="dict"),
pytest.param([1, 2, 3], [1, 2, 3], id="list"),
pytest.param((1, 2, 3), (1, 2, 3), id="tuple"), # tuple is preserved
pytest.param(None, None, id="none"),
],
)
def test_xcom_round_trip(self, value, expected_value, push_simple_json_xcom, task_instance, session):
"""Test that XComModel serialization and deserialization work as expected."""
push_simple_json_xcom(ti=task_instance, key="xcom_1", value=value)
stored_value = session.execute(
XComModel.get_many(
key="xcom_1",
dag_ids=task_instance.dag_id,
task_ids=task_instance.task_id,
run_id=task_instance.run_id,
).with_only_columns(XComModel.value)
).first()
deserialized_value = XComModel.deserialize_value(stored_value)
assert deserialized_value == expected_value
@pytest.mark.parametrize(
("value", "expected_value"),
[
pytest.param(1, 1, id="int"),
pytest.param(1.0, 1.0, id="float"),
pytest.param("string", "string", id="str"),
pytest.param(True, True, id="bool"),
pytest.param({"key": "value"}, {"key": "value"}, id="dict"),
pytest.param([1, 2, 3], [1, 2, 3], id="list"),
pytest.param((1, 2, 3), (1, 2, 3), id="tuple"), # tuple is preserved
pytest.param(None, None, id="none"),
],
)
def test_xcom_deser_fallback(self, value, expected_value):
"""Test fallback in deserialization."""
mock_xcom = MagicMock(value=value)
deserialized_value = XComModel.deserialize_value(mock_xcom)
assert deserialized_value == expected_value
| TestXComRoundTrip |
python | tensorflow__tensorflow | tensorflow/python/framework/tensor_shape_test.py | {
"start": 1093,
"end": 10304
} | class ____(test_util.TensorFlowTestCase):
def testDimension(self):
dim = tensor_shape.Dimension(12)
self.assertEqual(12, dim.value)
self.assertEqual(12, int(dim))
self.assertEqual(dim, tensor_shape.Dimension(12))
self.assertEqual(
tensor_shape.Dimension(15), dim + tensor_shape.Dimension(3))
self.assertEqual(tensor_shape.Dimension(15), dim + 3)
self.assertEqual(tensor_shape.Dimension(15), 3 + dim)
self.assertEqual(tensor_shape.Dimension(9), dim - 3)
self.assertEqual(tensor_shape.Dimension(1), 13 - dim)
self.assertEqual(
tensor_shape.Dimension(24), dim * tensor_shape.Dimension(2))
self.assertEqual(tensor_shape.Dimension(24), dim * 2)
self.assertEqual(tensor_shape.Dimension(24), 2 * dim)
self.assertEqual([4] * 12, [4] * dim)
self.assertEqual(12 * [4], dim * [4])
self.assertEqual(tensor_shape.Dimension(24), 2 * dim)
self.assertEqual(
tensor_shape.Dimension(6), dim // tensor_shape.Dimension(2))
self.assertEqual(tensor_shape.Dimension(6), dim // 2)
self.assertEqual(tensor_shape.Dimension(0), 2 // dim)
self.assertEqual(
tensor_shape.Dimension(12), dim.merge_with(tensor_shape.Dimension(12)))
self.assertEqual(tensor_shape.Dimension(12), dim.merge_with(12))
self.assertLess(tensor_shape.Dimension(12), tensor_shape.Dimension(13))
self.assertGreater(tensor_shape.Dimension(13), tensor_shape.Dimension(12))
self.assertLessEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(12))
self.assertLessEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(13))
self.assertGreater(tensor_shape.Dimension(13), tensor_shape.Dimension(12))
self.assertGreaterEqual(
tensor_shape.Dimension(12), tensor_shape.Dimension(12))
self.assertGreaterEqual(
tensor_shape.Dimension(13), tensor_shape.Dimension(12))
self.assertNotEqual(dim, (12,))
with self.assertRaises(ValueError):
dim.merge_with(tensor_shape.Dimension(13))
def testUnknownDimension(self):
dim = tensor_shape.Dimension(None)
self.assertIsNone(dim.value)
self.assertEqual(dim.value, tensor_shape.Dimension(None).value)
self.assertEqual(
tensor_shape.Dimension(None).value,
(dim + tensor_shape.Dimension(None)).value)
self.assertEqual(
tensor_shape.Dimension(None).value,
(dim * tensor_shape.Dimension(None)).value)
self.assertEqual(
tensor_shape.Dimension(None).value,
(dim // tensor_shape.Dimension(None)).value)
self.assertEqual(
tensor_shape.Dimension(None).value,
dim.merge_with(tensor_shape.Dimension(None)).value)
self.assertIsNone(
tensor_shape.Dimension(None) < tensor_shape.Dimension(None))
self.assertIsNone(
tensor_shape.Dimension(None) <= tensor_shape.Dimension(None))
self.assertIsNone(
tensor_shape.Dimension(None) > tensor_shape.Dimension(None))
self.assertIsNone(
tensor_shape.Dimension(None) >= tensor_shape.Dimension(None))
def testKnownAndUnknownDimensions(self):
known = tensor_shape.Dimension(12)
unknown = tensor_shape.Dimension(None)
self.assertEqual(
tensor_shape.Dimension(None).value, (known + unknown).value)
self.assertEqual(
tensor_shape.Dimension(None).value, (unknown + known).value)
self.assertEqual(
tensor_shape.Dimension(None).value, (known * unknown).value)
self.assertEqual(
tensor_shape.Dimension(None).value, (unknown * known).value)
self.assertEqual(
tensor_shape.Dimension(None).value, (known // unknown).value)
self.assertEqual(
tensor_shape.Dimension(None).value, (unknown // known).value)
self.assertEqual(tensor_shape.Dimension(12), known.merge_with(unknown))
self.assertEqual(tensor_shape.Dimension(12), unknown.merge_with(known))
self.assertIsNone(tensor_shape.Dimension(12) < tensor_shape.Dimension(None))
self.assertIsNone(
tensor_shape.Dimension(12) <= tensor_shape.Dimension(None))
self.assertIsNone(tensor_shape.Dimension(12) > tensor_shape.Dimension(None))
self.assertIsNone(
tensor_shape.Dimension(12) >= tensor_shape.Dimension(None))
self.assertIsNone(tensor_shape.Dimension(None) < tensor_shape.Dimension(12))
self.assertIsNone(
tensor_shape.Dimension(None) <= tensor_shape.Dimension(12))
self.assertIsNone(tensor_shape.Dimension(None) > tensor_shape.Dimension(12))
self.assertIsNone(
tensor_shape.Dimension(None) >= tensor_shape.Dimension(12))
def testAsDimension(self):
self.assertEqual(
tensor_shape.Dimension(12),
tensor_shape.as_dimension(tensor_shape.Dimension(12)))
self.assertEqual(tensor_shape.Dimension(12), tensor_shape.as_dimension(12))
self.assertEqual(
tensor_shape.Dimension(None).value,
tensor_shape.as_dimension(tensor_shape.Dimension(None)).value)
self.assertEqual(
tensor_shape.Dimension(None).value,
tensor_shape.as_dimension(None).value)
def testEquality(self):
self.assertEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(12))
self.assertNotEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(13))
self.assertIsNone(
tensor_shape.Dimension(12) == tensor_shape.Dimension(None))
self.assertIsNone(
tensor_shape.Dimension(None) == tensor_shape.Dimension(12))
self.assertIsNone(
tensor_shape.Dimension(None) == tensor_shape.Dimension(None))
# None indicates ambiguous comparison, but comparison vs the wrong type
# is unambiguously False.
self.assertIsNotNone(tensor_shape.Dimension(None) == 12.99)
self.assertNotEqual(tensor_shape.Dimension(None), 12.99)
# pylint: disable=singleton-comparison, g-equals-none
self.assertIsNone(tensor_shape.Dimension(None) == None)
# pylint: enable=singleton-comparison, g-equals-none
self.assertNotEqual(tensor_shape.Dimension(12), 12.99)
def testInequality(self):
self.assertNotEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(13))
self.assertEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(12))
self.assertIsNone(
tensor_shape.Dimension(12) != tensor_shape.Dimension(None))
self.assertIsNone(
tensor_shape.Dimension(None) != tensor_shape.Dimension(12))
self.assertIsNone(
tensor_shape.Dimension(None) != tensor_shape.Dimension(None))
# None indicates ambiguous comparison, but comparison vs the wrong type
# is unambiguously False.
self.assertIsNotNone(tensor_shape.Dimension(None) != 12.99)
self.assertNotEqual(tensor_shape.Dimension(None), 12.99)
self.assertIsNone(tensor_shape.Dimension(None) != None) # pylint: disable=g-equals-none
self.assertNotEqual(tensor_shape.Dimension(12), 12.99)
def testIsCompatibleWithError(self):
with self.assertRaisesRegex(TypeError, "must be integer or None"):
tensor_shape.Dimension(42).is_compatible_with([])
with self.assertRaisesRegex(ValueError, "must be >= 0"):
tensor_shape.Dimension(42).is_compatible_with(-1)
def testMergeWithError(self):
with self.assertRaisesRegex(TypeError, "must be integer or None"):
tensor_shape.Dimension(42).merge_with([])
with self.assertRaisesRegex(ValueError, "must be >= 0"):
tensor_shape.Dimension(42).merge_with(-1)
def testRepr(self):
self.assertEqual(repr(tensor_shape.Dimension(7)), "Dimension(7)")
self.assertEqual(repr(tensor_shape.Dimension(None)), "Dimension(None)")
def testStr(self):
self.assertEqual(str(tensor_shape.Dimension(7)), "7")
self.assertEqual(str(tensor_shape.Dimension(None)), "?")
def testUnsupportedType(self):
with self.assertRaises(TypeError):
tensor_shape.Dimension(dtypes.string)
def testBool(self):
one = tensor_shape.Dimension(1)
zero = tensor_shape.Dimension(0)
has_none = tensor_shape.Dimension(None)
self.assertTrue(one)
self.assertFalse(zero)
self.assertFalse(has_none)
def testMod(self):
four = tensor_shape.Dimension(4)
nine = tensor_shape.Dimension(9)
self.assertEqual(nine % four, 1)
# test both __mod__ and __rmod__.
self.assertEqual(nine % 4, 1)
self.assertEqual(4 % nine, 4)
def testReduce(self):
dim = tensor_shape.Dimension(5)
ctor, args = dim.__reduce__()
self.assertEqual(ctor, tensor_shape.Dimension)
self.assertEqual(args, (5,))
reconstructed = ctor(*args)
self.assertEqual(reconstructed, dim)
def testDiv(self):
# Note: This test is related to GitHub issue 25790.
six = tensor_shape.Dimension(6)
two = tensor_shape.Dimension(2)
message = (r"unsupported operand type\(s\) for /: "
r"'Dimension' and 'Dimension', please use // instead")
with self.assertRaisesRegex(TypeError, message):
_ = six / two
message = (r"unsupported operand type\(s\) for /: "
r"'Dimension' and 'int', please use // instead")
with self.assertRaisesRegex(TypeError, message):
_ = six / 2
message = (r"unsupported operand type\(s\) for /: "
r"'int' and 'Dimension', please use // instead")
with self.assertRaisesRegex(TypeError, message):
_ = 6 / two
| DimensionTest |
python | ray-project__ray | python/ray/llm/_internal/common/utils/cloud_filesystem/gcs_filesystem.py | {
"start": 473,
"end": 2752
} | class ____(BaseCloudFileSystem):
"""GCS-specific implementation of cloud filesystem operations.
**Note**: This implementation currently delegates to PyArrowFileSystem to maintain
stability. Optimized implementation using google-cloud-storage SDK and gsutil
will be added in a future PR.
"""
@staticmethod
def get_file(
object_uri: str, decode_as_utf_8: bool = True
) -> Optional[Union[str, bytes]]:
"""Download a file from cloud storage into memory.
Args:
object_uri: URI of the file (gs://)
decode_as_utf_8: If True, decode the file as UTF-8
Returns:
File contents as string or bytes, or None if file doesn't exist
"""
return PyArrowFileSystem.get_file(object_uri, decode_as_utf_8)
@staticmethod
def list_subfolders(folder_uri: str) -> List[str]:
"""List the immediate subfolders in a cloud directory.
Args:
folder_uri: URI of the directory (gs://)
Returns:
List of subfolder names (without trailing slashes)
"""
return PyArrowFileSystem.list_subfolders(folder_uri)
@staticmethod
def download_files(
path: str,
bucket_uri: str,
substrings_to_include: Optional[List[str]] = None,
suffixes_to_exclude: Optional[List[str]] = None,
) -> None:
"""Download files from cloud storage to a local directory.
Args:
path: Local directory where files will be downloaded
bucket_uri: URI of cloud directory
substrings_to_include: Only include files containing these substrings
suffixes_to_exclude: Exclude certain files from download (e.g .safetensors)
"""
PyArrowFileSystem.download_files(
path, bucket_uri, substrings_to_include, suffixes_to_exclude
)
@staticmethod
def upload_files(
local_path: str,
bucket_uri: str,
) -> None:
"""Upload files to cloud storage.
Args:
local_path: The local path of the files to upload.
bucket_uri: The bucket uri to upload the files to, must start with `gs://`.
"""
PyArrowFileSystem.upload_files(local_path, bucket_uri)
| GCSFileSystem |
python | plotly__plotly.py | plotly/graph_objs/layout/coloraxis/colorbar/_tickformatstop.py | {
"start": 235,
"end": 8562
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.coloraxis.colorbar"
_path_str = "layout.coloraxis.colorbar.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs,
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.colorax
is.colorbar.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super().__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.coloraxis.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.coloraxis.colorbar.Tickformatstop`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dtickrange", arg, dtickrange)
self._set_property("enabled", arg, enabled)
self._set_property("name", arg, name)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("value", arg, value)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickformatstop |
python | doocs__leetcode | lcof/面试题16. 数值的整数次方/Solution.py | {
"start": 0,
"end": 333
} | class ____:
def myPow(self, x: float, n: int) -> float:
def qpow(a: float, n: int) -> float:
ans = 1
while n:
if n & 1:
ans *= a
a *= a
n >>= 1
return ans
return qpow(x, n) if n >= 0 else 1 / qpow(x, -n)
| Solution |
python | ray-project__ray | python/ray/serve/llm/__init__.py | {
"start": 1807,
"end": 12794
} | class ____(_OpenAiIngress):
pass
##########
# Builders
##########
@PublicAPI(stability="alpha")
def build_llm_deployment(
llm_config: "LLMConfig",
*,
name_prefix: Optional[str] = None,
bind_kwargs: Optional[dict] = None,
override_serve_options: Optional[dict] = None,
deployment_cls: Optional[Type[LLMServer]] = None,
) -> "Application":
"""Helper to build a single vllm deployment from the given llm config.
Examples:
.. testcode::
:skipif: True
from ray import serve
from ray.serve.llm import LLMConfig, build_llm_deployment
# Configure the model
llm_config = LLMConfig(
model_loading_config=dict(
model_id="llama-3.1-8b",
model_source="meta-llama/Llama-3.1-8b-instruct",
),
deployment_config=dict(
autoscaling_config=dict(
min_replicas=1,
max_replicas=2,
)
),
accelerator_type="A10G",
)
# Build the deployment
llm_app = build_llm_deployment(llm_config)
# Deploy the application
model_handle = serve.run(llm_app)
# Querying the model handle
import asyncio
model_handle = model_handle.options(stream=True)
async def query_model(model_handle):
from ray.serve.llm.openai_api_models import ChatCompletionRequest
request = ChatCompletionRequest(
model="qwen-0.5b",
messages=[
{
"role": "user",
"content": "Hello, world!"
}
]
)
resp = model_handle.chat.remote(request)
async for message in resp:
print("message: ", message)
asyncio.run(query_model(model_handle))
Args:
llm_config: The llm config to build vllm deployment.
name_prefix: Optional prefix to be used for the deployment name.
bind_kwargs: Optional kwargs to pass to the deployment.
override_serve_options: Optional serve options to override the original serve options based on the llm_config.
deployment_cls: Optional deployment class to use.
Returns:
The configured Ray Serve Application for vllm deployment.
"""
from ray.llm._internal.serve.core.server.builder import (
build_llm_deployment,
)
return build_llm_deployment(
llm_config=llm_config,
name_prefix=name_prefix,
bind_kwargs=bind_kwargs,
override_serve_options=override_serve_options,
deployment_cls=deployment_cls,
)
@PublicAPI(stability="alpha")
def build_openai_app(llm_serving_args: dict) -> "Application":
"""Helper to build an OpenAI compatible app with the llm deployment setup from
the given llm serving args. This is the main entry point for users to create a
Serve application serving LLMs.
Examples:
.. code-block:: python
:caption: Example usage in code.
from ray import serve
from ray.serve.llm import LLMConfig, LLMServingArgs, build_openai_app
llm_config1 = LLMConfig(
model_loading_config=dict(
model_id="qwen-0.5b",
model_source="Qwen/Qwen2.5-0.5B-Instruct",
),
deployment_config=dict(
autoscaling_config=dict(
min_replicas=1, max_replicas=2,
)
),
accelerator_type="A10G",
)
llm_config2 = LLMConfig(
model_loading_config=dict(
model_id="qwen-1.5b",
model_source="Qwen/Qwen2.5-1.5B-Instruct",
),
deployment_config=dict(
autoscaling_config=dict(
min_replicas=1, max_replicas=2,
)
),
accelerator_type="A10G",
)
# Deploy the application
llm_app = build_openai_app(
LLMServingArgs(
llm_configs=[
llm_config1,
llm_config2,
]
)
)
serve.run(llm_app)
# Querying the model via openai client
from openai import OpenAI
# Initialize client
client = OpenAI(base_url="http://localhost:8000/v1", api_key="fake-key")
# Basic completion
response = client.chat.completions.create(
model="qwen-0.5b",
messages=[{"role": "user", "content": "Hello!"}]
)
.. code-block:: yaml
:caption: Example usage in YAML.
# config.yaml
applications:
- args:
llm_configs:
- model_loading_config:
model_id: qwen-0.5b
model_source: Qwen/Qwen2.5-0.5B-Instruct
accelerator_type: A10G
deployment_config:
autoscaling_config:
min_replicas: 1
max_replicas: 2
- model_loading_config:
model_id: qwen-1.5b
model_source: Qwen/Qwen2.5-1.5B-Instruct
accelerator_type: A10G
deployment_config:
autoscaling_config:
min_replicas: 1
max_replicas: 2
import_path: ray.serve.llm:build_openai_app
name: llm_app
route_prefix: "/"
Args:
llm_serving_args: A dict that conforms to the LLMServingArgs pydantic model.
Returns:
The configured Ray Serve Application router.
"""
from ray.llm._internal.serve.core.ingress.builder import (
build_openai_app,
)
return build_openai_app(builder_config=llm_serving_args)
@PublicAPI(stability="alpha")
def build_pd_openai_app(pd_serving_args: dict) -> "Application":
"""Build a deployable application utilizing P/D disaggregation.
Examples:
.. code-block:: python
:caption: Example usage in code.
from ray import serve
from ray.serve.llm import LLMConfig, build_pd_openai_app
config = LLMConfig(
model_loading_config=dict(
model_id="qwen-0.5b",
model_source="Qwen/Qwen2.5-0.5B-Instruct",
),
deployment_config=dict(
autoscaling_config=dict(
min_replicas=1, max_replicas=2,
)
),
accelerator_type="A10G",
)
# Deploy the application
llm_app = build_pd_openai_app(
dict(
prefill_config=config,
decode_config=config,
)
)
serve.run(llm_app)
# Querying the model via openai client
from openai import OpenAI
# Initialize client
client = OpenAI(base_url="http://localhost:8000/v1", api_key="fake-key")
# Basic completion
response = client.chat.completions.create(
model="qwen-0.5b",
messages=[{"role": "user", "content": "Hello!"}]
)
.. code-block:: yaml
:caption: Example usage in YAML.
# config.yaml
applications:
- args:
prefill_config:
model_loading_config:
model_id: qwen-0.5b
model_source: Qwen/Qwen2.5-0.5B-Instruct
accelerator_type: A10G
deployment_config:
autoscaling_config:
min_replicas: 1
max_replicas: 2
decode_config:
model_loading_config:
model_id: qwen-1.5b
model_source: Qwen/Qwen2.5-1.5B-Instruct
accelerator_type: A10G
deployment_config:
autoscaling_config:
min_replicas: 1
max_replicas: 2
import_path: ray.serve.llm:build_pd_openai_app
name: llm_app
route_prefix: "/"
Args:
pd_serving_args: The dictionary containing prefill and decode configs. See PDServingArgs for more details.
Returns:
The configured Ray Serve Application router.
"""
from ray.llm._internal.serve.serving_patterns.prefill_decode.builder import (
build_pd_openai_app,
)
return build_pd_openai_app(pd_serving_args=pd_serving_args)
@PublicAPI(stability="alpha")
def build_dp_deployment(
llm_config: "LLMConfig",
*,
name_prefix: Optional[str] = None,
override_serve_options: Optional[dict] = None,
) -> "Application":
"""Build a data parallel attention LLM deployment.
Args:
llm_config: The LLM configuration.
name_prefix: The prefix to add to the deployment name.
override_serve_options: The optional serve options to override the
default options.
Returns:
The Ray Serve Application for the data parallel attention LLM deployment.
"""
from ray.llm._internal.serve.serving_patterns.data_parallel.builder import (
build_dp_deployment,
)
return build_dp_deployment(
llm_config=llm_config,
name_prefix=name_prefix,
override_serve_options=override_serve_options,
)
@PublicAPI(stability="alpha")
def build_dp_openai_app(dp_serving_args: dict) -> "Application":
"""Build an OpenAI compatible app with the DP attention deployment
setup from the given builder configuration.
Args:
dp_serving_args: The configuration for the builder. It has to conform
to the DPOpenAiServingArgs pydantic model.
Returns:
The configured Ray Serve Application.
"""
from ray.llm._internal.serve.serving_patterns.data_parallel.builder import (
build_dp_openai_app,
)
return build_dp_openai_app(builder_config=dp_serving_args)
__all__ = [
"LLMConfig",
"LLMServingArgs",
"ModelLoadingConfig",
"CloudMirrorConfig",
"LoraConfig",
"build_llm_deployment",
"build_openai_app",
"build_pd_openai_app",
"build_dp_deployment",
"build_dp_openai_app",
"LLMServer",
"LLMRouter",
]
| LLMRouter |
python | donnemartin__interactive-coding-challenges | arrays_strings/compress_alt/test_compress.py | {
"start": 18,
"end": 603
} | class ____(unittest.TestCase):
def test_compress(self, func):
self.assertEqual(func(None), None)
self.assertEqual(func(''), '')
self.assertEqual(func('AABBCC'), 'AABBCC')
self.assertEqual(func('AAABCCDDDD'), 'A3BCCD4')
self.assertEqual(
func('aaBCCEFFFFKKMMMMMMP taaammanlaarrrr seeeeeeeeek tooo'),
'aaBCCEF4KKM6P ta3mmanlaar4 se9k to3',
)
print('Success: test_compress')
def main():
test = TestCompress()
test.test_compress(compress_string)
if __name__ == '__main__':
main()
| TestCompress |
python | docker__docker-py | tests/unit/utils_json_stream_test.py | {
"start": 82,
"end": 589
} | class ____:
def test_json_splitter_no_object(self):
data = '{"foo": "bar'
assert json_splitter(data) is None
def test_json_splitter_with_object(self):
data = '{"foo": "bar"}\n \n{"next": "obj"}'
assert json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
def test_json_splitter_leading_whitespace(self):
data = '\n \r{"foo": "bar"}\n\n {"next": "obj"}'
assert json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
| TestJsonSplitter |
python | pytorch__pytorch | test/distributed/tensor/test_op_strategy.py | {
"start": 6063,
"end": 19402
} | class ____(DTensorOpTestBase):
@property
def world_size(self) -> int:
return 4
def test_redistribute_cost_mesh_1d(self):
mesh_1d = self.build_device_mesh()
shard_placement = (Shard(0),)
replica_placement = (Replicate(),)
partial_placement = (Partial(),)
global_tensor = torch.randn(10, 10)
global_tensor_meta = extract_tensor_meta(global_tensor)
# shard spec
shard_spec = DTensorSpec(mesh_1d, shard_placement, global_tensor_meta)
# replica spec
replica_spec = DTensorSpec(mesh_1d, replica_placement, global_tensor_meta)
# partial spec
partial_spec = DTensorSpec(mesh_1d, partial_placement, global_tensor_meta)
# make sure reshard cost is 0 for the same spec redistribute
for spec in [shard_spec, replica_spec, partial_spec]:
cost = redistribute_cost(spec, spec)
self.assertEqual(cost, 0)
# shard -> replicate
allgather_cost = redistribute_cost(shard_spec, replica_spec)
# partial -> shard
reduce_scatter_cost = redistribute_cost(partial_spec, shard_spec)
# partial -> replicate
allreduce_cost = redistribute_cost(partial_spec, replica_spec)
self.assertEqual(allgather_cost, reduce_scatter_cost)
self.assertTrue(allreduce_cost + 1 < allgather_cost + reduce_scatter_cost)
# shard to partial
cost = redistribute_cost(shard_spec, partial_spec)
self.assertEqual(cost, float("inf"))
def test_redistribute_cost_latency(self):
# test cost model on addmm op
from torch.distributed.tensor._ops._matrix_ops import addmm_strategy
mesh = self.build_device_mesh()
shard0_placement = (Shard(0),)
partial_placement = (Partial(),)
shard1_placement = (Shard(1),)
shard0_tensor_meta = extract_tensor_meta(torch.randn(8))
partial_tensor_meta = extract_tensor_meta(torch.randn(50, 6))
shard1_tensor_meta = extract_tensor_meta(torch.randn(6, 8))
# shard spec
shard0_spec = DTensorSpec(mesh, shard0_placement, shard0_tensor_meta)
# replica spec
partial_spec = DTensorSpec(mesh, partial_placement, partial_tensor_meta)
# partial spec
shard1_spec = DTensorSpec(mesh, shard1_placement, shard1_tensor_meta)
op_schema = OpSchema(
torch.ops.aten.addmm.default,
(
OpStrategy([OpSpec(shard0_spec)]),
OpStrategy([OpSpec(partial_spec)]),
OpStrategy([OpSpec(shard1_spec)]),
),
{},
)
output_strategy = addmm_strategy(op_schema)
strategy_costs = {}
for strategy in output_strategy.strategies:
redistribute_cost = sum(chain.from_iterable(strategy.redistribute_cost))
strategy_costs[str(strategy)] = redistribute_cost
# assert that cost model counts for collective latency (i.e. multiple comm is penalized)
self.assertTrue(
strategy_costs["(S(0), R, S(1)) -> S(1)"]
< strategy_costs["(R, S(0), R) -> S(0)"]
)
# assert a single allreduce is the best one
self.assertEqual(
strategy_costs["(S(0), R, S(1)) -> S(1)"], min(strategy_costs.values())
)
def test_redistribute_cost_mesh_2d(self):
mesh_2d = DeviceMesh(
self.device_type, torch.arange(self.world_size).reshape(2, 2)
)
shard_placement = (Shard(0), Shard(0))
replica_placement = (Replicate(), Replicate())
partial_placement = (Partial(), Partial())
global_tensor = torch.randn(8, 8)
global_tensor_meta = extract_tensor_meta(global_tensor)
# shard spec
shard_spec = DTensorSpec(mesh_2d, shard_placement, global_tensor_meta)
# replica spec
replica_spec = DTensorSpec(mesh_2d, replica_placement, global_tensor_meta)
# partial spec
partial_spec = DTensorSpec(mesh_2d, partial_placement, global_tensor_meta)
# make sure reshard cost is 0 for the same spec redistribute
for spec in [shard_spec, replica_spec, partial_spec]:
cost = redistribute_cost(spec, spec)
self.assertEqual(cost, 0)
# shard -> replicate
allgather_cost = redistribute_cost(shard_spec, replica_spec)
# partial -> replicate
allreduce_cost = redistribute_cost(partial_spec, replica_spec)
# partial -> shard
reduce_scatter_cost = redistribute_cost(partial_spec, shard_spec)
self.assertTrue(allreduce_cost > allgather_cost)
self.assertTrue(allreduce_cost > reduce_scatter_cost)
def test_mm_strategies(self):
from torch.distributed.tensor._ops._matrix_ops import mm_strategy
mesh = self.build_device_mesh()
lhs_tensor = torch.randn(6, 8)
rhs_tensor = torch.randn(8, 12)
lhs_tensor_meta = extract_tensor_meta(lhs_tensor)
rhs_tensor_meta = extract_tensor_meta(rhs_tensor)
mm_combs = (
(Shard(0), Replicate()),
(Replicate(), Shard(1)),
(Shard(1), Shard(0)),
(Replicate(), Replicate()),
)
for lhs, rhs in mm_combs:
lhs_spec = DTensorSpec(mesh, (lhs,), lhs_tensor_meta)
rhs_spec = DTensorSpec(mesh, (rhs,), rhs_tensor_meta)
op_schema = OpSchema(
torch.ops.aten.mm.default,
(
OpStrategy([OpSpec(lhs_spec)]),
OpStrategy([OpSpec(rhs_spec)]),
),
{},
)
# test the strategy
res_strategies = mm_strategy(op_schema)
for strtgy in res_strategies.strategies:
if strtgy.input_specs == (lhs_spec, rhs_spec):
self.assertEqual(strtgy.redistribute_cost, [[0.0], [0.0]])
break
op_schema = OpSchema(
torch.ops.aten.mm.default,
(lhs_spec, rhs_spec),
{},
)
# test sharding prop
output_sharding = DTensor._op_dispatcher.sharding_propagator.propagate_op_sharding_non_cached(
op_schema
)
self.assertFalse(output_sharding.needs_redistribute)
def test_bmm_strategies(self):
from torch.distributed.tensor._ops._matrix_ops import bmm_strategy
mesh = self.build_device_mesh()
lhs_tensor = torch.randn(8, 6, 8)
rhs_tensor = torch.randn(8, 8, 12)
lhs_tensor_meta = extract_tensor_meta(lhs_tensor)
rhs_tensor_meta = extract_tensor_meta(rhs_tensor)
bmm_combs = (
(Shard(0), Shard(0)),
(Shard(1), Replicate()),
(Replicate(), Shard(2)),
(Shard(2), Shard(1)),
(Replicate(), Replicate()),
)
for lhs, rhs in bmm_combs:
lhs_spec = DTensorSpec(mesh, (lhs,), lhs_tensor_meta)
rhs_spec = DTensorSpec(mesh, (rhs,), rhs_tensor_meta)
op_schema = OpSchema(
torch.ops.aten.bmm.default,
(
OpStrategy([OpSpec(lhs_spec)]),
OpStrategy([OpSpec(rhs_spec)]),
),
{},
)
# test the strategy
res_strategies = bmm_strategy(op_schema)
for strtgy in res_strategies.strategies:
if strtgy.input_specs == (lhs_spec, rhs_spec):
self.assertEqual(strtgy.redistribute_cost, [[0.0], [0.0]])
break
op_schema = OpSchema(
torch.ops.aten.bmm.default,
(lhs_spec, rhs_spec),
{},
)
# test sharding prop
output_sharding = DTensor._op_dispatcher.sharding_propagator.propagate_op_sharding_non_cached(
op_schema
)
self.assertFalse(output_sharding.needs_redistribute)
# -------------Test op strategy registration-------------
# custom op without List[Tensor] as input
# reference: https://docs.pytorch.org/docs/stable/library.html#torch.library.register_autograd
@torch.library.custom_op("mylib::numpy_sin", mutates_args=())
def numpy_sin(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
x_np = x.cpu().numpy()
y_np = y.cpu().numpy()
out_np = np.sin(x_np) + np.sin(y_np)
return torch.from_numpy(out_np).to(device=x.device)
def setup_context(ctx, inputs, output):
(x, y) = inputs
ctx.save_for_backward(x, y)
def backward(ctx, grad):
(x, y) = ctx.saved_tensors
return grad * x.cos(), grad * y.cos()
@numpy_sin.register_fake
def _fw(x, y):
return torch.empty_like(x)
torch.library.register_autograd(
"mylib::numpy_sin", backward, setup_context=setup_context
)
# custom op with List[Tensor] as input
@torch.library.custom_op("mylib::numpy_tuple_sin", mutates_args=())
def numpy_tuple_sin(
x: torch.Tensor, y: list[torch.Tensor], z: torch.Tensor
) -> torch.Tensor:
x_np = x.cpu().numpy()
y_np = [i.cpu().numpy() for i in y]
z_np = z.cpu().numpy()
out_np = np.sin(x_np) + np.sin(z_np) + sum(np.sin(i) for i in y_np)
return torch.from_numpy(out_np).to(device=x.device)
def setup_tuple_context(ctx, inputs, output):
(x, y, z) = inputs
ctx.save_for_backward(x, y, z)
def tuple_backward(ctx, grad):
(x, y, z) = ctx.saved_tensors
return grad * x.cos(), [grad * i.cos() for i in y], grad * z.cos()
@numpy_tuple_sin.register_fake
def _fw_tuple(x, y, z):
return torch.empty_like(x)
torch.library.register_autograd(
"mylib::numpy_tuple_sin", tuple_backward, setup_context=setup_tuple_context
)
@contextmanager
def op_strategy_context(op_overload, strategy_func, schema_info=None):
"""
Context manager for setting and clearing op strategies.
Args:
op_overload: The operator overload to set or clear the strategy for.
strategy_func: The strategy function to set for the operator overload.
schema_info: Optional schema information for the operator overload.
Yields:
None
"""
propagator = DTensor._op_dispatcher.sharding_propagator
_origin_op_strategy_funcs = None
_origin_op_strategy_schema = None
try:
# register the op strategy
if op_overload in propagator.op_strategy_funcs:
_origin_op_strategy_funcs = propagator.op_strategy_funcs[op_overload]
del propagator.op_strategy_funcs[op_overload]
if op_overload in propagator.op_to_schema_info:
_origin_op_strategy_schema = propagator.op_to_schema_info[op_overload]
del propagator.op_to_schema_info[op_overload]
register_op_strategy(op_overload, schema_info=schema_info)(strategy_func)
yield
finally:
# clear this op strategy cache
if _origin_op_strategy_funcs is None:
if op_overload in propagator.op_strategy_funcs:
del propagator.op_strategy_funcs[op_overload]
else:
propagator.op_strategy_funcs[op_overload] = _origin_op_strategy_funcs
if _origin_op_strategy_schema is None:
if op_overload in propagator.op_to_schema_info:
del propagator.op_to_schema_info[op_overload]
else:
propagator.op_to_schema_info[op_overload] = _origin_op_strategy_schema
_clear_fast_path_sharding_prop_cache()
_clear_python_sharding_prop_cache()
def detect_exists_identical_opspec(*args, op, mesh, strategy_function) -> bool:
"""
Given sample input args, detect if identical OpSpecs exists under the same
OpStrategy.
"""
tree_args = tree_leaves(args)
# metadata for each argument
arg_tensor_metadata = [extract_tensor_meta(i) for i in args]
# possible combination of placements for each arg
arg_placement_comb = []
for i in tree_args:
if isinstance(i, torch.Tensor):
# possible placement choice for argument i
placement_choices = (Replicate(), *[Shard(i) for i in range(i.ndim)])
# expand placement choice into full Placements for argument i
arg_placement_comb.append(
list(itertools.product(placement_choices, repeat=mesh.ndim))
)
random.shuffle(arg_placement_comb[-1])
arg_opspec_list = []
for idx, arg_placement in enumerate(arg_placement_comb):
arg_opspec_list.append([])
for placement in arg_placement:
arg_opspec_list[idx].append(
OpSpec(
output_specs=DTensorSpec(
mesh, placement, tensor_meta=arg_tensor_metadata[idx]
)
)
)
op_schema = OpSchema(
op,
args_schema=(tuple(OpStrategy(i) for i in arg_opspec_list)),
kwargs_schema={},
)
with op_strategy_context(op, strategy_function):
output_strategy = strategy_function(op_schema)
# OpSpec doesn't have hashing, convert to str to compare
output_strategy_str_list = [
str(j) for i in tree_leaves(output_strategy) for j in i.strategies
]
return len(output_strategy_str_list) == len(set(output_strategy_str_list))
| TestCostModel |
python | matplotlib__matplotlib | lib/matplotlib/figure.py | {
"start": 2201,
"end": 3775
} | class ____:
"""
Helper class to track Axes in a figure.
Axes are tracked both in the order in which they have been added
(``self._axes`` insertion/iteration order) and in the separate "gca" stack
(which is the index to which they map in the ``self._axes`` dict).
"""
def __init__(self):
self._axes = {} # Mapping of Axes to "gca" order.
self._counter = itertools.count()
def as_list(self):
"""List the Axes that have been added to the figure."""
return [*self._axes] # This relies on dict preserving order.
def remove(self, a):
"""Remove the Axes from the stack."""
self._axes.pop(a)
def bubble(self, a):
"""Move an Axes, which must already exist in the stack, to the top."""
if a not in self._axes:
raise ValueError("Axes has not been added yet")
self._axes[a] = next(self._counter)
def add(self, a):
"""Add an Axes to the stack, ignoring it if already present."""
if a not in self._axes:
self._axes[a] = next(self._counter)
def current(self):
"""Return the active Axes, or None if the stack is empty."""
return max(self._axes, key=self._axes.__getitem__, default=None)
def __getstate__(self):
return {
**vars(self),
"_counter": max(self._axes.values(), default=0)
}
def __setstate__(self, state):
next_counter = state.pop('_counter')
vars(self).update(state)
self._counter = itertools.count(next_counter)
| _AxesStack |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType28.py | {
"start": 502,
"end": 661
} | class ____(list[T_co]):
pass
# This should generate an error because the type parameter for list
# is invariant, so T_co here cannot be contravariant.
| Class1 |
python | jazzband__django-simple-history | simple_history/tests/tests/test_utils.py | {
"start": 14029,
"end": 14600
} | class ____(TestCase):
def setUp(self):
self.data = [
BulkCreateManyToManyModel(name="Object 1"),
BulkCreateManyToManyModel(name="Object 2"),
BulkCreateManyToManyModel(name="Object 3"),
BulkCreateManyToManyModel(name="Object 4"),
BulkCreateManyToManyModel(name="Object 5"),
]
def test_bulk_create_with_history(self):
bulk_create_with_history(self.data, BulkCreateManyToManyModel)
self.assertEqual(BulkCreateManyToManyModel.objects.count(), 5)
| BulkCreateWithManyToManyField |
python | tensorflow__tensorflow | tensorflow/python/framework/type_spec.py | {
"start": 25339,
"end": 29104
} | class ____(object, metaclass=abc.ABCMeta):
"""Class used to encode and decode composite tensor values for batching.
In order to be batched and unbatched by APIs such as `tf.data.Dataset` and
`tf.map_fn`, composite tensors must be encoded using flat tensors that can
themselves be batched or unbatched. `TypeSpecBatchEncoder`s are
responsible for implementing this encoding.
If a composite tensor's shape is a prefix of the shape of all of its
component tensors, then this encoding can usually be performed by just
returning those component tensors as a list. But if the composite tensor
has components whose shape has a more complex relationship to the shape
of the composite tensor, then a custom `TypeSpecBatchEncoder` may
need to be implemented.
"""
@abc.abstractmethod
def batch(self, spec, batch_size):
"""Returns the TypeSpec representing a batch of values described by `spec`.
Args:
spec: The `TypeSpec` for an individual value.
batch_size: An `int` indicating the number of values that are batched
together, or `None` if the batch size is not known.
Returns:
A `TypeSpec` for a batch of values.
"""
raise NotImplementedError(f"{type(self).__name__}.batch")
@abc.abstractmethod
def unbatch(self, spec):
"""Returns the TypeSpec for a single unbatched element in `spec`.
Args:
spec: The `TypeSpec` for a batch of values.
Returns:
A `TypeSpec` for an individual value.
"""
raise NotImplementedError(f"{type(self).__name__}.unbatch")
@abc.abstractmethod
def encode(self, spec, value, minimum_rank=0):
"""Encodes `value` as a nest of batchable `Tensor` or `CompositeTensor`.
Args:
spec: The TypeSpec of the value to encode.
value: A value compatible with `spec`.
minimum_rank: The minimum rank for the returned Tensors, CompositeTensors,
and ExtensionType values. This can be used to ensure that the encoded
values can be unbatched this number of times. If `minimum_rank>0`,
then `t.shape[:minimum_rank]` must be compatible for all values `t`
returned by `encode`.
Returns:
A nest (as defined by `tf.nest`) of `tf.Tensor`s, batchable
`tf.CompositeTensor`s, or `tf.ExtensionType`s. Stacking, unstacking, or
concatenating these encoded values and then decoding the result must be
equivalent to stacking, unstacking, or concatenating the original values.
"""
raise NotImplementedError(f"{type(self).__name__}.encode")
@abc.abstractmethod
def decode(self, spec, encoded_value):
"""Decodes `value` from a batchable tensor encoding.
Args:
spec: The TypeSpec for the result value. If encoded values with spec `s`
were batched, then `spec` should be `s.batch(batch_size)`; or if encoded
values with spec `s` were unbatched, then `spec` should be
`s.unbatch()`.
encoded_value: A nest of values returned by `encode`; or a nest of values
that was formed by stacking, unstacking, or concatenating the
corresponding elements of values returned by `encode`.
Returns:
A value compatible with `type_spec`.
"""
raise NotImplementedError(f"{type(self).__name__}.decode")
@abc.abstractmethod
def encoding_specs(self, spec):
"""Returns a nest of `TypeSpec`(s) describing the encoding for `spec`.
Args:
spec: The TypeSpec whose encoding should be described.
Returns:
A nest (as defined by `tf.nest) of `tf.TypeSpec`, describing the values
that are returned by `self.encode(spec, ...)`. All TypeSpecs in this
nest must be batchable.
"""
raise NotImplementedError(f"{type(self).__name__}.encoding_specs")
| TypeSpecBatchEncoder |
python | vyperlang__vyper | vyper/venom/analysis/mem_ssa.py | {
"start": 2997,
"end": 16480
} | class ____(IRAnalysis):
"""
This analysis converts memory/storage operations into Memory SSA form.
The analysis is based on LLVM's https://llvm.org/docs/MemorySSA.html.
Notably, the LLVM design does not partition memory into ranges.
Rather, it keeps track of memory _states_ (each write increments a
generation counter), and provides "walk" methods to track memory
clobbers. This counterintuitively results in a simpler design
and, according to LLVM, better performance.
See https://llvm.org/docs/MemorySSA.html#design-tradeoffs.
"""
addr_space: AddrSpace
mem_alias_type: type[MemoryAliasAnalysisAbstract]
volatiles: list[MemoryLocation]
def __init__(self, analyses_cache, function):
super().__init__(analyses_cache, function)
self.next_id = 1 # Start from 1 since 0 will be live_on_entry
# live_on_entry node
self.live_on_entry = LiveOnEntry(0)
self.memory_defs: dict[IRBasicBlock, list[MemoryDef]] = {}
self.memory_uses: dict[IRBasicBlock, list[MemoryUse]] = {}
# merge memory states
self.memory_phis: dict[IRBasicBlock, MemoryPhi] = {}
self.inst_to_def: dict[IRInstruction, MemoryDef] = {}
self.inst_to_use: dict[IRInstruction, MemoryUse] = {}
self.volatiles = []
def analyze(self):
# Request required analyses
self.cfg: CFGAnalysis = self.analyses_cache.request_analysis(CFGAnalysis)
self.dom: DominatorTreeAnalysis = self.analyses_cache.request_analysis(
DominatorTreeAnalysis
)
self.memalias = self.analyses_cache.request_analysis(self.mem_alias_type)
# Build initial memory SSA form
self._build_memory_ssa()
# Clean up unnecessary phi nodes
self._remove_redundant_phis()
def mark_location_volatile(self, loc: MemoryLocation) -> MemoryLocation:
self.volatiles.append(loc)
volatile_loc = self.memalias.mark_volatile(loc)
for bb in self.memory_defs:
for mem_def in self.memory_defs[bb]:
if self.memalias.may_alias(mem_def.loc, loc):
mem_def.loc = mem_def.loc.mk_volatile()
return volatile_loc
def get_memory_def(self, inst: IRInstruction) -> Optional[MemoryDef]:
return self.inst_to_def.get(inst)
def get_memory_use(self, inst: IRInstruction) -> Optional[MemoryUse]:
return self.inst_to_use.get(inst)
def get_memory_uses(self) -> Iterable[MemoryUse]:
return self.inst_to_use.values()
def get_memory_defs(self) -> Iterable[MemoryDef]:
return self.inst_to_def.values()
def _build_memory_ssa(self):
"""Build the memory SSA form for the function"""
# First pass: process definitions and uses
for bb in self.cfg.dfs_pre_walk:
self._process_block_definitions(bb)
# Second pass: insert phi nodes where needed
self._insert_phi_nodes()
# Third pass: connect all memory accesses to their reaching definitions
self._connect_uses_to_defs()
self._connect_defs_to_defs()
def _process_block_definitions(self, block: IRBasicBlock):
"""Process memory definitions and uses in a basic block"""
for inst in block.instructions:
# Check for memory reads
loc = self.memalias._get_read_location(inst, self.addr_space)
if loc != MemoryLocation.EMPTY:
mem_use = MemoryUse(self.next_id, inst, loc)
self.next_id += 1
self.memory_uses.setdefault(block, []).append(mem_use)
self.inst_to_use[inst] = mem_use
# Check for memory writes
loc = self.memalias._get_write_location(inst, self.addr_space)
if loc != MemoryLocation.EMPTY:
mem_def = MemoryDef(self.next_id, inst, loc)
self.next_id += 1
self.memory_defs.setdefault(block, []).append(mem_def)
self.inst_to_def[inst] = mem_def
def _insert_phi_nodes(self) -> None:
"""Insert phi nodes at appropriate points in the CFG"""
worklist = list(self.memory_defs.keys())
while worklist:
block = worklist.pop()
for frontier in self.dom.dominator_frontiers[block]:
if frontier not in self.memory_phis:
phi = MemoryPhi(self.next_id, frontier)
# Add operands from each predecessor block
for pred in self.cfg.cfg_in(frontier):
reaching_def = self.get_exit_def(pred)
if reaching_def:
phi.operands.append((reaching_def, pred))
self.next_id += 1
self.memory_phis[frontier] = phi
worklist.append(frontier)
def _connect_uses_to_defs(self):
"""Connect memory uses to their reaching definitions"""
for bb in self.cfg.dfs_pre_walk:
if bb in self.memory_uses:
uses = self.memory_uses[bb]
for use in uses:
use.reaching_def = self._get_reaching_def(use)
def get_exit_def(self, bb: IRBasicBlock) -> Optional[MemoryPhiOperand]:
"""
Get the memory def (or phi) that exits a basic block.
This method determines which memory definition is "live"
at the exit point of a block by:
1. First checking if the block itself contains any
memory definitions and returning the last one
2. If not, checking if the block has a phi node (which
combines definitions from multiple paths)
3. If not, recursively checking the immediate
dominator block
4. If there's no dominator, returning the
live-on-entry definition (initial state)
"""
if bb in self.memory_defs and len(self.memory_defs[bb]) > 0:
return self.memory_defs[bb][-1]
if bb in self.memory_phis:
return self.memory_phis[bb]
if bb == self.dom.entry_block:
return self.live_on_entry
# Get reaching def from immediate dominator
idom = self.dom.immediate_dominators.get(bb)
return self.get_exit_def(idom) if idom else self.live_on_entry
def _get_reaching_def(self, mem_access: MemoryDefOrUse) -> Optional[MemoryAccess]:
"""
Finds the memory definition that reaches a specific memory def or use.
This method searches for the most recent memory definition that affects
the given memory def or use by first looking backwards in the same basic block.
If none is found, it checks for phi nodes in the block or returns the
"in def" from the immediate dominator block. If there is no immediate
dominator, it returns the live-on-entry definition.
"""
assert isinstance(mem_access, MemoryDef) or isinstance(
mem_access, MemoryUse
), "Only MemoryDef or MemoryUse is supported"
bb = mem_access.inst.parent
use_idx = bb.instructions.index(mem_access.inst)
for inst in reversed(bb.instructions[:use_idx]):
if inst in self.inst_to_def:
return self.inst_to_def[inst]
if bb in self.memory_phis:
return self.memory_phis[bb]
if self.cfg.cfg_in(bb):
idom = self.dom.immediate_dominators.get(bb)
return self.get_exit_def(idom) if idom else self.live_on_entry
return self.live_on_entry
def _connect_defs_to_defs(self):
for bb in self.cfg.dfs_pre_walk:
if bb in self.memory_defs:
for mem_def in self.memory_defs[bb]:
mem_def.reaching_def = self._get_reaching_def(mem_def)
def _remove_redundant_phis(self):
"""Remove phi nodes whose arguments are all the same"""
for phi in list(self.memory_phis.values()):
op0 = phi.operands[0]
if all(op[0] == op0[0] for op in phi.operands[1:]):
del self.memory_phis[phi.block]
def get_aliased_memory_accesses(self, access: MemoryAccess) -> OrderedSet[MemoryAccess]:
"""
Get all memory accesses that are aliased with the provided access.
"""
if access.is_live_on_entry:
return OrderedSet()
query_loc = access.loc
return self._walk_for_aliased_access(access, query_loc, OrderedSet())
def _walk_for_aliased_access(
self,
current: Optional[MemoryAccess],
query_loc: MemoryLocation,
visited: OrderedSet[MemoryAccess],
) -> OrderedSet[MemoryAccess]:
aliased_accesses: OrderedSet[MemoryAccess] = OrderedSet()
while current is not None:
if current in visited:
break
visited.add(current)
# If the current node is a memory definition, check if
# it is aliased with the query location.
if isinstance(current, MemoryDef):
if self.memalias.may_alias(query_loc, current.loc):
aliased_accesses.add(current)
# If the current node is a phi node, recursively walk
# the operands.
elif isinstance(current, MemoryPhi):
for access, _ in current.operands:
aliased_accesses.update(
self._walk_for_aliased_access(access, query_loc, visited)
)
# move up the definition chain
current = current.reaching_def
return aliased_accesses
def get_clobbered_memory_access(self, access: MemoryAccess) -> Optional[MemoryAccess]:
"""
Get the memory access that gets clobbered by the provided access.
Returns None if provided the live-on-entry node, otherwise if no clobber
is found, it will return the live-on-entry node.
For example:
```
mstore 0, ... ; 1
mstore 0, ... ; 2
mload 0 ; 2 is clobbered by this memory access
NOTE: This function will return a MemoryPhi if there are multiple clobbering
memory accesses. It is to be seen if we should change this behavior in the future
to return multiple clobbering memory accesses.
NOTE: This corresponds to getClobberingMemoryAccess(!) in LLVM's MemorySSA.h
"""
if access.is_live_on_entry:
return None
clobber = self._walk_for_clobbered_access(access.reaching_def, access.loc, OrderedSet())
return clobber or self.live_on_entry
def _walk_for_clobbered_access(
self,
current: Optional[MemoryAccess],
query_loc: MemoryLocation,
visited: OrderedSet[MemoryAccess],
) -> Optional[MemoryAccess]:
while current is not None and not current.is_live_on_entry:
if current in visited:
break
visited.add(current)
# If the current node is a memory definition, check if
# it completely contains the query location.
if isinstance(current, MemoryDef):
if query_loc.completely_contains(current.loc):
return current
# If the current node is a phi node, check if any of the operands
elif isinstance(current, MemoryPhi):
clobbering_operands = []
for access, _ in current.operands:
clobber = self._walk_for_clobbered_access(access, query_loc, visited)
if clobber:
clobbering_operands.append(clobber)
# Return the phi node if multiple operands have clobbering accesses
if len(clobbering_operands) > 1:
return current
# Return the single clobbering access
if len(clobbering_operands) == 1:
return clobbering_operands[0]
return None
# move up the definition chain
current = current.reaching_def
return None
#
# Printing context methods
#
def _post_instruction(self, inst: IRInstruction) -> str:
s = ""
if inst.parent in self.memory_uses:
for use in self.memory_uses[inst.parent]:
if use.inst == inst:
s += f"\t; use: {use.reaching_def.id_str if use.reaching_def else None}"
if inst.parent in self.memory_defs:
for def_ in self.memory_defs[inst.parent]:
if def_.inst == inst:
s += f"\t; def: {def_.id_str} "
s += f"({def_.reaching_def.id_str if def_.reaching_def else None}) "
clobber = self.get_clobbered_memory_access(def_)
if clobber is not None:
s += f"clobber: {clobber.id_str}"
return s
def _pre_block(self, bb: IRBasicBlock) -> str:
s = ""
if bb in self.memory_phis:
phi = self.memory_phis[bb]
s += f" ; phi: {phi.id_str} <- "
s += ", ".join(f"{op[0].id_str} from @{op[1].label}" for op in phi.operands)
s += "\n"
return s
@contextlib.contextmanager
def print_context(self):
ir_printer.set(self)
try:
yield
finally:
ir_printer.set(None)
| MemSSAAbstract |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/fabric.py | {
"start": 8884,
"end": 14661
} | class ____(Datasource):
"""
Microsoft Fabric Datasource.
https://pypi.org/project/semantic-link/
"""
# class var definitions
asset_types: ClassVar[List[Type[DataAsset]]] = [
PowerBIDax,
PowerBIMeasure,
PowerBITable,
]
# any fabric datsource specific fields should be added to this set
# example a connection_string field or a data directory field
_EXTRA_EXCLUDED_EXEC_ENG_ARGS: ClassVar[set] = {"workspace", "dataset"}
# right side of the operator determines the type name
# left side enforces the names on instance creation
type: Literal["fabric_powerbi"] = "fabric_powerbi"
assets: List[AssetTypes] = []
# fabric datasource specific fields
workspace: Optional[Union[uuid.UUID, str]] = None
dataset: Union[uuid.UUID, str]
@property
@override
def execution_engine_type(self) -> Type[PandasExecutionEngine]:
"""Return the PandasExecutionEngine unless the override is set"""
from great_expectations.execution_engine.pandas_execution_engine import (
PandasExecutionEngine,
)
return PandasExecutionEngine
@override
def test_connection(self, test_assets: bool = True) -> None:
"""Test the connection for the FabricPowerBIDatasource.
Args:
test_assets: If assets have been passed to the Datasource, whether to test them as well.
Raises:
TestConnectionError: If the connection test fails.
"""
if not self._running_on_fabric():
raise TestConnectionError("Must be running Microsoft Fabric to use this datasource") # noqa: TRY003 # FIXME CoP
try:
from sempy import fabric # noqa: F401 # test if fabric is installed
except Exception as import_err:
raise TestConnectionError( # noqa: TRY003 # FIXME CoP
"Could not import `sempy.fabric`\npip install semantic-link-sempy"
) from import_err
if self.assets and test_assets:
for asset in self.assets:
asset._datasource = self
asset.test_connection()
@public_api
def add_powerbi_dax_asset(
self,
name: str,
dax_string: str,
batch_metadata: Optional[BatchMetadata] = None,
) -> PowerBIDax:
"""Adds a PowerBIDax asset to this datasource.
Args:
name: The name of this asset.
TODO: other args
batch_metadata: BatchMetadata we want to associate with this DataAsset and all batches derived from it.
Returns:
The asset that is added to the datasource.
""" # noqa: E501 # FIXME CoP
asset = PowerBIDax(
name=name,
batch_metadata=batch_metadata or {},
dax_string=dax_string,
)
return self._add_asset(asset)
@public_api
def add_powerbi_measure_asset( # noqa: PLR0913 # FIXME CoP
self,
name: str,
measure: Union[str, List[str]],
batch_metadata: Optional[BatchMetadata] = None,
groupby_columns: Optional[List[str]] = None,
filters: Optional[Dict[str, List[str]]] = None,
fully_qualified_columns: Optional[bool] = None,
num_rows: Optional[int] = None,
use_xmla: bool = False,
) -> PowerBIMeasure:
"""Adds a PowerBIMeasure asset to this datasource.
Args:
name: The name of this asset.
batch_metadata: BatchMetadata we want to associate with this DataAsset and all batches derived from it.
Returns:
The asset that is added to the datasource.
""" # noqa: E501 # FIXME CoP
asset = PowerBIMeasure(
name=name,
batch_metadata=batch_metadata or {},
groupby_columns=groupby_columns,
measure=measure,
# TODO: require custom serde for keys that are tuples
filters=filters,
fully_qualified_columns=fully_qualified_columns,
num_rows=num_rows,
use_xmla=use_xmla,
)
return self._add_asset(asset)
@public_api
def add_powerbi_table_asset( # noqa: PLR0913 # FIXME CoP
self,
name: str,
table: str,
batch_metadata: Optional[BatchMetadata] = None,
fully_qualified_columns: bool = False,
num_rows: Optional[int] = None,
multiindex_hierarchies: bool = False,
mode: Mode = "xmla",
) -> PowerBITable:
"""Adds a PowerBITable asset to this datasource.
Args:
name: The name of this table asset.
table_name: The table where the data resides.
schema: The schema that holds the table.
batch_metadata: BatchMetadata we want to associate with this DataAsset and all batches derived from it.
Returns:
The asset that is added to the datasource.
""" # noqa: E501 # FIXME CoP
asset = PowerBITable(
name=name,
batch_metadata=batch_metadata or {},
table=table,
fully_qualified_columns=fully_qualified_columns,
num_rows=num_rows,
multiindex_hierarchies=multiindex_hierarchies,
mode=mode,
)
return self._add_asset(asset)
@staticmethod
def _running_on_fabric() -> bool:
if (
os.environ.get("AZURE_SERVICE") # noqa: TID251 # needed for fabric
!= _REQUIRED_FABRIC_SERVICE
):
return False
from pyspark.sql import SparkSession # noqa: TID251 # needed for fabric
sc = SparkSession.builder.getOrCreate().sparkContext
return sc.getConf().get("spark.cluster.type") != "synapse"
| FabricPowerBIDatasource |
python | pola-rs__polars | py-polars/tests/unit/io/database/test_write.py | {
"start": 1035,
"end": 11565
} | class ____:
"""Database write tests that share common pytest/parametrize options."""
@staticmethod
def _get_connection(uri: str, engine: DbWriteEngine, uri_connection: bool) -> Any:
if uri_connection:
return uri
elif engine == "sqlalchemy":
return create_engine(uri)
else:
return _open_adbc_connection(uri)
def test_write_database_create(
self, engine: DbWriteEngine, uri_connection: bool, tmp_path: Path
) -> None:
"""Test basic database table creation."""
df = pl.DataFrame(
{
"id": [1234, 5678],
"name": ["misc", "other"],
"value": [1000.0, -9999.0],
}
)
tmp_path.mkdir(exist_ok=True)
test_db_uri = f"sqlite:///{tmp_path}/test_create_{int(uri_connection)}.db"
table_name = "test_create"
conn = self._get_connection(test_db_uri, engine, uri_connection)
assert (
df.write_database(
table_name=table_name,
connection=conn,
engine=engine,
)
== 2
)
result = pl.read_database(
query=f"SELECT * FROM {table_name}",
connection=create_engine(test_db_uri),
)
assert_frame_equal(result, df)
if hasattr(conn, "close"):
conn.close()
def test_write_database_append_replace(
self, engine: DbWriteEngine, uri_connection: bool, tmp_path: Path
) -> None:
"""Test append/replace ops against existing database table."""
df = pl.DataFrame(
{
"key": ["xx", "yy", "zz"],
"value": [123, None, 789],
"other": [5.5, 7.0, None],
}
)
tmp_path.mkdir(exist_ok=True)
test_db_uri = f"sqlite:///{tmp_path}/test_append_{int(uri_connection)}.db"
table_name = "test_append"
conn = self._get_connection(test_db_uri, engine, uri_connection)
assert (
df.write_database(
table_name=table_name,
connection=conn,
engine=engine,
)
== 3
)
with pytest.raises(Exception): # noqa: B017
df.write_database(
table_name=table_name,
connection=conn,
if_table_exists="fail",
engine=engine,
)
assert (
df.write_database(
table_name=table_name,
connection=conn,
if_table_exists="replace",
engine=engine,
)
== 3
)
result = pl.read_database(
query=f"SELECT * FROM {table_name}",
connection=create_engine(test_db_uri),
)
assert_frame_equal(result, df)
assert (
df[:2].write_database(
table_name=table_name,
connection=conn,
if_table_exists="append",
engine=engine,
)
== 2
)
result = pl.read_database(
query=f"SELECT * FROM {table_name}",
connection=create_engine(test_db_uri),
)
assert_frame_equal(result, pl.concat([df, df[:2]]))
if engine == "adbc" and not uri_connection:
assert conn._closed is False
if hasattr(conn, "close"):
conn.close()
def test_write_database_create_quoted_tablename(
self, engine: DbWriteEngine, uri_connection: bool, tmp_path: Path
) -> None:
"""Test parsing/handling of quoted database table names."""
df = pl.DataFrame(
{
"col x": [100, 200, 300],
"col y": ["a", "b", "c"],
}
)
tmp_path.mkdir(exist_ok=True)
test_db_uri = f"sqlite:///{tmp_path}/test_create_quoted.db"
# table name has some special chars, so requires quoting, and
# is explicitly qualified with the sqlite 'main' schema
qualified_table_name = f'main."test-append-{engine}-{int(uri_connection)}"'
conn = self._get_connection(test_db_uri, engine, uri_connection)
assert (
df.write_database(
table_name=qualified_table_name,
connection=conn,
engine=engine,
)
== 3
)
assert (
df.write_database(
table_name=qualified_table_name,
connection=conn,
if_table_exists="replace",
engine=engine,
)
== 3
)
result = pl.read_database(
query=f"SELECT * FROM {qualified_table_name}",
connection=create_engine(test_db_uri),
)
assert_frame_equal(result, df)
if engine == "adbc" and not uri_connection:
assert conn._closed is False
if hasattr(conn, "close"):
conn.close()
def test_write_database_errors(
self, engine: DbWriteEngine, uri_connection: bool, tmp_path: Path
) -> None:
"""Confirm that expected errors are raised."""
df = pl.DataFrame({"colx": [1, 2, 3]})
with pytest.raises(
ValueError, match=r"`table_name` appears to be invalid: 'w.x.y.z'"
):
df.write_database(
connection="sqlite:///:memory:",
table_name="w.x.y.z",
engine=engine,
)
with pytest.raises(
ValueError,
match=r"`if_table_exists` must be one of .* got 'do_something'",
):
df.write_database(
connection="sqlite:///:memory:",
table_name="main.test_errs",
if_table_exists="do_something", # type: ignore[arg-type]
engine=engine,
)
with pytest.raises(
TypeError,
match=r"unrecognised connection type.*",
):
df.write_database(connection=True, table_name="misc", engine=engine) # type: ignore[arg-type]
def test_write_database_adbc_missing_driver_error(
self, engine: DbWriteEngine, uri_connection: bool, tmp_path: Path
) -> None:
# Skip for sqlalchemy
if engine == "sqlalchemy":
return
df = pl.DataFrame({"colx": [1, 2, 3]})
with pytest.raises(
ModuleNotFoundError, match=r"ADBC 'adbc_driver_mysql' driver not detected."
):
df.write_database(
table_name="my_schema.my_table",
connection="mysql:///:memory:",
engine=engine,
)
@pytest.mark.write_disk
def test_write_database_using_sa_session(tmp_path: str) -> None:
df = pl.DataFrame(
{
"key": ["xx", "yy", "zz"],
"value": [123, None, 789],
"other": [5.5, 7.0, None],
}
)
table_name = "test_sa_session"
test_db_uri = f"sqlite:///{tmp_path}/test_sa_session.db"
engine = create_engine(test_db_uri, poolclass=NullPool)
with Session(engine) as session:
df.write_database(table_name, session)
session.commit()
with Session(engine) as session:
result = pl.read_database(
query=f"select * from {table_name}", connection=session
)
assert_frame_equal(result, df)
@pytest.mark.write_disk
@pytest.mark.parametrize("pass_connection", [True, False])
def test_write_database_sa_rollback(tmp_path: str, pass_connection: bool) -> None:
df = pl.DataFrame(
{
"key": ["xx", "yy", "zz"],
"value": [123, None, 789],
"other": [5.5, 7.0, None],
}
)
table_name = "test_sa_rollback"
test_db_uri = f"sqlite:///{tmp_path}/test_sa_rollback.db"
engine = create_engine(test_db_uri, poolclass=NullPool)
with Session(engine) as session:
if pass_connection:
conn = session.connection()
df.write_database(table_name, conn)
else:
df.write_database(table_name, session)
session.rollback()
with Session(engine) as session:
count = pl.read_database(
query=f"select count(*) from {table_name}", connection=session
).item(0, 0)
assert isinstance(count, int)
assert count == 0
@pytest.mark.write_disk
@pytest.mark.parametrize("pass_connection", [True, False])
def test_write_database_sa_commit(tmp_path: str, pass_connection: bool) -> None:
df = pl.DataFrame(
{
"key": ["xx", "yy", "zz"],
"value": [123, None, 789],
"other": [5.5, 7.0, None],
}
)
table_name = "test_sa_commit"
test_db_uri = f"sqlite:///{tmp_path}/test_sa_commit.db"
engine = create_engine(test_db_uri, poolclass=NullPool)
with Session(engine) as session:
if pass_connection:
conn = session.connection()
df.write_database(table_name, conn)
else:
df.write_database(table_name, session)
session.commit()
with Session(engine) as session:
result = pl.read_database(
query=f"select * from {table_name}", connection=session
)
assert_frame_equal(result, df)
@pytest.mark.skipif(sys.platform == "win32", reason="adbc not available on Windows")
def test_write_database_adbc_temporary_table() -> None:
"""Confirm that execution_options are passed along to create temporary tables."""
df = pl.DataFrame({"colx": [1, 2, 3]})
temp_tbl_name = "should_be_temptable"
expected_temp_table_create_sql = (
"""CREATE TABLE "should_be_temptable" ("colx" INTEGER)"""
)
# test with sqlite in memory
conn = _open_adbc_connection("sqlite:///:memory:")
assert (
df.write_database(
temp_tbl_name,
connection=conn,
if_table_exists="fail",
engine_options={"temporary": True},
)
== 3
)
temp_tbl_sql_df = pl.read_database(
"select sql from sqlite_temp_master where type='table' and tbl_name = ?",
connection=conn,
execute_options={"parameters": [temp_tbl_name]},
)
assert temp_tbl_sql_df.shape[0] == 1, "no temp table created"
actual_temp_table_create_sql = temp_tbl_sql_df["sql"][0]
assert expected_temp_table_create_sql == actual_temp_table_create_sql
if hasattr(conn, "close"):
conn.close()
| TestWriteDatabase |
python | spack__spack | lib/spack/spack/solver/core.py | {
"start": 9057,
"end": 9901
} | class ____:
"""Tracks context in which a Spec's clause-set is generated (i.e.
with ``SpackSolverSetup.spec_clauses``).
Facts generated for the spec may include this context.
"""
def __init__(self, *, source: Optional[str] = None):
# This can be "literal" for constraints that come from a user
# spec (e.g. from the command line); it can be the output of
# `ConstraintOrigin.append_type_suffix`; the default is "none"
# (which means it isn't important to keep track of the source
# in that case).
self.source = "none" if source is None else source
self.wrap_node_requirement: Optional[bool] = None
def using_libc_compatibility() -> bool:
"""Returns True if we are currently using libc compatibility"""
return spack.platforms.host().name == "linux"
| SourceContext |
python | pytorch__pytorch | torch/nn/modules/activation.py | {
"start": 8204,
"end": 9046
} | class ____(Hardtanh):
r"""Applies the ReLU6 function element-wise.
.. math::
\text{ReLU6}(x) = \min(\max(0,x), 6)
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/ReLU6.png
Examples::
>>> m = nn.ReLU6()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def __init__(self, inplace: bool = False) -> None:
super().__init__(0.0, 6.0, inplace)
def extra_repr(self) -> str:
"""
Return the extra representation of the module.
"""
inplace_str = "inplace=True" if self.inplace else ""
return inplace_str
| ReLU6 |
python | doocs__leetcode | solution/0700-0799/0746.Min Cost Climbing Stairs/Solution3.py | {
"start": 0,
"end": 214
} | class ____:
def minCostClimbingStairs(self, cost: List[int]) -> int:
f = g = 0
for i in range(2, len(cost) + 1):
f, g = g, min(f + cost[i - 2], g + cost[i - 1])
return g
| Solution |
python | getsentry__sentry | tests/sentry/sentry_apps/web/test_sentryapp_avatar.py | {
"start": 353,
"end": 1191
} | class ____(APITestCase):
def test_headers_control_file(self) -> None:
sentry_app = self.create_sentry_app(name="Meow", organization=self.organization)
photo = ControlFile.objects.create(name="test.png", type="avatar.file")
photo.putfile(BytesIO(b"test"))
avatar = SentryAppAvatar.objects.create(
sentry_app=sentry_app, avatar_type=1, color=True, control_file_id=photo.id
)
url = reverse("sentry-app-avatar-url", kwargs={"avatar_id": avatar.ident})
response = self.client.get(url)
assert response.status_code == 200
assert response["Cache-Control"] == FOREVER_CACHE
assert response.get("Vary") == "Accept-Language, Cookie"
assert response.get("Set-Cookie") is None
assert response["Access-Control-Allow-Origin"]
| SentryAppAvatarTest |
python | PyCQA__pylint | pylint/checkers/utils.py | {
"start": 15689,
"end": 15801
} | class ____(Exception):
"""A format string ended in the middle of a format specifier."""
| IncompleteFormatString |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_format_returned.py | {
"start": 195,
"end": 331
} | class ____:
"""__format__ returns <type 'str'>"""
def __format__(self, format_spec):
return "some format"
| FirstGoodFormat |
python | altair-viz__altair | tests/utils/test_schemapi.py | {
"start": 4583,
"end": 41128
} | class ____(_TestSchema):
_schema = {
**_validation_selection_schema,
"$schema": "http://json-schema.org/draft-06/schema#",
}
def test_construct_multifaceted_schema():
dct = {
"a": {"foo": "bar"},
"a2": {"foo": 42},
"b": ["a", "b", "c"],
"b2": [1, 2, 3],
"c": 42,
"d": ["x", "y", "z"],
"e": ["a", "b"],
}
myschema = MySchema.from_dict(dct)
assert myschema.to_dict() == dct
myschema2 = MySchema(**dct)
assert myschema2.to_dict() == dct
assert isinstance(myschema.a, StringMapping)
assert isinstance(myschema.a2, dict)
assert isinstance(myschema.b, StringArray)
assert isinstance(myschema.b2, list)
assert isinstance(myschema.d, StringArray)
def test_schema_cases():
assert Derived(a=4, b="yo").to_dict() == {"a": 4, "b": "yo"}
assert Derived(a=4, c={"d": "hey"}).to_dict() == {"a": 4, "c": {"d": "hey"}}
assert Derived(a=4, b="5", c=Foo(d="val")).to_dict() == {
"a": 4,
"b": "5",
"c": {"d": "val"},
}
assert Foo(d="hello", f=4).to_dict() == {"d": "hello", "f": 4}
assert Derived().to_dict() == {}
assert Foo().to_dict() == {}
with pytest.raises(jsonschema.ValidationError):
# a needs to be an integer
Derived(a="yo").to_dict()
with pytest.raises(jsonschema.ValidationError):
# Foo.d needs to be a string
Derived(c=Foo(4)).to_dict()
with pytest.raises(jsonschema.ValidationError):
# no additional properties allowed
Derived(foo="bar").to_dict()
def test_round_trip():
D = {"a": 4, "b": "yo"}
assert Derived.from_dict(D).to_dict() == D
D = {"a": 4, "c": {"d": "hey"}}
assert Derived.from_dict(D).to_dict() == D
D = {"a": 4, "b": "5", "c": {"d": "val"}}
assert Derived.from_dict(D).to_dict() == D
D = {"d": "hello", "f": 4}
assert Foo.from_dict(D).to_dict() == D
def test_from_dict():
D = {"a": 4, "b": "5", "c": {"d": "val"}}
obj = Derived.from_dict(D)
assert obj.a == 4
assert obj.b == "5"
assert isinstance(obj.c, Foo)
def test_simple_type():
assert SimpleUnion(4).to_dict() == 4
def test_simple_array():
assert SimpleArray([4, 5, "six"]).to_dict() == [4, 5, "six"]
assert SimpleArray.from_dict(list("abc")).to_dict() == list("abc") # pyright: ignore[reportArgumentType]
def test_definition_union():
obj = DefinitionUnion.from_dict("A") # pyright: ignore[reportArgumentType]
assert isinstance(obj, Bar)
assert obj.to_dict() == "A"
obj = DefinitionUnion.from_dict("B") # pyright: ignore[reportArgumentType]
assert isinstance(obj, Bar)
assert obj.to_dict() == "B"
obj = DefinitionUnion.from_dict({"d": "yo"})
assert isinstance(obj, Foo)
assert obj.to_dict() == {"d": "yo"}
def test_invalid_properties():
dct = {"for": 2, "as": 3, "vega-lite": 4, "$schema": 5}
invalid = InvalidProperties.from_dict(dct)
assert invalid["for"] == 2
assert invalid["as"] == 3
assert invalid["vega-lite"] == 4
assert invalid["$schema"] == 5
assert invalid.to_dict() == dct
def test_undefined_singleton():
assert Undefined is UndefinedType()
def test_schema_validator_selection():
# Tests if the correct validator class is chosen based on the $schema
# property in the schema. This uses a backwards-incompatible change
# in Draft 6 which introduced exclusiveMinimum as a number instead of a boolean.
# Therefore, with Draft 4 there is no actual minimum set as a number and validating
# the dictionary below passes. With Draft 6, it correctly checks if the number is
# > 10 and raises a ValidationError. See
# https://json-schema.org/draft-06/json-schema-release-notes.html#q-what-are-
# the-changes-between-draft-04-and-draft-06 for more details
dct = {
"e": 9,
}
assert Draft4Schema.from_dict(dct).to_dict() == dct
with pytest.raises(
jsonschema.exceptions.ValidationError,
match="9 is less than or equal to the minimum of 10",
):
Draft6Schema.from_dict(dct)
@pytest.fixture
def dct():
return {
"a": {"foo": "bar"},
"a2": {"foo": 42},
"b": ["a", "b", "c"],
"b2": [1, 2, 3],
"c": 42,
"d": ["x", "y", "z"],
}
def test_copy_method(dct):
myschema = MySchema.from_dict(dct)
# Make sure copy is deep
copy = myschema.copy(deep=True)
copy["a"]["foo"] = "new value"
copy["b"] = ["A", "B", "C"]
copy["c"] = 164
assert myschema.to_dict() == dct
# If we ignore a value, changing the copy changes the original
copy = myschema.copy(deep=True, ignore=["a"])
copy["a"]["foo"] = "new value"
copy["b"] = ["A", "B", "C"]
copy["c"] = 164
mydct = myschema.to_dict()
assert mydct["a"]["foo"] == "new value"
assert mydct["b"][0] == dct["b"][0]
assert mydct["c"] == dct["c"]
# If copy is not deep, then changing copy below top level changes original
copy = myschema.copy(deep=False)
copy["a"]["foo"] = "baz"
copy["b"] = ["A", "B", "C"]
copy["c"] = 164
mydct = myschema.to_dict()
assert mydct["a"]["foo"] == "baz"
assert mydct["b"] == dct["b"]
assert mydct["c"] == dct["c"]
def test_copy_module(dct):
myschema = MySchema.from_dict(dct)
cp = copy.deepcopy(myschema)
cp["a"]["foo"] = "new value"
cp["b"] = ["A", "B", "C"]
cp["c"] = 164
assert myschema.to_dict() == dct
def test_attribute_error():
m = MySchema()
invalid_attr = "invalid_attribute"
with pytest.raises(AttributeError) as err:
getattr(m, invalid_attr)
assert str(err.value) == ("'MySchema' object has no attribute 'invalid_attribute'")
def test_to_from_json(dct):
json_str = MySchema.from_dict(dct).to_json()
new_dct = MySchema.from_json(json_str).to_dict()
assert new_dct == dct
def test_to_from_pickle(dct):
myschema = MySchema.from_dict(dct)
output = io.BytesIO()
pickle.dump(myschema, output)
output.seek(0)
myschema_new = pickle.load(output)
assert myschema_new.to_dict() == dct
def test_class_with_no_schema():
class BadSchema(SchemaBase):
pass
with pytest.raises(ValueError) as err: # noqa: PT011
BadSchema(4)
assert str(err.value).startswith("Cannot instantiate object")
@pytest.mark.parametrize("use_json", [True, False])
def test_hash_schema(use_json):
classes = _TestSchema._default_wrapper_classes()
for cls in classes:
hsh1 = _FromDict.hash_schema(cls._schema, use_json=use_json)
hsh2 = _FromDict.hash_schema(cls._schema, use_json=use_json)
assert hsh1 == hsh2
assert hash(hsh1) == hash(hsh2)
def test_schema_validation_error():
try:
MySchema(a={"foo": 4})
the_err = None
except jsonschema.ValidationError as err:
the_err = err
assert isinstance(the_err, SchemaValidationError)
message = str(the_err)
assert the_err.message in message
def chart_error_example__layer():
# Error: Width is not a valid property of a VConcatChart
points = (
alt.Chart(data.cars.url)
.mark_point()
.encode(
x="Horsepower:Q",
y="Miles_per_Gallon:Q",
)
)
return (points & points).properties(width=400)
def chart_error_example__hconcat():
# Error: Invalid value for title in Text
source = data.cars()
points = (
alt.Chart(source)
.mark_point()
.encode(
x="Horsepower",
y="Miles_per_Gallon",
)
)
text = (
alt.Chart(source)
.mark_text()
.encode(
alt.Text("Horsepower:N", title={"text": "Horsepower", "align": "right"}) # pyright: ignore[reportArgumentType]
)
)
return points | text
def chart_error_example__invalid_y_option_value_unknown_x_option():
# Error 1: unknown is an invalid channel option for X
# Error 2: Invalid Y option value "asdf" and unknown option "unknown" for X
return (
alt.Chart(data.barley())
.mark_bar()
.encode(
x=alt.X("variety", unknown=2),
y=alt.Y("sum(yield)", stack="asdf"), # pyright: ignore[reportArgumentType]
)
)
def chart_error_example__invalid_y_option_value():
# Error: Invalid Y option value "asdf"
return (
alt.Chart(data.barley())
.mark_bar()
.encode(
x=alt.X("variety"),
y=alt.Y("sum(yield)", stack="asdf"), # pyright: ignore[reportArgumentType]
)
)
def chart_error_example__invalid_y_option_value_with_condition():
# Error: Invalid Y option value "asdf". Condition is correct
# but is added below as in previous implementations of Altair this interfered
# with finding the invalidChannel error
return (
alt.Chart(data.barley())
.mark_bar()
.encode(
x="variety",
y=alt.Y("sum(yield)", stack="asdf"), # pyright: ignore[reportArgumentType]
opacity=alt.condition("datum.yield > 0", alt.value(1), alt.value(0.2)),
)
)
def chart_error_example__invalid_timeunit_value():
# Error: Invalid value for Angle.timeUnit
return alt.Chart().encode(alt.Angle().timeUnit("invalid_value")) # pyright: ignore[reportArgumentType]
def chart_error_example__invalid_sort_value():
# Error: Invalid value for Angle.sort
return alt.Chart().encode(alt.Angle().sort("invalid_value"))
def chart_error_example__invalid_bandposition_value():
# Error: Invalid value for Text.bandPosition
return (
alt.Chart(data.cars())
.mark_text(align="right")
.encode(alt.Text("Horsepower:N", bandPosition="4")) # pyright: ignore[reportArgumentType]
)
def chart_error_example__invalid_type():
# Error: Invalid value for type
return alt.Chart().encode(alt.X(type="unknown")) # pyright: ignore[reportArgumentType]
def chart_error_example__additional_datum_argument():
# Error: wrong_argument is not a valid argument to datum
return alt.Chart().mark_point().encode(x=alt.datum(1, wrong_argument=1))
def chart_error_example__additional_value_argument():
# Error: `ColorValue` has no parameter named 'predicate'
return alt.Chart().mark_point().encode(color=alt.value("red", predicate=True))
def chart_error_example__invalid_value_type():
# Error: Value cannot be an integer in this case
return (
alt.Chart(data.cars())
.mark_point()
.encode(
x="Acceleration:Q",
y="Horsepower:Q",
color=alt.value(1), # should be eg. alt.value('red')
)
)
def chart_error_example__wrong_tooltip_type_in_faceted_chart():
# Error: Wrong data type to pass to tooltip
return (
alt.Chart(pd.DataFrame({"a": [1]}))
.mark_point()
.encode(tooltip=[{"wrong"}]) # pyright: ignore[reportArgumentType]
.facet()
)
def chart_error_example__wrong_tooltip_type_in_layered_chart():
# Error: Wrong data type to pass to tooltip
return alt.layer(alt.Chart().mark_point().encode(tooltip=[{"wrong"}])) # pyright: ignore[reportArgumentType]
def chart_error_example__two_errors_in_layered_chart():
# Error 1: Wrong data type to pass to tooltip
# Error 2: `Color` has no parameter named 'invalidArgument'
return alt.layer(
alt.Chart().mark_point().encode(tooltip=[{"wrong"}]), # pyright: ignore[reportArgumentType]
alt.Chart().mark_line().encode(alt.Color(invalidArgument="unknown")),
)
def chart_error_example__two_errors_in_complex_concat_layered_chart():
# Error 1: Wrong data type to pass to tooltip
# Error 2: Invalid value for bandPosition
return (
chart_error_example__wrong_tooltip_type_in_layered_chart()
| chart_error_example__invalid_bandposition_value()
)
def chart_error_example__three_errors_in_complex_concat_layered_chart():
# Error 1: Wrong data type to pass to tooltip
# Error 2: `Color` has no parameter named 'invalidArgument'
# Error 3: Invalid value for bandPosition
return (
chart_error_example__two_errors_in_layered_chart()
| chart_error_example__invalid_bandposition_value()
)
def chart_error_example__two_errors_with_one_in_nested_layered_chart():
# Error 1: invalidOption is not a valid option for Scale
# Error 2: `Color` has no parameter named 'invalidArgument'
# In the final chart object, the `layer` attribute will look like this:
# [alt.Chart(...), alt.Chart(...), alt.LayerChart(...)]
# We can therefore use this example to test if an error is also
# spotted in a layered chart within another layered chart
source = pd.DataFrame(
[
{"Day": 1, "Value": 103.3},
{"Day": 2, "Value": 394.8},
{"Day": 3, "Value": 199.5},
]
)
blue_bars = (
alt.Chart(source)
.encode(alt.X("Day:O").scale(invalidOption=10), alt.Y("Value:Q")) # pyright: ignore[reportCallIssue]
.mark_bar()
)
red_bars = (
alt.Chart(source)
.transform_filter(alt.datum.Value >= 300)
.transform_calculate(as_="baseline", calculate="300")
.encode(
alt.X("Day:O"),
alt.Y("baseline:Q"),
alt.Y2("Value:Q"),
color=alt.value("#e45755"),
)
.mark_bar()
)
bars = blue_bars + red_bars
base = alt.Chart().encode(y=alt.datum(300))
rule = base.mark_rule().encode(alt.Color(invalidArgument="unknown"))
text = base.mark_text(text="hazardous")
rule_text = rule + text
chart = bars + rule_text
return chart
def chart_error_example__four_errors_hide_fourth():
# Error 1: unknown is not a valid encoding channel option
# Error 2: Invalid Y option value "asdf".
# Error 3: another_unknown is not a valid encoding channel option
# Error 4: fourth_error is not a valid encoding channel option <- this error
# should not show up in the final error message as it is capped at showing
# 3 errors
return (
alt.Chart(data.barley())
.mark_bar()
.encode(
x=alt.X("variety", unknown=2),
y=alt.Y("sum(yield)", stack="asdf"), # pyright: ignore[reportArgumentType]
color=alt.Color("variety", another_unknown=2),
opacity=alt.Opacity("variety", fourth_error=1),
)
)
def id_func_chart_error_example(val) -> str:
"""
Ensures the generated test-id name uses only the unique portion of `chart_func`.
Otherwise the name is like below, but ``...`` represents the full error message::
"test_chart_validation_errors[chart_error_example__two_errors_with_one_in_nested_layered_chart-...]"
"""
if isinstance(val, types.FunctionType):
return val.__name__.replace("chart_error_example__", "")
else:
return ""
# NOTE: Avoids all cases appearing in a failure traceback
# At the time of writing, this is over 300 lines
chart_funcs_error_message: list[tuple[Callable[..., Any], str]] = [
(
chart_error_example__invalid_y_option_value_unknown_x_option,
rf"""Multiple errors were found.
Error 1: `X` has no parameter named 'unknown'
Existing parameter names are:
shorthand bin scale timeUnit
aggregate field sort title
axis impute stack type
bandPosition
See the help for `X` to read the full description of these parameters
Error 2: 'asdf' is an invalid value for `stack`. Valid values are:
- One of \['zero', 'center', 'normalize'\]
- Of type {re.escape("`bool | None`")}$""",
),
(
chart_error_example__wrong_tooltip_type_in_faceted_chart,
rf"""'\['wrong'\]' is an invalid value for `field`. Valid values are of type {re.escape("`str | Mapping[str, Any]`")}.$""",
),
(
chart_error_example__wrong_tooltip_type_in_layered_chart,
rf"""'\['wrong'\]' is an invalid value for `field`. Valid values are of type {re.escape("`str | Mapping[str, Any]`")}.$""",
),
(
chart_error_example__two_errors_in_layered_chart,
rf"""Multiple errors were found.
Error 1: '\['wrong'\]' is an invalid value for `field`. Valid values are of type {re.escape("`str | Mapping[str, Any]`")}.
Error 2: `Color` has no parameter named 'invalidArgument'
Existing parameter names are:
shorthand bin legend timeUnit
aggregate condition scale title
bandPosition field sort type
See the help for `Color` to read the full description of these parameters$""",
),
(
chart_error_example__two_errors_in_complex_concat_layered_chart,
rf"""Multiple errors were found.
Error 1: '\['wrong'\]' is an invalid value for `field`. Valid values are of type {re.escape("`str | Mapping[str, Any]`")}.
Error 2: '4' is an invalid value for `bandPosition`. Valid values are of type `float`.$""",
),
(
chart_error_example__three_errors_in_complex_concat_layered_chart,
rf"""Multiple errors were found.
Error 1: '\['wrong'\]' is an invalid value for `field`. Valid values are of type {re.escape("`str | Mapping[str, Any]`")}.
Error 2: `Color` has no parameter named 'invalidArgument'
Existing parameter names are:
shorthand bin legend timeUnit
aggregate condition scale title
bandPosition field sort type
See the help for `Color` to read the full description of these parameters
Error 3: '4' is an invalid value for `bandPosition`. Valid values are of type `float`.$""",
),
(
chart_error_example__two_errors_with_one_in_nested_layered_chart,
r"""Multiple errors were found.
Error 1: `Scale` has no parameter named 'invalidOption'
Existing parameter names are:
align domain exponent paddingOuter round
base domainMax interpolate range scheme
bins domainMid nice rangeMax type
clamp domainMin padding rangeMin zero
constant domainRaw paddingInner reverse
See the help for `Scale` to read the full description of these parameters
Error 2: `Color` has no parameter named 'invalidArgument'
Existing parameter names are:
shorthand bin legend timeUnit
aggregate condition scale title
bandPosition field sort type
See the help for `Color` to read the full description of these parameters$""",
),
(
chart_error_example__layer,
r"""`VConcatChart` has no parameter named 'width'
Existing parameter names are:
vconcat center description params title
autosize config name resolve transform
background data padding spacing usermeta
bounds datasets
See the help for `VConcatChart` to read the full description of these parameters$""",
),
(
chart_error_example__invalid_y_option_value,
rf"""'asdf' is an invalid value for `stack`. Valid values are:
- One of \['zero', 'center', 'normalize'\]
- Of type {re.escape("`bool | None`")}$""",
),
(
chart_error_example__invalid_y_option_value_with_condition,
rf"""'asdf' is an invalid value for `stack`. Valid values are:
- One of \['zero', 'center', 'normalize'\]
- Of type {re.escape("`bool | None`")}$""",
),
(
chart_error_example__hconcat,
rf"""'{{'text': 'Horsepower', 'align': 'right'}}' is an invalid value for `title`. Valid values are of type {re.escape("`str | Sequence | None`")}.$""",
),
(
chart_error_example__invalid_timeunit_value,
rf"""'invalid_value' is an invalid value for `timeUnit`. Valid values are:
- One of \['year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds'\]
- One of \['utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds'\]
- One of \['yearquarter', 'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds'\]
- One of \['utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds'\]
- One of \['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear'\]
- Of type {re.escape("`Mapping[str, Any]`")}$""",
),
(
chart_error_example__invalid_sort_value,
rf"""'invalid_value' is an invalid value for `sort`. Valid values are:
- One of \['ascending', 'descending'\]
- One of \['x', 'y', 'color', 'fill', 'stroke', 'strokeWidth', 'size', 'shape', 'fillOpacity', 'strokeOpacity', 'opacity', 'text'\]
- One of \['-x', '-y', '-color', '-fill', '-stroke', '-strokeWidth', '-size', '-shape', '-fillOpacity', '-strokeOpacity', '-opacity', '-text'\]
- Of type {re.escape("`Sequence | Mapping[str, Any] | None`")}$""",
),
(
chart_error_example__invalid_bandposition_value,
r"""'4' is an invalid value for `bandPosition`. Valid values are of type `float`.$""",
),
(
chart_error_example__invalid_type,
r"""'unknown' is an invalid value for `type`. Valid values are one of \['quantitative', 'ordinal', 'temporal', 'nominal', 'geojson'\].$""",
),
(
chart_error_example__additional_datum_argument,
r"""`XDatum` has no parameter named 'wrong_argument'
Existing parameter names are:
datum impute title
axis scale type
bandPosition stack
See the help for `XDatum` to read the full description of these parameters$""",
),
(
chart_error_example__additional_value_argument,
r"""`ColorValue` has no parameter named 'predicate'
Existing parameter names are:
value condition
See the help for `ColorValue` to read the full description of these parameters$""",
),
(
chart_error_example__invalid_value_type,
rf"""'1' is an invalid value for `value`. Valid values are of type {re.escape("`str | Mapping[str, Any] | None`")}.$""",
),
(
chart_error_example__four_errors_hide_fourth,
r"""Multiple errors were found.
Error 1: `Color` has no parameter named 'another_unknown'
Existing parameter names are:
shorthand bin legend timeUnit
aggregate condition scale title
bandPosition field sort type
See the help for `Color` to read the full description of these parameters
Error 2: `Opacity` has no parameter named 'fourth_error'
Existing parameter names are:
shorthand bin legend timeUnit
aggregate condition scale title
bandPosition field sort type
See the help for `Opacity` to read the full description of these parameters
Error 3: `X` has no parameter named 'unknown'
Existing parameter names are:
shorthand bin scale timeUnit
aggregate field sort title
axis impute stack type
bandPosition
See the help for `X` to read the full description of these parameters$""",
),
]
@pytest.mark.parametrize(
("chart_func", "expected_error_message"),
chart_funcs_error_message,
ids=id_func_chart_error_example,
)
def test_chart_validation_errors(chart_func, expected_error_message):
# For some wrong chart specifications such as an unknown encoding channel,
# Altair already raises a warning before the chart specifications are validated.
# We can ignore these warnings as we are interested in the errors being raised
# during validation which is triggered by to_dict
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning)
chart = chart_func()
expected_error_message = inspect.cleandoc(expected_error_message)
with pytest.raises(SchemaValidationError, match=expected_error_message):
chart.to_dict()
def test_multiple_field_strings_in_condition():
selection = alt.selection_point()
expected_error_message = "A field cannot be used for both the `if_true` and `if_false` values of a condition. One of them has to specify a `value` or `datum` definition."
with pytest.raises(ValueError, match=expected_error_message):
chart = ( # noqa: F841
alt.Chart(data.cars())
.mark_circle()
.add_params(selection)
.encode(color=alt.condition(selection, "Origin", "Origin"))
.to_dict()
)
@pytest.mark.parametrize("tp", [pd.DataFrame, pl.DataFrame])
def test_non_existent_column_name(tp: Callable[..., IntoDataFrame]) -> None:
df = tp({"a": [1, 2], "b": [4, 5]})
msg = (
'Unable to determine data type for the field "c"; verify that the field name '
"is not misspelled. If you are referencing a field from a transform, also "
"confirm that the data type is specified correctly."
)
with pytest.raises(ValueError, match=msg):
alt.Chart(df).mark_line().encode(x="a", y="c").to_json()
def test_serialize_numpy_types():
m = MySchema(
a={"date": np.datetime64("2019-01-01")},
a2={"int64": np.int64(1), "float64": np.float64(2)},
b2=np.arange(4),
)
out = m.to_json()
dct = json.loads(out)
assert dct == {
"a": {"date": "2019-01-01T00:00:00"},
"a2": {"int64": 1, "float64": 2},
"b2": [0, 1, 2, 3],
}
def test_to_dict_no_side_effects():
# Tests that shorthands are expanded in returned dictionary when calling to_dict
# but that they remain untouched in the chart object. Goal is to ensure that
# the chart object stays unchanged when to_dict is called
def validate_encoding(encoding):
assert encoding.x["shorthand"] == "a"
assert encoding.x["field"] is alt.Undefined
assert encoding.x["type"] is alt.Undefined
assert encoding.y["shorthand"] == "b:Q"
assert encoding.y["field"] is alt.Undefined
assert encoding.y["type"] is alt.Undefined
data = pd.DataFrame(
{
"a": ["A", "B", "C", "D", "E", "F", "G", "H", "I"],
"b": [28, 55, 43, 91, 81, 53, 19, 87, 52],
}
)
chart = alt.Chart(data).mark_bar().encode(x="a", y="b:Q")
validate_encoding(chart.encoding)
dct = chart.to_dict()
validate_encoding(chart.encoding)
assert "shorthand" not in dct["encoding"]["x"]
assert dct["encoding"]["x"]["field"] == "a"
assert "shorthand" not in dct["encoding"]["y"]
assert dct["encoding"]["y"]["field"] == "b"
assert dct["encoding"]["y"]["type"] == "quantitative"
def test_to_dict_expand_mark_spec():
# Test that `to_dict` correctly expands marks to a dictionary
# without impacting the original spec which remains a string
chart = alt.Chart().mark_bar()
assert chart.to_dict()["mark"] == {"type": "bar"}
assert chart.mark == "bar"
@pytest.mark.parametrize(
"expected",
[list("cdfabe"), [0, 3, 4, 5, 8]],
)
@pytest.mark.parametrize(
"tp",
[
tuple,
list,
deque,
pl.Series,
pd.Series,
pd.Index,
pd.Categorical,
pd.CategoricalIndex,
np.array,
],
)
@pytest.mark.parametrize(
"schema_param",
[
(partial(X, "x:N"), "sort"),
(partial(FieldOneOfPredicate, "name"), "oneOf"),
(Legend, "values"),
],
)
def test_to_dict_iterables(
tp: Callable[..., Iterable[Any]],
expected: Sequence[Any],
schema_param: tuple[Callable[..., SchemaBase], str],
) -> None:
"""
Confirm `SchemaBase` can convert common `(Sequence|Iterable)` types to `list`.
Parameters
----------
tp
Constructor for test `Iterable`.
expected
Values wrapped by `tp`.
schema_param
Constructor for `SchemaBase` subclass, and target parameter name.
Notes
-----
`partial` can be used to reshape the `SchemaBase` constructor.
References
----------
- https://github.com/vega/altair/issues/2808
- https://github.com/vega/altair/issues/2877
"""
tp_schema, param = schema_param
validated = tp_schema(**{param: tp(expected)}).to_dict()
actual = validated[param]
assert actual == expected
@pytest.mark.parametrize(
"tp", [range, np.arange, partial(pl.int_range, eager=True), pd.RangeIndex]
)
def test_to_dict_range(tp) -> None:
expected = [0, 1, 2, 3, 4]
x_dict = alt.X("x:O", sort=tp(0, 5)).to_dict()
actual = x_dict["sort"] # type: ignore
assert actual == expected
@pytest.fixture
def stocks() -> alt.Chart:
source = "https://cdn.jsdelivr.net/npm/vega-datasets@v3.2.1/data/sp500.csv"
return alt.Chart(source).mark_area().encode(x="date:T", y="price:Q")
def DateTime(
year: int,
month: int,
day: int,
hour: int = 0,
minute: int = 0,
second: int = 0,
milliseconds: int = 0,
*,
utc: bool | None = None,
) -> alt.DateTime:
"""Factory for positionally aligning `datetime.datetime`/ `alt.DateTime`."""
kwds: dict[str, Any] = {}
if utc is True:
kwds.update(utc=utc)
if (hour, minute, second, milliseconds) != (0, 0, 0, 0):
kwds.update(
hours=hour, minutes=minute, seconds=second, milliseconds=milliseconds
)
return alt.DateTime(year=year, month=month, date=day, **kwds)
@pytest.mark.parametrize(
("window", "expected"),
[
(
(dt.date(2005, 1, 1), dt.date(2009, 1, 1)),
(DateTime(2005, 1, 1), DateTime(2009, 1, 1)),
),
(
(dt.datetime(2005, 1, 1), dt.datetime(2009, 1, 1)),
(
# NOTE: Keep this to test truncating independently!
alt.DateTime(year=2005, month=1, date=1),
alt.DateTime(year=2009, month=1, date=1),
),
),
(
(
dt.datetime(2001, 1, 1, 9, 30, 0, 2999),
dt.datetime(2002, 1, 1, 17, 0, 0, 5000),
),
(
DateTime(2001, 1, 1, 9, 30, 0, 2),
DateTime(2002, 1, 1, 17, 0, 0, 5),
),
),
(
(
dt.datetime(2003, 5, 1, 1, 30, tzinfo=dt.timezone.utc),
dt.datetime(2003, 6, 3, 4, 3, tzinfo=dt.timezone.utc),
),
(
DateTime(2003, 5, 1, 1, 30, 0, 0, utc=True),
DateTime(2003, 6, 3, 4, 3, 0, 0, utc=True),
),
),
],
ids=["date", "datetime (no time)", "datetime (microseconds)", "datetime (UTC)"],
)
def test_to_dict_datetime(
stocks, window: tuple[dt.date, dt.date], expected: tuple[alt.DateTime, alt.DateTime]
) -> None:
"""
Includes `datetime.datetime` with an empty time component.
This confirms that conversion matches how `alt.DateTime` omits `Undefined`.
"""
expected_dicts = [e.to_dict() for e in expected]
brush = alt.selection_interval(encodings=["x"], value={"x": window})
base = stocks
upper = base.encode(alt.X("date:T").scale(domain=brush))
lower = base.add_params(brush)
chart = upper & lower
mapping = chart.to_dict()
params_value = mapping["params"][0]["value"]["x"]
assert isinstance(params_value, list)
assert params_value == expected_dicts
@pytest.mark.parametrize(
"tzinfo",
[
dt.timezone(dt.timedelta(hours=2), "UTC+2"),
dt.timezone(dt.timedelta(hours=1), "BST"),
dt.timezone(dt.timedelta(hours=-7), "pdt"),
dt.timezone(dt.timedelta(hours=-3), "BRT"),
dt.timezone(dt.timedelta(hours=9), "UTC"),
dt.timezone(dt.timedelta(minutes=60), "utc"),
],
)
def test_to_dict_datetime_unsupported_timezone(tzinfo: dt.timezone) -> None:
datetime = dt.datetime(2003, 5, 1, 1, 30)
result = alt.FieldEqualPredicate(datetime, "column 1")
assert result.to_dict()
with pytest.raises(TypeError, match=r"Unsupported timezone.+\n.+UTC.+local"):
alt.FieldEqualPredicate(datetime.replace(tzinfo=tzinfo), "column 1")
def test_to_dict_datetime_typing() -> None:
"""
Enumerating various places that need updated annotations.
All work at runtime, just need to give the type checkers the new info.
Sub-issue of https://github.com/vega/altair/issues/3650
"""
datetime = dt.datetime(2003, 5, 1, 1, 30)
datetime_seq = [datetime, datetime.replace(2005), datetime.replace(2008)]
assert alt.FieldEqualPredicate(datetime, field="column 1")
assert alt.FieldOneOfPredicate(oneOf=datetime_seq, field="column 1")
assert alt.Legend(values=datetime_seq)
assert alt.Scale(domain=datetime_seq)
assert alt.Scale(domainMin=datetime_seq[0], domainMax=datetime_seq[2])
# NOTE: `datum` is not annotated?
assert alt.XDatum(datum=datetime).to_dict()
# NOTE: `*args` is not annotated?
# - All of these uses *args incorrectly
assert alt.Vector2DateTime(datetime_seq[:2])
| Draft6Schema |
python | lazyprogrammer__machine_learning_examples | tf2.0/keras_trader.py | {
"start": 2784,
"end": 6632
} | class ____:
"""
A 3-stock trading environment.
State: vector of size 7 (n_stock * 2 + 1)
- # shares of stock 1 owned
- # shares of stock 2 owned
- # shares of stock 3 owned
- price of stock 1 (using daily close price)
- price of stock 2
- price of stock 3
- cash owned (can be used to purchase more stocks)
Action: categorical variable with 27 (3^3) possibilities
- for each stock, you can:
- 0 = sell
- 1 = hold
- 2 = buy
"""
def __init__(self, data, initial_investment=20000):
# data
self.stock_price_history = data
self.n_step, self.n_stock = self.stock_price_history.shape
# instance attributes
self.initial_investment = initial_investment
self.cur_step = None
self.stock_owned = None
self.stock_price = None
self.cash_in_hand = None
self.action_space = np.arange(3**self.n_stock)
# action permutations
# returns a nested list with elements like:
# [0,0,0]
# [0,0,1]
# [0,0,2]
# [0,1,0]
# [0,1,1]
# etc.
# 0 = sell
# 1 = hold
# 2 = buy
self.action_list = list(map(list, itertools.product([0, 1, 2], repeat=self.n_stock)))
# calculate size of state
self.state_dim = self.n_stock * 2 + 1
self.reset()
def reset(self):
self.cur_step = 0
self.stock_owned = np.zeros(self.n_stock)
self.stock_price = self.stock_price_history[self.cur_step]
self.cash_in_hand = self.initial_investment
return self._get_obs()
def step(self, action):
assert action in self.action_space
# get current value before performing the action
prev_val = self._get_val()
# perform the trade
self._trade(action)
# update price, i.e. go to the next day
self.cur_step += 1
self.stock_price = self.stock_price_history[self.cur_step]
# get the new value after taking the action
cur_val = self._get_val()
# reward is the increase in porfolio value
reward = cur_val - prev_val
# done if we have run out of data
done = self.cur_step == self.n_step - 1
# store the current value of the portfolio here
info = {'cur_val': cur_val}
# conform to the Gym API
return self._get_obs(), reward, done, info
def _get_obs(self):
obs = np.empty(self.state_dim)
obs[:self.n_stock] = self.stock_owned
obs[self.n_stock:2*self.n_stock] = self.stock_price
obs[-1] = self.cash_in_hand
return obs
def _get_val(self):
return self.stock_owned.dot(self.stock_price) + self.cash_in_hand
def _trade(self, action):
# index the action we want to perform
# 0 = sell
# 1 = hold
# 2 = buy
# e.g. [2,1,0] means:
# buy first stock
# hold second stock
# sell third stock
action_vec = self.action_list[action]
# determine which stocks to buy or sell
sell_index = [] # stores index of stocks we want to sell
buy_index = [] # stores index of stocks we want to buy
for i, a in enumerate(action_vec):
if a == 0:
sell_index.append(i)
elif a == 2:
buy_index.append(i)
# sell any stocks we want to sell
# then buy any stocks we want to buy
if sell_index:
# NOTE: to simplify the problem, when we sell, we will sell ALL shares of that stock
for i in sell_index:
self.cash_in_hand += self.stock_price[i] * self.stock_owned[i]
self.stock_owned[i] = 0
if buy_index:
# NOTE: when buying, we will loop through each stock we want to buy,
# and buy one share at a time until we run out of cash
can_buy = True
while can_buy:
for i in buy_index:
if self.cash_in_hand > self.stock_price[i]:
self.stock_owned[i] += 1 # buy one share
self.cash_in_hand -= self.stock_price[i]
else:
can_buy = False
| MultiStockEnv |
python | Pylons__pyramid | tests/test_authentication.py | {
"start": 67873,
"end": 68048
} | class ____:
def remember(self, environ, identity):
return environ, identity
def forget(self, environ, identity):
return environ, identity
| DummyWhoPlugin |
python | python-poetry__poetry | src/poetry/repositories/pypi_repository.py | {
"start": 1216,
"end": 8410
} | class ____(HTTPRepository):
def __init__(
self,
url: str = "https://pypi.org/",
*,
config: Config | None = None,
disable_cache: bool = False,
pool_size: int = requests.adapters.DEFAULT_POOLSIZE,
fallback: bool = True,
) -> None:
super().__init__(
"PyPI",
url.rstrip("/") + "/simple/",
config=config,
disable_cache=disable_cache,
pool_size=pool_size,
)
self._base_url = url
self._fallback = fallback
def search(self, query: str | list[str]) -> list[Package]:
results = []
response = requests.get(
self._base_url + "search", params={"q": query}, timeout=REQUESTS_TIMEOUT
)
parser = SearchResultParser()
parser.feed(response.text)
for result in parser.results:
try:
package = Package(result.name, result.version)
package.description = result.description.strip()
results.append(package)
except InvalidVersionError:
self._log(
f'Unable to parse version "{result.version}" for the'
f" {result.name} package, skipping",
level="debug",
)
if not results:
# in cases like PyPI search might not be available, we fallback to explicit searches
# to allow for a nicer ux rather than finding nothing at all
# see: https://discuss.python.org/t/fastly-interfering-with-pypi-search/73597/6
#
tokens = query if isinstance(query, list) else [query]
for token in tokens:
with contextlib.suppress(InvalidRequirementError):
results.extend(
self.find_packages(Dependency.create_from_pep_508(token))
)
return results
def get_package_info(self, name: NormalizedName) -> dict[str, Any]:
"""
Return the package information given its name.
The information is returned from the cache if it exists
or retrieved from the remote server.
"""
return self._get_package_info(name)
def _find_packages(
self, name: NormalizedName, constraint: VersionConstraint
) -> list[Package]:
"""
Find packages on the remote server.
"""
try:
json_page = self.get_page(name)
except PackageNotFoundError:
self._log(f"No packages found for {name}", level="debug")
return []
versions = [
(version, json_page.yanked(name, version))
for version in json_page.versions(name)
if constraint.allows(version)
]
return [Package(name, version, yanked=yanked) for version, yanked in versions]
def _get_package_info(self, name: NormalizedName) -> dict[str, Any]:
headers = {"Accept": "application/vnd.pypi.simple.v1+json"}
info = self._get(f"simple/{name}/", headers=headers)
if info is None:
raise PackageNotFoundError(f"Package [{name}] not found.")
return info
def find_links_for_package(self, package: Package) -> list[Link]:
json_data = self._get(f"pypi/{package.name}/{package.version}/json")
if json_data is None:
return []
links = []
for url in json_data["urls"]:
if url["packagetype"] in SUPPORTED_PACKAGE_TYPES:
h = f"sha256={url['digests']['sha256']}"
links.append(Link(url["url"] + "#" + h, yanked=self._get_yanked(url)))
return links
def _get_release_info(
self, name: NormalizedName, version: Version
) -> dict[str, Any]:
from poetry.inspection.info import PackageInfo
self._log(f"Getting info for {name} ({version}) from PyPI", "debug")
json_data = self._get(f"pypi/{name}/{version}/json")
if json_data is None:
raise PackageNotFoundError(f"Package [{name}] not found.")
info = json_data["info"]
data = PackageInfo(
name=info["name"],
version=info["version"],
summary=info["summary"],
requires_dist=info["requires_dist"],
requires_python=info["requires_python"],
yanked=self._get_yanked(info),
cache_version=str(self.CACHE_VERSION),
)
try:
version_info = json_data["urls"]
except KeyError:
version_info = []
files = info.get("files", [])
for file_info in version_info:
if file_info["packagetype"] in SUPPORTED_PACKAGE_TYPES:
files.append(
{
"file": file_info["filename"],
"hash": "sha256:" + file_info["digests"]["sha256"],
}
)
data.files = files
if self._fallback and data.requires_dist is None:
self._log(
"No dependencies found, downloading metadata and/or archives",
level="debug",
)
# No dependencies set (along with other information)
# This might be due to actually no dependencies
# or badly set metadata when uploading.
# So, we need to make sure there is actually no
# dependencies by introspecting packages.
page = self.get_page(name)
links = list(page.links_for_version(name, version))
info = self._get_info_from_links(links, ignore_yanked=not data.yanked)
data.requires_dist = info.requires_dist
if not data.requires_python:
data.requires_python = info.requires_python
return data.asdict()
def _get_page(self, name: NormalizedName) -> SimpleJsonPage:
source = self._base_url + f"simple/{name}/"
info = self.get_package_info(name)
return SimpleJsonPage(source, info)
def _get(
self, endpoint: str, headers: dict[str, str] | None = None
) -> dict[str, Any] | None:
try:
json_response = self.session.get(
self._base_url + endpoint,
raise_for_status=False,
timeout=REQUESTS_TIMEOUT,
headers=headers,
)
except requests.exceptions.TooManyRedirects:
# Cache control redirect loop.
# We try to remove the cache and try again
self.session.delete_cache(self._base_url + endpoint)
json_response = self.session.get(
self._base_url + endpoint,
raise_for_status=False,
timeout=REQUESTS_TIMEOUT,
headers=headers,
)
if json_response.status_code != 200:
return None
json: dict[str, Any] = json_response.json()
return json
@staticmethod
def _get_yanked(json_data: dict[str, Any]) -> str | bool:
if json_data.get("yanked", False):
return json_data.get("yanked_reason") or True
return False
| PyPiRepository |
python | pandas-dev__pandas | pandas/tests/arrays/test_datetimelike.py | {
"start": 31421,
"end": 35927
} | class ____(SharedTests):
index_cls = TimedeltaIndex
array_cls = TimedeltaArray
scalar_type = pd.Timedelta
example_dtype = "m8[ns]"
def test_from_tdi(self):
tdi = TimedeltaIndex(["1 Day", "3 Hours"])
arr = tdi._data
assert list(arr) == list(tdi)
# Check that Index.__new__ knows what to do with TimedeltaArray
tdi2 = pd.Index(arr)
assert isinstance(tdi2, TimedeltaIndex)
assert list(tdi2) == list(arr)
def test_astype_object(self):
tdi = TimedeltaIndex(["1 Day", "3 Hours"])
arr = tdi._data
asobj = arr.astype("O")
assert isinstance(asobj, np.ndarray)
assert asobj.dtype == "O"
assert list(asobj) == list(tdi)
def test_to_pytimedelta(self, timedelta_index):
tdi = timedelta_index
arr = tdi._data
expected = tdi.to_pytimedelta()
result = arr.to_pytimedelta()
tm.assert_numpy_array_equal(result, expected)
def test_total_seconds(self, timedelta_index):
tdi = timedelta_index
arr = tdi._data
expected = tdi.total_seconds()
result = arr.total_seconds()
tm.assert_numpy_array_equal(result, expected.values)
@pytest.mark.parametrize("propname", TimedeltaArray._field_ops)
def test_int_properties(self, timedelta_index, propname):
tdi = timedelta_index
arr = tdi._data
result = getattr(arr, propname)
expected = np.array(getattr(tdi, propname), dtype=result.dtype)
tm.assert_numpy_array_equal(result, expected)
def test_array_interface(self, timedelta_index):
arr = timedelta_index._data
copy_false = None if np_version_gt2 else False
# default asarray gives the same underlying data
result = np.asarray(arr)
expected = arr._ndarray
assert result is expected
tm.assert_numpy_array_equal(result, expected)
result = np.array(arr, copy=copy_false)
assert result is expected
tm.assert_numpy_array_equal(result, expected)
# specifying m8[ns] gives the same result as default
result = np.asarray(arr, dtype="timedelta64[ns]")
expected = arr._ndarray
assert result is expected
tm.assert_numpy_array_equal(result, expected)
result = np.array(arr, dtype="timedelta64[ns]", copy=copy_false)
assert result is expected
tm.assert_numpy_array_equal(result, expected)
result = np.array(arr, dtype="timedelta64[ns]")
if not np_version_gt2:
# TODO: GH 57739
assert result is not expected
tm.assert_numpy_array_equal(result, expected)
# to object dtype
result = np.asarray(arr, dtype=object)
expected = np.array(list(arr), dtype=object)
tm.assert_numpy_array_equal(result, expected)
# to other dtype always copies
result = np.asarray(arr, dtype="int64")
assert result is not arr.asi8
assert not np.may_share_memory(arr, result)
expected = arr.asi8.copy()
tm.assert_numpy_array_equal(result, expected)
# other dtypes handled by numpy
for dtype in ["float64", str]:
result = np.asarray(arr, dtype=dtype)
expected = np.asarray(arr).astype(dtype)
tm.assert_numpy_array_equal(result, expected)
def test_take_fill_valid(self, timedelta_index, fixed_now_ts):
tdi = timedelta_index
arr = tdi._data
td1 = pd.Timedelta(days=1)
result = arr.take([-1, 1], allow_fill=True, fill_value=td1)
assert result[0] == td1
value = fixed_now_ts
msg = f"value should be a '{arr._scalar_type.__name__}' or 'NaT'. Got"
with pytest.raises(TypeError, match=msg):
# fill_value Timestamp invalid
arr.take([0, 1], allow_fill=True, fill_value=value)
value = fixed_now_ts.to_period("D")
with pytest.raises(TypeError, match=msg):
# fill_value Period invalid
arr.take([0, 1], allow_fill=True, fill_value=value)
value = np.datetime64("NaT", "ns")
with pytest.raises(TypeError, match=msg):
# require appropriate-dtype if we have a NA value
arr.take([-1, 1], allow_fill=True, fill_value=value)
@pytest.mark.filterwarnings(r"ignore:Period with BDay freq is deprecated:FutureWarning")
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
| TestTimedeltaArray |
python | huggingface__transformers | src/transformers/models/convbert/modeling_convbert.py | {
"start": 1460,
"end": 4255
} | class ____(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
self.register_buffer(
"token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
) -> torch.LongTensor:
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
# Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
# when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
# issue #5664
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
@auto_docstring
| ConvBertEmbeddings |
python | sympy__sympy | sympy/physics/quantum/density.py | {
"start": 679,
"end": 9546
} | class ____(HermitianOperator):
"""Density operator for representing mixed states.
TODO: Density operator support for Qubits
Parameters
==========
values : tuples/lists
Each tuple/list should be of form (state, prob) or [state,prob]
Examples
========
Create a density operator with 2 states represented by Kets.
>>> from sympy.physics.quantum.state import Ket
>>> from sympy.physics.quantum.density import Density
>>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
>>> d
Density((|0>, 0.5),(|1>, 0.5))
"""
@classmethod
def _eval_args(cls, args):
# call this to qsympify the args
args = super()._eval_args(args)
for arg in args:
# Check if arg is a tuple
if not (isinstance(arg, Tuple) and len(arg) == 2):
raise ValueError("Each argument should be of form [state,prob]"
" or ( state, prob )")
return args
def states(self):
"""Return list of all states.
Examples
========
>>> from sympy.physics.quantum.state import Ket
>>> from sympy.physics.quantum.density import Density
>>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
>>> d.states()
(|0>, |1>)
"""
return Tuple(*[arg[0] for arg in self.args])
def probs(self):
"""Return list of all probabilities.
Examples
========
>>> from sympy.physics.quantum.state import Ket
>>> from sympy.physics.quantum.density import Density
>>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
>>> d.probs()
(0.5, 0.5)
"""
return Tuple(*[arg[1] for arg in self.args])
def get_state(self, index):
"""Return specific state by index.
Parameters
==========
index : index of state to be returned
Examples
========
>>> from sympy.physics.quantum.state import Ket
>>> from sympy.physics.quantum.density import Density
>>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
>>> d.states()[1]
|1>
"""
state = self.args[index][0]
return state
def get_prob(self, index):
"""Return probability of specific state by index.
Parameters
===========
index : index of states whose probability is returned.
Examples
========
>>> from sympy.physics.quantum.state import Ket
>>> from sympy.physics.quantum.density import Density
>>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
>>> d.probs()[1]
0.500000000000000
"""
prob = self.args[index][1]
return prob
def apply_op(self, op):
"""op will operate on each individual state.
Parameters
==========
op : Operator
Examples
========
>>> from sympy.physics.quantum.state import Ket
>>> from sympy.physics.quantum.density import Density
>>> from sympy.physics.quantum.operator import Operator
>>> A = Operator('A')
>>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
>>> d.apply_op(A)
Density((A*|0>, 0.5),(A*|1>, 0.5))
"""
new_args = [(op*state, prob) for (state, prob) in self.args]
return Density(*new_args)
def doit(self, **hints):
"""Expand the density operator into an outer product format.
Examples
========
>>> from sympy.physics.quantum.state import Ket
>>> from sympy.physics.quantum.density import Density
>>> from sympy.physics.quantum.operator import Operator
>>> A = Operator('A')
>>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
>>> d.doit()
0.5*|0><0| + 0.5*|1><1|
"""
terms = []
for (state, prob) in self.args:
state = state.expand() # needed to break up (a+b)*c
if (isinstance(state, Add)):
for arg in product(state.args, repeat=2):
terms.append(prob*self._generate_outer_prod(arg[0],
arg[1]))
else:
terms.append(prob*self._generate_outer_prod(state, state))
return Add(*terms)
def _generate_outer_prod(self, arg1, arg2):
c_part1, nc_part1 = arg1.args_cnc()
c_part2, nc_part2 = arg2.args_cnc()
if (len(nc_part1) == 0 or len(nc_part2) == 0):
raise ValueError('Atleast one-pair of'
' Non-commutative instance required'
' for outer product.')
# We were able to remove some tensor product simplifications that
# used to be here as those transformations are not automatically
# applied by transforms.py.
op = Mul(*nc_part1)*Dagger(Mul(*nc_part2))
return Mul(*c_part1)*Mul(*c_part2) * op
def _represent(self, **options):
return represent(self.doit(), **options)
def _print_operator_name_latex(self, printer, *args):
return r'\rho'
def _print_operator_name_pretty(self, printer, *args):
return prettyForm('\N{GREEK SMALL LETTER RHO}')
def _eval_trace(self, **kwargs):
indices = kwargs.get('indices', [])
return Tr(self.doit(), indices).doit()
def entropy(self):
""" Compute the entropy of a density matrix.
Refer to density.entropy() method for examples.
"""
return entropy(self)
def entropy(density):
"""Compute the entropy of a matrix/density object.
This computes -Tr(density*ln(density)) using the eigenvalue decomposition
of density, which is given as either a Density instance or a matrix
(numpy.ndarray, sympy.Matrix or scipy.sparse).
Parameters
==========
density : density matrix of type Density, SymPy matrix,
scipy.sparse or numpy.ndarray
Examples
========
>>> from sympy.physics.quantum.density import Density, entropy
>>> from sympy.physics.quantum.spin import JzKet
>>> from sympy import S
>>> up = JzKet(S(1)/2,S(1)/2)
>>> down = JzKet(S(1)/2,-S(1)/2)
>>> d = Density((up,S(1)/2),(down,S(1)/2))
>>> entropy(d)
log(2)/2
"""
if isinstance(density, Density):
density = represent(density) # represent in Matrix
if isinstance(density, scipy_sparse_matrix):
density = to_numpy(density)
if isinstance(density, Matrix):
eigvals = density.eigenvals().keys()
return expand(-sum(e*log(e) for e in eigvals))
elif isinstance(density, numpy_ndarray):
import numpy as np
eigvals = np.linalg.eigvals(density)
return -np.sum(eigvals*np.log(eigvals))
else:
raise ValueError(
"numpy.ndarray, scipy.sparse or SymPy matrix expected")
def fidelity(state1, state2):
""" Computes the fidelity [1]_ between two quantum states
The arguments provided to this function should be a square matrix or a
Density object. If it is a square matrix, it is assumed to be diagonalizable.
Parameters
==========
state1, state2 : a density matrix or Matrix
Examples
========
>>> from sympy import S, sqrt
>>> from sympy.physics.quantum.dagger import Dagger
>>> from sympy.physics.quantum.spin import JzKet
>>> from sympy.physics.quantum.density import fidelity
>>> from sympy.physics.quantum.represent import represent
>>>
>>> up = JzKet(S(1)/2,S(1)/2)
>>> down = JzKet(S(1)/2,-S(1)/2)
>>> amp = 1/sqrt(2)
>>> updown = (amp*up) + (amp*down)
>>>
>>> # represent turns Kets into matrices
>>> up_dm = represent(up*Dagger(up))
>>> down_dm = represent(down*Dagger(down))
>>> updown_dm = represent(updown*Dagger(updown))
>>>
>>> fidelity(up_dm, up_dm)
1
>>> fidelity(up_dm, down_dm) #orthogonal states
0
>>> fidelity(up_dm, updown_dm).evalf().round(3)
0.707
References
==========
.. [1] https://en.wikipedia.org/wiki/Fidelity_of_quantum_states
"""
state1 = represent(state1) if isinstance(state1, Density) else state1
state2 = represent(state2) if isinstance(state2, Density) else state2
if not isinstance(state1, Matrix) or not isinstance(state2, Matrix):
raise ValueError("state1 and state2 must be of type Density or Matrix "
"received type=%s for state1 and type=%s for state2" %
(type(state1), type(state2)))
if state1.shape != state2.shape and state1.is_square:
raise ValueError("The dimensions of both args should be equal and the "
"matrix obtained should be a square matrix")
sqrt_state1 = state1**S.Half
return Tr((sqrt_state1*state2*sqrt_state1)**S.Half).doit()
| Density |
python | ansible__ansible | hacking/create-bulk-issues.py | {
"start": 1614,
"end": 3257
} | class ____:
title: str
summary: str
component: str
labels: list[str] | None = None
assignee: str | None = None
@staticmethod
def from_dict(data: dict[str, t.Any]) -> Feature:
title = data.get('title')
summary = data.get('summary')
component = data.get('component')
labels = data.get('labels')
assignee = data.get('assignee')
if not isinstance(title, str):
raise RuntimeError(f'`title` is not `str`: {title}')
if not isinstance(summary, str):
raise RuntimeError(f'`summary` is not `str`: {summary}')
if not isinstance(component, str):
raise RuntimeError(f'`component` is not `str`: {component}')
if not isinstance(assignee, (str, type(None))):
raise RuntimeError(f'`assignee` is not `str`: {assignee}')
if not isinstance(labels, list) or not all(isinstance(item, str) for item in labels):
raise RuntimeError(f'`labels` is not `list[str]`: {labels}')
return Feature(
title=title,
summary=summary,
component=component,
labels=labels,
assignee=assignee,
)
def create_issue(self, project: str) -> Issue:
body = f'''
### Summary
{self.summary}
### Issue Type
Feature Idea
### Component Name
`{self.component}`
'''
return Issue(
title=self.title,
summary=self.summary,
body=body.strip(),
project=project,
labels=self.labels,
assignee=self.assignee,
)
@dataclasses.dataclass(frozen=True)
| Feature |
python | python-pillow__Pillow | src/PIL/ImageWin.py | {
"start": 6685,
"end": 7590
} | class ____:
"""Create a Window with the given title size."""
def __init__(
self, title: str = "PIL", width: int | None = None, height: int | None = None
) -> None:
self.hwnd = Image.core.createwindow(
title, self.__dispatcher, width or 0, height or 0
)
def __dispatcher(self, action: str, *args: int) -> None:
getattr(self, f"ui_handle_{action}")(*args)
def ui_handle_clear(self, dc: int, x0: int, y0: int, x1: int, y1: int) -> None:
pass
def ui_handle_damage(self, x0: int, y0: int, x1: int, y1: int) -> None:
pass
def ui_handle_destroy(self) -> None:
pass
def ui_handle_repair(self, dc: int, x0: int, y0: int, x1: int, y1: int) -> None:
pass
def ui_handle_resize(self, width: int, height: int) -> None:
pass
def mainloop(self) -> None:
Image.core.eventloop()
| Window |
python | pandas-dev__pandas | asv_bench/benchmarks/stat_ops.py | {
"start": 3288,
"end": 4380
} | class ____:
params = [["spearman", "kendall", "pearson"]]
param_names = ["method"]
def setup(self, method):
self.df = pd.DataFrame(np.random.randn(500, 15))
self.df2 = pd.DataFrame(np.random.randn(500, 15))
self.df_wide = pd.DataFrame(np.random.randn(500, 100))
self.df_wide_nans = self.df_wide.where(np.random.random((500, 100)) < 0.9)
self.s = pd.Series(np.random.randn(500))
self.s2 = pd.Series(np.random.randn(500))
def time_corr(self, method):
self.df.corr(method=method)
def time_corr_wide(self, method):
self.df_wide.corr(method=method)
def time_corr_wide_nans(self, method):
self.df_wide_nans.corr(method=method)
def peakmem_corr_wide(self, method):
self.df_wide.corr(method=method)
def time_corr_series(self, method):
self.s.corr(self.s2, method=method)
def time_corrwith_cols(self, method):
self.df.corrwith(self.df2, method=method)
def time_corrwith_rows(self, method):
self.df.corrwith(self.df2, axis=1, method=method)
| Correlation |
python | langchain-ai__langchain | libs/partners/openai/langchain_openai/middleware/openai_moderation.py | {
"start": 1434,
"end": 15399
} | class ____(AgentMiddleware[AgentState[Any], Any]):
"""Moderate agent traffic using OpenAI's moderation endpoint."""
def __init__(
self,
*,
model: ModerationModel = "omni-moderation-latest",
check_input: bool = True,
check_output: bool = True,
check_tool_results: bool = False,
exit_behavior: Literal["error", "end", "replace"] = "end",
violation_message: str | None = None,
client: OpenAI | None = None,
async_client: AsyncOpenAI | None = None,
) -> None:
"""Create the middleware instance.
Args:
model: OpenAI moderation model to use.
check_input: Whether to check user input messages.
check_output: Whether to check model output messages.
check_tool_results: Whether to check tool result messages.
exit_behavior: How to handle violations
(`'error'`, `'end'`, or `'replace'`).
violation_message: Custom template for violation messages.
client: Optional pre-configured OpenAI client to reuse.
If not provided, a new client will be created.
async_client: Optional pre-configured AsyncOpenAI client to reuse.
If not provided, a new async client will be created.
"""
super().__init__()
self.model = model
self.check_input = check_input
self.check_output = check_output
self.check_tool_results = check_tool_results
self.exit_behavior = exit_behavior
self.violation_message = violation_message
self._client = client
self._async_client = async_client
@hook_config(can_jump_to=["end"])
def before_model(
self, state: AgentState[Any], runtime: Runtime[Any]
) -> dict[str, Any] | None: # type: ignore[override]
"""Moderate user input and tool results before the model is called.
Args:
state: Current agent state containing messages.
runtime: Agent runtime context.
Returns:
Updated state with moderated messages, or `None` if no changes.
"""
if not self.check_input and not self.check_tool_results:
return None
messages = list(state.get("messages", []))
if not messages:
return None
return self._moderate_inputs(messages)
@hook_config(can_jump_to=["end"])
def after_model(
self, state: AgentState[Any], runtime: Runtime[Any]
) -> dict[str, Any] | None: # type: ignore[override]
"""Moderate model output after the model is called.
Args:
state: Current agent state containing messages.
runtime: Agent runtime context.
Returns:
Updated state with moderated messages, or `None` if no changes.
"""
if not self.check_output:
return None
messages = list(state.get("messages", []))
if not messages:
return None
return self._moderate_output(messages)
@hook_config(can_jump_to=["end"])
async def abefore_model(
self, state: AgentState[Any], runtime: Runtime[Any]
) -> dict[str, Any] | None: # type: ignore[override]
"""Async version of before_model.
Args:
state: Current agent state containing messages.
runtime: Agent runtime context.
Returns:
Updated state with moderated messages, or `None` if no changes.
"""
if not self.check_input and not self.check_tool_results:
return None
messages = list(state.get("messages", []))
if not messages:
return None
return await self._amoderate_inputs(messages)
@hook_config(can_jump_to=["end"])
async def aafter_model(
self, state: AgentState[Any], runtime: Runtime[Any]
) -> dict[str, Any] | None: # type: ignore[override]
"""Async version of after_model.
Args:
state: Current agent state containing messages.
runtime: Agent runtime context.
Returns:
Updated state with moderated messages, or `None` if no changes.
"""
if not self.check_output:
return None
messages = list(state.get("messages", []))
if not messages:
return None
return await self._amoderate_output(messages)
def _moderate_inputs(
self, messages: Sequence[BaseMessage]
) -> dict[str, Any] | None:
working = list(messages)
modified = False
if self.check_tool_results:
action = self._moderate_tool_messages(working)
if action:
if "jump_to" in action:
return action
working = cast("list[BaseMessage]", action["messages"])
modified = True
if self.check_input:
action = self._moderate_user_message(working)
if action:
if "jump_to" in action:
return action
working = cast("list[BaseMessage]", action["messages"])
modified = True
if modified:
return {"messages": working}
return None
async def _amoderate_inputs(
self, messages: Sequence[BaseMessage]
) -> dict[str, Any] | None:
working = list(messages)
modified = False
if self.check_tool_results:
action = await self._amoderate_tool_messages(working)
if action:
if "jump_to" in action:
return action
working = cast("list[BaseMessage]", action["messages"])
modified = True
if self.check_input:
action = await self._amoderate_user_message(working)
if action:
if "jump_to" in action:
return action
working = cast("list[BaseMessage]", action["messages"])
modified = True
if modified:
return {"messages": working}
return None
def _moderate_output(
self, messages: Sequence[BaseMessage]
) -> dict[str, Any] | None:
last_ai_idx = self._find_last_index(messages, AIMessage)
if last_ai_idx is None:
return None
ai_message = messages[last_ai_idx]
text = self._extract_text(ai_message)
if not text:
return None
result = self._moderate(text)
if not result.flagged:
return None
return self._apply_violation(
messages, index=last_ai_idx, stage="output", content=text, result=result
)
async def _amoderate_output(
self, messages: Sequence[BaseMessage]
) -> dict[str, Any] | None:
last_ai_idx = self._find_last_index(messages, AIMessage)
if last_ai_idx is None:
return None
ai_message = messages[last_ai_idx]
text = self._extract_text(ai_message)
if not text:
return None
result = await self._amoderate(text)
if not result.flagged:
return None
return self._apply_violation(
messages, index=last_ai_idx, stage="output", content=text, result=result
)
def _moderate_tool_messages(
self, messages: Sequence[BaseMessage]
) -> dict[str, Any] | None:
last_ai_idx = self._find_last_index(messages, AIMessage)
if last_ai_idx is None:
return None
working = list(messages)
modified = False
for idx in range(last_ai_idx + 1, len(working)):
msg = working[idx]
if not isinstance(msg, ToolMessage):
continue
text = self._extract_text(msg)
if not text:
continue
result = self._moderate(text)
if not result.flagged:
continue
action = self._apply_violation(
working, index=idx, stage="tool", content=text, result=result
)
if action:
if "jump_to" in action:
return action
working = cast("list[BaseMessage]", action["messages"])
modified = True
if modified:
return {"messages": working}
return None
async def _amoderate_tool_messages(
self, messages: Sequence[BaseMessage]
) -> dict[str, Any] | None:
last_ai_idx = self._find_last_index(messages, AIMessage)
if last_ai_idx is None:
return None
working = list(messages)
modified = False
for idx in range(last_ai_idx + 1, len(working)):
msg = working[idx]
if not isinstance(msg, ToolMessage):
continue
text = self._extract_text(msg)
if not text:
continue
result = await self._amoderate(text)
if not result.flagged:
continue
action = self._apply_violation(
working, index=idx, stage="tool", content=text, result=result
)
if action:
if "jump_to" in action:
return action
working = cast("list[BaseMessage]", action["messages"])
modified = True
if modified:
return {"messages": working}
return None
def _moderate_user_message(
self, messages: Sequence[BaseMessage]
) -> dict[str, Any] | None:
idx = self._find_last_index(messages, HumanMessage)
if idx is None:
return None
message = messages[idx]
text = self._extract_text(message)
if not text:
return None
result = self._moderate(text)
if not result.flagged:
return None
return self._apply_violation(
messages, index=idx, stage="input", content=text, result=result
)
async def _amoderate_user_message(
self, messages: Sequence[BaseMessage]
) -> dict[str, Any] | None:
idx = self._find_last_index(messages, HumanMessage)
if idx is None:
return None
message = messages[idx]
text = self._extract_text(message)
if not text:
return None
result = await self._amoderate(text)
if not result.flagged:
return None
return self._apply_violation(
messages, index=idx, stage="input", content=text, result=result
)
def _apply_violation(
self,
messages: Sequence[BaseMessage],
*,
index: int | None,
stage: ViolationStage,
content: str,
result: Moderation,
) -> dict[str, Any] | None:
violation_text = self._format_violation_message(content, result)
if self.exit_behavior == "error":
raise OpenAIModerationError(
content=content,
stage=stage,
result=result,
message=violation_text,
)
if self.exit_behavior == "end":
return {"jump_to": "end", "messages": [AIMessage(content=violation_text)]}
if index is None:
return None
new_messages = list(messages)
original = new_messages[index]
new_messages[index] = cast(
BaseMessage, original.model_copy(update={"content": violation_text})
)
return {"messages": new_messages}
def _moderate(self, text: str) -> Moderation:
if self._client is None:
self._client = self._build_client()
response = self._client.moderations.create(model=self.model, input=text)
return response.results[0]
async def _amoderate(self, text: str) -> Moderation:
if self._async_client is None:
self._async_client = self._build_async_client()
response = await self._async_client.moderations.create(
model=self.model, input=text
)
return response.results[0]
def _build_client(self) -> OpenAI:
self._client = OpenAI()
return self._client
def _build_async_client(self) -> AsyncOpenAI:
self._async_client = AsyncOpenAI()
return self._async_client
def _format_violation_message(self, content: str, result: Moderation) -> str:
# Convert categories to dict and filter for flagged items
categories_dict = result.categories.model_dump()
categories = [
name.replace("_", " ")
for name, flagged in categories_dict.items()
if flagged
]
category_label = (
", ".join(categories) if categories else "OpenAI's safety policies"
)
template = self.violation_message or DEFAULT_VIOLATION_TEMPLATE
scores_json = json.dumps(result.category_scores.model_dump(), sort_keys=True)
try:
message = template.format(
categories=category_label,
category_scores=scores_json,
original_content=content,
)
except KeyError:
message = template
return message
def _find_last_index(
self, messages: Sequence[BaseMessage], message_type: type[BaseMessage]
) -> int | None:
for idx in range(len(messages) - 1, -1, -1):
if isinstance(messages[idx], message_type):
return idx
return None
def _extract_text(self, message: BaseMessage) -> str | None:
if message.content is None:
return None
text_accessor = getattr(message, "text", None)
if text_accessor is None:
return str(message.content)
text = str(text_accessor)
return text if text else None
__all__ = [
"OpenAIModerationError",
"OpenAIModerationMiddleware",
]
| OpenAIModerationMiddleware |
python | redis__redis-py | tests/test_retry.py | {
"start": 5737,
"end": 10410
} | class ____:
"Test the standalone Redis client behavior with retries"
def test_client_retry_on_error_with_success(self, request):
with patch.object(Redis, "parse_response") as parse_response:
def mock_parse_response(connection, *args, **options):
def ok_response(connection, *args, **options):
return "MOCK_OK"
parse_response.side_effect = ok_response
raise ReadOnlyError()
parse_response.side_effect = mock_parse_response
r = _get_client(Redis, request, retry_on_error=[ReadOnlyError])
assert r.get("foo") == "MOCK_OK"
assert parse_response.call_count == 2
def test_client_retry_on_error_raise(self, request):
with patch.object(Redis, "parse_response") as parse_response:
parse_response.side_effect = BusyLoadingError()
retries = 3
r = _get_client(
Redis,
request,
retry_on_error=[ReadOnlyError, BusyLoadingError],
retry=Retry(NoBackoff(), retries),
)
with pytest.raises(BusyLoadingError):
try:
r.get("foo")
finally:
assert parse_response.call_count == retries + 1
def test_client_retry_on_error_different_error_raised(self, request):
with patch.object(Redis, "parse_response") as parse_response:
parse_response.side_effect = OSError()
retries = 3
r = _get_client(
Redis,
request,
retry_on_error=[ReadOnlyError],
retry=Retry(NoBackoff(), retries),
)
with pytest.raises(OSError):
try:
r.get("foo")
finally:
assert parse_response.call_count == 1
def test_client_retry_on_error_and_timeout(self, request):
with patch.object(Redis, "parse_response") as parse_response:
parse_response.side_effect = TimeoutError()
retries = 3
r = _get_client(
Redis,
request,
retry_on_error=[ReadOnlyError],
retry_on_timeout=True,
retry=Retry(NoBackoff(), retries),
)
with pytest.raises(TimeoutError):
try:
r.get("foo")
finally:
assert parse_response.call_count == retries + 1
def test_client_retry_on_timeout(self, request):
with patch.object(Redis, "parse_response") as parse_response:
parse_response.side_effect = TimeoutError()
retries = 3
r = _get_client(
Redis, request, retry_on_timeout=True, retry=Retry(NoBackoff(), retries)
)
with pytest.raises(TimeoutError):
try:
r.get("foo")
finally:
assert parse_response.call_count == retries + 1
@pytest.mark.onlycluster
def test_get_set_retry_object_for_cluster_client(self, request):
retry = Retry(NoBackoff(), 2)
r = _get_client(Redis, request, retry_on_timeout=True, retry=retry)
exist_conn = r.connection_pool.get_connection()
assert r.retry._retries == retry._retries
assert isinstance(r.retry._backoff, NoBackoff)
new_retry_policy = Retry(ExponentialBackoff(), 3)
r.set_retry(new_retry_policy)
assert r.retry._retries == new_retry_policy._retries
assert isinstance(r.retry._backoff, ExponentialBackoff)
assert exist_conn.retry._retries == new_retry_policy._retries
new_conn = r.connection_pool.get_connection()
assert new_conn.retry._retries == new_retry_policy._retries
@pytest.mark.onlynoncluster
def test_get_set_retry_object(self, request):
retry = Retry(NoBackoff(), 2)
r = _get_client(Redis, request, retry_on_timeout=True, retry=retry)
exist_conn = r.connection_pool.get_connection()
assert r.get_retry()._retries == retry._retries
assert isinstance(r.get_retry()._backoff, NoBackoff)
new_retry_policy = Retry(ExponentialBackoff(), 3)
r.set_retry(new_retry_policy)
assert r.get_retry()._retries == new_retry_policy._retries
assert isinstance(r.get_retry()._backoff, ExponentialBackoff)
assert exist_conn.retry._retries == new_retry_policy._retries
new_conn = r.connection_pool.get_connection()
assert new_conn.retry._retries == new_retry_policy._retries
| TestRedisClientRetry |
python | getsentry__sentry | src/sentry/plugins/base/binding_manager.py | {
"start": 619,
"end": 701
} | class ____(ProviderManager):
type = RepositoryProvider
| RepositoryProviderManager |
python | huggingface__transformers | src/transformers/models/chinese_clip/modeling_chinese_clip.py | {
"start": 27127,
"end": 28744
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([ChineseCLIPTextLayer(config) for i in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@can_return_tuple
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
**kwargs,
) -> Union[tuple[torch.Tensor], BaseModelOutput]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
**kwargs,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
| ChineseCLIPTextEncoder |
python | pytorch__pytorch | torch/utils/weak.py | {
"start": 2691,
"end": 4269
} | class ____(weakref.ref):
__slots__ = ["_id"]
def __init__(self, key, callback=None) -> None:
# Unlike stock weakref, which preserves hash semantics of the
# original object but lazily defers hash calls until the first
# time the user attempts to hash the weakref, we can eagerly
# cache the id of the key as we know this is definitely the hash
# method
self._id = id(key)
super().__init__(key, callback) # type: ignore[call-arg]
def __call__(self):
r = super().__call__()
# Special logic for Tensor PyObject resurrection
if hasattr(r, "_fix_weakref"):
r._fix_weakref() # type: ignore[union-attr]
return r
def __hash__(self):
return self._id
def __eq__(self, other):
# An attractive but wrong alternate implementation is to only test if
# the stored _ids match. This can lead to an ABA problem if you have:
#
# a1 = A()
# w1 = WeakIdRef(a1)
# del a1
# a2 = A() # suppose it gets the same ID as a1
# w2 = WeakIdRef(a2)
# print(w1 == w2)
#
# This should be False, as a1 and a2 are unrelated (and a1 is
# dead anyway)
a = self()
b = other()
if a is not None and b is not None:
return a is b
return self is other
# This is the same as WeakIdRef but equality is checked using hash() rather than id.
# This will be equivalent to the one above except for classes where hash is not their id.
| WeakIdRef |
python | getsentry__sentry | src/sentry/utils/snuba_rpc.py | {
"start": 2091,
"end": 2452
} | class ____:
table_response: list[TraceItemTableResponse]
timeseries_response: list[TimeSeriesResponse]
def log_snuba_info(content: str) -> None:
if SNUBA_INFO_FILE:
with open(SNUBA_INFO_FILE, "a") as file:
file.writelines(content)
else:
print(content) # NOQA: only prints when an env variable is set
| MultiRpcResponse |
python | Netflix__metaflow | metaflow/plugins/airflow/airflow_utils.py | {
"start": 699,
"end": 1099
} | class ____(Exception):
headline = "Metaflow is incompatible with current version of Airflow."
def __init__(self, version_number) -> None:
msg = (
"Airflow version %s is incompatible with Metaflow. Metaflow requires Airflow a minimum version %s"
% (version_number, AIRFLOW_MIN_SUPPORT_VERSION)
)
super().__init__(msg)
| IncompatibleVersionException |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.