language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | kubernetes-client__python | kubernetes/client/models/v1_custom_resource_definition_condition.py | {
"start": 383,
"end": 7945
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'last_transition_time': 'datetime',
'message': 'str',
'reason': 'str',
'status': 'str',
'type': 'str'
}
attribute_map = {
'last_transition_time': 'lastTransitionTime',
'message': 'message',
'reason': 'reason',
'status': 'status',
'type': 'type'
}
def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
"""V1CustomResourceDefinitionCondition - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._last_transition_time = None
self._message = None
self._reason = None
self._status = None
self._type = None
self.discriminator = None
if last_transition_time is not None:
self.last_transition_time = last_transition_time
if message is not None:
self.message = message
if reason is not None:
self.reason = reason
self.status = status
self.type = type
@property
def last_transition_time(self):
"""Gets the last_transition_time of this V1CustomResourceDefinitionCondition. # noqa: E501
lastTransitionTime last time the condition transitioned from one status to another. # noqa: E501
:return: The last_transition_time of this V1CustomResourceDefinitionCondition. # noqa: E501
:rtype: datetime
"""
return self._last_transition_time
@last_transition_time.setter
def last_transition_time(self, last_transition_time):
"""Sets the last_transition_time of this V1CustomResourceDefinitionCondition.
lastTransitionTime last time the condition transitioned from one status to another. # noqa: E501
:param last_transition_time: The last_transition_time of this V1CustomResourceDefinitionCondition. # noqa: E501
:type: datetime
"""
self._last_transition_time = last_transition_time
@property
def message(self):
"""Gets the message of this V1CustomResourceDefinitionCondition. # noqa: E501
message is a human-readable message indicating details about last transition. # noqa: E501
:return: The message of this V1CustomResourceDefinitionCondition. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V1CustomResourceDefinitionCondition.
message is a human-readable message indicating details about last transition. # noqa: E501
:param message: The message of this V1CustomResourceDefinitionCondition. # noqa: E501
:type: str
"""
self._message = message
@property
def reason(self):
"""Gets the reason of this V1CustomResourceDefinitionCondition. # noqa: E501
reason is a unique, one-word, CamelCase reason for the condition's last transition. # noqa: E501
:return: The reason of this V1CustomResourceDefinitionCondition. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this V1CustomResourceDefinitionCondition.
reason is a unique, one-word, CamelCase reason for the condition's last transition. # noqa: E501
:param reason: The reason of this V1CustomResourceDefinitionCondition. # noqa: E501
:type: str
"""
self._reason = reason
@property
def status(self):
"""Gets the status of this V1CustomResourceDefinitionCondition. # noqa: E501
status is the status of the condition. Can be True, False, Unknown. # noqa: E501
:return: The status of this V1CustomResourceDefinitionCondition. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1CustomResourceDefinitionCondition.
status is the status of the condition. Can be True, False, Unknown. # noqa: E501
:param status: The status of this V1CustomResourceDefinitionCondition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
@property
def type(self):
"""Gets the type of this V1CustomResourceDefinitionCondition. # noqa: E501
type is the type of the condition. Types include Established, NamesAccepted and Terminating. # noqa: E501
:return: The type of this V1CustomResourceDefinitionCondition. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1CustomResourceDefinitionCondition.
type is the type of the condition. Types include Established, NamesAccepted and Terminating. # noqa: E501
:param type: The type of this V1CustomResourceDefinitionCondition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1CustomResourceDefinitionCondition):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1CustomResourceDefinitionCondition):
return True
return self.to_dict() != other.to_dict()
| V1CustomResourceDefinitionCondition |
python | openai__openai-python | src/openai/cli/_api/image.py | {
"start": 2536,
"end": 2691
} | class ____(BaseModel):
image: str
num_images: int
size: str
response_format: str
model: Omittable[str] = omit
| CLIImageCreateVariationArgs |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/ndb/entities/snippets.py | {
"start": 6760,
"end": 7258
} | class ____(ndb.Model):
pass
def reserve_model_ids():
first, last = MyModel.allocate_ids(100)
return first, last
def reserve_model_ids_with_a_parent(p):
first, last = MyModel.allocate_ids(100, parent=p)
return first, last
def construct_keys_from_range_of_reserved_ids(first, last):
keys = [ndb.Key(MyModel, id) for id in range(first, last + 1)]
return keys
def reserve_model_ids_up_to(N):
first, last = MyModel.allocate_ids(max=N)
return first, last
| MyModel |
python | kamyu104__LeetCode-Solutions | Python/random-pick-with-weight.py | {
"start": 93,
"end": 522
} | class ____(object):
def __init__(self, w):
"""
:type w: List[int]
"""
self.__prefix_sum = list(w)
for i in xrange(1, len(w)):
self.__prefix_sum[i] += self.__prefix_sum[i-1]
def pickIndex(self):
"""
:rtype: int
"""
target = random.randint(0, self.__prefix_sum[-1]-1)
return bisect.bisect_right(self.__prefix_sum, target)
| Solution |
python | huggingface__transformers | src/transformers/models/video_llama_3/modular_video_llama_3.py | {
"start": 52085,
"end": 52164
} | class ____(Qwen2VLImageProcessorKwargs):
pass
| VideoLlama3ImageProcessorKwargs |
python | ray-project__ray | python/ray/autoscaler/_private/autoscaler.py | {
"start": 3868,
"end": 6074
} | class ____:
"""Class to extract and organize information on non-terminated nodes."""
def __init__(self, provider: NodeProvider):
start_time = time.time()
# All non-terminated nodes
self.all_node_ids = provider.non_terminated_nodes({})
# Managed worker nodes (node kind "worker"):
self.worker_ids: List[NodeID] = []
# The head node (node kind "head")
self.head_id: Optional[NodeID] = None
for node in self.all_node_ids:
node_kind = provider.node_tags(node)[TAG_RAY_NODE_KIND]
if node_kind == NODE_KIND_WORKER:
self.worker_ids.append(node)
elif node_kind == NODE_KIND_HEAD:
self.head_id = node
# Note: For typical use-cases, self.all_node_ids == self.worker_ids +
# [self.head_id]. The difference being in the case of unmanaged nodes.
# Record the time of the non_terminated nodes call. This typically
# translates to a "describe" or "list" call on most cluster managers
# which can be quite expensive. Note that we include the processing
# time because on some clients, there may be pagination and the
# underlying api calls may be done lazily.
self.non_terminated_nodes_time = time.time() - start_time
logger.info(
f"The autoscaler took {round(self.non_terminated_nodes_time, 3)}"
" seconds to fetch the list of non-terminated nodes."
)
def remove_terminating_nodes(self, terminating_nodes: List[NodeID]) -> None:
"""Remove nodes we're in the process of terminating from internal
state."""
def not_terminating(node):
return node not in terminating_nodes
self.worker_ids = list(filter(not_terminating, self.worker_ids))
self.all_node_ids = list(filter(not_terminating, self.all_node_ids))
# Whether a worker should be kept based on the min_workers and
# max_workers constraints.
#
# keep: should keep the worker
# terminate: should terminate the worker
# decide_later: the worker can be terminated if needed
KeepOrTerminate = Enum("KeepOrTerminate", "keep terminate decide_later")
| NonTerminatedNodes |
python | pypa__hatch | src/hatch/config/constants.py | {
"start": 466,
"end": 776
} | class ____:
USER = "HATCH_INDEX_USER"
AUTH = "HATCH_INDEX_AUTH"
REPO = "HATCH_INDEX_REPO"
CA_CERT = "HATCH_INDEX_CA_CERT"
CLIENT_CERT = "HATCH_INDEX_CLIENT_CERT"
CLIENT_KEY = "HATCH_INDEX_CLIENT_KEY"
PUBLISHER = "HATCH_PUBLISHER"
OPTIONS = "HATCH_PUBLISHER_OPTIONS"
| PublishEnvVars |
python | Netflix__metaflow | metaflow/runner/metaflow_runner.py | {
"start": 7625,
"end": 8845
} | class ____(ExecutingProcess):
"""
This class contains a reference to a `metaflow.Run` object representing
the currently executing or finished run, as well as metadata related
to the process.
`ExecutingRun` is returned by methods in `Runner` and `NBRunner`. It is not
meant to be instantiated directly.
This class works as a context manager, allowing you to use a pattern like
```python
with Runner(...).run() as running:
...
```
Note that you should use either this object as the context manager or
`Runner`, not both in a nested manner.
"""
def __init__(
self, runner: "Runner", command_obj: CommandManager, run_obj: Run
) -> None:
"""
Create a new ExecutingRun -- this should not be done by the user directly but
instead use Runner.run()
Parameters
----------
runner : Runner
Parent runner for this run.
command_obj : CommandManager
CommandManager containing the subprocess executing this run.
run_obj : Run
Run object corresponding to this run.
"""
super().__init__(runner, command_obj)
self.run = run_obj
| ExecutingRun |
python | huggingface__transformers | src/transformers/models/auto/auto_factory.py | {
"start": 26977,
"end": 31344
} | class ____(OrderedDict[type[PreTrainedConfig], _LazyAutoMappingValue]):
"""
" A mapping config to object (model or tokenizer for instance) that will load keys and values when it is accessed.
Args:
- config_mapping: The map model type to config class
- model_mapping: The map model type to model (or tokenizer) class
"""
def __init__(self, config_mapping, model_mapping) -> None:
self._config_mapping = config_mapping
self._reverse_config_mapping = {v: k for k, v in config_mapping.items()}
self._model_mapping = model_mapping
self._model_mapping._model_mapping = self
self._extra_content = {}
self._modules = {}
def __len__(self) -> int:
common_keys = set(self._config_mapping.keys()).intersection(self._model_mapping.keys())
return len(common_keys) + len(self._extra_content)
def __getitem__(self, key: type[PreTrainedConfig]) -> _LazyAutoMappingValue:
if key in self._extra_content:
return self._extra_content[key]
model_type = self._reverse_config_mapping[key.__name__]
if model_type in self._model_mapping:
model_name = self._model_mapping[model_type]
return self._load_attr_from_module(model_type, model_name)
# Maybe there was several model types associated with this config.
model_types = [k for k, v in self._config_mapping.items() if v == key.__name__]
for mtype in model_types:
if mtype in self._model_mapping:
model_name = self._model_mapping[mtype]
return self._load_attr_from_module(mtype, model_name)
raise KeyError(key)
def _load_attr_from_module(self, model_type, attr):
module_name = model_type_to_module_name(model_type)
if module_name not in self._modules:
self._modules[module_name] = importlib.import_module(f".{module_name}", "transformers.models")
return getattribute_from_module(self._modules[module_name], attr)
def keys(self) -> list[type[PreTrainedConfig]]:
mapping_keys = [
self._load_attr_from_module(key, name)
for key, name in self._config_mapping.items()
if key in self._model_mapping
]
return mapping_keys + list(self._extra_content.keys())
def get(self, key: type[PreTrainedConfig], default: _T) -> Union[_LazyAutoMappingValue, _T]:
try:
return self.__getitem__(key)
except KeyError:
return default
def __bool__(self) -> bool:
return bool(self.keys())
def values(self) -> list[_LazyAutoMappingValue]:
mapping_values = [
self._load_attr_from_module(key, name)
for key, name in self._model_mapping.items()
if key in self._config_mapping
]
return mapping_values + list(self._extra_content.values())
def items(self) -> list[tuple[type[PreTrainedConfig], _LazyAutoMappingValue]]:
mapping_items = [
(
self._load_attr_from_module(key, self._config_mapping[key]),
self._load_attr_from_module(key, self._model_mapping[key]),
)
for key in self._model_mapping
if key in self._config_mapping
]
return mapping_items + list(self._extra_content.items())
def __iter__(self) -> Iterator[type[PreTrainedConfig]]:
return iter(self.keys())
def __contains__(self, item: type) -> bool:
if item in self._extra_content:
return True
if not hasattr(item, "__name__") or item.__name__ not in self._reverse_config_mapping:
return False
model_type = self._reverse_config_mapping[item.__name__]
return model_type in self._model_mapping
def register(self, key: type[PreTrainedConfig], value: _LazyAutoMappingValue, exist_ok=False) -> None:
"""
Register a new model in this mapping.
"""
if hasattr(key, "__name__") and key.__name__ in self._reverse_config_mapping:
model_type = self._reverse_config_mapping[key.__name__]
if model_type in self._model_mapping and not exist_ok:
raise ValueError(f"'{key}' is already used by a Transformers model.")
self._extra_content[key] = value
__all__ = ["get_values"]
| _LazyAutoMapping |
python | scikit-learn__scikit-learn | sklearn/externals/array_api_compat/cupy/_info.py | {
"start": 441,
"end": 10125
} | class ____:
"""
Get the array API inspection namespace for CuPy.
The array API inspection namespace defines the following functions:
- capabilities()
- default_device()
- default_dtypes()
- dtypes()
- devices()
See
https://data-apis.org/array-api/latest/API_specification/inspection.html
for more details.
Returns
-------
info : ModuleType
The array API inspection namespace for CuPy.
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': cupy.float64,
'complex floating': cupy.complex128,
'integral': cupy.int64,
'indexing': cupy.int64}
"""
__module__ = 'cupy'
def capabilities(self):
"""
Return a dictionary of array API library capabilities.
The resulting dictionary has the following keys:
- **"boolean indexing"**: boolean indicating whether an array library
supports boolean indexing. Always ``True`` for CuPy.
- **"data-dependent shapes"**: boolean indicating whether an array
library supports data-dependent output shapes. Always ``True`` for
CuPy.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html
for more details.
See Also
--------
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
capabilities : dict
A dictionary of array API library capabilities.
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.capabilities()
{'boolean indexing': True,
'data-dependent shapes': True,
'max dimensions': 64}
"""
return {
"boolean indexing": True,
"data-dependent shapes": True,
"max dimensions": 64,
}
def default_device(self):
"""
The default device used for new CuPy arrays.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
device : Device
The default device used for new CuPy arrays.
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.default_device()
Device(0)
Notes
-----
This method returns the static default device when CuPy is initialized.
However, the *current* device used by creation functions (``empty`` etc.)
can be changed globally or with a context manager.
See Also
--------
https://github.com/data-apis/array-api/issues/835
"""
return cuda.Device(0)
def default_dtypes(self, *, device=None):
"""
The default data types used for new CuPy arrays.
For CuPy, this always returns the following dictionary:
- **"real floating"**: ``cupy.float64``
- **"complex floating"**: ``cupy.complex128``
- **"integral"**: ``cupy.intp``
- **"indexing"**: ``cupy.intp``
Parameters
----------
device : str, optional
The device to get the default data types for.
Returns
-------
dtypes : dict
A dictionary describing the default data types used for new CuPy
arrays.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': cupy.float64,
'complex floating': cupy.complex128,
'integral': cupy.int64,
'indexing': cupy.int64}
"""
# TODO: Does this depend on device?
return {
"real floating": dtype(float64),
"complex floating": dtype(complex128),
"integral": dtype(intp),
"indexing": dtype(intp),
}
def dtypes(self, *, device=None, kind=None):
"""
The array API data types supported by CuPy.
Note that this function only returns data types that are defined by
the array API.
Parameters
----------
device : str, optional
The device to get the data types for.
kind : str or tuple of str, optional
The kind of data types to return. If ``None``, all data types are
returned. If a string, only data types of that kind are returned.
If a tuple, a dictionary containing the union of the given kinds
is returned. The following kinds are supported:
- ``'bool'``: boolean data types (i.e., ``bool``).
- ``'signed integer'``: signed integer data types (i.e., ``int8``,
``int16``, ``int32``, ``int64``).
- ``'unsigned integer'``: unsigned integer data types (i.e.,
``uint8``, ``uint16``, ``uint32``, ``uint64``).
- ``'integral'``: integer data types. Shorthand for ``('signed
integer', 'unsigned integer')``.
- ``'real floating'``: real-valued floating-point data types
(i.e., ``float32``, ``float64``).
- ``'complex floating'``: complex floating-point data types (i.e.,
``complex64``, ``complex128``).
- ``'numeric'``: numeric data types. Shorthand for ``('integral',
'real floating', 'complex floating')``.
Returns
-------
dtypes : dict
A dictionary mapping the names of data types to the corresponding
CuPy data types.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.dtypes(kind='signed integer')
{'int8': cupy.int8,
'int16': cupy.int16,
'int32': cupy.int32,
'int64': cupy.int64}
"""
# TODO: Does this depend on device?
if kind is None:
return {
"bool": dtype(bool),
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
"float32": dtype(float32),
"float64": dtype(float64),
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if kind == "bool":
return {"bool": bool}
if kind == "signed integer":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
}
if kind == "unsigned integer":
return {
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
}
if kind == "integral":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
}
if kind == "real floating":
return {
"float32": dtype(float32),
"float64": dtype(float64),
}
if kind == "complex floating":
return {
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if kind == "numeric":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
"float32": dtype(float32),
"float64": dtype(float64),
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if isinstance(kind, tuple):
res = {}
for k in kind:
res.update(self.dtypes(kind=k))
return res
raise ValueError(f"unsupported kind: {kind!r}")
def devices(self):
"""
The devices supported by CuPy.
Returns
-------
devices : list[Device]
The devices supported by CuPy.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes
"""
return [cuda.Device(i) for i in range(cuda.runtime.getDeviceCount())]
| __array_namespace_info__ |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/tree/base.py | {
"start": 1240,
"end": 7316
} | class ____(BaseIndex[IndexGraph]):
"""
Tree Index.
The tree index is a tree-structured index, where each node is a summary of
the children nodes. During index construction, the tree is constructed
in a bottoms-up fashion until we end up with a set of root_nodes.
There are a few different options during query time (see :ref:`Ref-Query`).
The main option is to traverse down the tree from the root nodes.
A secondary answer is to directly synthesize the answer from the root nodes.
Args:
summary_template (Optional[BasePromptTemplate]): A Summarization Prompt
(see :ref:`Prompt-Templates`).
insert_prompt (Optional[BasePromptTemplate]): An Tree Insertion Prompt
(see :ref:`Prompt-Templates`).
num_children (int): The number of children each node should have.
build_tree (bool): Whether to build the tree during index construction.
show_progress (bool): Whether to show progress bars. Defaults to False.
"""
index_struct_cls = IndexGraph
def __init__(
self,
nodes: Optional[Sequence[BaseNode]] = None,
objects: Optional[Sequence[IndexNode]] = None,
index_struct: Optional[IndexGraph] = None,
llm: Optional[LLM] = None,
summary_template: Optional[BasePromptTemplate] = None,
insert_prompt: Optional[BasePromptTemplate] = None,
num_children: int = 10,
build_tree: bool = True,
use_async: bool = False,
show_progress: bool = False,
**kwargs: Any,
) -> None:
"""Initialize params."""
# need to set parameters before building index in base class.
self.num_children = num_children
self.summary_template = summary_template or DEFAULT_SUMMARY_PROMPT
self.insert_prompt: BasePromptTemplate = insert_prompt or DEFAULT_INSERT_PROMPT
self.build_tree = build_tree
self._use_async = use_async
self._llm = llm or Settings.llm
super().__init__(
nodes=nodes,
index_struct=index_struct,
show_progress=show_progress,
objects=objects,
**kwargs,
)
def as_retriever(
self,
retriever_mode: Union[str, TreeRetrieverMode] = TreeRetrieverMode.SELECT_LEAF,
embed_model: Optional[BaseEmbedding] = None,
**kwargs: Any,
) -> BaseRetriever:
# NOTE: lazy import
from llama_index.core.indices.tree.all_leaf_retriever import (
TreeAllLeafRetriever,
)
from llama_index.core.indices.tree.select_leaf_embedding_retriever import (
TreeSelectLeafEmbeddingRetriever,
)
from llama_index.core.indices.tree.select_leaf_retriever import (
TreeSelectLeafRetriever,
)
from llama_index.core.indices.tree.tree_root_retriever import (
TreeRootRetriever,
)
self._validate_build_tree_required(TreeRetrieverMode(retriever_mode))
if retriever_mode == TreeRetrieverMode.SELECT_LEAF:
return TreeSelectLeafRetriever(self, object_map=self._object_map, **kwargs)
elif retriever_mode == TreeRetrieverMode.SELECT_LEAF_EMBEDDING:
embed_model = embed_model or Settings.embed_model
return TreeSelectLeafEmbeddingRetriever(
self, embed_model=embed_model, object_map=self._object_map, **kwargs
)
elif retriever_mode == TreeRetrieverMode.ROOT:
return TreeRootRetriever(self, object_map=self._object_map, **kwargs)
elif retriever_mode == TreeRetrieverMode.ALL_LEAF:
return TreeAllLeafRetriever(self, object_map=self._object_map, **kwargs)
else:
raise ValueError(f"Unknown retriever mode: {retriever_mode}")
def _validate_build_tree_required(self, retriever_mode: TreeRetrieverMode) -> None:
"""Check if index supports modes that require trees."""
if retriever_mode in REQUIRE_TREE_MODES and not self.build_tree:
raise ValueError(
"Index was constructed without building trees, "
f"but retriever mode {retriever_mode} requires trees."
)
def _build_index_from_nodes(
self, nodes: Sequence[BaseNode], **build_kwargs: Any
) -> IndexGraph:
"""Build the index from nodes."""
index_builder = GPTTreeIndexBuilder(
self.num_children,
self.summary_template,
llm=self._llm,
use_async=self._use_async,
show_progress=self._show_progress,
docstore=self._docstore,
)
return index_builder.build_from_nodes(nodes, build_tree=self.build_tree)
def _insert(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None:
"""Insert a document."""
# TODO: allow to customize insert prompt
inserter = TreeIndexInserter(
self.index_struct,
llm=self._llm,
num_children=self.num_children,
insert_prompt=self.insert_prompt,
summary_prompt=self.summary_template,
docstore=self._docstore,
)
inserter.insert(nodes)
def _delete_node(self, node_id: str, **delete_kwargs: Any) -> None:
"""Delete a node."""
raise NotImplementedError("Delete not implemented for tree index.")
@property
def ref_doc_info(self) -> Dict[str, RefDocInfo]:
"""Retrieve a dict mapping of ingested documents and their nodes+metadata."""
node_doc_ids = list(self.index_struct.all_nodes.values())
nodes = self.docstore.get_nodes(node_doc_ids)
all_ref_doc_info = {}
for node in nodes:
ref_node = node.source_node
if not ref_node:
continue
ref_doc_info = self.docstore.get_ref_doc_info(ref_node.node_id)
if not ref_doc_info:
continue
all_ref_doc_info[ref_node.node_id] = ref_doc_info
return all_ref_doc_info
# legacy
GPTTreeIndex = TreeIndex
| TreeIndex |
python | spack__spack | lib/spack/spack/installer.py | {
"start": 50660,
"end": 50993
} | class ____(BuildTask):
"""Blocking BuildTask executed directly in the main thread. Used for --fake installs."""
process_handle = MockBuildProcess() # type: ignore[assignment]
def _start_build_process(self):
build_process(self.pkg, self.request.install_args)
def poll(self):
return True
| FakeBuildTask |
python | kubernetes-client__python | kubernetes/client/models/v1alpha3_device_taint_selector.py | {
"start": 383,
"end": 8573
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'device': 'str',
'device_class_name': 'str',
'driver': 'str',
'pool': 'str',
'selectors': 'list[V1alpha3DeviceSelector]'
}
attribute_map = {
'device': 'device',
'device_class_name': 'deviceClassName',
'driver': 'driver',
'pool': 'pool',
'selectors': 'selectors'
}
def __init__(self, device=None, device_class_name=None, driver=None, pool=None, selectors=None, local_vars_configuration=None): # noqa: E501
"""V1alpha3DeviceTaintSelector - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._device = None
self._device_class_name = None
self._driver = None
self._pool = None
self._selectors = None
self.discriminator = None
if device is not None:
self.device = device
if device_class_name is not None:
self.device_class_name = device_class_name
if driver is not None:
self.driver = driver
if pool is not None:
self.pool = pool
if selectors is not None:
self.selectors = selectors
@property
def device(self):
"""Gets the device of this V1alpha3DeviceTaintSelector. # noqa: E501
If device is set, only devices with that name are selected. This field corresponds to slice.spec.devices[].name. Setting also driver and pool may be required to avoid ambiguity, but is not required. # noqa: E501
:return: The device of this V1alpha3DeviceTaintSelector. # noqa: E501
:rtype: str
"""
return self._device
@device.setter
def device(self, device):
"""Sets the device of this V1alpha3DeviceTaintSelector.
If device is set, only devices with that name are selected. This field corresponds to slice.spec.devices[].name. Setting also driver and pool may be required to avoid ambiguity, but is not required. # noqa: E501
:param device: The device of this V1alpha3DeviceTaintSelector. # noqa: E501
:type: str
"""
self._device = device
@property
def device_class_name(self):
"""Gets the device_class_name of this V1alpha3DeviceTaintSelector. # noqa: E501
If DeviceClassName is set, the selectors defined there must be satisfied by a device to be selected. This field corresponds to class.metadata.name. # noqa: E501
:return: The device_class_name of this V1alpha3DeviceTaintSelector. # noqa: E501
:rtype: str
"""
return self._device_class_name
@device_class_name.setter
def device_class_name(self, device_class_name):
"""Sets the device_class_name of this V1alpha3DeviceTaintSelector.
If DeviceClassName is set, the selectors defined there must be satisfied by a device to be selected. This field corresponds to class.metadata.name. # noqa: E501
:param device_class_name: The device_class_name of this V1alpha3DeviceTaintSelector. # noqa: E501
:type: str
"""
self._device_class_name = device_class_name
@property
def driver(self):
"""Gets the driver of this V1alpha3DeviceTaintSelector. # noqa: E501
If driver is set, only devices from that driver are selected. This fields corresponds to slice.spec.driver. # noqa: E501
:return: The driver of this V1alpha3DeviceTaintSelector. # noqa: E501
:rtype: str
"""
return self._driver
@driver.setter
def driver(self, driver):
"""Sets the driver of this V1alpha3DeviceTaintSelector.
If driver is set, only devices from that driver are selected. This fields corresponds to slice.spec.driver. # noqa: E501
:param driver: The driver of this V1alpha3DeviceTaintSelector. # noqa: E501
:type: str
"""
self._driver = driver
@property
def pool(self):
"""Gets the pool of this V1alpha3DeviceTaintSelector. # noqa: E501
If pool is set, only devices in that pool are selected. Also setting the driver name may be useful to avoid ambiguity when different drivers use the same pool name, but this is not required because selecting pools from different drivers may also be useful, for example when drivers with node-local devices use the node name as their pool name. # noqa: E501
:return: The pool of this V1alpha3DeviceTaintSelector. # noqa: E501
:rtype: str
"""
return self._pool
@pool.setter
def pool(self, pool):
"""Sets the pool of this V1alpha3DeviceTaintSelector.
If pool is set, only devices in that pool are selected. Also setting the driver name may be useful to avoid ambiguity when different drivers use the same pool name, but this is not required because selecting pools from different drivers may also be useful, for example when drivers with node-local devices use the node name as their pool name. # noqa: E501
:param pool: The pool of this V1alpha3DeviceTaintSelector. # noqa: E501
:type: str
"""
self._pool = pool
@property
def selectors(self):
"""Gets the selectors of this V1alpha3DeviceTaintSelector. # noqa: E501
Selectors contains the same selection criteria as a ResourceClaim. Currently, CEL expressions are supported. All of these selectors must be satisfied. # noqa: E501
:return: The selectors of this V1alpha3DeviceTaintSelector. # noqa: E501
:rtype: list[V1alpha3DeviceSelector]
"""
return self._selectors
@selectors.setter
def selectors(self, selectors):
"""Sets the selectors of this V1alpha3DeviceTaintSelector.
Selectors contains the same selection criteria as a ResourceClaim. Currently, CEL expressions are supported. All of these selectors must be satisfied. # noqa: E501
:param selectors: The selectors of this V1alpha3DeviceTaintSelector. # noqa: E501
:type: list[V1alpha3DeviceSelector]
"""
self._selectors = selectors
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha3DeviceTaintSelector):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha3DeviceTaintSelector):
return True
return self.to_dict() != other.to_dict()
| V1alpha3DeviceTaintSelector |
python | ipython__ipython | IPython/core/completer.py | {
"start": 21247,
"end": 21572
} | class ____(_MatcherAPIv1Base, Protocol):
#: API version
matcher_api_version: Optional[Literal[1]]
def __call__(self, text: str) -> list[str]:
"""Call signature."""
...
#: Protocol describing Matcher API v1.
MatcherAPIv1: TypeAlias = Union[_MatcherAPIv1Base, _MatcherAPIv1Total]
| _MatcherAPIv1Total |
python | automl__auto-sklearn | autosklearn/ensembles/abstract_ensemble.py | {
"start": 422,
"end": 4468
} | class ____(ABC):
@abstractmethod
def __init__(
self,
task_type: int,
metrics: Sequence[Scorer] | Scorer,
backend: Backend,
random_state: int | np.random.RandomState | None = None,
):
pass
def __getstate__(self) -> Dict[str, Any]:
# Cannot serialize a metric if
# it is user defined.
# That is, if doing pickle dump
# the metric won't be the same as the
# one in __main__. we don't use the metric
# in the EnsembleSelection so this should
# be fine
return {key: value for key, value in self.__dict__.items() if key != "metrics"}
@abstractmethod
def fit(
self,
base_models_predictions: np.ndarray | List[np.ndarray],
true_targets: np.ndarray,
model_identifiers: List[Tuple[int, int, float]],
runs: Sequence[Run],
X_data: SUPPORTED_FEAT_TYPES | None = None,
) -> "AbstractEnsemble":
"""Fit an ensemble given predictions of base models and targets.
Ensemble building maximizes performance (in contrast to
hyperparameter optimization)!
Parameters
----------
base_models_predictions: np.ndarray
shape = (n_base_models, n_data_points, n_targets)
n_targets is the number of classes in case of classification,
n_targets is 0 or 1 in case of regression
Can be a list of 2d numpy arrays as well to prevent copying all
predictions into a single, large numpy array.
X_data : list-like or sparse data
true_targets : array of shape [n_targets]
model_identifiers : identifier for each base model.
Can be used for practical text output of the ensemble.
runs: Sequence[Run]
Additional information for each run executed by SMAC that was
considered by the ensemble builder.
Returns
-------
self
"""
pass
@abstractmethod
def predict(
self, base_models_predictions: Union[np.ndarray, List[np.ndarray]]
) -> np.ndarray:
"""Create ensemble predictions from the base model predictions.
Parameters
----------
base_models_predictions : np.ndarray
shape = (n_base_models, n_data_points, n_targets)
Same as in the fit method.
Returns
-------
np.ndarray
"""
pass
@abstractmethod
def get_models_with_weights(
self, models: Dict[Tuple[int, int, float], BasePipeline]
) -> List[Tuple[float, BasePipeline]]:
"""List of (weight, model) pairs for all models included in the ensemble.
Parameters
----------
models : dict {identifier : model object}
The identifiers are the same as the one presented to the fit()
method. Models can be used for nice printing.
Returns
-------
List[Tuple[float, BasePipeline]]
"""
@abstractmethod
def get_identifiers_with_weights(
self,
) -> List[Tuple[Tuple[int, int, float], float]]:
"""Return a (identifier, weight)-pairs for all models that were passed to the
ensemble builder.
Parameters
----------
models : dict {identifier : model object}
The identifiers are the same as the one presented to the fit()
method. Models can be used for nice printing.
Returns
-------
List[Tuple[Tuple[int, int, float], float]
"""
@abstractmethod
def get_selected_model_identifiers(self) -> List[Tuple[int, int, float]]:
"""Return identifiers of models in the ensemble.
This includes models which have a weight of zero!
Returns
-------
list
"""
@abstractmethod
def get_validation_performance(self) -> float:
"""Return validation performance of ensemble.
Returns
-------
float
"""
| AbstractEnsemble |
python | kamyu104__LeetCode-Solutions | Python/palindrome-rearrangement-queries.py | {
"start": 80,
"end": 2355
} | class ____(object):
def canMakePalindromeQueries(self, s, queries):
"""
:type s: str
:type queries: List[List[int]]
:rtype: List[bool]
"""
def check(left1, right1, left2, right2):
def same(left, right):
return all(prefixs1[right+1][i]-prefixs1[left][i] == prefixs2[right+1][i]-prefixs2[left][i] for i in xrange(d))
min_left, max_left = min(left1, left2), max(left1, left2)
min_right, max_right = min(right1, right2), max(right1, right2)
if not (prefix[min_left]-prefix[0] == prefix[-1]-prefix[max_right+1] == 0):
return False
if min_right < max_left: # non-overlapped
return prefix[max_left]-prefix[min_right+1] == 0 and same(min_left, min_right) and same(max_left, max_right)
# overlapped
if (left1 == min_left) == (right1 == max_right): # inside another
return same(min_left, max_right)
# not inside another
p1, p2 = (prefixs1, prefixs2) if min_left == left1 else (prefixs2, prefixs1)
diff1 = [(p1[min_right+1][i]-p1[min_left][i])-(p2[max_left][i]-p2[min_left][i]) for i in xrange(d)]
diff2 = [(p2[max_right+1][i]-p2[max_left][i])-(p1[max_right+1][i]-p1[min_right+1][i]) for i in xrange(d)]
return diff1 == diff2 and all(x >= 0 for x in diff1) # test case: s = "aabbba", queries = [[0,1,3,4]]
lookup = [-1]*26
d = 0
for x in s:
if lookup[ord(x)-ord('a')] != -1:
continue
lookup[ord(x)-ord('a')] = d
d += 1
prefix = [0]*(len(s)//2+1)
prefixs1 = [[0]*d for _ in xrange(len(s)//2+1)]
prefixs2 = [[0]*d for _ in xrange(len(s)//2+1)]
for i in xrange(len(s)//2):
x, y = lookup[ord(s[i])-ord('a')], lookup[ord(s[~i])-ord('a')]
prefix[i+1] = prefix[i]+int(x != y)
for j in xrange(d):
prefixs1[i+1][j] = prefixs1[i][j]+int(j == x)
prefixs2[i+1][j] = prefixs2[i][j]+int(j == y)
return [check(q[0], q[1], (len(s)-1)-q[3], (len(s)-1)-q[2]) for q in queries]
# Time: O(26 * n + 26 * q)
# Space: O(26 * n)
# prefix sum, freq table
| Solution |
python | pallets__quart | src/quart/typing.py | {
"start": 4536,
"end": 4759
} | class ____(Protocol):
def __init__(self, app: Quart, scope: WebsocketScope) -> None: ...
async def __call__(
self, receive: ASGIReceiveCallable, send: ASGISendCallable
) -> None: ...
| ASGIWebsocketProtocol |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_utf8_08.py | {
"start": 314,
"end": 1153
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("utf8_08.xlsx")
self.ignore_files = [
"xl/printerSettings/printerSettings1.bin",
"xl/worksheets/_rels/sheet1.xml.rels",
]
self.ignore_elements = {"[Content_Types].xml": ['<Default Extension="bin"']}
def test_create_file(self):
"""Test the creation of an XlsxWriter file with utf-8 strings."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write("A1", "Foo")
worksheet.set_header("&LCafé")
worksheet.set_footer("&Rclé")
worksheet.set_paper(9)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | google__python-fire | fire/decorators_test.py | {
"start": 1326,
"end": 1450
} | class ____:
@decorators.SetParseFns(float, arg2=str)
def example3(self, arg1, arg2):
return arg1, arg2
| MixedArguments |
python | google__jax | jax/_src/config.py | {
"start": 7542,
"end": 9759
} | class ____(config_ext.Config[_T]):
__slots__ = (
'_name', '_update_thread_local_hook', '_update_global_hook',
'_parser', '_default_context_manager_value', '__doc__', '__name__',
)
def __init__(
self,
name: str,
default: _T,
help,
update_global_hook: Callable[[_T], None] | None = None,
update_thread_local_hook: Callable[[_T | None], None] | None = None,
parser: Callable[[Any], Any] | None = None,
extra_description: str = '',
default_context_manager_value: Any = no_default,
include_in_jit_key: bool = False,
include_in_trace_context: bool = False,
):
if parser is not None:
default = parser(default)
super().__init__(name, default, include_in_jit_key=include_in_jit_key,
include_in_trace_context=include_in_trace_context)
self._name = name
self.__name__ = name[4:] if name.startswith('jax_') else name
self.__doc__ = (f"Context manager for `{name}` config option"
f"{extra_description}.\n\n{help}")
self._update_global_hook = update_global_hook
self._update_thread_local_hook = update_thread_local_hook
self._parser = parser
self._default_context_manager_value = default_context_manager_value
if self._update_global_hook:
self._update_global_hook(default)
config_states[name] = self
def __bool__(self) -> NoReturn:
raise TypeError(
"bool() not supported for instances of type '{0}' "
"(did you mean to use '{0}.value' instead?)".format(
type(self).__name__))
def _set(self, value: _T) -> None:
if self._parser:
value = self._parser(value)
self.set_global(value)
if self._update_global_hook:
self._update_global_hook(value)
def __call__(self, new_val: Any = no_default):
return StateContextManager(self, new_val)
def _add_hooks(self, update_global_hook, update_thread_local_hook):
"""Private method that adds hooks to an existing context-manager.
Used to avoid cyclic import dependencies."""
self._update_thread_local_hook = update_thread_local_hook
self._update_global_hook = update_global_hook
update_global_hook(self.get_global())
| State |
python | huggingface__transformers | src/transformers/models/biogpt/modular_biogpt.py | {
"start": 2038,
"end": 2087
} | class ____(BartAttention):
pass
| BioGptAttention |
python | bokeh__bokeh | src/bokeh/core/property/descriptors.py | {
"start": 25234,
"end": 27875
} | class ____(PropertyDescriptor):
""" A ``PropertyDescriptor`` specialized to handling ``ColumnData`` properties.
"""
def __set__(self, obj, value, *, setter=None):
""" Implement the setter for the Python `descriptor protocol`_.
This method first separately extracts and removes any ``units`` field
in the JSON, and sets the associated units property directly. The
remaining value is then passed to the superclass ``__set__`` to
be handled.
.. note::
An optional argument ``setter`` has been added to the standard
setter arguments. When needed, this value should be provided by
explicitly invoking ``__set__``. See below for more information.
Args:
obj (HasProps) :
The instance to set a new property value on
value (obj) :
The new value to set the property to
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
Returns:
None
"""
if not hasattr(obj, '_property_values'):
# Initial values should be passed in to __init__, not set directly
class_name = obj.__class__.__name__
raise RuntimeError(f"Cannot set a property value {self.name!r} on a {class_name} instance before HasProps.__init__")
if self.property.readonly and obj._initialized:
# Allow to set a value during object initialization (e.g. value -> value_throttled)
class_name = obj.__class__.__name__
raise RuntimeError(f"{class_name}.{self.name} is a readonly property")
if isinstance(value, PropertyValueColumnData):
raise ValueError(_CDS_SET_FROM_CDS_ERROR)
from ...document.events import ColumnDataChangedEvent
hint = ColumnDataChangedEvent(obj.document, obj, "data", setter=setter) if obj.document else None
value = self.property.prepare_value(obj, self.name, value)
old = self._get(obj)
self._set(obj, old, value, hint=hint, setter=setter)
| ColumnDataPropertyDescriptor |
python | kubernetes-client__python | kubernetes/client/models/v1alpha1_mutating_admission_policy_binding.py | {
"start": 383,
"end": 7132
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1alpha1MutatingAdmissionPolicyBindingSpec'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1MutatingAdmissionPolicyBinding - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
@property
def api_version(self):
"""Gets the api_version of this V1alpha1MutatingAdmissionPolicyBinding. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1alpha1MutatingAdmissionPolicyBinding. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1alpha1MutatingAdmissionPolicyBinding.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1alpha1MutatingAdmissionPolicyBinding. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1alpha1MutatingAdmissionPolicyBinding. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1alpha1MutatingAdmissionPolicyBinding. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1alpha1MutatingAdmissionPolicyBinding.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1alpha1MutatingAdmissionPolicyBinding. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1alpha1MutatingAdmissionPolicyBinding. # noqa: E501
:return: The metadata of this V1alpha1MutatingAdmissionPolicyBinding. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1alpha1MutatingAdmissionPolicyBinding.
:param metadata: The metadata of this V1alpha1MutatingAdmissionPolicyBinding. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1alpha1MutatingAdmissionPolicyBinding. # noqa: E501
:return: The spec of this V1alpha1MutatingAdmissionPolicyBinding. # noqa: E501
:rtype: V1alpha1MutatingAdmissionPolicyBindingSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1alpha1MutatingAdmissionPolicyBinding.
:param spec: The spec of this V1alpha1MutatingAdmissionPolicyBinding. # noqa: E501
:type: V1alpha1MutatingAdmissionPolicyBindingSpec
"""
self._spec = spec
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1MutatingAdmissionPolicyBinding):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1MutatingAdmissionPolicyBinding):
return True
return self.to_dict() != other.to_dict()
| V1alpha1MutatingAdmissionPolicyBinding |
python | kamyu104__LeetCode-Solutions | Python/maximum-price-to-fill-a-bag.py | {
"start": 48,
"end": 484
} | class ____(object):
def maxPrice(self, items, capacity):
"""
:type items: List[List[int]]
:type capacity: int
:rtype: float
"""
result = 0
items.sort(key=lambda x: float(x[0])/x[1], reverse=True)
for p, c in items:
cnt = min(c, capacity)
capacity -= cnt
result += (float(p)/c)*cnt
return result if capacity == 0 else -1
| Solution |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 165732,
"end": 165959
} | class ____(SendmsgConnectionlessTests, SendrecvmsgUDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket.socket, "recvmsg")
| SendmsgUDPLITETest |
python | huggingface__transformers | src/transformers/models/whisper/processing_whisper.py | {
"start": 698,
"end": 2972
} | class ____(ProcessorMixin):
r"""
Constructs a Whisper processor which wraps a Whisper feature extractor and a Whisper tokenizer into a single
processor.
[`WhisperProcessor`] offers all the functionalities of [`WhisperFeatureExtractor`] and [`WhisperTokenizer`]. See
the [`~WhisperProcessor.__call__`] and [`~WhisperProcessor.decode`] for more information.
Args:
feature_extractor (`WhisperFeatureExtractor`):
An instance of [`WhisperFeatureExtractor`]. The feature extractor is a required input.
tokenizer (`WhisperTokenizer`):
An instance of [`WhisperTokenizer`]. The tokenizer is a required input.
"""
def __init__(self, feature_extractor, tokenizer):
super().__init__(feature_extractor, tokenizer)
def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
def __call__(self, *args, **kwargs):
"""
Forwards the `audio` argument to WhisperFeatureExtractor's [`~WhisperFeatureExtractor.__call__`] and the `text`
argument to [`~WhisperTokenizer.__call__`]. Please refer to the docstring of the above two methods for more
information.
"""
audio = kwargs.pop("audio", None)
sampling_rate = kwargs.pop("sampling_rate", None)
text = kwargs.pop("text", None)
if len(args) > 0:
audio = args[0]
args = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process.")
if audio is not None:
inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
if text is not None:
encodings = self.tokenizer(text, **kwargs)
if text is None:
return inputs
elif audio is None:
return encodings
else:
inputs["labels"] = encodings["input_ids"]
return inputs
def get_prompt_ids(self, text: str, return_tensors="np"):
return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
__all__ = ["WhisperProcessor"]
| WhisperProcessor |
python | scrapy__scrapy | tests/test_contracts.py | {
"start": 633,
"end": 694
} | class ____(Item):
name = Field()
url = Field()
| DemoItem |
python | tensorflow__tensorflow | tensorflow/python/training/localhost_cluster_performance_test.py | {
"start": 2256,
"end": 2895
} | class ____(test.Benchmark):
def benchmarkCreateLocalCluster(self):
deltas = []
iters = 5
for _ in range(iters):
start_time = time.time()
test.create_local_cluster(num_workers=1, num_ps=10)
end_time = time.time()
deltas.append(end_time - start_time)
median_deltas = np.median(deltas)
print("\n\nbenchmark_create_local_cluster_1_worker_10_ps. "
"iterations: %d, median wall time: %g\n\n" % (iters, median_deltas))
self.report_benchmark(
iters=iters,
wall_time=median_deltas,
name="benchmark_create_local_cluster_1_worker_10_ps")
| CreateLocalClusterBenchmark |
python | python-attrs__attrs | tests/test_make.py | {
"start": 69245,
"end": 69286
} | class ____:
__slots__ = ()
| BareSlottedC |
python | doocs__leetcode | solution/0700-0799/0705.Design HashSet/Solution2.py | {
"start": 0,
"end": 767
} | class ____:
def __init__(self):
self.size = 1000
self.data = [[] for _ in range(self.size)]
def add(self, key: int) -> None:
if self.contains(key):
return
idx = self.hash(key)
self.data[idx].append(key)
def remove(self, key: int) -> None:
if not self.contains(key):
return
idx = self.hash(key)
self.data[idx].remove(key)
def contains(self, key: int) -> bool:
idx = self.hash(key)
return any(v == key for v in self.data[idx])
def hash(self, key) -> int:
return key % self.size
# Your MyHashSet object will be instantiated and called as such:
# obj = MyHashSet()
# obj.add(key)
# obj.remove(key)
# param_3 = obj.contains(key)
| MyHashSet |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 541159,
"end": 541628
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of CreatePullRequest"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "pull_request")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
pull_request = sgqlc.types.Field("PullRequest", graphql_name="pullRequest")
"""The new pull request."""
| CreatePullRequestPayload |
python | mlflow__mlflow | docs/api_reference/source/languagesections/__init__.py | {
"start": 590,
"end": 1607
} | class ____(Directive):
has_content = True
def run(self):
self.assert_has_content()
text = "\n".join(self.content)
node = nodes.container(text)
node["classes"].append("plain-section")
self.add_name(node)
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
def add_assets(app):
app.add_js_file(JS_FILE)
def copy_assets(app, exception):
if app.builder.name != "html" or exception:
return
logger.info("Copying examplecode stylesheet/javascript... ", nonl=True)
dest = os.path.join(app.builder.outdir, "_static", JS_FILE)
source = os.path.join(os.path.abspath(os.path.dirname(__file__)), JS_FILE)
copyfile(source, dest)
logger.info("done")
def setup(app):
app.add_directive("code-section", CodeSectionDirective)
app.add_directive("plain-section", PlainSectionDirective)
app.connect("builder-inited", add_assets)
app.connect("build-finished", copy_assets)
| PlainSectionDirective |
python | realpython__materials | build-a-django-content-aggregator/source_code_final/podcasts/admin.py | {
"start": 89,
"end": 184
} | class ____(admin.ModelAdmin):
list_display = ("podcast_name", "title", "pub_date")
| EpisodeAdmin |
python | spack__spack | lib/spack/spack/test/repo.py | {
"start": 8344,
"end": 12593
} | class ____:
"""Test that the Repo class work correctly, and does not depend on globals,
except the REPOS_FINDER.
"""
def test_creation(self, mock_test_cache):
repo = spack.repo.Repo(spack.paths.mock_packages_path, cache=mock_test_cache)
assert repo.config_file.endswith("repo.yaml")
assert repo.namespace == "builtin_mock"
@pytest.mark.parametrize(
"name,expected", [("mpi", True), ("mpich", False), ("mpileaks", False)]
)
def test_is_virtual(self, name, expected, mock_test_cache):
repo = spack.repo.Repo(spack.paths.mock_packages_path, cache=mock_test_cache)
assert repo.is_virtual(name) is expected
assert repo.is_virtual_safe(name) is expected
repo_path = spack.repo.RepoPath(repo)
assert repo_path.is_virtual(name) is expected
assert repo_path.is_virtual_safe(name) is expected
@pytest.mark.parametrize(
"module_name,pkg_name",
[
("dla_future", "dla-future"),
("num7zip", "7zip"),
# If no package is there, None is returned
("unknown", None),
],
)
def test_real_name(self, module_name, pkg_name, mock_test_cache, tmp_path: pathlib.Path):
"""Test that we can correctly compute the 'real' name of a package, from the one
used to import the Python module.
"""
path, _ = spack.repo.create_repo(str(tmp_path), package_api=(1, 0))
if pkg_name is not None:
pkg_path = pathlib.Path(path) / "packages" / pkg_name / "package.py"
pkg_path.parent.mkdir(parents=True)
pkg_path.write_text("")
repo = spack.repo.Repo(
path, cache=spack.util.file_cache.FileCache(str(tmp_path / "cache"))
)
assert repo.real_name(module_name) == pkg_name
@pytest.mark.parametrize("name", ["mpileaks", "7zip", "dla-future"])
def test_get(self, name, mock_test_cache):
repo = spack.repo.Repo(spack.paths.mock_packages_path, cache=mock_test_cache)
mock_spec = spack.spec.Spec(name)
mock_spec._mark_concrete()
pkg = repo.get(mock_spec)
assert pkg.__class__ == repo.get_pkg_class(name)
@pytest.mark.parametrize("virtual_name,expected", [("mpi", ["mpich", "zmpi"])])
def test_providers(self, virtual_name, expected, mock_test_cache):
repo = spack.repo.Repo(spack.paths.mock_packages_path, cache=mock_test_cache)
provider_names = {x.name for x in repo.providers_for(virtual_name)}
assert provider_names.issuperset(expected)
@pytest.mark.parametrize(
"extended,expected",
[("python", ["py-extension1", "python-venv"]), ("perl", ["perl-extension"])],
)
def test_extensions(self, extended, expected, mock_test_cache):
repo = spack.repo.Repo(spack.paths.mock_packages_path, cache=mock_test_cache)
repo_path = spack.repo.RepoPath(repo)
for instance in (repo, repo_path):
provider_names = {x.name for x in instance.extensions_for(extended)}
assert provider_names.issuperset(expected)
def test_all_package_names(self, mock_test_cache):
repo = spack.repo.Repo(spack.paths.mock_packages_path, cache=mock_test_cache)
repo_path = spack.repo.RepoPath(repo)
for instance in (repo, repo_path):
all_names = instance.all_package_names(include_virtuals=True)
real_names = instance.all_package_names(include_virtuals=False)
assert set(all_names).issuperset(real_names)
for name in set(all_names) - set(real_names):
assert instance.is_virtual(name)
assert instance.is_virtual_safe(name)
def test_packages_with_tags(self, mock_test_cache):
repo = spack.repo.Repo(spack.paths.mock_packages_path, cache=mock_test_cache)
repo_path = spack.repo.RepoPath(repo)
for instance in (repo, repo_path):
r1 = instance.packages_with_tags("tag1")
r2 = instance.packages_with_tags("tag1", "tag2")
assert "mpich" in r1 and "mpich" in r2
assert "mpich2" in r1 and "mpich2" not in r2
assert r2.issubset(r1)
@pytest.mark.usefixtures("nullify_globals")
| TestRepo |
python | huggingface__transformers | tests/models/audioflamingo3/test_modeling_audioflamingo3.py | {
"start": 9104,
"end": 14093
} | class ____(unittest.TestCase):
"""
Slow tests against the public checkpoint to validate processor-model alignment and in-place fusion.
"""
@classmethod
def setUp(cls):
cleanup(torch_device, gc_collect=True)
cls.checkpoint = "nvidia/audio-flamingo-3-hf"
cls.processor = AutoProcessor.from_pretrained(cls.checkpoint)
def tearDown(self):
cleanup(torch_device, gc_collect=True)
@slow
def test_fixture_single_matches(self):
"""
reproducer (creates JSON directly in repo): https://gist.github.com/ebezzam/c979f0f1a2b9223fa137faf1c02022d4#file-reproducer-py
"""
path = Path(__file__).parent.parent.parent / "fixtures/audioflamingo3/expected_results_single.json"
with open(path, "r", encoding="utf-8") as f:
raw = json.load(f)
exp_ids = torch.tensor(raw["token_ids"])
exp_txt = raw["transcriptions"]
conversation = [
{
"role": "user",
"content": [
{
"type": "text",
"text": "Transcribe the input speech.",
},
{
"type": "audio",
"path": "https://huggingface.co/datasets/nvidia/AudioSkills/resolve/main/assets/Why_do_we_ask_questions_converted.wav",
},
],
}
]
model = AudioFlamingo3ForConditionalGeneration.from_pretrained(
self.checkpoint, device_map=torch_device, dtype=torch.bfloat16
).eval()
batch = self.processor.apply_chat_template(
conversation, tokenize=True, add_generation_prompt=True, return_dict=True
).to(model.device, dtype=model.dtype)
seq = model.generate(**batch)
inp_len = batch["input_ids"].shape[1]
gen_ids = seq[:, inp_len:] if seq.shape[1] >= inp_len else seq
torch.testing.assert_close(gen_ids.cpu(), exp_ids)
txt = self.processor.batch_decode(gen_ids, skip_special_tokens=True)
self.assertListEqual(txt, exp_txt)
@slow
def test_fixture_batched_matches(self):
"""
reproducer (creates JSON directly in repo): https://gist.github.com/ebezzam/c979f0f1a2b9223fa137faf1c02022d4#file-reproducer-py
"""
path = Path(__file__).parent.parent.parent / "fixtures/audioflamingo3/expected_results_batched.json"
with open(path, "r", encoding="utf-8") as f:
raw = json.load(f)
exp_ids = torch.tensor(raw["token_ids"])
exp_txt = raw["transcriptions"]
conversations = [
[
{
"role": "user",
"content": [
{
"type": "text",
"text": "What is surprising about the relationship between the barking and the music?",
},
{
"type": "audio",
"path": "https://huggingface.co/datasets/nvidia/AudioSkills/resolve/main/assets/dogs_barking_in_sync_with_the_music.wav",
},
],
}
],
[
{
"role": "user",
"content": [
{
"type": "text",
"text": "Why is the philosopher's name mentioned in the lyrics? "
"(A) To express a sense of nostalgia "
"(B) To indicate that language cannot express clearly, satirizing the inversion of black and white in the world "
"(C) To add depth and complexity to the lyrics "
"(D) To showcase the wisdom and influence of the philosopher",
},
{
"type": "audio",
"path": "https://huggingface.co/datasets/nvidia/AudioSkills/resolve/main/assets/Ch6Ae9DT6Ko_00-04-03_00-04-31.wav",
},
],
}
],
]
model = AudioFlamingo3ForConditionalGeneration.from_pretrained(
self.checkpoint, device_map=torch_device, dtype=torch.bfloat16
).eval()
batch = self.processor.apply_chat_template(
conversations, tokenize=True, add_generation_prompt=True, return_dict=True
).to(model.device, dtype=model.dtype)
seq = model.generate(**batch)
inp_len = batch["input_ids"].shape[1]
gen_ids = seq[:, inp_len:] if seq.shape[1] >= inp_len else seq
torch.testing.assert_close(gen_ids.cpu(), exp_ids)
txt = self.processor.batch_decode(gen_ids, skip_special_tokens=True)
self.assertListEqual(txt, exp_txt)
| AudioFlamingo3ForConditionalGenerationIntegrationTest |
python | kamyu104__LeetCode-Solutions | Python/kill-process.py | {
"start": 66,
"end": 685
} | class ____(object):
def killProcess(self, pid, ppid, kill):
"""
:type pid: List[int]
:type ppid: List[int]
:type kill: int
:rtype: List[int]
"""
def killAll(pid, children, killed):
killed.append(pid)
for child in children[pid]:
killAll(child, children, killed)
result = []
children = collections.defaultdict(set)
for i in xrange(len(pid)):
children[ppid[i]].add(pid[i])
killAll(kill, children, result)
return result
# Time: O(n)
# Space: O(n)
# BFS solution.
| Solution |
python | fluentpython__example-code-2e | 21-async/mojifinder/bottle.py | {
"start": 17630,
"end": 22730
} | class ____(object):
''' This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
turing an URL path rule into a regular expression usable by the Router.
'''
def __init__(self, app, rule, method, callback, name=None,
plugins=None, skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/:page``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = ConfigDict().load_dict(config, make_namespaces=True)
def __call__(self, *a, **ka):
depr("Some APIs changed to return Route() instances instead of"\
" callables. Make sure to use the Route.call method and not to"\
" call Route instances directly.") #0.12
return self.call(*a, **ka)
@cached_property
def call(self):
''' The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests.'''
return self._make_callback()
def reset(self):
''' Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. '''
self.__dict__.pop('call', None)
def prepare(self):
''' Do all on-demand work immediately (useful for debugging).'''
self.call
@property
def _context(self):
depr('Switch to Plugin API v2 and access the Route object directly.') #0.12
return dict(rule=self.rule, method=self.method, callback=self.callback,
name=self.name, app=self.app, config=self.config,
apply=self.plugins, skip=self.skiplist)
def all_plugins(self):
''' Yield all Plugins affecting this route. '''
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
api = getattr(plugin, 'api', 1)
context = self if api > 1 else self._context
callback = plugin.apply(callback, context)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
if not callback is self.callback:
update_wrapper(callback, self.callback)
return callback
def get_undecorated_callback(self):
''' Return the callback. If the callback is a decorated function, try to
recover the original function. '''
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
func = getattr(func, closure_attr)[0].cell_contents
return func
def get_callback_args(self):
''' Return a list of argument names the callback (most likely) accepts
as keyword arguments. If the callback is a decorated function, try
to recover the original function before inspection. '''
return getargspec(self.get_undecorated_callback())[0]
def get_config(self, key, default=None):
''' Lookup a config field and return its value, first checking the
route.config, then route.app.config.'''
for conf in (self.config, self.app.conifg):
if key in conf: return conf[key]
return default
def __repr__(self):
cb = self.get_undecorated_callback()
return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
| Route |
python | readthedocs__readthedocs.org | readthedocs/core/migrations/0003_add_banned_status.py | {
"start": 100,
"end": 469
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("core", "0002_make_userprofile_user_a_onetoonefield"),
]
operations = [
migrations.AddField(
model_name="userprofile",
name="banned",
field=models.BooleanField(default=False, verbose_name="Banned"),
),
]
| Migration |
python | numpy__numpy | numpy/_core/tests/test_numerictypes.py | {
"start": 22170,
"end": 23205
} | class ____:
# gh-9799
numeric_types = [
np.byte, np.short, np.intc, np.long, np.longlong,
np.ubyte, np.ushort, np.uintc, np.ulong, np.ulonglong,
np.half, np.single, np.double, np.longdouble,
np.csingle, np.cdouble, np.clongdouble,
]
def test_names_are_unique(self):
# none of the above may be aliases for each other
assert len(set(self.numeric_types)) == len(self.numeric_types)
# names must be unique
names = [t.__name__ for t in self.numeric_types]
assert len(set(names)) == len(names)
@pytest.mark.parametrize('t', numeric_types)
def test_names_reflect_attributes(self, t):
""" Test that names correspond to where the type is under ``np.`` """
assert getattr(np, t.__name__) is t
@pytest.mark.parametrize('t', numeric_types)
def test_names_are_undersood_by_dtype(self, t):
""" Test the dtype constructor maps names back to the type """
assert np.dtype(t.__name__).type is t
| TestScalarTypeNames |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/partition_mappings.py | {
"start": 92,
"end": 530
} | class ____(graphene.ObjectType):
className = graphene.NonNull(graphene.String)
description = graphene.NonNull(graphene.String)
class Meta:
name = "PartitionMapping"
def __init__(
self,
partition_mapping: PartitionMapping,
):
super().__init__(
className=type(partition_mapping).__name__,
description=partition_mapping.description,
)
| GraphenePartitionMapping |
python | ray-project__ray | doc/source/serve/doc_code/grpc_proxy/grpc_guide.py | {
"start": 769,
"end": 5101
} | class ____:
def __call__(self, user_message: UserDefinedMessage) -> UserDefinedResponse:
greeting = f"Hello {user_message.name} from {user_message.origin}"
num = user_message.num * 2
user_response = UserDefinedResponse(
greeting=greeting,
num=num,
)
return user_response
@serve.multiplexed(max_num_models_per_replica=1)
async def get_model(self, model_id: str) -> str:
return f"loading model: {model_id}"
async def Multiplexing(
self, user_message: UserDefinedMessage2
) -> UserDefinedResponse2:
model_id = serve.get_multiplexed_model_id()
model = await self.get_model(model_id)
user_response = UserDefinedResponse2(
greeting=f"Method2 called model, {model}",
)
return user_response
def Streaming(
self, user_message: UserDefinedMessage
) -> Generator[UserDefinedResponse, None, None]:
for i in range(10):
greeting = f"{i}: Hello {user_message.name} from {user_message.origin}"
num = user_message.num * 2 + i
user_response = UserDefinedResponse(
greeting=greeting,
num=num,
)
yield user_response
time.sleep(0.1)
g = GrpcDeployment.bind()
# __end_grpc_deployment__
# __begin_deploy_grpc_app__
app1 = "app1"
serve.run(target=g, name=app1, route_prefix=f"/{app1}")
# __end_deploy_grpc_app__
# __begin_send_grpc_requests__
import grpc
from user_defined_protos_pb2_grpc import UserDefinedServiceStub
from user_defined_protos_pb2 import UserDefinedMessage
channel = grpc.insecure_channel("localhost:9000")
stub = UserDefinedServiceStub(channel)
request = UserDefinedMessage(name="foo", num=30, origin="bar")
response, call = stub.__call__.with_call(request=request)
print(f"status code: {call.code()}") # grpc.StatusCode.OK
print(f"greeting: {response.greeting}") # "Hello foo from bar"
print(f"num: {response.num}") # 60
# __end_send_grpc_requests__
# __begin_health_check__
import grpc
from ray.serve.generated.serve_pb2_grpc import RayServeAPIServiceStub
from ray.serve.generated.serve_pb2 import HealthzRequest, ListApplicationsRequest
channel = grpc.insecure_channel("localhost:9000")
stub = RayServeAPIServiceStub(channel)
request = ListApplicationsRequest()
response = stub.ListApplications(request=request)
print(f"Applications: {response.application_names}") # ["app1"]
request = HealthzRequest()
response = stub.Healthz(request=request)
print(f"Health: {response.message}") # "success"
# __end_health_check__
# __begin_metadata__
import grpc
from user_defined_protos_pb2_grpc import UserDefinedServiceStub
from user_defined_protos_pb2 import UserDefinedMessage2
channel = grpc.insecure_channel("localhost:9000")
stub = UserDefinedServiceStub(channel)
request = UserDefinedMessage2()
app_name = "app1"
request_id = "123"
multiplexed_model_id = "999"
metadata = (
("application", app_name),
("request_id", request_id),
("multiplexed_model_id", multiplexed_model_id),
)
response, call = stub.Multiplexing.with_call(request=request, metadata=metadata)
print(f"greeting: {response.greeting}") # "Method2 called model, loading model: 999"
for key, value in call.trailing_metadata():
print(f"trailing metadata key: {key}, value {value}") # "request_id: 123"
# __end_metadata__
# __begin_streaming__
import grpc
from user_defined_protos_pb2_grpc import UserDefinedServiceStub
from user_defined_protos_pb2 import UserDefinedMessage
channel = grpc.insecure_channel("localhost:9000")
stub = UserDefinedServiceStub(channel)
request = UserDefinedMessage(name="foo", num=30, origin="bar")
metadata = (("application", "app1"),)
responses = stub.Streaming(request=request, metadata=metadata)
for response in responses:
print(f"greeting: {response.greeting}") # greeting: n: Hello foo from bar
print(f"num: {response.num}") # num: 60 + n
# __end_streaming__
# __begin_model_composition_deployment__
import requests
import torch
from typing import List
from PIL import Image
from io import BytesIO
from torchvision import transforms
from user_defined_protos_pb2 import (
ImageClass,
ImageData,
)
from ray import serve
from ray.serve.handle import DeploymentHandle
@serve.deployment
| GrpcDeployment |
python | jpadilla__pyjwt | jwt/exceptions.py | {
"start": 2261,
"end": 2375
} | class ____(InvalidTokenError):
"""Raised when a token's ``jti`` claim is not a string"""
pass
| InvalidJTIError |
python | django__django | tests/admin_views/admin.py | {
"start": 8975,
"end": 9572
} | class ____(admin.ModelAdmin):
list_display = ("name", "gender", "alive")
list_editable = ("gender", "alive")
list_filter = ("gender",)
search_fields = ("^name",)
save_as = True
def get_changelist_formset(self, request, **kwargs):
return super().get_changelist_formset(
request, formset=BasePersonModelFormSet, **kwargs
)
def get_queryset(self, request):
# Order by a field that isn't in list display, to be able to test
# whether ordering is preserved.
return super().get_queryset(request).order_by("age")
| PersonAdmin |
python | numba__numba | numba/tests/test_extending.py | {
"start": 11925,
"end": 12540
} | class ____(models.OpaqueModel):
def __init__(self, dmm, fe_type):
models.OpaqueModel.__init__(self, dmm, fe_type)
infer_global(MyClass, CallableTypeRef(MyClass))
@lower_constant(CallableTypeRef)
def constant_callable_typeref(context, builder, ty, pyval):
return context.get_dummy_value()
# -----------------------------------------------------------------------
@overload(np.exp)
def overload_np_exp(obj):
if isinstance(obj, MyDummyType):
def imp(obj):
# Returns a constant if a MyDummyType is seen
return 0xDEADBEEF
return imp
| CallableTypeModel |
python | pytorch__pytorch | test/dynamo/test_verify_correctness.py | {
"start": 596,
"end": 1648
} | class ____(torch.nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super().__init__()
self.conv = torch.nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = torch.nn.BatchNorm2d(out_channels, eps=0.001)
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(self.bn(self.conv(x)))
def toy_example(a, b):
x = a / (torch.abs(a) + 1)
if b.sum() < 0:
b = b * -1
return x * b
def transform(gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
for node in gm.graph.nodes:
# Checks if we're calling a function (i.e:
# operator.add)
if node.op == "call_function":
# The target attribute is the function
# that call_function calls.
if node.target == operator.mul:
node.target = operator.add
gm.graph.lint() # Does some checks to make sure the
# Graph is well-formed.
gm.recompile()
return gm
@config.patch("verify_correctness", True)
| Conv_Bn_Relu |
python | walkccc__LeetCode | solutions/1812. Determine Color of a Chessboard Square/1812.py | {
"start": 0,
"end": 146
} | class ____:
def squareIsWhite(self, coordinates: str) -> bool:
letter, digit = coordinates
return ord(letter) % 2 != int(digit) % 2
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1045848,
"end": 1046378
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of UpdateTeamDiscussionComment"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "team_discussion_comment")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
team_discussion_comment = sgqlc.types.Field("TeamDiscussionComment", graphql_name="teamDiscussionComment")
"""The updated comment."""
| UpdateTeamDiscussionCommentPayload |
python | django__django | tests/view_tests/models.py | {
"start": 231,
"end": 609
} | class ____(models.Model):
"""
An abstract article Model so that we can create article models with and
without a get_absolute_url method (for create_update generic views tests).
"""
title = models.CharField(max_length=100)
slug = models.SlugField()
author = models.ForeignKey(Author, models.CASCADE)
class Meta:
abstract = True
| BaseArticle |
python | keras-team__keras | keras/src/ops/core.py | {
"start": 6649,
"end": 10553
} | class ____(Operation):
def __init__(self, reverse=False, axis=0, *, name=None):
super().__init__(name=name)
self.reverse = reverse
self.axis = axis
def call(self, f, elems):
return backend.core.associative_scan(
f, elems, reverse=self.reverse, axis=self.axis
)
def compute_output_spec(self, f, elems):
elems_flat = tree.flatten(elems)
lens = [elem.shape[self.axis] for elem in elems_flat]
if len(set(lens)) != 1:
raise ValueError(
"Array inputs to associative_scan must have the same "
"first dimension. (saw: {})".format(
[elem.shape for elem in elems_flat]
)
)
x = tree.pack_sequence_as(
elems,
[slice_along_axis(x, 0, 1, axis=self.axis) for x in elems_flat],
)
y_spec = backend.compute_output_spec(f, x, x)
def _restore_shape(x):
return KerasTensor(
shape=elems_flat[0].shape, dtype=x.dtype, sparse=x.sparse
)
y_spec = tree.map_structure(_restore_shape, y_spec)
return y_spec
@keras_export("keras.ops.associative_scan")
def associative_scan(f, elems, reverse=False, axis=0):
"""Performs a scan with an associative binary operation, in parallel.
This operation his similar to `scan`, with the key difference that
`associative_scan` is a parallel implementation with
potentially significant performance benefits, especially when jit compiled.
The catch is that it can only be used when `f` is a binary associative
operation (i.e. it must verify `f(a, f(b, c)) == f(f(a, b), c)`).
For an introduction to associative scans, refer to this paper:
Blelloch, Guy E. 1990.
[Prefix Sums and Their Applications](
https://www.cs.cmu.edu/~guyb/papers/Ble93.pdf).
Args:
f: A Python callable implementing an associative binary operation with
signature `r = f(a, b)`. Function `f` must be associative, i.e.,
it must satisfy the equation
`f(a, f(b, c)) == f(f(a, b), c)`.
The inputs and result are (possibly nested Python tree structures
of) array(s) matching `elems`. Each array has a dimension in place
of the `axis` dimension. `f` should be applied elementwise over
the `axis` dimension.
The result `r` has the same shape (and structure) as the
two inputs `a` and `b`.
elems: A (possibly nested Python tree structure of) array(s), each with
an `axis` dimension of size `num_elems`.
reverse: A boolean stating if the scan should be reversed with respect
to the `axis` dimension.
axis: an integer identifying the axis over which the scan should occur.
Returns:
A (possibly nested Python tree structure of) array(s) of the same shape
and structure as `elems`, in which the `k`'th element of `axis` is
the result of recursively applying `f` to combine the first `k`
elements of `elems` along `axis`. For example, given
`elems = [a, b, c, ...]`, the result would be
`[a, f(a, b), f(f(a, b), c), ...]`.
Examples:
>>> sum_fn = lambda x, y: x + y
>>> xs = keras.ops.arange(5)
>>> ys = keras.ops.associative_scan(sum_fn, xs, axis=0)
>>> ys
[0, 1, 3, 6, 10]
>>> sum_fn = lambda x, y: [x[0] + y[0], x[1] + y[1], x[2] + y[2]]
>>> xs = [keras.ops.array([[1, 2]]) for _ in range(3)]
>>> ys = keras.ops.associative_scan(sum_fn, xs, axis=0)
>>> ys
[[1, 3], [1, 3], [1, 3]]
"""
if any_symbolic_tensors((elems,)):
return AssociativeScan(reverse=reverse, axis=axis).symbolic_call(
f, elems
)
return backend.core.associative_scan(f, elems, reverse=reverse, axis=axis)
| AssociativeScan |
python | redis__redis-py | redis/cache.py | {
"start": 1034,
"end": 1564
} | class ____:
def __init__(
self,
cache_key: CacheKey,
cache_value: bytes,
status: CacheEntryStatus,
connection_ref,
):
self.cache_key = cache_key
self.cache_value = cache_value
self.status = status
self.connection_ref = connection_ref
def __hash__(self):
return hash(
(self.cache_key, self.cache_value, self.status, self.connection_ref)
)
def __eq__(self, other):
return hash(self) == hash(other)
| CacheEntry |
python | gevent__gevent | src/greentest/3.10/test_ssl.py | {
"start": 210459,
"end": 222757
} | class ____(unittest.TestCase):
"""Verify behavior of close sockets with received data before to the handshake.
"""
class SingleConnectionTestServerThread(threading.Thread):
def __init__(self, *, name, call_after_accept, timeout=None):
self.call_after_accept = call_after_accept
self.received_data = b'' # set by .run()
self.wrap_error = None # set by .run()
self.listener = None # set by .start()
self.port = None # set by .start()
if timeout is None:
self.timeout = support.SHORT_TIMEOUT
else:
self.timeout = timeout
super().__init__(name=name)
def __enter__(self):
self.start()
return self
def __exit__(self, *args):
try:
if self.listener:
self.listener.close()
except OSError:
pass
self.join()
self.wrap_error = None # avoid dangling references
def start(self):
self.ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.ssl_ctx.verify_mode = ssl.CERT_REQUIRED
self.ssl_ctx.load_verify_locations(cafile=ONLYCERT)
self.ssl_ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
self.listener = socket.socket()
self.port = socket_helper.bind_port(self.listener)
self.listener.settimeout(self.timeout)
self.listener.listen(1)
super().start()
def run(self):
try:
conn, address = self.listener.accept()
except TimeoutError:
# on timeout, just close the listener
return
finally:
self.listener.close()
with conn:
if self.call_after_accept(conn):
return
try:
tls_socket = self.ssl_ctx.wrap_socket(conn, server_side=True)
except OSError as err: # ssl.SSLError inherits from OSError
self.wrap_error = err
else:
try:
self.received_data = tls_socket.recv(400)
except OSError:
pass # closed, protocol error, etc.
def non_linux_skip_if_other_okay_error(self, err):
if sys.platform == "linux":
return # Expect the full test setup to always work on Linux.
if (isinstance(err, ConnectionResetError) or
(isinstance(err, OSError) and err.errno == errno.EINVAL) or
re.search('wrong.version.number', getattr(err, "reason", ""), re.I)):
# On Windows the TCP RST leads to a ConnectionResetError
# (ECONNRESET) which Linux doesn't appear to surface to userspace.
# If wrap_socket() winds up on the "if connected:" path and doing
# the actual wrapping... we get an SSLError from OpenSSL. Typically
# WRONG_VERSION_NUMBER. While appropriate, neither is the scenario
# we're specifically trying to test. The way this test is written
# is known to work on Linux. We'll skip it anywhere else that it
# does not present as doing so.
try:
self.skipTest(f"Could not recreate conditions on {sys.platform}:"
f" {err=}")
finally:
# gh-108342: Explicitly break the reference cycle
err = None
# If maintaining this conditional winds up being a problem.
# just turn this into an unconditional skip anything but Linux.
# The important thing is that our CI has the logic covered.
def test_preauth_data_to_tls_server(self):
server_accept_called = threading.Event()
ready_for_server_wrap_socket = threading.Event()
def call_after_accept(unused):
server_accept_called.set()
if not ready_for_server_wrap_socket.wait(support.SHORT_TIMEOUT):
raise RuntimeError("wrap_socket event never set, test may fail.")
return False # Tell the server thread to continue.
server = self.SingleConnectionTestServerThread(
call_after_accept=call_after_accept,
name="preauth_data_to_tls_server")
server.__enter__() # starts it
self.addCleanup(server.__exit__) # ... & unittest.TestCase stops it.
with socket.socket() as client:
client.connect(server.listener.getsockname())
# This forces an immediate connection close via RST on .close().
set_socket_so_linger_on_with_zero_timeout(client)
client.setblocking(False)
server_accept_called.wait()
client.send(b"DELETE /data HTTP/1.0\r\n\r\n")
client.close() # RST
ready_for_server_wrap_socket.set()
server.join()
wrap_error = server.wrap_error
server.wrap_error = None
try:
self.assertEqual(b"", server.received_data)
self.assertIsInstance(wrap_error, OSError) # All platforms.
self.non_linux_skip_if_other_okay_error(wrap_error)
self.assertIsInstance(wrap_error, ssl.SSLError)
self.assertIn("before TLS handshake with data", wrap_error.args[1])
self.assertIn("before TLS handshake with data", wrap_error.reason)
self.assertNotEqual(0, wrap_error.args[0])
self.assertIsNone(wrap_error.library, msg="attr must exist")
finally:
# gh-108342: Explicitly break the reference cycle
wrap_error = None
server = None
def test_preauth_data_to_tls_client(self):
server_can_continue_with_wrap_socket = threading.Event()
client_can_continue_with_wrap_socket = threading.Event()
def call_after_accept(conn_to_client):
if not server_can_continue_with_wrap_socket.wait(support.SHORT_TIMEOUT):
print("ERROR: test client took too long")
# This forces an immediate connection close via RST on .close().
set_socket_so_linger_on_with_zero_timeout(conn_to_client)
conn_to_client.send(
b"HTTP/1.0 307 Temporary Redirect\r\n"
b"Location: https://example.com/someone-elses-server\r\n"
b"\r\n")
conn_to_client.close() # RST
client_can_continue_with_wrap_socket.set()
return True # Tell the server to stop.
server = self.SingleConnectionTestServerThread(
call_after_accept=call_after_accept,
name="preauth_data_to_tls_client")
server.__enter__() # starts it
self.addCleanup(server.__exit__) # ... & unittest.TestCase stops it.
# Redundant; call_after_accept sets SO_LINGER on the accepted conn.
set_socket_so_linger_on_with_zero_timeout(server.listener)
with socket.socket() as client:
client.connect(server.listener.getsockname())
server_can_continue_with_wrap_socket.set()
if not client_can_continue_with_wrap_socket.wait(support.SHORT_TIMEOUT):
self.fail("test server took too long")
ssl_ctx = ssl.create_default_context()
try:
tls_client = ssl_ctx.wrap_socket(
client, server_hostname="localhost")
except OSError as err: # SSLError inherits from OSError
wrap_error = err
received_data = b""
else:
wrap_error = None
received_data = tls_client.recv(400)
tls_client.close()
server.join()
try:
self.assertEqual(b"", received_data)
self.assertIsInstance(wrap_error, OSError) # All platforms.
self.non_linux_skip_if_other_okay_error(wrap_error)
self.assertIsInstance(wrap_error, ssl.SSLError)
self.assertIn("before TLS handshake with data", wrap_error.args[1])
self.assertIn("before TLS handshake with data", wrap_error.reason)
self.assertNotEqual(0, wrap_error.args[0])
self.assertIsNone(wrap_error.library, msg="attr must exist")
finally:
# gh-108342: Explicitly break the reference cycle
wrap_error = None
server = None
def test_https_client_non_tls_response_ignored(self):
server_responding = threading.Event()
class SynchronizedHTTPSConnection(http.client.HTTPSConnection):
def connect(self):
# Call clear text HTTP connect(), not the encrypted HTTPS (TLS)
# connect(): wrap_socket() is called manually below.
http.client.HTTPConnection.connect(self)
# Wait for our fault injection server to have done its thing.
if not server_responding.wait(support.SHORT_TIMEOUT) and support.verbose:
sys.stdout.write("server_responding event never set.")
self.sock = self._context.wrap_socket(
self.sock, server_hostname=self.host)
def call_after_accept(conn_to_client):
# This forces an immediate connection close via RST on .close().
set_socket_so_linger_on_with_zero_timeout(conn_to_client)
conn_to_client.send(
b"HTTP/1.0 402 Payment Required\r\n"
b"\r\n")
conn_to_client.close() # RST
server_responding.set()
return True # Tell the server to stop.
timeout = 2.0
server = self.SingleConnectionTestServerThread(
call_after_accept=call_after_accept,
name="non_tls_http_RST_responder",
timeout=timeout)
server.__enter__() # starts it
self.addCleanup(server.__exit__) # ... & unittest.TestCase stops it.
# Redundant; call_after_accept sets SO_LINGER on the accepted conn.
set_socket_so_linger_on_with_zero_timeout(server.listener)
connection = SynchronizedHTTPSConnection(
server.listener.getsockname()[0],
port=server.port,
context=ssl.create_default_context(),
timeout=timeout,
)
# There are lots of reasons this raises as desired, long before this
# test was added. Sending the request requires a successful TLS wrapped
# socket; that fails if the connection is broken. It may seem pointless
# to test this. It serves as an illustration of something that we never
# want to happen... properly not happening.
with self.assertRaises(OSError):
connection.request("HEAD", "/test", headers={"Host": "localhost"})
response = connection.getresponse()
server.join()
def setUpModule():
if support.verbose:
plats = {
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
thread_info = threading_helper.threading_setup()
unittest.addModuleCleanup(threading_helper.threading_cleanup, *thread_info)
if __name__ == "__main__":
unittest.main()
| TestPreHandshakeClose |
python | walkccc__LeetCode | solutions/3042. Count Prefix and Suffix Pairs I/3042.py | {
"start": 449,
"end": 596
} | class ____:
def countPrefixSuffixPairs(self, words: list[str]) -> int:
trie = Trie()
return sum(trie.insert(word) for word in words)
| Solution |
python | allegroai__clearml | clearml/backend_api/services/v2_20/models.py | {
"start": 52231,
"end": 63992
} | class ____(Request):
"""
Edit an existing model
:param model: Model ID
:type model: str
:param uri: URI for the model
:type uri: str
:param name: Model name Unique within the company.
:type name: str
:param comment: Model comment
:type comment: str
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
:param framework: Framework on which the model is based. Case insensitive.
Should be identical to the framework of the task which created the model.
:type framework: str
:param design: Json[d] object representing the model design. Should be
identical to the network design of the task which created the model
:type design: dict
:param labels: Json object
:type labels: dict
:param ready: Indication if the model is final and can be used by other tasks
:type ready: bool
:param project: Project to which to model belongs
:type project: str
:param parent: Parent model
:type parent: str
:param task: Associated task ID
:type task: str
:param iteration: Iteration (used to update task statistics)
:type iteration: int
:param metadata: Model metadata
:type metadata: list
"""
_service = "models"
_action = "edit"
_version = "2.20"
_schema = {
"definitions": {
"metadata_item": {
"properties": {
"key": {
"description": "The key uniquely identifying the metadata item inside the given entity",
"type": "string",
},
"type": {
"description": "The type of the metadata item",
"type": "string",
},
"value": {
"description": "The value stored in the metadata item",
"type": "string",
},
},
"type": "object",
}
},
"properties": {
"comment": {"description": "Model comment", "type": "string"},
"design": {
"additionalProperties": True,
"description": "Json[d] object representing the model design. Should be identical to the network design of the task which created the model",
"type": "object",
},
"framework": {
"description": "Framework on which the model is based. Case insensitive. Should be identical to the framework of the task which created the model.",
"type": "string",
},
"iteration": {
"description": "Iteration (used to update task statistics)",
"type": "integer",
},
"labels": {
"additionalProperties": {"type": "integer"},
"description": "Json object",
"type": "object",
},
"model": {"description": "Model ID", "type": "string"},
"name": {
"description": "Model name Unique within the company.",
"type": "string",
},
"parent": {"description": "Parent model", "type": "string"},
"project": {
"description": "Project to which to model belongs",
"type": "string",
},
"ready": {
"description": "Indication if the model is final and can be used by other tasks",
"type": "boolean",
},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": "array",
},
"task": {"description": "Associated task ID", "type": "string"},
"uri": {"description": "URI for the model", "type": "string"},
"metadata": {
"type": "array",
"items": {"$ref": "#/definitions/metadata_item"},
"description": "Model metadata",
},
},
"required": ["model"],
"type": "object",
}
def __init__(
self,
model: str,
uri: Optional[str] = None,
name: Optional[str] = None,
comment: Optional[str] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
framework: Optional[str] = None,
design: Optional[dict] = None,
labels: Optional[dict] = None,
ready: Optional[bool] = None,
project: Optional[str] = None,
parent: Optional[str] = None,
task: Optional[str] = None,
iteration: Optional[int] = None,
metadata: Optional[List[Any]] = None,
**kwargs: Any
) -> None:
super(EditRequest, self).__init__(**kwargs)
self.model = model
self.uri = uri
self.name = name
self.comment = comment
self.tags = tags
self.system_tags = system_tags
self.framework = framework
self.design = design
self.labels = labels
self.ready = ready
self.project = project
self.parent = parent
self.task = task
self.iteration = iteration
self.metadata = metadata
@schema_property("model")
def model(self) -> str:
return self._property_model
@model.setter
def model(self, value: str) -> None:
if value is None:
self._property_model = None
return
self.assert_isinstance(value, "model", six.string_types)
self._property_model = value
@schema_property("uri")
def uri(self) -> Optional[str]:
return self._property_uri
@uri.setter
def uri(self, value: Optional[str]) -> None:
if value is None:
self._property_uri = None
return
self.assert_isinstance(value, "uri", six.string_types)
self._property_uri = value
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("comment")
def comment(self) -> Optional[str]:
return self._property_comment
@comment.setter
def comment(self, value: Optional[str]) -> None:
if value is None:
self._property_comment = None
return
self.assert_isinstance(value, "comment", six.string_types)
self._property_comment = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("framework")
def framework(self) -> Optional[str]:
return self._property_framework
@framework.setter
def framework(self, value: Optional[str]) -> None:
if value is None:
self._property_framework = None
return
self.assert_isinstance(value, "framework", six.string_types)
self._property_framework = value
@schema_property("design")
def design(self) -> Optional[dict]:
return self._property_design
@design.setter
def design(self, value: Optional[dict]) -> None:
if value is None:
self._property_design = None
return
self.assert_isinstance(value, "design", (dict,))
self._property_design = value
@schema_property("labels")
def labels(self) -> Optional[dict]:
return self._property_labels
@labels.setter
def labels(self, value: Optional[dict]) -> None:
if value is None:
self._property_labels = None
return
self.assert_isinstance(value, "labels", (dict,))
self._property_labels = value
@schema_property("ready")
def ready(self) -> Optional[bool]:
return self._property_ready
@ready.setter
def ready(self, value: Optional[bool]) -> None:
if value is None:
self._property_ready = None
return
self.assert_isinstance(value, "ready", (bool,))
self._property_ready = value
@schema_property("project")
def project(self) -> Optional[str]:
return self._property_project
@project.setter
def project(self, value: Optional[str]) -> None:
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
@schema_property("parent")
def parent(self) -> Optional[str]:
return self._property_parent
@parent.setter
def parent(self, value: Optional[str]) -> None:
if value is None:
self._property_parent = None
return
self.assert_isinstance(value, "parent", six.string_types)
self._property_parent = value
@schema_property("task")
def task(self) -> Optional[str]:
return self._property_task
@task.setter
def task(self, value: Optional[str]) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("iteration")
def iteration(self) -> Optional[int]:
return self._property_iteration
@iteration.setter
def iteration(self, value: Optional[int]) -> None:
if value is None:
self._property_iteration = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "iteration", six.integer_types)
self._property_iteration = value
@schema_property("metadata")
def metadata(self) -> Optional[List[Any]]:
return self._property_metadata
@metadata.setter
def metadata(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_metadata = None
return
self.assert_isinstance(value, "metadata", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [MetadataItem.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "metadata", MetadataItem, is_array=True)
self._property_metadata = value
| EditRequest |
python | google__pytype | pytype/overlays/special_builtins.py | {
"start": 5073,
"end": 5505
} | class ____(BuiltinFunction):
"""Implements round."""
_NAME = "round"
def call(self, node, func, args, alias_map=None):
self.match_args(node, args)
node, fn = self.get_underlying_method(node, args.posargs[0], "__round__")
if fn is None:
return super().call(node, func, args, alias_map)
new_args = args.replace(posargs=args.posargs[1:])
return function.call_function(self.ctx, node, fn, new_args)
| Round |
python | mlflow__mlflow | mlflow/utils/autologging_utils/__init__.py | {
"start": 5594,
"end": 9014
} | class ____:
"""
Stores info about the input example collection before it is needed.
For example, in xgboost and lightgbm, an InputExampleInfo object is attached to the dataset,
where its value is read later by the train method.
Exactly one of input_example or error_msg should be populated.
"""
def __init__(self, input_example=None, error_msg=None):
self.input_example = input_example
self.error_msg = error_msg
def resolve_input_example_and_signature(
get_input_example, infer_model_signature, log_input_example, log_model_signature, logger
):
"""Handles the logic of calling functions to gather the input example and infer the model
signature.
Args:
get_input_example: Function which returns an input example, usually sliced from a
dataset. This function can raise an exception, its message will be
shown to the user in a warning in the logs.
infer_model_signature: Function which takes an input example and returns the signature
of the inputs and outputs of the model. This function can raise
an exception, its message will be shown to the user in a warning
in the logs.
log_input_example: Whether to log errors while collecting the input example, and if it
succeeds, whether to return the input example to the user. We collect
it even if this parameter is False because it is needed for inferring
the model signature.
log_model_signature: Whether to infer and return the model signature.
logger: The logger instance used to log warnings to the user during input example
collection and model signature inference.
Returns:
A tuple of input_example and signature. Either or both could be None based on the
values of log_input_example and log_model_signature.
"""
input_example = None
input_example_user_msg = None
input_example_failure_msg = None
if log_input_example or log_model_signature:
try:
input_example = get_input_example()
except Exception as e:
input_example_failure_msg = str(e)
input_example_user_msg = "Failed to gather input example: " + str(e)
model_signature = None
model_signature_user_msg = None
if log_model_signature:
try:
if input_example is None:
raise Exception(
"could not sample data to infer model signature: " + input_example_failure_msg
)
model_signature = infer_model_signature(input_example)
except Exception as e:
model_signature_user_msg = "Failed to infer model signature: " + str(e)
# disable input_example signature inference in model logging if `log_model_signature`
# is set to `False` or signature inference in autologging fails
if (
model_signature is None
and input_example is not None
and (not log_model_signature or model_signature_user_msg is not None)
):
model_signature = False
if log_input_example and input_example_user_msg is not None:
logger.warning(input_example_user_msg)
if log_model_signature and model_signature_user_msg is not None:
logger.warning(model_signature_user_msg)
return input_example if log_input_example else None, model_signature
| InputExampleInfo |
python | sphinx-doc__sphinx | sphinx/config.py | {
"start": 1050,
"end": 2037
} | class ____(NamedTuple):
name: str
value: Any
rebuild: _ConfigRebuild
def is_serializable(obj: object, *, _seen: frozenset[int] = frozenset()) -> bool:
"""Check if an object is serializable or not."""
if isinstance(obj, UNSERIALIZABLE_TYPES):
return False
# use id() to handle un-hashable objects
if id(obj) in _seen:
return True
if isinstance(obj, dict):
seen = _seen | {id(obj)}
return all(
is_serializable(key, _seen=seen) and is_serializable(value, _seen=seen)
for key, value in obj.items()
)
elif isinstance(obj, (list, tuple, set, frozenset)):
seen = _seen | {id(obj)}
return all(is_serializable(item, _seen=seen) for item in obj)
# if an issue occurs for a non-serializable type, pickle will complain
# since the object is likely coming from a third-party extension
# (we natively expect 'simple' types and not weird ones)
return True
| ConfigValue |
python | huggingface__transformers | src/transformers/models/glpn/modeling_glpn.py | {
"start": 21215,
"end": 21885
} | class ____(nn.Module):
r"""
Implements the Scale-invariant log scale loss [Eigen et al., 2014](https://huggingface.co/papers/1406.2283).
$$L=\frac{1}{n} \sum_{i} d_{i}^{2}-\frac{1}{2 n^{2}}\left(\sum_{i} d_{i}^{2}\right)$$ where $d_{i}=\log y_{i}-\log
y_{i}^{*}$.
"""
def __init__(self, lambd=0.5):
super().__init__()
self.lambd = lambd
def forward(self, pred, target):
valid_mask = (target > 0).detach()
diff_log = torch.log(target[valid_mask]) - torch.log(pred[valid_mask])
loss = torch.sqrt(torch.pow(diff_log, 2).mean() - self.lambd * torch.pow(diff_log.mean(), 2))
return loss
| SiLogLoss |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/api_layer/schemas/secret.py | {
"start": 1536,
"end": 1654
} | class ____(BaseModel):
"""GET /api/secrets response."""
items: list[DgApiSecret]
total: int
| DgApiSecretList |
python | neetcode-gh__leetcode | python/0022-generate-parentheses.py | {
"start": 0,
"end": 571
} | class ____:
def generateParenthesis(self, n: int) -> List[str]:
stack = []
res = []
def backtrack(openN, closedN):
if openN == closedN == n:
res.append("".join(stack))
return
if openN < n:
stack.append("(")
backtrack(openN + 1, closedN)
stack.pop()
if closedN < openN:
stack.append(")")
backtrack(openN, closedN + 1)
stack.pop()
backtrack(0, 0)
return res
| Solution |
python | neetcode-gh__leetcode | python/0494-target-sum.py | {
"start": 0,
"end": 516
} | class ____:
def findTargetSumWays(self, nums: List[int], target: int) -> int:
dp = {} # (index, total) -> # of ways
def backtrack(i, total):
if i == len(nums):
return 1 if total == target else 0
if (i, total) in dp:
return dp[(i, total)]
dp[(i, total)] = backtrack(i + 1, total + nums[i]) + backtrack(
i + 1, total - nums[i]
)
return dp[(i, total)]
return backtrack(0, 0)
| Solution |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI063.py | {
"start": 2388,
"end": 2535
} | class ____(type):
@classmethod
def __new__(metacls, name: str, bases: tuple[type, ...], namespace: dict, /, **kwds) -> Self: ...
| GoodMetaclass2 |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-integers-to-choose-from-a-range-i.py | {
"start": 36,
"end": 794
} | class ____(object):
def maxCount(self, banned, n, maxSum):
"""
:type banned: List[int]
:type n: int
:type maxSum: int
:rtype: int
"""
k = min(int((-1+(1+8*maxSum))**0.5/2), n) # k = argmax((k+1)*k//2 <= maxSum)
total = (k+1)*k//2
result = k
lookup = set(banned)
for x in lookup:
if x <= k:
total -= x
result -= 1
for i in xrange(k+1, n+1):
if i in lookup:
continue
if total+i > maxSum:
break
total += i
result += 1
return result
# Time: O(blogb + logn * logb)
# Space: O(b)
import bisect
# binary search, prefix sum
| Solution |
python | huggingface__transformers | src/transformers/utils/dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.py | {
"start": 129,
"end": 392
} | class ____(metaclass=DummyObject):
_backends = ["essentia", "librosa", "pretty_midi", "scipy", "torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["essentia", "librosa", "pretty_midi", "scipy", "torch"])
| Pop2PianoFeatureExtractor |
python | pypa__pip | src/pip/_internal/index/sources.py | {
"start": 6039,
"end": 8639
} | class ____(LinkSource):
"""``--[extra-]index-url=<path-to-directory>``.
This is treated like a remote URL; ``candidates_from_page`` contains logic
for this by appending ``index.html`` to the link.
"""
def __init__(
self,
candidates_from_page: CandidatesFromPage,
link: Link,
) -> None:
self._candidates_from_page = candidates_from_page
self._link = link
@property
def link(self) -> Link | None:
return self._link
def page_candidates(self) -> FoundCandidates:
yield from self._candidates_from_page(self._link)
def file_links(self) -> FoundLinks:
return ()
def build_source(
location: str,
*,
candidates_from_page: CandidatesFromPage,
page_validator: PageValidator,
expand_dir: bool,
cache_link_parsing: bool,
project_name: str,
) -> tuple[str | None, LinkSource | None]:
path: str | None = None
url: str | None = None
if os.path.exists(location): # Is a local path.
url = path_to_url(location)
path = location
elif location.startswith("file:"): # A file: URL.
url = location
path = url_to_path(location)
elif is_url(location):
url = location
if url is None:
msg = (
"Location '%s' is ignored: "
"it is either a non-existing path or lacks a specific scheme."
)
logger.warning(msg, location)
return (None, None)
if path is None:
source: LinkSource = _RemoteFileSource(
candidates_from_page=candidates_from_page,
page_validator=page_validator,
link=Link(url, cache_link_parsing=cache_link_parsing),
)
return (url, source)
if os.path.isdir(path):
if expand_dir:
source = _FlatDirectorySource(
candidates_from_page=candidates_from_page,
path=path,
project_name=project_name,
)
else:
source = _IndexDirectorySource(
candidates_from_page=candidates_from_page,
link=Link(url, cache_link_parsing=cache_link_parsing),
)
return (url, source)
elif os.path.isfile(path):
source = _LocalFileSource(
candidates_from_page=candidates_from_page,
link=Link(url, cache_link_parsing=cache_link_parsing),
)
return (url, source)
logger.warning(
"Location '%s' is ignored: it is neither a file nor a directory.",
location,
)
return (url, None)
| _IndexDirectorySource |
python | tensorflow__tensorflow | tensorflow/python/eager/polymorphic_function/concrete_function.py | {
"start": 45210,
"end": 75037
} | class ____(core.ConcreteFunction, trackable.Trackable):
"""A `tf.types.experimental.ConcreteFunction` created from `tf.function`."""
def __init__(
self, atomic_fn: atomic_function.AtomicFunction, shared_func_graph=True
):
"""Initialize a `ConcreteFunction`.
Args:
atomic_fn: Inference atomic function to form basis of forward pass.
shared_func_graph: If False, the ConcreteFunction takes ownership of
`func_graph` and will break reference cycles when it is deleted. This
makes the FuncGraph inoperable.
Raises:
ValueError: If number of input_placeholders is not equal to the number
of function inputs.
"""
# _arg_keywords and _num_positional_args define the flat signature. They
# are assigned after construction.
self._arg_keywords = None
self._num_positional_args = None
self._func_graph = atomic_fn.graph
self._captured_inputs = (
self._func_graph.external_captures
+ self._func_graph.deferred_external_captures
)
self._function_type = atomic_fn.function_type
self._output_shapes = tuple(
output.shape for output in self._func_graph.outputs)
self._attrs = attributes_lib.parse_func_attrs(
atomic_fn.attributes or {}
)
if shared_func_graph:
self._garbage_collector = None
else:
self._garbage_collector = ConcreteFunctionGarbageCollector(
atomic_fn.graph
)
# Pairs of forward and backward functions used for computing gradients.
#
# These each get a reference to the FuncGraph deleter since they use the
# FuncGraph directly.
self._delayed_rewrite_functions = _DelayedRewriteGradientFunctions(
atomic_fn, self._garbage_collector)
self._first_order_tape_functions = {}
self._higher_order_tape_functions = {}
# Cache the inference function to avoid a (Python) function call when not
# building gradients.
self._inference_function = self._delayed_rewrite_functions.forward()
@classmethod
def from_func_graph(cls, graph, function_type, attrs, shared_func_graph=True):
atomic_fn = atomic_function.from_func_graph(
_inference_name(graph.name), graph, attrs, function_type
)
return ConcreteFunction(atomic_fn, shared_func_graph=shared_func_graph)
@property
def function_type(self):
"""Return the FunctionType associated with this ConcreteFunction."""
return self._function_type
@property
def inference_fn(self):
"""Return the inference function associated with this ConcreteFunction."""
return self._inference_function
# TODO(fmuham): Remove this property.
@property
def _function_spec(self):
if self.function_type is None:
return None
return function_type_utils.FunctionSpec(
self.function_type,
{
p.default
for p in self.function_type.parameters.values()
if p.optional
},
False,
name=self.name,
)
@property
def variables(self):
"""Sequence of variables for this function."""
return tuple(self._func_graph.variables)
def set_variables(self, variables):
self._func_graph.variables = variables
@property
def trainable_variables(self):
"""Sequence of trainable variables for this function."""
return tuple(self._func_graph.trainable_variables)
def __call__(self, *args, **kwargs):
"""Executes the wrapped function.
ConcreteFunctions have two signatures:
* The signature of the original function wrapped by this ConcreteFunction.
* A flat signature, where each argument accepts a single Tensor.
The original function signature is generally preferred, but the flat input
signature is supported for backward compatibility.
### Original Function Signature
When calling a ConcreteFunction with the signature of the original function,
each argument must match the type or value that was used when the
ConcreteFunction's graph was traced. In particular:
* Tensor arguments (including CompositeTensors, such as RaggedTensor) must
have matching `TypeSpec`s.
* Non-Tensor arguments (such as booleans or ints) must have equal values.
* Nested arguments (such as lists, tuples, or dictionaries) must have the
same nesting structure; and each nested value must have a matching type
or value.
The default value for any arguments that were traced with non-Tensor values
is the value that was used in the trace. Arguments that were traced with
tensor arguments do not have a default value (even if the original function
had a default value for that argument).
### Flat Signature
When calling a ConcreteFunction with the flat signature, the arguments
correspond to the flattened component tensors of the arguments that were
used to construct the ConcreteFunction. Parameter names are assigned based
on `TensorSpec.name` (when specified) or the original argument names (with
suffixes automatically added for nested arguments or composite tensors with
multiple components).
Args:
*args: Positional arguments to the concrete function.
**kwargs: Keyword arguments to the concrete function.
Returns:
The result of applying the TF function on the given Tensors.
Raises:
AssertionError: If this `ConcreteFunction` was not created through
`get_concrete_function`.
TypeError: If the arguments do not match the function's signature.
"""
return self._call_impl(args, kwargs)
def _call_impl(self, args, kwargs):
"""See `__call__` for details."""
with trace.Trace(self._func_graph.name, tf_function_call="concrete"):
# Construct the list of input tensors: check if the structured signature
# applies first; and if not, then use the flat signature.
if self.function_type is not None:
try:
return self._call_with_structured_signature(args, kwargs)
except TypeError as structured_err:
try:
return self._call_with_flat_signature(args, kwargs)
except (TypeError, ValueError) as flat_err:
raise TypeError( # pylint: disable=raise-missing-from
str(structured_err)
+ "\nFallback to flat signature also failed due to: "
+ str(flat_err)
)
return self._call_with_flat_signature(args, kwargs)
def _call_with_flat_signature(self, args, kwargs):
"""Executes the wrapped function with the flat signature.
Args:
args: Positional arguments to the concrete function.
kwargs: Keyword arguments to the concrete function.
Returns:
The result of applying the function on the Tensors/Variables contained in
`args` and `kwargs`.
Raises:
TypeError: if `args` and `kwargs` do not match the flat signature of this
`ConcreteFunction`.
"""
if len(args) > self._num_positional_args:
raise TypeError(
f"{self._flat_signature_summary()} takes {self._num_positional_args} "
f"positional arguments, got {len(args)}.")
args = list(args)
kwargs = dict(kwargs)
kwargs = {
function_type_lib.sanitize_arg_name(k): v for k, v in kwargs.items()
}
for keyword in self._arg_keywords[len(args):]:
try:
args.append(
kwargs.pop(
function_type_lib.sanitize_arg_name(compat.as_str(keyword))))
except KeyError:
specified_keywords = (
list(self._arg_keywords[:len(args)]) + list(kwargs.keys()))
missing_required_args = sorted(
set(self._arg_keywords) - set(specified_keywords))
raise TypeError(f"{self._flat_signature_summary()} missing required "
f"arguments: {', '.join(missing_required_args)}.")
if kwargs:
positional_arg_keywords = set(self._arg_keywords[:len(args)])
for unused_key in kwargs:
if unused_key in positional_arg_keywords:
raise TypeError(f"{self._flat_signature_summary()} got two values "
f"for '{unused_key}'.")
raise TypeError(f"{self._flat_signature_summary()} got unexpected "
f"keyword arguments: {', '.join(sorted(kwargs))}.")
for i, arg in enumerate(args):
if not isinstance(
arg, (tensor_lib.Tensor, resource_variable_ops.BaseResourceVariable)):
raise TypeError(f"{self._flat_signature_summary()}: expected argument "
f"#{i}(zero-based) to be a Tensor; "
f"got {type(arg).__name__} ({arg}).")
return self._call_flat(args, self.captured_inputs)
def _call_with_structured_signature(self, args, kwargs):
"""Executes the wrapped function with the structured signature.
Args:
args: Positional arguments to the concrete function.
kwargs: Keyword arguments to the concrete function.
Returns:
The result of applying the function on the Tensors/Variables contained in
`args` and `kwargs`.
Raises:
TypeError: if `args` and `kwargs` do not match the structured signature
of this `ConcreteFunction`.
"""
bound_args = (
function_type_utils.canonicalize_function_inputs(
args, kwargs, self.function_type)
)
filtered_flat_args = self.function_type.unpack_inputs(bound_args)
return self._call_flat(
filtered_flat_args,
captured_inputs=self.captured_inputs)
def _call_flat(self, tensor_inputs, captured_inputs):
"""Executes the wrapped function.
Args:
tensor_inputs: a list of only Tensors generated from args, kwargs.
captured_inputs: the captured inputs that are also part of the input args
to the actual execution. By default, it should be self._captured_inputs.
Returns:
The result of applying the TF function to `args`.
Raises:
ValueError: If `args` contains anything other than Tensors or Variables.
"""
ctx = context.context()
executing_eagerly = ctx.executing_eagerly()
# Copy saveable status of function's graph to current FuncGraph.
default_graph = ops.get_default_graph()
if default_graph.building_function and not self._func_graph.saveable:
default_graph.mark_as_unsaveable(self._func_graph.saving_errors)
if (record.could_possibly_record() or
hasattr(default_graph, "watch_variable")):
for v in self._func_graph.variables:
resource_variable_ops.variable_accessed(v)
# TODO(fmuham): check in eager mode too.
if not executing_eagerly:
for i, tensor_input in enumerate(tensor_inputs):
# Can not compare shapes in these cases
# TODO(b/216506654): Consider moving this check elsewhere and making it
# work for all types (e.g. by including shape for Variables).
if (tensor_input.dtype == dtypes.resource or
tensor_input.dtype == dtypes.variant):
continue
# If we're graph building, shape inference is on. We check for input
# compatibility up front to avoid hard to debug incompatibilities
# later.
graph_input_shape = tensor_shape.TensorShape(
self._func_graph.inputs[i].shape)
if not graph_input_shape.is_compatible_with(tensor_input.shape):
raise ValueError(
f"Tensor {tensor_input} is not compatible with the shape this "
f"function was traced with. Expected shape "
f"{self._func_graph.inputs[i].shape}, but got shape "
f"{tensor_input.shape}.\n\nIf you called get_concrete_function, "
f"you may need to pass a tf.TensorSpec(..., shape=...) with a "
f"less specific shape, having None on axes which can vary.")
args = tensor_inputs + captured_inputs
possible_gradient_type = gradients_util.PossibleTapeGradientTypes(args)
if (possible_gradient_type == gradients_util.POSSIBLE_GRADIENT_TYPES_NONE
and executing_eagerly):
# No tape is watching; skip to running the function.
return self._inference_function.call_preflattened(args)
forward_backward = self._select_forward_and_backward_functions(
args,
possible_gradient_type,
executing_eagerly)
forward_function, args_with_tangents = forward_backward.forward()
if executing_eagerly:
flat_outputs = forward_function.call_flat(*args_with_tangents)
else:
with default_graph._override_gradient_function( # pylint: disable=protected-access
{"PartitionedCall": self._get_gradient_function(),
"StatefulPartitionedCall": self._get_gradient_function()}):
flat_outputs = forward_function.call_flat(*args_with_tangents)
forward_backward.record(flat_outputs)
return self.function_type.pack_output(flat_outputs)
@property
def name(self):
"""`ConcreteFunction` name."""
return self._delayed_rewrite_functions.forward().name
@property
def graph(self):
"""Returns the graph from which this function was constructed."""
return self._func_graph
@property
def inputs(self):
"""Returns tensors in `self.graph` corresponding to arguments."""
return self._func_graph.inputs
@property
def structured_input_signature(self):
"""Returns structured signature for this concrete function.
Returns:
A tuple `(args, kwargs)`, where:
* `args` is a tuple that specifies the expected type or value each for
positional argument.
* `kwargs` is a dictionary that specifies the expected type or value
for each keyword-only argument.
The type or value for each argument is specified using one of the
following:
* A `tf.TypeSpec`, indicating that a Tensor or other TensorFlow-native
value is expected.
* A Python value, such as an integer, indicating that an equal value
is expected.
* A nested structure of `tf.TypeSpec`s and Python values, indicating
that a corresponding nested structure is expected.
"""
return self._func_graph.structured_input_signature
@property
def outputs(self):
"""Returns tensors in `self.graph` corresponding to returned tensors."""
return self._func_graph.outputs
@property
def structured_outputs(self):
"""Returns outputs in `self.graph` as returned by the original function."""
return self._func_graph.structured_outputs
def set_external_captures(self, captures):
"""Updates the function capture values.
The new values must have tensor types and shapes consistent with the
original captures of the concrete function, but it is allowed to change a
value captured with a deferred one and vice-versa.
Args:
captures: A list of tensors or closures. Tensors are value captures, and
closures are call-time (deferred captures).
"""
# TODO(wxinyi): 1. verify that the new captures' type spec is compatible
# with the original's. However, doing so requires MirroredVariable captures
# initialized. 2. replace the original/new captures/deferred
# captures in the wrapped graph. Doing such for a capture-to-deferred
# capture replacement requires more arguments than the deferred capture
# itself, e.g. default value, spec.
self._captured_inputs = captures
def replace_capture_with_deferred_capture(self,
tensor,
closure,
spec,
placeholder=None,
default_value=None):
"""Replaces existing capture `tensor` with a deferred capture `closure`.
This API replaces the capture `tensor` from the concrete function's captured
inputs list, and places the deferred capture `closure` in
its spot so the order of captured inputs is preserved. This is important
because the old `tensor` and the new `closure` will have the same internal
placeholder, which can be passed through the `placeholder` argument, or
skipped, in which case we find the placeholder from internal inputs by
indexing `tensor` in the external captured inputs list. Thus, it is
important that the new deferred capture has output spec (specified by the
`spec` argument) compatible with the internal placeholder (`placeholder`)
and the original capture (`tensor`).
For example,
```python
bool_captured_tensor = tf.constant(True)
float_captured_tensor = tf.constant([3.], dtype=tf.float32)
value = tf.constant([2.], dtype=tf.float32)
@tf.function
def fn():
deferred_tensor = ops.get_default_graph().capture_call_time_value(
lambda: value,
tf.TensorSpec(shape=(1,), dtype=tf.float32))
if bool_captured_tensor:
return deferred_tensor
else:
return deferred_tensor + float_captured_tensor
concrete_fn = fn.get_concrete_function()
print(concrete_fn()) # tf.Tensor([2.], shape=(1,), dtype=float32)
new_bool_captured_tensor = constant_op.constant(False)
def bool_closure():
return new_bool_captured_tensor
concrete_fn.replace_capture_with_deferred_capture(
bool_captured_tensor,
bool_closure,
spec=tensor_lib.TensorSpec(shape=(), dtype=dtypes.bool))
print(concrete_fn()) # tf.Tensor([5.], shape=(1,), dtype=float32)
```
Args:
tensor: Tensor already captured. This `tensor` should be listed in
concrete_function.captured_inputs except when it's empty such as when
the concrete function is restored from SavedModel.
closure: function which takes no arguments, to be evaluated at function
call time, returning a nest of tensors compatible with `spec`.
spec: nest of TypeSpec for the value to capture.
placeholder: optional. The internal placeholder corresponding to the
captured `tensor` and the new `closure`.
default_value: optional value to use in environments that cannot safely
evaluate closure.
"""
capture_index = None
for i, capture in enumerate(self._captured_inputs):
if id(tensor) == id(capture):
capture_index = i
break
if placeholder is None:
if capture_index is None:
raise ValueError(
f"Did not find `tensor` argument {tensor} in the ConcreteFunction's"
" captured inputs list, and did not receive a placeholder argument."
" Thus we're unable to infer the internal placeholder. ")
placeholder = self.inputs[-len(self._captured_inputs) + capture_index]
if not (spec.is_compatible_with(tensor) or
spec.is_compatible_with(placeholder)):
raise ValueError(
f"Attempting to substitute closure with spec {spec} that's "
f"incompatible with the original capture {tensor} or the internal "
f"placeholder {placeholder}.")
self._func_graph.replace_capture_with_deferred_capture(
tensor=tensor,
closure=closure,
spec=spec,
placeholder=placeholder,
default_value=default_value)
if capture_index is not None:
self._captured_inputs[capture_index] = closure
@property
def captured_inputs(self):
"""Returns external Tensors captured by this function.
self.__call__(*args) passes `args + self.captured_inputs` to the function.
"""
return nest.flatten(
[x() if callable(x) else x for x in self._captured_inputs],
expand_composites=True)
@property
def function_def(self):
"""Returns a `FunctionDef` object representing this function."""
return self._delayed_rewrite_functions.forward().cached_definition
@property
def output_shapes(self):
"""The function's output shapes."""
return nest.map_structure(
lambda x: getattr(x, "shape", tensor_shape.TensorShape(None)),
composite_tensor.replace_composites_with_components(
self._func_graph.structured_outputs),
expand_composites=False)
@property
def output_dtypes(self):
# TODO(akshayka): Consider removing this.
return nest.map_structure(
lambda x: x.dtype if x is not None else None,
composite_tensor.replace_composites_with_components(
self._func_graph.structured_outputs),
expand_composites=False)
def add_to_graph(self, g=None, overwrite=False):
"""Registers the function, adds it to the graph g or default graph.
Args:
g: If specified, registers the function with this graph. Defaults to the
current context (either the default graph or the eager context).
overwrite: A bool. If True, its forward function will overwrite
any existing function of the same signature name in the graph `g`.
"""
# If we are not executing eagerly, adds the function to default graph if no
# graph is specified.
# In case of eager execution, function definition gets added to context
# during construction itself.
if not context.executing_eagerly() and not g:
g = ops.get_default_graph()
if g is not None:
g._add_function_recursive(self._delayed_rewrite_functions.forward()) # pylint: disable=protected-access
def add_gradient_functions_to_graph(self, g=None):
"""Add forward/backward functions to graph `g` or the current context."""
if not context.executing_eagerly() and not g:
g = ops.get_default_graph()
g._add_function_recursive(self._delayed_rewrite_functions.forward()) # pylint: disable=protected-access
forward_function, backward_function = (
self._delayed_rewrite_functions.forward_backward())
g._add_function_recursive(forward_function) # pylint: disable=protected-access
backward_function.add_to_graph(g)
  def _get_gradient_function(self):
    """Returns gradient function. It will be lazily created at first call."""
    # Delegates to the delayed-rewrite machinery, which rewrites the forward
    # function and calls the matching backward function on demand.
    return self._delayed_rewrite_functions._rewrite_forward_and_call_backward  # pylint: disable=protected-access
  def _select_forward_and_backward_functions(
      self, args, possible_gradient_type, executing_eagerly):
    """Selects forward and backward functions based on the calling context.

    The forward function computes the "real" function outputs, `self._outputs`,
    and any extra values needed by the corresponding backward function.

    Args:
      args: A flat list of Tensors with all of the inputs to the forward
        function (including user-specified and captured inputs).
      possible_gradient_type: One of gradients_util.POSSIBLE_GRADIENT_TYPES_*.
      executing_eagerly: Boolean, the value of context.executing_eagerly().

    Returns:
      An object with a `forward` method returning a tuple of (forward_function :
      AtomicFunction, augmented_arguments : List), and a corresponding
      `record` method which takes outputs from the forward function and records
      the operation. forward_function should be called with augmented_arguments.
    """
    # In eager mode, collect any forward-accumulator (JVP) tangents attached
    # to `args`; in graph mode there are none to pack here.
    if executing_eagerly:
      input_tangents = forwardprop_util.pack_tangents(args)
    else:
      input_tangents = forwardprop_util.TangentInfo()
    need_gradients_for_jvps = record.should_record_backprop(
        input_tangents.tangents)
    # Allows re-use of forward and backward function pairs depending on the
    # tapes and forward accumulators watching its inputs.
    cache_key = (need_gradients_for_jvps, input_tangents.indices)
    if (possible_gradient_type
        == gradients_util.POSSIBLE_GRADIENT_TYPES_FIRST_ORDER):
      if input_tangents.indices or executing_eagerly:
        # There is a single non-persistent tape active, so the user can only
        # request first-order gradients from a tape. We can spend less time
        # graph building since we know this.
        #
        # We may still end up computing higher-order gradients, but that'd be
        # through `tf.gradients`, which can re-write the forward pass and so
        # needs no preparation here.
        functions = self._first_order_tape_functions.get(cache_key, None)
        if functions is None:
          functions = _FirstOrderTapeGradientFunctions(
              self._func_graph, self._attrs, self._garbage_collector,
              forwardprop_input_indices=input_tangents.indices,
              delayed_rewrite_functions=self._delayed_rewrite_functions,
              need_gradients_for_jvps=need_gradients_for_jvps)
          self._first_order_tape_functions[cache_key] = functions
        return _ForwardBackwardCall(
            functions, args, input_tangents.tangents, tape_watching=True)
      else:
        # We can avoid computing second-order gradients in some cases by doing a
        # delayed rewrite when graph building. Since we know we'll only compute
        # first-order tape gradients, the delayed rewrite is safe: we won't need
        # to tell the tape about side outputs.
        #
        # TODO(allenl): This case is really dirty. It would be better if we
        # could temporarily pop all of the current tapes to avoid
        # accidentally taking second-order gradients.
        return _ForwardBackwardCall(
            self._delayed_rewrite_functions, args, input_tangents.tangents,
            tape_watching=True)
    elif (possible_gradient_type
          == gradients_util.POSSIBLE_GRADIENT_TYPES_HIGHER_ORDER):
      # Either there's a persistent tape watching, or there are multiple nested
      # tapes. Either way, the user may request higher-order gradients. We'll
      # spend a bit more time and make sure higher-order gradients are correct.
      functions = self._higher_order_tape_functions.get(
          cache_key, None)
      if functions is None:
        functions = _HigherOrderTapeGradientFunctions(
            self._func_graph, self._attrs, self._garbage_collector,
            forwardprop_input_indices=input_tangents.indices,
            delayed_rewrite_functions=self._delayed_rewrite_functions,
            need_gradients_for_jvps=need_gradients_for_jvps)
        self._higher_order_tape_functions[cache_key] = functions
      return _ForwardBackwardCall(functions, args, input_tangents.tangents,
                                  tape_watching=True)
    # else possible_gradient_type == POSSIBLE_GRADIENT_TYPES_NONE, meaning no
    # tape is recording.
    return _ForwardBackwardCall(
        self._delayed_rewrite_functions, args, input_tangents.tangents,
        tape_watching=False)
@property
def _as_name_attr_list(self):
"""Returns a `NameAttrList` representing this function."""
ret = attr_value_pb2.NameAttrList(name=self.name)
for name, value in self._attrs.items():
ret.attr[name].CopyFrom(value)
return ret
def _flat_signature_summary(self):
"""Returns a string summarizing this function's flat signature."""
assert self._arg_keywords is not None
assert self._num_positional_args is not None
arg_names = self._arg_keywords
if self._num_positional_args > len(arg_names):
arg_names.extend(
"<arg{}>".format(i + 1)
for i in range(len(arg_names), self._num_positional_args))
return f"{self._func_graph.name}({', '.join(arg_names)})"
def pretty_printed_signature(self, verbose=True):
"""Returns a string summarizing the signature of this concrete function."""
assert self.function_type is not None
if verbose:
return repr(self.function_type)
else:
return str(self.function_type)
def __repr__(self):
if self.function_type is not None:
return "<ConcreteFunction {} at 0x{:X}>".format(
self.pretty_printed_signature(verbose=False), id(self)
)
elif not (self._num_positional_args is None or self._arg_keywords is None):
return "<ConcreteFunction {} at 0x{:X}>".format(
self._flat_signature_summary(), id(self)
)
else:
return object.__repr__(self)
def __str__(self):
if self.function_type is not None:
return "ConcreteFunction {}".format(
self.pretty_printed_signature(verbose=True)
)
else:
return self.__repr__()
def _trackable_children(self, save_type="checkpoint", **kwargs):
"""Implements `Trackable`."""
if save_type == "checkpoint":
# Checkpoint dependencies do not include functions at all. Users
# expect the checkpointed variables to be saved using the model
# architecture, e.g. `model.layers[1].kernel` or `model.variables`.
return {}
captured_trackables = {}
for n, (capture, _) in enumerate(self.graph.captures):
if (capture.dtype not in (dtypes.variant, dtypes.resource) and
not resource_variable_ops.is_resource_variable(capture)):
# Variant/resource type tensors are skipped since we have no way of
# getting the `Trackable` wrapper for these tensors. The wrappers are
# expected to be elsewhere in the saved object graph.
# TODO(b/223866972): Directly encode/decode tensor captures.
# Resource variable captures are also skipped at this time, to maintain
# existing behavior.
# TODO(b/217979389): Return the non-constant captures as children.
captured_trackables[f"capture_{n}"] = capture
return captured_trackables
  def _deserialization_dependencies(self, children):
    """Implements `Trackable`: every child is a deserialization dependency."""
    return children
def _export_to_saved_model_graph(self, object_map, tensor_map,
**unused_kwargs):
if not self.graph.saveable:
raise ValueError(
(f"Unable to save function {self.name} for the following reason(s):\n"
+ "\n".join(self.graph.saving_errors)))
self.add_to_graph()
object_map[self] = saved_model_exported_concrete.ExportedConcreteFunction(
self, tensor_map)
return []
| ConcreteFunction |
python | sympy__sympy | sympy/core/operations.py | {
"start": 20777,
"end": 26087
class ____:
    """
    Handler dispatcher for associative operators

    .. notes::
       This approach is experimental, and can be replaced or deleted in the future.
       See https://github.com/sympy/sympy/pull/19463.

    Explanation
    ===========

    If arguments of different types are passed, the classes which handle the operation for each type
    are collected. Then, a class which performs the operation is selected by recursive binary dispatching.
    Dispatching relation can be registered by ``register_handlerclass`` method.

    Priority registration is unordered. You cannot make ``A*B`` and ``B*A`` refer to
    different handler classes. All logic dealing with the order of arguments must be implemented
    in the handler class.

    Examples
    ========

    >>> from sympy import Add, Expr, Symbol
    >>> from sympy.core.add import add
    >>> class NewExpr(Expr):
    ...     @property
    ...     def _add_handler(self):
    ...         return NewAdd
    >>> class NewAdd(NewExpr, Add):
    ...     pass
    >>> add.register_handlerclass((Add, NewAdd), NewAdd)
    >>> a, b = Symbol('a'), NewExpr()
    >>> add(a, b) == NewAdd(a, b)
    True

    """
    def __init__(self, name, doc=None):
        self.name = name
        self.doc = doc
        # e.g. "_add_handler"; each argument class exposes its handler class
        # through this attribute.
        self.handlerattr = "_%s_handler" % name
        self._handlergetter = attrgetter(self.handlerattr)
        self._dispatcher = Dispatcher(name)

    def __repr__(self):
        return "<dispatched %s>" % self.name

    def register_handlerclass(self, classes, typ, on_ambiguity=ambiguity_register_error_ignore_dup):
        """
        Register the handler class for two classes, in both straight and reversed order.

        Parameters
        ===========

        classes : tuple of two types
            Classes who are compared with each other.

        typ:
            Class which is registered to represent *cls1* and *cls2*.
            Handler method of *self* must be implemented in this class.
        """
        if not len(classes) == 2:
            raise RuntimeError(
                "Only binary dispatch is supported, but got %s types: <%s>." % (
                len(classes), str_signature(classes)
            ))
        if len(set(classes)) == 1:
            raise RuntimeError(
                "Duplicate types <%s> cannot be dispatched." % str_signature(classes)
            )
        # Register both argument orders so that dispatch is symmetric.
        self._dispatcher.add(tuple(classes), typ, on_ambiguity=on_ambiguity)
        self._dispatcher.add(tuple(reversed(classes)), typ, on_ambiguity=on_ambiguity)

    @cacheit
    def __call__(self, *args, _sympify=True, **kwargs):
        """
        Parameters
        ==========

        *args :
            Arguments which are operated
        """
        if _sympify:
            args = tuple(map(_sympify_, args))
        # Collect the handler class of each argument; duplicates collapse in
        # the frozenset, which also makes the key hashable for dispatch caching.
        handlers = frozenset(map(self._handlergetter, args))

        # no need to sympify again
        return self.dispatch(handlers)(*args, _sympify=False, **kwargs)

    @cacheit
    def dispatch(self, handlers):
        """
        Select the handler class, and return its handler method.
        """

        # Quick exit for the case where all handlers are same
        if len(handlers) == 1:
            h, = handlers
            if not isinstance(h, type):
                raise RuntimeError("Handler {!r} is not a type.".format(h))
            return h

        # Recursively select with registered binary priority
        # (fold the handler set pairwise through the binary dispatcher).
        for i, typ in enumerate(handlers):

            if not isinstance(typ, type):
                raise RuntimeError("Handler {!r} is not a type.".format(typ))

            if i == 0:
                handler = typ
            else:
                prev_handler = handler
                handler = self._dispatcher.dispatch(prev_handler, typ)

                if not isinstance(handler, type):
                    raise RuntimeError(
                        "Dispatcher for {!r} and {!r} must return a type, but got {!r}".format(
                        prev_handler, typ, handler
                    ))

        # return handler class
        return handler

    @property
    def __doc__(self):
        # Build the documentation dynamically from the registered handlers.
        docs = [
            "Multiply dispatched associative operator: %s" % self.name,
            "Note that support for this is experimental, see the docs for :class:`AssocOpDispatcher` for details"
        ]

        if self.doc:
            docs.append(self.doc)

        s = "Registered handler classes\n"
        s += '=' * len(s)
        docs.append(s)

        amb_sigs = []

        typ_sigs = defaultdict(list)
        for sigs in self._dispatcher.ordering[::-1]:
            key = self._dispatcher.funcs[sigs]
            typ_sigs[key].append(sigs)

        for typ, sigs in typ_sigs.items():

            sigs_str = ', '.join('<%s>' % str_signature(sig) for sig in sigs)

            if isinstance(typ, RaiseNotImplementedError):
                amb_sigs.append(sigs_str)
                continue

            s = 'Inputs: %s\n' % sigs_str
            s += '-' * len(s) + '\n'
            s += typ.__name__
            docs.append(s)

        if amb_sigs:
            s = "Ambiguous handler classes\n"
            s += '=' * len(s)
            docs.append(s)

            s = '\n'.join(amb_sigs)
            docs.append(s)

        return '\n\n'.join(docs)
| AssocOpDispatcher |
python | keras-team__keras | keras/src/trainers/data_adapters/py_dataset_adapter_test.py | {
"start": 2915,
"end": 15525
class ____(testing.TestCase):
    """Behavioral tests for `py_dataset_adapter.PyDatasetAdapter`.

    Covers the basic iteration flow across all backends and worker modes,
    class weighting, multi-worker speedup, dict-structured inputs, variable
    batch shapes, exception propagation, and finite/infinite iteration.
    """

    @parameterized.named_parameters(
        named_product(
            [
                {
                    "testcase_name": "multiprocessing",
                    "workers": 2,
                    "use_multiprocessing": True,
                    "max_queue_size": 10,
                    "dataset_type": "np",
                },
                {
                    "testcase_name": "multithreading",
                    "workers": 2,
                    "use_multiprocessing": False,
                    "max_queue_size": 10,
                    "dataset_type": "np",
                },
                {
                    "testcase_name": "single_np",
                    "dataset_type": "np",
                },
                {
                    "testcase_name": "single_tf",
                    "dataset_type": "tf",
                },
                {
                    "testcase_name": "single_jax",
                    "dataset_type": "jax",
                },
                {
                    "testcase_name": "single_torch",
                    "dataset_type": "torch",
                },
            ],
            infinite=[True, False],
            shuffle=[True, False],
        )
    )
    def test_basic_flow(
        self,
        shuffle,
        dataset_type,
        infinite,
        workers=0,
        use_multiprocessing=False,
        max_queue_size=0,
    ):
        if use_multiprocessing and shuffle:
            pytest.skip("Starting processes is slow, test fewer variants")
        set_random_seed(1337)
        x = np.random.random((64, 4)).astype("float32")
        # y[i] == [i, i], so y values encode the sample index and let us
        # recover the visitation order below.
        y = np.array([[i, i] for i in range(64)], dtype="float32")
        CPU_DEVICES = {
            "tensorflow": "CPU:0",
            "jax": "cpu:0",
            "torch": "cpu",
            "numpy": "cpu",
        }
        # Create the source tensors on CPU so worker processes/threads can
        # access them regardless of accelerator availability.
        with backend.device(CPU_DEVICES[backend.backend()]):
            if dataset_type == "tf":
                x, y = tf.constant(x), tf.constant(y)
            elif dataset_type == "jax":
                x, y = jax.numpy.array(x), jax.numpy.array(y)
            elif dataset_type == "torch":
                x, y = torch.as_tensor(x), torch.as_tensor(y)
        py_dataset = ExamplePyDataset(
            x,
            y,
            batch_size=16,
            workers=workers,
            use_multiprocessing=use_multiprocessing,
            max_queue_size=max_queue_size,
            infinite=infinite,
        )
        adapter = py_dataset_adapter.PyDatasetAdapter(
            py_dataset, shuffle=shuffle
        )
        if backend.backend() == "numpy":
            it = adapter.get_numpy_iterator()
            expected_class = np.ndarray
        elif backend.backend() == "tensorflow":
            it = adapter.get_tf_dataset()
            expected_class = tf.Tensor
        elif backend.backend() == "jax":
            it = adapter.get_jax_iterator()
            expected_class = jax.Array if dataset_type == "jax" else np.ndarray
        elif backend.backend() == "torch":
            it = adapter.get_torch_dataloader()
            expected_class = torch.Tensor

        sample_order = []
        adapter.on_epoch_begin()
        for batch in it:
            self.assertEqual(len(batch), 2)
            bx, by = batch
            self.assertIsInstance(bx, expected_class)
            self.assertIsInstance(by, expected_class)
            self.assertEqual(bx.dtype, by.dtype)
            self.assertContainsExactSubsequence(str(bx.dtype), "float32")
            self.assertEqual(bx.shape, (16, 4))
            self.assertEqual(by.shape, (16, 2))
            for i in range(by.shape[0]):
                sample_order.append(by[i, 0])
            if infinite:
                # An infinite dataset never raises StopIteration; simulate
                # two epochs and then exit manually.
                if len(sample_order) == 64:
                    adapter.on_epoch_end()
                    adapter.on_epoch_begin()
                elif len(sample_order) >= 128:
                    break
        adapter.on_epoch_end()
        expected_order = list(range(64))
        if infinite:
            self.assertAllClose(sample_order, expected_order + expected_order)
        elif shuffle:
            # Shuffled: the order differs but every sample appears once.
            self.assertNotAllClose(sample_order, expected_order)
            self.assertAllClose(sorted(sample_order), expected_order)
        else:
            self.assertAllClose(sample_order, expected_order)

    # TODO: test sample weights
    # TODO: test inference mode (single output)

    def test_class_weight(self):
        x = np.random.randint(1, 100, (4, 5))
        y = np.array([0, 1, 2, 1])
        class_w = {0: 2, 1: 1, 2: 3}
        py_dataset = ExamplePyDataset(x, y, batch_size=2)
        adapter = py_dataset_adapter.PyDatasetAdapter(
            py_dataset, shuffle=False, class_weight=class_w
        )
        if backend.backend() == "numpy":
            gen = adapter.get_numpy_iterator()
        elif backend.backend() == "tensorflow":
            gen = adapter.get_tf_dataset()
        elif backend.backend() == "jax":
            gen = adapter.get_jax_iterator()
        elif backend.backend() == "torch":
            gen = adapter.get_torch_dataloader()
        for index, batch in enumerate(gen):
            # Batch is a tuple of (x, y, class_weight)
            self.assertLen(batch, 3)
            batch = [backend.convert_to_numpy(x) for x in batch]
            # Let's verify the data and class weights match for each element
            # of the batch (2 elements in each batch)
            for sub_elem in range(2):
                self.assertAllEqual(batch[0][sub_elem], x[index * 2 + sub_elem])
                self.assertEqual(batch[1][sub_elem], y[index * 2 + sub_elem])
                class_key = np.int32(batch[1][sub_elem])
                self.assertEqual(batch[2][sub_elem], class_w[class_key])
        self.assertEqual(index, 1)  # 2 batches

    def test_speedup(self):
        x = np.random.random((40, 4))
        y = np.random.random((40, 2))
        # Baseline: a single worker with an artificial per-batch delay.
        no_speedup_py_dataset = ExamplePyDataset(
            x,
            y,
            batch_size=4,
            delay=0.2,
        )
        adapter = py_dataset_adapter.PyDatasetAdapter(
            no_speedup_py_dataset, shuffle=False
        )
        gen = adapter.get_numpy_iterator()
        t0 = time.time()
        for batch in gen:
            pass
        no_speedup_time = time.time() - t0

        # Same dataset with 4 worker threads; should overlap the delays.
        speedup_py_dataset = ExamplePyDataset(
            x,
            y,
            batch_size=4,
            workers=4,
            # TODO: the github actions runner may have performance issue with
            # multiprocessing
            # use_multiprocessing=True,
            max_queue_size=8,
            delay=0.2,
        )
        adapter = py_dataset_adapter.PyDatasetAdapter(
            speedup_py_dataset, shuffle=False
        )
        gen = adapter.get_numpy_iterator()
        t0 = time.time()
        for batch in gen:
            pass
        speedup_time = time.time() - t0

        self.assertLess(speedup_time, no_speedup_time)

    def test_dict_inputs(self):
        inputs = {
            "x": np.random.random((40, 4)),
            "y": np.random.random((40, 2)),
        }
        py_dataset = DictPyDataset(inputs, batch_size=4)
        adapter = py_dataset_adapter.PyDatasetAdapter(py_dataset, shuffle=False)
        gen = adapter.get_numpy_iterator()
        for batch in gen:
            self.assertEqual(len(batch), 2)
            bx, by = batch["x"], batch["y"]
            self.assertIsInstance(bx, np.ndarray)
            self.assertIsInstance(by, np.ndarray)
            self.assertEqual(bx.dtype, by.dtype)
            self.assertEqual(bx.shape, (4, 4))
            self.assertEqual(by.shape, (4, 2))
        ds = adapter.get_tf_dataset()
        for batch in ds:
            self.assertEqual(len(batch), 2)
            bx, by = batch["x"], batch["y"]
            self.assertIsInstance(bx, tf.Tensor)
            self.assertIsInstance(by, tf.Tensor)
            self.assertEqual(bx.dtype, by.dtype)
            self.assertEqual(tuple(bx.shape), (4, 4))
            self.assertEqual(tuple(by.shape), (4, 2))

    def test_with_different_shapes(self):
        # Each batch has a different feature width (4, 5, then 6) and the
        # last batch is smaller; the adapter must pass them through as-is.
        class TestPyDataset(py_dataset_adapter.PyDataset):
            @property
            def num_batches(self):
                return 3

            def __getitem__(self, idx):
                if idx == 0:
                    return np.ones([16, 4], "float32"), np.ones(
                        [16, 2], "float32"
                    )
                if idx == 1:
                    return np.ones([16, 5], "float32"), np.ones(
                        [16, 2], "float32"
                    )
                else:
                    return np.ones([2, 6], "float32"), np.ones(
                        [2, 2], "float32"
                    )

        adapter = py_dataset_adapter.PyDatasetAdapter(
            TestPyDataset(), shuffle=False
        )
        if backend.backend() == "numpy":
            it = adapter.get_numpy_iterator()
        elif backend.backend() == "tensorflow":
            it = adapter.get_tf_dataset()
        elif backend.backend() == "jax":
            it = adapter.get_jax_iterator()
        elif backend.backend() == "torch":
            it = adapter.get_torch_dataloader()
        for i, batch in enumerate(it):
            self.assertEqual(len(batch), 2)
            bx, by = batch
            self.assertEqual(bx.dtype, by.dtype)
            self.assertContainsExactSubsequence(str(bx.dtype), "float32")
            if i == 0:
                self.assertEqual(bx.shape, (16, 4))
                self.assertEqual(by.shape, (16, 2))
            elif i == 1:
                self.assertEqual(bx.shape, (16, 5))
                self.assertEqual(by.shape, (16, 2))
            else:
                self.assertEqual(bx.shape, (2, 6))
                self.assertEqual(by.shape, (2, 2))

    @parameterized.named_parameters(
        [
            {
                "testcase_name": "multiprocessing",
                "workers": 2,
                "use_multiprocessing": True,
                "max_queue_size": 10,
            },
            {
                "testcase_name": "multithreading",
                "workers": 2,
                "max_queue_size": 10,
            },
            {
                "testcase_name": "single",
            },
        ]
    )
    def test_exception_reported(
        self,
        workers=0,
        use_multiprocessing=False,
        max_queue_size=0,
    ):
        if backend.backend() == "jax" and use_multiprocessing is True:
            self.skipTest(
                "The CI failed for an unknown reason with "
                "`use_multiprocessing=True` in the jax backend"
            )
        dataset = ExceptionPyDataset(
            workers=workers,
            use_multiprocessing=use_multiprocessing,
            max_queue_size=max_queue_size,
        )
        adapter = py_dataset_adapter.PyDatasetAdapter(dataset, shuffle=False)
        expected_exception_class = ValueError
        if backend.backend() == "numpy":
            it = adapter.get_numpy_iterator()
        elif backend.backend() == "tensorflow":
            it = adapter.get_tf_dataset()
            # tf.data wraps the exception
            expected_exception_class = tf.errors.InvalidArgumentError
        elif backend.backend() == "jax":
            it = adapter.get_jax_iterator()
        elif backend.backend() == "torch":
            it = adapter.get_torch_dataloader()
        it = iter(it)
        # The first two batches are valid; the third raises.
        next(it)
        next(it)
        with self.assertRaisesRegex(
            expected_exception_class, "Expected exception"
        ):
            next(it)

    def test_iterate_finite(self):
        py_dataset = ExamplePyDataset(
            np.ones((6, 11), dtype="int32"),
            np.zeros((6, 11), dtype="int32"),
            batch_size=2,
        )
        batches = [batch for batch in py_dataset]
        self.assertLen(batches, 3)

    def test_iterate_infinite_with_none_num_batches(self):
        py_dataset = ExamplePyDataset(
            np.ones((6, 11), dtype="int32"),
            np.zeros((6, 11), dtype="int32"),
            batch_size=2,
            infinite=True,
        )
        # Iteration never terminates on its own; stop after 10 batches.
        for index, _ in enumerate(py_dataset):
            if index >= 10:
                break

    def test_iterate_infinite_with_no_len(self):
        class NoLenDataset(py_dataset_adapter.PyDataset):
            def __getitem__(self, idx):
                yield np.ones((2, 11), dtype="int32")

        # A PyDataset without num_batches is treated as infinite.
        for index, _ in enumerate(NoLenDataset()):
            if index >= 10:
                break
| PyDatasetAdapterTest |
python | huggingface__transformers | tests/models/swin/test_modeling_swin.py | {
"start": 17322,
"end": 19416
class ____(unittest.TestCase):
    """End-to-end checks that run pinned Swin checkpoints on a fixture image."""

    @cached_property
    def default_image_processor(self):
        if not is_vision_available():
            return None
        return AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224")

    @slow
    def test_inference_image_classification_head(self):
        model = SwinForImageClassification.from_pretrained("microsoft/swin-tiny-patch4-window7-224").to(torch_device)
        processor = self.default_image_processor

        test_image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(images=test_image, return_tensors="pt").to(torch_device)

        # Forward pass without gradient tracking.
        with torch.no_grad():
            outputs = model(**inputs)

        # The classification head emits one logit per ImageNet-1k class.
        self.assertEqual(outputs.logits.shape, torch.Size((1, 1000)))
        expected_slice = torch.tensor([-0.0948, -0.6454, -0.0921]).to(torch_device)
        torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # Swin models have an `interpolate_pos_encoding` argument in their
        # forward method, allowing the pre-trained position embeddings to be
        # interpolated so the model can run on higher resolutions.
        model = SwinModel.from_pretrained("microsoft/swin-tiny-patch4-window7-224").to(torch_device)
        processor = self.default_image_processor

        test_image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(images=test_image, size={"height": 481, "width": 481}, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        self.assertEqual(outputs.last_hidden_state.shape, torch.Size((1, 256, 768)))
@require_torch
| SwinModelIntegrationTest |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/test/memory_alignment_test.py | {
"start": 1190,
"end": 2529
class ____(trt_test.TfTrtIntegrationTestBase):
  """Tests TF-TRT conversion of two stacked Conv2D ops (memory alignment)."""

  def GraphFn(self, inp):
    dtype = inp.dtype
    # Two fixed random conv kernels: 3->5 channels, then 5->10 channels.
    kernel_a = constant_op.constant(
        np.random.randn(1, 1, 3, 5), name="kernel_1", dtype=dtype)
    kernel_b = constant_op.constant(
        np.random.randn(1, 1, 5, 10), name="kernel_2", dtype=dtype)
    first_conv = nn.conv2d(
        input=inp,
        filter=kernel_a,
        strides=[1, 1, 1, 1],
        padding="VALID",
        name="conv")
    second_conv = nn.conv2d(
        input=first_conv,
        filter=kernel_b,
        strides=[1, 1, 1, 1],
        padding="VALID",
        name="conv_2")
    return array_ops.squeeze(second_conv, name="output_0")

  def GetParams(self):
    return self.BuildParams(self.GraphFn, dtypes.float32, [[2, 15, 15, 3]],
                            [[2, 15, 15, 10]])

  def ExpectedEnginesToBuild(self, run_params):
    """Return the expected engines to build."""
    return ["TRTEngineOp_000"]

  def ExpectedAbsoluteTolerance(self, run_params):
    """The absolute tolerance to compare floating point results."""
    # Reduced-precision runs need a looser absolute tolerance than FP32.
    if run_params.precision_mode == "FP32":
      return 1.e-06
    return 1.e-02

  def ExpectedRelativeTolerance(self, run_params):
    """The relative tolerance to compare floating point results."""
    return 0.1
# Allow running this test module directly, outside the bazel test runner.
if __name__ == "__main__":
  test.main()
| MemoryAlignmentTest |
python | dask__distributed | distributed/worker_state_machine.py | {
"start": 28475,
"end": 28732
class ____(StateMachineEvent):
    """Event that requests a worker to release a key because it's now being computed
    somewhere else.

    See also
    --------
    StealResponseMsg
    """

    __slots__ = ("key",)
    # Key the receiving worker is asked to release.
    key: Key
@dataclass
| StealRequestEvent |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/controls.py | {
"start": 17224,
"end": 35047
} | class ____(UIControl):
"""
Control for visualizing the content of a :class:`.Buffer`.
:param buffer: The :class:`.Buffer` object to be displayed.
:param input_processors: A list of
:class:`~prompt_toolkit.layout.processors.Processor` objects.
:param include_default_input_processors: When True, include the default
processors for highlighting of selection, search and displaying of
multiple cursors.
:param lexer: :class:`.Lexer` instance for syntax highlighting.
:param preview_search: `bool` or :class:`.Filter`: Show search while
typing. When this is `True`, probably you want to add a
``HighlightIncrementalSearchProcessor`` as well. Otherwise only the
cursor position will move, but the text won't be highlighted.
:param focusable: `bool` or :class:`.Filter`: Tell whether this control is focusable.
:param focus_on_click: Focus this buffer when it's click, but not yet focused.
:param key_bindings: a :class:`.KeyBindings` object.
"""
def __init__(
self,
buffer: Buffer | None = None,
input_processors: list[Processor] | None = None,
include_default_input_processors: bool = True,
lexer: Lexer | None = None,
preview_search: FilterOrBool = False,
focusable: FilterOrBool = True,
search_buffer_control: (
None | SearchBufferControl | Callable[[], SearchBufferControl]
) = None,
menu_position: Callable[[], int | None] | None = None,
focus_on_click: FilterOrBool = False,
key_bindings: KeyBindingsBase | None = None,
):
self.input_processors = input_processors
self.include_default_input_processors = include_default_input_processors
self.default_input_processors = [
HighlightSearchProcessor(),
HighlightIncrementalSearchProcessor(),
HighlightSelectionProcessor(),
DisplayMultipleCursors(),
]
self.preview_search = to_filter(preview_search)
self.focusable = to_filter(focusable)
self.focus_on_click = to_filter(focus_on_click)
self.buffer = buffer or Buffer()
self.menu_position = menu_position
self.lexer = lexer or SimpleLexer()
self.key_bindings = key_bindings
self._search_buffer_control = search_buffer_control
#: Cache for the lexer.
#: Often, due to cursor movement, undo/redo and window resizing
#: operations, it happens that a short time, the same document has to be
#: lexed. This is a fairly easy way to cache such an expensive operation.
self._fragment_cache: SimpleCache[
Hashable, Callable[[int], StyleAndTextTuples]
] = SimpleCache(maxsize=8)
self._last_click_timestamp: float | None = None
self._last_get_processed_line: Callable[[int], _ProcessedLine] | None = None
def __repr__(self) -> str:
return f"<{self.__class__.__name__} buffer={self.buffer!r} at {id(self)!r}>"
@property
def search_buffer_control(self) -> SearchBufferControl | None:
result: SearchBufferControl | None
if callable(self._search_buffer_control):
result = self._search_buffer_control()
else:
result = self._search_buffer_control
assert result is None or isinstance(result, SearchBufferControl)
return result
@property
def search_buffer(self) -> Buffer | None:
control = self.search_buffer_control
if control is not None:
return control.buffer
return None
@property
def search_state(self) -> SearchState:
"""
Return the `SearchState` for searching this `BufferControl`. This is
always associated with the search control. If one search bar is used
for searching multiple `BufferControls`, then they share the same
`SearchState`.
"""
search_buffer_control = self.search_buffer_control
if search_buffer_control:
return search_buffer_control.searcher_search_state
else:
return SearchState()
def is_focusable(self) -> bool:
return self.focusable()
def preferred_width(self, max_available_width: int) -> int | None:
"""
This should return the preferred width.
Note: We don't specify a preferred width according to the content,
because it would be too expensive. Calculating the preferred
width can be done by calculating the longest line, but this would
require applying all the processors to each line. This is
unfeasible for a larger document, and doing it for small
documents only would result in inconsistent behavior.
"""
return None
def preferred_height(
self,
width: int,
max_available_height: int,
wrap_lines: bool,
get_line_prefix: GetLinePrefixCallable | None,
) -> int | None:
# Calculate the content height, if it was drawn on a screen with the
# given width.
height = 0
content = self.create_content(width, height=1) # Pass a dummy '1' as height.
# When line wrapping is off, the height should be equal to the amount
# of lines.
if not wrap_lines:
return content.line_count
# When the number of lines exceeds the max_available_height, just
# return max_available_height. No need to calculate anything.
if content.line_count >= max_available_height:
return max_available_height
for i in range(content.line_count):
height += content.get_height_for_line(i, width, get_line_prefix)
if height >= max_available_height:
return max_available_height
return height
def _get_formatted_text_for_line_func(
self, document: Document
) -> Callable[[int], StyleAndTextTuples]:
"""
Create a function that returns the fragments for a given line.
"""
# Cache using `document.text`.
def get_formatted_text_for_line() -> Callable[[int], StyleAndTextTuples]:
return self.lexer.lex_document(document)
key = (document.text, self.lexer.invalidation_hash())
return self._fragment_cache.get(key, get_formatted_text_for_line)
def _create_get_processed_line_func(
self, document: Document, width: int, height: int
) -> Callable[[int], _ProcessedLine]:
"""
Create a function that takes a line number of the current document and
returns a _ProcessedLine(processed_fragments, source_to_display, display_to_source)
tuple.
"""
# Merge all input processors together.
input_processors = self.input_processors or []
if self.include_default_input_processors:
input_processors = self.default_input_processors + input_processors
merged_processor = merge_processors(input_processors)
def transform(
lineno: int,
fragments: StyleAndTextTuples,
get_line: Callable[[int], StyleAndTextTuples],
) -> _ProcessedLine:
"Transform the fragments for a given line number."
# Get cursor position at this line.
def source_to_display(i: int) -> int:
"""X position from the buffer to the x position in the
processed fragment list. By default, we start from the 'identity'
operation."""
return i
transformation = merged_processor.apply_transformation(
TransformationInput(
self,
document,
lineno,
source_to_display,
fragments,
width,
height,
get_line,
)
)
return _ProcessedLine(
transformation.fragments,
transformation.source_to_display,
transformation.display_to_source,
)
def create_func() -> Callable[[int], _ProcessedLine]:
get_line = self._get_formatted_text_for_line_func(document)
cache: dict[int, _ProcessedLine] = {}
def get_processed_line(i: int) -> _ProcessedLine:
try:
return cache[i]
except KeyError:
processed_line = transform(i, get_line(i), get_line)
cache[i] = processed_line
return processed_line
return get_processed_line
return create_func()
def create_content(
self, width: int, height: int, preview_search: bool = False
) -> UIContent:
"""
Create a UIContent.
"""
buffer = self.buffer
# Trigger history loading of the buffer. We do this during the
# rendering of the UI here, because it needs to happen when an
# `Application` with its event loop is running. During the rendering of
# the buffer control is the earliest place we can achieve this, where
# we're sure the right event loop is active, and don't require user
# interaction (like in a key binding).
buffer.load_history_if_not_yet_loaded()
# Get the document to be shown. If we are currently searching (the
# search buffer has focus, and the preview_search filter is enabled),
# then use the search document, which has possibly a different
# text/cursor position.)
search_control = self.search_buffer_control
preview_now = preview_search or bool(
# Only if this feature is enabled.
self.preview_search()
and
# And something was typed in the associated search field.
search_control
and search_control.buffer.text
and
# And we are searching in this control. (Many controls can point to
# the same search field, like in Pyvim.)
get_app().layout.search_target_buffer_control == self
)
if preview_now and search_control is not None:
ss = self.search_state
document = buffer.document_for_search(
SearchState(
text=search_control.buffer.text,
direction=ss.direction,
ignore_case=ss.ignore_case,
)
)
else:
document = buffer.document
get_processed_line = self._create_get_processed_line_func(
document, width, height
)
self._last_get_processed_line = get_processed_line
def translate_rowcol(row: int, col: int) -> Point:
"Return the content column for this coordinate."
return Point(x=get_processed_line(row).source_to_display(col), y=row)
def get_line(i: int) -> StyleAndTextTuples:
"Return the fragments for a given line number."
fragments = get_processed_line(i).fragments
# Add a space at the end, because that is a possible cursor
# position. (When inserting after the input.) We should do this on
# all the lines, not just the line containing the cursor. (Because
# otherwise, line wrapping/scrolling could change when moving the
# cursor around.)
fragments = fragments + [("", " ")]
return fragments
content = UIContent(
get_line=get_line,
line_count=document.line_count,
cursor_position=translate_rowcol(
document.cursor_position_row, document.cursor_position_col
),
)
# If there is an auto completion going on, use that start point for a
# pop-up menu position. (But only when this buffer has the focus --
# there is only one place for a menu, determined by the focused buffer.)
if get_app().layout.current_control == self:
menu_position = self.menu_position() if self.menu_position else None
if menu_position is not None:
assert isinstance(menu_position, int)
menu_row, menu_col = buffer.document.translate_index_to_position(
menu_position
)
content.menu_position = translate_rowcol(menu_row, menu_col)
elif buffer.complete_state:
# Position for completion menu.
# Note: We use 'min', because the original cursor position could be
# behind the input string when the actual completion is for
# some reason shorter than the text we had before. (A completion
# can change and shorten the input.)
menu_row, menu_col = buffer.document.translate_index_to_position(
min(
buffer.cursor_position,
buffer.complete_state.original_document.cursor_position,
)
)
content.menu_position = translate_rowcol(menu_row, menu_col)
else:
content.menu_position = None
return content
def mouse_handler(self, mouse_event: MouseEvent) -> NotImplementedOrNone:
"""
Mouse handler for this control.
"""
buffer = self.buffer
position = mouse_event.position
# Focus buffer when clicked.
if get_app().layout.current_control == self:
if self._last_get_processed_line:
processed_line = self._last_get_processed_line(position.y)
# Translate coordinates back to the cursor position of the
# original input.
xpos = processed_line.display_to_source(position.x)
index = buffer.document.translate_row_col_to_index(position.y, xpos)
# Set the cursor position.
if mouse_event.event_type == MouseEventType.MOUSE_DOWN:
buffer.exit_selection()
buffer.cursor_position = index
elif (
mouse_event.event_type == MouseEventType.MOUSE_MOVE
and mouse_event.button != MouseButton.NONE
):
# Click and drag to highlight a selection
if (
buffer.selection_state is None
and abs(buffer.cursor_position - index) > 0
):
buffer.start_selection(selection_type=SelectionType.CHARACTERS)
buffer.cursor_position = index
elif mouse_event.event_type == MouseEventType.MOUSE_UP:
# When the cursor was moved to another place, select the text.
# (The >1 is actually a small but acceptable workaround for
# selecting text in Vi navigation mode. In navigation mode,
# the cursor can never be after the text, so the cursor
# will be repositioned automatically.)
if abs(buffer.cursor_position - index) > 1:
if buffer.selection_state is None:
buffer.start_selection(
selection_type=SelectionType.CHARACTERS
)
buffer.cursor_position = index
# Select word around cursor on double click.
# Two MOUSE_UP events in a short timespan are considered a double click.
double_click = (
self._last_click_timestamp
and time.time() - self._last_click_timestamp < 0.3
)
self._last_click_timestamp = time.time()
if double_click:
start, end = buffer.document.find_boundaries_of_current_word()
buffer.cursor_position += start
buffer.start_selection(selection_type=SelectionType.CHARACTERS)
buffer.cursor_position += end - start
else:
# Don't handle scroll events here.
return NotImplemented
# Not focused, but focusing on click events.
else:
if (
self.focus_on_click()
and mouse_event.event_type == MouseEventType.MOUSE_UP
):
# Focus happens on mouseup. (If we did this on mousedown, the
# up event will be received at the point where this widget is
# focused and be handled anyway.)
get_app().layout.current_control = self
else:
return NotImplemented
return None
def move_cursor_down(self) -> None:
b = self.buffer
b.cursor_position += b.document.get_cursor_down_position()
def move_cursor_up(self) -> None:
b = self.buffer
b.cursor_position += b.document.get_cursor_up_position()
def get_key_bindings(self) -> KeyBindingsBase | None:
"""
When additional key bindings are given. Return these.
"""
return self.key_bindings
def get_invalidate_events(self) -> Iterable[Event[object]]:
"""
Return the Window invalidate events.
"""
# Whenever the buffer changes, the UI has to be updated.
yield self.buffer.on_text_changed
yield self.buffer.on_cursor_position_changed
yield self.buffer.on_completions_changed
yield self.buffer.on_suggestion_set
| BufferControl |
python | celery__celery | celery/worker/components.py | {
"start": 5497,
"end": 6229
} | class ____(bootsteps.StartStopStep):
"""Step used to embed a beat process.
Enabled when the ``beat`` argument is set.
"""
label = 'Beat'
conditional = True
def __init__(self, w, beat=False, **kwargs):
self.enabled = w.beat = beat
w.beat = None
super().__init__(w, beat=beat, **kwargs)
def create(self, w):
from celery.beat import EmbeddedService
if w.pool_cls.__module__.endswith(('gevent', 'eventlet')):
raise ImproperlyConfigured(ERR_B_GREEN)
b = w.beat = EmbeddedService(w.app,
schedule_filename=w.schedule_filename,
scheduler_cls=w.scheduler)
return b
| Beat |
python | pennersr__django-allauth | allauth/headless/account/views.py | {
"start": 3837,
"end": 4531
} | class ____(APIView):
input_class = {"POST": SignupInput}
by_passkey = False
def post(self, request, *args, **kwargs):
if request.user.is_authenticated:
return ConflictResponse(request)
if not get_account_adapter().is_open_for_signup(request):
return ForbiddenResponse(request)
user, resp = self.input.try_save(request)
if not resp:
try:
resp = flows.signup.complete_signup(
request, user=user, by_passkey=self.by_passkey
)
except ImmediateHttpResponse:
pass
return AuthenticationResponse.from_response(request, resp)
| SignupView |
python | walkccc__LeetCode | solutions/2900. Longest Unequal Adjacent Groups Subsequence I/2900.py | {
"start": 0,
"end": 311
} | class ____:
def getWordsInLongestSubsequence(
self,
n: int,
words: list[str],
groups: list[int],
) -> list[str]:
ans = []
groupId = -1
for word, group in zip(words, groups):
if group != groupId:
groupId = group
ans.append(word)
return ans
| Solution |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 14853,
"end": 14921
} | class ____(TupleCompare):
pass
@infer_global(operator.add)
| TupleLt |
python | sympy__sympy | sympy/integrals/manualintegrate.py | {
"start": 12162,
"end": 12289
} | class ____(RewriteRule):
"""Rewrite a+b*x+c*x**2 to a-b**2/(4*c) + c*(x+b/(2*c))**2"""
pass
@dataclass
| CompleteSquareRule |
python | numba__numba | numba/tests/test_caching.py | {
"start": 40784,
"end": 40834
} | class ____(InTreeCacheLocator):
pass
| TestLocator |
python | pytorch__pytorch | test/distributed/tensor/test_utils.py | {
"start": 38957,
"end": 41220
} | class ____(LocalTensorTestBase):
@property
def world_size(self):
return 4
def test_explicit_matmul(self):
with LocalTensorMode(self.world_size):
device_mesh = self.build_device_mesh()
dim = 128
x = torch.randn(8, dim, requires_grad=True)
A = torch.randn(dim, dim, requires_grad=True)
# Prepare DTensors
dx = distribute_tensor(x, device_mesh, [Shard(0)])
dA = distribute_tensor(A, device_mesh, [Shard(0)])
# implicit redistribute works as usual by default
with CommDebugMode() as comm_mode:
torch.matmul(dx, dA)
self.assertEqual(comm_mode.get_total_counts(), 1)
# explicit redistribute works too
with ExplicitRedistributionContext():
with self.assertRaisesRegex(RuntimeError, "Implicit redistribution"):
torch.matmul(dx, dA)
# explicit redistribute allows manual redistribute
with ExplicitRedistributionContext():
dA_repl = dA.redistribute(device_mesh, [Replicate()])
torch.matmul(dx, dA_repl)
dx = distribute_tensor(x, device_mesh, [Shard(0)])
dA = distribute_tensor(A, device_mesh, [Replicate()])
with ExplicitRedistributionContext(strict=True):
dY = torch.matmul(dx, dA_repl)
loss = dY.sum()
# we now see the error during backwards
with self.assertRaisesRegex(RuntimeError, "Implicit redistribution"):
loss.backward(retain_graph=True)
with ExplicitRedistributionContext(strict=False):
# but since it's a 'free' redistribute, we can still do it under non-strict mode
loss.backward(retain_graph=True)
with ExplicitRedistributionContext(enable=False):
# and we can disable
loss.backward(retain_graph=True)
# and re-enable
with self.assertRaisesRegex(RuntimeError, "Implicit redistribution"):
loss.backward(retain_graph=True)
if __name__ == "__main__":
run_tests()
| TestExplicitRedistribute |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/matrix_square_root_op_test.py | {
"start": 1117,
"end": 4863
} | class ____(test.TestCase):
def _verifySquareRoot(self, matrix, np_type):
matrix = matrix.astype(np_type)
# Verify that matmul(sqrtm(A), sqrtm(A)) = A
sqrt = gen_linalg_ops.matrix_square_root(matrix)
square = test_util.matmul_without_tf32(sqrt, sqrt)
self.assertShapeEqual(matrix, square)
self.assertAllClose(matrix, square, rtol=1e-4, atol=1e-3)
def _verifySquareRootReal(self, x):
for np_type in [np.float32, np.float64]:
self._verifySquareRoot(x, np_type)
def _verifySquareRootComplex(self, x):
for np_type in [np.complex64, np.complex128]:
self._verifySquareRoot(x, np_type)
def _makeBatch(self, matrix1, matrix2):
matrix_batch = np.concatenate(
[np.expand_dims(matrix1, 0),
np.expand_dims(matrix2, 0)])
matrix_batch = np.tile(matrix_batch, [2, 3, 1, 1])
return matrix_batch
def _testMatrices(self, matrix1, matrix2):
# Real
self._verifySquareRootReal(matrix1)
self._verifySquareRootReal(matrix2)
self._verifySquareRootReal(self._makeBatch(matrix1, matrix2))
matrix1 = matrix1.astype(np.complex64)
matrix2 = matrix2.astype(np.complex64)
matrix1 += 1j * matrix1
matrix2 += 1j * matrix2
self._verifySquareRootComplex(matrix1)
self._verifySquareRootComplex(matrix2)
self._verifySquareRootComplex(self._makeBatch(matrix1, matrix2))
@test_util.run_without_tensor_float_32
def testSymmetricPositiveDefinite(self):
matrix1 = np.array([[2., 1.], [1., 2.]])
matrix2 = np.array([[3., -1.], [-1., 3.]])
self._testMatrices(matrix1, matrix2)
@test_util.run_without_tensor_float_32
def testAsymmetric(self):
matrix1 = np.array([[0., 4.], [-1., 5.]])
matrix2 = np.array([[33., 24.], [48., 57.]])
self._testMatrices(matrix1, matrix2)
@test_util.run_without_tensor_float_32
def testIdentityMatrix(self):
# 2x2
identity = np.array([[1., 0], [0, 1.]])
self._verifySquareRootReal(identity)
# 3x3
identity = np.array([[1., 0, 0], [0, 1., 0], [0, 0, 1.]])
self._verifySquareRootReal(identity)
@test_util.run_without_tensor_float_32
def testEmpty(self):
self._verifySquareRootReal(np.empty([0, 2, 2]))
self._verifySquareRootReal(np.empty([2, 0, 0]))
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
@test_util.run_without_tensor_float_32
def testWrongDimensions(self):
# The input to the square root should be at least a 2-dimensional tensor.
tensor = constant_op.constant([1., 2.])
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
gen_linalg_ops.matrix_square_root(tensor)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
@test_util.run_without_tensor_float_32
def testNotSquare(self):
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
tensor = constant_op.constant([[1., 0., -1.], [-1., 1., 0.]])
self.evaluate(gen_linalg_ops.matrix_square_root(tensor))
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
@test_util.run_without_tensor_float_32
def testConcurrentExecutesWithoutError(self):
matrix_shape = [5, 5]
seed = [42, 24]
matrix1 = stateless_random_ops.stateless_random_normal(
shape=matrix_shape, seed=seed)
matrix2 = stateless_random_ops.stateless_random_normal(
shape=matrix_shape, seed=seed)
self.assertAllEqual(matrix1, matrix2)
square1 = math_ops.matmul(matrix1, matrix1)
square2 = math_ops.matmul(matrix2, matrix2)
sqrt1 = gen_linalg_ops.matrix_square_root(square1)
sqrt2 = gen_linalg_ops.matrix_square_root(square2)
all_ops = [sqrt1, sqrt2]
sqrt = self.evaluate(all_ops)
self.assertAllClose(sqrt[0], sqrt[1])
if __name__ == "__main__":
test.main()
| SquareRootOpTest |
python | optuna__optuna | optuna/storages/journal/_storage.py | {
"start": 1291,
"end": 1602
} | class ____(enum.IntEnum):
CREATE_STUDY = 0
DELETE_STUDY = 1
SET_STUDY_USER_ATTR = 2
SET_STUDY_SYSTEM_ATTR = 3
CREATE_TRIAL = 4
SET_TRIAL_PARAM = 5
SET_TRIAL_STATE_VALUES = 6
SET_TRIAL_INTERMEDIATE_VALUE = 7
SET_TRIAL_USER_ATTR = 8
SET_TRIAL_SYSTEM_ATTR = 9
| JournalOperation |
python | wandb__wandb | wandb/vendor/pygments/lexers/robotframework.py | {
"start": 8281,
"end": 8766
} | class ____(Setting):
_keyword_settings = ('setup', 'precondition', 'teardown', 'postcondition',
'template')
_import_settings = ()
_other_settings = ('documentation', 'tags', 'timeout')
def _tokenize(self, value, index):
if index == 0:
type = Setting._tokenize(self, value[1:-1], index)
return [('[', SYNTAX), (value[1:-1], type), (']', SYNTAX)]
return Setting._tokenize(self, value, index)
| TestCaseSetting |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/common.py | {
"start": 948,
"end": 9722
} | class ____(Exception):
"""Wrong account type"""
def retry_pattern(backoff_type, exception, **wait_gen_kwargs):
def log_retry_attempt(details):
_, exc, _ = sys.exc_info()
logger.info(str(exc))
logger.info(f"Caught retryable error after {details['tries']} tries. Waiting {details['wait']} more seconds then retrying...")
def reduce_request_record_limit(details):
_, exc, _ = sys.exc_info()
# the list of error patterns to track,
# in order to reduce the request page size and retry
error_patterns = [
"Please reduce the amount of data you're asking for, then retry your request",
"An unknown error occurred",
]
if (
details.get("kwargs", {}).get("params", {}).get("limit")
and exc.http_status() == http.client.INTERNAL_SERVER_ERROR
and exc.api_error_message() in error_patterns
):
# reduce the existing request `limit` param by a half and retry
details["kwargs"]["params"]["limit"] = int(int(details["kwargs"]["params"]["limit"]) / 2)
# set the flag to the api class that the last api call failed
details.get("args")[0].last_api_call_is_successful = True
# set the flag to the api class that the `limit` param was reduced
details.get("args")[0].request_record_limit_is_reduced = True
def revert_request_record_limit(details):
"""
This method is triggered `on_success` after successful retry,
sets the internal class flags to provide the logic to restore the previously reduced
`limit` param.
"""
# reference issue: https://github.com/airbytehq/airbyte/issues/25383
# set the flag to the api class that the last api call was successful
details.get("args")[0].last_api_call_is_successful = False
# set the flag to the api class that the `limit` param is restored
details.get("args")[0].request_record_limit_is_reduced = False
def give_up(details):
if isinstance(details["exception"], FacebookRequestError):
raise traced_exception(details["exception"])
def is_transient_cannot_include_error(exc: FacebookRequestError) -> bool:
"""After migration to API v19.0, some customers randomly face a BAD_REQUEST error (OAuthException) with the pattern:"Cannot include ..."
According to the last comment in https://developers.facebook.com/community/threads/286697364476462/, this might be a transient issue that can be solved with a retry."""
pattern = r"Cannot include .* in summary param because they weren't there while creating the report run."
return bool(exc.http_status() == http.client.BAD_REQUEST and re.search(pattern, exc.api_error_message()))
def should_retry_api_error(exc):
if isinstance(exc, FacebookRequestError):
call_rate_limit_error = exc.api_error_code() in FACEBOOK_RATE_LIMIT_ERROR_CODES
temporary_oauth_error = exc.api_error_code() == FACEBOOK_TEMPORARY_OAUTH_ERROR_CODE
batch_timeout_error = exc.http_status() == http.client.BAD_REQUEST and exc.api_error_code() == FACEBOOK_BATCH_ERROR_CODE
unknown_error = exc.api_error_subcode() == FACEBOOK_UNKNOWN_ERROR_CODE
connection_reset_error = exc.api_error_code() == FACEBOOK_CONNECTION_RESET_ERROR_CODE
server_error = exc.http_status() == http.client.INTERNAL_SERVER_ERROR
service_unavailable_error = exc.http_status() == http.client.SERVICE_UNAVAILABLE
return any(
(
exc.api_transient_error(),
unknown_error,
call_rate_limit_error,
batch_timeout_error,
is_transient_cannot_include_error(exc),
connection_reset_error,
temporary_oauth_error,
server_error,
service_unavailable_error,
)
)
return True
return backoff.on_exception(
backoff_type,
exception,
jitter=None,
on_backoff=[log_retry_attempt, reduce_request_record_limit],
on_success=[revert_request_record_limit],
on_giveup=[give_up],
giveup=lambda exc: not should_retry_api_error(exc),
**wait_gen_kwargs,
)
def deep_merge(a: Any, b: Any) -> Any:
"""Merge two values, with `b` taking precedence over `a`."""
if isinstance(a, dict) and isinstance(b, dict):
# set of all keys in both dictionaries
keys = set(a.keys()) | set(b.keys())
return {key: deep_merge(a.get(key), b.get(key)) for key in keys}
elif isinstance(a, list) and isinstance(b, list):
return [*a, *b]
elif isinstance(a, set) and isinstance(b, set):
return a | b
else:
return a if b is None else b
FACEBOOK_CONFIG_ERRORS_TO_CATCH = [ # list of tuples (code, error_subcode)
(100, 2446289),
]
def traced_exception(fb_exception: FacebookRequestError):
"""Add user-friendly message for FacebookRequestError
Please see ../unit_tests/test_errors.py for full error examples
Please add new errors to the tests
"""
msg = fb_exception.api_error_message() or fb_exception.get_message()
if "Error validating access token" in msg:
failure_type = FailureType.config_error
friendly_msg = "Invalid access token. Re-authenticate if FB oauth is used or refresh access token with all required permissions"
elif "(#100) Missing permissions" in msg:
failure_type = FailureType.config_error
friendly_msg = (
"Credentials don't have enough permissions. Check if correct Ad Account Id is used (as in Ads Manager), "
"re-authenticate if FB oauth is used or refresh access token with all required permissions"
)
elif "permission" in msg:
failure_type = FailureType.config_error
friendly_msg = (
"Credentials don't have enough permissions. Re-authenticate if FB oauth is used or refresh access token "
"with all required permissions."
)
elif "An unknown error occurred" in msg and "error_user_title" in fb_exception._error:
msg = fb_exception._error["error_user_title"]
if "profile is not linked to delegate page" in msg or "el perfil no est" in msg:
failure_type = FailureType.config_error
friendly_msg = (
"Current profile is not linked to delegate page. Check if correct business (not personal) "
"Ad Account Id is used (as in Ads Manager), re-authenticate if FB oauth is used or refresh "
"access token with all required permissions."
)
elif "reduce the amount of data" in msg:
failure_type = FailureType.config_error
friendly_msg = (
"Please reduce the number of fields requested. Go to the schema tab, select your source, "
"and unselect the fields you do not need."
)
elif "The start date of the time range cannot be beyond 37 months from the current date" in msg:
failure_type = FailureType.config_error
friendly_msg = "Please set the start date of your sync to be within the last 3 years."
elif (fb_exception.api_error_code(), fb_exception.api_error_subcode()) in FACEBOOK_CONFIG_ERRORS_TO_CATCH:
failure_type = FailureType.config_error
friendly_msg = msg
elif fb_exception.api_error_code() in FACEBOOK_RATE_LIMIT_ERROR_CODES:
return AirbyteTracedException(
message="The maximum number of requests on the Facebook API has been reached. See https://developers.facebook.com/docs/graph-api/overview/rate-limiting/ for more information",
internal_message=str(fb_exception),
failure_type=FailureType.transient_error,
exception=fb_exception,
)
elif fb_exception.http_status() == 503:
return AirbyteTracedException(
message="The Facebook API service is temporarily unavailable. This issue should resolve itself, and does not require further action.",
internal_message=str(fb_exception),
failure_type=FailureType.transient_error,
exception=fb_exception,
)
else:
failure_type = FailureType.system_error
error_code = fb_exception.api_error_code() if fb_exception.api_error_code() else fb_exception.http_status()
friendly_msg = f"Error code {error_code}: {msg}."
return AirbyteTracedException(
message=friendly_msg or msg,
internal_message=msg,
failure_type=failure_type,
exception=fb_exception,
)
| AccountTypeException |
python | django__django | tests/async/test_async_queryset.py | {
"start": 342,
"end": 10079
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.s1 = SimpleModel.objects.create(
field=1,
created=datetime(2022, 1, 1, 0, 0, 0),
)
cls.s2 = SimpleModel.objects.create(
field=2,
created=datetime(2022, 1, 1, 0, 0, 1),
)
cls.s3 = SimpleModel.objects.create(
field=3,
created=datetime(2022, 1, 1, 0, 0, 2),
)
cls.r1 = RelatedModel.objects.create(simple=cls.s1)
cls.r2 = RelatedModel.objects.create(simple=cls.s2)
cls.r3 = RelatedModel.objects.create(simple=cls.s3)
@staticmethod
def _get_db_feature(connection_, feature_name):
# Wrapper to avoid accessing connection attributes until inside
# coroutine function. Connection access is thread sensitive and cannot
# be passed across sync/async boundaries.
return getattr(connection_.features, feature_name)
async def test_async_iteration(self):
results = []
async for m in SimpleModel.objects.order_by("pk"):
results.append(m)
self.assertEqual(results, [self.s1, self.s2, self.s3])
async def test_aiterator(self):
qs = SimpleModel.objects.aiterator()
results = []
async for m in qs:
results.append(m)
self.assertCountEqual(results, [self.s1, self.s2, self.s3])
async def test_aiterator_prefetch_related(self):
results = []
async for s in SimpleModel.objects.prefetch_related(
Prefetch("relatedmodel_set", to_attr="prefetched_relatedmodel")
).aiterator():
results.append(s.prefetched_relatedmodel)
self.assertCountEqual(results, [[self.r1], [self.r2], [self.r3]])
async def test_aiterator_invalid_chunk_size(self):
msg = "Chunk size must be strictly positive."
for size in [0, -1]:
qs = SimpleModel.objects.aiterator(chunk_size=size)
with self.subTest(size=size), self.assertRaisesMessage(ValueError, msg):
async for m in qs:
pass
async def test_acount(self):
count = await SimpleModel.objects.acount()
self.assertEqual(count, 3)
async def test_acount_cached_result(self):
qs = SimpleModel.objects.all()
# Evaluate the queryset to populate the query cache.
[x async for x in qs]
count = await qs.acount()
self.assertEqual(count, 3)
await sync_to_async(SimpleModel.objects.create)(
field=4,
created=datetime(2022, 1, 1, 0, 0, 0),
)
# The query cache is used.
count = await qs.acount()
self.assertEqual(count, 3)
async def test_aget(self):
instance = await SimpleModel.objects.aget(field=1)
self.assertEqual(instance, self.s1)
async def test_acreate(self):
await SimpleModel.objects.acreate(field=4)
self.assertEqual(await SimpleModel.objects.acount(), 4)
async def test_aget_or_create(self):
instance, created = await SimpleModel.objects.aget_or_create(field=4)
self.assertEqual(await SimpleModel.objects.acount(), 4)
self.assertIs(created, True)
async def test_aupdate_or_create(self):
instance, created = await SimpleModel.objects.aupdate_or_create(
id=self.s1.id, defaults={"field": 2}
)
self.assertEqual(instance, self.s1)
self.assertEqual(instance.field, 2)
self.assertIs(created, False)
instance, created = await SimpleModel.objects.aupdate_or_create(field=4)
self.assertEqual(await SimpleModel.objects.acount(), 4)
self.assertIs(created, True)
instance, created = await SimpleModel.objects.aupdate_or_create(
field=5, defaults={"field": 7}, create_defaults={"field": 6}
)
self.assertEqual(await SimpleModel.objects.acount(), 5)
self.assertIs(created, True)
self.assertEqual(instance.field, 6)
@skipUnlessDBFeature("has_bulk_insert")
@async_to_sync
async def test_abulk_create(self):
instances = [SimpleModel(field=i) for i in range(10)]
qs = await SimpleModel.objects.abulk_create(instances)
self.assertEqual(len(qs), 10)
@skipUnlessDBFeature("has_bulk_insert", "supports_update_conflicts")
@skipIfDBFeature("supports_update_conflicts_with_target")
@async_to_sync
async def test_update_conflicts_unique_field_unsupported(self):
msg = (
"This database backend does not support updating conflicts with specifying "
"unique fields that can trigger the upsert."
)
with self.assertRaisesMessage(NotSupportedError, msg):
await SimpleModel.objects.abulk_create(
[SimpleModel(field=1), SimpleModel(field=2)],
update_conflicts=True,
update_fields=["field"],
unique_fields=["created"],
)
async def test_abulk_update(self):
instances = SimpleModel.objects.all()
async for instance in instances:
instance.field = instance.field * 10
await SimpleModel.objects.abulk_update(instances, ["field"])
qs = [(o.pk, o.field) async for o in SimpleModel.objects.all()]
self.assertCountEqual(
qs,
[(self.s1.pk, 10), (self.s2.pk, 20), (self.s3.pk, 30)],
)
async def test_ain_bulk(self):
res = await SimpleModel.objects.ain_bulk()
self.assertEqual(
res,
{self.s1.pk: self.s1, self.s2.pk: self.s2, self.s3.pk: self.s3},
)
res = await SimpleModel.objects.ain_bulk([self.s2.pk])
self.assertEqual(res, {self.s2.pk: self.s2})
res = await SimpleModel.objects.ain_bulk([self.s2.pk], field_name="id")
self.assertEqual(res, {self.s2.pk: self.s2})
async def test_alatest(self):
instance = await SimpleModel.objects.alatest("created")
self.assertEqual(instance, self.s3)
instance = await SimpleModel.objects.alatest("-created")
self.assertEqual(instance, self.s1)
async def test_aearliest(self):
instance = await SimpleModel.objects.aearliest("created")
self.assertEqual(instance, self.s1)
instance = await SimpleModel.objects.aearliest("-created")
self.assertEqual(instance, self.s3)
async def test_afirst(self):
instance = await SimpleModel.objects.afirst()
self.assertEqual(instance, self.s1)
instance = await SimpleModel.objects.filter(field=4).afirst()
self.assertIsNone(instance)
async def test_alast(self):
instance = await SimpleModel.objects.alast()
self.assertEqual(instance, self.s3)
instance = await SimpleModel.objects.filter(field=4).alast()
self.assertIsNone(instance)
async def test_aaggregate(self):
total = await SimpleModel.objects.aaggregate(total=Sum("field"))
self.assertEqual(total, {"total": 6})
async def test_aexists(self):
check = await SimpleModel.objects.filter(field=1).aexists()
self.assertIs(check, True)
check = await SimpleModel.objects.filter(field=4).aexists()
self.assertIs(check, False)
async def test_acontains(self):
check = await SimpleModel.objects.acontains(self.s1)
self.assertIs(check, True)
# Unsaved instances are not allowed, so use an ID known not to exist.
check = await SimpleModel.objects.acontains(
SimpleModel(id=self.s3.id + 1, field=4)
)
self.assertIs(check, False)
async def test_aupdate(self):
await SimpleModel.objects.aupdate(field=99)
qs = [o async for o in SimpleModel.objects.all()]
values = [instance.field for instance in qs]
self.assertEqual(set(values), {99})
async def test_adelete(self):
await SimpleModel.objects.filter(field=2).adelete()
qs = [o async for o in SimpleModel.objects.all()]
self.assertCountEqual(qs, [self.s1, self.s3])
@skipUnlessDBFeature("supports_explaining_query_execution")
@async_to_sync
async def test_aexplain(self):
supported_formats = await sync_to_async(self._get_db_feature)(
connection, "supported_explain_formats"
)
all_formats = (None, *supported_formats)
for format_ in all_formats:
with self.subTest(format=format_):
# TODO: Check the captured query when async versions of
# self.assertNumQueries/CaptureQueriesContext context
# processors are available.
result = await SimpleModel.objects.filter(field=1).aexplain(
format=format_
)
self.assertIsInstance(result, str)
self.assertTrue(result)
if not format_:
continue
if format_.lower() == "xml":
try:
xml.etree.ElementTree.fromstring(result)
except xml.etree.ElementTree.ParseError as e:
self.fail(f"QuerySet.aexplain() result is not valid XML: {e}")
elif format_.lower() == "json":
try:
json.loads(result)
except json.JSONDecodeError as e:
self.fail(f"QuerySet.aexplain() result is not valid JSON: {e}")
async def test_raw(self):
sql = "SELECT id, field FROM async_simplemodel WHERE created=%s"
qs = SimpleModel.objects.raw(sql, [self.s1.created])
self.assertEqual([o async for o in qs], [self.s1])
| AsyncQuerySetTest |
python | huggingface__transformers | src/transformers/models/convbert/modeling_convbert.py | {
"start": 38291,
"end": 43055
} | class ____(ConvBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.convbert = ConvBertModel(config)
self.sequence_summary = ConvBertSequenceSummary(config)
self.classifier = nn.Linear(config.hidden_size, 1)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, MultipleChoiceModelOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.convbert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
pooled_output = self.sequence_summary(sequence_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
| ConvBertForMultipleChoice |
python | encode__django-rest-framework | tests/test_htmlrenderer.py | {
"start": 5219,
"end": 6665
} | class ____(TestCase):
def setUp(self):
"""
Monkeypatch get_template
"""
self.get_template = django.template.loader.get_template
def get_template(template_name):
if template_name == '404.html':
return engines['django'].from_string("404: {{ detail }}")
if template_name == '403.html':
return engines['django'].from_string("403: {{ detail }}")
raise TemplateDoesNotExist(template_name)
django.template.loader.get_template = get_template
def tearDown(self):
"""
Revert monkeypatching
"""
django.template.loader.get_template = self.get_template
def test_not_found_html_view_with_template(self):
response = self.client.get('/not_found')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertTrue(response.content in (
b"404: Not found", b"404 Not Found"))
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
def test_permission_denied_html_view_with_template(self):
response = self.client.get('/permission_denied')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertTrue(response.content in (b"403: Permission denied", b"403 Forbidden"))
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
| TemplateHTMLRendererExceptionTests |
python | doocs__leetcode | solution/1200-1299/1297.Maximum Number of Occurrences of a Substring/Solution.py | {
"start": 0,
"end": 368
} | class ____:
def maxFreq(self, s: str, maxLetters: int, minSize: int, maxSize: int) -> int:
ans = 0
cnt = Counter()
for i in range(len(s) - minSize + 1):
t = s[i : i + minSize]
ss = set(t)
if len(ss) <= maxLetters:
cnt[t] += 1
ans = max(ans, cnt[t])
return ans
| Solution |
python | kamyu104__LeetCode-Solutions | Python/shortest-distance-to-target-string-in-a-circular-array.py | {
"start": 37,
"end": 499
} | class ____(object):
def closetTarget(self, words, target, startIndex):
"""
:type words: List[str]
:type target: str
:type startIndex: int
:rtype: int
"""
INF = float("inf")
result = INF
for i, w in enumerate(words):
if w == target:
result = min(result, (i-startIndex)%len(words), (startIndex-i)%len(words))
return result if result != INF else -1
| Solution |
python | networkx__networkx | networkx/generators/tests/test_internet_as_graphs.py | {
"start": 229,
"end": 8514
} | class ____:
@classmethod
def setup_class(cls):
cls.n = 1000
cls.seed = 42
cls.G = random_internet_as_graph(cls.n, cls.seed)
cls.T = []
cls.M = []
cls.C = []
cls.CP = []
cls.customers = {}
cls.providers = {}
for i in cls.G.nodes():
if cls.G.nodes[i]["type"] == "T":
cls.T.append(i)
elif cls.G.nodes[i]["type"] == "M":
cls.M.append(i)
elif cls.G.nodes[i]["type"] == "C":
cls.C.append(i)
elif cls.G.nodes[i]["type"] == "CP":
cls.CP.append(i)
else:
raise ValueError("Inconsistent data in the graph node attributes")
cls.set_customers(i)
cls.set_providers(i)
@classmethod
def set_customers(cls, i):
if i not in cls.customers:
cls.customers[i] = set()
for j in neighbors(cls.G, i):
e = cls.G.edges[(i, j)]
if e["type"] == "transit":
customer = int(e["customer"])
if j == customer:
cls.set_customers(j)
cls.customers[i] = cls.customers[i].union(cls.customers[j])
cls.customers[i].add(j)
elif i != customer:
raise ValueError(
"Inconsistent data in the graph edge attributes"
)
@classmethod
def set_providers(cls, i):
if i not in cls.providers:
cls.providers[i] = set()
for j in neighbors(cls.G, i):
e = cls.G.edges[(i, j)]
if e["type"] == "transit":
customer = int(e["customer"])
if i == customer:
cls.set_providers(j)
cls.providers[i] = cls.providers[i].union(cls.providers[j])
cls.providers[i].add(j)
elif j != customer:
raise ValueError(
"Inconsistent data in the graph edge attributes"
)
def test_wrong_input(self):
G = random_internet_as_graph(0)
assert len(G.nodes()) == 0
G = random_internet_as_graph(-1)
assert len(G.nodes()) == 0
G = random_internet_as_graph(1)
assert len(G.nodes()) == 1
def test_node_numbers(self):
assert len(self.G.nodes()) == self.n
assert len(self.T) < 7
assert len(self.M) == round(self.n * 0.15)
assert len(self.CP) == round(self.n * 0.05)
numb = self.n - len(self.T) - len(self.M) - len(self.CP)
assert len(self.C) == numb
def test_connectivity(self):
assert is_connected(self.G)
def test_relationships(self):
# T nodes are not customers of anyone
for i in self.T:
assert len(self.providers[i]) == 0
# C nodes are not providers of anyone
for i in self.C:
assert len(self.customers[i]) == 0
# CP nodes are not providers of anyone
for i in self.CP:
assert len(self.customers[i]) == 0
# test whether there is a customer-provider loop
for i in self.G.nodes():
assert len(self.customers[i].intersection(self.providers[i])) == 0
# test whether there is a peering with a customer or provider
for i, j in self.G.edges():
if self.G.edges[(i, j)]["type"] == "peer":
assert j not in self.customers[i]
assert i not in self.customers[j]
assert j not in self.providers[i]
assert i not in self.providers[j]
def test_degree_values(self):
d_m = 0 # multihoming degree for M nodes
d_cp = 0 # multihoming degree for CP nodes
d_c = 0 # multihoming degree for C nodes
p_m_m = 0 # avg number of peering edges between M and M
p_cp_m = 0 # avg number of peering edges between CP and M
p_cp_cp = 0 # avg number of peering edges between CP and CP
t_m = 0 # probability M's provider is T
t_cp = 0 # probability CP's provider is T
t_c = 0 # probability C's provider is T
for i, j in self.G.edges():
e = self.G.edges[(i, j)]
if e["type"] == "transit":
cust = int(e["customer"])
if i == cust:
prov = j
elif j == cust:
prov = i
else:
raise ValueError("Inconsistent data in the graph edge attributes")
if cust in self.M:
d_m += 1
if self.G.nodes[prov]["type"] == "T":
t_m += 1
elif cust in self.C:
d_c += 1
if self.G.nodes[prov]["type"] == "T":
t_c += 1
elif cust in self.CP:
d_cp += 1
if self.G.nodes[prov]["type"] == "T":
t_cp += 1
else:
raise ValueError("Inconsistent data in the graph edge attributes")
elif e["type"] == "peer":
if self.G.nodes[i]["type"] == "M" and self.G.nodes[j]["type"] == "M":
p_m_m += 1
if self.G.nodes[i]["type"] == "CP" and self.G.nodes[j]["type"] == "CP":
p_cp_cp += 1
if (
self.G.nodes[i]["type"] == "M"
and self.G.nodes[j]["type"] == "CP"
or self.G.nodes[i]["type"] == "CP"
and self.G.nodes[j]["type"] == "M"
):
p_cp_m += 1
else:
raise ValueError("Unexpected data in the graph edge attributes")
assert d_m / len(self.M) == approx((2 + (2.5 * self.n) / 10000), abs=1e-0)
assert d_cp / len(self.CP) == approx((2 + (1.5 * self.n) / 10000), abs=1e-0)
assert d_c / len(self.C) == approx((1 + (5 * self.n) / 100000), abs=1e-0)
assert p_m_m / len(self.M) == approx((1 + (2 * self.n) / 10000), abs=1e-0)
assert p_cp_m / len(self.CP) == approx((0.2 + (2 * self.n) / 10000), abs=1e-0)
assert p_cp_cp / len(self.CP) == approx(
(0.05 + (2 * self.n) / 100000), abs=1e-0
)
assert t_m / d_m == approx(0.375, abs=1e-1)
assert t_cp / d_cp == approx(0.375, abs=1e-1)
assert t_c / d_c == approx(0.125, abs=1e-1)
def test_AS_graph_coverage():
"""Add test coverage for some hard-to-hit branches."""
GG = AS_graph_generator(20, seed=42)
G = GG.generate()
assert len(G) == 20
# Proportion of M nodes is 0.15, so there are 3 when n = 20.
assert len(GG.nodes["M"]) == 3
m_node = nx.utils.arbitrary_element(GG.nodes["M"])
# Proportion of CP nodes is 0.05, so there is 1 when n = 20.
assert len(GG.nodes["CP"]) == 1
cp_node = nx.utils.arbitrary_element(GG.nodes["CP"])
# All M nodes are already connected to each other.
assert all(u in G[v] for u in GG.nodes["M"] for v in GG.nodes["M"] if u != v)
# Add coverage for the unsuccessful branches when adding peering links.
# `add_m_peering_link` cannot add edges when the nodes are already connected.
assert not GG.add_m_peering_link(m_node, "M")
# Artificially add nodes to `customers` to check customer neighbors are
# correctly excluded.
GG.customers[m_node] = set(GG.nodes["M"])
assert not GG.add_m_peering_link(m_node, "M")
# Artificially remove nodes from `providers` to check neighbors are
# correctly excluded (otherwise they might already get disqualified).
GG.providers[cp_node] = set()
assert not GG.add_cp_peering_link(cp_node, "CP")
assert not GG.add_cp_peering_link(cp_node, "M")
# Add coverage for trying to add a new M node where one already exists.
GG.add_node(m_node, "M", 1, 2, 0.5)
assert len(GG.nodes["M"]) == 3
def test_choose_pref_attach():
"""Add test coverage for the empty `degs` branch in `choose_pref_attach`."""
assert choose_pref_attach([], seed=42) is None
| TestInternetASTopology |
python | getsentry__sentry | tests/sentry/incidents/handlers/condition/test_anomaly_detection_handler.py | {
"start": 684,
"end": 7469
} | class ____(ConditionTestCase):
condition = Condition.ANOMALY_DETECTION
def setUp(self) -> None:
super().setUp()
self.snuba_query = self.create_snuba_query()
self.subscription = create_snuba_subscription(self.project, "test", self.snuba_query)
(self.workflow, self.detector, self.detector_workflow, self.workflow_triggers) = (
self.create_detector_and_workflow()
)
packet = AnomalyDetectionUpdate(
subscription_id=str(self.subscription.id),
values={
"value": 1,
"source_id": str(self.subscription.id),
"subscription_id": str(self.subscription.id),
"timestamp": datetime.now(UTC),
},
timestamp=datetime.now(UTC),
entity="test-entity",
)
self.data_source = self.create_data_source(
source_id=str(packet.subscription_id),
organization=self.organization,
)
self.data_source.detectors.add(self.detector)
self.data_packet = DataPacket[AnomalyDetectionUpdate](
source_id=str(packet.subscription_id),
packet=packet,
)
self.dc = self.create_data_condition(
type=self.condition,
comparison={
"sensitivity": AnomalyDetectionSensitivity.MEDIUM,
"seasonality": AnomalyDetectionSeasonality.AUTO,
"threshold_type": AnomalyDetectionThresholdType.ABOVE_AND_BELOW,
},
condition_result=DetectorPriorityLevel.HIGH,
condition_group=self.workflow_triggers,
)
@mock.patch(
"sentry.seer.anomaly_detection.get_anomaly_data.SEER_ANOMALY_DETECTION_CONNECTION_POOL.urlopen"
)
def test_passes(self, mock_seer_request: mock.MagicMock) -> None:
seer_return_value: DetectAnomaliesResponse = {
"success": True,
"timeseries": [
{
"anomaly": {
"anomaly_score": 0.9,
"anomaly_type": AnomalyType.HIGH_CONFIDENCE,
},
"timestamp": 1,
"value": 10,
}
],
}
mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
assert (
self.dc.evaluate_value(self.data_packet.packet.values)
== DetectorPriorityLevel.HIGH.value
)
@mock.patch(
"sentry.seer.anomaly_detection.get_anomaly_data.SEER_ANOMALY_DETECTION_CONNECTION_POOL.urlopen"
)
def test_passes_medium(self, mock_seer_request: mock.MagicMock) -> None:
seer_return_value: DetectAnomaliesResponse = {
"success": True,
"timeseries": [
{
"anomaly": {
"anomaly_score": 0.2,
"anomaly_type": AnomalyType.LOW_CONFIDENCE,
},
"timestamp": 1,
"value": 10,
}
],
}
mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
assert (
self.dc.evaluate_value(self.data_packet.packet.values) == DetectorPriorityLevel.OK.value
)
@mock.patch(
"sentry.seer.anomaly_detection.get_anomaly_data.SEER_ANOMALY_DETECTION_CONNECTION_POOL.urlopen"
)
@mock.patch("sentry.seer.anomaly_detection.get_anomaly_data.logger")
def test_seer_call_timeout_error(
self, mock_logger: mock.MagicMock, mock_seer_request: mock.MagicMock
) -> None:
from urllib3.exceptions import TimeoutError
mock_seer_request.side_effect = TimeoutError
timeout_extra = {
"subscription_id": self.subscription.id,
"organization_id": self.organization.id,
"project_id": self.project.id,
"source_id": self.subscription.id,
"source_type": DataSourceType.SNUBA_QUERY_SUBSCRIPTION,
"dataset": self.subscription.snuba_query.dataset,
}
self.dc.evaluate_value(self.data_packet.packet.values)
mock_logger.warning.assert_called_with(
"Timeout error when hitting anomaly detection endpoint", extra=timeout_extra
)
@mock.patch(
"sentry.seer.anomaly_detection.get_anomaly_data.SEER_ANOMALY_DETECTION_CONNECTION_POOL.urlopen"
)
@mock.patch("sentry.seer.anomaly_detection.get_anomaly_data.logger")
def test_seer_call_empty_list(
self, mock_logger: mock.MagicMock, mock_seer_request: mock.MagicMock
) -> None:
seer_return_value: DetectAnomaliesResponse = {"success": True, "timeseries": []}
mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
self.dc.evaluate_value(self.data_packet.packet.values)
assert mock_logger.warning.call_args[0] == (
"Seer anomaly detection response returned no potential anomalies",
)
@mock.patch(
"sentry.seer.anomaly_detection.get_anomaly_data.SEER_ANOMALY_DETECTION_CONNECTION_POOL.urlopen"
)
@mock.patch("sentry.seer.anomaly_detection.get_anomaly_data.logger")
def test_seer_call_bad_status(
self, mock_logger: mock.MagicMock, mock_seer_request: mock.MagicMock
) -> None:
mock_seer_request.return_value = HTTPResponse(status=403)
extra = {
"subscription_id": self.subscription.id,
"organization_id": self.organization.id,
"project_id": self.project.id,
"source_id": self.subscription.id,
"source_type": DataSourceType.SNUBA_QUERY_SUBSCRIPTION,
"dataset": self.subscription.snuba_query.dataset,
"response_data": None,
}
self.dc.evaluate_value(self.data_packet.packet.values)
mock_logger.error.assert_called_with(
"Error when hitting Seer detect anomalies endpoint", extra=extra
)
@mock.patch(
"sentry.seer.anomaly_detection.get_anomaly_data.SEER_ANOMALY_DETECTION_CONNECTION_POOL.urlopen"
)
@mock.patch("sentry.seer.anomaly_detection.get_anomaly_data.logger")
def test_seer_call_failed_parse(
self, mock_logger: mock.MagicMock, mock_seer_request: mock.MagicMock
) -> None:
# XXX: coercing a response into something that will fail to parse
mock_seer_request.return_value = HTTPResponse(None, status=200) # type: ignore[arg-type]
self.dc.evaluate_value(self.data_packet.packet.values)
mock_logger.exception.assert_called_with(
"Failed to parse Seer anomaly detection response", extra=mock.ANY
)
| TestAnomalyDetectionHandler |
python | readthedocs__readthedocs.org | readthedocs/builds/tests/test_celery_task_router.py | {
"start": 276,
"end": 3320
} | class ____(TestCase):
def setUp(self):
self.project = fixture.get(
Project,
build_queue=None,
)
self.version = self.project.versions.first()
self.build = fixture.get(
Build,
version=self.version,
success=True,
)
for _ in range(TaskRouter.MIN_SUCCESSFUL_BUILDS + 5):
fixture.get(
Build,
version=self.version,
)
self.task = "readthedocs.projects.tasks.builds.update_docs_task"
self.args = (self.version.pk,)
self.kwargs = {
"build_pk": self.build.pk,
}
self.router = TaskRouter()
def test_project_custom_queue(self):
self.project.build_queue = "build:custom"
self.project.save()
self.assertEqual(
self.router.route_for_task(self.task, self.args, self.kwargs),
"build:custom",
)
def test_used_conda_in_last_builds(self):
self.build._config = {"conda": {"file": "docs/environment.yml"}}
self.build.save()
self.assertEqual(
self.router.route_for_task(self.task, self.args, self.kwargs),
TaskRouter.BUILD_LARGE_QUEUE,
)
def test_used_conda_in_last_failed_build(self):
self.build._config = {"conda": {"file": "docs/environment.yml"}}
self.build.success = False
self.build.save()
self.assertEqual(
self.router.route_for_task(self.task, self.args, self.kwargs),
TaskRouter.BUILD_LARGE_QUEUE,
)
def test_more_than_n_builds(self):
self.assertIsNone(
self.router.route_for_task(self.task, self.args, self.kwargs),
)
def test_non_build_task(self):
self.assertIsNone(
self.router.route_for_task("non_build_task", self.args, self.kwargs),
)
def test_no_build_pk(self):
self.assertIsNone(
self.router.route_for_task(self.task, self.args, {}),
)
def test_external_version(self):
external_version = fixture.get(
Version,
project=self.project,
slug="pull-request",
type=EXTERNAL,
)
default_version = self.project.versions.get(
slug=self.project.get_default_version()
)
default_version_build = fixture.get(
Build,
version=default_version,
project=self.project,
builder="build-default-a1b2c3",
)
args = (external_version.pk,)
kwargs = {"build_pk": default_version_build.pk}
self.assertEqual(
self.router.route_for_task(self.task, args, kwargs),
TaskRouter.BUILD_DEFAULT_QUEUE,
)
default_version_build.builder = "build-large-a1b2c3"
default_version_build.save()
self.assertEqual(
self.router.route_for_task(self.task, args, kwargs),
TaskRouter.BUILD_LARGE_QUEUE,
)
| TaskRouterTests |
python | google__jax | tests/magma_linalg_test.py | {
"start": 960,
"end": 5315
} | class ____(jtu.JaxTestCase):
@jtu.sample_product(
shape=[(0, 0), (4, 4), (5, 5), (50, 50), (2, 6, 6)],
dtype=float_types + complex_types,
compute_left_eigenvectors=[False, True],
compute_right_eigenvectors=[False, True],
)
@jtu.run_on_devices("gpu")
def testEig(self, shape, dtype, compute_left_eigenvectors,
compute_right_eigenvectors):
if not gpu_solver.has_magma():
self.skipTest("MAGMA is not installed or can't be loaded.")
# TODO(b/377907938), TODO(danfm): Debug issues MAGMA support for
# complex128 in some configurations.
if dtype == np.complex128:
self.skipTest("MAGMA support for complex128 types is flaky.")
rng = jtu.rand_default(self.rng())
n = shape[-1]
args_maker = lambda: [rng(shape, dtype)]
# Norm, adjusted for dimension and type.
def norm(x):
norm = np.linalg.norm(x, axis=(-2, -1))
return norm / ((n + 1) * jnp.finfo(dtype).eps)
def check_right_eigenvectors(a, w, vr):
self.assertTrue(
np.all(norm(np.matmul(a, vr) - w[..., None, :] * vr) < 100))
def check_left_eigenvectors(a, w, vl):
rank = len(a.shape)
aH = jnp.conj(a.transpose(list(range(rank - 2)) + [rank - 1, rank - 2]))
wC = jnp.conj(w)
check_right_eigenvectors(aH, wC, vl)
a, = args_maker()
results = lax_linalg.eig(
a, compute_left_eigenvectors=compute_left_eigenvectors,
compute_right_eigenvectors=compute_right_eigenvectors,
use_magma=True)
w = results[0]
if compute_left_eigenvectors:
check_left_eigenvectors(a, w, results[1])
if compute_right_eigenvectors:
check_right_eigenvectors(a, w, results[1 + compute_left_eigenvectors])
self._CompileAndCheck(jnp.linalg.eig, args_maker, rtol=1e-3)
@jtu.sample_product(
shape=[(4, 4), (5, 5), (50, 50), (2, 6, 6)],
dtype=float_types + complex_types,
compute_left_eigenvectors=[False, True],
compute_right_eigenvectors=[False, True],
)
@jtu.run_on_devices("gpu")
def testEigHandlesNanInputs(self, shape, dtype, compute_left_eigenvectors,
compute_right_eigenvectors):
"""Verifies that `eig` fails gracefully if given non-finite inputs."""
if not gpu_solver.has_magma():
self.skipTest("MAGMA is not installed or can't be loaded.")
# TODO(b/377907938), TODO(danfm): Debug issues MAGMA support for
# complex128 in some configurations.
if dtype == np.complex128:
self.skipTest("MAGMA support for complex128 types is flaky.")
a = jnp.full(shape, jnp.nan, dtype)
results = lax_linalg.eig(
a, compute_left_eigenvectors=compute_left_eigenvectors,
compute_right_eigenvectors=compute_right_eigenvectors,
use_magma=True)
for result in results:
self.assertTrue(np.all(np.isnan(result)))
def testEigMagmaConfig(self):
if not gpu_solver.has_magma():
self.skipTest("MAGMA is not installed or can't be loaded.")
rng = jtu.rand_default(self.rng())
a = rng((5, 5), np.float32)
with config.gpu_use_magma("on"):
hlo = jax.jit(partial(lax_linalg.eig, use_magma=True)).lower(a).as_text()
self.assertIn('magma = "on"', hlo)
@jtu.sample_product(
shape=[(3, 4), (3, 3), (4, 3), (4, 3)],
dtype=float_types + complex_types,
)
@jtu.run_on_devices("gpu")
def testPivotedQrFactorization(self, shape, dtype):
if not gpu_solver.has_magma():
self.skipTest("MAGMA is not installed or can't be loaded.")
rng = jtu.rand_default(self.rng())
lax_func = partial(lax_linalg.qr, full_matrices=True, pivoting=True, use_magma=True)
sp_func = partial(jax.scipy.linalg.qr, mode="full", pivoting=True)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(sp_func, lax_func, args_maker, rtol=1E-5, atol=1E-5)
self._CompileAndCheck(lax_func, args_maker)
def testPivotedQrFactorizationMagmaConfig(self):
if not gpu_solver.has_magma():
self.skipTest("MAGMA is not installed or can't be loaded.")
rng = jtu.rand_default(self.rng())
a = rng((5, 5), np.float32)
with config.gpu_use_magma("on"):
hlo = jax.jit(partial(lax_linalg.qr, pivoting=True, use_magma=True)).lower(a).as_text()
self.assertIn('magma = "on"', hlo)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| MagmaLinalgTest |
python | pytorch__pytorch | test/fx/test_dce_pass.py | {
"start": 223,
"end": 13832
} | class ____(TestCase):
def _custom_is_impure_node(self, node: torch.fx.Node) -> bool:
if node.is_impure():
return True
# a custom function that defines add operators as impure.
if node.target == torch.ops.aten.add:
return True
return False
def _has_nodes_without_users(self, m: torch.fx.GraphModule, custom: bool = False):
for node in m.graph.nodes:
if (not custom and node.is_impure()) or (
custom and self._custom_is_impure_node(node)
):
continue
if len(node.users) == 0:
return True
return False
def _get_num_placeholders(self, m: torch.fx.GraphModule) -> int:
count = 0
for node in m.graph.nodes:
if node.op == "placeholder":
count += 1
return count
def _run_dce_and_test(
self,
m: torch.nn.Module,
expect_dce_changes: bool,
modules_to_be_leafs: Optional[set[type]] = None,
custom: bool = False,
):
class TestTracer(torch.fx.Tracer):
def is_leaf_module(self, m, qualname):
if modules_to_be_leafs and type(m) in modules_to_be_leafs:
return True
return super().trace(m, qualname)
traced: torch.fx.GraphModule = torch.fx.GraphModule(m, TestTracer().trace(m))
print(str(traced.graph))
# Verify there are nodes without users (if expected).
has_nodes_without_users = self._has_nodes_without_users(traced, custom=custom)
if expect_dce_changes:
self.assertTrue(has_nodes_without_users)
else:
self.assertFalse(has_nodes_without_users)
# Get the original number of placeholders to verify it doesn't change
# during DCE.
orig_num_phs = self._get_num_placeholders(traced)
if custom:
changed = traced.graph.eliminate_dead_code(
is_impure_node=self._custom_is_impure_node
)
else:
changed = traced.graph.eliminate_dead_code()
self.assertTrue(changed if expect_dce_changes else not changed)
# Verify there are no nodes without users after DCE is run.
self.assertFalse(self._has_nodes_without_users(traced, custom=custom))
new_num_phs = self._get_num_placeholders(traced)
self.assertEqual(orig_num_phs, new_num_phs)
traced.recompile()
# Make sure we run and get the same results before/after DCE.
inputs = [torch.tensor([1.5])] * new_num_phs
inputs_copy = copy.deepcopy(inputs)
self.assertTrue(torch.equal(m(*inputs), traced(*inputs_copy)))
def test_simple(self):
"""
Tests that a single node in the graph is DCE'd correctly.
"""
class TestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.attr_1 = torch.nn.Parameter(torch.tensor([-0.9]))
def forward(self, x):
a = x + 1 # noqa: F841
return x + self.attr_1
self._run_dce_and_test(TestModule(), expect_dce_changes=True)
def test_dead_chain(self):
"""
Tests that a chain of two nodes in the graph are DCE'd correctly.
"""
class TestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.attr_1 = torch.nn.Parameter(torch.tensor([-0.9]))
def forward(self, x):
a = x + 1
b = a * 7 # noqa: F841
return x + self.attr_1
self._run_dce_and_test(TestModule(), expect_dce_changes=True)
def test_dead_getattr(self):
"""
Tests that a getatrr in the graph is DCE'd correctly.
"""
class TestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.attr_1 = torch.nn.Parameter(torch.tensor([-0.9]))
def forward(self, x):
a = x + 1
b = a * self.attr_1 # noqa: F841
return x + 11
self._run_dce_and_test(TestModule(), expect_dce_changes=True)
def test_dead_placeholder(self):
"""
Tests that a placeholder in the graph is not DCE'd, as that would change
the function signature.
"""
class TestModule(torch.nn.Module):
def forward(self, x, y):
return x + 7
self._run_dce_and_test(TestModule(), expect_dce_changes=False)
def test_dead_placeholder_with_user(self):
"""
Tests that a placeholder in the graph is not DCE'd, as that would change
the function signature. Also verifies that a dead node that uses the
placeholder is DCE'd.
"""
class TestModule(torch.nn.Module):
def forward(self, x, y):
a = y + 2 # noqa: F841
return x + 7
self._run_dce_and_test(TestModule(), expect_dce_changes=True)
def test_keep_module_with_side_effects(self):
"""
Test that DCE doesn't remove a module if it's specified as having side effects.
"""
class ReLUImpure(torch.nn.ReLU):
_is_impure = True
class TestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.relu = ReLUImpure()
def forward(self, a: torch.Tensor) -> torch.Tensor:
r = self.relu(a) # noqa: F841
return a * 2
self._run_dce_and_test(
TestModule(), expect_dce_changes=False, modules_to_be_leafs={ReLUImpure}
)
def test_keep_torch_assert(self):
"""
Test that DCE doesn't remove torch._assert since it has side effects.
"""
class TestModule(torch.nn.Module):
def forward(self, a: torch.Tensor) -> torch.Tensor:
torch._assert(torch.equal(a, a), "a must equal a")
return a * 2
# Note: Don't need to specify torch._assert as having side effects
# because it's known to.
self._run_dce_and_test(TestModule(), expect_dce_changes=False)
def test_keep_setitem(self):
"""
Fix issue: https://github.com/pytorch/pytorch/issues/145697
Test that DCE doesn't remove operator.setitem since it has side effects.
"""
class TestModule(torch.nn.Module):
def forward(self, a: torch.Tensor) -> torch.Tensor:
a[0, 0, 0, 0] *= 2.0
return a * 2
def dce_backend(gm, inputs, **kwargs):
import torch._inductor.constant_folding
torch._inductor.constant_folding.constant_fold(gm)
return gm
x = torch.randn(1, 3, 224, 224)
dce_x = x.detach().clone()
model = TestModule().eval()
dce_mod = torch.compile(copy.deepcopy(model), backend=dce_backend)
with torch.inference_mode():
eager_out = model(x)
out = dce_mod(dce_x)
self.assertEqual(eager_out, out, atol=1e-5, rtol=1e-5)
def test_impure_nodes_args(self):
"""
Test that DCE doesn't remove call_function nodes with side effects.
"""
class TestModule(torch.nn.Module):
def forward(self, a: torch.Tensor) -> torch.Tensor:
torch._ops.ops.aten.add_.Tensor(a, 1)
return a * 2
# %add_ node should not be removed because it has side effects.
self._run_dce_and_test(TestModule(), expect_dce_changes=False)
def test_impure_random(self):
"""
Test that DCE doesn't remove call_function for torch.rand and other random functions.
Tests both FX tracing and AOT compilation (issue #151524).
"""
class TestModule(torch.nn.Module):
def forward(self, a: torch.Tensor) -> torch.Tensor:
x = torch.rand([10]) # noqa: F841
return a * 2
# Test FX tracing + DCE
self._run_dce_and_test(TestModule(), expect_dce_changes=False)
# Test comprehensive random functions in AOT compilation
class ComprehensiveRandomModule(torch.nn.Module):
def forward(self, x: torch.Tensor) -> torch.Tensor:
# Test various random functions that should be preserved
a = torch.rand(1) # noqa: F841
b = torch.randn(1) # noqa: F841
c = torch.randint(0, 10, (1,)) # noqa: F841
d = torch.randperm(5) # noqa: F841
e = torch.normal(0, 1, (1,)) # noqa: F841
f = torch.poisson(torch.tensor([1.0])) # noqa: F841
g = torch.rand(1) # Used
# Test that random operations with explicit generators are also preserved
gen = torch.Generator().manual_seed(123)
h = torch.rand(1, generator=gen) # noqa: F841
i = torch.randn(1, generator=gen) # noqa: F841
j = torch.rand(1, generator=gen) # Used
return x + g + j
def aot_backend(gm, example_inputs):
def count_random_ops():
return len(
[
n
for n in gm.graph.nodes
if n.op == "call_function"
and any(
fn in str(n.target)
for fn in [
"rand",
"randn",
"randint",
"randperm",
"normal",
"poisson",
]
)
]
)
rand_count = count_random_ops()
gm.graph.eliminate_dead_code()
self.assertEqual(
count_random_ops(), rand_count, "Random ops should be preserved"
)
return gm.forward
model = ComprehensiveRandomModule()
torch.manual_seed(42)
eager_result = model(torch.tensor([1.0]))
torch.manual_seed(42)
compiled_result = torch.compile(model, backend=aot_backend)(torch.tensor([1.0]))
self.assertEqual(eager_result, compiled_result)
def test_impure_kwargs(self):
"""
Test that DCE doesn't remove call_function nodes with side effects on kwargs.
"""
class TestModule(torch.nn.Module):
def forward(self, a: torch.Tensor) -> torch.Tensor:
b = a + 1
torch._ops.ops.aten.add.out(b, b, out=a, alpha=2)
return a
# %add_out node should not be removed because it has side effects.
self._run_dce_and_test(TestModule(), expect_dce_changes=False)
def test_impure_custom(self):
"""
Test that DCE doesn't remove nodes marked as impure by a custom function.
"""
class TestModule(torch.nn.Module):
def forward(self, a: torch.Tensor) -> torch.Tensor:
b = a + 1
c = torch._ops.ops.aten.add(b, b) # noqa: F841
return a
# %add_out node should not be removed because it has side effects.
self._run_dce_and_test(TestModule(), expect_dce_changes=False, custom=True)
@unittest.skipIf(IS_MACOS, "Not working on macos")
def test_keep_collectives(self):
"""
Test that DCE doesn't remote collective ops even the results are not used.
"""
class TestModule(torch.nn.Module):
def forward(
self, a: torch.Tensor, b: torch.Tensor, c: torch.Tensor
) -> torch.Tensor:
d = torch.ops.aten.mul.Tensor(a, b)
e = torch.ops.aten.mul.Tensor(a, c)
future = torch.ops._c10d_functional.all_reduce.default(e, "sum", "0")
torch.ops._c10d_functional.wait_tensor.default(future)
return d
torch.distributed.init_process_group(
backend="fake",
world_size=2,
rank=0,
)
# collective nodes should not be removed because they have side effects.
self._run_dce_and_test(TestModule(), expect_dce_changes=False, custom=False)
torch.distributed.destroy_process_group()
@unittest.skipIf(IS_MACOS, "Not working on macos")
def test_keep_collectives_no_overload(self):
"""
Test that DCE doesn't remote collective ops (no overload version) even the results are not used.
"""
class TestModule(torch.nn.Module):
def forward(
self, a: torch.Tensor, b: torch.Tensor, c: torch.Tensor
) -> torch.Tensor:
d = torch.ops.aten.mul(a, b)
e = torch.ops.aten.mul(a, c)
future = torch.ops._c10d_functional.all_reduce(e, "sum", "0")
torch.ops._c10d_functional.wait_tensor(future)
return d
torch.distributed.init_process_group(
backend="fake",
world_size=2,
rank=0,
)
# collective nodes should not be removed because they have side effects.
self._run_dce_and_test(TestModule(), expect_dce_changes=False, custom=False)
torch.distributed.destroy_process_group()
if __name__ == "__main__":
raise_on_run_directly("test/test_fx.py")
| TestDCE |
python | langchain-ai__langchain | libs/cli/langchain_cli/integration_template/integration_template/tools.py | {
"start": 687,
"end": 2654
} | class ____(BaseTool): # type: ignore[override]
"""__ModuleName__ tool.
Setup:
# TODO: Replace with relevant packages, env vars.
Install `__package_name__` and set environment variable
`__MODULE_NAME___API_KEY`.
```bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
```
Instantiation:
```python
tool = __ModuleName__Tool(
# TODO: init params
)
```
Invocation with args:
```python
# TODO: invoke args
tool.invoke({...})
```
```python
# TODO: output of invocation
```
Invocation with ToolCall:
```python
# TODO: invoke args
tool.invoke({"args": {...}, "id": "1", "name": tool.name, "type": "tool_call"})
```
```python
# TODO: output of invocation
```
""" # noqa: E501
# TODO: Set tool name and description
name: str = "TODO: Tool name"
"""The name that is passed to the model when performing tool calling."""
description: str = "TODO: Tool description."
"""The description that is passed to the model when performing tool calling."""
args_schema: Type[BaseModel] = __ModuleName__ToolInput
"""The schema that is passed to the model when performing tool calling."""
# TODO: Add any other init params for the tool.
# param1: str | None
# """param1 determines foobar"""
# TODO: Replaced (a, b) with real tool arguments.
def _run(
self, a: int, b: int, *, run_manager: CallbackManagerForToolRun | None = None
) -> str:
return str(a + b + 80)
# TODO: Implement if tool has native async functionality, otherwise delete.
# async def _arun(
# self,
# a: int,
# b: int,
# *,
# run_manager: AsyncCallbackManagerForToolRun | None = None,
# ) -> str:
# ...
| __ModuleName__Tool |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 92161,
"end": 92620
} | class ____(_PrintableStructure):
_fields_ = [
('supportedSchedulers', c_uint * NVML_SUPPORTED_VGPU_SCHEDULER_POLICY_COUNT),
('maxTimeslice', c_uint),
('minTimeslice', c_uint),
('isArrModeSupported', c_uint),
('maxFrequencyForARR', c_uint),
('minFrequencyForARR', c_uint),
('maxAvgFactorForARR', c_uint),
('minAvgFactorForARR', c_uint),
]
| c_nvmlVgpuSchedulerCapabilities_t |
python | pytorch__pytorch | test/distributed/test_c10d_nccl.py | {
"start": 3674,
"end": 7528
} | class ____(TestCase):
@retry_on_connect_failures
@requires_nccl()
@skip_but_pass_in_sandcastle_if(not TEST_CUDA, "No GPUs available, skipping test")
def test_common_errors(self):
vars = {
"WORLD_SIZE": "1",
"RANK": "0",
"MASTER_ADDR": "127.0.0.1",
"MASTER_PORT": str(common.find_free_port()),
}
class Env:
def __init__(self, vars):
self.env_patcher = mock.patch.dict(os.environ, vars, clear=True)
def __enter__(self):
self.env_patcher.start()
def __exit__(self, type, value, traceback):
self.env_patcher.stop()
def without(d, key):
d = d.copy()
d.pop(key)
return d
def withouts(d, keys):
d = d.copy()
for key in keys:
d.pop(key)
return d
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
with self.assertRaisesRegex(ValueError, "WORLD_SIZE expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
with self.assertRaisesRegex(ValueError, "RANK expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", rank=0)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
c10d.init_process_group(backend="nccl", rank=0, world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(vars):
c10d.init_process_group(backend="nccl")
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "MASTER_ADDR")):
self.assertEqual(None, os.environ.get("MASTER_ADDR"))
with self.assertRaisesRegex(ValueError, "MASTER_ADDR expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "MASTER_PORT")):
self.assertEqual(None, os.environ.get("MASTER_PORT"))
with self.assertRaisesRegex(ValueError, "MASTER_PORT expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous(f"env://?world_size={1}")
_, _, size = next(gen)
self.assertEqual(size, 1)
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
gen = c10d.rendezvous(f"env://?rank={0}")
_, rank, _ = next(gen)
self.assertEqual(rank, 0)
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous(f"env://?rank={0}&world_size={1}")
_, rank, size = next(gen)
self.assertEqual(rank, 0)
self.assertEqual(size, 1)
| RendezvousEnvTest |
python | django__django | django/contrib/admin/helpers.py | {
"start": 9649,
"end": 15214
} | class ____:
"""
A wrapper around an inline formset for use in the admin system.
"""
def __init__(
self,
inline,
formset,
fieldsets,
prepopulated_fields=None,
readonly_fields=None,
model_admin=None,
has_add_permission=True,
has_change_permission=True,
has_delete_permission=True,
has_view_permission=True,
):
self.opts = inline
self.formset = formset
self.fieldsets = fieldsets
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
if prepopulated_fields is None:
prepopulated_fields = {}
self.prepopulated_fields = prepopulated_fields
self.classes = " ".join(inline.classes) if inline.classes else ""
self.has_add_permission = has_add_permission
self.has_change_permission = has_change_permission
self.has_delete_permission = has_delete_permission
self.has_view_permission = has_view_permission
def __iter__(self):
if self.has_change_permission:
readonly_fields_for_editing = self.readonly_fields
else:
readonly_fields_for_editing = self.readonly_fields + flatten_fieldsets(
self.fieldsets
)
for form, original in zip(
self.formset.initial_forms, self.formset.get_queryset()
):
view_on_site_url = self.opts.get_view_on_site_url(original)
yield InlineAdminForm(
self.formset,
form,
self.fieldsets,
self.prepopulated_fields,
original,
readonly_fields_for_editing,
model_admin=self.opts,
view_on_site_url=view_on_site_url,
)
for form in self.formset.extra_forms:
yield InlineAdminForm(
self.formset,
form,
self.fieldsets,
self.prepopulated_fields,
None,
self.readonly_fields,
model_admin=self.opts,
)
if self.has_add_permission:
yield InlineAdminForm(
self.formset,
self.formset.empty_form,
self.fieldsets,
self.prepopulated_fields,
None,
self.readonly_fields,
model_admin=self.opts,
)
def fields(self):
fk = getattr(self.formset, "fk", None)
empty_form = self.formset.empty_form
meta_labels = empty_form._meta.labels or {}
meta_help_texts = empty_form._meta.help_texts or {}
for i, field_name in enumerate(flatten_fieldsets(self.fieldsets)):
if fk and fk.name == field_name:
continue
if not self.has_change_permission or field_name in self.readonly_fields:
form_field = empty_form.fields.get(field_name)
widget_is_hidden = False
if form_field is not None:
widget_is_hidden = form_field.widget.is_hidden
yield {
"name": field_name,
"label": meta_labels.get(field_name)
or label_for_field(
field_name,
self.opts.model,
self.opts,
form=empty_form,
),
"widget": {"is_hidden": widget_is_hidden},
"required": False,
"help_text": meta_help_texts.get(field_name)
or help_text_for_field(field_name, self.opts.model),
}
else:
form_field = empty_form.fields[field_name]
label = form_field.label
if label is None:
label = label_for_field(
field_name, self.opts.model, self.opts, form=empty_form
)
yield {
"name": field_name,
"label": label,
"widget": form_field.widget,
"required": form_field.required,
"help_text": form_field.help_text,
}
def inline_formset_data(self):
verbose_name = self.opts.verbose_name
return json.dumps(
{
"name": "#%s" % self.formset.prefix,
"options": {
"prefix": self.formset.prefix,
"addText": gettext("Add another %(verbose_name)s")
% {
"verbose_name": capfirst(verbose_name),
},
"deleteText": gettext("Remove"),
},
}
)
@property
def forms(self):
return self.formset.forms
@cached_property
def is_collapsible(self):
if any(self.formset.errors):
return False
return "collapse" in self.classes
def non_form_errors(self):
return self.formset.non_form_errors()
@property
def is_bound(self):
return self.formset.is_bound
@property
def total_form_count(self):
return self.formset.total_form_count
@property
def media(self):
media = self.opts.media + self.formset.media
for fs in self:
media += fs.media
return media
| InlineAdminFormSet |
python | openai__gym | gym/error.py | {
"start": 250,
"end": 384
} | class ____(Unregistered):
"""Raised when the user requests an env from the registry that does not actually exist."""
| UnregisteredEnv |
python | huggingface__transformers | src/transformers/models/sew_d/modeling_sew_d.py | {
"start": 34181,
"end": 35291
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.self = DisentangledSelfAttention(config)
self.output = SEWDSelfOutput(config)
self.config = config
def forward(
self,
hidden_states,
attention_mask,
output_attentions=False,
query_states=None,
relative_pos=None,
rel_embeddings=None,
):
self_output = self.self(
hidden_states,
attention_mask,
output_attentions,
query_states=query_states,
relative_pos=relative_pos,
rel_embeddings=rel_embeddings,
)
if output_attentions:
self_output, att_matrix = self_output
if query_states is None:
query_states = hidden_states
attention_output = self.output(self_output, query_states)
if output_attentions:
return (attention_output, att_matrix)
else:
return attention_output
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->SEWD
| SEWDAttention |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 119087,
"end": 119339
} | class ____(sgqlc.types.Enum):
"""Properties by which team connections can be ordered.
Enumeration Choices:
* `NAME`: Allows ordering a list of teams by name.
"""
__schema__ = github_schema
__choices__ = ("NAME",)
| TeamOrderField |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/appflow.py | {
"start": 16147,
"end": 20824
} | class ____(ShortCircuitOperator, AwsBaseHookMixin[AppflowHook]):
"""
Short-circuit in case of an empty AppFlow's run.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AppflowRecordsShortCircuitOperator`
:param flow_name: The flow name
:param appflow_run_task_id: Run task ID from where this operator should extract the execution ID
:param ignore_downstream_trigger_rules: Ignore downstream trigger rules
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
aws_hook_class = AppflowHook
template_fields = aws_template_fields()
ui_color = "#33ffec" # Light blue
def __init__(
self,
*,
flow_name: str,
appflow_run_task_id: str,
ignore_downstream_trigger_rules: bool = True,
aws_conn_id: str | None = "aws_default",
region_name: str | None = None,
verify: bool | str | None = None,
botocore_config: dict | None = None,
**kwargs,
) -> None:
hook_params = AwsHookParams.from_constructor(
aws_conn_id, region_name, verify, botocore_config, additional_params=kwargs
)
super().__init__(
python_callable=self._has_new_records_func,
op_kwargs={
"flow_name": flow_name,
"appflow_run_task_id": appflow_run_task_id,
},
ignore_downstream_trigger_rules=ignore_downstream_trigger_rules,
**kwargs,
)
self.aws_conn_id = hook_params.aws_conn_id
self.region_name = hook_params.region_name
self.verify = hook_params.verify
self.botocore_config = hook_params.botocore_config
self.validate_attributes()
@staticmethod
def _get_target_execution_id(
records: list[ExecutionRecordTypeDef], execution_id: str
) -> ExecutionRecordTypeDef | None:
for record in records:
if record.get("executionId") == execution_id:
return record
return None
def _has_new_records_func(self, **kwargs) -> bool:
appflow_task_id = kwargs["appflow_run_task_id"]
self.log.info("appflow_task_id: %s", appflow_task_id)
flow_name = kwargs["flow_name"]
self.log.info("flow_name: %s", flow_name)
af_client = self.hook.conn
task_instance = kwargs["task_instance"]
execution_id = task_instance.xcom_pull(task_ids=appflow_task_id, key="execution_id")
if not execution_id:
raise AirflowException(f"No execution_id found from task_id {appflow_task_id}!")
self.log.info("execution_id: %s", execution_id)
args = {"flowName": flow_name, "maxResults": 100}
response: DescribeFlowExecutionRecordsResponseTypeDef = cast(
"DescribeFlowExecutionRecordsResponseTypeDef", {}
)
record = None
while not record:
if "nextToken" in response:
response = af_client.describe_flow_execution_records(nextToken=response["nextToken"], **args)
else:
response = af_client.describe_flow_execution_records(**args)
record = AppflowRecordsShortCircuitOperator._get_target_execution_id(
response["flowExecutions"], execution_id
)
if not record and "nextToken" not in response:
raise AirflowException(f"Flow ({execution_id}) without recordsProcessed info.")
execution = record.get("executionResult", {})
if "recordsProcessed" not in execution:
raise AirflowException(f"Flow ({execution_id}) without recordsProcessed info!")
records_processed = execution["recordsProcessed"]
self.log.info("records_processed: %d", records_processed)
task_instance.xcom_push("records_processed", records_processed)
return records_processed > 0
| AppflowRecordsShortCircuitOperator |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.