language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | apache__airflow | providers/standard/src/airflow/providers/standard/exceptions.py | {
"start": 2264,
"end": 2416
} | class ____(AirflowException):
"""Raised when an ApprovalOperator receives a "Reject" response when fail_on_reject is set to True."""
| HITLRejectException |
python | Lightning-AI__lightning | src/lightning/pytorch/utilities/combined_loader.py | {
"start": 7708,
"end": 16314
} | class ____(Iterable):
"""Combines different iterables under specific sampling modes.
Args:
iterables: the iterable or collection of iterables to sample from.
mode: the mode to use. The following modes are supported:
* ``min_size``: stops after the shortest iterable (the one with the lowest number of items) is done.
* ``max_size_cycle``: stops after the longest iterable (the one with most items) is done, while cycling
through the rest of the iterables.
* ``max_size``: stops after the longest iterable (the one with most items) is done, while returning None
for the exhausted iterables.
* ``sequential``: completely consumes each iterable sequentially, and returns a triplet
``(data, idx, iterable_idx)``
Examples:
>>> from torch.utils.data import DataLoader
>>> iterables = {'a': DataLoader(range(6), batch_size=4),
... 'b': DataLoader(range(15), batch_size=5)}
>>> combined_loader = CombinedLoader(iterables, 'max_size_cycle')
>>> _ = iter(combined_loader)
>>> len(combined_loader)
3
>>> for batch, batch_idx, dataloader_idx in combined_loader:
... print(f"{batch}, {batch_idx=}, {dataloader_idx=}")
{'a': tensor([0, 1, 2, 3]), 'b': tensor([0, 1, 2, 3, 4])}, batch_idx=0, dataloader_idx=0
{'a': tensor([4, 5]), 'b': tensor([5, 6, 7, 8, 9])}, batch_idx=1, dataloader_idx=0
{'a': tensor([0, 1, 2, 3]), 'b': tensor([10, 11, 12, 13, 14])}, batch_idx=2, dataloader_idx=0
>>> combined_loader = CombinedLoader(iterables, 'max_size')
>>> _ = iter(combined_loader)
>>> len(combined_loader)
3
>>> for batch, batch_idx, dataloader_idx in combined_loader:
... print(f"{batch}, {batch_idx=}, {dataloader_idx=}")
{'a': tensor([0, 1, 2, 3]), 'b': tensor([0, 1, 2, 3, 4])}, batch_idx=0, dataloader_idx=0
{'a': tensor([4, 5]), 'b': tensor([5, 6, 7, 8, 9])}, batch_idx=1, dataloader_idx=0
{'a': None, 'b': tensor([10, 11, 12, 13, 14])}, batch_idx=2, dataloader_idx=0
>>> combined_loader = CombinedLoader(iterables, 'min_size')
>>> _ = iter(combined_loader)
>>> len(combined_loader)
2
>>> for batch, batch_idx, dataloader_idx in combined_loader:
... print(f"{batch}, {batch_idx=}, {dataloader_idx=}")
{'a': tensor([0, 1, 2, 3]), 'b': tensor([0, 1, 2, 3, 4])}, batch_idx=0, dataloader_idx=0
{'a': tensor([4, 5]), 'b': tensor([5, 6, 7, 8, 9])}, batch_idx=1, dataloader_idx=0
>>> combined_loader = CombinedLoader(iterables, 'sequential')
>>> _ = iter(combined_loader)
>>> len(combined_loader)
5
>>> for batch, batch_idx, dataloader_idx in combined_loader:
... print(f"{batch}, {batch_idx=}, {dataloader_idx=}")
tensor([0, 1, 2, 3]), batch_idx=0, dataloader_idx=0
tensor([4, 5]), batch_idx=1, dataloader_idx=0
tensor([0, 1, 2, 3, 4]), batch_idx=0, dataloader_idx=1
tensor([5, 6, 7, 8, 9]), batch_idx=1, dataloader_idx=1
tensor([10, 11, 12, 13, 14]), batch_idx=2, dataloader_idx=1
"""
def __init__(self, iterables: Any, mode: _LITERAL_SUPPORTED_MODES = "min_size") -> None:
if mode not in _SUPPORTED_MODES:
raise ValueError(f"Unsupported mode {mode!r}, please select one of: {list(_SUPPORTED_MODES)}.")
self._iterables = iterables
self._flattened, self._spec = _tree_flatten(iterables)
self._mode = mode
self._iterator: Optional[_ModeIterator] = None
self._limits: Optional[list[Union[int, float]]] = None
@property
def iterables(self) -> Any:
"""Return the original collection of iterables."""
return self._iterables
@property
def sampler(self) -> Any:
"""Return a collections of samplers extracted from iterables."""
return _map_and_unflatten(lambda x: getattr(x, "sampler", None), self.flattened, self._spec)
@property
def batch_sampler(self) -> Any:
"""Return a collections of batch samplers extracted from iterables."""
return _map_and_unflatten(lambda x: getattr(x, "batch_sampler", None), self.flattened, self._spec)
@property
def flattened(self) -> list[Any]:
"""Return the flat list of iterables."""
return self._flattened
@flattened.setter
def flattened(self, flattened: list[Any]) -> None:
"""Setter to conveniently update the list of iterables."""
if len(flattened) != len(self._flattened):
raise ValueError(
f"Mismatch in flattened length ({len(flattened)}) and existing length ({len(self._flattened)})"
)
# update the iterable collection
self._iterables = tree_unflatten(flattened, self._spec)
self._flattened = flattened
@property
def limits(self) -> Optional[list[Union[int, float]]]:
"""Optional limits per iterator."""
return self._limits
@limits.setter
def limits(self, limits: Optional[Union[int, float, list[Union[int, float]]]]) -> None:
if isinstance(limits, (int, float)):
limits = [limits] * len(self.flattened)
elif isinstance(limits, list) and len(limits) != len(self.flattened):
raise ValueError(
f"Mismatch in number of limits ({len(limits)}) and number of iterables ({len(self.flattened)})"
)
self._limits = limits
def __next__(self) -> _ITERATOR_RETURN:
assert self._iterator is not None
out = next(self._iterator)
if isinstance(self._iterator, _Sequential):
return out
out, batch_idx, dataloader_idx = out
return tree_unflatten(out, self._spec), batch_idx, dataloader_idx
@override
def __iter__(self) -> Self:
cls = _SUPPORTED_MODES[self._mode]["iterator"]
iterator = cls(self.flattened, self._limits)
iter(iterator)
self._iterator = iterator
return self
def __len__(self) -> int:
"""Compute the number of batches."""
if self._iterator is None:
raise RuntimeError("Please call `iter(combined_loader)` first.")
return len(self._iterator)
def reset(self) -> None:
"""Reset the state and shutdown any workers."""
if self._iterator is not None:
self._iterator.reset()
self._iterator = None
for iterable in self.flattened:
_shutdown_workers_and_reset_iterator(iterable)
def _dataset_length(self) -> int:
"""Compute the total length of the datasets according to the current mode."""
datasets = [getattr(dl, "dataset", None) for dl in self.flattened]
lengths = [length for ds in datasets if (length := sized_len(ds)) is not None]
if not lengths:
raise NotImplementedError("All datasets are iterable-style datasets.")
fn = _SUPPORTED_MODES[self._mode]["fn"]
return fn(lengths)
def _state_dicts(self) -> list[dict[str, Any]]:
"""Returns the list of state dicts for iterables in `self.flattened` that are stateful."""
return [loader.state_dict() for loader in self.flattened if isinstance(loader, _Stateful)]
def _load_state_dicts(self, states: list[dict[str, Any]]) -> None:
"""Loads the state dicts for iterables in `self.flattened` that are stateful."""
if not states:
return
stateful_loaders = [loader for loader in self.flattened if isinstance(loader, _Stateful)]
if len(stateful_loaders) != len(states):
raise RuntimeError(
f"The CombinedLoader has {len(stateful_loaders)} stateful loaders, but found {len(states)} states"
" in the checkpoint. Please make sure you define the same dataloaders that were used when saving"
" the checkpoint."
)
for loader, state_dict in zip(stateful_loaders, states):
loader.load_state_dict(state_dict)
def _shutdown_workers_and_reset_iterator(dataloader: object) -> None:
if hasattr(dataloader, "_iterator"):
if isinstance(dataloader._iterator, _MultiProcessingDataLoaderIter):
dataloader._iterator._shutdown_workers()
dataloader._iterator = None
def _get_iterables_lengths(iterables: list[Iterable]) -> list[Union[int, float]]:
return [(float("inf") if (length := sized_len(iterable)) is None else length) for iterable in iterables]
| CombinedLoader |
python | django-import-export__django-import-export | tests/core/models.py | {
"start": 5427,
"end": 5582
} | class ____(models.Model):
big = models.PositiveBigIntegerField(null=True)
small = models.PositiveSmallIntegerField(null=True)
| WithPositiveIntegerFields |
python | mlflow__mlflow | mlflow/store/tracking/dbmodels/models.py | {
"start": 10317,
"end": 11556
} | class ____(Base):
"""
DB model for :py:class:`mlflow.entities.RunTag`. These are recorded in ``tags`` table.
"""
__tablename__ = "tags"
__table_args__ = (
PrimaryKeyConstraint("key", "run_uuid", name="tag_pk"),
Index(f"index_{__tablename__}_run_uuid", "run_uuid"),
)
key = Column(String(250))
"""
Tag key: `String` (limit 250 characters). *Primary Key* for ``tags`` table.
"""
value = Column(String(8000), nullable=True)
"""
Value associated with tag: `String` (limit 8000 characters). Could be *null*.
"""
run_uuid = Column(String(32), ForeignKey("runs.run_uuid"))
"""
Run UUID to which this tag belongs to: *Foreign Key* into ``runs`` table.
"""
run = relationship("SqlRun", backref=backref("tags", cascade="all"))
"""
SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlRun`.
"""
def __repr__(self):
return f"<SqlRunTag({self.key}, {self.value})>"
def to_mlflow_entity(self):
"""
Convert DB model to corresponding MLflow entity.
Returns:
:py:class:`mlflow.entities.RunTag`.
"""
return RunTag(key=self.key, value=self.value)
| SqlTag |
python | psf__black | tests/data/cases/preview_comments7.py | {
"start": 1887,
"end": 6231
} | class ____:
@pytest.mark.parametrize(
("post_data", "message"),
[
# metadata_version errors.
(
{},
"None is an invalid value for Metadata-Version. Error: This field is"
" required. see"
" https://packaging.python.org/specifications/core-metadata"
),
(
{"metadata_version": "-1"},
"'-1' is an invalid value for Metadata-Version. Error: Unknown Metadata"
" Version see"
" https://packaging.python.org/specifications/core-metadata"
),
# name errors.
(
{"metadata_version": "1.2"},
"'' is an invalid value for Name. Error: This field is required. see"
" https://packaging.python.org/specifications/core-metadata"
),
(
{"metadata_version": "1.2", "name": "foo-"},
"'foo-' is an invalid value for Name. Error: Must start and end with a"
" letter or numeral and contain only ascii numeric and '.', '_' and"
" '-'. see https://packaging.python.org/specifications/core-metadata"
),
# version errors.
(
{"metadata_version": "1.2", "name": "example"},
"'' is an invalid value for Version. Error: This field is required. see"
" https://packaging.python.org/specifications/core-metadata"
),
(
{"metadata_version": "1.2", "name": "example", "version": "dog"},
"'dog' is an invalid value for Version. Error: Must start and end with"
" a letter or numeral and contain only ascii numeric and '.', '_' and"
" '-'. see https://packaging.python.org/specifications/core-metadata"
)
]
)
def test_fails_invalid_post_data(
self, pyramid_config, db_request, post_data, message
):
...
square = Square(4) # type: Optional[Square]
# Regression test for https://github.com/psf/black/issues/3756.
[
(
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" # aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
),
]
[
( # aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" # aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
),
]
# output
from .config import (
Any,
Bool,
ConfigType,
ConfigTypeAttributes,
Int,
Path,
# String,
# resolve_to_config_type,
# DEFAULT_TYPE_ATTRIBUTES,
)
from .config import (
Any,
Bool,
ConfigType,
ConfigTypeAttributes,
Int,
no_comma_here_yet,
# and some comments,
# resolve_to_config_type,
# DEFAULT_TYPE_ATTRIBUTES,
)
from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import (
MyLovelyCompanyTeamProjectComponent, # NOT DRY
)
from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import (
MyLovelyCompanyTeamProjectComponent as component, # DRY
)
result = 1 # look ma, no comment migration xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
result = 1 # look ma, no comment migration xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
result = ( # aaa
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
)
result = ( # aaa
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
)
def func():
c = call(
0.0123,
0.0456,
0.0789,
0.0123,
0.0789,
a[-1], # type: ignore
)
c = call(0.0123, 0.0456, 0.0789, 0.0123, 0.0789, a[-1]) # type: ignore
c = call(
0.0123,
0.0456,
0.0789,
0.0123,
0.0456,
0.0789,
0.0123,
0.0456,
0.0789,
a[-1], # type: ignore
)
# The type: ignore exception only applies to line length, not
# other types of formatting.
c = call(
"aaaaaaaa",
"aaaaaaaa",
"aaaaaaaa",
"aaaaaaaa",
"aaaaaaaa",
"aaaaaaaa", # type: ignore
"aaaaaaaa",
"aaaaaaaa",
"aaaaaaaa",
"aaaaaaaa",
"aaaaaaaa",
"aaaaaaaa",
)
| C |
python | getsentry__sentry | src/sentry/sentry_metrics/consumers/indexer/slicing_router.py | {
"start": 829,
"end": 3063
} | class ____(Exception):
"""
Exception raised when the routing header does not contain an org_id.
"""
def _validate_slicing_config() -> None:
"""
Validates the generalized slicing config (not focusing on an individual
sliceable)
"""
for sliceable, assignments in settings.SENTRY_SLICING_CONFIG.items():
acc = {}
for (assign_lo, assign_hi), _slice_id in assignments.items():
for logical_part in range(assign_lo, assign_hi):
if logical_part in acc:
raise SlicingConfigurationException(
f"'{sliceable}' has two assignments to logical partition {logical_part}"
)
else:
acc[logical_part] = _slice_id
missing_logical_parts = set(
range(0, settings.SENTRY_SLICING_LOGICAL_PARTITION_COUNT)
) - set(acc.keys())
if not len(missing_logical_parts) == 0:
raise SlicingConfigurationException(
f"'{sliceable}' is missing logical partition assignments: {missing_logical_parts}"
)
def _validate_slicing_consumer_config(sliceable: Sliceable) -> None:
"""
Validate all the required settings needed for a slicing router.
"""
if not is_sliced(sliceable):
raise SlicingConfigurationException(
f"{sliceable} is not defined in settings.SENTRY_SLICING_CONFIG"
)
for (current_sliceable, slice_id), configuration in settings.SLICED_KAFKA_TOPICS.items():
if current_sliceable != sliceable:
continue
if "topic" not in configuration:
raise SlicingConfigurationException(
f"({current_sliceable}, {slice_id}) is missing a topic name."
)
if "cluster" not in configuration:
raise SlicingConfigurationException(
f"({current_sliceable}, {slice_id}) is missing a cluster name."
)
cluster = configuration["cluster"]
if cluster not in settings.KAFKA_CLUSTERS:
raise SlicingConfigurationException(
f"Broker configuration missing for {cluster} in settings.KAFKA_CLUSTERS"
)
| MissingOrgInRoutingHeader |
python | allegroai__clearml | clearml/backend_api/services/v2_9/queues.py | {
"start": 17930,
"end": 18790
} | class ____(Response):
"""
Response of queues.create endpoint.
:param id: New queue ID
:type id: str
"""
_service = "queues"
_action = "create"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {"id": {"description": "New queue ID", "type": ["string", "null"]}},
"type": "object",
}
def __init__(self, id: Optional[str] = None, **kwargs: Any) -> None:
super(CreateResponse, self).__init__(**kwargs)
self.id = id
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
| CreateResponse |
python | aio-libs__aiohttp | aiohttp/web_urldispatcher.py | {
"start": 2046,
"end": 3273
} | class ____(Sized, Iterable["AbstractRoute"]):
def __init__(self, *, name: str | None = None) -> None:
self._name = name
@property
def name(self) -> str | None:
return self._name
@property
@abc.abstractmethod
def canonical(self) -> str:
"""Exposes the resource's canonical path.
For example '/foo/bar/{name}'
"""
@abc.abstractmethod # pragma: no branch
def url_for(self, **kwargs: str) -> URL:
"""Construct url for resource with additional params."""
@abc.abstractmethod # pragma: no branch
async def resolve(self, request: Request) -> _Resolve:
"""Resolve resource.
Return (UrlMappingMatchInfo, allowed_methods) pair.
"""
@abc.abstractmethod
def add_prefix(self, prefix: str) -> None:
"""Add a prefix to processed URLs.
Required for subapplications support.
"""
@abc.abstractmethod
def get_info(self) -> _InfoDict:
"""Return a dict with additional info useful for introspection"""
def freeze(self) -> None:
pass
@abc.abstractmethod
def raw_match(self, path: str) -> bool:
"""Perform a raw match against path"""
| AbstractResource |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1497958,
"end": 1500078
} | class ____(sgqlc.types.Type, Node):
"""A sponsorship relationship between a sponsor and a maintainer"""
__schema__ = github_schema
__field_names__ = (
"created_at",
"is_active",
"is_one_time_payment",
"is_sponsor_opted_into_email",
"privacy_level",
"sponsor_entity",
"sponsorable",
"tier",
"tier_selected_at",
)
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
is_active = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isActive")
"""Whether the sponsorship is active. False implies the sponsor is a
past sponsor of the maintainer, while true implies they are a
current sponsor.
"""
is_one_time_payment = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isOneTimePayment")
"""Whether this sponsorship represents a one-time payment versus a
recurring sponsorship.
"""
is_sponsor_opted_into_email = sgqlc.types.Field(Boolean, graphql_name="isSponsorOptedIntoEmail")
"""Whether the sponsor has chosen to receive sponsorship update
emails sent from the sponsorable. Only returns a non-null value
when the viewer has permission to know this.
"""
privacy_level = sgqlc.types.Field(sgqlc.types.non_null(SponsorshipPrivacy), graphql_name="privacyLevel")
"""The privacy level for this sponsorship."""
sponsor_entity = sgqlc.types.Field("Sponsor", graphql_name="sponsorEntity")
"""The user or organization that is sponsoring, if you have
permission to view them.
"""
sponsorable = sgqlc.types.Field(sgqlc.types.non_null(Sponsorable), graphql_name="sponsorable")
"""The entity that is being sponsored"""
tier = sgqlc.types.Field(SponsorsTier, graphql_name="tier")
"""The associated sponsorship tier"""
tier_selected_at = sgqlc.types.Field(DateTime, graphql_name="tierSelectedAt")
"""Identifies the date and time when the current tier was chosen for
this sponsorship.
"""
| Sponsorship |
python | pytorch__pytorch | torch/_subclasses/functional_tensor.py | {
"start": 34752,
"end": 36109
} | class ____(BaseFunctionalizeAPI):
def wrap_tensors(self, args: tuple[Any]) -> tuple[Any]:
from torch._functorch.eager_transforms import _wrap_all_tensors_to_functional
return _wrap_all_tensors_to_functional(args, level=0)
def unwrap_tensors(
self, args: Union[torch.Tensor, tuple[torch.Tensor, ...]]
) -> Union[torch.Tensor, tuple[torch.Tensor, ...]]:
from torch._functorch.eager_transforms import (
_unwrap_all_tensors_from_functional,
)
return _unwrap_all_tensors_from_functional(args, reapply_views=_reapply_views())
def functionalize(self, inner_f: Callable) -> Callable:
return torch.func.functionalize(inner_f)
def redispatch_to_next(self) -> AbstractContextManager:
return torch._C._ExcludeDispatchKeyGuard(
torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize)
)
def replace(self, input_tensor, output_tensor) -> None:
torch._functionalize_replace(input_tensor, output_tensor)
def commit_update(self, tensor) -> None:
torch._functionalize_commit_update(tensor)
def sync(self, tensor) -> None:
torch._functionalize_sync(tensor)
def mark_mutation_hidden_from_autograd(self, tensor) -> None:
torch._functionalize_mark_mutation_hidden_from_autograd(tensor)
| CppFunctionalizeAPI |
python | ray-project__ray | python/ray/serve/deployment.py | {
"start": 683,
"end": 1860
} | class ____:
"""One or more deployments bound with arguments that can be deployed together.
Can be passed into another `Deployment.bind()` to compose multiple deployments in a
single application, passed to `serve.run`, or deployed via a Serve config file.
For example, to define an Application and run it in Python:
.. code-block:: python
from ray import serve
from ray.serve import Application
@serve.deployment
class MyDeployment:
pass
app: Application = MyDeployment.bind(OtherDeployment.bind())
serve.run(app)
To run the same app using the command line interface (CLI):
.. code-block:: bash
serve run python_file:app
To deploy the same app via a config file:
.. code-block:: yaml
applications:
my_app:
import_path: python_file:app
"""
def __init__(self, bound_deployment: "Deployment"):
# This is used by `build_app`, but made private so users don't use it.
self._bound_deployment = bound_deployment
@PublicAPI(stability="stable")
| Application |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/generator11.py | {
"start": 721,
"end": 959
} | class ____[S, T](Awaitable[T]):
def __init__(self, val: S) -> None:
self.val = val
def __await__(self) -> Generator[S, T, T]:
z = yield self.val
reveal_type(z, expected_text="T@ClassA")
return z
| ClassA |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 82079,
"end": 83546
} | class ____(FieldValues):
"""
Values for `ListField` with CharField as child.
"""
valid_inputs = [
({'a': 1, 'b': '2', 3: 3}, {'a': '1', 'b': '2', '3': '3'}),
({'a': 1, 'b': None}, {'a': '1', 'b': None}),
]
invalid_inputs = [
('not a dict', ['Expected a dictionary of items but got type "str".']),
]
outputs = [
({'a': 1, 'b': '2', 3: 3}, {'a': '1', 'b': '2', '3': '3'}),
]
field = serializers.HStoreField()
def test_child_is_charfield(self):
with pytest.raises(AssertionError) as exc_info:
serializers.HStoreField(child=serializers.IntegerField())
assert str(exc_info.value) == (
"The `child` argument must be an instance of `CharField`, "
"as the hstore extension stores values as strings."
)
def test_no_source_on_child(self):
with pytest.raises(AssertionError) as exc_info:
serializers.HStoreField(child=serializers.CharField(source='other'))
assert str(exc_info.value) == (
"The `source` argument is not meaningful when applied to a `child=` field. "
"Remove `source=` from the field declaration."
)
def test_allow_null(self):
"""
If `allow_null=True` then `None` is a valid input.
"""
field = serializers.HStoreField(allow_null=True)
output = field.run_validation(None)
assert output is None
| TestHStoreField |
python | tensorflow__tensorflow | tensorflow/python/ops/math_grad_test.py | {
"start": 4981,
"end": 5714
} | class ____(test.TestCase):
@test_util.run_deprecated_v1
def testMinGradient(self):
inputs = constant_op.constant([1.0], dtype=dtypes.float32)
outputs = math_ops.reduce_min(array_ops.concat([inputs, inputs], 0))
with self.cached_session():
error = gradient_checker.compute_gradient_error(inputs, [1], outputs, [])
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testMaxGradient(self):
inputs = constant_op.constant([1.0], dtype=dtypes.float32)
outputs = math_ops.reduce_max(array_ops.concat([inputs, inputs], 0))
with self.cached_session():
error = gradient_checker.compute_gradient_error(inputs, [1], outputs, [])
self.assertLess(error, 1e-4)
| MinOrMaxGradientTest |
python | tensorflow__tensorflow | tensorflow/python/distribute/values_test.py | {
"start": 16452,
"end": 29235
} | class ____(test.TestCase, parameterized.TestCase):
def tearDown(self):
super().tearDown()
context._reset_context()
def _assign_replica_local(self, v, new):
for var, n in zip(v, new):
with ops.device(var.device):
self.evaluate(var.assign(n))
def _save_return_saver(self, sess, var):
saver = saver_lib.Saver(var_list=[var])
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
return saver.save(sess, prefix), saver
def _save(self, sess, var):
save_path, _ = self._save_return_saver(sess, var)
return save_path
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
@test_util.run_in_graph_and_eager_modes(config=config)
def testProperties(self):
if context.num_gpus() < 1 and context.executing_eagerly():
self.skipTest("A GPU is not available for this test in eager mode.")
v, replica_local = _make_replica_local(
variable_scope.VariableAggregation.SUM)
self.assertEqual(v[0].constraint, replica_local.constraint)
self.assertEqual(v[0].name, replica_local.name)
self.assertEqual(v[0].dtype, replica_local.dtype)
self.assertEqual(v[0].shape, replica_local.shape)
self.assertEqual(variable_scope.VariableAggregation.SUM,
replica_local.aggregation)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu
],
mode=["eager"]))
def testCanPassToDefFun(self, distribution):
@def_function.function
def add1(x):
return x + 1.
with distribution.scope():
v = variables_lib.Variable(
1.,
aggregation=variables_lib.VariableAggregation.MEAN,
synchronization=variables_lib.VariableSynchronization.ON_READ)
self.assertEqual(2., self.evaluate(add1(v)))
@combinations.generate(mirrored_and_tpu_strategy_combinations())
def testTensorConversion(self, distribution):
with context.graph_mode():
_, replica_local = _make_replica_local(
variable_scope.VariableAggregation.SUM, distribution)
converted = ops.convert_to_tensor(replica_local, as_ref=False)
self.assertIsInstance(converted, tensor.Tensor)
self.assertEqual(converted.dtype, replica_local.dtype)
converted = ops.convert_to_tensor(replica_local, as_ref=True)
# Resources variable are converted to tensors as well when as_ref is True.
self.assertIsInstance(converted, tensor.Tensor)
self.assertEqual(converted.dtype, replica_local.dtype)
@combinations.generate(combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus_no_merge_call,
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_packed_var,
], mode=["eager"]))
def testValueInCrossReplicaContext(self, distribution):
value_list, replica_local = _make_replica_local(
variable_scope.VariableAggregation.ONLY_FIRST_REPLICA, distribution)
self.assertIsInstance(replica_local.value(), tensor.Tensor)
self.assertEqual(self.evaluate(replica_local.value()),
self.evaluate(value_list[0].value()))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy_packed_var,
],
mode=["eager"]))
def testValueInDefaultReplicaContext(self, distribution):
with distribution.scope():
v1 = variables_lib.Variable(
0.0,
aggregation=variables_lib.VariableAggregation.SUM,
synchronization=variables_lib.VariableSynchronization.ON_READ)
v2 = variables_lib.Variable(
0.0,
aggregation=variables_lib.VariableAggregation.SUM,
synchronization=variables_lib.VariableSynchronization.ON_READ)
@def_function.function
def replica_fn():
v1.assign_add(1.0)
v2.assign_add(2.0)
distribution.run(replica_fn)
sum_v = v1 + v2
self.assertEqual(sum_v, 6.0)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.tpu_strategy_packed_var,
],
mode=["eager"]))
def testValueInFunctionCrossReplicaContext(self, distribution):
with distribution.scope():
v1 = variables_lib.Variable(
0.0,
aggregation=variables_lib.VariableAggregation.NONE,
synchronization=variables_lib.VariableSynchronization.ON_WRITE)
@def_function.function
def assign_fn():
v1.assign(1.0)
assign_fn()
self.assertEqual(v1, 1.0)
# Make sure the function graph has composite variable as inputs.
graph_def = assign_fn.get_concrete_function().graph.as_graph_def()
self.assertRegex(str(graph_def), "device:COMPOSITE:0")
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.tpu_strategy_packed_var,
],
mode=["eager"]))
def testReplicatedValueNameDeterministic(self, distribution):
with distribution.scope():
v1 = variables_lib.Variable(0.0, name="test_var_1")
v2 = variables_lib.Variable(0.0, name="test_var_2")
def fn():
v1.assign_add(1.0)
v2.assign_add(2.0)
return v1 + v2
@def_function.function
def dist_run_fn():
a = distribution.run(fn)
return a
concrete_fn = dist_run_fn.get_concrete_function()
inputs = concrete_fn.graph.inputs
self.assertLen(inputs, 2)
# Before cl/433948982, input name will include a non-deterministic uid,
# e.g. "test_var_1_139726389910864/handle/inputs_0:0"
self.assertEqual(inputs[0].name, "test_var_1/handle/inputs_0:0")
self.assertEqual(inputs[1].name, "test_var_2/handle/inputs_0:0")
@combinations.generate(mirrored_and_tpu_strategy_combinations())
def testSaveAndRestoreReplicaLocalSumOneGraph(self, distribution):
with self.cached_session() as sess:
v, replica_local = _make_replica_local(
variable_scope.VariableAggregation.SUM, distribution)
# Overwrite the initial values.
self._assign_replica_local(v, [3., 4.])
with distribution.scope():
# Saves the current value of v[0] + v[1], 7.
save_path, saver = self._save_return_saver(sess, replica_local)
# Change the values between save and restore.
self._assign_replica_local(v, [5., 6.])
# Restores the saved value of 7. which gets divided equally
# between the variables.
saver.restore(sess, save_path)
self.assertEqual([3.5, 3.5], self.evaluate([v[0], v[1]]))
@combinations.generate(mirrored_and_tpu_strategy_combinations())
def testSaveAndRestoreReplicaLocalMeanOneGraph(self, distribution):
if context.num_gpus() < 1 and context.executing_eagerly():
self.skipTest("A GPU is not available for this test in eager mode.")
with self.cached_session() as sess:
v, replica_local = _make_replica_local(
variable_scope.VariableAggregation.MEAN, distribution)
# Overwrite the initial values.
self._assign_replica_local(v, [3., 4.])
with distribution.scope():
# Saves the current value of (v[0] + v[1])/2, 3.5.
save_path, saver = self._save_return_saver(sess, replica_local)
# Change the values between save and restore.
self._assign_replica_local(v, [5., 6.])
# Restores the saved value of 3.5 to both variables.
saver.restore(sess, save_path)
self.assertEqual([3.5, 3.5], self.evaluate([v[0], v[1]]))
def _save_replica_local_mean(self, distribution):
"""Save variables with mirroring, returns save_path."""
with self.session(graph=ops.Graph()) as sess:
v, replica_local = _make_replica_local(
variable_scope.VariableAggregation.MEAN, distribution)
# Overwrite the initial values.
self._assign_replica_local(v, [3., 4.])
with distribution.scope():
# Saves the current value of (v[0] + v[1])/2, 3.5
save_path = self._save(sess, replica_local)
# Change the values between save and restore.
self._assign_replica_local(v, [5., 6.])
return save_path
def _save_replica_local_sum(self, distribution):
"""Save variables with mirroring, returns save_path."""
with self.session(graph=ops.Graph()) as sess:
v, replica_local = _make_replica_local(
variable_scope.VariableAggregation.SUM, distribution)
# Overwrite the initial values.
self._assign_replica_local(v, [1.5, 2.])
with distribution.scope():
# Saves the current value of v[0] + v[1], 3.5
save_path = self._save(sess, replica_local)
# Change the values between save and restore.
self._assign_replica_local(v, [5., 6.])
return save_path
def _save_normal(self):
"""Save variables without mirroring, returns save_path."""
with self.session(graph=ops.Graph()) as sess:
var = variable_scope.get_variable(
name="v", initializer=1., use_resource=True)
# Overwrite the initial value.
self.evaluate(var.assign(3.5))
# Saves the current value of var, 3.5.
save_path = self._save(sess, var)
# Change the values between save and restore.
self.evaluate(var.assign(5.))
return save_path
def _restore_normal(self, save_path):
"""Restore to variables without mirroring in a fresh graph."""
with self.session(graph=ops.Graph()) as sess:
var = variable_scope.get_variable(
name="v", initializer=7., use_resource=True)
# Overwrite the initial value.
self.evaluate(var.assign(8.))
# Restores the saved value of 3.5 to `var`.
saver = saver_lib.Saver(var_list=[var])
saver.restore(sess, save_path)
self.assertEqual(3.5, self.evaluate(var))
def _restore_replica_local_mean(self, save_path, distribution):
"""Restore to variables with mirroring in a fresh graph."""
with self.session(graph=ops.Graph()) as sess:
v, replica_local = _make_replica_local(
variable_scope.VariableAggregation.MEAN, distribution)
# Overwrite the initial values.
self._assign_replica_local(v, [7., 8.])
with distribution.scope():
# Restores the saved value of 3.5 to both variables.
saver = saver_lib.Saver(var_list=[replica_local])
saver.restore(sess, save_path)
self.assertEqual([3.5, 3.5], self.evaluate([v[0], v[1]]))
def _restore_replica_local_sum(self, save_path, distribution):
"""Restore to variables with mirroring in a fresh graph."""
with self.session(graph=ops.Graph()) as sess:
v, replica_local = _make_replica_local(
variable_scope.VariableAggregation.SUM, distribution)
# Overwrite the initial values.
self._assign_replica_local(v, [7., 8.])
with distribution.scope():
# Restores the saved value of 3.5 to both variables.
saver = saver_lib.Saver(var_list=[replica_local])
saver.restore(sess, save_path)
self.assertEqual([1.75, 1.75], self.evaluate([v[0], v[1]]))
@combinations.generate(mirrored_and_tpu_strategy_combinations())
def testSaveReplicaLocalRestoreReplicaLocalMean(self, distribution):
save_path = self._save_replica_local_mean(distribution)
self._restore_replica_local_mean(save_path, distribution)
@combinations.generate(mirrored_and_tpu_strategy_combinations())
def testSaveReplicaLocalRestoreReplicaLocalSum(self, distribution):
save_path = self._save_replica_local_sum(distribution)
self._restore_replica_local_sum(save_path, distribution)
@combinations.generate(mirrored_and_tpu_strategy_combinations())
def testSaveReplicaLocalMeanRestoreNormal(self, distribution):
save_path = self._save_replica_local_mean(distribution)
self._restore_normal(save_path)
@combinations.generate(mirrored_and_tpu_strategy_combinations())
def testSaveReplicaLocalSumRestoreNormal(self, distribution):
save_path = self._save_replica_local_sum(distribution)
self._restore_normal(save_path)
@combinations.generate(mirrored_and_tpu_strategy_combinations())
def testSaveNormalRestoreReplicaLocalMean(self, distribution):
save_path = self._save_normal()
self._restore_replica_local_mean(save_path, distribution)
@combinations.generate(mirrored_and_tpu_strategy_combinations())
def testSaveNormalRestoreReplicaLocalSum(self, distribution):
save_path = self._save_normal()
self._restore_replica_local_sum(save_path, distribution)
if __name__ == "__main__":
ds_test_util.main()
| DistributedVariableTest |
python | anthropics__anthropic-sdk-python | src/anthropic/types/messages/batch_create_params.py | {
"start": 316,
"end": 520
} | class ____(TypedDict, total=False):
requests: Required[Iterable[Request]]
"""List of requests for prompt completion.
Each is an individual request to create a Message.
"""
| BatchCreateParams |
python | getsentry__sentry | src/sentry/search/events/datasets/profile_functions.py | {
"start": 3226,
"end": 3943
} | class ____(NumericColumn):
def _normalize(self, value: str) -> str:
column = COLUMN_MAP.get(value)
if column is None:
raise InvalidFunctionArgument(f"{value} is not a valid column")
if (
column.kind == Kind.INTEGER
or column.kind == Kind.DURATION
or column.kind == Kind.NUMBER
):
return column.column
raise InvalidFunctionArgument(f"{value} is not a numeric column")
def get_type(self, value: str) -> str: # type: ignore[override] # baseclass is unsound
try:
return COLUMN_MAP[value].kind.value
except KeyError:
return Kind.NUMBER.value
| ProfileFunctionNumericColumn |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constrainedTypeVar15.py | {
"start": 282,
"end": 663
} | class ____(Generic[_T]):
data: _T
def func1(a: Data[_T]) -> _T:
if isinstance(a.data, (int, float)):
value = int(a.data / 3)
reveal_type(value, expected_text="int*")
else:
value = a.data
reveal_type(value, expected_text="ClassA*")
return value
def func2(val: AnyStr, objs: Iterable[AnyStr]) -> AnyStr:
return val.join(objs)
| Data |
python | google__pytype | pytype/overlays/attr_overlay.py | {
"start": 646,
"end": 765
} | class ____(enum.Enum):
"""Source of an attrib's `typ` property."""
TYPE = 1
DEFAULT = 2
CONVERTER = 3
| TypeSource |
python | django__django | tests/admin_checks/models.py | {
"start": 635,
"end": 876
} | class ____(models.Model):
album1 = models.ForeignKey(Album, models.CASCADE, related_name="album1_set")
album2 = models.ForeignKey(Album, models.CASCADE, related_name="album2_set")
e = models.CharField(max_length=1)
| TwoAlbumFKAndAnE |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/rich/progress.py | {
"start": 36022,
"end": 59704
} | class ____(JupyterMixin):
"""Renders an auto-updating progress bar(s).
Args:
console (Console, optional): Optional Console instance. Default will an internal Console instance writing to stdout.
auto_refresh (bool, optional): Enable auto refresh. If disabled, you will need to call `refresh()`.
refresh_per_second (Optional[float], optional): Number of times per second to refresh the progress information or None to use default (10). Defaults to None.
speed_estimate_period: (float, optional): Period (in seconds) used to calculate the speed estimate. Defaults to 30.
transient: (bool, optional): Clear the progress on exit. Defaults to False.
redirect_stdout: (bool, optional): Enable redirection of stdout, so ``print`` may be used. Defaults to True.
redirect_stderr: (bool, optional): Enable redirection of stderr. Defaults to True.
get_time: (Callable, optional): A callable that gets the current time, or None to use Console.get_time. Defaults to None.
disable (bool, optional): Disable progress display. Defaults to False
expand (bool, optional): Expand tasks table to fit width. Defaults to False.
"""
def __init__(
self,
*columns: Union[str, ProgressColumn],
console: Optional[Console] = None,
auto_refresh: bool = True,
refresh_per_second: float = 10,
speed_estimate_period: float = 30.0,
transient: bool = False,
redirect_stdout: bool = True,
redirect_stderr: bool = True,
get_time: Optional[GetTimeCallable] = None,
disable: bool = False,
expand: bool = False,
) -> None:
assert refresh_per_second > 0, "refresh_per_second must be > 0"
self._lock = RLock()
self.columns = columns or self.get_default_columns()
self.speed_estimate_period = speed_estimate_period
self.disable = disable
self.expand = expand
self._tasks: Dict[TaskID, Task] = {}
self._task_index: TaskID = TaskID(0)
self.live = Live(
console=console or get_console(),
auto_refresh=auto_refresh,
refresh_per_second=refresh_per_second,
transient=transient,
redirect_stdout=redirect_stdout,
redirect_stderr=redirect_stderr,
get_renderable=self.get_renderable,
)
self.get_time = get_time or self.console.get_time
self.print = self.console.print
self.log = self.console.log
@classmethod
def get_default_columns(cls) -> Tuple[ProgressColumn, ...]:
"""Get the default columns used for a new Progress instance:
- a text column for the description (TextColumn)
- the bar itself (BarColumn)
- a text column showing completion percentage (TextColumn)
- an estimated-time-remaining column (TimeRemainingColumn)
If the Progress instance is created without passing a columns argument,
the default columns defined here will be used.
You can also create a Progress instance using custom columns before
and/or after the defaults, as in this example:
progress = Progress(
SpinnerColumn(),
*Progress.get_default_columns(),
"Elapsed:",
TimeElapsedColumn(),
)
This code shows the creation of a Progress display, containing
a spinner to the left, the default columns, and a labeled elapsed
time column.
"""
return (
TextColumn("[progress.description]{task.description}"),
BarColumn(),
TaskProgressColumn(),
TimeRemainingColumn(),
)
@property
def console(self) -> Console:
return self.live.console
@property
def tasks(self) -> List[Task]:
"""Get a list of Task instances."""
with self._lock:
return list(self._tasks.values())
@property
def task_ids(self) -> List[TaskID]:
"""A list of task IDs."""
with self._lock:
return list(self._tasks.keys())
@property
def finished(self) -> bool:
"""Check if all tasks have been completed."""
with self._lock:
if not self._tasks:
return True
return all(task.finished for task in self._tasks.values())
def start(self) -> None:
"""Start the progress display."""
if not self.disable:
self.live.start(refresh=True)
def stop(self) -> None:
"""Stop the progress display."""
self.live.stop()
if not self.console.is_interactive:
self.console.print()
def __enter__(self) -> "Progress":
self.start()
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
self.stop()
def track(
self,
sequence: Union[Iterable[ProgressType], Sequence[ProgressType]],
total: Optional[float] = None,
task_id: Optional[TaskID] = None,
description: str = "Working...",
update_period: float = 0.1,
) -> Iterable[ProgressType]:
"""Track progress by iterating over a sequence.
Args:
sequence (Sequence[ProgressType]): A sequence of values you want to iterate over and track progress.
total: (float, optional): Total number of steps. Default is len(sequence).
task_id: (TaskID): Task to track. Default is new task.
description: (str, optional): Description of task, if new task is created.
update_period (float, optional): Minimum time (in seconds) between calls to update(). Defaults to 0.1.
Returns:
Iterable[ProgressType]: An iterable of values taken from the provided sequence.
"""
if total is None:
total = float(length_hint(sequence)) or None
if task_id is None:
task_id = self.add_task(description, total=total)
else:
self.update(task_id, total=total)
if self.live.auto_refresh:
with _TrackThread(self, task_id, update_period) as track_thread:
for value in sequence:
yield value
track_thread.completed += 1
else:
advance = self.advance
refresh = self.refresh
for value in sequence:
yield value
advance(task_id, 1)
refresh()
def wrap_file(
self,
file: BinaryIO,
total: Optional[int] = None,
*,
task_id: Optional[TaskID] = None,
description: str = "Reading...",
) -> BinaryIO:
"""Track progress file reading from a binary file.
Args:
file (BinaryIO): A file-like object opened in binary mode.
total (int, optional): Total number of bytes to read. This must be provided unless a task with a total is also given.
task_id (TaskID): Task to track. Default is new task.
description (str, optional): Description of task, if new task is created.
Returns:
BinaryIO: A readable file-like object in binary mode.
Raises:
ValueError: When no total value can be extracted from the arguments or the task.
"""
# attempt to recover the total from the task
total_bytes: Optional[float] = None
if total is not None:
total_bytes = total
elif task_id is not None:
with self._lock:
total_bytes = self._tasks[task_id].total
if total_bytes is None:
raise ValueError(
f"unable to get the total number of bytes, please specify 'total'"
)
# update total of task or create new task
if task_id is None:
task_id = self.add_task(description, total=total_bytes)
else:
self.update(task_id, total=total_bytes)
return _Reader(file, self, task_id, close_handle=False)
@typing.overload
def open(
self,
file: Union[str, "PathLike[str]", bytes],
mode: Literal["rb"],
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
*,
total: Optional[int] = None,
task_id: Optional[TaskID] = None,
description: str = "Reading...",
) -> BinaryIO:
pass
@typing.overload
def open(
self,
file: Union[str, "PathLike[str]", bytes],
mode: Union[Literal["r"], Literal["rt"]],
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
*,
total: Optional[int] = None,
task_id: Optional[TaskID] = None,
description: str = "Reading...",
) -> TextIO:
pass
def open(
self,
file: Union[str, "PathLike[str]", bytes],
mode: Union[Literal["rb"], Literal["rt"], Literal["r"]] = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
*,
total: Optional[int] = None,
task_id: Optional[TaskID] = None,
description: str = "Reading...",
) -> Union[BinaryIO, TextIO]:
"""Track progress while reading from a binary file.
Args:
path (Union[str, PathLike[str]]): The path to the file to read.
mode (str): The mode to use to open the file. Only supports "r", "rb" or "rt".
buffering (int): The buffering strategy to use, see :func:`io.open`.
encoding (str, optional): The encoding to use when reading in text mode, see :func:`io.open`.
errors (str, optional): The error handling strategy for decoding errors, see :func:`io.open`.
newline (str, optional): The strategy for handling newlines in text mode, see :func:`io.open`.
total (int, optional): Total number of bytes to read. If none given, os.stat(path).st_size is used.
task_id (TaskID): Task to track. Default is new task.
description (str, optional): Description of task, if new task is created.
Returns:
BinaryIO: A readable file-like object in binary mode.
Raises:
ValueError: When an invalid mode is given.
"""
# normalize the mode (always rb, rt)
_mode = "".join(sorted(mode, reverse=False))
if _mode not in ("br", "rt", "r"):
raise ValueError("invalid mode {!r}".format(mode))
# patch buffering to provide the same behaviour as the builtin `open`
line_buffering = buffering == 1
if _mode == "br" and buffering == 1:
warnings.warn(
"line buffering (buffering=1) isn't supported in binary mode, the default buffer size will be used",
RuntimeWarning,
)
buffering = -1
elif _mode in ("rt", "r"):
if buffering == 0:
raise ValueError("can't have unbuffered text I/O")
elif buffering == 1:
buffering = -1
# attempt to get the total with `os.stat`
if total is None:
total = stat(file).st_size
# update total of task or create new task
if task_id is None:
task_id = self.add_task(description, total=total)
else:
self.update(task_id, total=total)
# open the file in binary mode,
handle = io.open(file, "rb", buffering=buffering)
reader = _Reader(handle, self, task_id, close_handle=True)
# wrap the reader in a `TextIOWrapper` if text mode
if mode in ("r", "rt"):
return io.TextIOWrapper(
reader,
encoding=encoding,
errors=errors,
newline=newline,
line_buffering=line_buffering,
)
return reader
def start_task(self, task_id: TaskID) -> None:
"""Start a task.
Starts a task (used when calculating elapsed time). You may need to call this manually,
if you called ``add_task`` with ``start=False``.
Args:
task_id (TaskID): ID of task.
"""
with self._lock:
task = self._tasks[task_id]
if task.start_time is None:
task.start_time = self.get_time()
def stop_task(self, task_id: TaskID) -> None:
"""Stop a task.
This will freeze the elapsed time on the task.
Args:
task_id (TaskID): ID of task.
"""
with self._lock:
task = self._tasks[task_id]
current_time = self.get_time()
if task.start_time is None:
task.start_time = current_time
task.stop_time = current_time
def update(
self,
task_id: TaskID,
*,
total: Optional[float] = None,
completed: Optional[float] = None,
advance: Optional[float] = None,
description: Optional[str] = None,
visible: Optional[bool] = None,
refresh: bool = False,
**fields: Any,
) -> None:
"""Update information associated with a task.
Args:
task_id (TaskID): Task id (returned by add_task).
total (float, optional): Updates task.total if not None.
completed (float, optional): Updates task.completed if not None.
advance (float, optional): Add a value to task.completed if not None.
description (str, optional): Change task description if not None.
visible (bool, optional): Set visible flag if not None.
refresh (bool): Force a refresh of progress information. Default is False.
**fields (Any): Additional data fields required for rendering.
"""
with self._lock:
task = self._tasks[task_id]
completed_start = task.completed
if total is not None and total != task.total:
task.total = total
task._reset()
if advance is not None:
task.completed += advance
if completed is not None:
task.completed = completed
if description is not None:
task.description = description
if visible is not None:
task.visible = visible
task.fields.update(fields)
update_completed = task.completed - completed_start
current_time = self.get_time()
old_sample_time = current_time - self.speed_estimate_period
_progress = task._progress
popleft = _progress.popleft
while _progress and _progress[0].timestamp < old_sample_time:
popleft()
if update_completed > 0:
_progress.append(ProgressSample(current_time, update_completed))
if (
task.total is not None
and task.completed >= task.total
and task.finished_time is None
):
task.finished_time = task.elapsed
if refresh:
self.refresh()
def reset(
self,
task_id: TaskID,
*,
start: bool = True,
total: Optional[float] = None,
completed: int = 0,
visible: Optional[bool] = None,
description: Optional[str] = None,
**fields: Any,
) -> None:
"""Reset a task so completed is 0 and the clock is reset.
Args:
task_id (TaskID): ID of task.
start (bool, optional): Start the task after reset. Defaults to True.
total (float, optional): New total steps in task, or None to use current total. Defaults to None.
completed (int, optional): Number of steps completed. Defaults to 0.
visible (bool, optional): Enable display of the task. Defaults to True.
description (str, optional): Change task description if not None. Defaults to None.
**fields (str): Additional data fields required for rendering.
"""
current_time = self.get_time()
with self._lock:
task = self._tasks[task_id]
task._reset()
task.start_time = current_time if start else None
if total is not None:
task.total = total
task.completed = completed
if visible is not None:
task.visible = visible
if fields:
task.fields = fields
if description is not None:
task.description = description
task.finished_time = None
self.refresh()
def advance(self, task_id: TaskID, advance: float = 1) -> None:
"""Advance task by a number of steps.
Args:
task_id (TaskID): ID of task.
advance (float): Number of steps to advance. Default is 1.
"""
current_time = self.get_time()
with self._lock:
task = self._tasks[task_id]
completed_start = task.completed
task.completed += advance
update_completed = task.completed - completed_start
old_sample_time = current_time - self.speed_estimate_period
_progress = task._progress
popleft = _progress.popleft
while _progress and _progress[0].timestamp < old_sample_time:
popleft()
while len(_progress) > 1000:
popleft()
_progress.append(ProgressSample(current_time, update_completed))
if (
task.total is not None
and task.completed >= task.total
and task.finished_time is None
):
task.finished_time = task.elapsed
task.finished_speed = task.speed
def refresh(self) -> None:
"""Refresh (render) the progress information."""
if not self.disable and self.live.is_started:
self.live.refresh()
def get_renderable(self) -> RenderableType:
"""Get a renderable for the progress display."""
renderable = Group(*self.get_renderables())
return renderable
def get_renderables(self) -> Iterable[RenderableType]:
"""Get a number of renderables for the progress display."""
table = self.make_tasks_table(self.tasks)
yield table
def make_tasks_table(self, tasks: Iterable[Task]) -> Table:
"""Get a table to render the Progress display.
Args:
tasks (Iterable[Task]): An iterable of Task instances, one per row of the table.
Returns:
Table: A table instance.
"""
table_columns = (
(
Column(no_wrap=True)
if isinstance(_column, str)
else _column.get_table_column().copy()
)
for _column in self.columns
)
table = Table.grid(*table_columns, padding=(0, 1), expand=self.expand)
for task in tasks:
if task.visible:
table.add_row(
*(
(
column.format(task=task)
if isinstance(column, str)
else column(task)
)
for column in self.columns
)
)
return table
def __rich__(self) -> RenderableType:
"""Makes the Progress class itself renderable."""
with self._lock:
return self.get_renderable()
def add_task(
self,
description: str,
start: bool = True,
total: Optional[float] = 100.0,
completed: int = 0,
visible: bool = True,
**fields: Any,
) -> TaskID:
"""Add a new 'task' to the Progress display.
Args:
description (str): A description of the task.
start (bool, optional): Start the task immediately (to calculate elapsed time). If set to False,
you will need to call `start` manually. Defaults to True.
total (float, optional): Number of total steps in the progress if known.
Set to None to render a pulsing animation. Defaults to 100.
completed (int, optional): Number of steps completed so far. Defaults to 0.
visible (bool, optional): Enable display of the task. Defaults to True.
**fields (str): Additional data fields required for rendering.
Returns:
TaskID: An ID you can use when calling `update`.
"""
with self._lock:
task = Task(
self._task_index,
description,
total,
completed,
visible=visible,
fields=fields,
_get_time=self.get_time,
_lock=self._lock,
)
self._tasks[self._task_index] = task
if start:
self.start_task(self._task_index)
new_task_index = self._task_index
self._task_index = TaskID(int(self._task_index) + 1)
self.refresh()
return new_task_index
def remove_task(self, task_id: TaskID) -> None:
"""Delete a task if it exists.
Args:
task_id (TaskID): A task ID.
"""
with self._lock:
del self._tasks[task_id]
if __name__ == "__main__": # pragma: no coverage
import random
import time
from .panel import Panel
from .rule import Rule
from .syntax import Syntax
from .table import Table
syntax = Syntax(
'''def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
"""Iterate and generate a tuple with a flag for last value."""
iter_values = iter(values)
try:
previous_value = next(iter_values)
except StopIteration:
return
for value in iter_values:
yield False, previous_value
previous_value = value
yield True, previous_value''',
"python",
line_numbers=True,
)
table = Table("foo", "bar", "baz")
table.add_row("1", "2", "3")
progress_renderables = [
"Text may be printed while the progress bars are rendering.",
Panel("In fact, [i]any[/i] renderable will work"),
"Such as [magenta]tables[/]...",
table,
"Pretty printed structures...",
{"type": "example", "text": "Pretty printed"},
"Syntax...",
syntax,
Rule("Give it a try!"),
]
from itertools import cycle
examples = cycle(progress_renderables)
console = Console(record=True)
with Progress(
SpinnerColumn(),
*Progress.get_default_columns(),
TimeElapsedColumn(),
console=console,
transient=False,
) as progress:
task1 = progress.add_task("[red]Downloading", total=1000)
task2 = progress.add_task("[green]Processing", total=1000)
task3 = progress.add_task("[yellow]Thinking", total=None)
while not progress.finished:
progress.update(task1, advance=0.5)
progress.update(task2, advance=0.3)
time.sleep(0.01)
if random.randint(0, 100) < 1:
progress.log(next(examples))
| Progress |
python | pytorch__pytorch | torch/_inductor/template_heuristics/triton.py | {
"start": 79279,
"end": 79965
} | class ____(ScaledMMConfigMixin, CUDAConfigHeuristic):
"""Scaled MM template heuristic for CUDA"""
def __init__(self) -> None:
super().__init__()
# Override mm_configs to use scaled_mm_configs
self.mm_configs = self.scaled_mm_configs
# pyrefly: ignore [bad-override]
def _filter_configs(self, configs: list[BaseConfig]) -> list[BaseConfig]:
configs = [c for c in configs if c.block_k >= 32]
return super()._filter_configs(configs)
@register_template_heuristic(
scaled_mm_device_tma_epilogue_scaling_template.uid,
"cuda",
register=torch.version.hip is None,
op_name="scaled_mm",
)
| CUDAScaledMMTemplateConfigHeuristic |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_checks/asset_check_result.py | {
"start": 819,
"end": 8246
} | class ____(
NamedTuple(
"_AssetCheckResult",
[
("passed", PublicAttr[bool]),
("asset_key", PublicAttr[Optional[AssetKey]]),
("check_name", PublicAttr[Optional[str]]),
("metadata", PublicAttr[Mapping[str, MetadataValue]]),
("severity", PublicAttr[AssetCheckSeverity]),
("description", PublicAttr[Optional[str]]),
],
),
EventWithMetadata,
):
"""The result of an asset check.
Args:
asset_key (Optional[AssetKey]):
The asset key that was checked.
check_name (Optional[str]):
The name of the check.
passed (bool):
The pass/fail result of the check.
metadata (Optional[Dict[str, RawMetadataValue]]):
Arbitrary metadata about the asset. Keys are displayed string labels, and values are
one of the following: string, float, int, JSON-serializable dict, JSON-serializable
list, and one of the data classes returned by a MetadataValue static method.
severity (AssetCheckSeverity):
Severity of the check. Defaults to ERROR.
description (Optional[str]):
A text description of the result of the check evaluation.
"""
def __new__(
cls,
*,
passed: bool,
asset_key: Optional[CoercibleToAssetKey] = None,
check_name: Optional[str] = None,
metadata: Optional[Mapping[str, RawMetadataValue]] = None,
severity: AssetCheckSeverity = AssetCheckSeverity.ERROR,
description: Optional[str] = None,
):
normalized_metadata = normalize_metadata(
check.opt_mapping_param(metadata, "metadata", key_type=str),
)
return super().__new__(
cls,
asset_key=AssetKey.from_coercible(asset_key) if asset_key is not None else None,
check_name=check.opt_str_param(check_name, "check_name"),
passed=check.bool_param(passed, "passed"),
metadata=normalized_metadata,
severity=check.inst_param(severity, "severity", AssetCheckSeverity),
description=check.opt_str_param(description, "description"),
)
def resolve_target_check_key(
self, check_names_by_asset_key: Optional[Mapping[AssetKey, AbstractSet[str]]]
) -> AssetCheckKey:
if not check_names_by_asset_key:
raise DagsterInvariantViolationError(
"Received unexpected AssetCheckResult. No AssetCheckSpecs were found for this step."
"You may need to set `check_specs` on the asset decorator, or you may be emitting an "
"AssetCheckResult that isn't in the subset passed in `context.selected_asset_check_keys`."
)
asset_keys_with_specs = check_names_by_asset_key.keys()
if self.asset_key is not None:
if self.asset_key not in asset_keys_with_specs:
raise DagsterInvariantViolationError(
"Received unexpected AssetCheckResult. It targets asset"
f" '{self.asset_key.to_user_string()}' which is not targeted by any of the"
" checks currently being evaluated. Targeted assets:"
f" {[asset_key.to_user_string() for asset_key in asset_keys_with_specs]}."
)
resolved_asset_key = self.asset_key
else:
if len(check_names_by_asset_key) > 1:
raise DagsterInvariantViolationError(
"AssetCheckResult didn't specify an asset key, but there are multiple assets"
" to choose from:"
f" {[asset_key.to_user_string() for asset_key in check_names_by_asset_key.keys()]}"
)
resolved_asset_key = next(iter(asset_keys_with_specs))
check_names_with_specs = check_names_by_asset_key[resolved_asset_key]
if self.check_name is not None:
if self.check_name not in check_names_with_specs:
raise DagsterInvariantViolationError(
"Received unexpected AssetCheckResult. No checks currently being evaluated"
f" target asset '{resolved_asset_key.to_user_string()}' and have name"
f" '{self.check_name}'. Checks being evaluated for this asset:"
f" {check_names_with_specs}"
)
resolved_check_name = self.check_name
else:
if len(check_names_with_specs) > 1:
raise DagsterInvariantViolationError(
"AssetCheckResult result didn't specify a check name, but there are multiple"
" checks to choose from for the this asset key:"
f" {check_names_with_specs}"
)
resolved_check_name = next(iter(check_names_with_specs))
return AssetCheckKey(asset_key=resolved_asset_key, name=resolved_check_name)
def to_asset_check_evaluation(
self, step_context: "StepExecutionContext"
) -> AssetCheckEvaluation:
assets_def_for_check = check.not_none(
step_context.job_def.asset_layer.get_assets_def_for_node(
node_handle=step_context.node_handle
),
f"While resolving asset check result {self}, expected to find an AssetsDefinition object that could be associated back to the currently executing NodeHandle {step_context.node_handle}.",
)
all_check_keys = set(
check.not_none(assets_def_for_check._computation).check_keys_by_output_name.values() # noqa: SLF001
)
all_check_names_by_asset_key = {}
for check_key in all_check_keys:
all_check_names_by_asset_key.setdefault(check_key.asset_key, set()).add(check_key.name)
check_key = self.resolve_target_check_key(all_check_names_by_asset_key)
input_asset_info = step_context.maybe_fetch_and_get_input_asset_version_info(
check_key.asset_key
)
from dagster._core.events import DagsterEventType
if (
input_asset_info is not None
and input_asset_info.event_type == DagsterEventType.ASSET_MATERIALIZATION
):
target_materialization_data = AssetCheckEvaluationTargetMaterializationData(
run_id=input_asset_info.run_id,
storage_id=input_asset_info.storage_id,
timestamp=input_asset_info.timestamp,
)
else:
target_materialization_data = None
return AssetCheckEvaluation(
check_name=check_key.name,
asset_key=check_key.asset_key,
passed=self.passed,
metadata=self.metadata,
target_materialization_data=target_materialization_data,
severity=self.severity,
description=self.description,
blocking=assets_def_for_check.get_spec_for_check_key(check_key).blocking,
)
def with_metadata(self, metadata: Mapping[str, RawMetadataValue]) -> "AssetCheckResult": # pyright: ignore[reportIncompatibleMethodOverride]
return AssetCheckResult(
passed=self.passed,
asset_key=self.asset_key,
check_name=self.check_name,
metadata=metadata,
severity=self.severity,
description=self.description,
)
| AssetCheckResult |
python | getsentry__sentry | tests/sentry/testutils/helpers/test_features.py | {
"start": 244,
"end": 2463
} | class ____(TestCase):
def setUp(self) -> None:
self.org = self.create_organization()
def test_without_feature(self) -> None:
assert not features.has("organizations:session-replay", self.org)
@with_feature("organizations:session-replay")
def test_with_feature(self) -> None:
assert features.has("organizations:session-replay", self.org)
def test_batch_has(self) -> None:
# Test that overrides work, and if no overrides are made that we still fall back to the
# defaults
with mock.patch("sentry.features.default_manager._entity_handler", new=None):
with self.feature("system:multi-region"):
# Make sure this check returns True for features that are defaulted to True and aren't
# mocked
ret = features.batch_has(
[
"organizations:advanced-search",
"organizations:codecov-integration",
],
organization=self.org,
)
assert ret is not None
results = list(ret.values())[0]
assert results["organizations:advanced-search"]
assert not results["organizations:codecov-integration"]
def test_feature_with_rpc_organization(self) -> None:
with self.feature({"system:multi-region": False}):
org_context = organization_service.get_organization_by_slug(
slug=self.org.slug, only_visible=False, user_id=None
)
assert org_context
assert org_context.organization
assert isinstance(org_context.organization, RpcOrganization)
assert features.has("system:multi-region") is False
with self.feature({"system:multi-region": True}):
org_context = organization_service.get_organization_by_slug(
slug=self.org.slug, only_visible=False, user_id=None
)
assert org_context
assert org_context.organization
assert isinstance(org_context.organization, RpcOrganization)
assert features.has("system:multi-region")
| TestTestUtilsFeatureHelper |
python | pennersr__django-allauth | allauth/headless/internal/restkit/inputs.py | {
"start": 360,
"end": 388
} | class ____(Form):
pass
| Input |
python | streamlit__streamlit | lib/tests/streamlit/navigation/page_test.py | {
"start": 1521,
"end": 7135
} | class ____(DeltaGeneratorTestCase):
"""Test st.Page"""
def test_cannot_infer_title_raises_exception(self):
"""Test that passing a page without a title raises an exception."""
class Foo:
def __call__(self):
pass
with pytest.raises(StreamlitAPIException):
st.Page(Foo())
try:
st.Page(Foo(), title="Hello")
except Exception as e:
pytest.fail("Should not raise exception: " + str(e))
def test_invalid_icon_raises_exception(self):
"""Test that passing an invalid icon raises an exception."""
with pytest.raises(StreamlitAPIException):
st.Page("page.py", icon="hello world")
def test_valid_icon(self):
"""Test that passing a valid icon does not raise an exception."""
st.Page("page.py", icon="😱")
# Provide an assertion to ensure no error
assert True
def test_empty_string_icon_should_raise_exception(self):
"""Test that passing an empty string icon raises an exception."""
with pytest.raises(StreamlitAPIException) as exc_info:
st.Page("page.py", icon="")
assert 'The value "" is not a valid emoji' in str(exc_info.value)
def test_whitespace_only_icon_should_raise_exception(self):
"""Test that passing a whitespace-only icon raises an exception."""
with pytest.raises(StreamlitAPIException) as exc_info:
st.Page("page.py", icon=" ")
assert 'The value " " is not a valid emoji' in str(exc_info.value)
def test_script_hash_for_paths_are_different(self):
"""Tests that script hashes are different when url path (inferred or not) is unique"""
assert st.Page("page1.py")._script_hash != st.Page("page2.py")._script_hash
assert (
st.Page(lambda: True, url_path="path_1")._script_hash
!= st.Page(lambda: True, url_path="path_2")._script_hash
)
def test_url_path_is_inferred_from_filename(self):
"""Tests that url path is inferred from filename if not provided"""
page = st.Page("page_8.py")
assert page.url_path == "page_8"
def test_url_path_is_inferred_from_function_name(self):
"""Tests that url path is inferred from function name if not provided"""
def page_9():
pass
page = st.Page(page_9)
assert page.url_path == "page_9"
def test_url_path_overrides_if_specified(self):
"""Tests that url path specified directly overrides inferred path"""
page = st.Page("page_8.py", url_path="my_url_path")
assert page.url_path == "my_url_path"
def test_url_path_strips_leading_slash(self):
"""Tests that url path strips leading slash if provided"""
page = st.Page("page_8.py", url_path="/my_url_path")
assert page.url_path == "my_url_path"
def test_url_path_strips_trailing_slash(self):
"""Tests that url path strips leading slash if provided"""
page = st.Page("page_8.py", url_path="my_url_path/")
assert page.url_path == "my_url_path"
def test_url_path_is_empty_string_if_default(self):
"""Tests that url path is "" if the page is the default page"""
def page_9():
pass
page = st.Page(page_9, default=True)
assert page.url_path == ""
def test_non_default_pages_cannot_have_empty_url_path(self):
"""Tests that an error is raised if the empty url path is provided for a non-default page"""
def page_9():
pass
with pytest.raises(StreamlitAPIException):
st.Page(page_9, url_path="")
def test_non_default_pages_cannot_have_nested_url_path(self):
"""Tests that an error is raised if the url path contains a nested path"""
def page_9():
pass
with pytest.raises(StreamlitAPIException):
st.Page(page_9, url_path="foo/bar")
def test_page_with_no_title_raises_api_exception(self):
"""Tests that an error is raised if the title is empty or inferred to be empty"""
with pytest.raises(StreamlitAPIException):
st.Page("_.py")
def page_9():
pass
with pytest.raises(StreamlitAPIException):
st.Page(page_9, title=" ")
def test_page_run_cannot_run_standalone(self):
"""Test that a page cannot run standalone."""
with pytest.raises(StreamlitAPIException):
st.Page("page.py").run()
def test_page_run_can_be_run_if_ordained(self):
"""Test that a page can be run if ordained."""
# Indicates we are in V2
self.script_run_ctx.pages_manager.set_pages({})
page = st.Page(lambda: True)
page._can_be_called = True
page.run()
# Provide an assertion to ensure no error
assert True
# NOTE: This test needs to live outside of the StPagesTest class because the class-level
# @patch mocking the return value of `is_file` takes precedence over the method level
# patch.
@patch("pathlib.Path.is_file", MagicMock(return_value=False))
def test_st_Page_throws_error_if_path_is_invalid():
with pytest.raises(StreamlitAPIException) as e:
st.Page("nonexistent.py")
assert (
str(e.value)
== "Unable to create Page. The file `nonexistent.py` could not be found."
)
with pytest.raises(StreamlitAPIException) as e:
st.Page(Path("nonexistent2.py"))
assert (
str(e.value)
== "Unable to create Page. The file `nonexistent2.py` could not be found."
)
| StPagesTest |
python | kamyu104__LeetCode-Solutions | Python/four-divisors.py | {
"start": 720,
"end": 1657
} | class ____(object):
def sumFourDivisors(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def factorize(x):
result = []
d = 2
while d*d <= x:
e = 0
while x%d == 0:
x //= d
e += 1
if e:
result.append([d, e])
d += 1 if d == 2 else 2
if x > 1:
result.append([x, 1])
return result
result = 0
for facs in itertools.imap(factorize, nums):
if len(facs) == 1 and facs[0][1] == 3:
p = facs[0][0]
result += (p**4-1)//(p-1) # p^0 + p^1 +p^2 +p^3
elif len(facs) == 2 and facs[0][1] == facs[1][1] == 1:
p, q = facs[0][0], facs[1][0]
result += (1 + p) * (1 + q)
return result
| Solution2 |
python | numba__numba | numba/misc/numba_gdbinfo.py | {
"start": 365,
"end": 5962
} | class ____():
"""Wraps the gdb binary and has methods for checking what the gdb binary
has support for (Python and NumPy)."""
def __init__(self,):
gdb_binary = config.GDB_BINARY
if gdb_binary is None:
msg = ("No valid binary could be found for gdb named: "
f"{config.GDB_BINARY}")
raise ValueError(msg)
self._gdb_binary = gdb_binary
def _run_cmd(self, cmd=()):
gdb_call = [self.gdb_binary, '-q',]
for x in cmd:
gdb_call.append('-ex')
gdb_call.append(x)
gdb_call.extend(['-ex', 'q'])
return subprocess.run(gdb_call, capture_output=True, timeout=10,
text=True)
@property
def gdb_binary(self):
return self._gdb_binary
@classmethod
def success(cls, status):
return status.returncode == 0
def check_launch(self):
"""Checks that gdb will launch ok"""
return self._run_cmd()
def check_python(self):
cmd = ("python from __future__ import print_function; "
"import sys; print(sys.version_info[:2])")
return self._run_cmd((cmd,))
def check_numpy(self):
cmd = ("python from __future__ import print_function; "
"import types; import numpy; "
"print(isinstance(numpy, types.ModuleType))")
return self._run_cmd((cmd,))
def check_numpy_version(self):
cmd = ("python from __future__ import print_function; "
"import types; import numpy;"
"print(numpy.__version__)")
return self._run_cmd((cmd,))
def collect_gdbinfo():
"""Prints information to stdout about the gdb setup that Numba has found"""
# State flags:
gdb_state = None
gdb_has_python = False
gdb_has_numpy = False
gdb_python_version = 'No Python support'
gdb_python_numpy_version = "No NumPy support"
# There are so many ways for gdb to not be working as expected. Surround
# the "is it working" tests with try/except and if there's an exception
# store it for processing later.
try:
# Check gdb exists
gdb_wrapper = _GDBTestWrapper()
# Check gdb works
status = gdb_wrapper.check_launch()
if not gdb_wrapper.success(status):
msg = (f"gdb at '{gdb_wrapper.gdb_binary}' does not appear to work."
f"\nstdout: {status.stdout}\nstderr: {status.stderr}")
raise ValueError(msg)
gdb_state = gdb_wrapper.gdb_binary
except Exception as e:
gdb_state = f"Testing gdb binary failed. Reported Error: {e}"
else:
# Got this far, so gdb works, start checking what it supports
status = gdb_wrapper.check_python()
if gdb_wrapper.success(status):
version_match = re.match(r'\((\d+),\s+(\d+)\)',
status.stdout.strip())
if version_match is not None:
pymajor, pyminor = version_match.groups()
gdb_python_version = f"{pymajor}.{pyminor}"
gdb_has_python = True
status = gdb_wrapper.check_numpy()
if gdb_wrapper.success(status):
if "Traceback" not in status.stderr.strip():
if status.stdout.strip() == 'True':
gdb_has_numpy = True
gdb_python_numpy_version = "Unknown"
# NumPy is present find the version
status = gdb_wrapper.check_numpy_version()
if gdb_wrapper.success(status):
if "Traceback" not in status.stderr.strip():
gdb_python_numpy_version = \
status.stdout.strip()
# Work out what level of print-extension support is present in this gdb
if gdb_has_python:
if gdb_has_numpy:
print_ext_supported = "Full (Python and NumPy supported)"
else:
print_ext_supported = "Partial (Python only, no NumPy support)"
else:
print_ext_supported = "None"
# Work out print ext location
print_ext_file = "gdb_print_extension.py"
print_ext_path = os.path.join(os.path.dirname(__file__), print_ext_file)
# return!
return _gdb_info(gdb_state, print_ext_path, gdb_python_version,
gdb_python_numpy_version, print_ext_supported)
def display_gdbinfo(sep_pos=45):
"""Displays the information collected by collect_gdbinfo.
"""
gdb_info = collect_gdbinfo()
print('-' * 80)
fmt = f'%-{sep_pos}s : %-s'
# Display the information
print(fmt % ("Binary location", gdb_info.binary_loc))
print(fmt % ("Print extension location", gdb_info.extension_loc))
print(fmt % ("Python version", gdb_info.py_ver))
print(fmt % ("NumPy version", gdb_info.np_ver))
print(fmt % ("Numba printing extension support", gdb_info.supported))
print("")
print("To load the Numba gdb printing extension, execute the following "
"from the gdb prompt:")
print(f"\nsource {gdb_info.extension_loc}\n")
print('-' * 80)
warn = """
=============================================================
IMPORTANT: Before sharing you should remove any information
in the above that you wish to keep private e.g. paths.
=============================================================
"""
print(dedent(warn))
if __name__ == '__main__':
display_gdbinfo()
| _GDBTestWrapper |
python | pytorch__pytorch | torch/_dynamo/package.py | {
"start": 5128,
"end": 11461
} | class ____:
"""
Contains the serializable information associated with a single code object
in dynamo. To restore an execution of compiled code, we will need the following
ingredients:
1. The "original" code object, which serves as the entry point for eager
execution, i.e. the code only executed when there's no cache entry hit.
2. The python module name this code object belongs to, for identifying the
enclosing global scope to inject compiled and resume functions.
3. A list of function names that pointing to this code object. There could be
multiple function objects pointing to the same code such as recursive functions.
4. A list of guarded code that eval frame dispatches to.
5. A list of imported module objects unioned from all compiled branches.
6. A list of "backends" (compiled fx graph) unioned from all compield branches.
7. A string path used to access the original code object users defined.
A code object can be accessed by "{python_module}.{function_name}.{code_source}" .
8. A boolean flag indicating whether the function is installed to global scope.
9. A boolean flag indicating whether the function has a compile id.
10. Whether or not this code entry was bypassed
"""
python_code: SerializedCode
python_module: str
function_names: list[_FunctionId]
guarded_codes: list[_GuardedCodeCacheEntry]
import_sources: dict[str, str]
backend_ids: list[_BackendId]
code_source: Optional[str]
install_to_global: bool
has_compile_id: bool = False
bypassed: bool = False
def _lookup_code(entry: _DynamoCodeCacheEntry) -> types.CodeType:
assert len(entry.function_names) == 1
fn: Any = sys.modules[entry.python_module]
parts = entry.function_names[0].split(".")
for part in parts:
fn = getattr(fn, part)
if entry.code_source:
parts = entry.code_source.split(".")
for part in parts:
if part.endswith("]"):
index_begin = part.rfind("[")
assert isinstance(index_begin, int) and index_begin >= 0
attr = getattr(fn, part[:index_begin], None)
if attr is None:
raise PackageError(f"Cannot find source for code entry {entry}")
fn = attr[ast.literal_eval(part[index_begin + 1 : -1])]
else:
fn = getattr(fn, part)
else:
raise PackageError(f"Cannot find source for code entry {entry}")
assert isinstance(fn, types.CodeType)
return fn
def _raise_resolution_error(code: types.CodeType, scope: Any) -> Never:
raise PackageError(
f"Cannot resolve a fully qualified name for {code}. Lookup scope: {scope}"
)
def _get_code_source(code: types.CodeType) -> tuple[str, str]:
"""
Given a code object, return a fully qualified name which will be used as
a serialized handle to access the code object from the new process.
This is normally a straightforward process, but there are some corner cases:
1. When a function is defined with decorator, then this function will be captured
inside a closure with the wrapper object.
2. When a function is defined as a nested function, then the code object will be
stored on the co_consts field of the parent code object by Python compiler.
This function handles all of the corner cases above.
"""
module = inspect.getmodule(code)
if module is None:
raise PackageError(f"Cannot find module for code {code}")
toplevel: Any = module
if sys.version_info >= (3, 11):
parts = code.co_qualname.split(".")
for part in parts:
if not hasattr(toplevel, part):
_raise_resolution_error(code, toplevel)
toplevel = getattr(toplevel, part)
if inspect.isfunction(toplevel):
break
seen = set()
def _find_code_source(obj: Any) -> Optional[str]:
nonlocal toplevel
nonlocal seen
if obj in seen:
return None
seen.add(obj)
if inspect.iscode(obj):
if obj is code:
return ""
for i, const in enumerate(obj.co_consts):
if (res := _find_code_source(const)) is not None:
return f".co_consts[{i}]{res}"
if inspect.isfunction(obj):
if (res := _find_code_source(obj.__code__)) is not None:
toplevel = obj
return f".__code__{res}"
if obj.__closure__ is not None:
for i, cell in enumerate(obj.__closure__):
try:
cell_contents = cell.cell_contents
except ValueError:
continue
if not (
inspect.isfunction(cell_contents)
or inspect.iscode(cell_contents)
):
continue
if (res := _find_code_source(cell_contents)) is not None:
toplevel = obj
return f".__closure__[{i}].cell_contents{res}"
if sys.version_info < (3, 11):
if inspect.ismodule(obj):
for value in obj.__dict__.values():
if not (inspect.isfunction(value) or inspect.isclass(value)):
continue
if (res := _find_code_source(value)) is not None:
return res
if inspect.isclass(obj):
for name, value in obj.__dict__.items():
value = getattr(obj, name)
if not (inspect.isfunction(value) or inspect.isclass(value)):
continue
if (res := _find_code_source(value)) is not None:
if value.__name__ != name:
_raise_resolution_error(code, toplevel)
return res
return None
code_source = _find_code_source(toplevel)
if code_source is None:
_raise_resolution_error(code, toplevel)
# pyrefly: ignore [missing-attribute]
return toplevel.__qualname__, code_source.strip(".")
@dataclasses.dataclass(frozen=True)
| _DynamoCodeCacheEntry |
python | wandb__wandb | wandb/apis/importers/wandb.py | {
"start": 50141,
"end": 50195
} | class ____:
username: str = ""
@dataclass
| _DummyUser |
python | falconry__falcon | tests/asgi/test_hello_asgi.py | {
"start": 307,
"end": 525
} | class ____:
def __init__(self, data):
self._stream = io.BytesIO(data)
self.close_called = False
async def read(self, num_bytes):
return self._stream.read(num_bytes)
| DataReaderWithoutClose |
python | huggingface__transformers | src/transformers/models/informer/modeling_informer.py | {
"start": 71382,
"end": 95694
} | class ____(InformerPreTrainedModel):
def __init__(self, config: InformerConfig):
super().__init__(config)
self.model = InformerModel(config)
if config.distribution_output == "student_t":
self.distribution_output = StudentTOutput(dim=config.input_size)
elif config.distribution_output == "normal":
self.distribution_output = NormalOutput(dim=config.input_size)
elif config.distribution_output == "negative_binomial":
self.distribution_output = NegativeBinomialOutput(dim=config.input_size)
else:
raise ValueError(f"Unknown distribution output {config.distribution_output}")
self.parameter_projection = self.distribution_output.get_parameter_projection(self.model.config.d_model)
self.target_shape = self.distribution_output.event_shape
if config.loss == "nll":
self.loss = nll
else:
raise ValueError(f"Unknown loss function {config.loss}")
# Initialize weights of distribution_output and apply final processing
self.post_init()
def output_params(self, dec_output):
return self.parameter_projection(dec_output)
@torch.jit.ignore
def output_distribution(self, params, loc=None, scale=None, trailing_n=None) -> torch.distributions.Distribution:
sliced_params = params
if trailing_n is not None:
sliced_params = [p[:, -trailing_n:] for p in params]
return self.distribution_output.distribution(sliced_params, loc=loc, scale=scale)
@auto_docstring
def forward(
self,
past_values: torch.Tensor,
past_time_features: torch.Tensor,
past_observed_mask: torch.Tensor,
static_categorical_features: Optional[torch.Tensor] = None,
static_real_features: Optional[torch.Tensor] = None,
future_values: Optional[torch.Tensor] = None,
future_time_features: Optional[torch.Tensor] = None,
future_observed_mask: Optional[torch.Tensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[list[torch.FloatTensor]] = None,
past_key_values: Optional[Cache] = None,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
use_cache: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[Seq2SeqTSModelOutput, tuple]:
r"""
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
Past values of the time series, that serve as context in order to predict the future. The sequence size of
this tensor must be larger than the `context_length` of the model, since the model will use the larger size
to construct lag features, i.e. additional values from the past which are added in order to serve as "extra
context".
The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if no
`lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of
the past.
The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as
`static_categorical_features`, `static_real_features`, `past_time_features` and lags).
Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
variates in the time series per time step.
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
Required time features, which the model internally will add to `past_values`. These could be things like
"month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
could also be so-called "age" features, which basically help the model know "at which point in life" a
time-series is. Age features have small values for distant past time steps and increase monotonically the
more we approach the current time step. Holiday features are also a good example of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires to provide additional time features. The Time Series Transformer only learns
additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
must but known at prediction time.
The `num_features` here is equal to `config.`num_time_features` + `config.num_dynamic_real_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in
`[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
Optional static categorical features for which the model will learn an embedding, which it will add to the
values of the time series.
Static categorical features are features which have the same value for all time steps (static over time).
A typical example of a static categorical feature is a time series ID.
static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
Optional static real features which the model will add to the values of the time series.
Static real features are features which have the same value for all time steps (static over time).
A typical example of a static real feature is promotion information.
future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, input_size)`, *optional*):
Future values of the time series, that serve as labels for the model. The `future_values` is what the
Transformer needs during training to learn to output, given the `past_values`.
The sequence length here is equal to `prediction_length`.
See the demo notebook and code snippets for details.
Optionally, during training any missing values need to be replaced with zeros and indicated via the
`future_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
variates in the time series per time step.
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
Required time features for the prediction window, which the model internally will add to `future_values`.
These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as
Fourier features). These could also be so-called "age" features, which basically help the model know "at
which point in life" a time-series is. Age features have small values for distant past time steps and
increase monotonically the more we approach the current time step. Holiday features are also a good example
of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires to provide additional time features. The Time Series Transformer only learns
additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
must but known at prediction time.
The `num_features` here is equal to `config.`num_time_features` + `config.num_dynamic_real_features`.
future_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `future_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
This mask is used to filter out missing values for the final loss calculation.
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import InformerForPrediction
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> model = InformerForPrediction.from_pretrained(
... "huggingface/informer-tourism-monthly"
... )
>>> # during training, one provides both past and future values
>>> # as well as possible additional features
>>> outputs = model(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... static_real_features=batch["static_real_features"],
... future_values=batch["future_values"],
... future_time_features=batch["future_time_features"],
... )
>>> loss = outputs.loss
>>> loss.backward()
>>> # during inference, one only provides past values
>>> # as well as possible additional features
>>> # the model autoregressively generates future values
>>> outputs = model.generate(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... static_real_features=batch["static_real_features"],
... future_time_features=batch["future_time_features"],
... )
>>> mean_prediction = outputs.sequences.mean(dim=1)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if future_values is not None:
use_cache = False
outputs = self.model(
past_values=past_values,
past_time_features=past_time_features,
past_observed_mask=past_observed_mask,
static_categorical_features=static_categorical_features,
static_real_features=static_real_features,
future_values=future_values,
future_time_features=future_time_features,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
use_cache=use_cache,
return_dict=return_dict,
cache_position=cache_position,
)
prediction_loss = None
params = None
if future_values is not None:
params = self.output_params(outputs[0]) # outputs.last_hidden_state
# loc is 3rd last and scale is 2nd last output
distribution = self.output_distribution(params, loc=outputs[-3], scale=outputs[-2])
loss = self.loss(distribution, future_values)
if future_observed_mask is None:
future_observed_mask = torch.ones_like(future_values)
if len(self.target_shape) == 0:
loss_weights = future_observed_mask
else:
loss_weights, _ = future_observed_mask.min(dim=-1, keepdim=False)
prediction_loss = weighted_average(loss, weights=loss_weights)
if not return_dict:
outputs = ((params,) + outputs[1:]) if params is not None else outputs[1:]
return ((prediction_loss,) + outputs) if prediction_loss is not None else outputs
return Seq2SeqTSPredictionOutput(
loss=prediction_loss,
params=params,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
loc=outputs.loc,
scale=outputs.scale,
static_features=outputs.static_features,
)
@torch.no_grad()
def generate(
self,
past_values: torch.Tensor,
past_time_features: torch.Tensor,
future_time_features: torch.Tensor,
past_observed_mask: Optional[torch.Tensor] = None,
static_categorical_features: Optional[torch.Tensor] = None,
static_real_features: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
) -> SampleTSPredictionOutput:
r"""
Greedily generate sequences of sample predictions from a model with a probability distribution head.
Parameters:
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
Past values of the time series, that serve as context in order to predict the future. The sequence size
of this tensor must be larger than the `context_length` of the model, since the model will use the
larger size to construct lag features, i.e. additional values from the past which are added in order to
serve as "extra context".
The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if
no `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length
of the past.
The `past_values` is what the Transformer encoder gets as input (with optional additional features,
such as `static_categorical_features`, `static_real_features`, `past_time_features` and lags).
Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number
of variates in the time series per time step.
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
Required time features, which the model internally will add to `past_values`. These could be things
like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features).
These could also be so-called "age" features, which basically help the model know "at which point in
life" a time-series is. Age features have small values for distant past time steps and increase
monotonically the more we approach the current time step. Holiday features are also a good example of
time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
where the position encodings are learned from scratch internally as parameters of the model, the Time
Series Transformer requires to provide additional time features. The Time Series Transformer only
learns additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
features must but known at prediction time.
The `num_features` here is equal to `config.`num_time_features` + `config.num_dynamic_real_features`.
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
Required time features for the prediction window, which the model internally will add to sampled
predictions. These could be things like "month of year", "day of the month", etc. encoded as vectors
(for instance as Fourier features). These could also be so-called "age" features, which basically help
the model know "at which point in life" a time-series is. Age features have small values for distant
past time steps and increase monotonically the more we approach the current time step. Holiday features
are also a good example of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
where the position encodings are learned from scratch internally as parameters of the model, the Time
Series Transformer requires to provide additional time features. The Time Series Transformer only
learns additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
features must but known at prediction time.
The `num_features` here is equal to `config.`num_time_features` + `config.num_dynamic_real_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
Optional static categorical features for which the model will learn an embedding, which it will add to
the values of the time series.
Static categorical features are features which have the same value for all time steps (static over
time).
A typical example of a static categorical feature is a time series ID.
static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
Optional static real features which the model will add to the values of the time series.
Static real features are features which have the same value for all time steps (static over time).
A typical example of a static real feature is promotion information.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers.
Return:
[`SampleTSPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of
samples, prediction_length)` or `(batch_size, number of samples, prediction_length, input_size)` for
multivariate predictions.
"""
outputs = self(
static_categorical_features=static_categorical_features,
static_real_features=static_real_features,
past_time_features=past_time_features,
past_values=past_values,
past_observed_mask=past_observed_mask,
future_time_features=future_time_features,
future_values=None,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
use_cache=True,
)
decoder = self.model.get_decoder()
enc_last_hidden = outputs.encoder_last_hidden_state
loc = outputs.loc
scale = outputs.scale
static_feat = outputs.static_features
num_parallel_samples = self.config.num_parallel_samples
repeated_loc = loc.repeat_interleave(repeats=num_parallel_samples, dim=0)
repeated_scale = scale.repeat_interleave(repeats=num_parallel_samples, dim=0)
repeated_past_values = (
past_values.repeat_interleave(repeats=num_parallel_samples, dim=0) - repeated_loc
) / repeated_scale
expanded_static_feat = static_feat.unsqueeze(1).expand(-1, future_time_features.shape[1], -1)
features = torch.cat((expanded_static_feat, future_time_features), dim=-1)
repeated_features = features.repeat_interleave(repeats=num_parallel_samples, dim=0)
repeated_enc_last_hidden = enc_last_hidden.repeat_interleave(repeats=num_parallel_samples, dim=0)
future_samples = []
# greedy decoding
for k in range(self.config.prediction_length):
lagged_sequence = self.model.get_lagged_subsequences(
sequence=repeated_past_values,
subsequences_length=1 + k,
shift=1,
)
lags_shape = lagged_sequence.shape
reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1)
decoder_input = torch.cat((reshaped_lagged_sequence, repeated_features[:, : k + 1]), dim=-1)
dec_output = decoder(inputs_embeds=decoder_input, encoder_hidden_states=repeated_enc_last_hidden)
dec_last_hidden = dec_output.last_hidden_state
params = self.parameter_projection(dec_last_hidden[:, -1:])
distr = self.output_distribution(params, loc=repeated_loc, scale=repeated_scale)
next_sample = distr.sample()
repeated_past_values = torch.cat(
(repeated_past_values, (next_sample - repeated_loc) / repeated_scale), dim=1
)
future_samples.append(next_sample)
concat_future_samples = torch.cat(future_samples, dim=1)
return SampleTSPredictionOutput(
sequences=concat_future_samples.reshape(
(-1, num_parallel_samples, self.config.prediction_length) + self.target_shape,
)
)
__all__ = ["InformerForPrediction", "InformerModel", "InformerPreTrainedModel"]
| InformerForPrediction |
python | tornadoweb__tornado | tornado/test/util_test.py | {
"start": 9670,
"end": 10219
} | class ____(unittest.TestCase):
def test_re_unescape(self):
test_strings = ("/favicon.ico", "index.html", "Hello, World!", "!$@#%;")
for string in test_strings:
self.assertEqual(string, re_unescape(re.escape(string)))
def test_re_unescape_raises_error_on_invalid_input(self):
with self.assertRaises(ValueError):
re_unescape("\\d")
with self.assertRaises(ValueError):
re_unescape("\\b")
with self.assertRaises(ValueError):
re_unescape("\\Z")
| ReUnescapeTest |
python | pytorch__pytorch | test/torch_np/test_basic.py | {
"start": 14333,
"end": 15179
} | class ____(TestCase):
def test_defaultdtype_defaults(self):
# by default, both floats and ints 64 bit
x = w.empty(3)
z = x + 1j * x
assert x.dtype.torch_dtype == torch.float64
assert z.dtype.torch_dtype == torch.complex128
assert w.arange(3).dtype.torch_dtype == torch.int64
@parametrize("dt", ["pytorch", "float32", torch.float32])
def test_set_default_float(self, dt):
try:
w.set_default_dtype(fp_dtype=dt)
x = w.empty(3)
z = x + 1j * x
assert x.dtype.torch_dtype == torch.float32
assert z.dtype.torch_dtype == torch.complex64
finally:
# restore the
w.set_default_dtype(fp_dtype="numpy")
@skip(_np.__version__ <= "1.23", reason="from_dlpack is new in NumPy 1.23")
| TestDefaultDtype |
python | encode__django-rest-framework | tests/test_viewsets.py | {
"start": 658,
"end": 890
} | class ____(GenericViewSet):
def dispatch(self, request, *args, **kwargs):
return self.dummy(request, *args, **kwargs)
def dummy(self, request, *args, **kwargs):
return Response({'view': self})
| InstanceViewSet |
python | huggingface__transformers | src/transformers/models/megatron_bert/modeling_megatron_bert.py | {
"start": 24530,
"end": 31106
} | class ____(MegatronBertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in [Attention is
all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and
`add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = MegatronBertEmbeddings(config)
self.encoder = MegatronBertEncoder(config)
self.pooler = MegatronBertPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple, BaseModelOutputWithPoolingAndCrossAttentions]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
past_key_values_length = 0 if past_key_values is None else past_key_values.get_seq_length()
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@auto_docstring(
custom_intro="""
MegatronBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
`next sentence prediction (classification)` head.
"""
)
| MegatronBertModel |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_datafusion.py | {
"start": 5677,
"end": 6387
} | class ____:
@mock.patch(RESOURCE_PATH_TO_DICT_STR)
@mock.patch(HOOK_STR)
def test_execute_check_hook_call_should_execute_successfully(self, mock_hook, mock_resource_path_to_dict):
mock_resource_path_to_dict.return_value = {"projects": PROJECT_ID}
op = CloudDataFusionGetInstanceOperator(
task_id="test_tasks",
instance_name=INSTANCE_NAME,
location=LOCATION,
project_id=PROJECT_ID,
)
op.execute(context=mock.MagicMock())
mock_hook.return_value.get_instance.assert_called_once_with(
instance_name=INSTANCE_NAME, location=LOCATION, project_id=PROJECT_ID
)
| TestCloudDataFusionGetInstanceOperator |
python | django__django | tests/urlpatterns_reverse/views.py | {
"start": 1268,
"end": 1722
} | class ____(View):
def get(self, request, *args, **kwargs):
return HttpResponse(f"Hello {self.kwargs['name']}")
view_func_from_cbv = HelloView.as_view()
empty_view_partial = partial(empty_view, template_name="template.html")
empty_view_nested_partial = partial(
empty_view_partial, template_name="nested_partial.html"
)
empty_view_wrapped = update_wrapper(
partial(empty_view, template_name="template.html"),
empty_view,
)
| HelloView |
python | django__django | django/contrib/postgres/validators.py | {
"start": 666,
"end": 1006
} | class ____(MinLengthValidator):
message = ngettext_lazy(
"List contains %(show_value)d item, it should contain no fewer than "
"%(limit_value)d.",
"List contains %(show_value)d items, it should contain no fewer than "
"%(limit_value)d.",
"show_value",
)
@deconstructible
| ArrayMinLengthValidator |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 22744,
"end": 23546
} | class ____(RequestHandler):
def decode_argument(self, value, name=None):
if type(value) is not bytes:
raise Exception("unexpected type for value: %r" % type(value))
# use self.request.arguments directly to avoid recursion
if "encoding" in self.request.arguments:
return value.decode(to_unicode(self.request.arguments["encoding"][0]))
else:
return value
def get(self, arg):
def describe(s):
if type(s) is bytes:
return ["bytes", native_str(binascii.b2a_hex(s))]
elif type(s) is unicode_type:
return ["unicode", s]
raise Exception("unknown type")
self.write({"path": describe(arg), "query": describe(self.get_argument("foo"))})
| DecodeArgHandler |
python | tensorflow__tensorflow | tensorflow/python/distribute/mirrored_strategy_test.py | {
"start": 15750,
"end": 20197
} | class ____(
strategy_test_lib.DistributionTestBase,
strategy_test_lib.TwoDeviceDistributionTestBase, parameterized.TestCase):
def tearDown(self):
super().tearDown()
context._reset_context()
def testGpusCollectiveOp(self, use_default):
@def_function.function(jit_compile=util.is_xla_enabled())
def fn(var, use_default):
if use_default or util.is_xla_enabled():
self.assertIsInstance(
strategy.extended._get_cross_device_ops(var),
cross_device_ops_lib.CollectiveAllReduce)
else:
self.assertIsInstance(
strategy.extended._get_cross_device_ops(var),
cross_device_ops_lib.NcclAllReduce)
strategy = mirrored_strategy.MirroredStrategy(
["GPU:0", "GPU:1"],
cross_device_ops=None
if use_default else cross_device_ops_lib.NcclAllReduce())
with strategy.scope():
var = variables.Variable(1.)
fn(var, use_default)
def testVirtualGpusCollectiveOp(self, use_default):
# Logical devices cannot be changed after context initialization.
context._reset_context()
physical_gpus = context.context().list_physical_devices(device_type="GPU")
context.context().set_logical_device_configuration(physical_gpus[1], [
context.LogicalDeviceConfiguration(memory_limit=1024),
context.LogicalDeviceConfiguration(memory_limit=1024)
])
@def_function.function(jit_compile=util.is_xla_enabled())
def fn(var, use_default):
if use_default or util.is_xla_enabled():
self.assertIsInstance(
strategy.extended._get_cross_device_ops(var),
cross_device_ops_lib.CollectiveAllReduce)
self.assertEqual(
strategy.extended._get_cross_device_ops(
var)._options.implementation,
collective_util.CommunicationImplementation.RING)
else:
self.assertIsInstance(
strategy.extended._get_cross_device_ops(var),
cross_device_ops_lib.NcclAllReduce)
strategy = mirrored_strategy.MirroredStrategy(
["GPU:0", "GPU:1", "GPU:2"],
cross_device_ops=None
if use_default else cross_device_ops_lib.NcclAllReduce())
with strategy.scope():
var = variables.Variable(1.)
fn(var, use_default)
def testCpusCollectiveOp(self, use_default):
del use_default
if util.is_xla_enabled():
self.skipTest("Only expected to run under non-XLA context.")
@def_function.function(jit_compile=True)
def fn(var):
if not ops.executing_eagerly_outside_functions():
self.assertIsInstance(
strategy.extended._get_cross_device_ops(var),
cross_device_ops_lib.ReductionToOneDevice)
else:
self.assertIsInstance(
strategy.extended._get_cross_device_ops(var),
cross_device_ops_lib.CollectiveAllReduce)
strategy = mirrored_strategy.MirroredStrategy(["CPU:0", "CPU:1"])
with strategy.scope():
var = variables.Variable(1.)
fn(var)
def testMixedDevicesCollectiveOp(self, use_default):
del use_default
if util.is_xla_enabled():
self.skipTest("All devices should be identical in XLA context.")
# XLA is not supported if devices are not of the same type.
strategy = mirrored_strategy.MirroredStrategy(["CPU:0", "GPU:0"])
with strategy.scope():
var = variables.Variable(1.)
self.assertIsInstance(
strategy.extended._get_cross_device_ops(var),
cross_device_ops_lib.ReductionToOneDevice)
def testMirroredStrategyInt32VariableCollectiveOp(self, use_default):
if util.is_xla_enabled():
self.skipTest("Only expected to run under non-XLA context.")
strategy = mirrored_strategy.MirroredStrategy(
["GPU:0", "GPU:1"],
cross_device_ops=None
if use_default else cross_device_ops_lib.NcclAllReduce())
with strategy.scope():
# CollevtiveOp does not support int32 on GPU.
var = variables.Variable(1)
self.assertIsInstance(
strategy.extended._get_cross_device_ops(var),
cross_device_ops_lib.ReductionToOneDevice)
def one_device_combinations():
return combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_one_gpu,
],
mode=["graph", "eager"])
@combinations.generate(one_device_combinations())
| MirroredGetCrossDeviceOpTest |
python | spack__spack | lib/spack/spack/build_environment.py | {
"start": 31759,
"end": 32913
} | class ____:
def __init__(self, *roots: spack.spec.Spec, context: Context):
# For the roots (well, marked specs) we follow different edges
# than for their deps, depending on the context.
self.root_hashes = set(s.dag_hash() for s in roots)
if context == Context.BUILD:
# Drop direct run deps in build context
# We don't really distinguish between install and build time test deps,
# so we include them here as build-time test deps.
self.root_depflag = dt.BUILD | dt.TEST | dt.LINK
elif context == Context.TEST:
# This is more of an extended run environment
self.root_depflag = dt.TEST | dt.RUN | dt.LINK
elif context == Context.RUN:
self.root_depflag = dt.RUN | dt.LINK
def accept(self, item):
return True
def neighbors(self, item):
spec = item.edge.spec
if spec.dag_hash() in self.root_hashes:
depflag = self.root_depflag
else:
depflag = dt.LINK | dt.RUN
return traverse.sort_edges(spec.edges_to_dependencies(depflag=depflag))
| EnvironmentVisitor |
python | astropy__astropy | astropy/timeseries/tests/test_common.py | {
"start": 2726,
"end": 3288
} | class ____(CommonTimeSeriesTests):
_row = {
"time_bin_start": "2016-03-23T12:30:40",
"time_bin_size": 2 * u.s,
"a": 1.0,
"b": 2,
"c": "a",
}
def setup_method(self, method):
self.series = BinnedTimeSeries(
time_bin_start=INPUT_TIME, time_bin_size=3 * u.s, data=PLAIN_TABLE
)
self.time_attr = "time_bin_start"
def test_column_slicing(self):
ts = self.series["time_bin_start", "time_bin_size", "a"]
assert isinstance(ts, BinnedTimeSeries)
| TestBinnedTimeSeries |
python | huggingface__transformers | src/transformers/models/grounding_dino/modeling_grounding_dino.py | {
"start": 54747,
"end": 58124
} | class ____(nn.Module):
def __init__(self, config) -> None:
super().__init__()
self.d_model = config.d_model
self.text_enhancer_layer = GroundingDinoTextEnhancerLayer(config)
self.fusion_layer = GroundingDinoFusionLayer(config)
self.deformable_layer = GroundingDinoDeformableLayer(config)
def get_text_position_embeddings(
self,
text_features: Tensor,
text_position_embedding: Optional[torch.Tensor],
text_position_ids: Optional[torch.Tensor],
) -> Tensor:
batch_size, seq_length, _ = text_features.shape
if text_position_embedding is None and text_position_ids is None:
text_position_embedding = torch.arange(seq_length, device=text_features.device)
text_position_embedding = text_position_embedding.float()
text_position_embedding = text_position_embedding.unsqueeze(0).unsqueeze(-1)
text_position_embedding = text_position_embedding.repeat(batch_size, 1, 1)
text_position_embedding = get_sine_pos_embed(
text_position_embedding, num_pos_feats=self.d_model, exchange_xy=False
)
if text_position_ids is not None:
text_position_embedding = get_sine_pos_embed(
text_position_ids[..., None], num_pos_feats=self.d_model, exchange_xy=False
)
return text_position_embedding
def forward(
self,
vision_features: Tensor,
vision_position_embedding: Tensor,
spatial_shapes: Tensor,
spatial_shapes_list: list[tuple[int, int]],
level_start_index: Tensor,
key_padding_mask: Tensor,
reference_points: Tensor,
text_features: Optional[Tensor] = None,
text_attention_mask: Optional[Tensor] = None,
text_position_embedding: Optional[Tensor] = None,
text_self_attention_masks: Optional[Tensor] = None,
text_position_ids: Optional[Tensor] = None,
):
text_position_embedding = self.get_text_position_embeddings(
text_features, text_position_embedding, text_position_ids
)
(vision_features, vision_fused_attn), (text_features, text_fused_attn) = self.fusion_layer(
vision_features=vision_features,
text_features=text_features,
attention_mask_vision=key_padding_mask,
attention_mask_text=text_attention_mask,
)
(text_features, text_enhanced_attn) = self.text_enhancer_layer(
hidden_states=text_features,
attention_masks=~text_self_attention_masks, # note we use ~ for mask here
position_embeddings=(text_position_embedding if text_position_embedding is not None else None),
)
(vision_features, vision_deformable_attn) = self.deformable_layer(
hidden_states=vision_features,
attention_mask=~key_padding_mask,
position_embeddings=vision_position_embedding,
reference_points=reference_points,
spatial_shapes=spatial_shapes,
spatial_shapes_list=spatial_shapes_list,
level_start_index=level_start_index,
)
return (
(vision_features, text_features),
(vision_fused_attn, text_fused_attn, text_enhanced_attn, vision_deformable_attn),
)
| GroundingDinoEncoderLayer |
python | pytorch__pytorch | test/lazy/test_extract_compiled_graph.py | {
"start": 353,
"end": 437
} | class ____(nn.Module):
def forward(self, a):
return a * 2
| ModuleConstScale |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 243699,
"end": 244724
} | class ____(TMADescriptor):
"""
the new host-side TMA Descriptor API:
(the ones obtained via create_{1d,2d}_tma_descriptor calls).
See also TMADescriptorStable for the new API.
"""
def __init__(
self,
tensor: IRNode,
dims: list[Union[int, torch.SymInt]],
block_dims: list[Union[int, torch.SymInt]],
element_size: Optional[int] = None,
) -> None:
assert len(dims) in (1, 2)
assert len(dims) == len(block_dims)
if element_size is None:
element_size = tensor.get_dtype().itemsize
self.dims = dims
self.block_dims = block_dims
self.element_size = element_size
self.rank = len(self.dims)
inputs = [tensor]
constant_args = [
*self.dims,
*self.block_dims,
self.element_size,
]
super().__init__(
tensor=tensor,
inputs=inputs,
constant_args=constant_args,
)
| TMADescriptorExperimental |
python | facelessuser__soupsieve | hatch_build.py | {
"start": 517,
"end": 1575
} | class ____(MetadataHookInterface):
"""Our metadata hook."""
def update(self, metadata):
"""See https://ofek.dev/hatch/latest/plugins/metadata-hook/ for more information."""
metadata["classifiers"] = [
f"Development Status :: {get_version_dev_status(self.root)}",
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Programming Language :: Python :: 3.12',
'Programming Language :: Python :: 3.13',
'Programming Language :: Python :: 3.14',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Typing :: Typed'
]
| CustomMetadataHook |
python | readthedocs__readthedocs.org | readthedocs/filetreediff/tests/test_filetreediff.py | {
"start": 756,
"end": 5400
} | class ____(TestCase):
def setUp(self):
self.project = get(Project)
self.version_a = self.project.versions.get(slug=LATEST)
self.build_a_old = get(
Build,
project=self.project,
version=self.version_a,
state=BUILD_STATE_FINISHED,
success=True,
)
self.build_a = get(
Build,
project=self.project,
version=self.version_a,
state=BUILD_STATE_FINISHED,
success=True,
)
self.version_b = get(
Version,
project=self.project,
slug="v2",
active=True,
built=True,
)
self.build_b_old = get(
Build,
project=self.project,
version=self.version_b,
state=BUILD_STATE_FINISHED,
success=True,
)
self.build_b = get(
Build,
project=self.project,
version=self.version_b,
state=BUILD_STATE_FINISHED,
success=True,
)
def _mock_open(self, content):
@contextmanager
def f(*args, **kwargs):
read_mock = mock.MagicMock()
read_mock.read.return_value = content
yield read_mock
return f
def _mock_manifest(self, build_id: int, files: dict[str, str]):
return self._mock_open(
json.dumps(
{
"build": {"id": build_id},
"files": {
file_path: {"main_content_hash": main_content_hash}
for file_path, main_content_hash in files.items()
},
}
)
)
@mock.patch.object(BuildMediaFileSystemStorageTest, "open")
def test_diff_no_changes(self, storage_open):
files_a = {
"index.html": "hash1",
"tutorials/index.html": "hash2",
}
storage_open.side_effect = [
self._mock_manifest(self.build_a.id, files_a)(),
self._mock_manifest(self.build_b.id, files_a)(),
]
diff = get_diff(self.version_a, self.version_b)
assert diff.added == []
assert diff.deleted == []
assert diff.modified == []
assert not diff.outdated
@mock.patch.object(BuildMediaFileSystemStorageTest, "open")
def test_diff_changes(self, storage_open):
files_a = {
"index.html": "hash1",
"tutorials/index.html": "hash2",
"new-file.html": "hash-new",
}
files_b = {
"index.html": "hash1",
"tutorials/index.html": "hash-changed",
"deleted.html": "hash-deleted",
}
storage_open.side_effect = [
self._mock_manifest(self.build_a.id, files_a)(),
self._mock_manifest(self.build_b.id, files_b)(),
]
diff = get_diff(self.version_a, self.version_b)
assert [file.path for file in diff.files] == ["deleted.html", "new-file.html", "tutorials/index.html"]
assert [file.path for file in diff.added] == ["new-file.html"]
assert [file.path for file in diff.deleted] == ["deleted.html"]
assert [file.path for file in diff.modified] == ["tutorials/index.html"]
assert not diff.outdated
@mock.patch.object(BuildMediaFileSystemStorageTest, "open")
def test_missing_manifest(self, storage_open):
storage_open.side_effect = FileNotFoundError
diff = get_diff(self.version_a, self.version_b)
assert diff is None
@mock.patch.object(BuildMediaFileSystemStorageTest, "open")
def test_outdated_diff(self, storage_open):
files_a = {
"index.html": "hash1",
"tutorials/index.html": "hash2",
"new-file.html": "hash-new",
}
files_b = {
"index.html": "hash1",
"tutorials/index.html": "hash-changed",
"deleted.html": "hash-deleted",
}
storage_open.side_effect = [
self._mock_manifest(self.build_a_old.id, files_a)(),
self._mock_manifest(self.build_b_old.id, files_b)(),
]
diff = get_diff(self.version_a, self.version_b)
assert [file.path for file in diff.files] == ["deleted.html", "new-file.html", "tutorials/index.html"]
assert [file.path for file in diff.added] == ["new-file.html"]
assert [file.path for file in diff.deleted] == ["deleted.html"]
assert [file.path for file in diff.modified] == ["tutorials/index.html"]
assert diff.outdated
| TestsFileTreeDiff |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_vertex_ai.py | {
"start": 67634,
"end": 71855
} | class ____:
@mock.patch("google.cloud.aiplatform.datasets.ImageDataset")
@mock.patch(VERTEX_AI_PATH.format("auto_ml.AutoMLHook"))
def test_execute(self, mock_hook, mock_dataset):
mock_hook.return_value.create_auto_ml_image_training_job.return_value = (None, "training_id")
op = CreateAutoMLImageTrainingJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
display_name=DISPLAY_NAME,
dataset_id=TEST_DATASET_ID,
prediction_type="classification",
multi_label=False,
model_type="CLOUD",
sync=True,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
parent_model=TEST_PARENT_MODEL,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_dataset.assert_called_once_with(dataset_name=TEST_DATASET_ID)
mock_hook.return_value.create_auto_ml_image_training_job.assert_called_once_with(
project_id=GCP_PROJECT,
region=GCP_LOCATION,
display_name=DISPLAY_NAME,
dataset=mock_dataset.return_value,
prediction_type="classification",
parent_model=TEST_PARENT_MODEL,
multi_label=False,
model_type="CLOUD",
base_model=None,
labels=None,
training_encryption_spec_key_name=None,
model_encryption_spec_key_name=None,
training_fraction_split=None,
validation_fraction_split=None,
test_fraction_split=None,
training_filter_split=None,
validation_filter_split=None,
test_filter_split=None,
budget_milli_node_hours=None,
model_display_name=None,
model_labels=None,
disable_early_stopping=False,
sync=True,
is_default_version=None,
model_version_aliases=None,
model_version_description=None,
)
@mock.patch("google.cloud.aiplatform.datasets.ImageDataset")
@mock.patch(VERTEX_AI_PATH.format("auto_ml.AutoMLHook"))
def test_execute__parent_model_version_index_is_removed(self, mock_hook, mock_dataset):
mock_hook.return_value.create_auto_ml_image_training_job.return_value = (None, "training_id")
op = CreateAutoMLImageTrainingJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
display_name=DISPLAY_NAME,
dataset_id=TEST_DATASET_ID,
prediction_type="classification",
multi_label=False,
model_type="CLOUD",
sync=True,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
parent_model=VERSIONED_TEST_PARENT_MODEL,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.return_value.create_auto_ml_image_training_job.assert_called_once_with(
project_id=GCP_PROJECT,
region=GCP_LOCATION,
display_name=DISPLAY_NAME,
dataset=mock_dataset.return_value,
prediction_type="classification",
parent_model=TEST_PARENT_MODEL,
multi_label=False,
model_type="CLOUD",
base_model=None,
labels=None,
training_encryption_spec_key_name=None,
model_encryption_spec_key_name=None,
training_fraction_split=None,
validation_fraction_split=None,
test_fraction_split=None,
training_filter_split=None,
validation_filter_split=None,
test_filter_split=None,
budget_milli_node_hours=None,
model_display_name=None,
model_labels=None,
disable_early_stopping=False,
sync=True,
is_default_version=None,
model_version_aliases=None,
model_version_description=None,
)
| TestVertexAICreateAutoMLImageTrainingJobOperator |
python | spack__spack | lib/spack/spack/test/web.py | {
"start": 7956,
"end": 8089
} | class ____:
def search(self, *args, **kwargs):
return [{"Key": "keyone"}, {"Key": "keytwo"}, {"Key": "keythree"}]
| MockPages |
python | ansible__ansible | test/lib/ansible_test/_internal/test.py | {
"start": 5778,
"end": 6571
} | class ____(TestResult):
"""Test skipped."""
def __init__(self, command: str, test: str, python_version: t.Optional[str] = None) -> None:
super().__init__(command, test, python_version)
self.reason: t.Optional[str] = None
def write_console(self) -> None:
"""Write results to console."""
if self.reason:
display.warning(self.reason)
else:
display.info('No tests applicable.', verbosity=1)
def write_junit(self, args: TestConfig) -> None:
"""Write results to a junit XML file."""
test_case = junit_xml.TestCase(
classname=self.command,
name=self.name,
skipped=self.reason or 'No tests applicable.',
)
self.save_junit(args, test_case)
| TestSkipped |
python | EpistasisLab__tpot | tpot/builtin_modules/column_one_hot_encoder.py | {
"start": 7791,
"end": 13218
} | class ____(TransformerMixin, BaseEstimator ):
def __init__(self, columns='auto', handle_unknown='error', unknown_value = -1, encoded_missing_value = np.nan, min_frequency=None,max_categories=None):
'''
Parameters
----------
columns : str, list, default='auto'
Determines which columns to onehot encode with sklearn.preprocessing.OneHotEncoder.
- 'auto' : Automatically select categorical features based on columns with less than 10 unique values
- 'categorical' : Automatically select categorical features
- 'numeric' : Automatically select numeric features
- 'all' : Select all features
- list : A list of columns to select
drop, handle_unknown, sparse_output, min_frequency, max_categories : see sklearn.preprocessing.OneHotEncoder
'''
self.columns = columns
self.handle_unknown = handle_unknown
self.unknown_value = unknown_value
self.encoded_missing_value = encoded_missing_value
self.min_frequency = min_frequency
self.max_categories = max_categories
def fit(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
y: array-like {n_samples,} (Optional, ignored)
Feature labels
"""
if (self.columns == "categorical" or self.columns == "numeric") and not isinstance(X, pd.DataFrame):
raise ValueError(f"Invalid value for columns: {self.columns}. "
"Only 'all' or <list> is supported for np arrays")
if self.columns == "categorical":
self.columns_ = list(X.select_dtypes(exclude='number').columns)
elif self.columns == "numeric":
self.columns_ = [col for col in X.columns if is_numeric_dtype(X[col])]
elif self.columns == "auto":
self.columns_ = auto_select_categorical_features(X)
elif self.columns == "all":
if isinstance(X, pd.DataFrame):
self.columns_ = X.columns
else:
self.columns_ = list(range(X.shape[1]))
elif isinstance(self.columns, list):
self.columns_ = self.columns
else:
raise ValueError(f"Invalid value for columns: {self.columns}")
if len(self.columns_) == 0:
return self
self.enc = sklearn.preprocessing.OrdinalEncoder(categories='auto',
handle_unknown = self.handle_unknown,
unknown_value = self.unknown_value,
encoded_missing_value = self.encoded_missing_value,
min_frequency = self.min_frequency,
max_categories = self.max_categories)
#TODO make this more consistent with sklearn baseimputer/baseencoder
'''
if isinstance(X, pd.DataFrame):
self.enc.set_output(transform="pandas")
for col in X.columns:
# check if the column name is not a string
if not isinstance(col, str):
# if it's not a string, rename the column with "X" prefix
X.rename(columns={col: f"X{col}"}, inplace=True)
'''
if len(self.columns_) == X.shape[1]:
X_sel = self.enc.fit(X)
else:
X_sel, X_not_sel = _X_selected(X, self.columns_)
X_sel = self.enc.fit(X_sel)
return self
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
if len(self.columns_) == 0:
return X
#TODO make this more consistent with sklearn baseimputer/baseencoder
'''
if isinstance(X, pd.DataFrame):
for col in X.columns:
# check if the column name is not a string
if not isinstance(col, str):
# if it's not a string, rename the column with "X" prefix
X.rename(columns={col: f"X{col}"}, inplace=True)
'''
if len(self.columns_) == X.shape[1]:
return self.enc.transform(X)
else:
X_sel, X_not_sel= _X_selected(X, self.columns_)
X_sel = self.enc.transform(X_sel)
#If X is dataframe
if isinstance(X, pd.DataFrame):
X_sel = pd.DataFrame(X_sel, columns=self.enc.get_feature_names_out())
return pd.concat([X_not_sel.reset_index(drop=True), X_sel.reset_index(drop=True)], axis=1)
else:
return np.hstack((X_not_sel, X_sel)) | ColumnOrdinalEncoder |
python | pypa__hatch | tests/env/plugin/test_interface.py | {
"start": 82214,
"end": 97647
} | class ____:
def test_not_table(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {"hatch": {"envs": {"default": {"workspace": 9000}}}},
}
project = Project(isolation, config=config)
with pytest.raises(TypeError, match="Field workspace must be a table"):
MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"], # Exception raised here
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
def test_parallel_not_boolean(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {"hatch": {"envs": {"default": {"workspace": {"parallel": 9000}}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with pytest.raises(TypeError, match="Field `tool.hatch.envs.default.workspace.parallel` must be a boolean"):
_ = environment.workspace.parallel
def test_parallel_default(self, isolation, isolated_data_dir, platform, global_application):
config = {"project": {"name": "my_app", "version": "0.0.1"}}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
assert environment.workspace.parallel is True
def test_parallel_override(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {"hatch": {"envs": {"default": {"workspace": {"parallel": False}}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
assert environment.workspace.parallel is False
def test_members_not_table(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {"hatch": {"envs": {"default": {"workspace": {"members": 9000}}}}},
}
project = Project(isolation, config=config)
with pytest.raises(TypeError, match="Field workspace.members must be an array"):
MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"], # Exception raised here
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
def test_member_invalid_type(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {"hatch": {"envs": {"default": {"workspace": {"members": [9000]}}}}},
}
project = Project(isolation, config=config)
with pytest.raises(TypeError, match="Member #1 must be a string or table"):
MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"], # Exception raised here
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
def test_member_no_path(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {"hatch": {"envs": {"default": {"workspace": {"members": [{}]}}}}},
}
project = Project(isolation, config=config)
with pytest.raises(TypeError, match="Member #1 must define a `path` key"):
MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"], # Exception raised here
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
def test_member_path_not_string(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {"hatch": {"envs": {"default": {"workspace": {"members": [{"path": 9000}]}}}}},
}
project = Project(isolation, config=config)
with pytest.raises(TypeError, match="Member #1 path must be a string"):
MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"], # Exception raised here
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
def test_member_path_empty_string(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {"hatch": {"envs": {"default": {"workspace": {"members": [{"path": ""}]}}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with pytest.raises(
ValueError,
match=(
"Option `path` of member #1 of field `tool.hatch.envs.default.workspace.members` "
"cannot be an empty string"
),
):
_ = environment.workspace.members
def test_member_features_not_array(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {"hatch": {"envs": {"default": {"workspace": {"members": [{"path": "foo", "features": 9000}]}}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with pytest.raises(
TypeError,
match=(
"Option `features` of member #1 of field `tool.hatch.envs.default.workspace.members` "
"must be an array of strings"
),
):
_ = environment.workspace.members
def test_member_feature_not_string(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {"hatch": {"envs": {"default": {"workspace": {"members": [{"path": "foo", "features": [9000]}]}}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with pytest.raises(
TypeError,
match=(
"Feature #1 of option `features` of member #1 of field `tool.hatch.envs.default.workspace.members` "
"must be a string"
),
):
_ = environment.workspace.members
def test_member_feature_empty_string(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {"hatch": {"envs": {"default": {"workspace": {"members": [{"path": "foo", "features": [""]}]}}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with pytest.raises(
ValueError,
match=(
"Feature #1 of option `features` of member #1 of field `tool.hatch.envs.default.workspace.members` "
"cannot be an empty string"
),
):
_ = environment.workspace.members
def test_member_feature_duplicate(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {
"hatch": {
"envs": {"default": {"workspace": {"members": [{"path": "foo", "features": ["foo", "Foo"]}]}}}
}
},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with pytest.raises(
ValueError,
match=(
"Feature #2 of option `features` of member #1 of field `tool.hatch.envs.default.workspace.members` "
"is a duplicate"
),
):
_ = environment.workspace.members
def test_member_does_not_exist(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {"hatch": {"envs": {"default": {"workspace": {"members": [{"path": "foo"}]}}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with pytest.raises(
OSError,
match=re.escape(
f"No members could be derived from `foo` of field `tool.hatch.envs.default.workspace.members`: "
f"{isolation / 'foo'}"
),
):
_ = environment.workspace.members
def test_member_not_project(self, temp_dir, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {"hatch": {"envs": {"default": {"workspace": {"members": [{"path": "foo"}]}}}}},
}
project = Project(temp_dir, config=config)
environment = MockEnvironment(
temp_dir,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
member_path = temp_dir / "foo"
member_path.mkdir()
with pytest.raises(
OSError,
match=re.escape(
f"Member derived from `foo` of field `tool.hatch.envs.default.workspace.members` is not a project "
f"(no `pyproject.toml` file): {member_path}"
),
):
_ = environment.workspace.members
def test_member_duplicate(self, temp_dir, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {"hatch": {"envs": {"default": {"workspace": {"members": [{"path": "foo"}, {"path": "f*"}]}}}}},
}
project = Project(temp_dir, config=config)
environment = MockEnvironment(
temp_dir,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
member_path = temp_dir / "foo"
member_path.mkdir()
(member_path / "pyproject.toml").touch()
with pytest.raises(
ValueError,
match=re.escape(
f"Member derived from `f*` of field "
f"`tool.hatch.envs.default.workspace.members` is a duplicate: {member_path}"
),
):
_ = environment.workspace.members
def test_correct(self, hatch, temp_dir, isolated_data_dir, platform, global_application):
member1_path = temp_dir / "foo"
member2_path = temp_dir / "bar"
member3_path = temp_dir / "baz"
for member_path in [member1_path, member2_path, member3_path]:
with temp_dir.as_cwd():
result = hatch("new", member_path.name)
assert result.exit_code == 0, result.output
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {"hatch": {"envs": {"default": {"workspace": {"members": [{"path": "foo"}, {"path": "b*"}]}}}}},
}
project = Project(temp_dir, config=config)
environment = MockEnvironment(
temp_dir,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
members = environment.workspace.members
assert len(members) == 3
assert members[0].project.location == member1_path
assert members[1].project.location == member2_path
assert members[2].project.location == member3_path
| TestWorkspaceConfig |
python | PrefectHQ__prefect | tests/infrastructure/provisioners/test_ecs.py | {
"start": 13116,
"end": 16494
} | class ____:
async def test_requires_provisioning(self, authentication_resource):
needs_provisioning = await authentication_resource.requires_provisioning()
assert needs_provisioning
@pytest.mark.usefixtures("existing_iam_user", "existing_iam_policy")
async def test_needs_provisioning_existing_user_and_policy(
self, authentication_resource
):
needs_provisioning = await authentication_resource.requires_provisioning()
assert needs_provisioning
@pytest.mark.usefixtures(
"existing_iam_user",
"existing_iam_policy",
"existing_credentials_block",
"existing_execution_role",
)
async def test_needs_provisioning_existing_resources(self, authentication_resource):
needs_provisioning = await authentication_resource.requires_provisioning()
assert not needs_provisioning
async def test_get_task_count(self, authentication_resource):
count = await authentication_resource.get_task_count()
assert count == 5
async def test_get_planned_actions(self, authentication_resource):
actions = await authentication_resource.get_planned_actions()
assert (
"Creating an IAM user for managing ECS tasks: [blue]prefect-ecs-user[/]"
in actions
)
assert (
"Creating and attaching an IAM policy for managing ECS tasks:"
" [blue]prefect-ecs-policy[/]" in actions
)
assert "Storing generated AWS credentials in a block" in actions
@pytest.mark.usefixtures("existing_iam_user", "existing_iam_policy")
async def test_get_planned_actions_existing_user(self, authentication_resource):
actions = await authentication_resource.get_planned_actions()
assert actions == [
(
"Creating an IAM role assigned to ECS tasks:"
" [blue]PrefectEcsTaskExecutionRole[/]"
),
"Storing generated AWS credentials in a block",
]
@pytest.mark.usefixtures("register_block_types")
async def test_provision(self, authentication_resource, prefect_client):
advance_mock = MagicMock()
base_job_template = {
"variables": {
"type": "object",
"properties": {"aws_credentials": {}, "execution_role_arn": {}},
}
}
await authentication_resource.provision(
base_job_template=base_job_template, advance=advance_mock
)
block_document = await prefect_client.read_block_document_by_name(
"work-pool-aws-credentials", "aws-credentials"
)
assert isinstance(block_document.data["aws_access_key_id"], str)
assert isinstance(block_document.data["aws_secret_access_key"], str)
assert base_job_template["variables"]["properties"]["aws_credentials"] == {
"default": {"$ref": {"block_document_id": str(block_document.id)}},
}
assert advance_mock.call_count == 5
@pytest.fixture
def cluster_resource():
return ClusterResource(cluster_name="prefect-ecs-cluster")
@pytest.fixture
def existing_cluster():
ecs_client = boto3.client("ecs")
ecs_client.create_cluster(clusterName="prefect-ecs-cluster")
yield
ecs_client.delete_cluster(cluster="prefect-ecs-cluster")
| TestAuthenticationResource |
python | walkccc__LeetCode | solutions/737. Sentence Similarity II/737.py | {
"start": 0,
"end": 868
} | class ____:
def areSentencesSimilarTwo(
self,
words1: list[str],
words2: list[str],
pairs: list[list[str]],
) -> bool:
if len(words1) != len(words2):
return False
# graph[key] := all the similar words of key
graph = collections.defaultdict(set)
for a, b in pairs:
graph[a].add(b)
graph[b].add(a)
def dfs(word1: str, word2: str, seen: set) -> bool:
if word1 in graph[word2]:
return True
seen.add(word1)
for child in graph[word1]:
if child in seen:
continue
if dfs(child, word2, seen):
return True
return False
for word1, word2 in zip(words1, words2):
if word1 == word2:
continue
if word1 not in graph:
return False
if not dfs(word1, word2, set()):
return False
return True
| Solution |
python | tensorflow__tensorflow | tensorflow/python/saved_model/nested_structure_coder.py | {
"start": 10159,
"end": 13173
} | class ____:
"""Codec for built-in `TypeSpec` classes.
Built-in TypeSpec's that do not require a custom codec implementation
register themselves by instantiating this class and passing it to
register_codec.
Attributes:
type_spec_class: The built-in TypeSpec class that the
codec is instantiated for.
type_spec_proto_enum: The proto enum value for the built-in TypeSpec class.
"""
_BUILT_IN_TYPE_SPEC_PROTOS = []
_BUILT_IN_TYPE_SPECS = []
def __init__(self, type_spec_class, type_spec_proto_enum):
if not issubclass(type_spec_class, internal.TypeSpec):
raise ValueError(
f"The type '{type_spec_class}' does not subclass tf.TypeSpec.")
if type_spec_class in self._BUILT_IN_TYPE_SPECS:
raise ValueError(
f"The type '{type_spec_class}' already has an instantiated codec.")
if type_spec_proto_enum in self._BUILT_IN_TYPE_SPEC_PROTOS:
raise ValueError(
f"The proto value '{type_spec_proto_enum}' is already registered."
)
if (not isinstance(type_spec_proto_enum, int)
or type_spec_proto_enum <= 0
or type_spec_proto_enum > 10):
raise ValueError(f"The proto value '{type_spec_proto_enum}' is invalid.")
self.type_spec_class = type_spec_class
self.type_spec_proto_enum = type_spec_proto_enum
self._BUILT_IN_TYPE_SPECS.append(type_spec_class)
self._BUILT_IN_TYPE_SPEC_PROTOS.append(type_spec_proto_enum)
def can_encode(self, pyobj):
"""Returns true if `pyobj` can be encoded as the built-in TypeSpec."""
return isinstance(pyobj, self.type_spec_class)
def do_encode(self, type_spec_value, encode_fn):
"""Returns an encoded proto for the given built-in TypeSpec."""
type_state = type_spec_value._serialize() # pylint: disable=protected-access
num_flat_components = len(nest.flatten(
type_spec_value._component_specs, expand_composites=True)) # pylint: disable=protected-access
encoded_type_spec = struct_pb2.StructuredValue()
encoded_type_spec.type_spec_value.CopyFrom(
struct_pb2.TypeSpecProto(
type_spec_class=self.type_spec_proto_enum,
type_state=encode_fn(type_state),
type_spec_class_name=self.type_spec_class.__name__,
num_flat_components=num_flat_components))
return encoded_type_spec
def can_decode(self, value):
"""Returns true if `value` can be decoded into its built-in TypeSpec."""
if value.HasField("type_spec_value"):
type_spec_class_enum = value.type_spec_value.type_spec_class
return type_spec_class_enum == self.type_spec_proto_enum
return False
def do_decode(self, value, decode_fn):
"""Returns the built in `TypeSpec` encoded by the proto `value`."""
type_spec_proto = value.type_spec_value
# pylint: disable=protected-access
return self.type_spec_class._deserialize(
decode_fn(type_spec_proto.type_state)
)
# TODO(b/238903802): Use TraceType serialization and specific protos.
| BuiltInTypeSpecCodec |
python | encode__django-rest-framework | tests/browsable_api/test_form_rendering.py | {
"start": 390,
"end": 479
} | class ____(generics.CreateAPIView):
serializer_class = BasicSerializer
| StandardPostView |
python | doocs__leetcode | solution/2400-2499/2486.Append Characters to String to Make Subsequence/Solution.py | {
"start": 0,
"end": 196
} | class ____:
def appendCharacters(self, s: str, t: str) -> int:
n, j = len(t), 0
for c in s:
if j < n and c == t[j]:
j += 1
return n - j
| Solution |
python | jazzband__django-pipeline | pipeline/templatetags/pipeline.py | {
"start": 5677,
"end": 8025
} | class ____(PipelineMixin, template.Node):
def __init__(self, name):
self.name = name
def render(self, context):
super().render(context)
package_name = template.Variable(self.name).resolve(context)
try:
package = self.package_for(package_name, "js")
except PackageNotFound:
w = "Package %r is unknown. Check PIPELINE['JAVASCRIPT'] in your settings."
logger.warning(w, package_name)
# fail silently, do not return anything if an invalid group is specified
return ""
return self.render_compressed(package, package_name, "js")
def render_js(self, package, path):
template_name = package.template_name or "pipeline/js.html"
context = package.extra_context
context.update(
{
"type": guess_type(path, "text/javascript"),
"url": mark_safe(staticfiles_storage.url(path)),
}
)
return render_to_string(template_name, context)
def render_inline(self, package, js):
context = package.extra_context
context.update({"source": js})
return render_to_string("pipeline/inline_js.html", context)
def render_individual_js(self, package, paths, templates=None):
tags = [self.render_js(package, js) for js in paths]
if templates:
tags.append(self.render_inline(package, templates))
return "\n".join(tags)
def render_error_js(self, package_name, e):
return super().render_error("JavaScript", package_name, e)
@register.tag
def stylesheet(parser, token):
try:
tag_name, name = token.split_contents()
except ValueError:
e = (
"%r requires exactly one argument: the name "
"of a group in the PIPELINE.STYLESHEETS setting"
)
raise template.TemplateSyntaxError(e % token.split_contents()[0])
return StylesheetNode(name)
@register.tag
def javascript(parser, token):
try:
tag_name, name = token.split_contents()
except ValueError:
e = (
"%r requires exactly one argument: the name "
"of a group in the PIPELINE.JAVASVRIPT setting"
)
raise template.TemplateSyntaxError(e % token.split_contents()[0])
return JavascriptNode(name)
| JavascriptNode |
python | sympy__sympy | sympy/polys/domains/compositedomain.py | {
"start": 332,
"end": 1817
} | class ____(Domain[Er]):
"""Base class for composite domains, e.g. ZZ[x], ZZ(X). """
is_Composite = True
gens: Er
ngens: int
symbols: tuple[Expr, ...]
domain: Domain[Er]
def inject(self, *symbols):
"""Inject generators into this domain. """
if not (set(self.symbols) & set(symbols)):
return self.__class__(self.domain, self.symbols + symbols, self.order)
else:
raise GeneratorsError("common generators in %s and %s" % (self.symbols, symbols))
def drop(self, *symbols):
"""Drop generators from this domain. """
symset = set(symbols)
newsyms = tuple(s for s in self.symbols if s not in symset)
domain = self.domain.drop(*symbols)
if not newsyms:
return domain
else:
return self.__class__(domain, newsyms, self.order)
def set_domain(self, domain, /):
"""Set the ground domain of this domain. """
return self.__class__(domain, self.symbols, self.order)
@property
def is_Exact(self):
"""Returns ``True`` if this domain is exact. """
return self.domain.is_Exact
def get_exact(self):
"""Returns an exact version of this domain. """
return self.set_domain(self.domain.get_exact())
@property
def has_CharacteristicZero(self):
return self.domain.has_CharacteristicZero
def characteristic(self):
return self.domain.characteristic()
| CompositeDomain |
python | scikit-learn__scikit-learn | asv_benchmarks/benchmarks/linear_model.py | {
"start": 4483,
"end": 5553
} | class ____(Predictor, Estimator, Benchmark):
"""
Benchmarks for ElasticNet.
"""
param_names = ["representation", "precompute"]
params = (["dense", "sparse"], [True, False])
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
representation, precompute = params
if representation == "dense":
data = _synth_regression_dataset(n_samples=1000000, n_features=100)
else:
data = _synth_regression_sparse_dataset(
n_samples=50000, n_features=5000, density=0.01
)
return data
def make_estimator(self, params):
representation, precompute = params
estimator = ElasticNet(precompute=precompute, alpha=0.001, random_state=0)
return estimator
def make_scorers(self):
make_gen_reg_scorers(self)
def skip(self, params):
representation, precompute = params
if representation == "sparse" and precompute is False:
return True
return False
| ElasticNetBenchmark |
python | allegroai__clearml | clearml/binding/frameworks/xgboost_bind.py | {
"start": 415,
"end": 7370
} | class ____(PatchBaseModelIO):
_current_task = None
__patched = None
__callback_cls = None
@staticmethod
def update_current_task(task: Any, **kwargs: Any) -> None:
PatchXGBoostModelIO._current_task = task
if not task:
return
PatchXGBoostModelIO._patch_model_io()
PostImportHookPatching.add_on_import("xgboost", PatchXGBoostModelIO._patch_model_io)
@staticmethod
def _patch_model_io() -> None:
if PatchXGBoostModelIO.__patched:
return
if "xgboost" not in sys.modules:
return
PatchXGBoostModelIO.__patched = True
# noinspection PyBroadException
try:
import xgboost as xgb # noqa
bst = xgb.Booster
bst.save_model = _patched_call(bst.save_model, PatchXGBoostModelIO._save)
bst.load_model = _patched_call(bst.load_model, PatchXGBoostModelIO._load)
# noinspection PyBroadException
try:
from xgboost.callback import TrainingCallback # noqa
PatchXGBoostModelIO.__callback_cls = PatchXGBoostModelIO._generate_training_callback_class()
xgb.train = _patched_call(xgb.train, PatchXGBoostModelIO._train)
xgb.training.train = _patched_call(xgb.training.train, PatchXGBoostModelIO._train)
xgb.sklearn.train = _patched_call(xgb.sklearn.train, PatchXGBoostModelIO._train)
except ImportError:
pass
except Exception:
pass
except ImportError:
pass
except Exception:
pass
@staticmethod
def _save(
original_fn: Callable,
obj: Any,
f: Union[str, IO],
*args: Any,
**kwargs: Any,
) -> Any:
ret = original_fn(obj, f, *args, **kwargs)
if not PatchXGBoostModelIO._current_task:
return ret
if isinstance(f, six.string_types):
filename = f
elif hasattr(f, "name"):
filename = f.name
# noinspection PyBroadException
try:
f.flush()
except Exception:
pass
else:
filename = None
# give the model a descriptive name based on the file name
# noinspection PyBroadException
try:
model_name = Path(filename).stem
except Exception:
model_name = None
WeightsFileHandler.create_output_model(
obj,
filename,
Framework.xgboost,
PatchXGBoostModelIO._current_task,
singlefile=True,
model_name=model_name,
)
return ret
@staticmethod
def _load(original_fn: Callable, f: Union[str, IO], *args: Any, **kwargs: Any) -> Any:
if not PatchXGBoostModelIO._current_task:
return original_fn(f, *args, **kwargs)
if isinstance(f, six.string_types):
filename = f
elif hasattr(f, "name"):
filename = f.name
elif len(args) == 1 and isinstance(args[0], six.string_types):
filename = args[0]
else:
filename = None
# register input model
empty = _Empty()
# Hack: disabled
if False and running_remotely():
filename = WeightsFileHandler.restore_weights_file(
empty, filename, Framework.xgboost, PatchXGBoostModelIO._current_task
)
model = original_fn(filename or f, *args, **kwargs)
else:
# try to load model before registering, in case we fail
model = original_fn(f, *args, **kwargs)
WeightsFileHandler.restore_weights_file(
empty, filename, Framework.xgboost, PatchXGBoostModelIO._current_task
)
if empty.trains_in_model:
# noinspection PyBroadException
try:
model.trains_in_model = empty.trains_in_model
except Exception:
pass
return model
@staticmethod
def _train(original_fn: Callable, *args: Any, **kwargs: Any) -> Any:
if not PatchXGBoostModelIO._current_task:
return original_fn(*args, **kwargs)
if PatchXGBoostModelIO.__callback_cls:
callbacks = kwargs.get("callbacks") or []
kwargs["callbacks"] = callbacks + [
PatchXGBoostModelIO.__callback_cls(task=PatchXGBoostModelIO._current_task)
]
return original_fn(*args, **kwargs)
@classmethod
def _generate_training_callback_class(cls) -> Optional[Any]:
try:
from xgboost.callback import TrainingCallback # noqa
except ImportError:
return None
class ClearMLCallback(TrainingCallback):
"""
Log evaluation result at each iteration.
"""
_scalar_index_counter = 0
def __init__(self, task: "Task", period: int = 1) -> None:
self.period = period
assert period > 0
self._last_eval = None
self._last_eval_epoch = None
self._logger = task.get_logger()
self._scalar_index = ClearMLCallback._scalar_index_counter
ClearMLCallback._scalar_index_counter += 1
super(ClearMLCallback, self).__init__()
def after_iteration(
self,
model: Any,
epoch: int,
evals_log: Dict[str, Dict[str, List[float]]],
) -> bool:
"""Run after each iteration. Return True when training should stop."""
if not evals_log:
return False
if not (self.period == 1 or (epoch % self.period) == 0):
self._last_eval = evals_log
self._last_eval_epoch = epoch
return False
self._report_eval_log(epoch, evals_log)
self._last_eval = None
self._last_eval_epoch = None
return False
def after_training(self, model: Any) -> Any:
"""Run after training is finished."""
if self._last_eval:
self._report_eval_log(self._last_eval_epoch, self._last_eval)
return model
def _report_eval_log(self, epoch: int, eval_log: Dict[str, Dict[str, List[float]]]) -> None:
for data, metric in eval_log.items():
if self._scalar_index != 0:
data = "{} - {}".format(data, self._scalar_index)
for metric_name, log in metric.items():
value = log[-1]
self._logger.report_scalar(title=data, series=metric_name, value=value, iteration=epoch)
return ClearMLCallback
| PatchXGBoostModelIO |
python | mitmproxy__pdoc | test/testdata/misc.py | {
"start": 5626,
"end": 5741
} | class ____(metaclass=CustomCallMeta):
"""A class where the constructor is defined by its metaclass."""
| CustomCall |
python | getsentry__sentry | src/sentry/plugins/bases/issue2.py | {
"start": 2167,
"end": 2251
} | class ____(TypedDict):
issue_id: int
url: str
label: str
| _PluginIssueIssue |
python | pytorch__pytorch | benchmarks/fastrnns/runner.py | {
"start": 808,
"end": 3107
} | class ____:
def __enter__(self):
import os
enabled = os.environ.get("PYTORCH_JIT", 1)
assert not enabled
def __exit__(self, *args, **kwargs):
pass
RNNRunner = namedtuple(
"RNNRunner",
[
"name",
"creator",
"context",
],
)
def get_nn_runners(*names):
return [nn_runners[name] for name in names]
nn_runners = {
"cudnn": RNNRunner("cudnn", pytorch_lstm_creator, DummyContext),
"cudnn_dropout": RNNRunner(
"cudnn_dropout", partial(pytorch_lstm_creator, dropout=0.4), DummyContext
),
"cudnn_layernorm": RNNRunner(
"cudnn_layernorm", layernorm_pytorch_lstm_creator, DummyContext
),
"vl_cudnn": RNNRunner("vl_cudnn", varlen_pytorch_lstm_creator, DummyContext),
"vl_jit": RNNRunner(
"vl_jit", partial(varlen_lstm_creator, script=True), DummyContext
),
"vl_py": RNNRunner("vl_py", varlen_lstm_creator, DummyContext),
"aten": RNNRunner("aten", pytorch_lstm_creator, DisableCuDNN),
"jit": RNNRunner("jit", lstm_creator, DummyContext),
"jit_premul": RNNRunner("jit_premul", lstm_premul_creator, DummyContext),
"jit_premul_bias": RNNRunner(
"jit_premul_bias", lstm_premul_bias_creator, DummyContext
),
"jit_simple": RNNRunner("jit_simple", lstm_simple_creator, DummyContext),
"jit_multilayer": RNNRunner(
"jit_multilayer", lstm_multilayer_creator, DummyContext
),
"jit_layernorm": RNNRunner("jit_layernorm", lnlstm_creator, DummyContext),
"jit_layernorm_decom": RNNRunner(
"jit_layernorm_decom",
partial(lnlstm_creator, decompose_layernorm=True),
DummyContext,
),
"jit_dropout": RNNRunner("jit_dropout", dropoutlstm_creator, DummyContext),
"py": RNNRunner("py", partial(lstm_creator, script=False), DummyContext),
"resnet18": RNNRunner(
"resnet18", imagenet_cnn_creator(cnn.resnet18, jit=False), DummyContext
),
"resnet18_jit": RNNRunner(
"resnet18_jit", imagenet_cnn_creator(cnn.resnet18), DummyContext
),
"resnet50": RNNRunner(
"resnet50", imagenet_cnn_creator(cnn.resnet50, jit=False), DummyContext
),
"resnet50_jit": RNNRunner(
"resnet50_jit", imagenet_cnn_creator(cnn.resnet50), DummyContext
),
}
| AssertNoJIT |
python | django__django | tests/test_runner/tests.py | {
"start": 25078,
"end": 25419
} | class ____(AdminScriptTestCase):
def setUp(self):
super().setUp()
self.write_settings("settings.py")
def test_ticket_17477(self):
"""'manage.py help test' works after r16352."""
args = ["help", "test"]
out, err = self.run_manage(args)
self.assertNoOutput(err)
| Ticket17477RegressionTests |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_skill_params.py | {
"start": 215,
"end": 522
} | class ____(TypedDict, total=False):
skill_id: Required[str]
"""Skill ID"""
type: Required[Literal["anthropic", "custom"]]
"""Type of skill - either 'anthropic' (built-in) or 'custom' (user-defined)"""
version: str
"""Skill version or 'latest' for most recent version"""
| BetaSkillParams |
python | sanic-org__sanic | examples/simple_async_view.py | {
"start": 511,
"end": 964
} | class ____(HTTPMethodView):
async def get(self, request):
return text("I am async get method")
async def post(self, request):
return text("I am async post method")
async def put(self, request):
return text("I am async put method")
app.add_route(SimpleView.as_view(), "/")
app.add_route(SimpleAsyncView.as_view(), "/async")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8000, debug=True)
| SimpleAsyncView |
python | facebook__pyre-check | client/commands/tests/infer_test.py | {
"start": 49690,
"end": 54675
} | class ____(testslide.TestCase):
def _normalize(self, block_string: str) -> str:
return (
textwrap.dedent(block_string)
.strip()
.replace("@_GENERATED", "@" + "generated")
)
def _assert_in_place(
self,
stub_file_contents: str,
code_file_contents: str,
expected_annotated_code_file_contents: Optional[str],
) -> None:
options = infer.StubGenerationOptions(
annotate_attributes=True,
use_future_annotations=False,
dequalify=False,
quote_annotations=False,
simple_annotations=False,
)
annotated_code = infer.AnnotateModuleInPlace._annotated_code(
code_path="code_path.py",
stub=self._normalize(stub_file_contents),
code=self._normalize(code_file_contents),
options=options,
)
expected_code = (
self._normalize(expected_annotated_code_file_contents)
if expected_annotated_code_file_contents is not None
else None
)
self.assertEqual(expected_code, annotated_code)
def test_apply_functions(self) -> None:
self._assert_in_place(
"""
def foo(x: int) -> None: ...
""",
"""
def foo(x):
pass
""",
"""
def foo(x: int) -> None:
pass
""",
)
self._assert_in_place(
"""
def incomplete_stubs(x: int, y) -> None: ...
""",
"""
def incomplete_stubs(x, y: int):
pass
""",
"""
def incomplete_stubs(x: int, y: int) -> None:
pass
""",
)
self._assert_in_place(
"""
def incomplete_stubs_with_stars(x: int, *args, **kwargs) -> None: ...
""",
"""
def incomplete_stubs_with_stars(x, *args: P.args, **kwargs: P.kwargs):
pass
""",
"""
def incomplete_stubs_with_stars(x: int, *args: P.args, **kwargs: P.kwargs) -> None:
pass
""",
)
def test_apply_globals(self) -> None:
self._assert_in_place(
"""
a: int = ...
""",
"""
a = 1 + 1
""",
"""
a: int = 1 + 1
""",
)
self._assert_in_place(
"""
a: int = ...
b: int = ...
""",
"""
a = b = 1 + 1
""",
"""
a: int
b: int
a = b = 1 + 1
""",
)
self._assert_in_place(
"""
_: str = ...
a: str = ...
""",
"""
_, a = "string".split("")
""",
"""
a: str
_, a = "string".split("")
""",
)
def test_forward_references(self) -> None:
self._assert_in_place(
"""
class Foo:
def method(self) -> Foo: ...
""",
"""
class Foo:
def method(self):
return self
""",
"""
class Foo:
def method(self) -> "Foo":
return self
""",
)
self._assert_in_place(
"""
def foo() -> Foo: ...
""",
"""
def foo():
return Foo()
class Foo:
pass
""",
"""
def foo() -> "Foo":
return Foo()
class Foo:
pass
""",
)
def test_generated(self) -> None:
self._assert_in_place(
"""
def foo() -> None: ...
""",
"""
# not generated
def foo():
return
""",
"""
# not generated
def foo() -> None:
return
""",
)
self._assert_in_place(
"""
def foo() -> None: ...
""",
"""
# @_GENERATED
def foo():
return
""",
None,
)
def test_class_attributes(self) -> None:
self._assert_in_place(
stub_file_contents="""
class Foo:
some_attribute: int = ...
""",
code_file_contents="""
class Foo:
some_attribute = ...
""",
expected_annotated_code_file_contents="""
class Foo:
some_attribute: int = ...
""",
)
| StubApplicationTest |
python | walkccc__LeetCode | solutions/1110. Delete Nodes And Return Forest/1110.py | {
"start": 0,
"end": 576
} | class ____:
def delNodes(self, root: TreeNode, to_delete: list[int]) -> list[TreeNode]:
ans = []
toDeleteSet = set(to_delete)
def dfs(root: TreeNode, isRoot: bool) -> TreeNode:
if not root:
return None
deleted = root.val in toDeleteSet
if isRoot and not deleted:
ans.append(root)
# If root is deleted, both children have the possibility to be a new root
root.left = dfs(root.left, deleted)
root.right = dfs(root.right, deleted)
return None if deleted else root
dfs(root, True)
return ans
| Solution |
python | apache__airflow | providers/google/tests/unit/google/cloud/utils/test_credentials_provider.py | {
"start": 6047,
"end": 6962
} | class ____:
@mock.patch.dict(
os.environ,
{AIRFLOW_CONN_GOOGLE_CLOUD_DEFAULT: ENV_VALUE, CREDENTIALS: ENV_VALUE},
)
@mock.patch("airflow.providers.google.cloud.utils.credentials_provider.build_gcp_conn")
def test_provide_gcp_conn_and_credentials(self, mock_builder):
mock_builder.return_value = TEMP_VARIABLE
path = "path/to/file.json"
scopes = ["scopes"]
project_id = "project_id"
with provide_gcp_conn_and_credentials(path, scopes, project_id):
mock_builder.assert_called_once_with(key_file_path=path, scopes=scopes, project_id=project_id)
assert os.environ[AIRFLOW_CONN_GOOGLE_CLOUD_DEFAULT] == TEMP_VARIABLE
assert os.environ[CREDENTIALS] == path
assert os.environ[AIRFLOW_CONN_GOOGLE_CLOUD_DEFAULT] == ENV_VALUE
assert os.environ[CREDENTIALS] == ENV_VALUE
| TestProvideGcpConnAndCredentials |
python | huggingface__transformers | src/transformers/models/maskformer/modeling_maskformer.py | {
"start": 55635,
"end": 56972
} | class ____(nn.Module):
def __init__(self, in_features: int, lateral_widths: list[int], feature_size: int = 256):
"""
Feature Pyramid Network, given an input tensor and a set of feature map of different feature/spatial size, it
creates a list of feature maps with the same feature size.
Args:
in_features (`int`):
The number of input features (channels).
lateral_widths (`list[int]`):
A list with the features (channels) size of each lateral connection.
feature_size (int, *optional*, defaults to 256):
The features (channels) of the resulting feature maps.
"""
super().__init__()
self.stem = MaskFormerFPNConvLayer(in_features, feature_size)
self.layers = nn.Sequential(
*[MaskFormerFPNLayer(feature_size, lateral_width) for lateral_width in lateral_widths[::-1]]
)
def forward(self, features: list[Tensor]) -> list[Tensor]:
fpn_features = []
last_feature = features[-1]
other_features = features[:-1]
output = self.stem(last_feature)
for layer, left in zip(self.layers, other_features[::-1]):
output = layer(output, left)
fpn_features.append(output)
return fpn_features
| MaskFormerFPNModel |
python | pytorch__pytorch | test/distributed/elastic/rendezvous/c10d_rendezvous_backend_test.py | {
"start": 871,
"end": 1431
} | class ____(TestCase, RendezvousBackendTestMixin):
_store: ClassVar[TCPStore]
@classmethod
def setUpClass(cls) -> None:
cls._store = TCPStore("localhost", 0, is_master=True) # type: ignore[call-arg]
def setUp(self) -> None:
# Make sure we have a clean slate.
self._store.delete_key("torch.rendezvous.dummy_run_id")
self._backend = C10dRendezvousBackend(self._store, "dummy_run_id")
def _corrupt_state(self) -> None:
self._store.set("torch.rendezvous.dummy_run_id", "non_base64")
| TCPStoreBackendTest |
python | numba__numba | numba/cuda/stubs.py | {
"start": 3723,
"end": 4105
} | class ____(Stub):
'''
Constant memory namespace
'''
@stub_function
def array_like(ndarray):
'''
Create a const array from *ndarry*. The resulting const array will have
the same shape, type, and values as *ndarray*.
'''
# -------------------------------------------------------------------------------
# warp level operations
| const |
python | numpy__numpy | numpy/lib/_datasource.py | {
"start": 5780,
"end": 17036
} | class ____:
"""
DataSource(destpath='.')
A generic data source file (file, http, ftp, ...).
DataSources can be local files or remote files/URLs. The files may
also be compressed or uncompressed. DataSource hides some of the
low-level details of downloading the file, allowing you to simply pass
in a valid file path (or URL) and obtain a file object.
Parameters
----------
destpath : str or None, optional
Path to the directory where the source file gets downloaded to for
use. If `destpath` is None, a temporary directory will be created.
The default path is the current directory.
Notes
-----
URLs require a scheme string (``http://``) to be used, without it they
will fail::
>>> repos = np.lib.npyio.DataSource()
>>> repos.exists('www.google.com/index.html')
False
>>> repos.exists('http://www.google.com/index.html')
True
Temporary directories are deleted when the DataSource is deleted.
Examples
--------
::
>>> ds = np.lib.npyio.DataSource('/home/guido')
>>> urlname = 'http://www.google.com/'
>>> gfile = ds.open('http://www.google.com/')
>>> ds.abspath(urlname)
'/home/guido/www.google.com/index.html'
>>> ds = np.lib.npyio.DataSource(None) # use with temporary file
>>> ds.open('/home/guido/foobar.txt')
<open file '/home/guido.foobar.txt', mode 'r' at 0x91d4430>
>>> ds.abspath('/home/guido/foobar.txt')
'/tmp/.../home/guido/foobar.txt'
"""
def __init__(self, destpath=os.curdir):
"""Create a DataSource with a local path at destpath."""
if destpath:
self._destpath = os.path.abspath(destpath)
self._istmpdest = False
else:
import tempfile # deferring import to improve startup time
self._destpath = tempfile.mkdtemp()
self._istmpdest = True
def __del__(self):
# Remove temp directories
if hasattr(self, '_istmpdest') and self._istmpdest:
import shutil
shutil.rmtree(self._destpath)
def _iszip(self, filename):
"""Test if the filename is a zip file by looking at the file extension.
"""
fname, ext = os.path.splitext(filename)
return ext in _file_openers.keys()
def _iswritemode(self, mode):
"""Test if the given mode will open a file for writing."""
# Currently only used to test the bz2 files.
_writemodes = ("w", "+")
return any(c in _writemodes for c in mode)
def _splitzipext(self, filename):
"""Split zip extension from filename and return filename.
Returns
-------
base, zip_ext : {tuple}
"""
if self._iszip(filename):
return os.path.splitext(filename)
else:
return filename, None
def _possible_names(self, filename):
"""Return a tuple containing compressed filename variations."""
names = [filename]
if not self._iszip(filename):
for zipext in _file_openers.keys():
if zipext:
names.append(filename + zipext)
return names
def _isurl(self, path):
"""Test if path is a net location. Tests the scheme and netloc."""
# We do this here to reduce the 'import numpy' initial import time.
from urllib.parse import urlparse
# BUG : URLs require a scheme string ('http://') to be used.
# www.google.com will fail.
# Should we prepend the scheme for those that don't have it and
# test that also? Similar to the way we append .gz and test for
# for compressed versions of files.
scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
return bool(scheme and netloc)
def _cache(self, path):
"""Cache the file specified by path.
Creates a copy of the file in the datasource cache.
"""
# We import these here because importing them is slow and
# a significant fraction of numpy's total import time.
import shutil
from urllib.request import urlopen
upath = self.abspath(path)
# ensure directory exists
if not os.path.exists(os.path.dirname(upath)):
os.makedirs(os.path.dirname(upath))
# TODO: Doesn't handle compressed files!
if self._isurl(path):
with urlopen(path) as openedurl:
with _open(upath, 'wb') as f:
shutil.copyfileobj(openedurl, f)
else:
shutil.copyfile(path, upath)
return upath
def _findfile(self, path):
"""Searches for ``path`` and returns full path if found.
If path is an URL, _findfile will cache a local copy and return the
path to the cached file. If path is a local file, _findfile will
return a path to that local file.
The search will include possible compressed versions of the file
and return the first occurrence found.
"""
# Build list of possible local file paths
if not self._isurl(path):
# Valid local paths
filelist = self._possible_names(path)
# Paths in self._destpath
filelist += self._possible_names(self.abspath(path))
else:
# Cached URLs in self._destpath
filelist = self._possible_names(self.abspath(path))
# Remote URLs
filelist = filelist + self._possible_names(path)
for name in filelist:
if self.exists(name):
if self._isurl(name):
name = self._cache(name)
return name
return None
def abspath(self, path):
"""
Return absolute path of file in the DataSource directory.
If `path` is an URL, then `abspath` will return either the location
the file exists locally or the location it would exist when opened
using the `open` method.
Parameters
----------
path : str or pathlib.Path
Can be a local file or a remote URL.
Returns
-------
out : str
Complete path, including the `DataSource` destination directory.
Notes
-----
The functionality is based on `os.path.abspath`.
"""
# We do this here to reduce the 'import numpy' initial import time.
from urllib.parse import urlparse
# TODO: This should be more robust. Handles case where path includes
# the destpath, but not other sub-paths. Failing case:
# path = /home/guido/datafile.txt
# destpath = /home/alex/
# upath = self.abspath(path)
# upath == '/home/alex/home/guido/datafile.txt'
# handle case where path includes self._destpath
splitpath = path.split(self._destpath, 2)
if len(splitpath) > 1:
path = splitpath[1]
scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
netloc = self._sanitize_relative_path(netloc)
upath = self._sanitize_relative_path(upath)
return os.path.join(self._destpath, netloc, upath)
def _sanitize_relative_path(self, path):
"""Return a sanitised relative path for which
os.path.abspath(os.path.join(base, path)).startswith(base)
"""
last = None
path = os.path.normpath(path)
while path != last:
last = path
# Note: os.path.join treats '/' as os.sep on Windows
path = path.lstrip(os.sep).lstrip('/')
path = path.lstrip(os.pardir).removeprefix('..')
drive, path = os.path.splitdrive(path) # for Windows
return path
def exists(self, path):
"""
Test if path exists.
Test if `path` exists as (and in this order):
- a local file.
- a remote URL that has been downloaded and stored locally in the
`DataSource` directory.
- a remote URL that has not been downloaded, but is valid and
accessible.
Parameters
----------
path : str or pathlib.Path
Can be a local file or a remote URL.
Returns
-------
out : bool
True if `path` exists.
Notes
-----
When `path` is an URL, `exists` will return True if it's either
stored locally in the `DataSource` directory, or is a valid remote
URL. `DataSource` does not discriminate between the two, the file
is accessible if it exists in either location.
"""
# First test for local path
if os.path.exists(path):
return True
# We import this here because importing urllib is slow and
# a significant fraction of numpy's total import time.
from urllib.error import URLError
from urllib.request import urlopen
# Test cached url
upath = self.abspath(path)
if os.path.exists(upath):
return True
# Test remote url
if self._isurl(path):
try:
netfile = urlopen(path)
netfile.close()
del netfile
return True
except URLError:
return False
return False
def open(self, path, mode='r', encoding=None, newline=None):
"""
Open and return file-like object.
If `path` is an URL, it will be downloaded, stored in the
`DataSource` directory and opened from there.
Parameters
----------
path : str or pathlib.Path
Local file path or URL to open.
mode : {'r', 'w', 'a'}, optional
Mode to open `path`. Mode 'r' for reading, 'w' for writing,
'a' to append. Available modes depend on the type of object
specified by `path`. Default is 'r'.
encoding : {None, str}, optional
Open text file with given encoding. The default encoding will be
what `open` uses.
newline : {None, str}, optional
Newline to use when reading text file.
Returns
-------
out : file object
File object.
"""
# TODO: There is no support for opening a file for writing which
# doesn't exist yet (creating a file). Should there be?
# TODO: Add a ``subdir`` parameter for specifying the subdirectory
# used to store URLs in self._destpath.
if self._isurl(path) and self._iswritemode(mode):
raise ValueError("URLs are not writeable")
# NOTE: _findfile will fail on a new file opened for writing.
found = self._findfile(path)
if found:
_fname, ext = self._splitzipext(found)
if ext == 'bz2':
mode.replace("+", "")
return _file_openers[ext](found, mode=mode,
encoding=encoding, newline=newline)
else:
raise FileNotFoundError(f"{path} not found.")
| DataSource |
python | scikit-learn__scikit-learn | sklearn/cluster/_bicluster.py | {
"start": 6782,
"end": 12431
} | class ____(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001) [1]_.
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : int, default=3
The number of biclusters to find.
svd_method : {'randomized', 'arpack'}, default='randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`scipy.sparse.linalg.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, default=None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized`.
mini_batch : bool, default=False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random'}, or ndarray of shape \
(n_clusters, n_features), default='k-means++'
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, default=10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
random_state : int, RandomState instance, default=None
Used for randomizing the singular value decomposition and the k-means
initialization. Use an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
Attributes
----------
rows_ : array-like of shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like of shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like of shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like of shape (n_cols,)
The bicluster label of each column.
biclusters_ : tuple of two ndarrays
The tuple contains the `rows_` and `columns_` arrays.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
SpectralBiclustering : Partitions rows and columns under the assumption
that the data has an underlying checkerboard structure.
References
----------
.. [1] :doi:`Dhillon, Inderjit S, 2001. Co-clustering documents and words using
bipartite spectral graph partitioning.
<10.1145/502512.502550>`
Examples
--------
>>> from sklearn.cluster import SpectralCoclustering
>>> import numpy as np
>>> X = np.array([[1, 1], [2, 1], [1, 0],
... [4, 7], [3, 5], [3, 6]])
>>> clustering = SpectralCoclustering(n_clusters=2, random_state=0).fit(X)
>>> clustering.row_labels_ #doctest: +SKIP
array([0, 1, 1, 0, 0, 0], dtype=int32)
>>> clustering.column_labels_ #doctest: +SKIP
array([0, 0], dtype=int32)
>>> clustering
SpectralCoclustering(n_clusters=2, random_state=0)
For a more detailed example, see the following:
:ref:`sphx_glr_auto_examples_bicluster_plot_spectral_coclustering.py`.
"""
_parameter_constraints: dict = {
**BaseSpectral._parameter_constraints,
"n_clusters": [Interval(Integral, 1, None, closed="left")],
}
def __init__(
self,
n_clusters=3,
*,
svd_method="randomized",
n_svd_vecs=None,
mini_batch=False,
init="k-means++",
n_init=10,
random_state=None,
):
super().__init__(
n_clusters, svd_method, n_svd_vecs, mini_batch, init, n_init, random_state
)
def _check_parameters(self, n_samples):
if self.n_clusters > n_samples:
raise ValueError(
f"n_clusters should be <= n_samples={n_samples}. Got"
f" {self.n_clusters} instead."
)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u, col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack([self.row_labels_ == c for c in range(self.n_clusters)])
self.columns_ = np.vstack(
[self.column_labels_ == c for c in range(self.n_clusters)]
)
| SpectralCoclustering |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/metaclass9.py | {
"start": 1149,
"end": 1287
} | class ____(metaclass=Meta2, param2="", param1=1, param20=""): ...
# This should generate an error because param1 is the wrong type.
| Class2_2 |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_lookup_py39.py | {
"start": 3190,
"end": 4046
} | class ____:
a: dict[tuple[int, int], str]
TestDataClass = typing.Union[TypingTuple, BuiltinTuple]
@pytest.mark.parametrize("data_class", [TypingTuple, BuiltinTuple])
@given(data=st.data())
def test_from_type_with_tuple_works(data, data_class: TestDataClass):
value: TestDataClass = data.draw(st.from_type(data_class))
assert len(value.a) >= 0
def _shorter_lists(list_type):
return st.lists(st.from_type(*typing.get_args(list_type)), max_size=2)
def test_can_register_builtin_list():
# Regression test for https://github.com/HypothesisWorks/hypothesis/issues/3635
with temp_registered(list, _shorter_lists):
assert_all_examples(
st.from_type(list[int]),
lambda ls: len(ls) <= 2 and {type(x) for x in ls}.issubset({int}),
)
T = typing.TypeVar("T")
@typing.runtime_checkable
| BuiltinTuple |
python | tensorflow__tensorflow | tensorflow/python/keras/optimizer_v2/optimizer_v2.py | {
"start": 4028,
"end": 57210
} | class ____(trackable.Trackable):
"""Base class for Keras optimizers.
You should not use this class directly, but instead instantiate one of its
subclasses such as `tf.keras.optimizers.SGD`, `tf.keras.optimizers.Adam`, etc.
### Usage
```python
# Create an optimizer with the desired parameters.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# `loss` is a callable that takes no argument and returns the value
# to minimize.
loss = lambda: 3 * var1 * var1 + 2 * var2 * var2
# In graph mode, returns op that minimizes the loss by updating the listed
# variables.
opt_op = opt.minimize(loss, var_list=[var1, var2])
opt_op.run()
# In eager mode, simply call minimize to update the list of variables.
opt.minimize(loss, var_list=[var1, var2])
```
### Usage in custom training loops
In Keras models, sometimes variables are created when the model is first
called, instead of construction time. Examples include 1) sequential models
without input shape pre-defined, or 2) subclassed models. Pass var_list as
callable in these cases.
Example:
```python
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(num_hidden, activation='relu'))
model.add(tf.keras.layers.Dense(num_classes, activation='sigmoid'))
loss_fn = lambda: tf.keras.losses.mse(model(input), output)
var_list_fn = lambda: model.trainable_weights
for input, output in data:
opt.minimize(loss_fn, var_list_fn)
```
### Processing gradients before applying them
Calling `minimize()` takes care of both computing the gradients and
applying them to the variables. If you want to process the gradients
before applying them you can instead use the optimizer in three steps:
1. Compute the gradients with `tf.GradientTape`.
2. Process the gradients as you wish.
3. Apply the processed gradients with `apply_gradients()`.
Example:
```python
# Create an optimizer.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# Compute the gradients for a list of variables.
with tf.GradientTape() as tape:
loss = <call_loss_function>
vars = <list_of_variables>
grads = tape.gradient(loss, vars)
# Process the gradients, for example cap them, etc.
# capped_grads = [MyCapper(g) for g in grads]
processed_grads = [process_gradient(g) for g in grads]
# Ask the optimizer to apply the processed gradients.
opt.apply_gradients(zip(processed_grads, var_list))
```
### Use with `tf.distribute.Strategy`
This optimizer class is `tf.distribute.Strategy` aware, which means it
automatically sums gradients across all replicas. To average gradients,
you divide your loss by the global batch size, which is done
automatically if you use `tf.keras` built-in training or evaluation loops.
See the `reduction` argument of your loss which should be set to
`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or
`tf.keras.losses.Reduction.SUM` for not.
To aggregate gradients yourself, call `apply_gradients` with
`experimental_aggregate_gradients` set to False. This is useful if you need to
process aggregated gradients.
If you are not using these and you want to average gradients, you should use
`tf.math.reduce_sum` to add up your per-example losses and then divide by the
global batch size. Note that when using `tf.distribute.Strategy`, the first
component of a tensor's shape is the *replica-local* batch size, which is off
by a factor equal to the number of replicas being used to compute a single
step. As a result, using `tf.math.reduce_mean` will give the wrong answer,
resulting in gradients that can be many times too big.
### Variable Constraints
All Keras optimizers respect variable constraints. If constraint function is
passed to any variable, the constraint will be applied to the variable after
the gradient has been applied to the variable.
Important: If gradient is sparse tensor, variable constraint is not supported.
### Thread Compatibility
The entire optimizer is currently thread compatible, not thread-safe. The user
needs to perform synchronization if necessary.
### Slots
Many optimizer subclasses, such as `Adam` and `Adagrad` allocate and manage
additional variables associated with the variables to train. These are called
<i>Slots</i>. Slots have names and you can ask the optimizer for the names of
the slots that it uses. Once you have a slot name you can ask the optimizer
for the variable it created to hold the slot value.
This can be useful if you want to log debug a training algorithm, report stats
about the slots, etc.
### Hyperparameters
These are arguments passed to the optimizer subclass constructor
(the `__init__` method), and then passed to `self._set_hyper()`.
They can be either regular Python values (like 1.0), tensors, or
callables. If they are callable, the callable will be called during
`apply_gradients()` to get the value for the hyper parameter.
Hyperparameters can be overwritten through user code:
Example:
```python
# Create an optimizer with the desired parameters.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# `loss` is a callable that takes no argument and returns the value
# to minimize.
loss = lambda: 3 * var1 + 2 * var2
# In eager mode, simply call minimize to update the list of variables.
opt.minimize(loss, var_list=[var1, var2])
# update learning rate
opt.learning_rate = 0.05
opt.minimize(loss, var_list=[var1, var2])
```
### Callable learning rate
Optimizer accepts a callable learning rate in two ways. The first way is
through built-in or customized
`tf.keras.optimizers.schedules.LearningRateSchedule`. The schedule will be
called on each iteration with `schedule(iteration)`, a `tf.Variable`
owned by the optimizer.
Example:
>>> var = tf.Variable(np.random.random(size=(1,)))
>>> learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
... initial_learning_rate=.01, decay_steps=20, decay_rate=.1)
>>> opt = tf.keras.optimizers.SGD(learning_rate=learning_rate)
>>> loss = lambda: 3 * var
>>> opt.minimize(loss, var_list=[var])
<tf.Variable...
The second way is through a callable function that
does not accept any arguments.
Example:
>>> var = tf.Variable(np.random.random(size=(1,)))
>>> def lr_callable():
... return .1
>>> opt = tf.keras.optimizers.SGD(learning_rate=lr_callable)
>>> loss = lambda: 3 * var
>>> opt.minimize(loss, var_list=[var])
<tf.Variable...
### Creating a custom optimizer
If you intend to create your own optimization algorithm, simply inherit from
this class and override the following methods:
- `_resource_apply_dense` (update variable given gradient tensor is a dense
`tf.Tensor`)
- `_resource_apply_sparse` (update variable given gradient tensor is a
sparse `tf.IndexedSlices`. The most common way for this to happen
is if you are taking the gradient through a `tf.gather`.)
- `_create_slots`
(if your optimizer algorithm requires additional variables)
- `get_config`
(serialization of the optimizer, include all hyper parameters)
"""
# Subclasses should set this to True unless they override `apply_gradients`
# with a version that does not have the `experimental_aggregate_gradients`
# argument. Older versions of Keras did not have this argument so custom
# optimizers may have overridden `apply_gradients` without the
# `experimental_aggregate_gradients` argument. Keras only passes
# `experimental_aggregate_gradients` if this attribute is True.
# Note: This attribute will likely be removed in an upcoming release.
_HAS_AGGREGATE_GRAD = False
def __init__(self,
name,
gradient_aggregator=None,
gradient_transformers=None,
**kwargs):
"""Create a new Optimizer.
This must be called by the constructors of subclasses.
Note that Optimizer instances should not bind to a single graph,
and so shouldn't keep Tensors as member variables. Generally
you should be able to use the _set_hyper()/state.get_hyper()
facility instead.
This class is stateful and thread-compatible.
Example of custom gradient transformations:
```python
def my_gradient_transformer(grads_and_vars):
# Simple example, double the gradients.
return [(2. * g, v) for g, v in grads_and_vars]
optimizer = tf.keras.optimizers.SGD(
1e-3, gradient_transformers=[my_gradient_transformer])
```
Args:
name: String. The name to use for momentum accumulator weights created
by the optimizer.
gradient_aggregator: The function to use to aggregate gradients across
devices (when using `tf.distribute.Strategy`). If `None`, defaults to
summing the gradients across devices. The function should accept and
return a list of `(gradient, variable)` tuples.
gradient_transformers: Optional. List of functions to use to transform
gradients before applying updates to Variables. The functions are
applied after `gradient_aggregator`. The functions should accept and
return a list of `(gradient, variable)` tuples.
**kwargs: keyword arguments. Allowed arguments are `clipvalue`,
`clipnorm`, `global_clipnorm`.
If `clipvalue` (float) is set, the gradient of each weight
is clipped to be no higher than this value.
If `clipnorm` (float) is set, the gradient of each weight
is individually clipped so that its norm is no higher than this value.
If `global_clipnorm` (float) is set the gradient of all weights is
clipped so that their global norm is no higher than this value.
Raises:
ValueError: in case of any invalid argument.
"""
allowed_kwargs = {"clipnorm", "clipvalue", "lr", "decay", "global_clipnorm"}
for k in kwargs:
if k not in allowed_kwargs:
raise TypeError("Unexpected keyword argument "
"passed to optimizer: " + str(k))
# checks that all keyword arguments are non-negative.
if kwargs[k] is not None and kwargs[k] < 0:
raise ValueError("Expected {} >= 0, received: {}".format(k, kwargs[k]))
if k == "lr":
warnings.warn(
"The `lr` argument is deprecated, use `learning_rate` instead.")
self._use_locking = True
self._init_set_name(name)
self._hyper = {}
# dict: {variable name : {slot name : variable}}
self._slots = {}
self._slot_names = []
self._weights = []
self._iterations = None
# For implementing Trackable. Stores information about how to restore
# slot variables which have not yet been created
# (trackable._CheckpointPosition objects).
# {slot_name :
# {_var_key(variable_to_train): [checkpoint_position, ... ], ... },
# ... }
self._deferred_slot_restorations = {}
decay = kwargs.pop("decay", 0.0)
if decay < 0.:
raise ValueError("decay cannot be less than 0: {}".format(decay))
self._initial_decay = decay
self._hypers_created = False
# Store the distribution strategy object if the optimizer is created inside
# strategy scope, so it could be used to create variables later.
if distribute_lib.has_strategy():
self._distribution_strategy = distribute_lib.get_strategy()
else:
self._distribution_strategy = None
# Configure gradient transformations.
if gradient_aggregator is None:
gradient_aggregator = optimizer_utils.all_reduce_sum_gradients
self.gradient_aggregator = gradient_aggregator
if gradient_transformers is None:
gradient_transformers = []
self.gradient_transformers = gradient_transformers
self.clipnorm = kwargs.pop("clipnorm", None)
self.global_clipnorm = kwargs.pop("global_clipnorm", None)
if self.clipnorm is not None and self.global_clipnorm is not None:
raise ValueError("Cannot accept both `clipnorm` and `global_clipnorm`, "
"passed `clipnorm` {}, `global_clipnorm` {}".format(
self.clipnorm, self.global_clipnorm))
self.clipvalue = kwargs.pop("clipvalue", None)
@property
def clipnorm(self):
"""`float` or `None`. If set, clips gradients to a maximum norm."""
return self._clipnorm
@property
def global_clipnorm(self):
"""`float` or `None`. If set, clips gradients to a maximum norm."""
return self._global_clipnorm
@clipnorm.setter
def clipnorm(self, val):
if val is not None and self.gradient_transformers:
raise ValueError("`clipnorm` cannot be set when `gradient_transformers` "
"is set. Instead, use the `gradient_transformers` to "
"specify clipping and other transformations.")
self._clipnorm = val
self._clipnorm_fn = optimizer_utils.make_gradient_clipnorm_fn(
self._clipnorm)
@global_clipnorm.setter
def global_clipnorm(self, val):
if val is not None and self.gradient_transformers:
raise ValueError("`clipnorm` cannot be set when `gradient_transformers` "
"is set. Instead, use the `gradient_transformers` to "
"specify clipping and other transformations.")
self._global_clipnorm = val
self._global_clipnorm_fn = optimizer_utils.make_global_gradient_clipnorm_fn(
self._global_clipnorm)
@property
def clipvalue(self):
"""`float` or `None`. If set, clips gradients to a maximum value."""
return self._clipvalue
@clipvalue.setter
def clipvalue(self, val):
if val is not None and self.gradient_transformers:
raise ValueError("`clipvalue` cannot be set when `gradient_transformers` "
"is set. Instead, use the `gradient_transformers` to "
"specify clipping and other transformations.")
self._clipvalue = val
self._clipvalue_fn = optimizer_utils.make_gradient_clipvalue_fn(
self._clipvalue)
def _transform_loss(self, loss):
"""Called in `.minimize` to transform loss before computing gradients."""
return loss
def _get_gradients(self, tape, loss, var_list, grad_loss=None):
"""Called in `minimize` to compute gradients from loss."""
grads = tape.gradient(loss, var_list, grad_loss)
return list(zip(grads, var_list))
def _transform_unaggregated_gradients(self, grads_and_vars):
"""Called in `apply_gradients` before gradient aggregation."""
return grads_and_vars
def _aggregate_gradients(self, grads_and_vars):
"""Called in `apply_gradients` to aggregate gradients across devices.
Note that user subclasses may override this, so the interface should not be
changed.
Args:
grads_and_vars: List of (gradient, variable) pairs.
Returns:
A list of (aggregated_gradient, variable) pairs. By default, this calls
`self.gradient_aggregator`.
"""
return self.gradient_aggregator(grads_and_vars)
def _transform_gradients(self, grads_and_vars):
"""Called in `apply_gradients` after aggregation."""
if self._clipvalue is not None:
grads_and_vars = self._clipvalue_fn(grads_and_vars)
if self._clipnorm is not None:
grads_and_vars = self._clipnorm_fn(grads_and_vars)
if self._global_clipnorm is not None:
grads_and_vars = self._global_clipnorm_fn(grads_and_vars)
for fn in self.gradient_transformers:
grads_and_vars = fn(grads_and_vars)
return grads_and_vars
def minimize(self, loss, var_list, grad_loss=None, name=None, tape=None):
"""Minimize `loss` by updating `var_list`.
This method simply computes gradient using `tf.GradientTape` and calls
`apply_gradients()`. If you want to process the gradient before applying
then call `tf.GradientTape` and `apply_gradients()` explicitly instead
of using this function.
Args:
loss: `Tensor` or callable. If a callable, `loss` should take no arguments
and return the value to minimize. If a `Tensor`, the `tape` argument
must be passed.
var_list: list or tuple of `Variable` objects to update to minimize
`loss`, or a callable returning the list or tuple of `Variable` objects.
Use callable when the variable list would otherwise be incomplete before
`minimize` since the variables are created at the first time `loss` is
called.
grad_loss: (Optional). A `Tensor` holding the gradient computed for
`loss`.
name: (Optional) str. Name for the returned operation.
tape: (Optional) `tf.GradientTape`. If `loss` is provided as a `Tensor`,
the tape that computed the `loss` must be provided.
Returns:
An `Operation` that updates the variables in `var_list`. The `iterations`
will be automatically increased by 1.
Raises:
ValueError: If some of the variables are not `Variable` objects.
"""
grads_and_vars = self._compute_gradients(
loss, var_list=var_list, grad_loss=grad_loss, tape=tape)
return self.apply_gradients(grads_and_vars, name=name)
def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None):
"""Compute gradients of `loss` for the variables in `var_list`.
This is the first part of `minimize()`. It returns a list
of (gradient, variable) pairs where "gradient" is the gradient
for "variable". Note that "gradient" can be a `Tensor`, an
`IndexedSlices`, or `None` if there is no gradient for the
given variable.
Args:
loss: `Tensor` or callable. If a callable, `loss` should take no
arguments and return the value to minimize. If a `Tensor`, the `tape`
argument must be passed.
var_list: list or tuple of `Variable` objects to update to minimize
`loss`, or a callable returning the list or tuple of `Variable` objects.
Use callable when the variable list would otherwise be incomplete before
`minimize` and the variables are created at the first time when `loss`
is called.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
tape: (Optional) `tf.GradientTape`. If `loss` is provided as a `Tensor`,
the tape that computed the `loss` must be provided.
Returns:
A list of (gradient, variable) pairs. Variable is always present, but
gradient can be `None`.
Raises:
TypeError: If `var_list` contains anything else than `Variable` objects.
ValueError: If some arguments are invalid, or var_list is None.
"""
# TODO(josh11b): Test that we handle weight decay in a reasonable way.
if not callable(loss) and tape is None:
raise ValueError("`tape` is required when a `Tensor` loss is passed.")
tape = tape if tape is not None else backprop.GradientTape()
if callable(loss):
with tape:
if not callable(var_list):
tape.watch(var_list)
loss = loss()
if callable(var_list):
var_list = var_list()
with tape:
loss = self._transform_loss(loss)
var_list = nest.flatten(var_list)
with ops.name_scope_v2(self._name + "/gradients"):
grads_and_vars = self._get_gradients(tape, loss, var_list, grad_loss)
self._assert_valid_dtypes([
v for g, v in grads_and_vars
if g is not None and v.dtype != dtypes.resource
])
return grads_and_vars
def apply_gradients(self,
grads_and_vars,
name=None,
experimental_aggregate_gradients=True):
"""Apply gradients to variables.
This is the second part of `minimize()`. It returns an `Operation` that
applies gradients.
The method sums gradients from all replicas in the presence of
`tf.distribute.Strategy` by default. You can aggregate gradients yourself by
passing `experimental_aggregate_gradients=False`.
Example:
```python
grads = tape.gradient(loss, vars)
grads = tf.distribute.get_replica_context().all_reduce('sum', grads)
# Processing aggregated gradients.
optimizer.apply_gradients(zip(grads, vars),
experimental_aggregate_gradients=False)
```
Args:
grads_and_vars: List of (gradient, variable) pairs.
name: Optional name for the returned operation. Default to the name passed
to the `Optimizer` constructor.
experimental_aggregate_gradients: Whether to sum gradients from different
replicas in the presence of `tf.distribute.Strategy`. If False, it's
user responsibility to aggregate the gradients. Default to True.
Returns:
An `Operation` that applies the specified gradients. The `iterations`
will be automatically increased by 1.
Raises:
TypeError: If `grads_and_vars` is malformed.
ValueError: If none of the variables have gradients.
RuntimeError: If called in a cross-replica context.
"""
grads_and_vars = optimizer_utils.filter_empty_gradients(grads_and_vars)
var_list = [v for (_, v) in grads_and_vars]
with ops.name_scope_v2(self._name):
# Create iteration if necessary.
with ops.init_scope():
self._create_all_weights(var_list)
if not grads_and_vars:
# Distribution strategy does not support reducing an empty list of
# gradients
return control_flow_ops.no_op()
if distribute_lib.in_cross_replica_context():
raise RuntimeError(
"`apply_gradients() cannot be called in cross-replica context. "
"Use `tf.distribute.Strategy.run` to enter replica "
"context.")
strategy = distribute_lib.get_strategy()
if (not experimental_aggregate_gradients and strategy and
isinstance(strategy,
(parameter_server_strategy.ParameterServerStrategyV1,
parameter_server_strategy_v2.ParameterServerStrategyV2,
central_storage_strategy.CentralStorageStrategy,
central_storage_strategy.CentralStorageStrategyV1))):
raise NotImplementedError(
"`experimental_aggregate_gradients=False is not supported for "
"ParameterServerStrategy and CentralStorageStrategy")
apply_state = self._prepare(var_list)
if experimental_aggregate_gradients:
grads_and_vars = self._transform_unaggregated_gradients(grads_and_vars)
grads_and_vars = self._aggregate_gradients(grads_and_vars)
grads_and_vars = self._transform_gradients(grads_and_vars)
if optimizer_utils.strategy_supports_no_merge_call():
return self._distributed_apply(strategy, grads_and_vars, name,
apply_state)
else:
return distribute_lib.get_replica_context().merge_call(
functools.partial(self._distributed_apply, apply_state=apply_state),
args=(grads_and_vars,),
kwargs={
"name": name,
})
def _distributed_apply(self, distribution, grads_and_vars, name, apply_state):
"""`apply_gradients` using a `DistributionStrategy`."""
def apply_grad_to_update_var(var, grad):
"""Apply gradient to variable."""
if isinstance(var, tensor.Tensor):
raise NotImplementedError("Trying to update a Tensor ", var)
apply_kwargs = {}
if isinstance(grad, indexed_slices.IndexedSlices):
if var.constraint is not None:
raise RuntimeError(
"Cannot use a constraint function on a sparse variable.")
if "apply_state" in self._sparse_apply_args:
apply_kwargs["apply_state"] = apply_state
return self._resource_apply_sparse_duplicate_indices(
grad.values, var, grad.indices, **apply_kwargs)
if "apply_state" in self._dense_apply_args:
apply_kwargs["apply_state"] = apply_state
update_op = self._resource_apply_dense(grad, var, **apply_kwargs)
if var.constraint is not None:
with ops.control_dependencies([update_op]):
return var.assign(var.constraint(var))
else:
return update_op
eagerly_outside_functions = ops.executing_eagerly_outside_functions()
update_ops = []
with name_scope_only_in_function_or_graph(name or self._name):
for grad, var in grads_and_vars:
# Colocate the update with variables to avoid unnecessary communication
# delays. See b/136304694.
with distribution.extended.colocate_vars_with(var):
with name_scope_only_in_function_or_graph(
"update" if eagerly_outside_functions else "update_" +
var.op.name):
update_op = distribution.extended.update(
var, apply_grad_to_update_var, args=(grad,), group=False)
if distribute_lib.in_cross_replica_context():
# In cross-replica context, extended.update returns a list of
# update ops from all replicas (group=False).
update_ops.extend(update_op)
else:
# In replica context, extended.update return the single update op
# of current replica.
update_ops.append(update_op)
any_symbolic = any(isinstance(i, ops.Operation) or
tf_utils.is_symbolic_tensor(i) for i in update_ops)
if not context.executing_eagerly() or any_symbolic:
# If the current context is graph mode or any of the update ops are
# symbolic then the step update should be carried out under a graph
# context. (eager updates execute immediately)
with backend._current_graph(update_ops).as_default(): # pylint: disable=protected-access
with ops.control_dependencies([control_flow_ops.group(update_ops)]):
return self._iterations.assign_add(1, read_value=False)
return self._iterations.assign_add(1)
def get_gradients(self, loss, params):
"""Returns gradients of `loss` with respect to `params`.
Should be used only in legacy v1 graph mode.
Args:
loss: Loss tensor.
params: List of variables.
Returns:
List of gradient tensors.
Raises:
ValueError: In case any gradient cannot be computed (e.g. if gradient
function not implemented).
"""
params = nest.flatten(params)
with backend.get_graph().as_default(), backend.name_scope(self._name +
"/gradients"):
grads = gradients.gradients(loss, params)
for grad, param in zip(grads, params):
if grad is None:
raise ValueError("Variable {} has `None` for gradient. "
"Please make sure that all of your ops have a "
"gradient defined (i.e. are differentiable). "
"Common ops without gradient: "
"K.argmax, K.round, K.eval.".format(param))
return grads
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
grads_and_vars = list(zip(grads, params))
self._assert_valid_dtypes([
v for g, v in grads_and_vars
if g is not None and v.dtype != dtypes.resource
])
return [self.apply_gradients(grads_and_vars)]
def _set_hyper(self, name, value):
"""set hyper `name` to value. value can be callable, tensor, numeric."""
if isinstance(value, trackable.Trackable):
self._track_trackable(value, name, overwrite=True)
if name not in self._hyper:
self._hyper[name] = value
else:
prev_value = self._hyper[name]
if (callable(prev_value)
or isinstance(prev_value,
(tensor.Tensor, int, float,
learning_rate_schedule.LearningRateSchedule))
or isinstance(value, learning_rate_schedule.LearningRateSchedule)):
self._hyper[name] = value
else:
backend.set_value(self._hyper[name], value)
def _get_hyper(self, name, dtype=None):
if not self._hypers_created:
self._create_hypers()
value = self._hyper[name]
if isinstance(value, learning_rate_schedule.LearningRateSchedule):
return value
if callable(value):
value = value()
if dtype:
return math_ops.cast(value, dtype)
else:
return value
def _create_slots(self, var_list):
pass
def _create_all_weights(self, var_list):
"""Creates all weights, including iterations, hyperparameters and slot vars.
This will add newly created variables to `optimizer.weights`.
New variables are only created when this method is called the first time, or
when called with different variables in the var_list.
Args:
var_list: list or tuple of `Variable` objects that will be minimized
using this optimizer.
"""
_ = self.iterations
self._create_hypers()
self._create_slots(var_list)
def __getattribute__(self, name):
"""Overridden to support hyperparameter access."""
try:
return super(OptimizerV2, self).__getattribute__(name)
except AttributeError as e:
# Needed to avoid infinite recursion with __setattr__.
if name == "_hyper":
raise e
# Backwards compatibility with Keras optimizers.
if name == "lr":
name = "learning_rate"
if name in self._hyper:
return self._get_hyper(name)
raise e
def __dir__(self):
result = set(super(OptimizerV2, self).__dir__())
if "_hyper" in result:
result |= self._hyper.keys()
if "learning_rate" in self._hyper.keys():
result.add("lr")
return list(result)
def __setattr__(self, name, value):
"""Override setattr to support dynamic hyperparameter setting."""
# Backwards compatibility with Keras optimizers.
if name == "lr":
name = "learning_rate"
if hasattr(self, "_hyper") and name in self._hyper:
self._set_hyper(name, value)
else:
super(OptimizerV2, self).__setattr__(name, value)
def get_slot_names(self):
"""A list of names for this optimizer's slots."""
return self._slot_names
def add_slot(self, var, slot_name, initializer="zeros", shape=None):
"""Add a new slot variable for `var`.
A slot variable is an additional variable associated with `var` to train.
It is allocated and managed by optimizers, e.g. `Adam`.
Args:
var: a `Variable` object.
slot_name: name of the slot variable.
initializer: initializer of the slot variable
shape: (Optional) shape of the slot variable. If not set, it will default
to the shape of `var`.
Returns:
A slot variable.
"""
if slot_name not in self._slot_names:
self._slot_names.append(slot_name)
var_key = _var_key(var)
slot_dict = self._slots.setdefault(var_key, {})
weight = slot_dict.get(slot_name, None)
if weight is None:
if isinstance(initializer, str) or callable(initializer):
initializer = initializers.get(initializer)
if isinstance(
initializer,
trackable.CheckpointInitialValueCallable) or (shape is not None):
slot_shape = shape
else:
slot_shape = var.shape
initial_value = functools.partial(
initializer, shape=slot_shape, dtype=var.dtype)
else:
initial_value = initializer
with self._distribution_strategy_scope():
strategy = distribute_lib.get_strategy()
if not strategy.extended.variable_created_in_scope(var):
raise ValueError(
"Trying to create optimizer slot variable under the scope for "
"tf.distribute.Strategy ({}), which is different from the scope "
"used for the original variable ({}). Make sure the slot "
"variables are created under the same strategy scope. This may "
"happen if you're restoring from a checkpoint outside the scope"
.format(strategy, var))
with strategy.extended.colocate_vars_with(var):
weight = tf_variables.Variable(
name="%s/%s" % (var._shared_name, slot_name), # pylint: disable=protected-access
dtype=var.dtype,
trainable=False,
initial_value=initial_value)
backend.track_variable(weight)
slot_dict[slot_name] = weight
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=weight)
self._weights.append(weight)
return weight
def get_slot(self, var, slot_name):
var_key = _var_key(var)
slot_dict = self._slots[var_key]
return slot_dict[slot_name]
def _prepare(self, var_list):
keys = set()
for var in var_list:
if isinstance(var, ds_values.DistributedValues):
var_devices = var._devices # pylint: disable=protected-access
else:
var_devices = [var.device]
var_dtype = var.dtype.base_dtype
for var_device in var_devices:
keys.add((var_device, var_dtype))
apply_state = {}
for var_device, var_dtype in keys:
apply_state[(var_device, var_dtype)] = {}
with ops.device(var_device):
self._prepare_local(var_device, var_dtype, apply_state)
return apply_state
def _prepare_local(self, var_device, var_dtype, apply_state):
if "learning_rate" in self._hyper:
lr_t = array_ops.identity(self._decayed_lr(var_dtype))
apply_state[(var_device, var_dtype)]["lr_t"] = lr_t
def _fallback_apply_state(self, var_device, var_dtype):
"""Compatibility for subclasses that don't pass apply_state through."""
apply_state = {(var_device, var_dtype): {}}
self._prepare_local(var_device, var_dtype, apply_state)
return apply_state[(var_device, var_dtype)]
def _create_hypers(self):
if self._hypers_created:
return
with self._distribution_strategy_scope():
# Iterate hyper values deterministically.
for name, value in sorted(self._hyper.items()):
if isinstance(
value, (tensor.Tensor, tf_variables.Variable)) or callable(value):
# The check for `callable` covers the usage when `value` is a
# `LearningRateSchedule`, in which case it does not need to create a
# variable.
continue
else:
self._hyper[name] = self.add_weight(
name,
shape=[],
trainable=False,
initializer=value,
aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)
self._hypers_created = True
@property
def iterations(self):
"""Variable. The number of training steps this Optimizer has run."""
if self._iterations is None:
with self._distribution_strategy_scope():
self._iterations = self.add_weight(
"iter",
shape=[],
dtype=dtypes.int64,
trainable=False,
aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)
self._weights.append(self._iterations)
return self._iterations
@iterations.setter
def iterations(self, variable):
if self._iterations is not None:
raise RuntimeError("Cannot set `iterations` to a new Variable after "
"the Optimizer weights have been created")
self._iterations = variable
self._weights.append(self._iterations)
def _decayed_lr(self, var_dtype):
"""Get decayed learning rate as a Tensor with dtype=var_dtype."""
lr_t = self._get_hyper("learning_rate", var_dtype)
if isinstance(lr_t, learning_rate_schedule.LearningRateSchedule):
local_step = math_ops.cast(self.iterations, var_dtype)
lr_t = math_ops.cast(lr_t(local_step), var_dtype)
if self._initial_decay > 0.:
local_step = math_ops.cast(self.iterations, var_dtype)
decay_t = math_ops.cast(self._initial_decay, var_dtype)
lr_t = lr_t / (1. + decay_t * local_step)
return lr_t
@abc.abstractmethod
def get_config(self):
"""Returns the config of the optimizer.
An optimizer config is a Python dictionary (serializable)
containing the configuration of an optimizer.
The same optimizer can be reinstantiated later
(without any saved state) from this configuration.
Returns:
Python dictionary.
"""
config = {"name": self._name}
if self.clipnorm is not None:
config["clipnorm"] = self.clipnorm
if self.clipvalue is not None:
config["clipvalue"] = self.clipvalue
if self.global_clipnorm is not None:
config["global_clipnorm"] = self.global_clipnorm
return config
@classmethod
def from_config(cls, config, custom_objects=None):
  """Creates an optimizer from its config.

  This method is the reverse of `get_config`,
  capable of instantiating the same optimizer from the config
  dictionary.

  Args:
      config: A Python dictionary, typically the output of get_config.
      custom_objects: A Python dictionary mapping names to additional Python
        objects used to create this optimizer, such as a function used for a
        hyperparameter.

  Returns:
      An optimizer instance.
  """
  # Accept the legacy "lr" alias by renaming it in place.
  if "lr" in config:
    config["learning_rate"] = config.pop("lr")
  # A dict-valued learning rate is a serialized `LearningRateSchedule`.
  lr = config.get("learning_rate")
  if isinstance(lr, dict):
    config["learning_rate"] = learning_rate_schedule.deserialize(
        lr, custom_objects=custom_objects)
  return cls(**config)
def _serialize_hyperparameter(self, hyperparameter_name):
  """Serialize a hyperparameter that can be a float, callable, or Tensor."""
  value = self._hyper[hyperparameter_name]
  if isinstance(value, learning_rate_schedule.LearningRateSchedule):
    # Schedules serialize to a nested config dict.
    return learning_rate_schedule.serialize(value)
  if callable(value):
    # Callables are evaluated to their current value.
    return value()
  if tensor_util.is_tf_type(value):
    # Tensors are converted to a concrete Python value.
    return backend.get_value(value)
  return value
def variables(self):
  """Returns variables of this Optimizer based on the order created."""
  # NOTE: a method here, unlike the `weights` property below which returns
  # the same list.
  return self._weights
@property
def weights(self):
  """Returns variables of this Optimizer based on the order created."""
  # Same list as `variables()`; exposed as a property for Keras-style access.
  return self._weights
def get_weights(self):
  """Returns the current weights of the optimizer.

  The weights of an optimizer are its state (ie, variables).
  This function returns the weight values associated with this
  optimizer as a list of Numpy arrays. The first value is always the
  iterations count of the optimizer, followed by the optimizer's state
  variables in the order they were created. The returned list can in turn
  be used to load state into similarly parameterized optimizers.

  For example, the RMSprop optimizer for this simple model returns a list of
  three values-- the iteration count, followed by the root-mean-square value
  of the kernel and bias of the single Dense layer:

  >>> opt = tf.keras.optimizers.RMSprop()
  >>> m = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
  >>> m.compile(opt, loss='mse')
  >>> data = np.arange(100).reshape(5, 20)
  >>> labels = np.zeros(5)
  >>> results = m.fit(data, labels)  # Training.
  >>> len(opt.get_weights())
  3

  Returns:
      Weights values as a list of numpy arrays.
  """
  params = self.weights
  # Fetch all variable values in one batched call instead of one read each.
  return backend.batch_get_value(params)
# TODO(tanzheny): Maybe share this logic with base_layer.
def set_weights(self, weights):
  """Set the weights of the optimizer.

  The weights of an optimizer are its state (ie, variables).
  This function takes the weight values associated with this
  optimizer as a list of Numpy arrays. The first value is always the
  iterations count of the optimizer, followed by the optimizer's state
  variables in the order they are created. The passed values are used to set
  the new state of the optimizer.

  For example, the RMSprop optimizer for this simple model takes a list of
  three values-- the iteration count, followed by the root-mean-square value
  of the kernel and bias of the single Dense layer:

  >>> opt = tf.keras.optimizers.RMSprop()
  >>> m = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
  >>> m.compile(opt, loss='mse')
  >>> data = np.arange(100).reshape(5, 20)
  >>> labels = np.zeros(5)
  >>> results = m.fit(data, labels)  # Training.
  >>> new_weights = [np.array(10), np.ones([20, 10]), np.zeros([10])]
  >>> opt.set_weights(new_weights)
  >>> opt.iterations
  <tf.Variable 'RMSprop/iter:0' shape=() dtype=int64, numpy=10>

  Args:
      weights: weight values as a list of numpy arrays.

  Raises:
      ValueError: If the number of provided weights, or the shape of any
        individual weight, does not match the optimizer's variables.
  """
  params = self.weights
  if len(params) != len(weights):
    # Idiomatic f-string; the message text is unchanged from the previous
    # "+"-concatenated form.
    raise ValueError(
        f"You called `set_weights(weights)` on optimizer {self._name} "
        f"with a weight list of length {len(weights)}, but the optimizer "
        f"was expecting {len(params)} weights. "
        f"Provided weights: {str(weights)[:50]}...")
  if not params:
    return
  # Validate all shapes before applying any assignment so a partial update
  # is never performed.
  weight_value_tuples = []
  param_values = backend.batch_get_value(params)
  for pv, p, w in zip(param_values, params, weights):
    if pv.shape != w.shape:
      raise ValueError(f"Optimizer weight shape {pv.shape} "
                       "not compatible with "
                       f"provided weight shape {w.shape}")
    weight_value_tuples.append((p, w))
  backend.batch_set_value(weight_value_tuples)
def add_weight(self,
               name,
               shape,
               dtype=None,
               initializer="zeros",
               trainable=None,
               synchronization=tf_variables.VariableSynchronization.AUTO,
               aggregation=tf_variables.VariableAggregation.NONE):
  """Creates an optimizer state variable and registers it for tracking.

  Args:
    name: name for the variable.
    shape: shape of the variable.
    dtype: dtype of the variable; defaults to float32.
    initializer: initializer instance, name, or callable.
    trainable: whether the variable is trainable. Defaults to True unless
      `synchronization` is ON_READ, in which case it must be falsy.
    synchronization: `tf.VariableSynchronization` for the variable.
    aggregation: `tf.VariableAggregation` for the variable.

  Returns:
    The created variable.

  Raises:
    ValueError: if `trainable=True` is combined with
      `synchronization=ON_READ`.
  """
  if dtype is None:
    dtype = dtypes.float32
  if isinstance(initializer, str) or callable(initializer):
    # Resolve string names / callables to an initializer object.
    initializer = initializers.get(initializer)
  if synchronization == tf_variables.VariableSynchronization.ON_READ:
    if trainable:
      raise ValueError(
          "Synchronization value can be set to "
          "VariableSynchronization.ON_READ only for non-trainable variables. "
          "You have specified trainable=True and "
          "synchronization=VariableSynchronization.ON_READ.")
    else:
      # Set trainable to be false when variable is to be synced on read.
      trainable = False
  elif trainable is None:
    trainable = True
  variable = self._add_variable_with_custom_getter(
      name=name,
      shape=shape,
      getter=base_layer_utils.make_variable,
      overwrite=True,
      initializer=initializer,
      dtype=dtype,
      trainable=trainable,
      use_resource=True,
      synchronization=synchronization,
      aggregation=aggregation)
  # Register with the backend so Keras can track/initialize the variable.
  backend.track_variable(variable)
  return variable
def _init_set_name(self, name, zero_based=True):
  """Sets `self._name`, generating a unique snake_case name when none given."""
  if name:
    self._name = name
  else:
    # Derive a unique default name from the class name, e.g. "adam_1".
    self._name = backend.unique_object_name(
        generic_utils.to_snake_case(self.__class__.__name__),
        zero_based=zero_based)
def _assert_valid_dtypes(self, tensors):
  """Asserts tensors are all valid types (see `_valid_dtypes`).

  Args:
    tensors: Tensors to check.

  Raises:
    ValueError: If any tensor is not a valid type.
  """
  valid_dtypes = self._valid_dtypes()
  for t in tensors:
    dtype = t.dtype.base_dtype
    if dtype not in valid_dtypes:
      # `list(...)` replaces the redundant identity comprehension
      # `[v for v in valid_dtypes]`; same content and iteration order.
      raise ValueError("Invalid type %r for %s, expected: %s." %
                       (dtype, t.name, list(valid_dtypes)))
def _valid_dtypes(self):
  """Valid types for loss, variables and gradients.

  Subclasses should override to allow other float types.

  Returns:
    Valid types for loss, variables and gradients.
  """
  # Shared module-level default set; defined elsewhere in this file.
  return _DEFAULT_VALID_DTYPES
def _call_if_callable(self, param):
"""Call the function if param is callable."""
return param() if callable(param) else param
def _resource_apply_dense(self, grad, handle, apply_state):
  """Add ops to apply dense gradients to the variable `handle`.

  Args:
    grad: a `Tensor` representing the gradient.
    handle: a `Tensor` of dtype `resource` which points to the variable to be
      updated.
    apply_state: A dict which is used across multiple apply calls.

  Returns:
    An `Operation` which updates the value of the variable.
  """
  # Intentionally abstract: each concrete optimizer supplies the update rule.
  raise NotImplementedError("Must be implemented in subclasses.")
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices,
                                             **kwargs):
  """Add ops to apply sparse gradients to `handle`, with repeated indices.

  Optimizers which override this method must deal with repeated indices. See
  the docstring of `_apply_sparse_duplicate_indices` for details. By default
  the correct behavior, to sum non-unique indices and their associated
  gradients, is enforced by first pre-processing `grad` and `indices` and
  passing them on to `_resource_apply_sparse`. Optimizers which deal correctly
  with duplicate indices may instead override this method to avoid the
  overhead of summing.

  Args:
    grad: a `Tensor` representing the gradient for the affected indices.
    handle: a `Tensor` of dtype `resource` which points to the variable to be
      updated.
    indices: a `Tensor` of integral type representing the indices for which
      the gradient is nonzero. Indices may be repeated.
    **kwargs: May optionally contain `apply_state`

  Returns:
    An `Operation` which updates the value of the variable.
  """
  # Sum gradients for repeated indices so the sparse update is well-defined,
  # then delegate to `_resource_apply_sparse` with unique indices.
  summed_grad, unique_indices = _deduplicate_indexed_slices(
      values=grad, indices=indices)
  return self._resource_apply_sparse(summed_grad, handle, unique_indices,
                                     **kwargs)
def _resource_apply_sparse(self, grad, handle, indices, apply_state):
  """Add ops to apply sparse gradients to the variable `handle`.

  Similar to `_apply_sparse`, the `indices` argument to this method has been
  de-duplicated. Optimizers which deal correctly with non-unique indices may
  instead override `_resource_apply_sparse_duplicate_indices` to avoid this
  overhead.

  Args:
    grad: a `Tensor` representing the gradient for the affected indices.
    handle: a `Tensor` of dtype `resource` which points to the variable to be
      updated.
    indices: a `Tensor` of integral type representing the indices for which
      the gradient is nonzero. Indices are unique.
    apply_state: A dict which is used across multiple apply calls.

  Returns:
    An `Operation` which updates the value of the variable.
  """
  # Intentionally abstract: each concrete optimizer supplies the update rule.
  raise NotImplementedError("Must be implemented in subclasses.")
def _resource_scatter_add(self, x, i, v):
  """Scatter-adds `v` into rows `i` of resource variable `x`; returns its value."""
  with ops.control_dependencies([
      gen_resource_variable_ops.ResourceScatterAdd(
          resource=x.handle, indices=i, updates=v)
  ]):
    # Reading under the control dependency ensures the scatter is applied
    # before the returned value is produced.
    return x.value()
def _resource_scatter_update(self, x, i, v):
  """Scatter-writes `v` into rows `i` of resource variable `x`; returns its value."""
  with ops.control_dependencies(
      [gen_resource_variable_ops.ResourceScatterUpdate(
          resource=x.handle, indices=i, updates=v)]):
    # Reading under the control dependency ensures the update is applied
    # before the returned value is produced.
    return x.value()
@property
@layer_utils.cached_per_instance
def _dense_apply_args(self):
  # Cached (per instance) argument names of `_resource_apply_dense`;
  # presumably used by callers to check for optional args such as
  # `apply_state` — confirm at call sites.
  return tf_inspect.getfullargspec(self._resource_apply_dense).args
@property
@layer_utils.cached_per_instance
def _sparse_apply_args(self):
  # Cached (per instance) argument names of `_resource_apply_sparse`;
  # presumably used by callers to check for optional args such as
  # `apply_state` — confirm at call sites.
  return tf_inspect.getfullargspec(self._resource_apply_sparse).args
# ---------------
# For implementing the trackable interface
# ---------------
def _restore_slot_variable(self, slot_name, variable, slot_variable):
  """Restore a newly created slot variable's value."""
  key = _var_key(variable)
  pending = self._deferred_slot_restorations.get(slot_name, {}).pop(key, [])
  # Apply restores with the highest restore UID first to minimize the
  # number of redundant assignments.
  pending.sort(key=lambda position: position.restore_uid, reverse=True)
  for position in pending:
    position.restore(slot_variable)
def _create_or_restore_slot_variable(
    self, slot_variable_position, slot_name, variable):
  """Restore a slot variable's value, possibly creating it.

  Called when a variable which has an associated slot variable is created or
  restored. When executing eagerly, we create the slot variable with a
  restoring initializer.

  No new variables are created when graph building. Instead,
  _restore_slot_variable catches these after normal creation and adds restore
  ops to the graph. This method is nonetheless important when graph building
  for the case when a slot variable has already been created but `variable`
  has just been added to a dependency graph (causing us to realize that the
  slot variable needs to be restored).

  Args:
    slot_variable_position: A `trackable._CheckpointPosition` object
      indicating the slot variable `Trackable` object to be restored.
    slot_name: The name of this `Optimizer`'s slot to restore into.
    variable: The variable object this slot is being created for.
  """
  variable_key = _var_key(variable)
  slot_dict = self._slots.get(variable_key, {})
  slot_variable = slot_dict.get(slot_name, None)
  # Eagerly create the slot with a checkpoint-restoring initializer only
  # when it doesn't exist yet and the conditions below are met.
  if (slot_variable is None and context.executing_eagerly() and
      slot_variable_position.is_simple_variable()
      # Defer slot variable creation if there is an active variable creator
      # scope. Generally we'd like to eagerly create/restore slot variables
      # when possible, but this may mean that scopes intended to catch
      # `variable` also catch its eagerly created slot variable
      # unintentionally (specifically make_template would add a dependency on
      # a slot variable if not for this case). Deferring is mostly harmless
      # (aside from double initialization), and makes variable creator scopes
      # behave the same way they do when graph building.
      #
      # One notable case is with distribution strategy, which uses variable
      # creator scope but always desires the `variable` and the slot to use
      # the same scope, thus we can safely eagerly create/restore slot
      # variables.
      and (not ops.get_default_graph()._variable_creator_stack or  # pylint: disable=protected-access
           self._distribution_strategy)):
    initializer = trackable.CheckpointInitialValueCallable(
        checkpoint_position=slot_variable_position)
    slot_variable = self.add_slot(
        var=variable,
        initializer=initializer,
        slot_name=slot_name,
        shape=slot_variable_position.value_shape())
  # Slot variables are not owned by any one object (because we don't want to
  # save the slot variable if the optimizer is saved without the non-slot
  # variable, or if the non-slot variable is saved without the optimizer;
  # it's a dependency hypergraph with edges of the form (optimizer, non-slot
  # variable, variable)). So we don't _track_ slot variables anywhere, and
  # instead special-case this dependency and otherwise pretend it's a normal
  # graph.
  if slot_variable is not None:
    # If we've either made this slot variable, or if we've pulled out an
    # existing slot variable, we should restore it.
    slot_variable_position.restore(slot_variable)
  else:
    # We didn't make the slot variable. Defer restoring until it gets created
    # normally. We keep a list rather than the one with the highest restore
    # UID in case slot variables have their own dependencies, in which case
    # those could differ between restores.
    self._deferred_slot_restorations.setdefault(
        slot_name, {}).setdefault(variable_key, []).append(
            slot_variable_position)
@contextlib.contextmanager
def _distribution_strategy_scope(self):
  """Context manager entering the `tf.distribute.Strategy` scope this
  optimizer was created under, if one exists and no strategy is already
  active; otherwise a no-op.

  NOTE: the previous docstring said this "returns" the strategy — it is a
  context manager, not an accessor.
  """
  if self._distribution_strategy and not distribute_lib.has_strategy():
    with self._distribution_strategy.scope():
      yield self._distribution_strategy.scope()
  else:
    yield
def _var_key(var):
"""Key for representing a primary variable, for looking up slots.
In graph mode the name is derived from the var shared name.
In eager mode the name is derived from the var unique id.
If distribution strategy exists, get the primary variable first.
Args:
var: the variable.
Returns:
the unique name of the variable.
"""
# pylint: disable=protected-access
# Get the distributed variable if it exists.
if hasattr(var, "_distributed_container"):
var = var._distributed_container()
if var._in_graph_mode:
return var._shared_name
return var._unique_id
def _get_slot_key_from_var(var, slot_name):
  """Get the slot key for the variable: var_name/slot_name."""
  return "{}/{}".format(_var_key(var), slot_name)
| OptimizerV2 |
python | getsentry__sentry | src/sentry/lang/javascript/apps.py | {
"start": 36,
"end": 258
} | class ____(AppConfig):
name = "sentry.lang.javascript"
def ready(self) -> None:
from sentry.plugins.base import register
from .plugin import JavascriptPlugin
register(JavascriptPlugin)
| Config |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/addmm_test.py | {
"start": 710,
"end": 2300
} | class ____(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device, dtype):
self.inputs = {
"input_one": torch.rand(
M, K, device=device, requires_grad=self.auto_set(), dtype=dtype
),
"mat1": torch.rand(
M, N, device=device, requires_grad=self.auto_set(), dtype=dtype
),
"mat2": torch.rand(
N, K, device=device, requires_grad=self.auto_set(), dtype=dtype
),
}
self.set_module_name("addmm")
def forward(self, input_one, mat1, mat2):
return torch.addmm(input_one, mat1, mat2)
def get_memory_traffic_bytes(self):
"""Override for addmm: input + (mat1 @ mat2) -> (M, K)
addmm computes: input_one (M, K) + mat1 (M, N) @ mat2 (N, K)
Memory traffic: read(M*K + M*N + N*K) + write(M*K)
"""
input_one = self.inputs["input_one"]
mat1 = self.inputs["mat1"]
mat2 = self.inputs["mat2"]
M, K = input_one.shape
M_check, N = mat1.shape
N_check, K_check = mat2.shape
assert M == M_check and K == K_check and N == N_check, (
"Matrix dimensions must match"
)
bytes_per_element = input_one.element_size()
total_elements = M * K + M * N + N * K + M * K
return total_elements * bytes_per_element
op_bench.generate_pt_test(addmm_short_configs + addmm_long_configs, AddmmBenchmark)
op_bench.generate_pt_gradient_test(addmm_long_configs, AddmmBenchmark)
"""Mircobenchmark for addbmm operator."""
| AddmmBenchmark |
python | getsentry__sentry | src/sentry/integrations/discord/message_builder/base/component/button.py | {
"start": 423,
"end": 1196
} | class ____(DiscordMessageComponent):
# Note that buttons must be contained in an ActionRow!
def __init__(
self,
custom_id: str,
style: int = DiscordButtonStyle.SECONDARY,
label: str | None = None,
disabled: bool = False,
) -> None:
self.style = style
self.custom_id = custom_id
self.label = label
self.disabled = disabled
super().__init__(type=2)
def build(self) -> DiscordButtonDict:
button = DiscordButtonDict(type=self.type, style=self.style, custom_id=self.custom_id)
if self.label is not None:
button["label"] = self.label
if self.disabled is not None:
button["disabled"] = self.disabled
return button
| DiscordButton |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/conjecture/datatree.py | {
"start": 21207,
"end": 39834
} | class ____:
"""
A DataTree tracks the structured history of draws in some test function,
across multiple ConjectureData objects.
This information is used by ConjectureRunner to generate novel prefixes of
this tree (see generate_novel_prefix). A novel prefix is a sequence of draws
which the tree has not seen before, and therefore the ConjectureRunner has
not generated as an input to the test function before.
DataTree tracks the following:
- Drawn choices in the choice sequence
- ConjectureData.draw_integer()
- ConjectureData.draw_float()
- ConjectureData.draw_string()
- ConjectureData.draw_boolean()
- ConjectureData.draw_bytes()
- Test conclusions (with some Status, e.g. Status.VALID)
- ConjectureData.conclude_test()
A DataTree is — surprise — a *tree*. A node in this tree is either a choice draw
with some value, a test conclusion with some Status, or a special `Killed` value,
which denotes that further draws may exist beyond this node but should not be
considered worth exploring when generating novel prefixes. A node is a leaf
iff it is a conclusion or Killed.
A branch from node A to node B indicates that we have previously seen some
sequence (a, b) of draws, where a and b are the values in nodes A and B.
Similar intuition holds for conclusion and Killed nodes.
Examples
--------
To see how a DataTree gets built through successive sets of draws, consider
the following code that calls through to some ConjecutreData object `data`.
The first call can be either True or False, and the second call can be any
integer in the range [1, 3].
data.draw_boolean()
data.draw_integer(1, 3)
To start, the corresponding DataTree object is completely empty.
┌──────┐
│ root │
└──────┘
We happen to draw True and then 2 in the above code. The tree tracks this.
(2 also connects to a child Conclusion node with Status.VALID since it's the
final draw in the code. I'll omit Conclusion nodes in diagrams for brevity.)
┌──────┐
│ root │
└──┬───┘
┌──┴───┐
│ True │
└──┬───┘
┌──┴───┐
│ 2 │
└──────┘
This is a very boring tree so far! But now we happen to draw False and
then 1. This causes a split in the tree. Remember, DataTree tracks history
over all invocations of a function, not just one. The end goal is to know
what invocations haven't been tried yet, after all.
┌──────┐
┌───┤ root ├───┐
│ └──────┘ │
┌──┴───┐ ┌─┴─────┐
│ True │ │ False │
└──┬───┘ └──┬────┘
┌─┴─┐ ┌─┴─┐
│ 2 │ │ 1 │
└───┘ └───┘
If we were to ask DataTree for a novel prefix at this point, it might
generate any of (True, 1), (True, 3), (False, 2), or (False, 3).
Note that the novel prefix stops as soon as it generates a novel node. For
instance, if we had generated a novel prefix back when the tree was only
root -> True -> 2, we could have gotten any of (True, 1), (True, 3), or
(False). But we could *not* have gotten (False, n), because both False and
n were novel at that point, and we stop at the first novel node — False.
I won't belabor this example. Here's what the tree looks like when fully
explored:
┌──────┐
┌──────┤ root ├──────┐
│ └──────┘ │
┌──┴───┐ ┌─┴─────┐
┌──┤ True ├──┐ ┌───┤ False ├──┐
│ └──┬───┘ │ │ └──┬────┘ │
┌─┴─┐ ┌─┴─┐ ┌─┴─┐ ┌─┴─┐ ┌─┴─┐ ┌─┴─┐
│ 1 │ │ 2 │ │ 3 │ │ 1 │ │ 2 │ │ 3 │
└───┘ └───┘ └───┘ └───┘ └───┘ └───┘
You could imagine much more complicated trees than this arising in practice,
and indeed they do. In particular, the tree need not be balanced or 'nice'
like the tree above. For instance,
b = data.draw_boolean()
if b:
data.draw_integer(1, 3)
results in a tree with the entire right part lopped off, and False leading
straight to a conclusion node with Status.VALID. As another example,
n = data.draw_integers()
assume(n >= 3)
data.draw_string()
results in a tree with the 0, 1, and 2 nodes leading straight to a
conclusion node with Status.INVALID, and the rest branching off into all
the possibilities of draw_string.
Notes
-----
The above examples are slightly simplified and are intended to convey
intuition. In practice, there are some implementation details to be aware
of.
- In draw nodes, we store the constraints used in addition to the value drawn.
E.g. the node corresponding to data.draw_float(min_value=1.0, max_value=1.5)
would store {"min_value": 1.0, "max_value": 1.5, ...} (default values for
other constraints omitted).
The constraints parameters have the potential to change both the range of
possible outputs of a node, and the probability distribution within that
range, so we need to use these when drawing in DataTree as well. We draw
values using these constraints when (1) generating a novel value for a node
and (2) choosing a random child when traversing the tree.
- For space efficiency, rather than tracking the full tree structure, we
store DataTree as a radix tree. This is conceptually equivalent (radix
trees can always be "unfolded" to the full tree) but it means the internal
representation may differ in practice.
See TreeNode for more information.
"""
def __init__(self) -> None:
self.root: TreeNode = TreeNode()
self._children_cache: dict[ChoiceT, ChildrenCacheValueT] = {}
@property
def is_exhausted(self) -> bool:
"""
Returns True if every node is exhausted, and therefore the tree has
been fully explored.
"""
return self.root.is_exhausted
def generate_novel_prefix(self, random: Random) -> tuple[ChoiceT, ...]:
"""Generate a short random string that (after rewriting) is not
a prefix of any choice sequence previously added to the tree.
The resulting prefix is essentially arbitrary - it would be nice
for it to be uniform at random, but previous attempts to do that
have proven too expensive.
"""
assert not self.is_exhausted
prefix = []
def append_choice(choice_type: ChoiceTypeT, choice: ChoiceT) -> None:
if choice_type == "float":
assert isinstance(choice, int)
choice = int_to_float(choice)
prefix.append(choice)
current_node = self.root
while True:
assert not current_node.is_exhausted
for i, (choice_type, constraints, value) in enumerate(
zip(
current_node.choice_types,
current_node.constraints,
current_node.values,
strict=True,
)
):
if i in current_node.forced:
append_choice(choice_type, value)
else:
attempts = 0
while True:
if attempts <= 10:
try:
node_value = self._draw(
choice_type, constraints, random=random
)
except StopTest: # pragma: no cover
# it is possible that drawing from a fresh data can
# overrun BUFFER_SIZE, due to eg unlucky rejection sampling
# of integer probes. Retry these cases.
attempts += 1
continue
else:
node_value = self._draw_from_cache(
choice_type,
constraints,
key=id(current_node),
random=random,
)
if node_value != value:
append_choice(choice_type, node_value)
break
attempts += 1
self._reject_child(
choice_type,
constraints,
child=node_value,
key=id(current_node),
)
# We've now found a value that is allowed to
# vary, so what follows is not fixed.
return tuple(prefix)
assert not isinstance(current_node.transition, (Conclusion, Killed))
if current_node.transition is None:
return tuple(prefix)
branch = current_node.transition
assert isinstance(branch, Branch)
attempts = 0
while True:
if attempts <= 10:
try:
node_value = self._draw(
branch.choice_type, branch.constraints, random=random
)
except StopTest: # pragma: no cover
attempts += 1
continue
else:
node_value = self._draw_from_cache(
branch.choice_type,
branch.constraints,
key=id(branch),
random=random,
)
try:
child = branch.children[node_value]
except KeyError:
append_choice(branch.choice_type, node_value)
return tuple(prefix)
if not child.is_exhausted:
append_choice(branch.choice_type, node_value)
current_node = child
break
attempts += 1
self._reject_child(
branch.choice_type,
branch.constraints,
child=node_value,
key=id(branch),
)
# We don't expect this assertion to ever fire, but coverage
# wants the loop inside to run if you have branch checking
# on, hence the pragma.
assert ( # pragma: no cover
attempts != 1000
or len(branch.children) < branch.max_children
or any(not v.is_exhausted for v in branch.children.values())
)
def rewrite(self, choices):
"""Use previously seen ConjectureData objects to return a tuple of
the rewritten choice sequence and the status we would get from running
that with the test function. If the status cannot be predicted
from the existing values it will be None."""
data = ConjectureData.for_choices(choices)
try:
self.simulate_test_function(data)
return (data.choices, data.status)
except PreviouslyUnseenBehaviour:
return (choices, None)
def simulate_test_function(self, data: ConjectureData) -> None:
"""Run a simulated version of the test function recorded by
this tree. Note that this does not currently call ``stop_span``
or ``start_span`` as these are not currently recorded in the
tree. This will likely change in future."""
node = self.root
def draw(choice_type, constraints, *, forced=None, convert_forced=True):
if choice_type == "float" and forced is not None and convert_forced:
forced = int_to_float(forced)
draw_func = getattr(data, f"draw_{choice_type}")
value = draw_func(**constraints, forced=forced)
if choice_type == "float":
value = float_to_int(value)
return value
try:
while True:
for i, (choice_type, constraints, previous) in enumerate(
zip(node.choice_types, node.constraints, node.values, strict=True)
):
v = draw(
choice_type,
constraints,
forced=previous if i in node.forced else None,
)
if v != previous:
raise PreviouslyUnseenBehaviour
if isinstance(node.transition, Conclusion):
t = node.transition
data.conclude_test(t.status, t.interesting_origin)
elif node.transition is None:
raise PreviouslyUnseenBehaviour
elif isinstance(node.transition, Branch):
v = draw(node.transition.choice_type, node.transition.constraints)
try:
node = node.transition.children[v]
except KeyError as err:
raise PreviouslyUnseenBehaviour from err
else:
assert isinstance(node.transition, Killed)
data.observer.kill_branch()
node = node.transition.next_node
except StopTest:
pass
def new_observer(self):
return TreeRecordingObserver(self)
def _draw(
self,
choice_type: ChoiceTypeT,
constraints: ChoiceConstraintsT,
*,
random: Random,
) -> ChoiceT:
from hypothesis.internal.conjecture.data import draw_choice
value = draw_choice(choice_type, constraints, random=random)
# using floats as keys into branch.children breaks things, because
# e.g. hash(0.0) == hash(-0.0) would collide as keys when they are
# in fact distinct child branches.
# To distinguish floats here we'll use their bits representation. This
# entails some bookkeeping such that we're careful about when the
# float key is in its bits form (as a key into branch.children) and
# when it is in its float form (as a value we want to write to the
# choice sequence), and converting between the two forms as appropriate.
if choice_type == "float":
assert isinstance(value, float)
value = float_to_int(value)
return value
def _get_children_cache(
self, choice_type: ChoiceTypeT, constraints: ChoiceConstraintsT, *, key: ChoiceT
) -> ChildrenCacheValueT:
# cache the state of the children generator per node/branch (passed as
# `key` here), such that we track which children we've already tried
# for this branch across draws.
# We take advantage of python generators here as one-way iterables,
# so each time we iterate we implicitly store our position in the
# children generator and don't re-draw children. `children` is the
# concrete list of children draw from the generator that we will work
# with. Whenever we need to top up this list, we will draw a new value
# from the generator.
if key not in self._children_cache:
generator = all_children(choice_type, constraints)
children: list[ChoiceT] = []
rejected: set[ChoiceT] = set()
self._children_cache[key] = (generator, children, rejected)
return self._children_cache[key]
def _draw_from_cache(
self,
choice_type: ChoiceTypeT,
constraints: ChoiceConstraintsT,
*,
key: ChoiceT,
random: Random,
) -> ChoiceT:
(generator, children, rejected) = self._get_children_cache(
choice_type, constraints, key=key
)
# Keep a stock of 100 potentially-valid children at all times.
# This number is chosen to balance memory/speed vs randomness. Ideally
# we would sample uniformly from all not-yet-rejected children, but
# computing and storing said children is not free.
# no-branch because coverage of the fall-through case here is a bit
# annoying.
if len(children) < 100: # pragma: no branch
for v in generator:
if choice_type == "float":
assert isinstance(v, float)
v = float_to_int(v)
if v in rejected:
continue
children.append(v)
if len(children) >= 100:
break
return random.choice(children)
def _reject_child(
self,
choice_type: ChoiceTypeT,
constraints: ChoiceConstraintsT,
*,
child: ChoiceT,
key: ChoiceT,
) -> None:
(_generator, children, rejected) = self._get_children_cache(
choice_type, constraints, key=key
)
rejected.add(child)
# we remove a child from the list of possible children *only* when it is
# rejected, and not when it is initially drawn in _draw_from_cache. The
# reason is that a child being drawn does not guarantee that child will
# be used in a way such that it is written back to the tree, so it needs
# to be available for future draws until we are certain it has been
# used.
#
# For instance, if we generated novel prefixes in a loop (but never used
# those prefixes to generate new values!) then we don't want to remove
# the drawn children from the available pool until they are actually
# used.
#
# This does result in a small inefficiency: we may draw a child,
# immediately use it (so we know it cannot be drawn again), but still
# wait to draw and reject it here, because DataTree cannot guarantee
# the drawn child has been used.
if child in children:
children.remove(child)
def _repr_pretty_(self, p: "RepresentationPrinter", cycle: bool) -> None:
assert cycle is False
p.pretty(self.root)
| DataTree |
python | Pylons__pyramid | tests/test_scripts/test_pserve.py | {
"start": 124,
"end": 5113
} | class ____(unittest.TestCase):
def setUp(self):
self.out_ = StringIO()
def out(self, msg):
self.out_.write(msg)
def _getTargetClass(self):
from pyramid.scripts.pserve import PServeCommand
return PServeCommand
def _makeOne(self, *args, **kwargs):
effargs = ['pserve']
effargs.extend(args)
cmd = self._getTargetClass()(effargs, **kwargs)
cmd.out = self.out
self.loader = dummy.DummyLoader()
cmd._get_config_loader = self.loader
return cmd
def test_run_no_args(self):
inst = self._makeOne()
result = inst.run()
self.assertEqual(result, 2)
self.assertEqual(self.out_.getvalue(), 'You must give a config file')
def test_parse_vars_good(self):
inst = self._makeOne('development.ini', 'a=1', 'b=2')
app = dummy.DummyApp()
def get_app(name, global_conf):
app.name = name
app.global_conf = global_conf
return app
self.loader.get_wsgi_app = get_app
self.loader.server = lambda x: x
inst.run()
self.assertEqual(
app.global_conf, {'a': '1', 'b': '2', '__script__': 'pserve'}
)
def test_original_ignore_files(self):
msg = 'A change to "ignore_files" was detected'
def get_app(name, global_conf):
app.name = name
app.global_conf = global_conf
return app
inst = self._makeOne('development.ini')
app = dummy.DummyApp()
self.loader.get_wsgi_app = get_app
self.loader.server = lambda x: x
self.loader.settings = {'pserve': {'ignore_files': '*.txt'}}
inst.run()
self.assertNotIn(msg, self.out_.getvalue())
inst = self._makeOne(
'development.ini', original_ignore_files={'*.txt'}
)
app = dummy.DummyApp()
self.loader.get_wsgi_app = get_app
self.loader.server = lambda x: x
self.loader.settings = {'pserve': {'ignore_files': 'foo/*.txt'}}
inst.run()
self.assertIn(msg, self.out_.getvalue())
def test_parse_vars_bad(self):
inst = self._makeOne('development.ini', 'a')
self.assertRaises(ValueError, inst.run)
def test_config_file_finds_watch_files(self):
inst = self._makeOne('development.ini')
loader = self.loader('/base/path.ini')
loader.settings = {
'pserve': {'watch_files': 'foo\n/baz\ntests.test_scripts:*.py'}
}
inst.pserve_file_config(loader, global_conf={'a': '1'})
self.assertEqual(loader.calls[0]['defaults'], {'a': '1'})
self.assertEqual(
inst.watch_files,
{
os.path.abspath('/base/foo'),
os.path.abspath('/baz'),
os.path.abspath(os.path.join(here, '*.py')),
},
)
def test_config_file_finds_open_url(self):
inst = self._makeOne('development.ini')
loader = self.loader('/base/path.ini')
loader.settings = {'pserve': {'open_url': 'http://127.0.0.1:8080/'}}
inst.pserve_file_config(loader, global_conf={'a': '1'})
self.assertEqual(loader.calls[0]['defaults'], {'a': '1'})
self.assertEqual(inst.open_url, 'http://127.0.0.1:8080/')
def test_guess_server_url(self):
inst = self._makeOne('development.ini')
loader = self.loader('/base/path.ini')
loader.settings = {'server:foo': {'port': '8080'}}
url = inst.guess_server_url(loader, 'foo', global_conf={'a': '1'})
self.assertEqual(loader.calls[0]['defaults'], {'a': '1'})
self.assertEqual(url, 'http://127.0.0.1:8080')
def test_reload_call_hupper_with_correct_args(self):
from pyramid.scripts import pserve
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__dict__ = self
def dummy_start_reloader(*args, **kwargs):
dummy_start_reloader.args = args
dummy_start_reloader.kwargs = kwargs
orig_hupper = pserve.hupper
try:
pserve.hupper = AttrDict(
is_active=lambda: False, start_reloader=dummy_start_reloader
)
inst = self._makeOne('--reload', 'development.ini')
inst.run()
finally:
pserve.hupper = orig_hupper
self.assertEqual(
dummy_start_reloader.args, ('pyramid.scripts.pserve.main',)
)
self.assertEqual(
dummy_start_reloader.kwargs,
{
'reload_interval': 1,
'verbose': 1,
'worker_kwargs': {
'argv': ['pserve', '--reload', 'development.ini'],
'quiet': False,
'original_ignore_files': set(),
},
'ignore_files': set(),
},
)
| TestPServeCommand |
python | tensorflow__tensorflow | tensorflow/python/util/deprecation.py | {
"start": 1395,
"end": 28241
} | class ____(Exception):
"""Raised when setting deprecated names multiple times for the same symbol."""
def _log_deprecation(msg, *args, **kwargs):
"""Raises errors for deprecated methods if in strict mode, warns otherwise."""
if strict_mode.STRICT_MODE:
logging.error(msg, *args, **kwargs)
raise RuntimeError(
'This behavior has been deprecated, which raises an error in strict'
' mode.'
)
else:
logging.warning(msg, *args, **kwargs)
def _add_deprecated_function_notice_to_docstring(doc, date, instructions):
"""Adds a deprecation notice to a docstring for deprecated functions."""
main_text = [
'THIS FUNCTION IS DEPRECATED. It will be removed %s.'
% ('in a future version' if date is None else ('after %s' % date))
]
if instructions:
main_text.append('Instructions for updating:')
return decorator_utils.add_notice_to_docstring(
doc,
instructions,
'DEPRECATED FUNCTION',
'(deprecated)',
main_text,
notice_type='Deprecated')
def _add_deprecated_arg_notice_to_docstring(doc, date, instructions,
deprecated_names):
"""Adds a deprecation notice to a docstring for deprecated arguments."""
deprecation_string = ', '.join(sorted(deprecated_names))
return decorator_utils.add_notice_to_docstring(
doc,
instructions,
'DEPRECATED FUNCTION ARGUMENTS',
'(deprecated arguments)', [
'SOME ARGUMENTS ARE DEPRECATED: `(%s)`. '
'They will be removed %s.' %
(deprecation_string, 'in a future version' if date is None else
('after %s' % date)), 'Instructions for updating:'
],
notice_type='Deprecated')
def _add_deprecated_arg_value_notice_to_docstring(doc, date, instructions,
deprecated_name_value_dict):
"""Adds a deprecation notice to a docstring for deprecated arguments."""
deprecation_string = ', '.join(
'%s=%r' % (key, value)
for key, value in sorted(deprecated_name_value_dict.items()))
when = 'in a future version' if date is None else ('after %s' % date)
return decorator_utils.add_notice_to_docstring(
doc,
instructions,
'DEPRECATED FUNCTION ARGUMENT VALUES',
'(deprecated argument values)', [
'SOME ARGUMENT VALUES ARE DEPRECATED: `(%s)`. '
'They will be removed %s.' %
(deprecation_string, when), 'Instructions for updating:'
],
notice_type='Deprecated')
def _validate_deprecation_args(date, instructions):
if date is not None and not re.match(r'20\d\d-[01]\d-[0123]\d', date):
raise ValueError(f'Date must be in format YYYY-MM-DD. Received: {date}')
if not instructions:
raise ValueError(
'Don\'t deprecate things without conversion instructions! Specify '
'the `instructions` argument.')
def _call_location(outer=False):
"""Returns call location given level up from current call."""
# Two up: <_call_location>, <_call_location's caller>
# tf_inspect is not required here. Please ignore the lint warning by adding
# DISABLE_IMPORT_INSPECT_CHECK=TRUE to your cl description. Using it caused
# test timeouts (b/189384061).
f = inspect.currentframe().f_back.f_back
parent = f and f.f_back
if outer and parent is not None:
f = parent
return '{}:{}'.format(f.f_code.co_filename, f.f_lineno)
def _safe_eq(a, b):
if a is None or b is None:
return a is None and b is None
return a == b
def _wrap_decorator(wrapped_function, decorator_name):
"""Indicate that one function wraps another.
This decorator wraps a function using `tf_decorator.make_decorator`
so that doc generation scripts can pick up original function
signature.
It would be better to use @functools.wrap decorator, but it would
not update function signature to match wrapped function in Python 2.
Args:
wrapped_function: The function that decorated function wraps.
decorator_name: The name of the decorator.
Returns:
Function that accepts wrapper function as an argument and returns
`TFDecorator` instance.
"""
def wrapper(wrapper_func):
return tf_decorator.make_decorator(wrapped_function, wrapper_func,
decorator_name)
return wrapper
def deprecated_alias(deprecated_name, name, func_or_class, warn_once=True):
"""Deprecate a symbol in favor of a new name with identical semantics.
This function is meant to be used when defining a backwards-compatibility
alias for a symbol which has been moved. For example:
module1.py:
```python
class NewNameForClass: pass
```
module2.py:
```python
import module1
DeprecatedNameForClass = deprecated_alias(
deprecated_name='module2.DeprecatedNameForClass',
name='module1.NewNameForClass',
func_or_class=module1.NewNameForClass)
```
This function works for classes and functions.
For classes, it creates a new class which is functionally identical (it
inherits from the original, and overrides its constructor), but which prints
a deprecation warning when an instance is created. It also adds a deprecation
notice to the class' docstring.
For functions, it returns a function wrapped by `tf_decorator.make_decorator`.
That function prints a warning when used, and has a deprecation notice in its
docstring. This is more or less equivalent (the deprecation warning has
slightly different text) to writing:
```python
@deprecated
def deprecated_alias(original_args):
real_function(original_args)
```
Args:
deprecated_name: The name of the symbol that is being deprecated, to be used
in the warning message. This should be its fully qualified name to avoid
confusion.
name: The name of the symbol that is to be used instead of the deprecated
name. This should be a fully qualified name to avoid confusion.
func_or_class: The (non-deprecated) class or function for which a deprecated
alias should be created.
warn_once: If True (the default), only print a deprecation warning the first
time this function is used, or the class is instantiated.
Returns:
A wrapped version of `func_or_class` which prints a deprecation warning on
use and has a modified docstring.
"""
if tf_inspect.isclass(func_or_class):
# Make a new class with __init__ wrapped in a warning.
class _NewClass(func_or_class): # pylint: disable=missing-docstring
__doc__ = decorator_utils.add_notice_to_docstring(
func_or_class.__doc__,
'Please use %s instead.' % name,
'DEPRECATED CLASS',
'(deprecated)', [('THIS CLASS IS DEPRECATED. '
'It will be removed in a future version. ')],
notice_type='Deprecated')
__name__ = func_or_class.__name__
__module__ = _call_location(outer=True)
@_wrap_decorator(func_or_class.__init__, 'deprecated_alias')
def __init__(self, *args, **kwargs):
if hasattr(_NewClass.__init__, '__func__'):
# Python 2
_NewClass.__init__.__func__.__doc__ = func_or_class.__init__.__doc__
else:
# Python 3
_NewClass.__init__.__doc__ = func_or_class.__init__.__doc__
if _PRINT_DEPRECATION_WARNINGS:
# We're making the alias as we speak. The original may have other
# aliases, so we cannot use it to check for whether it's already been
# warned about.
if _NewClass.__init__ not in _PRINTED_WARNING:
if warn_once:
_PRINTED_WARNING[_NewClass.__init__] = True
_log_deprecation(
'From %s: The name %s is deprecated. Please use %s instead.\n',
_call_location(), deprecated_name, name)
super(_NewClass, self).__init__(*args, **kwargs)
return _NewClass
else:
decorator_utils.validate_callable(func_or_class, 'deprecated')
# Make a wrapper for the original
@functools.wraps(func_or_class)
def new_func(*args, **kwargs): # pylint: disable=missing-docstring
if _PRINT_DEPRECATION_WARNINGS:
# We're making the alias as we speak. The original may have other
# aliases, so we cannot use it to check for whether it's already been
# warned about.
if new_func not in _PRINTED_WARNING:
if warn_once:
_PRINTED_WARNING[new_func] = True
_log_deprecation(
'From %s: The name %s is deprecated. Please use %s instead.\n',
_call_location(), deprecated_name, name)
return func_or_class(*args, **kwargs)
return tf_decorator.make_decorator(
func_or_class, new_func, 'deprecated',
_add_deprecated_function_notice_to_docstring(
func_or_class.__doc__, None, 'Please use %s instead.' % name))
def deprecated_endpoints(*args):
"""Decorator for marking endpoints deprecated.
This decorator does not print deprecation messages.
TODO(annarev): eventually start printing deprecation warnings when
@deprecation_endpoints decorator is added.
Args:
*args: Deprecated endpoint names.
Returns:
A function that takes symbol as an argument and adds
_tf_deprecated_api_names to that symbol.
_tf_deprecated_api_names would be set to a list of deprecated
endpoint names for the symbol.
"""
def deprecated_wrapper(func):
# pylint: disable=protected-access
if '_tf_deprecated_api_names' in func.__dict__:
raise DeprecatedNamesAlreadySetError(
f'Cannot set deprecated names for {func.__name__} to {args}. '
'Deprecated names are already set to '
f'{func._tf_deprecated_api_names}.')
func._tf_deprecated_api_names = args
# pylint: disable=protected-access
return func
return deprecated_wrapper
def deprecated(date, instructions, warn_once=True):
"""Decorator for marking functions or methods deprecated.
This decorator logs a deprecation warning whenever the decorated function is
called. It has the following format:
<function> (from <module>) is deprecated and will be removed after <date>.
Instructions for updating:
<instructions>
If `date` is None, 'after <date>' is replaced with 'in a future version'.
<function> will include the class name if it is a method.
It also edits the docstring of the function: ' (deprecated)' is appended
to the first line of the docstring and a deprecation notice is prepended
to the rest of the docstring.
Args:
date: String or None. The date the function is scheduled to be removed. Must
be ISO 8601 (YYYY-MM-DD), or None.
instructions: String. Instructions on how to update code using the
deprecated function.
warn_once: Boolean. Set to `True` to warn only the first time the decorated
function is called. Otherwise, every call will log a warning.
Returns:
Decorated function or method.
Raises:
ValueError: If date is not None or in ISO 8601 format, or instructions are
empty.
"""
_validate_deprecation_args(date, instructions)
def deprecated_wrapper(func_or_class):
"""Deprecation wrapper."""
if isinstance(func_or_class, type):
# If a class is deprecated, you actually want to wrap the constructor.
cls = func_or_class
if cls.__new__ is object.__new__:
# If a class defaults to its parent's constructor, wrap that instead.
func = cls.__init__
constructor_name = '__init__'
decorators, _ = tf_decorator.unwrap(func)
for decorator in decorators:
if decorator.decorator_name == 'deprecated':
# If the parent is already deprecated, there's nothing to do.
return cls
else:
func = cls.__new__
constructor_name = '__new__'
else:
cls = None
constructor_name = None
func = func_or_class
decorator_utils.validate_callable(func, 'deprecated')
@_wrap_decorator(func, 'deprecated')
def new_func(*args, **kwargs): # pylint: disable=missing-docstring
if _PRINT_DEPRECATION_WARNINGS:
if func not in _PRINTED_WARNING and cls not in _PRINTED_WARNING:
if warn_once:
_PRINTED_WARNING[func] = True
if cls:
_PRINTED_WARNING[cls] = True
_log_deprecation(
'From %s: %s (from %s) is deprecated and will be removed %s.\n'
'Instructions for updating:\n%s', _call_location(),
decorator_utils.get_qualified_name(func),
func_or_class.__module__,
'in a future version' if date is None else ('after %s' % date),
instructions)
return func(*args, **kwargs)
doc_controls.set_deprecated(new_func)
new_func = tf_decorator.make_decorator(
func, new_func, 'deprecated',
_add_deprecated_function_notice_to_docstring(func.__doc__, date,
instructions))
new_func.__signature__ = inspect.signature(func)
if cls is None:
return new_func
else:
# Insert the wrapped function as the constructor
setattr(cls, constructor_name, new_func)
# And update the docstring of the class.
cls.__doc__ = _add_deprecated_function_notice_to_docstring(
cls.__doc__, date, instructions)
return cls
return deprecated_wrapper
DeprecatedArgSpec = collections.namedtuple(
'DeprecatedArgSpec', ['position', 'has_ok_value', 'ok_value'])
def deprecated_args(date, instructions, *deprecated_arg_names_or_tuples,
**kwargs):
"""Decorator for marking specific function arguments as deprecated.
This decorator logs a deprecation warning whenever the decorated function is
called with the deprecated argument. It has the following format:
Calling <function> (from <module>) with <arg> is deprecated and will be
removed after <date>. Instructions for updating:
<instructions>
If `date` is None, 'after <date>' is replaced with 'in a future version'.
<function> includes the class name if it is a method.
It also edits the docstring of the function: ' (deprecated arguments)' is
appended to the first line of the docstring and a deprecation notice is
prepended to the rest of the docstring.
Args:
date: String or None. The date the function is scheduled to be removed. Must
be ISO 8601 (YYYY-MM-DD), or None.
instructions: String. Instructions on how to update code using the
deprecated function.
*deprecated_arg_names_or_tuples: String or 2-Tuple (String, ok_val). The
string is the deprecated argument name. Optionally, an ok-value may be
provided. If the user provided argument equals this value, the warning is
suppressed.
**kwargs: If `warn_once=False` is passed, every call with a deprecated
argument will log a warning. The default behavior is to only warn the
first time the function is called with any given deprecated argument. All
other kwargs raise `ValueError`.
Returns:
Decorated function or method.
Raises:
ValueError: If date is not None or in ISO 8601 format, instructions are
empty, the deprecated arguments are not present in the function
signature, the second element of a deprecated_tuple is not a
list, or if a kwarg other than `warn_once` is passed.
"""
_validate_deprecation_args(date, instructions)
if not deprecated_arg_names_or_tuples:
raise ValueError('Specify which argument is deprecated.')
if kwargs and list(kwargs.keys()) != ['warn_once']:
kwargs.pop('warn_once', None)
raise ValueError(f'Illegal argument passed to deprecated_args: {kwargs}')
warn_once = kwargs.get('warn_once', True)
def _get_arg_names_to_ok_vals():
"""Returns a dict mapping arg_name to DeprecatedArgSpec w/o position."""
d = {}
for name_or_tuple in deprecated_arg_names_or_tuples:
if isinstance(name_or_tuple, tuple):
d[name_or_tuple[0]] = DeprecatedArgSpec(-1, True, name_or_tuple[1])
else:
d[name_or_tuple] = DeprecatedArgSpec(-1, False, None)
return d
def _get_deprecated_positional_arguments(names_to_ok_vals, arg_spec):
"""Builds a dictionary from deprecated arguments to their spec.
Returned dict is keyed by argument name.
Each value is a DeprecatedArgSpec with the following fields:
position: The zero-based argument position of the argument
within the signature. None if the argument isn't found in
the signature.
ok_values: Values of this argument for which warning will be
suppressed.
Args:
names_to_ok_vals: dict from string arg_name to a list of values, possibly
empty, which should not elicit a warning.
arg_spec: Output from tf_inspect.getfullargspec on the called function.
Returns:
Dictionary from arg_name to DeprecatedArgSpec.
"""
# Extract argument list
arg_space = arg_spec.args + arg_spec.kwonlyargs
arg_name_to_pos = {name: pos for pos, name in enumerate(arg_space)}
deprecated_positional_args = {}
for arg_name, spec in iter(names_to_ok_vals.items()):
if arg_name in arg_name_to_pos:
pos = arg_name_to_pos[arg_name]
deprecated_positional_args[arg_name] = DeprecatedArgSpec(
pos, spec.has_ok_value, spec.ok_value)
return deprecated_positional_args
deprecated_arg_names = _get_arg_names_to_ok_vals()
def deprecated_wrapper(func):
"""Deprecation decorator."""
decorator_utils.validate_callable(func, 'deprecated_args')
arg_spec = tf_inspect.getfullargspec(func)
deprecated_positions = _get_deprecated_positional_arguments(
deprecated_arg_names, arg_spec)
is_varargs_deprecated = arg_spec.varargs in deprecated_arg_names
is_kwargs_deprecated = arg_spec.varkw in deprecated_arg_names
if (len(deprecated_positions) + is_varargs_deprecated + is_kwargs_deprecated
!= len(deprecated_arg_names_or_tuples)):
known_args = (
arg_spec.args + arg_spec.kwonlyargs +
[arg_spec.varargs, arg_spec.varkw])
missing_args = [
arg_name for arg_name in deprecated_arg_names
if arg_name not in known_args
]
raise ValueError('The following deprecated arguments are not present '
f'in the function signature: {missing_args}. '
'Expected arguments from the following list: '
f'{known_args}.')
def _same_value(a, b):
"""A comparison operation that works for multiple object types.
Returns True for two empty lists, two numeric values with the
same value, etc.
Returns False for (pd.DataFrame, None), and other pairs which
should not be considered equivalent.
Args:
a: value one of the comparison.
b: value two of the comparison.
Returns:
A boolean indicating whether the two inputs are the same value
for the purposes of deprecation.
"""
if a is b:
return True
try:
equality = a == b
if isinstance(equality, bool):
return equality
except TypeError:
return False
return False
@functools.wraps(func)
def new_func(*args, **kwargs):
"""Deprecation wrapper."""
# TODO(apassos) figure out a way to have reasonable performance with
# deprecation warnings and eager mode.
if is_in_graph_mode.IS_IN_GRAPH_MODE() and _PRINT_DEPRECATION_WARNINGS:
invalid_args = []
named_args = tf_inspect.getcallargs(func, *args, **kwargs)
for arg_name, spec in iter(deprecated_positions.items()):
if (spec.position < len(args) and
not (spec.has_ok_value and
_same_value(named_args[arg_name], spec.ok_value))):
invalid_args.append(arg_name)
if is_varargs_deprecated and len(args) > len(arg_spec.args):
invalid_args.append(arg_spec.varargs)
if is_kwargs_deprecated and kwargs:
invalid_args.append(arg_spec.varkw)
for arg_name in deprecated_arg_names:
if (arg_name in kwargs and
not (deprecated_positions[arg_name].has_ok_value and
_same_value(named_args[arg_name],
deprecated_positions[arg_name].ok_value))):
invalid_args.append(arg_name)
for arg_name in invalid_args:
if (func, arg_name) not in _PRINTED_WARNING:
if warn_once:
_PRINTED_WARNING[(func, arg_name)] = True
_log_deprecation(
'From %s: calling %s (from %s) with %s is deprecated and will '
'be removed %s.\nInstructions for updating:\n%s',
_call_location(), decorator_utils.get_qualified_name(func),
func.__module__, arg_name,
'in a future version' if date is None else ('after %s' % date),
instructions)
return func(*args, **kwargs)
doc = _add_deprecated_arg_notice_to_docstring(
func.__doc__, date, instructions, sorted(deprecated_arg_names.keys()))
return tf_decorator.make_decorator(func, new_func, 'deprecated', doc)
return deprecated_wrapper
def deprecated_arg_values(date,
instructions,
warn_once=True,
**deprecated_kwargs):
"""Decorator for marking specific function argument values as deprecated.
This decorator logs a deprecation warning whenever the decorated function is
called with the deprecated argument values. It has the following format:
Calling <function> (from <module>) with <arg>=<value> is deprecated and
will be removed after <date>. Instructions for updating:
<instructions>
If `date` is None, 'after <date>' is replaced with 'in a future version'.
<function> will include the class name if it is a method.
It also edits the docstring of the function: ' (deprecated arguments)' is
appended to the first line of the docstring and a deprecation notice is
prepended to the rest of the docstring.
Args:
date: String or None. The date the function is scheduled to be removed. Must
be ISO 8601 (YYYY-MM-DD), or None
instructions: String. Instructions on how to update code using the
deprecated function.
warn_once: If `True`, warn only the first time this function is called with
deprecated argument values. Otherwise, every call (with a deprecated
argument value) will log a warning.
**deprecated_kwargs: The deprecated argument values.
Returns:
Decorated function or method.
Raises:
ValueError: If date is not None or in ISO 8601 format, or instructions are
empty.
"""
_validate_deprecation_args(date, instructions)
if not deprecated_kwargs:
raise ValueError('Specify which argument values are deprecated.')
def deprecated_wrapper(func):
"""Deprecation decorator."""
decorator_utils.validate_callable(func, 'deprecated_arg_values')
@functools.wraps(func)
def new_func(*args, **kwargs):
"""Deprecation wrapper."""
if _PRINT_DEPRECATION_WARNINGS:
named_args = tf_inspect.getcallargs(func, *args, **kwargs)
for arg_name, arg_value in deprecated_kwargs.items():
if arg_name in named_args and _safe_eq(named_args[arg_name],
arg_value):
if (func, arg_name) not in _PRINTED_WARNING:
if warn_once:
_PRINTED_WARNING[(func, arg_name)] = True
_log_deprecation(
'From %s: calling %s (from %s) with %s=%s is deprecated and '
'will be removed %s.\nInstructions for updating:\n%s',
_call_location(), decorator_utils.get_qualified_name(func),
func.__module__, arg_name, arg_value,
'in a future version' if date is None else
('after %s' % date), instructions)
return func(*args, **kwargs)
doc = _add_deprecated_arg_value_notice_to_docstring(func.__doc__, date,
instructions,
deprecated_kwargs)
return tf_decorator.make_decorator(func, new_func, 'deprecated', doc)
return deprecated_wrapper
def deprecated_argument_lookup(new_name, new_value, old_name, old_value):
"""Looks up deprecated argument name and ensures both are not used.
Args:
new_name: new name of argument
new_value: value of new argument (or None if not used)
old_name: old name of argument
old_value: value of old argument (or None if not used)
Returns:
The effective argument that should be used.
Raises:
ValueError: if new_value and old_value are both non-null
"""
if old_value is not None:
if new_value is not None:
raise ValueError(f"Cannot specify both '{old_name}' and '{new_name}'.")
return old_value
return new_value
def rewrite_argument_docstring(old_doc, old_argument, new_argument):
if old_doc is None:
return None
return old_doc.replace('`%s`' % old_argument,
'`%s`' % new_argument).replace('%s:' % old_argument,
'%s:' % new_argument)
@tf_contextlib.contextmanager
def silence():
"""Temporarily silence deprecation warnings."""
global _PRINT_DEPRECATION_WARNINGS
print_deprecation_warnings = _PRINT_DEPRECATION_WARNINGS
_PRINT_DEPRECATION_WARNINGS = False
yield
_PRINT_DEPRECATION_WARNINGS = print_deprecation_warnings
def deprecate_moved_module(deprecated_name, new_module, deletion_version):
"""Logs a warning when a module that has been moved to a new location is used.
Copy the following code into the old module:
```
import deprecation
import new_module
__getattr__ = deprecation.deprecate_moved_module(
__name__, new_module, "2.9") # adjust version number.
```
Args:
deprecated_name: Name of old module.
new_module: Module to replace the old module.
deletion_version: Version of TensorFlow in which the old module will be
removed.
Returns:
A function that logs a warning and returns the symbol from the new module.
Set this function as the module's `__getattr__`.
"""
def getter(name):
if getter not in _PRINTED_WARNING and _PRINT_DEPRECATION_WARNINGS:
_PRINTED_WARNING[getter] = True
_log_deprecation(
'Please fix your imports. Module %s has been moved to %s. The old '
'module will be deleted in version %s.', deprecated_name,
new_module.__name__, deletion_version)
return getattr(new_module, name)
return getter
| DeprecatedNamesAlreadySetError |
python | doocs__leetcode | solution/0800-0899/0869.Reordered Power of 2/Solution.py | {
"start": 0,
"end": 400
} | class ____:
def reorderedPowerOf2(self, n: int) -> bool:
def f(x: int) -> List[int]:
cnt = [0] * 10
while x:
x, v = divmod(x, 10)
cnt[v] += 1
return cnt
target = f(n)
i = 1
while i <= 10**9:
if f(i) == target:
return True
i <<= 1
return False
| Solution |
python | Netflix__metaflow | test/unit/inheritance/flows/comprehensive_linear_base.py | {
"start": 373,
"end": 831
} | class ____(BaseA):
"""Middle class with config and decorated step"""
config_b = Config("config_b", default_value={"multiplier": 3, "offset": 100})
@retry(times=2)
@step
def start(self):
"""Start step with retry decorator"""
print(f"Starting with alpha={self.alpha}, beta={self.beta}")
self.start_value = self.alpha + self.beta
print(f"Start value: {self.start_value}")
self.next(self.process)
| BaseB |
python | getsentry__sentry | src/sentry/backup/dependencies.py | {
"start": 3965,
"end": 7208
} | class ____:
"""What other models does this model depend on, and how?"""
# A "dangling" model is one that does not transitively contain a non-nullable `ForeignField`
# reference to at least one of the `RelocationRootModels` listed above.
#
# TODO(getsentry/team-ospo#190): A model may or may not be "dangling" in different
# `ExportScope`s - for example, a model in `RelocationScope.Organization` may have a single,
# non-nullable `ForeignField` reference to a root model in `RelocationScope.Config`. This would
# cause it to be dangling when we do an `ExportScope.Organization` export, but non-dangling if
# we do an `ExportScope.Global` export. HOWEVER, as best as I can tell, this situation does not
# actually exist today, so we can ignore this subtlety for now and just us a boolean here.
dangling: bool | None
foreign_keys: dict[str, ForeignField]
model: type[models.base.Model]
relocation_dependencies: set[type[models.base.Model]]
relocation_scope: RelocationScope | set[RelocationScope]
silos: list[SiloMode]
table_name: str
uniques: list[frozenset[str]]
def flatten(self) -> set[type[models.base.Model]]:
"""Returns a flat list of all related models, omitting the kind of relation they have."""
return {ff.model for ff in self.foreign_keys.values()}
def get_possible_relocation_scopes(self) -> set[RelocationScope]:
from sentry.db.models import BaseModel
if issubclass(self.model, BaseModel):
return self.model.get_possible_relocation_scopes()
return set()
def get_dependencies_for_relocation(self) -> set[type[models.base.Model]]:
return self.flatten().union(self.relocation_dependencies)
def get_uniques_without_foreign_keys(self) -> list[frozenset[str]]:
"""
Gets all unique sets (that is, either standalone fields that are marked `unique=True`, or
groups of fields listed in `Meta.unique_together`) for a model, as long as those sets do not
include any fields that are foreign keys. Note that the `id` field would be trivially
included in this list for every model, and is therefore ignored.
"""
out = []
for u in self.uniques:
# Exclude unique sets that are just {"id"}, since this is true for every model and not
# very useful when searching for potential collisions.
if u == {"id"}:
continue
has_foreign_key = False
for field in u:
if self.foreign_keys.get(field):
has_foreign_key = True
break
if not has_foreign_key:
out.append(u)
return out
def get_model_name(model: type[models.Model] | models.Model) -> NormalizedModelName:
return NormalizedModelName(f"{model._meta.app_label}.{model._meta.object_name}")
def get_model(model_name: NormalizedModelName) -> type[models.base.Model] | None:
"""
Given a standardized model name string, retrieve the matching Sentry model.
"""
for model in sorted_dependencies():
if get_model_name(model) == model_name:
return model
return None
| ModelRelations |
python | kamyu104__LeetCode-Solutions | Python/minimum-length-of-string-after-operations.py | {
"start": 48,
"end": 357
} | class ____(object):
def minimumLength(self, s):
"""
:type s: str
:rtype: int
"""
cnt = [0]*26
for x in s:
cnt[ord(x)-ord('a')] += 1
return sum(2-x%2 for x in cnt if x)
# Time: O(n)
# Space: O(26)
import collections
# freq table
| Solution |
python | mkdocstrings__mkdocstrings | src/mkdocstrings/_internal/handlers/rendering.py | {
"start": 8458,
"end": 9754
} | class ____(Treeprocessor):
"""Records the heading elements encountered in the document."""
name: str = "mkdocstrings_headings_list"
"""The name of the treeprocessor."""
regex: re.Pattern = re.compile(r"[Hh][1-6]")
"""The regex to match heading tags."""
headings: list[Element]
"""The list (the one passed in the initializer) that is used to record the heading elements (by appending to it)."""
def __init__(self, md: Markdown, headings: list[Element]):
super().__init__(md)
self.headings = headings
def run(self, root: Element) -> None:
"""Record all heading elements encountered in the document."""
permalink_class = self.md.treeprocessors["toc"].permalink_class # type: ignore[attr-defined]
for el in root.iter():
if self.regex.fullmatch(el.tag):
el = copy.copy(el) # noqa: PLW2901
# 'toc' extension's first pass (which we require to build heading stubs/ids) also edits the HTML.
# Undo the permalink edit so we can pass this heading to the outer pass of the 'toc' extension.
if len(el) > 0 and el[-1].get("class") == permalink_class:
del el[-1]
self.headings.append(el)
| _HeadingReportingTreeprocessor |
python | Lightning-AI__lightning | src/lightning/fabric/accelerators/accelerator.py | {
"start": 729,
"end": 2046
} | class ____(ABC):
"""The Accelerator base class.
An Accelerator is meant to deal with one type of hardware.
.. warning:: Writing your own accelerator is an :ref:`experimental <versioning:Experimental API>` feature.
"""
@abstractmethod
def setup_device(self, device: torch.device) -> None:
"""Create and prepare the device for the current process."""
@abstractmethod
def teardown(self) -> None:
"""Clean up any state created by the accelerator."""
@staticmethod
@abstractmethod
def parse_devices(devices: Any) -> Any:
"""Accelerator device parsing logic."""
@staticmethod
@abstractmethod
def get_parallel_devices(devices: Any) -> Any:
"""Gets parallel devices for the Accelerator."""
@staticmethod
@abstractmethod
def auto_device_count() -> int:
"""Get the device count when set to auto."""
@staticmethod
@abstractmethod
def is_available() -> bool:
"""Detect if the hardware is available."""
@staticmethod
@abstractmethod
def name() -> str:
"""The name of the accelerator."""
@classmethod
def register_accelerators(cls, accelerator_registry: _AcceleratorRegistry) -> None:
"""Register the accelerator with the registry."""
pass
| Accelerator |
python | huggingface__transformers | tests/tokenization/test_tokenization_fast.py | {
"start": 10634,
"end": 13447
} | class ____(unittest.TestCase):
def test_local_versioning(self):
tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
json_tokenizer = json.loads(tokenizer._tokenizer.to_str())
json_tokenizer["model"]["vocab"]["huggingface"] = len(tokenizer)
with tempfile.TemporaryDirectory() as tmp_dir:
# Hack to save this in the tokenizer_config.json
tokenizer.init_kwargs["fast_tokenizer_files"] = ["tokenizer.4.0.0.json"]
tokenizer.save_pretrained(tmp_dir)
json.dump(json_tokenizer, open(os.path.join(tmp_dir, "tokenizer.4.0.0.json"), "w"))
# This should pick the new tokenizer file as the version of Transformers is > 4.0.0
new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
self.assertEqual(len(new_tokenizer), len(tokenizer) + 1)
json_tokenizer = json.loads(new_tokenizer._tokenizer.to_str())
self.assertIn("huggingface", json_tokenizer["model"]["vocab"])
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old tokenizer file as the version of Transformers is < 4.0.0
shutil.move(os.path.join(tmp_dir, "tokenizer.4.0.0.json"), os.path.join(tmp_dir, "tokenizer.42.0.0.json"))
tokenizer.init_kwargs["fast_tokenizer_files"] = ["tokenizer.42.0.0.json"]
tokenizer.save_pretrained(tmp_dir)
new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
self.assertEqual(len(new_tokenizer), len(tokenizer))
json_tokenizer = json.loads(new_tokenizer._tokenizer.to_str())
self.assertNotIn("huggingface", json_tokenizer["model"]["vocab"])
def test_repo_versioning(self):
# This repo has two tokenizer files, one for v4.0.0 and above with an added token, one for versions lower.
repo = "hf-internal-testing/test-two-tokenizers"
# This should pick the new tokenizer file as the version of Transformers is > 4.0.0
tokenizer = AutoTokenizer.from_pretrained(repo)
self.assertEqual(len(tokenizer), 28997)
json_tokenizer = json.loads(tokenizer._tokenizer.to_str())
self.assertIn("huggingface", json_tokenizer["model"]["vocab"])
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
old_transformers.tokenization_utils_base.__version__ = "3.0.0"
old_tokenizer = old_transformers.models.auto.AutoTokenizer.from_pretrained(repo)
self.assertEqual(len(old_tokenizer), 28996)
json_tokenizer = json.loads(old_tokenizer._tokenizer.to_str())
self.assertNotIn("huggingface", json_tokenizer["model"]["vocab"])
@require_tokenizers
| TokenizerVersioningTest |
python | sanic-org__sanic | sanic/exceptions.py | {
"start": 15828,
"end": 16749
} | class ____(BadRequest):
"""400 Bad Request
Args:
message (Optional[Union[str, bytes]], optional): The message to be sent to the client. If `None`
then the HTTP status 'Bad Request' will be sent. Defaults to `None`.
quiet (Optional[bool], optional): When `True`, the error traceback will be suppressed
from the logs. Defaults to `None`.
context (Optional[Dict[str, Any]], optional): Additional mapping of key/value data that will be
sent to the client upon exception. Defaults to `None`.
extra (Optional[Dict[str, Any]], optional): Additional mapping of key/value data that will NOT be
sent to the client when in PRODUCTION mode. Defaults to `None`.
headers (Optional[Dict[str, Any]], optional): Additional headers that should be sent with the HTTP
response. Defaults to `None`.
""" # noqa: E501
| HeaderNotFound |
python | python__mypy | mypyc/test-data/fixtures/ir.py | {
"start": 7784,
"end": 8195
} | class ____(int):
def __init__(self, o: object = ...) -> None: ...
@overload
def __and__(self, n: bool) -> bool: ...
@overload
def __and__(self, n: int) -> int: ...
@overload
def __or__(self, n: bool) -> bool: ...
@overload
def __or__(self, n: int) -> int: ...
@overload
def __xor__(self, n: bool) -> bool: ...
@overload
def __xor__(self, n: int) -> int: ...
| bool |
python | getsentry__sentry | tests/sentry/integrations/slack/webhooks/actions/__init__.py | {
"start": 262,
"end": 10063
} | class ____(APITestCase):
def setUp(self) -> None:
super().setUp()
self.external_id = "slack:1"
self.integration = install_slack(self.organization)
self.idp = add_identity(self.integration, self.user, self.external_id)
self.trigger_id = "13345224609.738474920.8088930838d88f008e0"
self.response_url = (
"https://hooks.slack.com/actions/T47563693/6204672533/x7ZLaiVMoECAW50Gw1ZYAXEM"
)
self.project = self.create_project()
self.rule = self.create_project_rule(project=self.project)
@pytest.fixture(autouse=True)
def mock_webhook_send(self):
with patch(
"slack_sdk.webhook.WebhookClient.send",
return_value=WebhookResponse(
url="",
body='{"ok": True}',
headers={},
status_code=200,
),
) as self.mock_post:
yield
@pytest.fixture(autouse=True)
def mock_view_open(self):
with patch(
"slack_sdk.web.client.WebClient.views_open",
return_value=SlackResponse(
client=None,
http_verb="POST",
api_url="https://api.slack.com/methods/views.open",
req_args={},
data={"ok": True},
headers={},
status_code=200,
),
) as self._mock_view_open:
yield
@pytest.fixture(autouse=True)
def mock_view_update(self):
with patch(
"slack_sdk.web.client.WebClient.views_update",
return_value=SlackResponse(
client=None,
http_verb="POST",
api_url="https://api.slack.com/methods/views.update",
req_args={},
data={"ok": True},
headers={},
status_code=200,
),
) as self._mock_view_update:
yield
@patch(
"sentry.integrations.slack.requests.base.SlackRequest._check_signing_secret",
return_value=True,
)
def post_webhook(
self,
check_signing_secret_mock,
action_data=None,
type="event_callback",
data=None,
team_id="TXXXXXXX1",
callback_id=None,
slack_user=None,
original_message=None,
):
if slack_user is None:
slack_user = {"id": self.external_id, "domain": "example"}
if callback_id is None:
callback_id = orjson.dumps({"issue": self.group.id, "rule": self.rule.id}).decode()
if original_message is None:
original_message = {}
payload = {
"team": {"id": team_id, "domain": "example.com"},
"channel": {"id": "C065W1189", "domain": "forgotten-works"},
"user": slack_user,
"callback_id": callback_id,
"action_ts": "1458170917.164398",
"message_ts": "1458170866.000004",
"original_message": original_message,
"trigger_id": self.trigger_id,
"response_url": self.response_url,
"attachment_id": "1",
"actions": action_data or [],
"type": type,
}
if data:
payload.update(data)
payload = {"payload": orjson.dumps(payload).decode()}
return self.client.post("/extensions/slack/action/", data=payload)
@patch(
"sentry.integrations.slack.requests.base.SlackRequest._check_signing_secret",
return_value=True,
)
def post_webhook_block_kit(
self,
check_signing_secret_mock,
action_data=None,
type="block_actions",
data=None,
team_id="TXXXXXXX1",
slack_user=None,
original_message=None,
selected_option=None,
view=None,
private_metadata=None,
callback_id=None,
):
"""Respond as if we were Slack"""
if slack_user is None:
slack_user = {
"id": self.external_id,
"name": "colleen",
"username": "colleen",
"team_id": team_id,
}
if original_message is None:
original_message = {}
if callback_id is None:
callback_id = orjson.dumps({"issue": self.group.id, "rule": self.rule.id}).decode()
payload = {
"type": type,
"team": {
"id": team_id,
"domain": "hb-meowcraft",
},
"user": slack_user,
"api_app_id": "A058NGW5NDP",
"token": "6IM9MzJR4Ees5x4jkW29iKbj",
"trigger_id": self.trigger_id,
"view": view,
"response_urls": [],
"enterprise": None,
"is_enterprise_install": False,
"callback_id": callback_id,
}
if type == "view_submission":
view = {
"id": "V069MCJ1Y4X",
"team_id": team_id,
"type": "modal",
"blocks": [
{
"type": "section",
"block_id": "a6HD+",
"text": {"type": "mrkdwn", "text": "Resolve", "verbatim": False},
"accessory": {
"type": "static_select",
"action_id": "static_select-action",
"initial_option": {
"text": {
"type": "plain_text",
"text": "Immediately",
"emoji": True,
},
"value": "resolved",
},
"options": [
{
"text": {
"type": "plain_text",
"text": "Immediately",
"emoji": True,
},
"value": "resolved",
},
{
"text": {
"type": "plain_text",
"text": "In the next release",
"emoji": True,
},
"value": "resolved:inNextRelease",
},
{
"text": {
"type": "plain_text",
"text": "In the current release",
"emoji": True,
},
"value": "resolved:inCurrentRelease",
},
],
},
}
],
"private_metadata": private_metadata,
"state": {
"values": {
"a6HD+": {
"static_select-action": {
"type": "static_select",
"selected_option": {
"text": {
"type": "plain_text",
"text": "Immediately",
"emoji": True,
},
"value": selected_option,
},
}
}
}
},
"hash": "1702502121.CZNlXHKw",
"title": {"type": "plain_text", "text": "Resolve Issue", "emoji": True},
"clear_on_close": False,
"notify_on_close": False,
"close": {"type": "plain_text", "text": "Cancel", "emoji": True},
"submit": {"type": "plain_text", "text": "Resolve", "emoji": True},
"previous_view_id": None,
"root_view_id": "V069MCJ1Y4X",
"app_id": "A058NGW5NDP",
"external_id": "",
"app_installed_team_id": "TA17GH2QL",
"bot_id": "B058CDV2LKW",
}
payload["response_urls"] = []
payload["view"] = view
payload["type"] = type
elif type == "block_actions":
payload["container"] = {
"type": "message",
"message_ts": "1702424381.221719",
"channel_id": "C065W1189",
"is_ephemeral": False,
}
payload["channel"] = {
"id": "C065W1189",
"name": "general",
}
payload["message"] = original_message
payload["state"] = {
"values": {
"bXwil": {
"assign": {
"type": "static_select",
"selected_option": selected_option,
}
}
}
}
payload["response_url"] = self.response_url
payload["actions"] = action_data or []
payload["type"] = type
if data:
payload.update(data)
payload = {"payload": orjson.dumps(payload).decode()}
return self.client.post("/extensions/slack/action/", data=payload)
| BaseEventTest |
python | getsentry__sentry | tests/sentry/issues/endpoints/test_organization_group_index.py | {
"start": 173668,
"end": 180417
} | class ____(APITestCase, SnubaTestCase):
endpoint = "sentry-api-0-organization-group-index"
method = "delete"
def get_response(self, *args: Any, **kwargs: Any) -> Response:
if not args:
org = self.project.organization.slug
else:
org = args[0]
return super().get_response(org, **kwargs)
def assert_deleted_groups(self, groups: Sequence[Group]) -> None:
for group in groups:
assert not Group.objects.filter(id=group.id).exists()
assert not GroupHash.objects.filter(group_id=group.id).exists()
@patch("sentry.eventstream.snuba.SnubaEventStream._send")
@patch("sentry.eventstream.snuba.datetime")
def test_delete_by_id(self, mock_datetime: MagicMock, mock_send: MagicMock) -> None:
fixed_datetime = datetime.now()
mock_datetime.now.return_value = fixed_datetime
groups = self.create_n_groups_with_hashes(2, project=self.project)
group_ids = [group.id for group in groups]
self.login_as(user=self.user)
with self.tasks():
response = self.get_response(qs_params={"id": group_ids})
assert response.status_code == 204
# Extract transaction_id from the first call
transaction_id = mock_send.call_args_list[0][1]["extra_data"][0]["transaction_id"]
assert mock_send.call_args_list == [
call(
self.project.id,
"start_delete_groups",
extra_data=(
{
"transaction_id": transaction_id,
"project_id": self.project.id,
"group_ids": group_ids,
"datetime": json.datetime_to_str(fixed_datetime),
},
),
asynchronous=False,
),
call(
self.project.id,
"end_delete_groups",
extra_data=(
{
"transaction_id": transaction_id,
"project_id": self.project.id,
"group_ids": group_ids,
"datetime": json.datetime_to_str(fixed_datetime),
},
),
asynchronous=False,
),
]
for group in groups:
assert not Group.objects.filter(id=group.id).exists()
assert not GroupHash.objects.filter(group_id=group.id).exists()
@patch("sentry.eventstream.backend")
def test_delete_performance_issue_by_id(self, mock_eventstream: MagicMock) -> None:
eventstream_state = {"event_stream_state": str(uuid4())}
mock_eventstream.start_delete_groups = Mock(return_value=eventstream_state)
group1 = self.create_group(
status=GroupStatus.RESOLVED, type=PerformanceSlowDBQueryGroupType.type_id
)
group2 = self.create_group(
status=GroupStatus.UNRESOLVED, type=PerformanceSlowDBQueryGroupType.type_id
)
hashes = []
for g in group1, group2:
hash = uuid4().hex
hashes.append(hash)
GroupHash.objects.create(project=g.project, hash=hash, group=g)
self.login_as(user=self.user)
with self.tasks():
response = self.get_response(qs_params={"id": [group1.id, group2.id]})
assert response.status_code == 204
self.assert_deleted_groups([group1, group2])
def test_bulk_delete_for_many_projects_without_option(self) -> None:
NEW_CHUNK_SIZE = 2
project_2 = self.create_project(slug="baz", organization=self.organization)
groups_1 = self.create_n_groups_with_hashes(2, project=self.project)
groups_2 = self.create_n_groups_with_hashes(5, project=project_2)
with (
self.tasks(),
patch("sentry.api.helpers.group_index.delete.GROUP_CHUNK_SIZE", NEW_CHUNK_SIZE),
patch("sentry.deletions.tasks.groups.logger") as mock_logger,
patch(
"sentry.api.helpers.group_index.delete.uuid4",
side_effect=[self.get_mock_uuid("foo"), self.get_mock_uuid("bar")],
),
):
self.login_as(user=self.user)
response = self.get_success_response(qs_params={"query": ""})
assert response.status_code == 204
batch_1 = [g.id for g in groups_2[0:2]]
batch_2 = [g.id for g in groups_2[2:4]]
batch_3 = [g.id for g in groups_2[4:]]
assert batch_1 + batch_2 + batch_3 == [g.id for g in groups_2]
calls_by_project: dict[int, list[tuple[str, dict[str, Any]]]] = defaultdict(list)
for log_call in mock_logger.info.call_args_list:
calls_by_project[log_call[1]["extra"]["project_id"]].append(log_call)
assert len(calls_by_project) == 2
assert calls_by_project[self.project.id] == [
call(
"delete_groups.started",
extra={
"object_ids": [g.id for g in groups_1],
"project_id": self.project.id,
"transaction_id": "bar",
},
),
]
assert calls_by_project[project_2.id] == [
call(
"delete_groups.started",
extra={
"object_ids": batch_1,
"project_id": project_2.id,
"transaction_id": "foo",
},
),
call(
"delete_groups.started",
extra={
"object_ids": batch_2,
"project_id": project_2.id,
"transaction_id": "foo",
},
),
call(
"delete_groups.started",
extra={
"object_ids": batch_3,
"project_id": project_2.id,
"transaction_id": "foo",
},
),
]
self.assert_deleted_groups(groups_1 + groups_2)
def test_bulk_delete_performance_issues(self) -> None:
groups = self.create_n_groups_with_hashes(
20, self.project, PerformanceSlowDBQueryGroupType.type_id
)
self.login_as(user=self.user)
with self.tasks():
# if query is '' it defaults to is:unresolved
response = self.get_response(qs_params={"query": ""})
assert response.status_code == 204
self.assert_deleted_groups(groups)
| GroupDeleteTest |
python | fluentpython__example-code | 19-dyn-attr-prop/oscon/schedule1.py | {
"start": 587,
"end": 1099
} | class ____:
def __init__(self, **kwargs):
self.__dict__.update(kwargs) # <2>
def load_db(db):
raw_data = osconfeed.load() # <3>
warnings.warn('loading ' + DB_NAME)
for collection, rec_list in raw_data['Schedule'].items(): # <4>
record_type = collection[:-1] # <5>
for record in rec_list:
key = '{}.{}'.format(record_type, record['serial']) # <6>
record['serial'] = key # <7>
db[key] = Record(**record) # <8>
# END SCHEDULE1
| Record |
python | hyperopt__hyperopt | hyperopt/tests/test_base.py | {
"start": 7051,
"end": 8040
} | class ____(unittest.TestCase):
def SONify(self, foo):
rval = SONify(foo)
assert bson.BSON.encode(dict(a=rval))
return rval
def test_int(self):
assert self.SONify(1) == 1
def test_float(self):
assert self.SONify(1.1) == 1.1
def test_np_1d_int(self):
assert np.all(self.SONify(np.asarray([1, 2, 3])) == [1, 2, 3])
def test_np_1d_float(self):
assert np.all(self.SONify(np.asarray([1, 2, 3.4])) == [1, 2, 3.4])
def test_np_1d_str(self):
assert np.all(self.SONify(np.asarray(["a", "b", "ccc"])) == ["a", "b", "ccc"])
def test_np_2d_int(self):
assert np.all(self.SONify(np.asarray([[1, 2], [3, 4]])) == [[1, 2], [3, 4]])
def test_np_2d_float(self):
assert np.all(self.SONify(np.asarray([[1, 2], [3, 4.5]])) == [[1, 2], [3, 4.5]])
def test_nested_w_bool(self):
thing = dict(a=1, b="2", c=True, d=False, e=int(3), f=[1])
assert thing == SONify(thing)
| TestSONify |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mssql/aioodbc.py | {
"start": 1642,
"end": 1809
} | class ____(MSExecutionContext_pyodbc):
def create_server_side_cursor(self):
return self._dbapi_connection.cursor(server_side=True)
| MSExecutionContext_aioodbc |
python | joerick__pyinstrument | examples/falcon_hello_file.py | {
"start": 292,
"end": 801
} | class ____:
filename = "pyinstrument-profile"
def __init__(self, interval=0.01):
self.profiler = Profiler(interval=interval)
def process_request(self, req, resp):
self.profiler.start()
def process_response(self, req, resp, resource, req_succeeded):
self.profiler.stop()
filename = f"{self.filename}-{datetime.now().strftime('%m%d%Y-%H%M%S')}.html"
with open(filename, "w") as file:
file.write(self.profiler.output_html())
| ProfilerMiddleware |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.