language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | tiangolo__fastapi | fastapi/openapi/models.py | {
"start": 10978,
"end": 11684
} | class ____(BaseModelWithConfig):
tags: Optional[List[str]] = None
summary: Optional[str] = None
description: Optional[str] = None
externalDocs: Optional[ExternalDocumentation] = None
operationId: Optional[str] = None
parameters: Optional[List[Union[Parameter, Reference]]] = None
requestBody: Optional[Union[RequestBody, Reference]] = None
# Using Any for Specification Extensions
responses: Optional[Dict[str, Union[Response, Any]]] = None
callbacks: Optional[Dict[str, Union[Dict[str, "PathItem"], Reference]]] = None
deprecated: Optional[bool] = None
security: Optional[List[Dict[str, List[str]]]] = None
servers: Optional[List[Server]] = None
| Operation |
python | keras-team__keras | keras/src/utils/tracking.py | {
"start": 9037,
"end": 10761
} | class ____(set):
def __init__(self, values=None, tracker=None):
self.tracker = tracker
if tracker and values:
values = {tracker.track(v) for v in values}
super().__init__(values or [])
def add(self, value):
if self.tracker:
self.tracker.track(value)
super().add(value)
def update(self, values):
if self.tracker:
values = [self.tracker.track(v) for v in values]
super().update(values)
def remove(self, value):
if self.tracker:
self.tracker.untrack(value)
super().remove(value)
def pop(self):
value = super().pop()
if self.tracker:
self.tracker.untrack(value)
return value
def clear(self):
if self.tracker:
for value in self:
self.tracker.untrack(value)
super().clear()
def tree_flatten(self):
# For optree / dmtree
return (self, None)
@classmethod
def tree_unflatten(cls, metadata, children):
# For optree / dmtree
return cls(children)
def torchtree_flatten(self):
# For torchtree
# Returns (values, metadata)
return (self, None)
@classmethod
def torchtree_unflatten(cls, children, metadata):
# For torchtree
# Requires (values, metadata)
return cls(children)
def torchtree_flatten_with_keys(self):
# For torchtree
# Returns (children, metadata)
from torch.utils import _pytree as torch_tree
values, context = self.torchtree_flatten()
return [
(torch_tree.SequenceKey(i), v) for i, v in enumerate(values)
], context
| TrackedSet |
python | huggingface__transformers | src/transformers/models/parakeet/processing_parakeet.py | {
"start": 932,
"end": 1321
} | class ____(ProcessingKwargs, total=False):
_defaults = {
"audio_kwargs": {
"sampling_rate": 16000,
"padding": "longest",
},
"text_kwargs": {
"padding": True,
"padding_side": "right",
"add_special_tokens": False,
},
"common_kwargs": {"return_tensors": "pt"},
}
| ParakeetProcessorKwargs |
python | PyCQA__pylint | tests/functional/n/not_context_manager.py | {
"start": 1460,
"end": 1773
} | class ____:
@property
def ctx(self):
return dec()
@property
def not_ctx(self):
return 42
lala = Property()
with lala.ctx:
# Don't emit when the context manager is the
# result of accessing a property.
pass
with lala.not_ctx: # [not-context-manager]
pass
| Property |
python | aio-libs__aiohttp | aiohttp/client_exceptions.py | {
"start": 3213,
"end": 3308
} | class ____(ClientResponseError):
"""Client was redirected too many times."""
| TooManyRedirects |
python | huggingface__transformers | utils/test_module/custom_image_processing.py | {
"start": 46,
"end": 103
} | class ____(CLIPImageProcessor):
pass
| CustomImageProcessor |
python | ApeWorX__ape | src/ape/types/units.py | {
"start": 383,
"end": 2518
} | class ____(int):
"""
An integer you can compare with currency-value
strings, such as ``"1 ether"``.
"""
def __eq__(self, other: Any) -> bool:
if isinstance(other, int):
return super().__eq__(other)
elif isinstance(other, str):
try:
other_value = ManagerAccessMixin.conversion_manager.convert(other, int)
except ConversionError:
# Not a currency-value, it's ok.
return False
return super().__eq__(other_value)
# Try from the other end, if hasn't already.
return NotImplemented
def __hash__(self) -> int:
return hash(int(self))
@classmethod
def __get_pydantic_core_schema__(cls, value, handler=None) -> CoreSchema:
return no_info_plain_validator_function(
cls._validate,
serialization=plain_serializer_function_ser_schema(
cls._serialize,
info_arg=False,
return_schema=int_schema(),
),
)
@staticmethod
def _validate(value: Any, info: Optional[ValidationInfo] = None) -> "CurrencyValueComparable":
# NOTE: For some reason, for this to work, it has to happen
# in an "after" validator, or else it always only `int` type on the model.
if value is None:
# Will fail if not optional.
# Type ignore because this is an hacky and unlikely situation.
return None # type: ignore
elif isinstance(value, str) and " " in value:
return ManagerAccessMixin.conversion_manager.convert(value, int)
# For models annotating with this type, we validate all integers into it.
return CurrencyValueComparable(value)
@staticmethod
def _serialize(value):
return int(value)
CurrencyValueComparable.__name__ = int.__name__
CurrencyValue: "TypeAlias" = CurrencyValueComparable
"""
An alias to :class:`~ape.types.CurrencyValueComparable` for
situations when you know for sure the type is a currency-value
(and not just comparable to one).
"""
| CurrencyValueComparable |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/daemon_tests/test_freshness_daemon.py | {
"start": 1909,
"end": 16544
} | class ____(ABC):
@abstractmethod
@pytest.fixture
def daemon_instance(self) -> Generator[dg.DagsterInstance, None, None]: ...
@abstractmethod
@pytest.fixture
def freshness_daemon(self) -> FreshnessDaemon: ...
def _assert_freshness_state(
self,
instance: DagsterInstance,
asset_key: AssetKey,
expected_state: FreshnessState,
):
"""Helper method to assert the freshness state of an asset."""
freshness_state_record = instance.get_freshness_state_records([asset_key]).get(asset_key)
assert freshness_state_record is not None
assert freshness_state_record.freshness_state == expected_state
def _assert_freshness_states(
self,
instance: DagsterInstance,
asset_keys: list[str],
expected_state: FreshnessState,
):
"""Helper method to assert the freshness state of multiple assets."""
for asset_key in asset_keys:
self._assert_freshness_state(instance, dg.AssetKey(asset_key), expected_state)
def _materialize_assets(
self,
instance: DagsterInstance,
asset_keys: list[str],
materialize_time: datetime.datetime,
):
"""Helper method to materialize multiple assets at a given time."""
for asset_key in asset_keys:
store_mat(instance, dg.AssetKey(asset_key), materialize_time)
def test_iteration_no_freshness_policies(
self,
daemon_instance: DagsterInstance,
freshness_daemon: FreshnessDaemon,
):
"""Test that freshness daemon is no-op for assets with no freshness policies."""
def create_defs() -> dg.Definitions:
@dg.asset
def asset_1():
return 1
@dg.asset
def asset_2():
return 2
defs = dg.Definitions(assets=[asset_1, asset_2])
return defs
with setup_remote_repo(instance=daemon_instance, fn=create_defs) as workspace_context:
start_time = datetime.datetime.now(datetime.timezone.utc)
frozen_time = start_time
with freeze_time(frozen_time):
run_iter(freshness_daemon, workspace_context)
self._assert_freshness_state(
daemon_instance, dg.AssetKey("asset_1"), FreshnessState.NOT_APPLICABLE
)
self._assert_freshness_state(
daemon_instance, dg.AssetKey("asset_2"), FreshnessState.NOT_APPLICABLE
)
def test_iteration_single_freshness_policy(
self,
daemon_instance: DagsterInstance,
freshness_daemon: FreshnessDaemon,
):
"""Test that freshness daemon evaluates freshness for a single asset."""
def create_defs() -> dg.Definitions:
@dg.asset(
freshness_policy=FreshnessPolicy.time_window(
fail_window=datetime.timedelta(hours=24),
warn_window=datetime.timedelta(hours=12),
)
)
def asset_with_policy():
return 1
@dg.asset
def asset_without_policy():
return 1
defs = dg.Definitions(assets=[asset_with_policy, asset_without_policy])
return defs
with setup_remote_repo(instance=daemon_instance, fn=create_defs) as workspace_context:
# We're going to iterate through the daemon as time progresses.
# At each iteration, we should see the freshness state for asset_with_policy transition
# UNKNOWN -> PASS (when it materializes) -> WARN -> FAIL
# The freshness state for asset_without_policy should always be None, since it doesn't have a freshness policy.
start_time = datetime.datetime(2024, 1, 1, tzinfo=datetime.timezone.utc)
frozen_time = start_time
with freeze_time(frozen_time):
run_iter(freshness_daemon, workspace_context)
self._assert_freshness_state(
daemon_instance, dg.AssetKey("asset_with_policy"), FreshnessState.UNKNOWN
)
self._assert_freshness_state(
daemon_instance,
dg.AssetKey("asset_without_policy"),
FreshnessState.NOT_APPLICABLE,
)
materialize_time = frozen_time + datetime.timedelta(seconds=1)
with freeze_time(materialize_time):
store_mat(daemon_instance, dg.AssetKey("asset_with_policy"), materialize_time)
run_iter(freshness_daemon, workspace_context)
self._assert_freshness_state(
daemon_instance, dg.AssetKey("asset_with_policy"), FreshnessState.PASS
)
# Advance 12 hours and 1 second from start -> WARN
with freeze_time(materialize_time + datetime.timedelta(hours=12, seconds=1)):
run_iter(freshness_daemon, workspace_context)
self._assert_freshness_state(
daemon_instance, dg.AssetKey("asset_with_policy"), FreshnessState.WARN
)
# Advance 24 hours and 1 second from start -> FAIL
with freeze_time(materialize_time + datetime.timedelta(hours=24, seconds=1)):
run_iter(freshness_daemon, workspace_context)
self._assert_freshness_state(
daemon_instance, dg.AssetKey("asset_with_policy"), FreshnessState.FAIL
)
def test_iteration_multiple_freshness_policies(
self,
daemon_instance: DagsterInstance,
freshness_daemon: FreshnessDaemon,
):
"""Test that freshness daemon evaluates freshness for multiple assets with different freshness policies."""
def create_defs() -> dg.Definitions:
@dg.asset(
freshness_policy=FreshnessPolicy.time_window(
fail_window=datetime.timedelta(minutes=60),
warn_window=datetime.timedelta(minutes=30),
)
)
def asset_1():
return 1
@dg.asset(
freshness_policy=FreshnessPolicy.time_window(
fail_window=datetime.timedelta(minutes=120),
warn_window=datetime.timedelta(minutes=60),
)
)
def asset_2():
return 2
@dg.asset(
freshness_policy=FreshnessPolicy.time_window(
fail_window=datetime.timedelta(minutes=30),
warn_window=datetime.timedelta(minutes=15),
)
)
def asset_3():
return 3
defs = dg.Definitions(assets=[asset_1, asset_2, asset_3])
return defs
with setup_remote_repo(instance=daemon_instance, fn=create_defs) as workspace_context:
# We'll test three assets with different freshness policies:
# asset_1: 30min warn, 60min fail
# asset_2: 60min warn, 120min fail
# asset_3: 15min warn, 30min fail
asset_keys = ["asset_1", "asset_2", "asset_3"]
start_time = datetime.datetime(2024, 1, 1, tzinfo=datetime.timezone.utc)
frozen_time = start_time
with freeze_time(frozen_time):
run_iter(freshness_daemon, workspace_context)
self._assert_freshness_states(daemon_instance, asset_keys, FreshnessState.UNKNOWN)
materialize_time = frozen_time + datetime.timedelta(seconds=1)
with freeze_time(materialize_time):
self._materialize_assets(daemon_instance, asset_keys, materialize_time)
run_iter(freshness_daemon, workspace_context)
self._assert_freshness_states(daemon_instance, asset_keys, FreshnessState.PASS)
# Advance 20 minutes - asset_3 should be WARN, others still PASS
with freeze_time(materialize_time + datetime.timedelta(minutes=20)):
run_iter(freshness_daemon, workspace_context)
self._assert_freshness_states(
daemon_instance, ["asset_1", "asset_2"], FreshnessState.PASS
)
self._assert_freshness_state(
daemon_instance, dg.AssetKey("asset_3"), FreshnessState.WARN
)
# Advance 35 minutes - asset_1 should be WARN, asset_3 should be FAIL, asset_2 still PASS
with freeze_time(materialize_time + datetime.timedelta(minutes=35)):
run_iter(freshness_daemon, workspace_context)
self._assert_freshness_state(
daemon_instance, dg.AssetKey("asset_1"), FreshnessState.WARN
)
self._assert_freshness_state(
daemon_instance, dg.AssetKey("asset_2"), FreshnessState.PASS
)
self._assert_freshness_state(
daemon_instance, dg.AssetKey("asset_3"), FreshnessState.FAIL
)
# Advance 65 minutes - asset_1 should be FAIL, asset_2 should be WARN, asset_3 still FAIL
with freeze_time(materialize_time + datetime.timedelta(minutes=65)):
run_iter(freshness_daemon, workspace_context)
self._assert_freshness_state(
daemon_instance, dg.AssetKey("asset_1"), FreshnessState.FAIL
)
self._assert_freshness_state(
daemon_instance, dg.AssetKey("asset_2"), FreshnessState.WARN
)
self._assert_freshness_state(
daemon_instance, dg.AssetKey("asset_3"), FreshnessState.FAIL
)
# Advance 125 minutes - all assets should be FAIL
with freeze_time(materialize_time + datetime.timedelta(minutes=125)):
run_iter(freshness_daemon, workspace_context)
self._assert_freshness_states(daemon_instance, asset_keys, FreshnessState.FAIL)
def test_iteration_multiple_materializations(
self,
daemon_instance: DagsterInstance,
freshness_daemon: FreshnessDaemon,
):
"""Test that freshness daemon correctly evaluates freshness using the most recent materialization."""
def create_defs() -> dg.Definitions:
@dg.asset(
freshness_policy=FreshnessPolicy.time_window(
fail_window=datetime.timedelta(minutes=60),
warn_window=datetime.timedelta(minutes=30),
)
)
def asset_with_multiple_materializations():
return 1
defs = dg.Definitions(assets=[asset_with_multiple_materializations])
return defs
with setup_remote_repo(instance=daemon_instance, fn=create_defs) as workspace_context:
# We'll test an asset that gets materialized multiple times
# and verify that the daemon uses the most recent materialization
start_time = datetime.datetime(2024, 1, 1, tzinfo=datetime.timezone.utc)
frozen_time = start_time
with freeze_time(frozen_time):
run_iter(freshness_daemon, workspace_context)
self._assert_freshness_state(
daemon_instance,
dg.AssetKey("asset_with_multiple_materializations"),
FreshnessState.UNKNOWN,
)
first_materialize_time = frozen_time + datetime.timedelta(seconds=1)
with freeze_time(first_materialize_time):
store_mat(
daemon_instance,
dg.AssetKey("asset_with_multiple_materializations"),
first_materialize_time,
)
run_iter(freshness_daemon, workspace_context)
self._assert_freshness_state(
daemon_instance,
dg.AssetKey("asset_with_multiple_materializations"),
FreshnessState.PASS,
)
# Advance 40 minutes - should be WARN
with freeze_time(first_materialize_time + datetime.timedelta(minutes=40)):
run_iter(freshness_daemon, workspace_context)
self._assert_freshness_state(
daemon_instance,
dg.AssetKey("asset_with_multiple_materializations"),
FreshnessState.WARN,
)
# Second materialization at 45 minutes
second_materialize_time = first_materialize_time + datetime.timedelta(minutes=45)
with freeze_time(second_materialize_time):
store_mat(
daemon_instance,
dg.AssetKey("asset_with_multiple_materializations"),
second_materialize_time,
)
run_iter(freshness_daemon, workspace_context)
self._assert_freshness_state(
daemon_instance,
dg.AssetKey("asset_with_multiple_materializations"),
FreshnessState.PASS,
)
# Advance 20 minutes from second materialization - should still be PASS
with freeze_time(second_materialize_time + datetime.timedelta(minutes=20)):
run_iter(freshness_daemon, workspace_context)
self._assert_freshness_state(
daemon_instance,
dg.AssetKey("asset_with_multiple_materializations"),
FreshnessState.PASS,
)
# Advance 35 minutes from second materialization - should be WARN
with freeze_time(second_materialize_time + datetime.timedelta(minutes=35)):
run_iter(freshness_daemon, workspace_context)
self._assert_freshness_state(
daemon_instance,
dg.AssetKey("asset_with_multiple_materializations"),
FreshnessState.WARN,
)
# Advance 65 minutes from second materialization - should be FAIL
with freeze_time(second_materialize_time + datetime.timedelta(minutes=65)):
run_iter(freshness_daemon, workspace_context)
self._assert_freshness_state(
daemon_instance,
dg.AssetKey("asset_with_multiple_materializations"),
FreshnessState.FAIL,
)
| FreshnessDaemonTests |
python | Pylons__pyramid | src/pyramid/config/security.py | {
"start": 524,
"end": 13796
} | class ____:
def add_default_security(self):
self.set_csrf_storage_policy(LegacySessionCSRFStoragePolicy())
@action_method
def set_security_policy(self, policy):
"""Override the :app:`Pyramid` :term:`security policy` in the current
configuration. The ``policy`` argument must be an instance
of a security policy or a :term:`dotted Python name`
that points at an instance of a security policy.
.. note::
Using the ``security_policy`` argument to the
:class:`pyramid.config.Configurator` constructor can be used to
achieve the same purpose.
"""
def register():
self.registry.registerUtility(policy, ISecurityPolicy)
policy = self.maybe_dotted(policy)
intr = self.introspectable(
'security policy',
None,
self.object_description(policy),
'security policy',
)
intr['policy'] = policy
self.action(
ISecurityPolicy,
register,
order=PHASE2_CONFIG,
introspectables=(intr,),
)
@action_method
def set_authentication_policy(self, policy):
"""
.. deprecated:: 2.0
Authentication policies have been replaced by security policies.
See :ref:`upgrading_auth_20` for more information.
Override the :app:`Pyramid` :term:`authentication policy` in the
current configuration. The ``policy`` argument must be an instance
of an authentication policy or a :term:`dotted Python name`
that points at an instance of an authentication policy.
.. note::
Using the ``authentication_policy`` argument to the
:class:`pyramid.config.Configurator` constructor can be used to
achieve the same purpose.
"""
warnings.warn(
'Authentication and authorization policies have been deprecated '
'in favor of security policies. See "Upgrading '
'Authentication/Authorization" in "What\'s New in Pyramid 2.0" '
'of the documentation for more information.',
DeprecationWarning,
stacklevel=3,
)
def register():
self.registry.registerUtility(policy, IAuthenticationPolicy)
if self.registry.queryUtility(IAuthorizationPolicy) is None:
raise ConfigurationError(
'Cannot configure an authentication policy without '
'also configuring an authorization policy '
'(use the set_authorization_policy method)'
)
if self.registry.queryUtility(ISecurityPolicy) is not None:
raise ConfigurationError(
'Cannot configure an authentication and authorization'
'policy with a configured security policy.'
)
security_policy = LegacySecurityPolicy()
self.registry.registerUtility(security_policy, ISecurityPolicy)
policy = self.maybe_dotted(policy)
intr = self.introspectable(
'authentication policy',
None,
self.object_description(policy),
'authentication policy',
)
intr['policy'] = policy
# authentication policy used by view config (phase 3)
self.action(
IAuthenticationPolicy,
register,
order=PHASE2_CONFIG,
introspectables=(intr,),
)
@action_method
def set_authorization_policy(self, policy):
"""
.. deprecated:: 2.0
Authentication policies have been replaced by security policies.
See :ref:`upgrading_auth_20` for more information.
Override the :app:`Pyramid` :term:`authorization policy` in the
current configuration. The ``policy`` argument must be an instance
of an authorization policy or a :term:`dotted Python name` that points
at an instance of an authorization policy.
.. note::
Using the ``authorization_policy`` argument to the
:class:`pyramid.config.Configurator` constructor can be used to
achieve the same purpose.
"""
warnings.warn(
'Authentication and authorization policies have been deprecated '
'in favor of security policies. See "Upgrading '
'Authentication/Authorization" in "What\'s New in Pyramid 2.0" '
'of the documentation for more information.',
DeprecationWarning,
stacklevel=3,
)
def register():
self.registry.registerUtility(policy, IAuthorizationPolicy)
def ensure():
if self.autocommit:
return
if self.registry.queryUtility(IAuthenticationPolicy) is None:
raise ConfigurationError(
'Cannot configure an authorization policy without '
'also configuring an authentication policy '
'(use the set_authorization_policy method)'
)
policy = self.maybe_dotted(policy)
intr = self.introspectable(
'authorization policy',
None,
self.object_description(policy),
'authorization policy',
)
intr['policy'] = policy
# authorization policy used by view config (phase 3) and
# authentication policy (phase 2)
self.action(
IAuthorizationPolicy,
register,
order=PHASE1_CONFIG,
introspectables=(intr,),
)
self.action(None, ensure)
@action_method
def set_default_permission(self, permission):
"""
Set the default permission to be used by all subsequent
:term:`view configuration` registrations. ``permission``
should be a :term:`permission` string to be used as the
default permission. An example of a permission
string:``'view'``. Adding a default permission makes it
unnecessary to protect each view configuration with an
explicit permission, unless your application policy requires
some exception for a particular view.
If a default permission is *not* set, views represented by
view configuration registrations which do not explicitly
declare a permission will be executable by entirely anonymous
users (any authorization policy is ignored).
Later calls to this method override will conflict with earlier calls;
there can be only one default permission active at a time within an
application.
.. warning::
If a default permission is in effect, view configurations meant to
create a truly anonymously accessible view (even :term:`exception
view` views) *must* use the value of the permission importable as
:data:`pyramid.security.NO_PERMISSION_REQUIRED`. When this string
is used as the ``permission`` for a view configuration, the default
permission is ignored, and the view is registered, making it
available to all callers regardless of their credentials.
.. seealso::
See also :ref:`setting_a_default_permission`.
.. note::
Using the ``default_permission`` argument to the
:class:`pyramid.config.Configurator` constructor can be used to
achieve the same purpose.
"""
def register():
self.registry.registerUtility(permission, IDefaultPermission)
intr = self.introspectable(
'default permission', None, permission, 'default permission'
)
intr['value'] = permission
perm_intr = self.introspectable(
'permissions', permission, permission, 'permission'
)
perm_intr['value'] = permission
# default permission used during view registration (phase 3)
self.action(
IDefaultPermission,
register,
order=PHASE1_CONFIG,
introspectables=(intr, perm_intr),
)
def add_permission(self, permission_name):
"""
A configurator directive which registers a free-standing
permission without associating it with a view callable. This can be
used so that the permission shows up in the introspectable data under
the ``permissions`` category (permissions mentioned via ``add_view``
already end up in there). For example::
config = Configurator()
config.add_permission('view')
"""
intr = self.introspectable(
'permissions', permission_name, permission_name, 'permission'
)
intr['value'] = permission_name
self.action(None, introspectables=(intr,))
@action_method
def set_default_csrf_options(
self,
require_csrf=True,
token='csrf_token',
header='X-CSRF-Token',
safe_methods=('GET', 'HEAD', 'OPTIONS', 'TRACE'),
check_origin=True,
allow_no_origin=False,
callback=None,
):
"""
Set the default CSRF options used by subsequent view registrations.
``require_csrf`` controls whether CSRF checks will be automatically
enabled on each view in the application. This value is used as the
fallback when ``require_csrf`` is left at the default of ``None`` on
:meth:`pyramid.config.Configurator.add_view`.
``token`` is the name of the CSRF token used in the body of the
request, accessed via ``request.POST[token]``. Default: ``csrf_token``.
``header`` is the name of the header containing the CSRF token,
accessed via ``request.headers[header]``. Default: ``X-CSRF-Token``.
If ``token`` or ``header`` are set to ``None`` they will not be used
for checking CSRF tokens.
``safe_methods`` is an iterable of HTTP methods which are expected to
not contain side-effects as defined by RFC2616. Safe methods will
never be automatically checked for CSRF tokens.
Default: ``('GET', 'HEAD', 'OPTIONS', TRACE')``.
``check_origin`` is a boolean. If ``False``, the ``Origin`` and
``Referer`` headers will not be validated as part of automated
CSRF checks.
``allow_no_origin`` is a boolean. If ``True``, a request lacking both
an ``Origin`` and ``Referer`` header will pass the CSRF check. This
option has no effect if ``check_origin`` is ``False``.
If ``callback`` is set, it must be a callable accepting ``(request)``
and returning ``True`` if the request should be checked for a valid
CSRF token. This callback allows an application to support
alternate authentication methods that do not rely on cookies which
are not subject to CSRF attacks. For example, if a request is
authenticated using the ``Authorization`` header instead of a cookie,
this may return ``False`` for that request so that clients do not
need to send the ``X-CSRF-Token`` header. The callback is only tested
for non-safe methods as defined by ``safe_methods``.
.. versionadded:: 1.7
.. versionchanged:: 1.8
Added the ``callback`` option.
.. versionchanged:: 2.0
Added the ``allow_no_origin`` and ``check_origin`` options.
"""
options = DefaultCSRFOptions(
require_csrf=require_csrf,
token=token,
header=header,
safe_methods=safe_methods,
check_origin=check_origin,
allow_no_origin=allow_no_origin,
callback=callback,
)
def register():
self.registry.registerUtility(options, IDefaultCSRFOptions)
intr = self.introspectable(
'default csrf view options',
None,
options,
'default csrf view options',
)
intr['require_csrf'] = require_csrf
intr['token'] = token
intr['header'] = header
intr['safe_methods'] = as_sorted_tuple(safe_methods)
intr['check_origin'] = allow_no_origin
intr['allow_no_origin'] = check_origin
intr['callback'] = callback
self.action(
IDefaultCSRFOptions,
register,
order=PHASE1_CONFIG,
introspectables=(intr,),
)
@action_method
def set_csrf_storage_policy(self, policy):
"""
Set the :term:`CSRF storage policy` used by subsequent view
registrations.
``policy`` is a class that implements the
:meth:`pyramid.interfaces.ICSRFStoragePolicy` interface and defines
how to generate and persist CSRF tokens.
"""
def register():
self.registry.registerUtility(policy, ICSRFStoragePolicy)
intr = self.introspectable(
'csrf storage policy', None, policy, 'csrf storage policy'
)
intr['policy'] = policy
self.action(ICSRFStoragePolicy, register, introspectables=(intr,))
@implementer(IDefaultCSRFOptions)
| SecurityConfiguratorMixin |
python | davidhalter__jedi | jedi/plugins/stdlib.py | {
"start": 18258,
"end": 18742
} | class ____(SignatureWrapper):
def __init__(self, wrapped_signature, skipped_arg_count, skipped_arg_set):
super().__init__(wrapped_signature)
self._skipped_arg_count = skipped_arg_count
self._skipped_arg_set = skipped_arg_set
def get_param_names(self, resolve_stars=False):
names = self._wrapped_signature.get_param_names()[self._skipped_arg_count:]
return [n for n in names if n.string_name not in self._skipped_arg_set]
| PartialSignature |
python | django__django | tests/template_tests/filter_tests/test_get_digit.py | {
"start": 94,
"end": 476
} | class ____(SimpleTestCase):
def test_values(self):
self.assertEqual(get_digit(123, 1), 3)
self.assertEqual(get_digit(123, 2), 2)
self.assertEqual(get_digit(123, 3), 1)
self.assertEqual(get_digit(123, 4), 0)
self.assertEqual(get_digit(123, 0), 123)
def test_string(self):
self.assertEqual(get_digit("xyz", 0), "xyz")
| FunctionTests |
python | pytorch__pytorch | test/torch_np/numpy_tests/linalg/test_linalg.py | {
"start": 57474,
"end": 59396
} | class ____(TestCase):
def test_matrix_rank(self):
# Full rank matrix
assert_equal(4, matrix_rank(np.eye(4)))
# rank deficient matrix
I = np.eye(4)
I[-1, -1] = 0.0
assert_equal(matrix_rank(I), 3)
# All zeros - zero rank
assert_equal(matrix_rank(np.zeros((4, 4))), 0)
# 1 dimension - rank 1 unless all 0
assert_equal(matrix_rank([1, 0, 0, 0]), 1)
assert_equal(matrix_rank(np.zeros((4,))), 0)
# accepts array-like
assert_equal(matrix_rank([1]), 1)
# greater than 2 dimensions treated as stacked matrices
ms = np.array([I, np.eye(4), np.zeros((4, 4))])
assert_equal(matrix_rank(ms), np.array([3, 4, 0]))
# works on scalar
assert_equal(matrix_rank(1), 1)
def test_symmetric_rank(self):
assert_equal(4, matrix_rank(np.eye(4), hermitian=True))
assert_equal(1, matrix_rank(np.ones((4, 4)), hermitian=True))
assert_equal(0, matrix_rank(np.zeros((4, 4)), hermitian=True))
# rank deficient matrix
I = np.eye(4)
I[-1, -1] = 0.0
assert_equal(3, matrix_rank(I, hermitian=True))
# manually supplied tolerance
I[-1, -1] = 1e-8
assert_equal(4, matrix_rank(I, hermitian=True, tol=0.99e-8))
assert_equal(3, matrix_rank(I, hermitian=True, tol=1.01e-8))
def test_reduced_rank(self):
# Test matrices with reduced rank
# rng = np.random.RandomState(20120714)
np.random.seed(20120714)
for _ in range(100):
# Make a rank deficient matrix
X = np.random.normal(size=(40, 10))
X[:, 0] = X[:, 1] + X[:, 2]
# Assert that matrix_rank detected deficiency
assert_equal(matrix_rank(X), 9)
X[:, 3] = X[:, 4] + X[:, 5]
assert_equal(matrix_rank(X), 8)
@instantiate_parametrized_tests
| TestMatrixRank |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefaultClass3.py | {
"start": 1345,
"end": 1432
} | class ____[T2 = T1]: ...
# This should generate an error because T4 depends on T2.
| ClassI |
python | kamyu104__LeetCode-Solutions | Python/beautiful-pairs.py | {
"start": 1947,
"end": 4366
} | class ____(object):
def beautifulPair(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
INF = float("inf")
MAX_NEIGHBOR_COUNT = (8+2)//2
def dist(a, b):
if a > b:
a, b = b, a
return [abs(points[a][0]-points[b][0])+abs(points[a][1]-points[b][1]), a, b]
def merge_sort(left, right):
def update(arr, i): # added
for j in reversed(xrange(len(arr))):
if points[i][1]-points[arr[j]][1] > result[0]:
break
result[:] = min(result, dist(i, arr[j]))
else:
j = -1
assert((len(arr)-1)-j <= MAX_NEIGHBOR_COUNT)
if left == right:
return
mid = left+(right-left)//2
x = points[order[mid]][0] # added
merge_sort(left, mid)
merge_sort(mid+1, right)
tmp, tmp_l, tmp_r = [], [], []
l, r = left, mid+1
while l <= mid or r <= right:
if r == right+1 or (l <= mid and points[order[l]][1] <= points[order[r]][1]): # modified
update(tmp_r, order[l])
if x-points[order[l]][0] <= result[0]: # added
tmp_l.append(order[l])
tmp.append(order[l])
l += 1
else:
update(tmp_l, order[r])
if points[order[r]][0]-x <= result[0]: # added
tmp_r.append(order[r])
tmp.append(order[r])
r += 1
order[left:right+1] = tmp
points = [(i, j) for i, j in itertools.izip(nums1, nums2)]
result = [INF]*3
lookup = {}
for i in reversed(xrange(len(points))):
if points[i] in lookup:
result = [0, (i, lookup[points[i]])]
lookup[points[i]] = i
if result[0] == 0:
return result[1]
order = range(len(points))
order.sort(key=lambda x: points[x][0])
merge_sort(0, len(points)-1)
return result[1:]
# Time: O(nlogn)
# Space: O(n)
import itertools
# divide and conquer, merge sort, variant of closest pair
# reference: https://www.baeldung.com/cs/minimal-manhattan-distance
| Solution2 |
python | pyca__cryptography | tests/x509/test_x509_ext.py | {
"start": 184065,
"end": 185086
} | class ____:
def test_nocheck(self, backend):
cert = _load_cert(
os.path.join("x509", "custom", "ocsp_nocheck.pem"),
x509.load_pem_x509_certificate,
)
ext = cert.extensions.get_extension_for_oid(ExtensionOID.OCSP_NO_CHECK)
assert isinstance(ext.value, x509.OCSPNoCheck)
def test_eq(self):
onc1 = x509.OCSPNoCheck()
onc2 = x509.OCSPNoCheck()
assert onc1 == onc2
def test_hash(self):
onc1 = x509.OCSPNoCheck()
onc2 = x509.OCSPNoCheck()
assert hash(onc1) == hash(onc2)
def test_ne(self):
onc1 = x509.OCSPNoCheck()
onc2 = x509.OCSPNoCheck()
assert onc1 == onc2
assert (onc1 != onc2) is False
assert onc1 != object()
def test_repr(self):
onc = x509.OCSPNoCheck()
assert repr(onc) == "<OCSPNoCheck()>"
def test_public_bytes(self):
ext = x509.OCSPNoCheck()
assert ext.public_bytes() == b"\x05\x00"
| TestOCSPNoCheckExtension |
python | oauthlib__oauthlib | tests/test_common.py | {
"start": 438,
"end": 1920
} | class ____(TestCase):
def test_urldecode(self):
self.assertCountEqual(urldecode(''), [])
self.assertCountEqual(urldecode('='), [('', '')])
self.assertCountEqual(urldecode('%20'), [(' ', '')])
self.assertCountEqual(urldecode('+'), [(' ', '')])
self.assertCountEqual(urldecode('c2'), [('c2', '')])
self.assertCountEqual(urldecode('c2='), [('c2', '')])
self.assertCountEqual(urldecode('foo=bar'), [('foo', 'bar')])
self.assertCountEqual(urldecode('foo_%20~=.bar-'),
[('foo_ ~', '.bar-')])
self.assertCountEqual(urldecode('foo=1,2,3'), [('foo', '1,2,3')])
self.assertCountEqual(urldecode('foo=(1,2,3)'), [('foo', '(1,2,3)')])
self.assertCountEqual(urldecode('foo=bar.*'), [('foo', 'bar.*')])
self.assertCountEqual(urldecode('foo=bar@spam'), [('foo', 'bar@spam')])
self.assertCountEqual(urldecode('foo=bar/baz'), [('foo', 'bar/baz')])
self.assertCountEqual(urldecode('foo=bar?baz'), [('foo', 'bar?baz')])
self.assertCountEqual(urldecode('foo=bar\'s'), [('foo', 'bar\'s')])
self.assertCountEqual(urldecode('foo=$'), [('foo', '$')])
self.assertRaises(ValueError, urldecode, 'foo bar')
self.assertRaises(ValueError, urldecode, '%R')
self.assertRaises(ValueError, urldecode, '%RA')
self.assertRaises(ValueError, urldecode, '%AR')
self.assertRaises(ValueError, urldecode, '%RR')
| EncodingTest |
python | python-openxml__python-docx | tests/text/test_pagebreak.py | {
"start": 300,
"end": 5317
} | class ____:
"""Unit-test suite for the docx.text.pagebreak.RenderedPageBreak object."""
def it_raises_on_preceding_fragment_when_page_break_is_not_first_in_paragrah(
self, fake_parent: t.ProvidesStoryPart
):
p_cxml = 'w:p/(w:r/(w:t"abc",w:lastRenderedPageBreak,w:lastRenderedPageBreak))'
p = cast(CT_P, element(p_cxml))
lrpb = p.lastRenderedPageBreaks[-1]
page_break = RenderedPageBreak(lrpb, fake_parent)
with pytest.raises(ValueError, match="only defined on first rendered page-br"):
page_break.preceding_paragraph_fragment
def it_produces_None_for_preceding_fragment_when_page_break_is_leading(
self, fake_parent: t.ProvidesStoryPart
):
"""A page-break with no preceding content is "leading"."""
p_cxml = 'w:p/(w:pPr/w:ind,w:r/(w:lastRenderedPageBreak,w:t"foo",w:t"bar"))'
p = cast(CT_P, element(p_cxml))
lrpb = p.lastRenderedPageBreaks[0]
page_break = RenderedPageBreak(lrpb, fake_parent)
preceding_fragment = page_break.preceding_paragraph_fragment
assert preceding_fragment is None
def it_can_split_off_the_preceding_paragraph_content_when_in_a_run(
self, fake_parent: t.ProvidesStoryPart
):
p_cxml = (
"w:p/("
" w:pPr/w:ind"
' ,w:r/(w:t"foo",w:lastRenderedPageBreak,w:t"bar")'
' ,w:r/w:t"barfoo"'
")"
)
p = cast(CT_P, element(p_cxml))
lrpb = p.lastRenderedPageBreaks[0]
page_break = RenderedPageBreak(lrpb, fake_parent)
preceding_fragment = page_break.preceding_paragraph_fragment
expected_cxml = 'w:p/(w:pPr/w:ind,w:r/w:t"foo")'
assert preceding_fragment is not None
assert preceding_fragment._p.xml == xml(expected_cxml)
def and_it_can_split_off_the_preceding_paragraph_content_when_in_a_hyperlink(
self, fake_parent: t.ProvidesStoryPart
):
p_cxml = (
"w:p/("
" w:pPr/w:ind"
' ,w:hyperlink/w:r/(w:t"foo",w:lastRenderedPageBreak,w:t"bar")'
' ,w:r/w:t"barfoo"'
")"
)
p = cast(CT_P, element(p_cxml))
lrpb = p.lastRenderedPageBreaks[0]
page_break = RenderedPageBreak(lrpb, fake_parent)
preceding_fragment = page_break.preceding_paragraph_fragment
expected_cxml = 'w:p/(w:pPr/w:ind,w:hyperlink/w:r/(w:t"foo",w:t"bar"))'
assert preceding_fragment is not None
assert preceding_fragment._p.xml == xml(expected_cxml)
def it_raises_on_following_fragment_when_page_break_is_not_first_in_paragrah(
self, fake_parent: t.ProvidesStoryPart
):
p_cxml = 'w:p/(w:r/(w:lastRenderedPageBreak,w:lastRenderedPageBreak,w:t"abc"))'
p = cast(CT_P, element(p_cxml))
lrpb = p.lastRenderedPageBreaks[-1]
page_break = RenderedPageBreak(lrpb, fake_parent)
with pytest.raises(ValueError, match="only defined on first rendered page-br"):
page_break.following_paragraph_fragment
def it_produces_None_for_following_fragment_when_page_break_is_trailing(
self, fake_parent: t.ProvidesStoryPart
):
"""A page-break with no following content is "trailing"."""
p_cxml = 'w:p/(w:pPr/w:ind,w:r/(w:t"foo",w:t"bar",w:lastRenderedPageBreak))'
p = cast(CT_P, element(p_cxml))
lrpb = p.lastRenderedPageBreaks[0]
page_break = RenderedPageBreak(lrpb, fake_parent)
following_fragment = page_break.following_paragraph_fragment
assert following_fragment is None
def it_can_split_off_the_following_paragraph_content_when_in_a_run(
self, fake_parent: t.ProvidesStoryPart
):
p_cxml = 'w:p/(w:pPr/w:ind,w:r/(w:t"foo",w:lastRenderedPageBreak,w:t"bar"),w:r/w:t"foo")'
p = cast(CT_P, element(p_cxml))
lrpb = p.lastRenderedPageBreaks[0]
page_break = RenderedPageBreak(lrpb, fake_parent)
following_fragment = page_break.following_paragraph_fragment
expected_cxml = 'w:p/(w:pPr/w:ind,w:r/w:t"bar",w:r/w:t"foo")'
assert following_fragment is not None
assert following_fragment._p.xml == xml(expected_cxml)
def and_it_can_split_off_the_following_paragraph_content_when_in_a_hyperlink(
self, fake_parent: t.ProvidesStoryPart
):
p_cxml = (
"w:p/("
" w:pPr/w:ind"
' ,w:hyperlink/w:r/(w:t"foo",w:lastRenderedPageBreak,w:t"bar")'
' ,w:r/w:t"baz"'
' ,w:r/w:t"qux"'
")"
)
p = cast(CT_P, element(p_cxml))
lrpb = p.lastRenderedPageBreaks[0]
page_break = RenderedPageBreak(lrpb, fake_parent)
following_fragment = page_break.following_paragraph_fragment
expected_cxml = 'w:p/(w:pPr/w:ind,w:r/w:t"baz",w:r/w:t"qux")'
assert following_fragment is not None
assert following_fragment._p.xml == xml(expected_cxml)
| DescribeRenderedPageBreak |
python | marshmallow-code__apispec | src/apispec/ext/marshmallow/field_converter.py | {
"start": 2358,
"end": 23823
} | class ____:
"""Adds methods for converting marshmallow fields to an OpenAPI properties."""
field_mapping: dict[type, tuple[str | None, str | None]] = DEFAULT_FIELD_MAPPING
openapi_version: Version
def init_attribute_functions(self):
self.attribute_functions = [
# self.field2type_and_format should run first
# as other functions may rely on its output
self.field2type_and_format,
self.field2default,
self.field2choices,
self.field2read_only,
self.field2write_only,
self.field2range,
self.field2length,
self.field2pattern,
self.metadata2properties,
self.enum2properties,
self.nested2properties,
self.pluck2properties,
self.list2properties,
self.dict2properties,
self.timedelta2properties,
self.datetime2properties,
self.field2nullable,
]
def map_to_openapi_type(self, field_cls, *args):
"""Set mapping for custom field class.
:param type field_cls: Field class to set mapping for.
``*args`` can be:
- a pair of the form ``(type, format)``
- a core marshmallow field type (in which case we reuse that type's mapping)
"""
if len(args) == 1 and args[0] in self.field_mapping:
openapi_type_field = self.field_mapping[args[0]]
elif len(args) == 2:
openapi_type_field = args
else:
raise TypeError("Pass core marshmallow field type or (type, fmt) pair.")
self.field_mapping[field_cls] = openapi_type_field
def add_attribute_function(self, func):
"""Method to add an attribute function to the list of attribute functions
that will be called on a field to convert it from a field to an OpenAPI
property.
:param func func: the attribute function to add
The attribute function will be bound to the
`OpenAPIConverter <apispec.ext.marshmallow.openapi.OpenAPIConverter>`
instance.
It will be called for each field in a schema with
`self <apispec.ext.marshmallow.openapi.OpenAPIConverter>` and a
`field <marshmallow.fields.Field>` instance
positional arguments and `ret <dict>` keyword argument.
Must return a dictionary of OpenAPI properties that will be shallow
merged with the return values of all other attribute functions called on the field.
User added attribute functions will be called after all built-in attribute
functions in the order they were added. The merged results of all
previously called attribute functions are accessible via the `ret`
argument.
"""
bound_func = func.__get__(self)
setattr(self, func.__name__, bound_func)
self.attribute_functions.append(bound_func)
def field2property(self, field: marshmallow.fields.Field) -> dict:
"""Return the JSON Schema property definition given a marshmallow
:class:`Field <marshmallow.fields.Field>`.
Will include field metadata that are valid properties of OpenAPI schema objects
(e.g. "description", "enum", "example").
https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#schemaObject
:param Field field: A marshmallow field.
:rtype: dict, a Property Object
"""
ret: dict = {}
for attr_func in self.attribute_functions:
ret.update(attr_func(field, ret=ret))
return ret
def field2type_and_format(
self, field: marshmallow.fields.Field, **kwargs: typing.Any
) -> dict:
"""Return the dictionary of OpenAPI type and format based on the field type.
:param Field field: A marshmallow field.
:rtype: dict
"""
# If this type isn't directly in the field mapping then check the
# hierarchy until we find something that does.
for field_class in type(field).__mro__:
if field_class in self.field_mapping:
type_, fmt = self.field_mapping[field_class]
break
else:
warnings.warn(
f"Field of type {type(field)} does not inherit from marshmallow.Field.",
UserWarning,
stacklevel=2,
)
type_, fmt = "string", None
ret = {}
if type_:
ret["type"] = type_
if fmt:
ret["format"] = fmt
return ret
def field2default(
self, field: marshmallow.fields.Field, **kwargs: typing.Any
) -> dict:
"""Return the dictionary containing the field's default value.
Will first look for a `default` key in the field's metadata and then
fall back on the field's `missing` parameter. A callable passed to the
field's missing parameter will be ignored.
:param Field field: A marshmallow field.
:rtype: dict
"""
ret = {}
if "default" in field.metadata:
ret["default"] = field.metadata["default"]
else:
default = field.load_default
if default is not marshmallow.missing and not callable(default):
default = field._serialize(default, None, None)
ret["default"] = default
return ret
def field2choices(
self, field: marshmallow.fields.Field, **kwargs: typing.Any
) -> dict:
"""Return the dictionary of OpenAPI field attributes for valid choices definition.
:param Field field: A marshmallow field.
:rtype: dict
"""
attributes = {}
comparable = [
validator.comparable
for validator in field.validators
if hasattr(validator, "comparable")
]
if comparable:
attributes["enum"] = comparable
else:
choices = [
OrderedSet(validator.choices)
for validator in field.validators
if hasattr(validator, "choices")
]
if choices:
attributes["enum"] = list(functools.reduce(operator.and_, choices))
if field.allow_none:
enum = attributes.get("enum")
if enum is not None and None not in enum:
attributes["enum"].append(None)
return attributes
def field2read_only(
self, field: marshmallow.fields.Field, **kwargs: typing.Any
) -> dict:
"""Return the dictionary of OpenAPI field attributes for a dump_only field.
:param Field field: A marshmallow field.
:rtype: dict
"""
attributes = {}
if field.dump_only:
attributes["readOnly"] = True
return attributes
def field2write_only(
self, field: marshmallow.fields.Field, **kwargs: typing.Any
) -> dict:
"""Return the dictionary of OpenAPI field attributes for a load_only field.
:param Field field: A marshmallow field.
:rtype: dict
"""
attributes = {}
if field.load_only and self.openapi_version.major >= 3:
attributes["writeOnly"] = True
return attributes
def field2nullable(self, field: marshmallow.fields.Field, ret) -> dict:
"""Return the dictionary of OpenAPI field attributes for a nullable field.
:param Field field: A marshmallow field.
:rtype: dict
"""
attributes: dict = {}
if field.allow_none:
if self.openapi_version.major < 3:
attributes["x-nullable"] = True
elif self.openapi_version.minor < 1:
if "$ref" in ret:
attributes["anyOf"] = [
{"type": "object", "nullable": True},
{"$ref": ret.pop("$ref")},
]
elif "allOf" in ret:
attributes["anyOf"] = [
*ret.pop("allOf"),
{"type": "object", "nullable": True},
]
else:
attributes["nullable"] = True
else:
if "$ref" in ret:
attributes["anyOf"] = [{"$ref": ret.pop("$ref")}, {"type": "null"}]
elif "allOf" in ret:
attributes["anyOf"] = [*ret.pop("allOf"), {"type": "null"}]
elif "type" in ret:
attributes["type"] = [*make_type_list(ret.get("type")), "null"]
return attributes
def field2range(self, field: marshmallow.fields.Field, ret) -> dict:
"""Return the dictionary of OpenAPI field attributes for a set of
:class:`Range <marshmallow.validators.Range>` validators.
:param Field field: A marshmallow field.
:rtype: dict
"""
validators = [
validator
for validator in field.validators
if (
hasattr(validator, "min")
and hasattr(validator, "max")
and not hasattr(validator, "equal")
)
]
min_attr, max_attr = (
("minimum", "maximum")
if set(make_type_list(ret.get("type"))) & {"number", "integer"}
else ("x-minimum", "x-maximum")
)
# Serialize min/max values with the field to which the validator is applied
return {
k: field._serialize(v, None, None)
for k, v in make_min_max_attributes(validators, min_attr, max_attr).items()
}
def field2length(
self, field: marshmallow.fields.Field, **kwargs: typing.Any
) -> dict:
"""Return the dictionary of OpenAPI field attributes for a set of
:class:`Length <marshmallow.validators.Length>` validators.
:param Field field: A marshmallow field.
:rtype: dict
"""
validators = [
validator
for validator in field.validators
if (
hasattr(validator, "min")
and hasattr(validator, "max")
and hasattr(validator, "equal")
)
]
is_array = isinstance(
field, (marshmallow.fields.Nested, marshmallow.fields.List)
)
min_attr = "minItems" if is_array else "minLength"
max_attr = "maxItems" if is_array else "maxLength"
equal_list = [
validator.equal for validator in validators if validator.equal is not None
]
if equal_list:
return {min_attr: equal_list[0], max_attr: equal_list[0]}
return make_min_max_attributes(validators, min_attr, max_attr)
def field2pattern(
self, field: marshmallow.fields.Field, **kwargs: typing.Any
) -> dict:
"""Return the dictionary of OpenAPI field attributes for a
:class:`Regexp <marshmallow.validators.Regexp>` validator.
If there is more than one such validator, only the first
is used in the output spec.
:param Field field: A marshmallow field.
:rtype: dict
"""
regex_validators = (
v
for v in field.validators
if isinstance(getattr(v, "regex", None), re.Pattern)
)
v = next(regex_validators, None)
attributes = {} if v is None else {"pattern": v.regex.pattern} # type:ignore
if next(regex_validators, None) is not None:
warnings.warn(
f"More than one regex validator defined on {type(field)} field. Only the "
"first one will be used in the output spec.",
UserWarning,
stacklevel=2,
)
return attributes
def metadata2properties(
self, field: marshmallow.fields.Field, **kwargs: typing.Any
) -> dict:
"""Return a dictionary of properties extracted from field metadata.
Will include field metadata that are valid properties of `OpenAPI schema
objects
<https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#schemaObject>`_
(e.g. "description", "enum", "example").
In addition, `specification extensions
<https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#specification-extensions>`_
are supported. Prefix `x_` to the desired extension when passing the
keyword argument to the field constructor. apispec will convert `x_` to
`x-` to comply with OpenAPI.
:param Field field: A marshmallow field.
:rtype: dict
"""
# Dasherize metadata that starts with x_
metadata = {
key.replace("_", "-") if key.startswith("x_") else key: value
for key, value in field.metadata.items()
if isinstance(key, str)
}
# Avoid validation error with "Additional properties not allowed"
ret = {
key: value
for key, value in metadata.items()
if key in _VALID_PROPERTIES or key.startswith(_VALID_PREFIX)
}
return ret
def nested2properties(self, field: marshmallow.fields.Field, ret) -> dict:
"""Return a dictionary of properties from :class:`Nested <marshmallow.fields.Nested` fields.
Typically provides a reference object and will add the schema to the spec
if it is not already present
If a custom `schema_name_resolver` function returns `None` for the nested
schema a JSON schema object will be returned
:param Field field: A marshmallow field.
:rtype: dict
"""
# Pluck is a subclass of Nested but is in essence a single field; it
# is treated separately by pluck2properties.
if isinstance(field, marshmallow.fields.Nested) and not isinstance(
field, marshmallow.fields.Pluck
):
schema_dict = self.resolve_nested_schema(field.schema) # type:ignore
if (
ret
and "$ref" in schema_dict
and (
self.openapi_version.major < 3
or (
self.openapi_version.major == 3
and self.openapi_version.minor == 0
)
)
):
ret.update({"allOf": [schema_dict]})
else:
ret.update(schema_dict)
return ret
def pluck2properties(self, field, **kwargs: typing.Any) -> dict:
"""Return a dictionary of properties from :class:`Pluck <marshmallow.fields.Pluck` fields.
Pluck effectively trans-includes a field from another schema into this,
possibly wrapped in an array (`many=True`).
:param Field field: A marshmallow field.
:rtype: dict
"""
if isinstance(field, marshmallow.fields.Pluck):
plucked_field = field.schema.fields[field.field_name]
ret = self.field2property(plucked_field)
return {"type": "array", "items": ret} if field.many else ret
return {}
def list2properties(self, field, **kwargs: typing.Any) -> dict:
"""Return a dictionary of properties from :class:`List <marshmallow.fields.List>` fields.
Will provide an `items` property based on the field's `inner` attribute
:param Field field: A marshmallow field.
:rtype: dict
"""
ret = {}
if isinstance(field, marshmallow.fields.List):
ret["items"] = self.field2property(field.inner)
return ret
def dict2properties(self, field, **kwargs: typing.Any) -> dict:
"""Return a dictionary of properties from :class:`Dict <marshmallow.fields.Dict>` fields.
Only applicable for Marshmallow versions greater than 3. Will provide an
`additionalProperties` property based on the field's `value_field` attribute
:param Field field: A marshmallow field.
:rtype: dict
"""
ret = {}
if isinstance(field, marshmallow.fields.Dict):
value_field = field.value_field
if value_field:
ret["additionalProperties"] = self.field2property(value_field)
elif "additionalProperties" not in kwargs.get("ret", {}):
ret["additionalProperties"] = {}
return ret
def timedelta2properties(self, field, **kwargs: typing.Any) -> dict:
"""Return a dictionary of properties from :class:`TimeDelta <marshmallow.fields.TimeDelta>` fields.
Adds a `x-unit` vendor property based on the field's `precision` attribute
:param Field field: A marshmallow field.
:rtype: dict
"""
ret = {}
if isinstance(field, marshmallow.fields.TimeDelta):
ret["x-unit"] = field.precision
# Required for Marshmallow <4. Can be removed when support for Marshmallow 3 is dropped.
# This overrides the type set in field2type_and_format (from DEFAULT_FIELD_MAPPING)
if hasattr(field, "serialization_type"):
ret["type"] = {
int: "integer",
float: "number",
}.get(field.serialization_type, "number")
return ret
def enum2properties(self, field, **kwargs: typing.Any) -> dict:
"""Return a dictionary of properties from :class:`Enum <marshmallow.fields.Enum` fields.
:param Field field: A marshmallow field.
:rtype: dict
"""
ret = {}
if isinstance(field, marshmallow.fields.Enum):
ret = self.field2property(field.field)
if field.by_value is False:
choices = (m for m in field.enum.__members__)
else:
choices = (m.value for m in field.enum)
ret["enum"] = [field.field._serialize(v, None, None) for v in choices]
if field.allow_none and None not in ret["enum"]:
ret["enum"].append(None)
return ret
def datetime2properties(self, field, **kwargs: typing.Any) -> dict:
"""Return a dictionary of properties from :class:`DateTime <marshmallow.fields.DateTime` fields.
:param Field field: A marshmallow field.
:rtype: dict
"""
ret = {}
if isinstance(field, marshmallow.fields.DateTime):
if field.format == "iso" or field.format is None:
# Will return { "type": "string", "format": "date-time" }
# as specified inside DEFAULT_FIELD_MAPPING
pass
elif field.format == "rfc":
ret = {
"type": "string",
"format": None,
"example": "Wed, 02 Oct 2002 13:00:00 GMT",
"pattern": r"((Mon|Tue|Wed|Thu|Fri|Sat|Sun), ){0,1}\d{2} "
+ r"(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) \d{4} \d{2}:\d{2}:\d{2} "
+ r"(UT|GMT|EST|EDT|CST|CDT|MST|MDT|PST|PDT|(Z|A|M|N)|(\+|-)\d{4})",
}
elif field.format == "timestamp":
ret = {
"type": "number",
"format": "float",
"example": "1676451245.596",
"min": "0",
}
elif field.format == "timestamp_ms":
ret = {
"type": "number",
"format": "float",
"example": "1676451277514.654",
"min": "0",
}
else:
ret = {
"type": "string",
"format": None,
"pattern": (
field.metadata["pattern"]
if field.metadata.get("pattern")
else None
),
}
return ret
def make_type_list(types):
"""Return a list of types from a type attribute
Since OpenAPI 3.1.0, "type" can be a single type as string or a list of
types, including 'null'. This function takes a "type" attribute as input
and returns it as a list, be it an empty or single-element list.
This is useful to factorize type-conditional code or code adding a type.
"""
if types is None:
return []
if isinstance(types, str):
return [types]
return types
def make_min_max_attributes(validators, min_attr, max_attr) -> dict:
"""Return a dictionary of minimum and maximum attributes based on a list
of validators. If either minimum or maximum values are not present in any
of the validator objects that attribute will be omitted.
:param validators list: A list of `Marshmallow` validator objects. Each
objct is inspected for a minimum and maximum values
:param min_attr string: The OpenAPI attribute for the minimum value
:param max_attr string: The OpenAPI attribute for the maximum value
"""
attributes = {}
min_list = [validator.min for validator in validators if validator.min is not None]
max_list = [validator.max for validator in validators if validator.max is not None]
if min_list:
attributes[min_attr] = max(min_list)
if max_list:
attributes[max_attr] = min(max_list)
return attributes
| FieldConverterMixin |
python | lepture__authlib | authlib/integrations/requests_client/oauth2_session.py | {
"start": 1426,
"end": 4979
} | class ____(OAuth2Client, Session):
"""Construct a new OAuth 2 client requests session.
:param client_id: Client ID, which you get from client registration.
:param client_secret: Client Secret, which you get from registration.
:param authorization_endpoint: URL of the authorization server's
authorization endpoint.
:param token_endpoint: URL of the authorization server's token endpoint.
:param token_endpoint_auth_method: client authentication method for
token endpoint.
:param revocation_endpoint: URL of the authorization server's OAuth 2.0
revocation endpoint.
:param revocation_endpoint_auth_method: client authentication method for
revocation endpoint.
:param scope: Scope that you needed to access user resources.
:param state: Shared secret to prevent CSRF attack.
:param redirect_uri: Redirect URI you registered as callback.
:param token: A dict of token attributes such as ``access_token``,
``token_type`` and ``expires_at``.
:param token_placement: The place to put token in HTTP request. Available
values: "header", "body", "uri".
:param update_token: A function for you to update token. It accept a
:class:`OAuth2Token` as parameter.
:param leeway: Time window in seconds before the actual expiration of the
authentication token, that the token is considered expired and will
be refreshed.
:param default_timeout: If settled, every requests will have a default timeout.
"""
client_auth_class = OAuth2ClientAuth
token_auth_class = OAuth2Auth
oauth_error_class = OAuthError
SESSION_REQUEST_PARAMS = (
"allow_redirects",
"timeout",
"cookies",
"files",
"proxies",
"hooks",
"stream",
"verify",
"cert",
"json",
)
def __init__(
self,
client_id=None,
client_secret=None,
token_endpoint_auth_method=None,
revocation_endpoint_auth_method=None,
scope=None,
state=None,
redirect_uri=None,
token=None,
token_placement="header",
update_token=None,
leeway=60,
default_timeout=None,
**kwargs,
):
Session.__init__(self)
self.default_timeout = default_timeout
update_session_configure(self, kwargs)
OAuth2Client.__init__(
self,
session=self,
client_id=client_id,
client_secret=client_secret,
token_endpoint_auth_method=token_endpoint_auth_method,
revocation_endpoint_auth_method=revocation_endpoint_auth_method,
scope=scope,
state=state,
redirect_uri=redirect_uri,
token=token,
token_placement=token_placement,
update_token=update_token,
leeway=leeway,
**kwargs,
)
def fetch_access_token(self, url=None, **kwargs):
"""Alias for fetch_token."""
return self.fetch_token(url, **kwargs)
def request(self, method, url, withhold_token=False, auth=None, **kwargs):
"""Send request with auto refresh token feature (if available)."""
if self.default_timeout:
kwargs.setdefault("timeout", self.default_timeout)
if not withhold_token and auth is None:
if not self.token:
raise MissingTokenError()
auth = self.token_auth
return super().request(method, url, auth=auth, **kwargs)
| OAuth2Session |
python | pandas-dev__pandas | asv_bench/benchmarks/dtypes.py | {
"start": 1095,
"end": 3173
} | class ____:
try:
params = [
tm.ALL_INT_NUMPY_DTYPES
+ tm.ALL_INT_EA_DTYPES
+ tm.FLOAT_NUMPY_DTYPES
+ tm.COMPLEX_DTYPES
+ tm.DATETIME64_DTYPES
+ tm.TIMEDELTA64_DTYPES
+ tm.BOOL_DTYPES
]
except AttributeError:
params = [
tm.ALL_INT_DTYPES
+ tm.ALL_EA_INT_DTYPES
+ tm.FLOAT_DTYPES
+ tm.COMPLEX_DTYPES
+ tm.DATETIME64_DTYPES
+ tm.TIMEDELTA64_DTYPES
+ tm.BOOL_DTYPES
]
param_names = ["dtype"]
def setup(self, dtype):
N, K = 5000, 50
self.index = Index([f"i-{i}" for i in range(N)], dtype=object)
self.columns = Index([f"i-{i}" for i in range(K)], dtype=object)
def create_df(data):
return DataFrame(data, index=self.index, columns=self.columns)
self.df_int = create_df(np.random.randint(low=100, size=(N, K)))
self.df_float = create_df(np.random.randn(N, K))
self.df_bool = create_df(np.random.choice([True, False], size=(N, K)))
self.df_string = create_df(
np.random.choice(list(string.ascii_letters), size=(N, K))
)
def time_select_dtype_int_include(self, dtype):
self.df_int.select_dtypes(include=dtype)
def time_select_dtype_int_exclude(self, dtype):
self.df_int.select_dtypes(exclude=dtype)
def time_select_dtype_float_include(self, dtype):
self.df_float.select_dtypes(include=dtype)
def time_select_dtype_float_exclude(self, dtype):
self.df_float.select_dtypes(exclude=dtype)
def time_select_dtype_bool_include(self, dtype):
self.df_bool.select_dtypes(include=dtype)
def time_select_dtype_bool_exclude(self, dtype):
self.df_bool.select_dtypes(exclude=dtype)
def time_select_dtype_string_include(self, dtype):
self.df_string.select_dtypes(include=dtype)
def time_select_dtype_string_exclude(self, dtype):
self.df_string.select_dtypes(exclude=dtype)
| SelectDtypes |
python | ray-project__ray | release/ray_release/file_manager/job_file_manager.py | {
"start": 805,
"end": 8253
} | class ____(FileManager):
def __init__(self, cluster_manager: ClusterManager):
import anyscale
super(JobFileManager, self).__init__(cluster_manager=cluster_manager)
self.sdk = self.cluster_manager.sdk
self.s3_client = boto3.client(S3_CLOUD_STORAGE)
self.cloud_storage_provider = os.environ.get(
"ANYSCALE_CLOUD_STORAGE_PROVIDER", S3_CLOUD_STORAGE
)
if self.cloud_storage_provider == S3_CLOUD_STORAGE:
self.bucket = str(RELEASE_AWS_BUCKET)
elif self.cloud_storage_provider == GS_CLOUD_STORAGE:
self.bucket = GS_BUCKET
self.gs_client = storage.Client()
elif self.cloud_storage_provider == AZURE_CLOUD_STORAGE:
self.bucket = AZURE_STORAGE_ACCOUNT
else:
raise RuntimeError(
f"Non supported anyscale service provider: "
f"{self.cloud_storage_provider}"
)
self.job_manager = JobManager(cluster_manager)
# Backward compatible
if "ANYSCALE_RAY_DIR" in anyscale.__dict__:
sys.path.insert(0, f"{anyscale.ANYSCALE_RAY_DIR}/bin")
def _run_with_retry(self, f, initial_retry_delay_s: int = 10):
assert callable(f)
return exponential_backoff_retry(
f,
retry_exceptions=Exception,
initial_retry_delay_s=initial_retry_delay_s,
max_retries=3,
)
def _generate_tmp_cloud_storage_path(self):
return f"tmp/{generate_tmp_cloud_storage_path()}"
def download_from_cloud(
self, key: str, target: str, delete_after_download: bool = False
):
if self.cloud_storage_provider == S3_CLOUD_STORAGE:
self._run_with_retry(
lambda: self.s3_client.download_file(
Bucket=self.bucket,
Key=key,
Filename=target,
)
)
if self.cloud_storage_provider == GS_CLOUD_STORAGE:
bucket = self.gs_client.bucket(self.bucket)
blob = bucket.blob(key)
self._run_with_retry(lambda: blob.download_to_filename(target))
if self.cloud_storage_provider == AZURE_CLOUD_STORAGE:
account_url = f"https://{AZURE_STORAGE_ACCOUNT}.dfs.core.windows.net"
credential = get_azure_credential()
blob_service_client = BlobServiceClient(account_url, credential)
blob_client = blob_service_client.get_blob_client(
container=AZURE_STORAGE_CONTAINER, blob=key
)
with open(target, "wb") as f:
blob_client.download_blob().readinto(f)
if delete_after_download:
self.delete(key)
def download(self, source: str, target: str):
# Attention: Only works for single files at the moment
remote_upload_to = self._generate_tmp_cloud_storage_path()
# remote source -> s3
bucket_address = f"s3://{self.bucket}/{remote_upload_to}"
retcode, _ = self._run_with_retry(
lambda: self.job_manager.run_and_wait(
(
f"pip install -q awscli && "
f"aws s3 cp {source} {bucket_address} "
"--acl bucket-owner-full-control"
),
{},
)
)
if retcode != 0:
raise FileDownloadError(f"Error downloading file {source} to {target}")
self.download_from_cloud(remote_upload_to, target, delete_after_download=True)
def _push_local_dir(self):
remote_upload_to = self._generate_tmp_cloud_storage_path()
# pack local dir
_, local_path = tempfile.mkstemp()
shutil.make_archive(local_path, "gztar", os.getcwd())
# local source -> s3
self._run_with_retry(
lambda: self.s3_client.upload_file(
Filename=local_path + ".tar.gz",
Bucket=self.bucket,
Key=remote_upload_to,
)
)
# remove local archive
os.unlink(local_path)
bucket_address = f"s3://{self.bucket}/{remote_upload_to}"
# s3 -> remote target
retcode, _ = self.job_manager.run_and_wait(
f"pip install -q awscli && "
f"aws s3 cp {bucket_address} archive.tar.gz && "
f"tar xf archive.tar.gz ",
{},
)
if retcode != 0:
raise FileUploadError(
f"Error uploading local dir to session "
f"{self.cluster_manager.cluster_name}."
)
try:
self._run_with_retry(
lambda: self.s3_client.delete_object(
Bucket=self.bucket, Key=remote_upload_to
),
initial_retry_delay_s=2,
)
except RuntimeError as e:
logger.warning(f"Could not remove temporary S3 object: {e}")
def upload(self, source: Optional[str] = None, target: Optional[str] = None):
if source is None and target is None:
self._push_local_dir()
return
assert isinstance(source, str)
assert isinstance(target, str)
remote_upload_to = self._generate_tmp_cloud_storage_path()
# local source -> s3
self._run_with_retry(
lambda: self.s3_client.upload_file(
Filename=source,
Bucket=self.bucket,
Key=remote_upload_to,
)
)
# s3 -> remote target
bucket_address = f"{S3_CLOUD_STORAGE}://{self.bucket}/{remote_upload_to}"
retcode, _ = self.job_manager.run_and_wait(
"pip install -q awscli && " f"aws s3 cp {bucket_address} {target}",
{},
)
if retcode != 0:
raise FileUploadError(f"Error uploading file {source} to {target}")
self.delete(remote_upload_to)
def _delete_gs_fn(self, key: str, recursive: bool = False):
if recursive:
blobs = self.gs_client.list_blobs(
self.bucket,
prefix=key,
)
for blob in blobs:
blob.delete()
else:
blob = self.gs_client.bucket(self.bucket).blob(key)
blob.delete()
def _delete_s3_fn(self, key: str, recursive: bool = False):
if recursive:
response = self.s3_client.list_objects_v2(Bucket=self.bucket, Prefix=key)
for object in response["Contents"]:
self.s3_client.delete_object(Bucket=self.bucket, Key=object["Key"])
else:
self.s3_client.delete_object(Bucket=self.bucket, Key=key)
def delete(self, key: str, recursive: bool = False):
def delete_fn():
if self.cloud_storage_provider == S3_CLOUD_STORAGE:
self._delete_s3_fn(key, recursive)
return
if self.cloud_storage_provider == GS_CLOUD_STORAGE:
self._delete_gs_fn(key, recursive)
return
if self.cloud_storage_provider == AZURE_CLOUD_STORAGE:
# TODO(aslonnie): Implement Azure blob deletion.
return
try:
self._run_with_retry(
delete_fn,
initial_retry_delay_s=2,
)
except Exception as e:
logger.warning(f"Could not remove temporary cloud object: {e}")
| JobFileManager |
python | dateutil__dateutil | tests/test_tz.py | {
"start": 40332,
"end": 42715
} | class ____(GettzTest):
def gettz(self, name):
zoneinfo_file = zoneinfo.get_zonefile_instance()
return zoneinfo_file.get(name)
def testZoneInfoFileStart1(self):
tz = self.gettz("EST5EDT")
self.assertEqual(datetime(2003, 4, 6, 1, 59, tzinfo=tz).tzname(), "EST",
MISSING_TARBALL)
self.assertEqual(datetime(2003, 4, 6, 2, 00, tzinfo=tz).tzname(), "EDT")
def testZoneInfoFileEnd1(self):
tzc = self.gettz("EST5EDT")
self.assertEqual(datetime(2003, 10, 26, 0, 59, tzinfo=tzc).tzname(),
"EDT", MISSING_TARBALL)
end_est = tz.enfold(datetime(2003, 10, 26, 1, 00, tzinfo=tzc), fold=1)
self.assertEqual(end_est.tzname(), "EST")
def testZoneInfoOffsetSignal(self):
utc = self.gettz("UTC")
nyc = self.gettz("America/New_York")
self.assertNotEqual(utc, None, MISSING_TARBALL)
self.assertNotEqual(nyc, None)
t0 = datetime(2007, 11, 4, 0, 30, tzinfo=nyc)
t1 = t0.astimezone(utc)
t2 = t1.astimezone(nyc)
self.assertEqual(t0, t2)
self.assertEqual(nyc.dst(t0), timedelta(hours=1))
def testZoneInfoCopy(self):
# copy.copy() called on a ZoneInfo file was returning the same instance
CHI = self.gettz('America/Chicago')
CHI_COPY = copy.copy(CHI)
self.assertIsNot(CHI, CHI_COPY)
self.assertEqual(CHI, CHI_COPY)
def testZoneInfoDeepCopy(self):
CHI = self.gettz('America/Chicago')
CHI_COPY = copy.deepcopy(CHI)
self.assertIsNot(CHI, CHI_COPY)
self.assertEqual(CHI, CHI_COPY)
def testZoneInfoInstanceCaching(self):
zif_0 = zoneinfo.get_zonefile_instance()
zif_1 = zoneinfo.get_zonefile_instance()
self.assertIs(zif_0, zif_1)
def testZoneInfoNewInstance(self):
zif_0 = zoneinfo.get_zonefile_instance()
zif_1 = zoneinfo.get_zonefile_instance(new_instance=True)
zif_2 = zoneinfo.get_zonefile_instance()
self.assertIsNot(zif_0, zif_1)
self.assertIs(zif_1, zif_2)
def testZoneInfoDeprecated(self):
with pytest.warns(DeprecationWarning):
zoneinfo.gettz('US/Eastern')
def testZoneInfoMetadataDeprecated(self):
with pytest.warns(DeprecationWarning):
zoneinfo.gettz_db_metadata()
| ZoneInfoGettzTest |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column_v2_test.py | {
"start": 239716,
"end": 255543
} | class ____(test.TestCase):
def test_defaults(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
self.assertEqual('ids_weighted_by_values', column.name)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'ids': parsing_ops.VarLenFeature(dtypes.int64),
'values': parsing_ops.VarLenFeature(dtypes.float32)
}, column.parse_example_spec)
self.assertTrue(column._is_v2_column)
def test_is_v2_column(self):
column = fc.weighted_categorical_column(
categorical_column=fc_old._categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
self.assertFalse(column._is_v2_column)
def test_deep_copy(self):
"""Tests deepcopy of categorical_column_with_hash_bucket."""
original = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
for column in (original, copy.deepcopy(original)):
self.assertEqual('ids_weighted_by_values', column.name)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'ids': parsing_ops.VarLenFeature(dtypes.int64),
'values': parsing_ops.VarLenFeature(dtypes.float32)
}, column.parse_example_spec)
def test_invalid_dtype_none(self):
with self.assertRaisesRegex(ValueError, 'is not convertible to float'):
fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values',
dtype=None)
def test_invalid_dtype_string(self):
with self.assertRaisesRegex(ValueError, 'is not convertible to float'):
fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values',
dtype=dtypes.string)
def test_invalid_input_dtype(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
strings = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('omar', 'stringer', 'marlo'),
dense_shape=(2, 2))
with self.assertRaisesRegex(ValueError, 'Bad dtype'):
fc._transform_features_v2({
'ids': strings,
'values': strings
}, (column,), None)
def test_column_name_collision(self):
with self.assertRaisesRegex(ValueError, r'Parse config.*already exists'):
fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='aaa', num_buckets=3),
weight_feature_key='aaa').parse_example_spec()
def test_missing_weights(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('omar', 'stringer', 'marlo'),
dense_shape=(2, 2))
with self.assertRaisesRegex(ValueError,
'values is not in features dictionary'):
fc._transform_features_v2({'ids': inputs}, (column,), None)
def test_parse_example(self):
a = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
a_weighted = fc.weighted_categorical_column(a, weight_feature_key='weights')
data = example_pb2.Example(
features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer'])),
'weights':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=[1., 10.]))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec_v2([a_weighted]))
self.assertIn('aaa', features)
self.assertIn('weights', features)
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'omar', b'stringer'], dtype=np.object_),
dense_shape=[1, 2]), self.evaluate(features['aaa']))
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([1., 10.], dtype=np.float32),
dense_shape=[1, 2]), self.evaluate(features['weights']))
def test_transform_features(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)), values=(0, 1, 0), dense_shape=(2, 2))
weights = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0.5, 1.0, 0.1),
dense_shape=(2, 2))
id_tensor, weight_tensor = fc._transform_features_v2({
'ids': inputs,
'values': weights,
}, (column,), None)[column]
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array(inputs.values, dtype=np.int64),
dense_shape=inputs.dense_shape), self.evaluate(id_tensor))
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=weights.indices,
values=np.array(weights.values, dtype=np.float32),
dense_shape=weights.dense_shape), self.evaluate(weight_tensor))
def test_transform_features_dense_input(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
weights = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0.5, 1.0, 0.1),
dense_shape=(2, 2))
id_tensor, weight_tensor = fc._transform_features_v2({
'ids': ((0, -1), (1, 0)),
'values': weights,
}, (column,), None)[column]
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=np.array((0, 1, 0), dtype=np.int64),
dense_shape=(2, 2)), self.evaluate(id_tensor))
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=weights.indices,
values=np.array(weights.values, dtype=np.float32),
dense_shape=weights.dense_shape), self.evaluate(weight_tensor))
def test_transform_features_dense_weights(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)), values=(2, 1, 0), dense_shape=(2, 2))
id_tensor, weight_tensor = fc._transform_features_v2({
'ids': inputs,
'values': ((.5, 0.), (1., .1)),
}, (column,), None)[column]
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array(inputs.values, dtype=np.int64),
dense_shape=inputs.dense_shape), self.evaluate(id_tensor))
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=np.array((.5, 1., .1), dtype=np.float32),
dense_shape=(2, 2)), self.evaluate(weight_tensor))
def test_old_linear_model(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
with ops.Graph().as_default():
predictions = fc_old.linear_model({
'ids':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2)),
'values':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(.5, 1., .1),
dense_shape=(2, 2))
}, (column,))
bias = get_linear_model_bias()
weight_var = get_linear_model_column_var(column)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
self.evaluate(weight_var.assign(((1.,), (2.,), (3.,))))
# weight_var[0] * weights[0, 0] = 1 * .5 = .5
# weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1]
# = 3*1 + 2*.1 = 3+.2 = 3.2
self.assertAllClose(((.5,), (3.2,)), self.evaluate(predictions))
def test_old_linear_model_mismatched_shape(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
with ops.Graph().as_default():
with self.assertRaisesRegex(ValueError,
r'Dimensions.*are not compatible'):
fc_old.linear_model({
'ids':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2)),
'values':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (0, 1), (1, 0), (1, 1)),
values=(.5, 11., 1., .1),
dense_shape=(2, 2))
}, (column,))
def test_old_linear_model_mismatched_dense_values(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
with ops.Graph().as_default():
predictions = fc_old.linear_model({
'ids':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2)),
'values': ((.5,), (1.,))
}, (column,),
sparse_combiner='mean')
# Disabling the constant folding optimizer here since it changes the
# error message differently on CPU and GPU.
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
with _initialized_session(config):
with self.assertRaisesRegex(errors.OpError, 'Incompatible shapes'):
self.evaluate(predictions)
def test_old_linear_model_mismatched_dense_shape(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
with ops.Graph().as_default():
predictions = fc_old.linear_model({
'ids':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2)),
'values': ((.5,), (1.,), (.1,))
}, (column,))
bias = get_linear_model_bias()
weight_var = get_linear_model_column_var(column)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
self.evaluate(weight_var.assign(((1.,), (2.,), (3.,))))
# weight_var[0] * weights[0, 0] = 1 * .5 = .5
# weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1]
# = 3*1 + 2*.1 = 3+.2 = 3.2
self.assertAllClose(((.5,), (3.2,)), self.evaluate(predictions))
def test_old_linear_model_old_categorical(self):
column = fc.weighted_categorical_column(
categorical_column=fc_old._categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
with ops.Graph().as_default():
predictions = fc_old.linear_model({
'ids':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2)),
'values':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(.5, 1., .1),
dense_shape=(2, 2))
}, (column,))
bias = get_linear_model_bias()
weight_var = get_linear_model_column_var(column)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
self.evaluate(weight_var.assign(((1.,), (2.,), (3.,))))
# weight_var[0] * weights[0, 0] = 1 * .5 = .5
# weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1]
# = 3*1 + 2*.1 = 3+.2 = 3.2
self.assertAllClose(((.5,), (3.2,)), self.evaluate(predictions))
# TODO(ptucker): Add test with embedding of weighted categorical.
def test_serialization(self):
categorical_column = fc.categorical_column_with_identity(
key='ids', num_buckets=3)
column = fc.weighted_categorical_column(
categorical_column=categorical_column, weight_feature_key='weight')
self.assertEqual([categorical_column, 'weight'], column.parents)
config = column.get_config()
self.assertEqual({
'categorical_column': {
'config': {
'key': 'ids',
'number_buckets': 3,
'default_value': None
},
'class_name': 'IdentityCategoricalColumn'
},
'dtype': 'float32',
'weight_feature_key': 'weight'
}, config)
self.assertEqual(column, fc.WeightedCategoricalColumn.from_config(config))
new_column = fc.WeightedCategoricalColumn.from_config(
config,
columns_by_name={
serialization._column_name_with_class_name(categorical_column):
categorical_column
})
self.assertEqual(column, new_column)
self.assertIs(categorical_column, new_column.categorical_column)
if __name__ == '__main__':
test.main()
| WeightedCategoricalColumnTest |
python | huggingface__transformers | src/transformers/models/rt_detr_v2/modeling_rt_detr_v2.py | {
"start": 5447,
"end": 10385
} | class ____(nn.Module):
"""
RTDetrV2 version of multiscale deformable attention, extending the base implementation
with improved offset handling and initialization.
"""
def __init__(self, config: RTDetrV2Config):
super().__init__()
num_heads = config.decoder_attention_heads
n_points = config.decoder_n_points
if config.d_model % num_heads != 0:
raise ValueError(
f"embed_dim (d_model) must be divisible by num_heads, but got {config.d_model} and {num_heads}"
)
dim_per_head = config.d_model // num_heads
# check if dim_per_head is power of 2
if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0):
warnings.warn(
"You'd better set embed_dim (d_model) in RTDetrV2MultiscaleDeformableAttention to make the"
" dimension of each attention head a power of 2 which is more efficient in the authors' CUDA"
" implementation."
)
self.im2col_step = 64
self.d_model = config.d_model
# V2-specific attributes
self.n_levels = config.decoder_n_levels
self.n_heads = num_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(config.d_model, num_heads * self.n_levels * n_points * 2)
self.attention_weights = nn.Linear(config.d_model, num_heads * self.n_levels * n_points)
self.value_proj = nn.Linear(config.d_model, config.d_model)
self.output_proj = nn.Linear(config.d_model, config.d_model)
self.offset_scale = config.decoder_offset_scale
self.method = config.decoder_method
# Initialize n_points list and scale
n_points_list = [self.n_points for _ in range(self.n_levels)]
self.n_points_list = n_points_list
n_points_scale = [1 / n for n in n_points_list for _ in range(n)]
self.register_buffer("n_points_scale", torch.tensor(n_points_scale, dtype=torch.float32))
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states=None,
encoder_attention_mask=None,
position_embeddings: Optional[torch.Tensor] = None,
reference_points=None,
spatial_shapes=None,
spatial_shapes_list=None,
level_start_index=None,
output_attentions: bool = False,
):
# Process inputs up to sampling locations calculation using parent class logic
if position_embeddings is not None:
hidden_states = hidden_states + position_embeddings
batch_size, num_queries, _ = hidden_states.shape
batch_size, sequence_length, _ = encoder_hidden_states.shape
if not is_torchdynamo_compiling() and (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length:
raise ValueError(
"Make sure to align the spatial shapes with the sequence length of the encoder hidden states"
)
value = self.value_proj(encoder_hidden_states)
if attention_mask is not None:
value = value.masked_fill(~attention_mask[..., None], float(0))
value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads)
# V2-specific sampling offsets shape
sampling_offsets = self.sampling_offsets(hidden_states).view(
batch_size, num_queries, self.n_heads, self.n_levels * self.n_points, 2
)
attention_weights = self.attention_weights(hidden_states).view(
batch_size, num_queries, self.n_heads, self.n_levels * self.n_points
)
attention_weights = F.softmax(attention_weights, -1)
# V2-specific sampling locations calculation
if reference_points.shape[-1] == 2:
offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
sampling_locations = (
reference_points[:, :, None, :, None, :]
+ sampling_offsets / offset_normalizer[None, None, None, :, None, :]
)
elif reference_points.shape[-1] == 4:
n_points_scale = self.n_points_scale.to(dtype=hidden_states.dtype).unsqueeze(-1)
offset = sampling_offsets * n_points_scale * reference_points[:, :, None, :, 2:] * self.offset_scale
sampling_locations = reference_points[:, :, None, :, :2] + offset
else:
raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}")
# V2-specific attention implementation choice
output = multi_scale_deformable_attention_v2(
value, spatial_shapes_list, sampling_locations, attention_weights, self.n_points_list, self.method
)
output = self.output_proj(output)
return output, attention_weights
| RTDetrV2MultiscaleDeformableAttention |
python | pypa__packaging | src/packaging/_parser.py | {
"start": 1045,
"end": 10368
} | class ____(NamedTuple):
name: str
url: str
extras: list[str]
specifier: str
marker: MarkerList | None
# --------------------------------------------------------------------------------------
# Recursive descent parser for dependency specifier
# --------------------------------------------------------------------------------------
def parse_requirement(source: str) -> ParsedRequirement:
return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES))
def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement:
"""
requirement = WS? IDENTIFIER WS? extras WS? requirement_details
"""
tokenizer.consume("WS")
name_token = tokenizer.expect(
"IDENTIFIER", expected="package name at the start of dependency specifier"
)
name = name_token.text
tokenizer.consume("WS")
extras = _parse_extras(tokenizer)
tokenizer.consume("WS")
url, specifier, marker = _parse_requirement_details(tokenizer)
tokenizer.expect("END", expected="end of dependency specifier")
return ParsedRequirement(name, url, extras, specifier, marker)
def _parse_requirement_details(
tokenizer: Tokenizer,
) -> tuple[str, str, MarkerList | None]:
"""
requirement_details = AT URL (WS requirement_marker?)?
| specifier WS? (requirement_marker)?
"""
specifier = ""
url = ""
marker = None
if tokenizer.check("AT"):
tokenizer.read()
tokenizer.consume("WS")
url_start = tokenizer.position
url = tokenizer.expect("URL", expected="URL after @").text
if tokenizer.check("END", peek=True):
return (url, specifier, marker)
tokenizer.expect("WS", expected="whitespace after URL")
# The input might end after whitespace.
if tokenizer.check("END", peek=True):
return (url, specifier, marker)
marker = _parse_requirement_marker(
tokenizer,
span_start=url_start,
expected="semicolon (after URL and whitespace)",
)
else:
specifier_start = tokenizer.position
specifier = _parse_specifier(tokenizer)
tokenizer.consume("WS")
if tokenizer.check("END", peek=True):
return (url, specifier, marker)
marker = _parse_requirement_marker(
tokenizer,
span_start=specifier_start,
expected=(
"comma (within version specifier), semicolon (after version specifier)"
if specifier
else "semicolon (after name with no version specifier)"
),
)
return (url, specifier, marker)
def _parse_requirement_marker(
tokenizer: Tokenizer, *, span_start: int, expected: str
) -> MarkerList:
"""
requirement_marker = SEMICOLON marker WS?
"""
if not tokenizer.check("SEMICOLON"):
tokenizer.raise_syntax_error(
f"Expected {expected} or end",
span_start=span_start,
span_end=None,
)
tokenizer.read()
marker = _parse_marker(tokenizer)
tokenizer.consume("WS")
return marker
def _parse_extras(tokenizer: Tokenizer) -> list[str]:
"""
extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)?
"""
if not tokenizer.check("LEFT_BRACKET", peek=True):
return []
with tokenizer.enclosing_tokens(
"LEFT_BRACKET",
"RIGHT_BRACKET",
around="extras",
):
tokenizer.consume("WS")
extras = _parse_extras_list(tokenizer)
tokenizer.consume("WS")
return extras
def _parse_extras_list(tokenizer: Tokenizer) -> list[str]:
"""
extras_list = identifier (wsp* ',' wsp* identifier)*
"""
extras: list[str] = []
if not tokenizer.check("IDENTIFIER"):
return extras
extras.append(tokenizer.read().text)
while True:
tokenizer.consume("WS")
if tokenizer.check("IDENTIFIER", peek=True):
tokenizer.raise_syntax_error("Expected comma between extra names")
elif not tokenizer.check("COMMA"):
break
tokenizer.read()
tokenizer.consume("WS")
extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma")
extras.append(extra_token.text)
return extras
def _parse_specifier(tokenizer: Tokenizer) -> str:
"""
specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS
| WS? version_many WS?
"""
with tokenizer.enclosing_tokens(
"LEFT_PARENTHESIS",
"RIGHT_PARENTHESIS",
around="version specifier",
):
tokenizer.consume("WS")
parsed_specifiers = _parse_version_many(tokenizer)
tokenizer.consume("WS")
return parsed_specifiers
def _parse_version_many(tokenizer: Tokenizer) -> str:
"""
version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)?
"""
parsed_specifiers = ""
while tokenizer.check("SPECIFIER"):
span_start = tokenizer.position
parsed_specifiers += tokenizer.read().text
if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True):
tokenizer.raise_syntax_error(
".* suffix can only be used with `==` or `!=` operators",
span_start=span_start,
span_end=tokenizer.position + 1,
)
if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True):
tokenizer.raise_syntax_error(
"Local version label can only be used with `==` or `!=` operators",
span_start=span_start,
span_end=tokenizer.position,
)
tokenizer.consume("WS")
if not tokenizer.check("COMMA"):
break
parsed_specifiers += tokenizer.read().text
tokenizer.consume("WS")
return parsed_specifiers
# --------------------------------------------------------------------------------------
# Recursive descent parser for marker expression
# --------------------------------------------------------------------------------------
def parse_marker(source: str) -> MarkerList:
return _parse_full_marker(Tokenizer(source, rules=DEFAULT_RULES))
def _parse_full_marker(tokenizer: Tokenizer) -> MarkerList:
retval = _parse_marker(tokenizer)
tokenizer.expect("END", expected="end of marker expression")
return retval
def _parse_marker(tokenizer: Tokenizer) -> MarkerList:
"""
marker = marker_atom (BOOLOP marker_atom)+
"""
expression = [_parse_marker_atom(tokenizer)]
while tokenizer.check("BOOLOP"):
token = tokenizer.read()
expr_right = _parse_marker_atom(tokenizer)
expression.extend((token.text, expr_right))
return expression
def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom:
"""
marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS?
| WS? marker_item WS?
"""
tokenizer.consume("WS")
if tokenizer.check("LEFT_PARENTHESIS", peek=True):
with tokenizer.enclosing_tokens(
"LEFT_PARENTHESIS",
"RIGHT_PARENTHESIS",
around="marker expression",
):
tokenizer.consume("WS")
marker: MarkerAtom = _parse_marker(tokenizer)
tokenizer.consume("WS")
else:
marker = _parse_marker_item(tokenizer)
tokenizer.consume("WS")
return marker
def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem:
"""
marker_item = WS? marker_var WS? marker_op WS? marker_var WS?
"""
tokenizer.consume("WS")
marker_var_left = _parse_marker_var(tokenizer)
tokenizer.consume("WS")
marker_op = _parse_marker_op(tokenizer)
tokenizer.consume("WS")
marker_var_right = _parse_marker_var(tokenizer)
tokenizer.consume("WS")
return (marker_var_left, marker_op, marker_var_right)
def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar: # noqa: RET503
"""
marker_var = VARIABLE | QUOTED_STRING
"""
if tokenizer.check("VARIABLE"):
return process_env_var(tokenizer.read().text.replace(".", "_"))
elif tokenizer.check("QUOTED_STRING"):
return process_python_str(tokenizer.read().text)
else:
tokenizer.raise_syntax_error(
message="Expected a marker variable or quoted string"
)
def process_env_var(env_var: str) -> Variable:
if env_var in ("platform_python_implementation", "python_implementation"):
return Variable("platform_python_implementation")
else:
return Variable(env_var)
def process_python_str(python_str: str) -> Value:
value = ast.literal_eval(python_str)
return Value(str(value))
def _parse_marker_op(tokenizer: Tokenizer) -> Op:
"""
marker_op = IN | NOT IN | OP
"""
if tokenizer.check("IN"):
tokenizer.read()
return Op("in")
elif tokenizer.check("NOT"):
tokenizer.read()
tokenizer.expect("WS", expected="whitespace after 'not'")
tokenizer.expect("IN", expected="'in' after 'not'")
return Op("not in")
elif tokenizer.check("OP"):
return Op(tokenizer.read().text)
else:
return tokenizer.raise_syntax_error(
"Expected marker operator, one of <=, <, !=, ==, >=, >, ~=, ===, in, not in"
)
| ParsedRequirement |
python | PrefectHQ__prefect | src/integrations/prefect-aws/tests/test_batch.py | {
"start": 2019,
"end": 3188
} | class ____:
def test_batch_submit(self, job_queue_arn, job_definition_arn, aws_credentials):
@flow
def test_flow():
return batch_submit(
"batch_test_job",
job_queue_arn,
job_definition_arn,
aws_credentials,
)
job_id = test_flow()
assert_valid_job_id(job_id)
async def test_batch_submit_async_dispatch(
self, job_queue_arn, job_definition_arn, aws_credentials
):
@flow
async def test_flow():
return await batch_submit(
"batch_test_job",
job_queue_arn,
job_definition_arn,
aws_credentials,
)
job_id = await test_flow()
assert_valid_job_id(job_id)
async def test_batch_submit_force_sync_from_async(
self, job_queue_arn, job_definition_arn, aws_credentials
):
job_id = batch_submit(
"batch_test_job",
job_queue_arn,
job_definition_arn,
aws_credentials,
_sync=True,
)
assert_valid_job_id(job_id)
| TestBatchSubmit |
python | realpython__materials | wordcount/tests/realpython/resources.py | {
"start": 416,
"end": 938
} | class ____(ABC):
slug: str
title: str | None = None
@property
def slug_clean(self) -> str:
return self.slug.strip("/")
@property
def title_pretty(self) -> str:
if self.title is None:
return self.slug_clean.replace("-", " ").title()
else:
return self.title
@property
@abstractmethod
def url(self) -> str:
pass
def __str__(self) -> str:
return f"[{self.title_pretty}]({self.url})"
@dataclass(unsafe_hash=True)
| Resource |
python | cython__cython | Cython/Compiler/Annotate.py | {
"start": 13208,
"end": 13557
} | class ____:
def __init__(self, style, text, tag="", size=0):
self.style = style
self.text = text
self.tag = tag
self.size = size
def start(self):
return "<span class='cython tag %s' title='%s'>%s" % (self.style, self.text, self.tag)
def end(self):
return self.size, "</span>"
| AnnotationItem |
python | tornadoweb__tornado | tornado/httputil.py | {
"start": 2168,
"end": 4941
} | class ____:
"""Class that holds a subset of ABNF rules from RFC 9110 and friends.
Class attributes are re.Pattern objects, with the same name as in the RFC
(with hyphens changed to underscores). Currently contains only the subset
we use (which is why this class is not public). Unfortunately the fields
cannot be alphabetized as they are in the RFCs because of dependencies.
"""
# RFC 3986 (URI)
# The URI hostname ABNF is both complex (including detailed vaildation of IPv4 and IPv6
# literals) and not strict enough (a lot of punctuation is allowed by the ABNF even though
# it is not allowed by DNS). We simplify it by allowing square brackets and colons in any
# position, not only for their use in IPv6 literals.
uri_unreserved = re.compile(r"[A-Za-z0-9\-._~]")
uri_sub_delims = re.compile(r"[!$&'()*+,;=]")
uri_pct_encoded = re.compile(r"%[0-9A-Fa-f]{2}")
uri_host = re.compile(
rf"(?:[\[\]:]|{uri_unreserved.pattern}|{uri_sub_delims.pattern}|{uri_pct_encoded.pattern})*"
)
uri_port = re.compile(r"[0-9]*")
# RFC 5234 (ABNF)
VCHAR = re.compile(r"[\x21-\x7E]")
# RFC 9110 (HTTP Semantics)
obs_text = re.compile(r"[\x80-\xFF]")
field_vchar = re.compile(rf"(?:{VCHAR.pattern}|{obs_text.pattern})")
# Not exactly from the RFC to simplify and combine field-content and field-value.
field_value = re.compile(
rf"|"
rf"{field_vchar.pattern}|"
rf"{field_vchar.pattern}(?:{field_vchar.pattern}| |\t)*{field_vchar.pattern}"
)
tchar = re.compile(r"[!#$%&'*+\-.^_`|~0-9A-Za-z]")
token = re.compile(rf"{tchar.pattern}+")
field_name = token
method = token
host = re.compile(rf"(?:{uri_host.pattern})(?::{uri_port.pattern})?")
# RFC 9112 (HTTP/1.1)
HTTP_version = re.compile(r"HTTP/[0-9]\.[0-9]")
reason_phrase = re.compile(rf"(?:[\t ]|{VCHAR.pattern}|{obs_text.pattern})+")
# request_target delegates to the URI RFC 3986, which is complex and may be
# too restrictive (for example, the WHATWG version of the URL spec allows non-ASCII
# characters). Instead, we allow everything but control chars and whitespace.
request_target = re.compile(rf"{field_vchar.pattern}+")
request_line = re.compile(
rf"({method.pattern}) ({request_target.pattern}) ({HTTP_version.pattern})"
)
status_code = re.compile(r"[0-9]{3}")
status_line = re.compile(
rf"({HTTP_version.pattern}) ({status_code.pattern}) ({reason_phrase.pattern})?"
)
@lru_cache(1000)
def _normalize_header(name: str) -> str:
"""Map a header name to Http-Header-Case.
>>> _normalize_header("coNtent-TYPE")
'Content-Type'
"""
return "-".join([w.capitalize() for w in name.split("-")])
| _ABNF |
python | weaviate__weaviate-python-client | weaviate/collections/classes/batch.py | {
"start": 543,
"end": 799
} | class ____:
collection: str
vector: Optional[VECTORS]
uuid: str
properties: Optional[Dict[str, WeaviateField]]
tenant: Optional[str]
references: Optional[ReferenceInputs]
index: int
retry_count: int = 0
@dataclass
| _BatchObject |
python | pytorch__pytorch | torch/_subclasses/_fake_tensor_utils.py | {
"start": 2272,
"end": 3053
} | class ____:
"""
Represents a SymInt, SymFloat, SymBool without the associated ShapeEnv
"""
ty: type[PySymType]
node: _DeconstructedSymNode
@staticmethod
def from_sym_type(value: PySymType) -> _DeconstructedSymType:
return _DeconstructedSymType(type(value), value.node)
def extract(self, shape_env: ShapeEnv) -> PySymType:
return self.ty(self.node.extract(shape_env))
def __str__(self) -> str:
return f"{self.ty}({self.node})"
def __repr__(self) -> str:
return f"_DeconstructedSymType({self.ty}, {self.node!r})"
def __eq__(self, other: object) -> bool:
return NotImplemented
def __hash__(self) -> int:
return NotImplemented
@dataclass(frozen=True, slots=True)
| _DeconstructedSymType |
python | joke2k__faker | tests/providers/test_phone_number.py | {
"start": 18462,
"end": 18955
} | class ____:
"""Test sk_SK phone number provider methods"""
def test_phone_number(self, faker, num_samples):
pattern: Pattern = re.compile(
r"(^(00421|\+421)? ?[2] ?[0-9]{8}$)|"
r"(^(00421|\+421)? ?[3-5][0-9] ?[0-9]{3} ?[0-9]{4}$)|"
r"(^(00421|\+421)? ?[9][0-9]{2} ?[0-9]{3} ?[0-9]{3}$)"
)
for _ in range(num_samples):
phone_number = faker.phone_number()
assert pattern.fullmatch(phone_number)
| TestSkSk |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/tests/test_subprocess_env_manager.py | {
"start": 11368,
"end": 12811
} | class ____(Exception):
pass
@pytest.mark.parametrize("num_envs", [1, 4])
def test_subprocess_failing_step(num_envs):
def failing_step_env_factory(_worker_id, _config):
env = UnexpectedExceptionEnvironment(
["1D"], use_discrete=True, to_raise=CustomTestOnlyException
)
return env
env_manager = SubprocessEnvManager(failing_step_env_factory, RunOptions())
# Expect the exception raised to be routed back up to the top level.
with pytest.raises(CustomTestOnlyException):
check_environment_trains(
failing_step_env_factory(0, []),
{"1D": ppo_dummy_config()},
env_manager=env_manager,
success_threshold=None,
)
env_manager.close()
@pytest.mark.parametrize("num_envs", [1, 4])
def test_subprocess_env_raises_errors(num_envs):
def failing_env_factory(worker_id, config):
import time
# Sleep momentarily to allow time for the EnvManager to be waiting for the
# subprocess response. We won't be able to capture failures from the subprocess
# that cause it to close the pipe before we can send the first message.
time.sleep(0.5)
raise UnityEnvironmentException()
env_manager = SubprocessEnvManager(failing_env_factory, RunOptions(), num_envs)
with pytest.raises(UnityEnvironmentException):
env_manager.reset()
env_manager.close()
| CustomTestOnlyException |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchClass3.py | {
"start": 1135,
"end": 1261
} | class ____:
x: int
match E(1):
case ProtoE(x):
pass
case y:
reveal_type(y, expected_text="Never")
| E |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/errors.py | {
"start": 7852,
"end": 8020
} | class ____(DagsterError):
"""Indicates the user has violated a well-defined invariant that can only be enforced
at runtime.
"""
| DagsterInvariantViolationError |
python | tornadoweb__tornado | maint/test/redbot/red_test.py | {
"start": 960,
"end": 1132
} | class ____(RequestHandler):
def get(self, computed_etag):
self.write(computed_etag)
def compute_etag(self):
return self._write_buffer[0]
| CacheHandler |
python | joke2k__faker | tests/providers/test_job.py | {
"start": 3840,
"end": 4029
} | class ____:
"""Test ko_KR job provider"""
def test_job(self, faker, num_samples):
for _ in range(num_samples):
assert faker.job() in KoKrJobProvider.jobs
| TestKoKr |
python | apache__airflow | providers/grpc/tests/unit/grpc/hooks/test_grpc.py | {
"start": 1935,
"end": 2340
} | class ____:
def __init__(self, _):
pass
def single_call(self, data):
return data
def stream_call(self, data):
return ["streaming", "call"]
@pytest.fixture
def channel_mock():
    """Mock grpc.Channel to capture its call args; it returns nothing so mock training is unnecessary."""
with patch("grpc.Channel") as grpc_channel:
yield grpc_channel
| StubClass |
python | google__pytype | pytype/blocks/blocks_test.py | {
"start": 891,
"end": 9126
} | class ____(BaseBlocksTest):
"""Tests for order_code in blocks.py."""
def _order_code(self, code):
"""Helper function to disassemble and then order code."""
ordered, _ = blocks.process_code(code)
return ordered
def test_trivial(self):
# Disassembled from:
# | return None
co = self.make_code(
[
(o.LOAD_CONST, 0),
(o.RETURN_VALUE, 0),
],
name="trivial",
)
ordered_code = self._order_code(co)
(b0,) = ordered_code.order
self.assertEqual(len(b0.code), 2)
self.assertCountEqual([], b0.incoming)
self.assertCountEqual([], b0.outgoing)
def test_has_opcode(self):
# Disassembled from:
# | return None
co = self.make_code(
[
(o.LOAD_CONST, 0),
(o.RETURN_VALUE, 0),
],
name="trivial",
)
ordered_code = self._order_code(co)
self.assertTrue(ordered_code.has_opcode(opcodes.LOAD_CONST))
self.assertTrue(ordered_code.has_opcode(opcodes.RETURN_VALUE))
self.assertFalse(ordered_code.has_opcode(opcodes.POP_TOP))
def test_yield(self):
# Disassembled from:
# | yield 1
# | yield None
co = self.make_code(
[
# b0:
(o.LOAD_CONST, 0),
(o.YIELD_VALUE, 0),
# b1:
(o.POP_TOP, 0),
(o.LOAD_CONST, 0),
(o.RETURN_VALUE, 0),
],
name="yield",
)
ordered_code = self._order_code(co)
self.assertEqual(ordered_code.name, "yield")
b0, b1 = ordered_code.order
self.assertCountEqual(b0.outgoing, [b1])
self.assertCountEqual(b1.incoming, [b0])
self.assertCountEqual(b0.incoming, [])
self.assertCountEqual(b1.outgoing, [])
def test_triangle(self):
# Disassembled from:
# | x = y
# | if y > 1:
# | x -= 2
# | return x
co = self.make_code(
[
# b0:
(o.LOAD_GLOBAL, 0),
(o.STORE_FAST, 0),
(o.LOAD_GLOBAL, 0),
(o.LOAD_CONST, 1),
(o.COMPARE_OP, 4),
(o.POP_JUMP_IF_FALSE, 10),
# b1:
(o.LOAD_FAST, 0),
(o.LOAD_CONST, 2),
(o.INPLACE_SUBTRACT, 0),
(o.STORE_FAST, 0),
# b2:
(o.LOAD_FAST, 0),
(o.RETURN_VALUE, 0),
],
name="triangle",
)
ordered_code = self._order_code(co)
self.assertEqual(ordered_code.name, "triangle")
b0, b1, b2 = ordered_code.order
self.assertCountEqual(b0.incoming, [])
self.assertCountEqual(b0.outgoing, [b1, b2])
self.assertCountEqual(b1.incoming, [b0])
self.assertCountEqual(b1.outgoing, [b2])
self.assertCountEqual(b2.incoming, [b0, b1])
self.assertCountEqual(b2.outgoing, [])
def test_diamond(self):
# Disassembled from:
# | x = y
# | if y > 1:
# | x -= 2
# | else:
# | x += 2
# | return x
co = self.make_code(
[
# b0:
(o.LOAD_GLOBAL, 0),
(o.STORE_FAST, 0),
(o.LOAD_GLOBAL, 0),
(o.LOAD_CONST, 1),
(o.COMPARE_OP, 4),
(o.POP_JUMP_IF_FALSE, 12),
# b1:
(o.LOAD_FAST, 0),
(o.LOAD_CONST, 0),
(o.INPLACE_SUBTRACT, 0),
(o.STORE_FAST, 0),
(o.LOAD_FAST, 0),
(o.RETURN_VALUE, 0),
# b2:
(o.LOAD_FAST, 0),
(o.LOAD_CONST, 0),
(o.INPLACE_ADD, 0),
(o.STORE_FAST, 0),
(o.LOAD_FAST, 0),
(o.RETURN_VALUE, 0),
],
name="diamond",
)
ordered_code = self._order_code(co)
self.assertEqual(ordered_code.name, "diamond")
b0, b1, b2 = ordered_code.order
self.assertCountEqual(b0.incoming, [])
self.assertCountEqual(b0.outgoing, [b1, b2])
self.assertCountEqual(b1.incoming, [b0])
self.assertCountEqual(b2.incoming, [b0])
def test_raise(self):
# Disassembled from:
# | raise ValueError()
# | return 1
co = self.make_code(
[
# b0:
(o.LOAD_GLOBAL, 0),
(o.CALL_FUNCTION, 0),
(o.RAISE_VARARGS, 1),
(o.LOAD_CONST, 1),
(o.RETURN_VALUE, 0), # dead.
],
name="raise",
)
ordered_code = self._order_code(co)
self.assertEqual(ordered_code.name, "raise")
b0, b1 = ordered_code.order
self.assertEqual(len(b0.code), 2)
self.assertCountEqual(b0.incoming, [])
self.assertCountEqual(b0.outgoing, [b1])
self.assertCountEqual(b1.incoming, [b0])
self.assertCountEqual(b1.outgoing, [])
def test_call(self):
# Disassembled from:
# | f()
co = self.make_code(
[
# b0:
(o.LOAD_GLOBAL, 0),
(o.CALL_FUNCTION, 0),
# b1:
(o.POP_TOP, 0),
(o.LOAD_CONST, 0),
(o.RETURN_VALUE, 0),
],
name="call",
)
ordered_code = self._order_code(co)
b0, b1 = ordered_code.order
self.assertEqual(len(b0.code), 2)
self.assertEqual(len(b1.code), 3)
self.assertCountEqual(b0.outgoing, [b1])
def test_finally(self):
# Disassembled from:
# | try:
# | pass
# | finally:
# | pass
co = self.make_code(
[
# b0:
(o.SETUP_FINALLY, 3),
(o.POP_BLOCK, 0),
# b1:
(o.LOAD_CONST, 0),
(o.RETURN_VALUE, 0),
# b2:
(o.RERAISE, 0),
],
name="finally",
)
ordered_code = self._order_code(co)
b0, b1, b2 = ordered_code.order
self.assertEqual(len(b0.code), 2)
self.assertEqual(len(b1.code), 2)
self.assertEqual(len(b2.code), 1)
self.assertCountEqual(b0.outgoing, [b1, b2])
def test_except(self):
# Disassembled from:
# | try:
# | pass
# | except:
# | pass
co = self.make_code(
[
# b0:
(o.SETUP_FINALLY, 3),
(o.POP_BLOCK, 0),
# b1:
(o.LOAD_CONST, 0),
(o.RETURN_VALUE, 0),
# b2:
(o.POP_TOP, 0),
(o.POP_TOP, 0),
(o.POP_TOP, 0),
(o.POP_EXCEPT, 0),
(o.LOAD_CONST, 0),
(o.RETURN_VALUE, 0),
],
name="except",
)
ordered_code = self._order_code(co)
b0, b1, b2 = ordered_code.order
self.assertEqual(len(b0.code), 2)
self.assertEqual(len(b1.code), 2)
self.assertEqual(len(b2.code), 6)
self.assertCountEqual([b1, b2], b0.outgoing)
def test_return(self):
# Disassembled from:
# | return None
# | return None
co = self.make_code(
[
(o.LOAD_CONST, 0),
(o.RETURN_VALUE, 0), # dead.
(o.LOAD_CONST, 1), # dead.
(o.RETURN_VALUE, 0), # dead.
],
name="return",
)
ordered_code = self._order_code(co)
(b0,) = ordered_code.order
self.assertEqual(len(b0.code), 2)
def test_with(self):
# Disassembled from:
# | with None:
# | pass
co = self.make_code(
[
# b0:
(o.LOAD_CONST, 0),
(o.SETUP_WITH, 9),
(o.POP_TOP, 0),
(o.POP_BLOCK, 0),
# b1:
(o.LOAD_CONST, 0),
(o.DUP_TOP, 0),
(o.DUP_TOP, 0),
(o.CALL_FUNCTION, 3),
# b2:
(o.POP_TOP, 0),
(o.LOAD_CONST, 0),
(o.RETURN_VALUE, 0),
# b3:
(o.WITH_EXCEPT_START, 0),
(o.POP_JUMP_IF_TRUE, 14),
# b4:
(o.RERAISE, 1),
# b5:
(o.POP_TOP, 0),
(o.POP_TOP, 0),
(o.POP_TOP, 0),
(o.POP_EXCEPT, 0),
(o.POP_TOP, 0),
(o.LOAD_CONST, 0),
(o.RETURN_VALUE, 0),
],
name="with",
)
ordered_code = self._order_code(co)
b0, b1, b2, b3, b4, b5 = ordered_code.order
self.assertEqual(len(b0.code), 4)
self.assertEqual(len(b1.code), 4)
self.assertEqual(len(b2.code), 3)
self.assertEqual(len(b3.code), 2)
self.assertEqual(len(b4.code), 1)
self.assertEqual(len(b5.code), 7)
| OrderingTest |
python | huggingface__transformers | tests/quantization/bnb/test_mixed_int8.py | {
"start": 17608,
"end": 22500
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.model_name = "google-t5/t5-small"
cls.dense_act_model_name = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
cls.input_text = "Translate in German: Hello, my dog is cute"
def tearDown(self):
r"""
TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to
avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
"""
gc.collect()
backend_empty_cache(torch_device)
def test_inference_without_keep_in_fp32(self):
r"""
Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly.
`flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test
both cases.
"""
from transformers import T5ForConditionalGeneration
modules = T5ForConditionalGeneration._keep_in_fp32_modules
T5ForConditionalGeneration._keep_in_fp32_modules = None
# test with `google-t5/t5-small`
model = T5ForConditionalGeneration.from_pretrained(
self.model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="auto"
)
encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device)
_ = model.generate(**encoded_input)
# test with `flan-t5-small`
model = T5ForConditionalGeneration.from_pretrained(
self.dense_act_model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="auto"
)
encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device)
_ = model.generate(**encoded_input)
T5ForConditionalGeneration._keep_in_fp32_modules = modules
def test_inference_with_keep_in_fp32(self):
r"""
Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly.
`flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test
both cases.
"""
from transformers import T5ForConditionalGeneration
# test with `google-t5/t5-small`
model = T5ForConditionalGeneration.from_pretrained(
self.model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="auto"
)
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear8bitLt))
encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device)
_ = model.generate(**encoded_input)
# test with `flan-t5-small`
model = T5ForConditionalGeneration.from_pretrained(
self.dense_act_model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="auto"
)
encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device)
_ = model.generate(**encoded_input)
def test_inference_with_keep_in_fp32_serialized(self):
r"""
Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly on
a serialized model.
`flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test
both cases.
"""
from transformers import T5ForConditionalGeneration
# test with `google-t5/t5-small`
model = T5ForConditionalGeneration.from_pretrained(
self.model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="auto"
)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
model = T5ForConditionalGeneration.from_pretrained(tmp_dir)
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear8bitLt))
encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device)
_ = model.generate(**encoded_input)
# test with `flan-t5-small`
model = T5ForConditionalGeneration.from_pretrained(
self.dense_act_model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="auto"
)
encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device)
_ = model.generate(**encoded_input)
| MixedInt8T5Test |
python | huggingface__transformers | tests/pipelines/test_pipelines_document_question_answering.py | {
"start": 1806,
"end": 15234
} | class ____(unittest.TestCase):
model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def get_test_pipeline(
self,
model,
tokenizer=None,
image_processor=None,
feature_extractor=None,
processor=None,
dtype="float32",
):
dqa_pipeline = DocumentQuestionAnsweringPipeline(
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
image_processor=image_processor,
processor=processor,
dtype=dtype,
max_new_tokens=20,
)
image = INVOICE_URL
word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
question = "What is the placebo?"
examples = [
{
"image": load_image(image),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
def run_pipeline_test(self, dqa_pipeline, examples):
outputs = dqa_pipeline(examples, top_k=2)
self.assertEqual(
outputs,
[
[
{"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
{"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
]
]
* 3,
)
@require_torch
@require_detectron2
@require_pytesseract
def test_small_model_pt(self):
dqa_pipeline = pipeline(
"document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2-for-dqa-test"
)
image = INVOICE_URL
question = "How many cats are there?"
expected_output = [
{"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(outputs, [])
# We can optionally pass directly the words and bounding boxes
image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
words = []
boxes = []
outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
self.assertEqual(outputs, [])
@require_torch
@require_torch_bf16
@require_detectron2
@require_pytesseract
def test_small_model_pt_bf16(self):
dqa_pipeline = pipeline(
"document-question-answering",
model="hf-internal-testing/tiny-random-layoutlmv2-for-dqa-test",
dtype=torch.bfloat16,
)
image = INVOICE_URL
question = "How many cats are there?"
expected_output = [
{"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(outputs, [])
# We can optionally pass directly the words and bounding boxes
image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
words = []
boxes = []
outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
self.assertEqual(outputs, [])
# TODO: Enable this once hf-internal-testing/tiny-random-donut is implemented
# @require_torch
# def test_small_model_pt_donut(self):
# dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-donut")
# # dqa_pipeline = pipeline("document-question-answering", model="../tiny-random-donut")
# image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png"
# question = "How many cats are there?"
#
# outputs = dqa_pipeline(image=image, question=question, top_k=2)
# self.assertEqual(
# nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
# )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def test_large_model_pt(self):
dqa_pipeline = pipeline(
"document-question-answering",
model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
revision="9977165",
)
image = INVOICE_URL
question = "What is the invoice number?"
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
],
)
outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
],
)
outputs = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2,
)
@slow
@require_torch
@require_detectron2
@require_pytesseract
def test_large_model_pt_chunk(self):
dqa_pipeline = pipeline(
"document-question-answering",
model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
revision="9977165",
max_seq_len=50,
)
image = INVOICE_URL
question = "What is the invoice number?"
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
],
)
outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
],
)
outputs = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2,
)
@slow
@require_torch
@require_pytesseract
@require_vision
def test_large_model_pt_layoutlm(self):
tokenizer = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
)
dqa_pipeline = pipeline(
"document-question-answering",
model="impira/layoutlm-document-qa",
tokenizer=tokenizer,
revision="3dc6de3",
)
image = INVOICE_URL
question = "What is the invoice number?"
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=3),
[
{"score": 0.425, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.082, "answer": "1110212019", "start": 23, "end": 23},
],
)
outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=3),
[
{"score": 0.425, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.082, "answer": "1110212019", "start": 23, "end": 23},
],
)
outputs = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
)
self.assertEqual(
nested_simplify(outputs, decimals=3),
[
[
{"score": 0.425, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.082, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2,
)
word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
# This model should also work if `image` is set to None
outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=3),
[
{"score": 0.425, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.082, "answer": "1110212019", "start": 23, "end": 23},
],
)
@slow
@require_torch
@require_pytesseract
@require_vision
def test_large_model_pt_layoutlm_chunk(self):
tokenizer = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
)
dqa_pipeline = pipeline(
"document-question-answering",
model="impira/layoutlm-document-qa",
tokenizer=tokenizer,
revision="3dc6de3",
max_seq_len=50,
)
image = INVOICE_URL
question = "What is the invoice number?"
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
],
)
outputs = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2,
)
word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
# This model should also work if `image` is set to None
outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
],
)
@slow
@require_torch
def test_large_model_pt_donut(self):
dqa_pipeline = pipeline(
"document-question-answering",
model="naver-clova-ix/donut-base-finetuned-docvqa",
tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
image_processor="naver-clova-ix/donut-base-finetuned-docvqa",
)
image = INVOICE_URL
question = "What is the invoice number?"
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
| DocumentQuestionAnsweringPipelineTests |
python | viewflow__viewflow | viewflow/workflow/flow/nodes.py | {
"start": 1273,
"end": 1974
} | class ____(mixins.NodeDetailMixin, mixins.NodeUndoMixin, nodes.StartHandle):
"""
The ``Start`` handle node in a flow.
This node is used as the initial step in a flow from code
.. code-block:: python
class MyFlow(flow.Flow):
start = flow.StartHandle(this.on_start_process).Next(this.approve)
def start_process(self, activation, sample=False):
activation.process.sample = sample
return activation.process
...
process = MyFlow.start.run(sample=True)
"""
index_view_class = views.IndexTaskView
detail_view_class = views.DetailTaskView
undo_view_class = views.UndoTaskView
| StartHandle |
python | kubernetes-client__python | kubernetes/client/models/v1_config_map.py | {
"start": 383,
"end": 9779
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'binary_data': 'dict(str, str)',
'data': 'dict(str, str)',
'immutable': 'bool',
'kind': 'str',
'metadata': 'V1ObjectMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'binary_data': 'binaryData',
'data': 'data',
'immutable': 'immutable',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, binary_data=None, data=None, immutable=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1ConfigMap - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._binary_data = None
self._data = None
self._immutable = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if binary_data is not None:
self.binary_data = binary_data
if data is not None:
self.data = data
if immutable is not None:
self.immutable = immutable
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1ConfigMap. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1ConfigMap. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1ConfigMap.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1ConfigMap. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def binary_data(self):
"""Gets the binary_data of this V1ConfigMap. # noqa: E501
BinaryData contains the binary data. Each key must consist of alphanumeric characters, '-', '_' or '.'. BinaryData can contain byte sequences that are not in the UTF-8 range. The keys stored in BinaryData must not overlap with the ones in the Data field, this is enforced during validation process. Using this field will require 1.10+ apiserver and kubelet. # noqa: E501
:return: The binary_data of this V1ConfigMap. # noqa: E501
:rtype: dict(str, str)
"""
return self._binary_data
@binary_data.setter
def binary_data(self, binary_data):
"""Sets the binary_data of this V1ConfigMap.
BinaryData contains the binary data. Each key must consist of alphanumeric characters, '-', '_' or '.'. BinaryData can contain byte sequences that are not in the UTF-8 range. The keys stored in BinaryData must not overlap with the ones in the Data field, this is enforced during validation process. Using this field will require 1.10+ apiserver and kubelet. # noqa: E501
:param binary_data: The binary_data of this V1ConfigMap. # noqa: E501
:type: dict(str, str)
"""
self._binary_data = binary_data
@property
def data(self):
"""Gets the data of this V1ConfigMap. # noqa: E501
Data contains the configuration data. Each key must consist of alphanumeric characters, '-', '_' or '.'. Values with non-UTF-8 byte sequences must use the BinaryData field. The keys stored in Data must not overlap with the keys in the BinaryData field, this is enforced during validation process. # noqa: E501
:return: The data of this V1ConfigMap. # noqa: E501
:rtype: dict(str, str)
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this V1ConfigMap.
Data contains the configuration data. Each key must consist of alphanumeric characters, '-', '_' or '.'. Values with non-UTF-8 byte sequences must use the BinaryData field. The keys stored in Data must not overlap with the keys in the BinaryData field, this is enforced during validation process. # noqa: E501
:param data: The data of this V1ConfigMap. # noqa: E501
:type: dict(str, str)
"""
self._data = data
@property
def immutable(self):
"""Gets the immutable of this V1ConfigMap. # noqa: E501
Immutable, if set to true, ensures that data stored in the ConfigMap cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil. # noqa: E501
:return: The immutable of this V1ConfigMap. # noqa: E501
:rtype: bool
"""
return self._immutable
@immutable.setter
def immutable(self, immutable):
"""Sets the immutable of this V1ConfigMap.
Immutable, if set to true, ensures that data stored in the ConfigMap cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil. # noqa: E501
:param immutable: The immutable of this V1ConfigMap. # noqa: E501
:type: bool
"""
self._immutable = immutable
@property
def kind(self):
"""Gets the kind of this V1ConfigMap. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1ConfigMap. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1ConfigMap.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1ConfigMap. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1ConfigMap. # noqa: E501
:return: The metadata of this V1ConfigMap. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1ConfigMap.
:param metadata: The metadata of this V1ConfigMap. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ConfigMap):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ConfigMap):
return True
return self.to_dict() != other.to_dict()
| V1ConfigMap |
python | getsentry__sentry | src/sentry/snuba/metrics/extraction.py | {
"start": 42515,
"end": 61501
} | class ____:
"""
Contains the information required to query or extract an on-demand metric.
"""
# Base fields from outside.
field: str
query: str
groupbys: Sequence[str]
spec_type: MetricSpecType
spec_version: SpecVersion
# Public fields.
op: MetricOperationType
# Private fields.
_metric_type: str
_arguments: Sequence[str]
def __init__(
self,
field: str,
query: str,
environment: str | None = None,
groupbys: Sequence[str] | None = None,
spec_type: MetricSpecType = MetricSpecType.SIMPLE_QUERY,
spec_version: SpecVersion | None = None,
):
self.field = field
self.query = query
self.spec_type = spec_type
self.spec_version = (
spec_version
if spec_version
else OnDemandMetricSpecVersioning.get_default_spec_version()
)
# Removes field if passed in selected_columns
self.groupbys = [groupby for groupby in groupbys or () if groupby != field]
# Include environment in groupbys which will cause it to included it in the query hash
if (
self.spec_type == MetricSpecType.DYNAMIC_QUERY
and "environment" not in self.groupbys
and self.spec_version.flags == {"include_environment_tag"}
):
self.groupbys.append("environment")
# For now, we just support the environment as extra, but in the future we might need more complex ways to
# combine extra values that are outside the query string.
self.environment = environment
self._arguments = []
self._eager_process()
def _eager_process(self) -> None:
op, metric_type, arguments = self._process_field()
self.op = op
self._metric_type = metric_type
self._arguments = arguments or []
@property
def field_to_extract(self) -> str | None:
if self.op in ("on_demand_apdex", "on_demand_count_web_vitals"):
return None
if self.op in ("on_demand_user_misery"):
return _map_field_name("user")
if not self._arguments:
return None
return self._arguments[0]
@property
def metric_type(self) -> str:
"""Returns c, d or s representing if it's a counter, distribution or set."""
return self._metric_type
@cached_property
def mri(self) -> str:
"""The unique identifier of the on-demand metric."""
return f"{self._metric_type}:{CUSTOM_ALERT_METRIC_NAME}@none"
@cached_property
def _query_str_for_hash(self) -> str:
"""Returns a hash of the query and field to be used as a unique identifier for the on-demand metric."""
str_to_hash = f"{self._field_for_hash()};{self._query_for_hash()}"
if self.groupbys:
# For compatibility with existing deployed metrics, leave existing hash untouched unless conditions are now
# included in the spec.
return f"{str_to_hash};{self._groupbys_for_hash()}"
return str_to_hash
@cached_property
def query_hash(self) -> str:
str_to_hash = self._query_str_for_hash
hash = hashlib.shake_128(str_to_hash.encode()).hexdigest(4)
return hash
def _field_for_hash(self) -> str | None:
# Since derived metrics are a special case, we want to make sure that the hashing is different from the other
# metrics.
#
# More specifically the hashing implementation will depend on the derived metric type:
# - failure count & rate -> hash the op
# - apdex -> hash the op + value
#
# The rationale for different hashing is complex to explain but the main idea is that if we hash the argument
# and the conditions, we might have a case in which `count()` with condition `f` has the same hash as `apdex()`
# with condition `f` and this will create a problem, since we might already have data for the `count()` and when
# `apdex()` is created in the UI, we will use that metric but that metric didn't extract in the past the tags
# that are used for apdex calculation, effectively causing problems with the data.
if self.op in _NO_ARG_METRICS:
return self.op
elif self.op in _MULTIPLE_ARGS_METRICS:
ret_val = f"{self.op}"
for arg in self._arguments:
ret_val += f":{arg}"
return ret_val
if not self._arguments:
return None
return self._arguments[0]
def _query_for_hash(self) -> str:
# In order to reduce the amount of metric being extracted, we perform a sort of the conditions tree. This
# heuristic allows us to perform some de-duplication to minimize the number of metrics extracted for
# semantically identical queries.
#
# In case we have `None` condition, we will use `None` string for hashing, so it's a sentinel value.
return str(_deep_sorted(self.condition))
def _groupbys_for_hash(self) -> str:
# A sorted list of group-bys for the hash, since groupbys will be unique per on_demand metric.
return str(sorted(self.groupbys))
@cached_property
def condition(self) -> RuleCondition | None:
"""Returns a parent condition containing a list of other conditions which determine whether of not the metric
is extracted."""
return self._process_query()
def tags_conditions(self, project: Project) -> list[TagSpec]:
"""Returns a list of tag conditions that will specify how tags are injected into metrics by Relay, and a bool if those specs may be project specific."""
tags_specs_generator = _ONDEMAND_OP_TO_SPEC_GENERATOR.get(self.op)
tags_specs_generator_for_project = _ONDEMAND_OP_TO_PROJECT_SPEC_GENERATOR.get(self.op)
if tags_specs_generator_for_project is not None:
tags_specs_generator = tags_specs_generator_for_project
if tags_specs_generator is None:
return []
return tags_specs_generator(project, self._arguments)
def _tag_for_field(self, groupby: str) -> TagSpec:
"""Returns a TagSpec for a field, eg. a groupby"""
field = _map_field_name(groupby)
return {
"key": groupby,
"field": field,
}
def tags_groupbys(self, groupbys: Sequence[str]) -> list[TagSpec]:
"""Returns a list of tag specs generate for added groupbys, as they need to be stored separately for queries to work."""
return [self._tag_for_field(groupby) for groupby in groupbys]
def to_metric_spec(self, project: Project) -> MetricSpec:
"""Converts the OndemandMetricSpec into a MetricSpec that Relay can understand."""
# Tag conditions are always computed based on the project.
extended_tags_conditions = self.tags_conditions(project).copy()
extended_tags_conditions.append({"key": QUERY_HASH_KEY, "value": self.query_hash})
tag_from_groupbys = self.tags_groupbys(self.groupbys)
extended_tags_conditions.extend(tag_from_groupbys)
# Once we switch to the next spec we can remove this block
# since the environment will be added to the groupbys, thus, being included in the query hash
if (
self.spec_type == MetricSpecType.DYNAMIC_QUERY
and self.spec_version.flags == set()
and self._tag_for_field("environment") not in extended_tags_conditions
):
extended_tags_conditions.append(self._tag_for_field("environment"))
metric_spec: MetricSpec = {
"category": DataCategory.TRANSACTION.api_name(),
"mri": self.mri,
"field": self.field_to_extract,
"tags": extended_tags_conditions,
}
condition = self.condition
if condition is not None:
metric_spec["condition"] = condition
return metric_spec
def _process_field(self) -> tuple[MetricOperationType, str, Sequence[str] | None]:
parsed_field = self._parse_field(self.field)
op = self._get_op(parsed_field.function, parsed_field.arguments)
metric_type = self._get_metric_type(parsed_field.function)
return op, metric_type, self._parse_arguments(op, metric_type, parsed_field)
def _process_query(self) -> RuleCondition | None:
# First step is to parse the query string into our internal AST format.
parsed_query = self._parse_query(self.query)
# We extend the parsed query with other conditions that we want to inject externally from the query. If it is
# a simple query, we encode the environment in the query hash, instead of emitting it as a tag of the metric.
if self.spec_type == MetricSpecType.SIMPLE_QUERY:
parsed_query = self._extend_parsed_query(parsed_query)
# Second step is to extract the conditions that might be present in the aggregate function (e.g. count_if).
parsed_field = self._parse_field(self.field)
aggregate_conditions = self._aggregate_conditions(parsed_field)
# In case we have an empty query, but we have some conditions from the aggregate, we can just return them.
if parsed_query.is_empty() and aggregate_conditions:
return aggregate_conditions
try:
# Third step is to generate the actual Relay rule that contains all rules nested. We assume that the query
# being passed here, can be satisfied ONLY by on demand metrics.
rule_condition = SearchQueryConverter(parsed_query.conditions).convert()
except Exception:
if not parsed_query.is_empty():
logger.exception("Error while converting search query '%s'", self.query)
return None
# If we don't have to merge the aggregate, we can just return the parsed rules.
if not aggregate_conditions:
return rule_condition
# In case we have a top level rule which is not an "and" we have to wrap it.
if rule_condition["op"] != "and":
return {"op": "and", "inner": [rule_condition, aggregate_conditions]}
# In the other case, we can just flatten the conditions.
rule_condition["inner"].append(aggregate_conditions)
return rule_condition
def _extend_parsed_query(self, parsed_query_result: QueryParsingResult) -> QueryParsingResult:
conditions = cast(list[QueryToken], parsed_query_result.conditions)
new_conditions: list[QueryToken] = []
if self.environment is not None:
new_conditions.append(
SearchFilter(
key=SearchKey(name="environment"),
operator="=",
value=SearchValue(raw_value=self.environment),
)
)
extended_conditions = conditions
if new_conditions:
conditions = [ParenExpression(children=conditions)] if conditions else []
# This transformation is equivalent to (new_conditions) AND (conditions).
extended_conditions = [ParenExpression(children=new_conditions)] + conditions
return QueryParsingResult(conditions=extended_conditions)
@staticmethod
def _aggregate_conditions(parsed_field: FieldParsingResult) -> RuleCondition | None:
# We have to handle the special case for the "count_if" function, however it may be better to build some
# better abstracted code to handle third-party rule conditions injection.
if parsed_field.function == "count_if":
key, op, value = parsed_field.arguments
return _convert_countif_filter(key, op, value)
return None
@staticmethod
def _parse_arguments(
op: MetricOperationType, metric_type: str, parsed_field: FieldParsingResult
) -> Sequence[str] | None:
requires_arguments = metric_type in ["s", "d"] or op in _MULTIPLE_ARGS_METRICS
if not requires_arguments:
return None
if len(parsed_field.arguments) == 0:
raise OnDemandMetricSpecError(f"The operation {op} supports one or more parameters")
arguments = parsed_field.arguments
return [_map_field_name(arguments[0])] if op not in _MULTIPLE_ARGS_METRICS else arguments
@staticmethod
def _get_op(function: str, args: Sequence[str]) -> MetricOperationType:
if function == "percentile":
percentile_op = _get_percentile_op(args)
if percentile_op is not None:
function = cast(str, percentile_op)
op = _SEARCH_TO_METRIC_AGGREGATES.get(function) or _SEARCH_TO_DERIVED_METRIC_AGGREGATES.get(
function
)
if op is not None:
return op
raise OnDemandMetricSpecError(f"Unsupported aggregate function {function}")
@staticmethod
def _get_metric_type(function: str) -> str:
metric_type = _AGGREGATE_TO_METRIC_TYPE.get(function)
if metric_type is not None:
return metric_type
raise OnDemandMetricSpecError(f"Unsupported aggregate function {function}")
@staticmethod
def _parse_field(value: str) -> FieldParsingResult:
try:
function, arguments, alias = _parse_function(value)
if function:
return FieldParsingResult(function=function, arguments=arguments, alias=alias)
# TODO: why is this here?
column = query_builder.resolve_column(value)
return column
except InvalidSearchQuery as e:
raise OnDemandMetricSpecError(
f"Unable to parse the field '{value}' in on demand spec: {e}"
)
@staticmethod
def _parse_query(value: str) -> QueryParsingResult:
"""Parse query string into our internal AST format."""
try:
conditions = parse_search_query(query=value, removed_blacklisted=True)
# In order to avoid having issues with the parsing logic, we want to remove any unnecessary parentheses
# that are not needed, since if we had the parentheses this might lead to a different conditions tree, which
# in our case doesn't happen since SearchQueryConverter optimizes that case, but it can easily slip in other
# edge cases.
conditions = _remove_redundant_parentheses(conditions)
return QueryParsingResult(conditions=conditions)
except InvalidSearchQuery as e:
raise OnDemandMetricSpecError(f"Invalid search query '{value}' in on demand spec: {e}")
def fetch_on_demand_metric_spec(
org_id: int,
field: str,
query: str,
environment: str | None = None,
groupbys: Sequence[str] | None = None,
spec_type: MetricSpecType = MetricSpecType.SIMPLE_QUERY,
) -> OnDemandMetricSpec:
"""Function to query the right spec based on the feature flags for an organization."""
# The spec version defines what OnDemandMetricSpec version is created
spec_version = OnDemandMetricSpecVersioning.get_query_spec_version(org_id)
return OnDemandMetricSpec(
field=field,
query=query,
environment=environment,
groupbys=groupbys,
spec_type=spec_type,
spec_version=spec_version,
)
def _convert_countif_filter(key: str, op: str, value: str) -> RuleCondition:
"""Maps ``count_if`` arguments to a ``RuleCondition``."""
assert op in _COUNTIF_TO_RELAY_OPERATORS, f"Unsupported `count_if` operator {op}"
condition = cast(
RuleCondition,
{
"op": _COUNTIF_TO_RELAY_OPERATORS[op],
"name": _map_field_name(key),
"value": fields.normalize_count_if_value({"column": key, "value": value}),
},
)
if op == "notEquals":
condition = {"op": "not", "inner": condition}
return condition
def _map_field_name(search_key: str) -> str:
"""
Maps the name of a field in a search query to the event protocol path.
Raises an exception if the field is not supported.
"""
# Map known fields using a static mapping.
if field := _SEARCH_TO_PROTOCOL_FIELDS.get(search_key):
return f"event.{field}"
# Measurements support generic access.
if search_key.startswith("measurements."):
return f"event.{search_key}.value"
# Run a schema-aware check for tags. Always use the resolver output,
# since it accounts for passing `tags[foo]` as key.
resolved = (resolve_column(Dataset.Transactions))(search_key)
if resolved == "transaction_name":
transaction_field = _SEARCH_TO_PROTOCOL_FIELDS.get("transaction")
return f"event.{transaction_field}"
if resolved.startswith("tags["):
stripped_search_key = resolved[5:-1]
# In case a field is identified as a tag but the stripped search key is matching
# an existing field, we want to use that instead.
# For example 'tags[os]' or 'os' are resolved both to 'tags[os]' by `resolve_column`. To
# generalizing the handling, we define the mapping only for 'os' and strip it accordingly.
if field := _SEARCH_TO_PROTOCOL_FIELDS.get(stripped_search_key):
return f"event.{field}"
return f"event.tags.{stripped_search_key}"
raise ValueError(f"Unsupported query field {search_key}")
def _get_satisfactory_metric(project: Project) -> str:
"""It returns the statisfactory response time threshold for the project and
the associated metric ("transaction.duration" or "measurements.lcp")."""
result = ProjectTransactionThreshold.filter(
organization_id=project.organization.id,
project_ids=[project.id],
order_by=[],
value_list=["metric"],
)
if len(result) == 0:
metric = TransactionMetric.DURATION.value
else:
metric = result[0][0]
if metric == TransactionMetric.DURATION.value:
metric_field = "transaction.duration"
elif metric == TransactionMetric.LCP.value:
# We assume it's lcp since the enumerator contains only two possibilities.
metric_field = "measurements.lcp"
else:
raise Exception("Invalid metric for project transaction threshold")
return metric_field
def _escape_wildcard(value: str) -> str:
"""
Escapes all characters in the wildcard which are considered as meta characters in the glob
implementation in Relay, which can be found at: https://docs.rs/globset/latest/globset/#syntax.
The goal of this function is to only preserve the `*` character as it is the only character that Sentry's
product offers to users to perform wildcard matching.
"""
i, n = 0, len(value)
escaped = ""
while i < n:
c = value[i]
i = i + 1
if c in "[]{}?":
escaped += rf"\{c}"
else:
escaped += c
return escaped
T = TypeVar("T")
| OnDemandMetricSpec |
python | protocolbuffers__protobuf | python/google/protobuf/internal/well_known_types.py | {
"start": 19302,
"end": 21306
} | class ____(object):
"""Class for Struct message type."""
__slots__ = ()
def __getitem__(self, key):
return _GetStructValue(self.fields[key])
def __setitem__(self, key, value):
_SetStructValue(self.fields[key], value)
def __delitem__(self, key):
del self.fields[key]
def __len__(self):
return len(self.fields)
def __iter__(self):
return iter(self.fields)
def _internal_assign(self, dictionary):
self.Clear()
self.update(dictionary)
def _internal_compare(self, other):
size = len(self)
if size != len(other):
return False
for key, value in self.items():
if key not in other:
return False
if isinstance(other[key], (dict, list)):
if not value._internal_compare(other[key]):
return False
elif value != other[key]:
return False
return True
def keys(self): # pylint: disable=invalid-name
return self.fields.keys()
def values(self): # pylint: disable=invalid-name
return [self[key] for key in self]
def items(self): # pylint: disable=invalid-name
return [(key, self[key]) for key in self]
def get_or_create_list(self, key):
"""Returns a list for this key, creating if it didn't exist already."""
if not self.fields[key].HasField('list_value'):
# Clear will mark list_value modified which will indeed create a list.
self.fields[key].list_value.Clear()
return self.fields[key].list_value
def get_or_create_struct(self, key):
"""Returns a struct for this key, creating if it didn't exist already."""
if not self.fields[key].HasField('struct_value'):
# Clear will mark struct_value modified which will indeed create a struct.
self.fields[key].struct_value.Clear()
return self.fields[key].struct_value
def update(self, dictionary): # pylint: disable=invalid-name
for key, value in dictionary.items():
_SetStructValue(self.fields[key], value)
collections.abc.MutableMapping.register(Struct)
| Struct |
python | docker__docker-py | tests/integration/api_image_test.py | {
"start": 4012,
"end": 10513
} | class ____(BaseAPIIntegrationTest):
'''Base class for `docker import` test cases.'''
TAR_SIZE = 512 * 1024
def write_dummy_tar_content(self, n_bytes, tar_fd):
def extend_file(f, n_bytes):
f.seek(n_bytes - 1)
f.write(bytearray([65]))
f.seek(0)
tar = tarfile.TarFile(fileobj=tar_fd, mode='w')
with tempfile.NamedTemporaryFile() as f:
extend_file(f, n_bytes)
tarinfo = tar.gettarinfo(name=f.name, arcname='testdata')
tar.addfile(tarinfo, fileobj=f)
tar.close()
@contextlib.contextmanager
def dummy_tar_stream(self, n_bytes):
'''Yields a stream that is valid tar data of size n_bytes.'''
with tempfile.NamedTemporaryFile() as tar_file:
self.write_dummy_tar_content(n_bytes, tar_file)
tar_file.seek(0)
yield tar_file
@contextlib.contextmanager
def dummy_tar_file(self, n_bytes):
'''Yields the name of a valid tar file of size n_bytes.'''
with tempfile.NamedTemporaryFile(delete=False) as tar_file:
self.write_dummy_tar_content(n_bytes, tar_file)
tar_file.seek(0)
yield tar_file.name
def test_import_from_bytes(self):
with self.dummy_tar_stream(n_bytes=500) as f:
content = f.read()
# The generic import_image() function cannot import in-memory bytes
# data that happens to be represented as a string type, because
# import_image() will try to use it as a filename and usually then
# trigger an exception. So we test the import_image_from_data()
# function instead.
statuses = self.client.import_image_from_data(
content, repository='test/import-from-bytes')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
assert 'error' not in result
img_id = result['status']
self.tmp_imgs.append(img_id)
def test_import_from_file(self):
with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename:
# statuses = self.client.import_image(
# src=tar_filename, repository='test/import-from-file')
statuses = self.client.import_image_from_file(
tar_filename, repository='test/import-from-file')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
assert 'error' not in result
assert 'status' in result
img_id = result['status']
self.tmp_imgs.append(img_id)
def test_import_from_stream(self):
with self.dummy_tar_stream(n_bytes=self.TAR_SIZE) as tar_stream:
statuses = self.client.import_image(
src=tar_stream, repository='test/import-from-stream')
# statuses = self.client.import_image_from_stream(
# tar_stream, repository='test/import-from-stream')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
assert 'error' not in result
assert 'status' in result
img_id = result['status']
self.tmp_imgs.append(img_id)
def test_import_image_from_data_with_changes(self):
with self.dummy_tar_stream(n_bytes=500) as f:
content = f.read()
statuses = self.client.import_image_from_data(
content, repository='test/import-from-bytes',
changes=['USER foobar', 'CMD ["echo"]']
)
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
assert 'error' not in result
img_id = result['status']
self.tmp_imgs.append(img_id)
img_data = self.client.inspect_image(img_id)
assert img_data is not None
assert img_data['Config']['Cmd'] == ['echo']
assert img_data['Config']['User'] == 'foobar'
def test_import_image_with_changes(self):
with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename:
statuses = self.client.import_image(
src=tar_filename, repository='test/import-from-file',
changes=['USER foobar', 'CMD ["echo"]']
)
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
assert 'error' not in result
img_id = result['status']
self.tmp_imgs.append(img_id)
img_data = self.client.inspect_image(img_id)
assert img_data is not None
assert img_data['Config']['Cmd'] == ['echo']
assert img_data['Config']['User'] == 'foobar'
# Docs say output is available in 1.23, but this test fails on 1.12.0
@requires_api_version('1.24')
def test_get_load_image(self):
test_img = 'hello-world:latest'
self.client.pull(test_img)
data = self.client.get_image(test_img)
assert data
output = self.client.load_image(data)
assert any(line for line in output
if f'Loaded image: {test_img}' in line.get('stream', ''))
@contextlib.contextmanager
def temporary_http_file_server(self, stream):
'''Serve data from an IO stream over HTTP.'''
class Handler(SimpleHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-Type', 'application/x-tar')
self.end_headers()
shutil.copyfileobj(stream, self.wfile)
server = socketserver.TCPServer(('', 0), Handler)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
yield f'http://{socket.gethostname()}:{server.server_address[1]}'
server.shutdown()
@pytest.mark.skipif(True, reason="Doesn't work inside a container - FIXME")
def test_import_from_url(self):
# The crappy test HTTP server doesn't handle large files well, so use
# a small file.
tar_size = 10240
with self.dummy_tar_stream(n_bytes=tar_size) as tar_data:
with self.temporary_http_file_server(tar_data) as url:
statuses = self.client.import_image(
src=url, repository='test/import-from-url')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
assert 'error' not in result
assert 'status' in result
img_id = result['status']
self.tmp_imgs.append(img_id)
@requires_api_version('1.25')
| ImportImageTest |
python | python-openxml__python-docx | src/docx/enum/text.py | {
"start": 2188,
"end": 3926
} | class ____(BaseXmlEnum):
"""Specifies a standard preset color to apply.
Used for font highlighting and perhaps other applications.
* MS API name: `WdColorIndex`
* URL: https://msdn.microsoft.com/EN-US/library/office/ff195343.aspx
"""
INHERITED = (-1, None, "Color is inherited from the style hierarchy.")
"""Color is inherited from the style hierarchy."""
AUTO = (0, "default", "Automatic color. Default; usually black.")
"""Automatic color. Default; usually black."""
BLACK = (1, "black", "Black color.")
"""Black color."""
BLUE = (2, "blue", "Blue color")
"""Blue color"""
BRIGHT_GREEN = (4, "green", "Bright green color.")
"""Bright green color."""
DARK_BLUE = (9, "darkBlue", "Dark blue color.")
"""Dark blue color."""
DARK_RED = (13, "darkRed", "Dark red color.")
"""Dark red color."""
DARK_YELLOW = (14, "darkYellow", "Dark yellow color.")
"""Dark yellow color."""
GRAY_25 = (16, "lightGray", "25% shade of gray color.")
"""25% shade of gray color."""
GRAY_50 = (15, "darkGray", "50% shade of gray color.")
"""50% shade of gray color."""
GREEN = (11, "darkGreen", "Green color.")
"""Green color."""
PINK = (5, "magenta", "Pink color.")
"""Pink color."""
RED = (6, "red", "Red color.")
"""Red color."""
TEAL = (10, "darkCyan", "Teal color.")
"""Teal color."""
TURQUOISE = (3, "cyan", "Turquoise color.")
"""Turquoise color."""
VIOLET = (12, "darkMagenta", "Violet color.")
"""Violet color."""
WHITE = (8, "white", "White color.")
"""White color."""
YELLOW = (7, "yellow", "Yellow color.")
"""Yellow color."""
WD_COLOR = WD_COLOR_INDEX
| WD_COLOR_INDEX |
python | PrefectHQ__prefect | src/prefect/settings/models/server/services.py | {
"start": 15731,
"end": 16376
} | class ____(ServicesBaseSetting):
"""
Settings for controlling the task run recorder service
"""
model_config: ClassVar[SettingsConfigDict] = build_settings_config(
("server", "services", "task_run_recorder")
)
enabled: bool = Field(
default=True,
description="Whether or not to start the task run recorder service in the server application.",
validation_alias=AliasChoices(
AliasPath("enabled"),
"prefect_server_services_task_run_recorder_enabled",
"prefect_api_services_task_run_recorder_enabled",
),
)
| ServerServicesTaskRunRecorderSettings |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_pretty.py | {
"start": 14417,
"end": 16403
} | class ____(Exception):
pass
def test_exception():
assert pretty.pretty(ValueError("hi")) == "ValueError('hi')"
assert pretty.pretty(ValueError("hi", "there")) == "ValueError('hi', 'there')"
assert "test_pretty." in pretty.pretty(MyException())
def test_re_evals():
for r in [
re.compile(r"hi"),
re.compile(r"b\nc", re.MULTILINE),
re.compile(rb"hi", 0),
re.compile("foo", re.MULTILINE | re.UNICODE),
]:
r2 = eval(pretty.pretty(r), globals())
assert r.pattern == r2.pattern
assert r.flags == r2.flags
def test_print_builtin_function():
assert pretty.pretty(abs) == "abs"
def test_pretty_function():
assert pretty.pretty(test_pretty_function) == "test_pretty_function"
def test_breakable_at_group_boundary():
assert "\n" in pretty.pretty([[], "0" * 80])
@pytest.mark.parametrize(
"obj, rep",
[
(float("nan"), "nan"),
(-float("nan"), "-nan"),
(SIGNALING_NAN, "struct.unpack('d', struct.pack('Q', 0x7ff8000000000001))[0]"),
(-SIGNALING_NAN, "struct.unpack('d', struct.pack('Q', 0xfff8000000000001))[0]"),
],
)
def test_nan_reprs(obj, rep):
assert pretty.pretty(obj) == rep
assert float_to_lex(obj) == float_to_lex(
eval(rep, {"struct": struct, "nan": float("nan")})
)
def _repr_call(*args, **kwargs):
p = pretty.RepresentationPrinter()
p.repr_call(*args, **kwargs)
return p.getvalue()
@pytest.mark.parametrize("func_name", ["f", "lambda: ...", "lambda *args: ..."])
def test_repr_call(func_name):
fn = f"({func_name})" if func_name.startswith(("lambda:", "lambda ")) else func_name
aas = "a" * 100
assert _repr_call(func_name, (1, 2), {}) == f"{fn}(1, 2)"
assert _repr_call(func_name, (aas,), {}) == f"{fn}(\n {aas!r},\n)"
assert _repr_call(func_name, (), {"a": 1, "b": 2}) == f"{fn}(a=1, b=2)"
assert _repr_call(func_name, (), {"x": aas}) == f"{fn}(\n x={aas!r},\n)"
| MyException |
python | run-llama__llama_index | llama-index-integrations/postprocessor/llama-index-postprocessor-tei-rerank/llama_index/postprocessor/tei_rerank/base.py | {
"start": 686,
"end": 5239
} | class ____(BaseNodePostprocessor):
base_url: str = Field(
default=DEFAULT_URL,
description="Base URL for the text embeddings service.",
)
top_n: int = Field(
default=TOP_N, description="Number of nodes to return sorted by score."
)
keep_retrieval_score: bool = Field(
default=False,
description="Whether to keep the retrieval score in metadata.",
)
timeout: float = Field(
default=60.0,
description="Timeout in seconds for the request.",
)
truncate_text: bool = Field(
default=True,
description="Whether to truncate text or not when generating embeddings.",
)
auth_token: Optional[Union[str, Callable[[str], str]]] = Field(
default=None,
description="Authentication token or authentication token generating function for authenticated requests",
)
model_name: str = Field(
default="API",
description="Base URL for the text embeddings service.",
)
mode: str = Field(
default="text",
description="Re-ranking Method, full for including meta-data too.",
)
def __init__(
self,
top_n: int = TOP_N,
base_url: str = DEFAULT_URL,
text_instruction: Optional[str] = None,
query_instruction: Optional[str] = None,
timeout: float = 60.0,
truncate_text: bool = True,
auth_token: Optional[Union[str, Callable[[str], str]]] = None,
model_name="API",
):
super().__init__(
base_url=base_url,
top_n=top_n,
text_instruction=text_instruction,
query_instruction=query_instruction,
timeout=timeout,
truncate_text=truncate_text,
auth_token=auth_token,
model_name=model_name,
mode="text",
)
@classmethod
def class_name(cls) -> str:
return "TextEmbeddingsInference"
def _call_api(self, query: str, texts: List[str]) -> List[float]:
headers = {"Content-Type": "application/json"}
if self.auth_token is not None:
if callable(self.auth_token):
headers["Authorization"] = self.auth_token(self.base_url)
else:
headers["Authorization"] = self.auth_token
json_data = {"query": query, "texts": texts}
with httpx.Client() as client:
response = client.post(
f"{self.base_url}/rerank",
headers=headers,
json=json_data,
timeout=self.timeout,
)
return response.json()
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
dispatcher.event(
ReRankStartEvent(
query=query_bundle,
nodes=nodes,
top_n=self.top_n,
model_name=self.model_name,
)
)
if query_bundle is None:
raise ValueError("Missing query bundle in extra info.")
if len(nodes) == 0:
return []
query = query_bundle.query_str
if self.mode == "full":
texts = [
node.node.get_content(metadata_mode=MetadataMode.EMBED)
for node in nodes
]
elif self.mode == "text":
texts = [node.text for node in nodes]
else:
warnings.warn('Re-Ranking Mode defaulting to mode "text"')
texts = [node.text for node in nodes]
with self.callback_manager.event(
CBEventType.RERANKING,
payload={
EventPayload.NODES: nodes,
EventPayload.QUERY_STR: query_bundle.query_str,
EventPayload.TOP_K: self.top_n,
},
) as event:
scores = self._call_api(query, texts)
assert len(scores) == len(nodes)
for node, score in zip(nodes, scores):
if self.keep_retrieval_score:
# keep the retrieval score in metadata
node.node.metadata["retrieval_score"] = node.score
node.score = float(score["score"])
new_nodes = sorted(nodes, key=lambda x: -x.score if x.score else 0)[
: self.top_n
]
event.on_end(payload={EventPayload.NODES: new_nodes})
dispatcher.event(ReRankEndEvent(nodes=new_nodes))
return new_nodes
| TextEmbeddingInference |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_numeric.py | {
"start": 48839,
"end": 70125
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.nr = 5
self.nc = 3
def fastclip(self, a, m, M, out=None, casting=None):
if out is None:
if casting is None:
return a.clip(m, M)
else:
return a.clip(m, M, casting=casting)
else:
if casting is None:
return a.clip(m, M, out)
else:
return a.clip(m, M, out, casting=casting)
def clip(self, a, m, M, out=None):
# use slow-clip
selector = np.less(a, m) + 2 * np.greater(a, M)
return selector.choose((a, m, M), out=out)
# Handy functions
def _generate_data(self, n, m):
return randn(n, m)
def _generate_data_complex(self, n, m):
return randn(n, m) + 1.0j * rand(n, m)
def _generate_flt_data(self, n, m):
return (randn(n, m)).astype(np.float32)
def _neg_byteorder(self, a):
a = np.asarray(a)
if sys.byteorder == "little":
a = a.astype(a.dtype.newbyteorder(">"))
else:
a = a.astype(a.dtype.newbyteorder("<"))
return a
def _generate_non_native_data(self, n, m):
data = randn(n, m)
data = self._neg_byteorder(data)
assert_(not data.dtype.isnative)
return data
def _generate_int_data(self, n, m):
return (10 * rand(n, m)).astype(np.int64)
def _generate_int32_data(self, n, m):
return (10 * rand(n, m)).astype(np.int32)
# Now the real test cases
@parametrize("dtype", "?bhilBfd")
def test_ones_pathological(self, dtype):
# for preservation of behavior described in
# gh-12519; amin > amax behavior may still change
# in the future
arr = np.ones(10, dtype=dtype)
expected = np.zeros(10, dtype=dtype)
actual = np.clip(arr, 1, 0)
assert_equal(actual, expected)
@parametrize("dtype", "eFD")
def test_ones_pathological_2(self, dtype):
if dtype in "FD":
# FIXME: make xfail
raise SkipTest("torch.clamp not implemented for complex types")
# for preservation of behavior described in
# gh-12519; amin > amax behavior may still change
# in the future
arr = np.ones(10, dtype=dtype)
expected = np.zeros(10, dtype=dtype)
actual = np.clip(arr, 1, 0)
assert_equal(actual, expected)
def test_simple_double(self):
# Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = 0.1
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
def test_simple_int(self):
# Test native int input with scalar min/max.
a = self._generate_int_data(self.nr, self.nc)
a = a.astype(int)
m = -2
M = 4
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
def test_array_double(self):
# Test native double input with array min/max.
a = self._generate_data(self.nr, self.nc)
m = np.zeros(a.shape)
M = m + 0.5
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
@xpassIfTorchDynamo_np # (reason="byteorder not supported in torch")
def test_simple_nonnative(self):
# Test non native double input with scalar min/max.
# Test native double input with non native double scalar min/max.
a = self._generate_non_native_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
# Test native double input with non native double scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = self._neg_byteorder(0.6)
assert_(not M.dtype.isnative)
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
@xpassIfTorchDynamo_np # (reason="clamp not supported for complex")
def test_simple_complex(self):
# Test native complex input with native double scalar min/max.
# Test native input with complex double scalar min/max.
a = 3 * self._generate_data_complex(self.nr, self.nc)
m = -0.5
M = 1.0
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
# Test native input with complex double scalar min/max.
a = 3 * self._generate_data(self.nr, self.nc)
m = -0.5 + 1.0j
M = 1.0 + 2.0j
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
@xfail # (reason="clamp not supported for complex")
def test_clip_complex(self):
# Address Issue gh-5354 for clipping complex arrays
# Test native complex input without explicit min/max
# ie, either min=None or max=None
a = np.ones(10, dtype=complex)
m = a.min()
M = a.max()
am = self.fastclip(a, m, None)
aM = self.fastclip(a, None, M)
assert_array_equal(am, a)
assert_array_equal(aM, a)
def test_clip_non_contig(self):
# Test clip for non contiguous native input and native scalar min/max.
a = self._generate_data(self.nr * 2, self.nc * 3)
a = a[::2, ::3]
assert_(not a.flags["F_CONTIGUOUS"])
assert_(not a.flags["C_CONTIGUOUS"])
ac = self.fastclip(a, -1.6, 1.7)
act = self.clip(a, -1.6, 1.7)
assert_array_equal(ac, act)
def test_simple_out(self):
# Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = np.zeros(a.shape)
act = np.zeros(a.shape)
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_equal(ac, act)
# @xpassIfTorchDynamo_np # (reason="casting not supported")
@parametrize(
"casting",
[
subtest(None, decorators=[xfail]),
subtest("unsafe", decorators=[xpassIfTorchDynamo_np]),
],
)
def test_simple_int32_inout(self, casting):
# Test native int32 input with double min/max and int32 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.float64(0)
M = np.float64(2)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
if casting is not None:
# explicitly passing "unsafe" will silence warning
self.fastclip(a, m, M, ac, casting=casting)
self.clip(a, m, M, act)
assert_array_equal(ac, act)
def test_simple_int64_out(self):
# Test native int32 input with int32 scalar min/max and int64 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.int32(-1)
M = np.int32(1)
ac = np.zeros(a.shape, dtype=np.int64)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_equal(ac, act)
@xfail # (reason="FIXME arrays not equal")
def test_simple_int64_inout(self):
# Test native int32 input with double array min/max and int32 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.zeros(a.shape, np.float64)
M = np.float64(1)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
self.clip(a, m, M, act)
assert_array_equal(ac, act)
@xfail # (reason="FIXME arrays not equal")
def test_simple_int32_out(self):
# Test native double input with scalar min/max and int out.
a = self._generate_data(self.nr, self.nc)
m = -1.0
M = 2.0
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
self.clip(a, m, M, act)
assert_array_equal(ac, act)
def test_simple_inplace_01(self):
# Test native double input with array min/max in-place.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = np.zeros(a.shape)
M = 1.0
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_equal(a, ac)
def test_simple_inplace_02(self):
# Test native double input with scalar min/max in-place.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(ac, m, M, ac)
assert_array_equal(a, ac)
def test_noncontig_inplace(self):
# Test non contiguous double input with double scalar min/max in-place.
a = self._generate_data(self.nr * 2, self.nc * 3)
a = a[::2, ::3]
assert_(not a.flags["F_CONTIGUOUS"])
assert_(not a.flags["C_CONTIGUOUS"])
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(ac, m, M, ac)
assert_array_equal(a, ac)
def test_type_cast_01(self):
# Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
def test_type_cast_02(self):
# Test native int32 input with int32 scalar min/max.
a = self._generate_int_data(self.nr, self.nc)
a = a.astype(np.int32)
m = -2
M = 4
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
def test_type_cast_03(self):
# Test native int32 input with float64 scalar min/max.
a = self._generate_int32_data(self.nr, self.nc)
m = -2
M = 4
ac = self.fastclip(a, np.float64(m), np.float64(M))
act = self.clip(a, np.float64(m), np.float64(M))
assert_array_equal(ac, act)
def test_type_cast_04(self):
# Test native int32 input with float32 scalar min/max.
a = self._generate_int32_data(self.nr, self.nc)
m = np.float32(-2)
M = np.float32(4)
act = self.fastclip(a, m, M)
ac = self.clip(a, m, M)
assert_array_equal(ac, act)
def test_type_cast_05(self):
# Test native int32 with double arrays min/max.
a = self._generate_int_data(self.nr, self.nc)
m = -0.5
M = 1.0
ac = self.fastclip(a, m * np.zeros(a.shape), M)
act = self.clip(a, m * np.zeros(a.shape), M)
assert_array_equal(ac, act)
@xpassIfTorchDynamo_np # (reason="newbyteorder not supported")
def test_type_cast_06(self):
# Test native with NON native scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = 0.5
m_s = self._neg_byteorder(m)
M = 1.0
act = self.clip(a, m_s, M)
ac = self.fastclip(a, m_s, M)
assert_array_equal(ac, act)
@xpassIfTorchDynamo_np # (reason="newbyteorder not supported")
def test_type_cast_07(self):
# Test NON native with native array min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5 * np.ones(a.shape)
M = 1.0
a_s = self._neg_byteorder(a)
assert_(not a_s.dtype.isnative)
act = a_s.clip(m, M)
ac = self.fastclip(a_s, m, M)
assert_array_equal(ac, act)
@xpassIfTorchDynamo_np # (reason="newbyteorder not supported")
def test_type_cast_08(self):
# Test NON native with native scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 1.0
a_s = self._neg_byteorder(a)
assert_(not a_s.dtype.isnative)
ac = self.fastclip(a_s, m, M)
act = a_s.clip(m, M)
assert_array_equal(ac, act)
@xpassIfTorchDynamo_np # (reason="newbyteorder not supported")
def test_type_cast_09(self):
# Test native with NON native array min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5 * np.ones(a.shape)
M = 1.0
m_s = self._neg_byteorder(m)
assert_(not m_s.dtype.isnative)
ac = self.fastclip(a, m_s, M)
act = self.clip(a, m_s, M)
assert_array_equal(ac, act)
def test_type_cast_10(self):
# Test native int32 with float min/max and float out for output argument.
a = self._generate_int_data(self.nr, self.nc)
b = np.zeros(a.shape, dtype=np.float32)
m = np.float32(-0.5)
M = np.float32(1)
act = self.clip(a, m, M, out=b)
ac = self.fastclip(a, m, M, out=b)
assert_array_equal(ac, act)
@xpassIfTorchDynamo_np # (reason="newbyteorder not supported")
def test_type_cast_11(self):
# Test non native with native scalar, min/max, out non native
a = self._generate_non_native_data(self.nr, self.nc)
b = a.copy()
b = b.astype(b.dtype.newbyteorder(">"))
bt = b.copy()
m = -0.5
M = 1.0
self.fastclip(a, m, M, out=b)
self.clip(a, m, M, out=bt)
assert_array_equal(b, bt)
def test_type_cast_12(self):
# Test native int32 input and min/max and float out
a = self._generate_int_data(self.nr, self.nc)
b = np.zeros(a.shape, dtype=np.float32)
m = np.int32(0)
M = np.int32(1)
act = self.clip(a, m, M, out=b)
ac = self.fastclip(a, m, M, out=b)
assert_array_equal(ac, act)
def test_clip_with_out_simple(self):
# Test native double input with scalar min/max
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = np.zeros(a.shape)
act = np.zeros(a.shape)
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_equal(ac, act)
@xfail # (reason="FIXME arrays not equal")
def test_clip_with_out_simple2(self):
# Test native int32 input with double min/max and int32 out
a = self._generate_int32_data(self.nr, self.nc)
m = np.float64(0)
M = np.float64(2)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
self.clip(a, m, M, act)
assert_array_equal(ac, act)
def test_clip_with_out_simple_int32(self):
# Test native int32 input with int32 scalar min/max and int64 out
a = self._generate_int32_data(self.nr, self.nc)
m = np.int32(-1)
M = np.int32(1)
ac = np.zeros(a.shape, dtype=np.int64)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_equal(ac, act)
@xfail # (reason="FIXME arrays not equal")
def test_clip_with_out_array_int32(self):
# Test native int32 input with double array min/max and int32 out
a = self._generate_int32_data(self.nr, self.nc)
m = np.zeros(a.shape, np.float64)
M = np.float64(1)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
self.clip(a, m, M, act)
assert_array_equal(ac, act)
@xfail # (reason="FIXME arrays not equal")
def test_clip_with_out_array_outint32(self):
# Test native double input with scalar min/max and int out
a = self._generate_data(self.nr, self.nc)
m = -1.0
M = 2.0
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
self.clip(a, m, M, act)
assert_array_equal(ac, act)
def test_clip_with_out_transposed(self):
# Test that the out argument works when transposed
a = np.arange(16).reshape(4, 4)
out = np.empty_like(a).T
a.clip(4, 10, out=out)
expected = self.clip(a, 4, 10)
assert_array_equal(out, expected)
def test_clip_with_out_memory_overlap(self):
# Test that the out argument works when it has memory overlap
a = np.arange(16).reshape(4, 4)
ac = a.copy()
a[:-1].clip(4, 10, out=a[1:])
expected = self.clip(ac[:-1], 4, 10)
assert_array_equal(a[1:], expected)
def test_clip_inplace_array(self):
# Test native double input with array min/max
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = np.zeros(a.shape)
M = 1.0
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_equal(a, ac)
def test_clip_inplace_simple(self):
# Test native double input with scalar min/max
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_equal(a, ac)
def test_clip_func_takes_out(self):
# Ensure that the clip() function takes an out=argument.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
a2 = np.clip(a, m, M, out=a)
self.clip(a, m, M, ac)
assert_array_equal(a2, ac)
assert_(a2 is a)
@skip(reason="Edge case; Wait until deprecation graduates")
def test_clip_nan(self):
d = np.arange(7.0)
with assert_warns(DeprecationWarning):
assert_equal(d.clip(min=np.nan), d)
with assert_warns(DeprecationWarning):
assert_equal(d.clip(max=np.nan), d)
with assert_warns(DeprecationWarning):
assert_equal(d.clip(min=np.nan, max=np.nan), d)
with assert_warns(DeprecationWarning):
assert_equal(d.clip(min=-2, max=np.nan), d)
with assert_warns(DeprecationWarning):
assert_equal(d.clip(min=np.nan, max=10), d)
@parametrize(
"amin, amax",
[
# two scalars
(1, 0),
# mix scalar and array
(1, np.zeros(10)),
# two arrays
(np.ones(10), np.zeros(10)),
],
)
def test_clip_value_min_max_flip(self, amin, amax):
a = np.arange(10, dtype=np.int64)
# requirement from ufunc_docstrings.py
expected = np.minimum(np.maximum(a, amin), amax)
actual = np.clip(a, amin, amax)
assert_equal(actual, expected)
@parametrize(
"arr, amin, amax",
[
# problematic scalar nan case from hypothesis
(
np.zeros(10, dtype=np.int64),
np.array(np.nan),
np.zeros(10, dtype=np.int32),
),
],
)
def test_clip_scalar_nan_propagation(self, arr, amin, amax):
# enforcement of scalar nan propagation for comparisons
# called through clip()
expected = np.minimum(np.maximum(arr, amin), amax)
actual = np.clip(arr, amin, amax)
assert_equal(actual, expected)
@skip # hypothesis hynp.from_dtype fails on CI (versions?)
@given(
data=st.data(),
arr=hynp.arrays(
dtype=hynp.integer_dtypes() | hynp.floating_dtypes(),
shape=hynp.array_shapes(),
),
)
def test_clip_property(self, data, arr):
"""A property-based test using Hypothesis.
This aims for maximum generality: it could in principle generate *any*
valid inputs to np.clip, and in practice generates much more varied
inputs than human testers come up with.
Because many of the inputs have tricky dependencies - compatible dtypes
and mutually-broadcastable shapes - we use `st.data()` strategy draw
values *inside* the test function, from strategies we construct based
on previous values. An alternative would be to define a custom strategy
with `@st.composite`, but until we have duplicated code inline is fine.
That accounts for most of the function; the actual test is just three
lines to calculate and compare actual vs expected results!
"""
numeric_dtypes = hynp.integer_dtypes() | hynp.floating_dtypes()
# Generate shapes for the bounds which can be broadcast with each other
# and with the base shape. Below, we might decide to use scalar bounds,
# but it's clearer to generate these shapes unconditionally in advance.
in_shapes, result_shape = data.draw(
hynp.mutually_broadcastable_shapes(num_shapes=2, base_shape=arr.shape)
)
# Scalar `nan` is deprecated due to the differing behaviour it shows.
s = numeric_dtypes.flatmap(lambda x: hynp.from_dtype(x, allow_nan=False))
amin = data.draw(
s
| hynp.arrays(
dtype=numeric_dtypes, shape=in_shapes[0], elements={"allow_nan": False}
)
)
amax = data.draw(
s
| hynp.arrays(
dtype=numeric_dtypes, shape=in_shapes[1], elements={"allow_nan": False}
)
)
# Then calculate our result and expected result and check that they're
# equal! See gh-12519 and gh-19457 for discussion deciding on this
# property and the result_type argument.
result = np.clip(arr, amin, amax)
t = np.result_type(arr, amin, amax)
expected = np.minimum(amax, np.maximum(arr, amin, dtype=t), dtype=t)
assert result.dtype == t
assert_array_equal(result, expected)
| TestClip |
python | uqfoundation__dill | dill/tests/test_classdef.py | {
"start": 532,
"end": 623
} | class ____:
def __call__(self):
pass
def ok(self):
return True
| _class2 |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_vertex_ai.py | {
"start": 113595,
"end": 114637
} | class ____:
@mock.patch(VERTEX_AI_PATH.format("model_service.Model.to_dict"))
@mock.patch(VERTEX_AI_PATH.format("model_service.ModelServiceHook"))
def test_execute(self, mock_hook, to_dict_mock):
op = GetModelOperator(
task_id=TASK_ID,
model_id=TEST_MODEL_NAME,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.get_model.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
model_id=TEST_MODEL_NAME,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
| TestVertexAIGetModelsOperator |
python | optuna__optuna | tests/terminator_tests/test_callback.py | {
"start": 201,
"end": 1188
} | class ____(BaseTerminator):
def __init__(self, termination_trial_number: int) -> None:
self._termination_trial_number = termination_trial_number
def should_terminate(self, study: Study) -> bool:
trials = study.get_trials(states=[TrialState.COMPLETE])
latest_number = max([t.number for t in trials])
if latest_number >= self._termination_trial_number:
return True
else:
return False
def test_terminator_callback_terminator() -> None:
# This test case validates that the study is stopped when the `should_terminate` method of the
# terminator returns `True` for the first time.
termination_trial_number = 10
callback = TerminatorCallback(
terminator=_DeterministicTerminator(termination_trial_number),
)
study = create_study()
study.optimize(lambda _: 0.0, callbacks=[callback], n_trials=100)
assert len(study.trials) == termination_trial_number + 1
| _DeterministicTerminator |
python | cython__cython | Tools/make_dataclass_tests.py | {
"start": 9833,
"end": 10586
} | class ____(ast.NodeTransformer):
def __init__(self, substitutions):
super().__init__()
self.substitutions = substitutions
def visit_Constant(self, node):
# attempt to handle some difference in class names
# (note: requires Python>=3.8)
if isinstance(node.value, str):
if node.value.find("<locals>") != -1:
import re
new_value = new_value2 = re.sub("[\w.]*<locals>", "", node.value)
for key, value in self.substitutions.items():
new_value2 = re.sub(f"(?<![\w])[.]{key}(?![\w])", value, new_value2)
if new_value != new_value2:
node.value = new_value2
return node
| SubstituteNameString |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/recurrent.py | {
"start": 60562,
"end": 69732
} | class ____(RNN):
"""Fully-connected RNN where the output is to be fed back to input.
See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
for details about the usage of RNN API.
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, (default `True`), whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs. Default:
`glorot_uniform`.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation of the recurrent state.
Default: `orthogonal`.
bias_initializer: Initializer for the bias vector. Default: `zeros`.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector. Default:
`None`.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation"). Default: `None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the `recurrent_kernel`
weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector. Default:
`None`.
dropout: Float between 0 and 1.
Fraction of the units to drop for the linear transformation of the inputs.
Default: 0.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for the linear transformation of the
recurrent state. Default: 0.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence. Default: `False`.
return_state: Boolean. Whether to return the last state
in addition to the output. Default: `False`
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
Call arguments:
inputs: A 3D tensor, with shape `[batch, timesteps, feature]`.
mask: Binary tensor of shape `[batch, timesteps]` indicating whether
a given timestep should be masked. An individual `True` entry indicates
that the corresponding timestep should be utilized, while a `False` entry
indicates that the corresponding timestep should be ignored.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or
`recurrent_dropout` is used.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
Examples:
```python
inputs = np.random.random([32, 10, 8]).astype(np.float32)
simple_rnn = tf.keras.layers.SimpleRNN(4)
output = simple_rnn(inputs) # The output has shape `[32, 4]`.
simple_rnn = tf.keras.layers.SimpleRNN(
4, return_sequences=True, return_state=True)
# whole_sequence_output has shape `[32, 10, 4]`.
# final_state has shape `[32, 4]`.
whole_sequence_output, final_state = simple_rnn(inputs)
```
"""
def __init__(self,
units,
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if 'implementation' in kwargs:
kwargs.pop('implementation')
logging.warning('The `implementation` argument '
'in `SimpleRNN` has been deprecated. '
'Please remove it from your layer call.')
if 'enable_caching_device' in kwargs:
cell_kwargs = {'enable_caching_device':
kwargs.pop('enable_caching_device')}
else:
cell_kwargs = {}
cell = SimpleRNNCell(
units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
dtype=kwargs.get('dtype'),
trainable=kwargs.get('trainable', True),
**cell_kwargs)
super(SimpleRNN, self).__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.input_spec = [InputSpec(ndim=3)]
def call(self, inputs, mask=None, training=None, initial_state=None):
return super(SimpleRNN, self).call(
inputs, mask=mask, training=training, initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout
}
base_config = super(SimpleRNN, self).get_config()
config.update(_config_for_enable_caching_device(self.cell))
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config:
config.pop('implementation')
return cls(**config)
| SimpleRNN |
python | Textualize__textual | src/textual/css/scalar.py | {
"start": 465,
"end": 577
} | class ____(ScalarError):
"""Raised when a scalar couldn't be parsed from a string."""
@unique
| ScalarParseError |
python | pydantic__pydantic | tests/test_edge_cases.py | {
"start": 32838,
"end": 48709
} | class ____(str):
def foobar(self):
return 7
@pytest.mark.parametrize(
'value,expected',
[
('a string', 'a string'),
(b'some bytes', 'some bytes'),
(bytearray('foobar', encoding='utf8'), 'foobar'),
(StrEnum.a, 'a10'),
(CustomStr('whatever'), 'whatever'),
],
)
def test_valid_string_types(value, expected):
class Model(BaseModel):
v: str
assert Model(v=value).v == expected
@pytest.mark.parametrize(
'value,errors',
[
(
{'foo': 'bar'},
[{'input': {'foo': 'bar'}, 'loc': ('v',), 'msg': 'Input should be a valid string', 'type': 'string_type'}],
),
(
[1, 2, 3],
[{'input': [1, 2, 3], 'loc': ('v',), 'msg': 'Input should be a valid string', 'type': 'string_type'}],
),
],
)
def test_invalid_string_types(value, errors):
class Model(BaseModel):
v: str
with pytest.raises(ValidationError) as exc_info:
Model(v=value)
assert exc_info.value.errors(include_url=False) == errors
def test_inheritance_config():
class Parent(BaseModel):
a: str
class Child(Parent):
model_config = ConfigDict(str_to_lower=True)
b: str
m1 = Parent(a='A')
m2 = Child(a='A', b='B')
assert repr(m1) == "Parent(a='A')"
assert repr(m2) == "Child(a='a', b='b')"
def test_partial_inheritance_config():
class Parent(BaseModel):
a: int = Field(ge=0)
class Child(Parent):
b: int = Field(ge=0)
Child(a=0, b=0)
with pytest.raises(ValidationError) as exc_info:
Child(a=-1, b=0)
assert exc_info.value.errors(include_url=False) == [
{
'ctx': {'ge': 0},
'input': -1,
'loc': ('a',),
'msg': 'Input should be greater than or equal to 0',
'type': 'greater_than_equal',
}
]
with pytest.raises(ValidationError) as exc_info:
Child(a=0, b=-1)
assert exc_info.value.errors(include_url=False) == [
{
'ctx': {'ge': 0},
'input': -1,
'loc': ('b',),
'msg': 'Input should be greater than or equal to 0',
'type': 'greater_than_equal',
}
]
def test_annotation_inheritance():
class A(BaseModel):
integer: int = 1
class B(A):
integer: int = 2
assert B.model_fields['integer'].annotation == int
class C(A):
integer: str = 'G'
assert C.__annotations__['integer'] == str
assert C.model_fields['integer'].annotation == str
with pytest.raises(
TypeError,
match=(
"Field 'integer' defined on a base class was overridden by a non-annotated attribute. "
'All field definitions, including overrides, require a type annotation.'
),
):
class D(A):
integer = 'G'
def test_string_none():
class Model(BaseModel):
model_config = ConfigDict(extra='ignore')
a: constr(min_length=20, max_length=1000) = ...
with pytest.raises(ValidationError) as exc_info:
Model(a=None)
assert exc_info.value.errors(include_url=False) == [
{'input': None, 'loc': ('a',), 'msg': 'Input should be a valid string', 'type': 'string_type'}
]
# def test_return_errors_ok():
# class Model(BaseModel):
# foo: int
# bar: list[int]
#
# assert validate_model(Model, {'foo': '123', 'bar': (1, 2, 3)}) == (
# {'foo': 123, 'bar': [1, 2, 3]},
# {'foo', 'bar'},
# None,
# )
# d, f, e = validate_model(Model, {'foo': '123', 'bar': (1, 2, 3)}, False)
# assert d == {'foo': 123, 'bar': [1, 2, 3]}
# assert f == {'foo', 'bar'}
# assert e is None
# def test_return_errors_error():
# class Model(BaseModel):
# foo: int
# bar: list[int]
#
# d, f, e = validate_model(Model, {'foo': '123', 'bar': (1, 2, 'x')}, False)
# assert d == {'foo': 123}
# assert f == {'foo', 'bar'}
# assert e.errors() == [{'loc': ('bar', 2), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}]
#
# d, f, e = validate_model(Model, {'bar': (1, 2, 3)}, False)
# assert d == {'bar': [1, 2, 3]}
# assert f == {'bar'}
# assert e.errors() == [{'loc': ('foo',), 'msg': 'field required', 'type': 'value_error.missing'}]
def test_optional_required():
class Model(BaseModel):
bar: Optional[int]
assert Model(bar=123).model_dump() == {'bar': 123}
assert Model(bar=None).model_dump() == {'bar': None}
with pytest.raises(ValidationError) as exc_info:
Model()
assert exc_info.value.errors(include_url=False) == [
{'input': {}, 'loc': ('bar',), 'msg': 'Field required', 'type': 'missing'}
]
def test_unable_to_infer():
with pytest.raises(
errors.PydanticUserError,
match=re.escape(
'A non-annotated attribute was detected: `x = None`. All model fields require a type annotation; '
'if `x` is not meant to be a field, you may be able to resolve this error by annotating it as a '
"`ClassVar` or updating `model_config['ignored_types']`"
),
):
class InvalidDefinitionModel(BaseModel):
x = None
def test_multiple_errors():
class Model(BaseModel):
a: Union[None, int, float, Decimal]
with pytest.raises(ValidationError) as exc_info:
Model(a='foobar')
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [
{
'type': 'int_parsing',
'loc': ('a', 'int'),
'msg': 'Input should be a valid integer, unable to parse string as an integer',
'input': 'foobar',
},
{
'type': 'float_parsing',
'loc': ('a', 'float'),
'msg': 'Input should be a valid number, unable to parse string as a number',
'input': 'foobar',
},
{
'type': 'decimal_parsing',
'loc': ('a', 'decimal'),
'msg': 'Input should be a valid decimal',
'input': 'foobar',
},
]
assert Model(a=1.5).a == 1.5
assert Model(a=None).a is None
def test_validate_default():
class Model(BaseModel):
model_config = ConfigDict(validate_default=True)
a: int
b: int
with pytest.raises(ValidationError) as exc_info:
Model()
assert exc_info.value.errors(include_url=False) == [
{'input': {}, 'loc': ('a',), 'msg': 'Field required', 'type': 'missing'},
{'input': {}, 'loc': ('b',), 'msg': 'Field required', 'type': 'missing'},
]
def test_force_extra():
class Model(BaseModel):
model_config = ConfigDict(extra='ignore')
foo: int
assert Model.model_config['extra'] == 'ignore'
def test_submodel_different_type():
class Foo(BaseModel):
a: int
class Bar(BaseModel):
b: int
class Spam(BaseModel):
c: Foo
assert Spam(c={'a': '123'}).model_dump() == {'c': {'a': 123}}
with pytest.raises(ValidationError):
Spam(c={'b': '123'})
assert Spam(c=Foo(a='123')).model_dump() == {'c': {'a': 123}}
with pytest.raises(ValidationError):
Spam(c=Bar(b='123'))
def test_self():
class Model(BaseModel):
self: str
m = Model.model_validate(dict(self='some value'))
assert m.model_dump() == {'self': 'some value'}
assert m.self == 'some value'
assert m.model_json_schema() == {
'title': 'Model',
'type': 'object',
'properties': {'self': {'title': 'Self', 'type': 'string'}},
'required': ['self'],
}
def test_no_name_conflict_in_constructor():
class Model(BaseModel):
self: int
m = Model(**{'__pydantic_self__': 4, 'self': 2})
assert m.self == 2
def test_self_recursive():
class SubModel(BaseModel):
self: int
class Model(BaseModel):
sm: SubModel
m = Model.model_validate({'sm': {'self': '123'}})
assert m.model_dump() == {'sm': {'self': 123}}
def test_custom_init():
class Model(BaseModel):
x: int
def __init__(self, x: int, y: int):
if isinstance(y, str):
y = len(y)
super().__init__(x=x + int(y))
assert Model(x=1, y=1).x == 2
assert Model.model_validate({'x': 1, 'y': 1}).x == 2
assert Model.model_validate_json('{"x": 1, "y": 2}').x == 3
# For documentation purposes: type hints on __init__ are not currently used for validation:
assert Model.model_validate({'x': 1, 'y': 'abc'}).x == 4
def test_nested_custom_init():
class NestedModel(BaseModel):
self: str
modified_number: int = 1
def __init__(someinit, **kwargs):
super().__init__(**kwargs)
someinit.modified_number += 1
class TopModel(BaseModel):
self: str
nest: NestedModel
m = TopModel.model_validate(dict(self='Top Model', nest=dict(self='Nested Model', modified_number=0)))
assert m.self == 'Top Model'
assert m.nest.self == 'Nested Model'
assert m.nest.modified_number == 1
def test_init_inspection():
calls = []
class Foobar(BaseModel):
x: int
def __init__(self, **data) -> None:
with pytest.raises(AttributeError):
calls.append(data)
assert self.x
super().__init__(**data)
Foobar(x=1)
Foobar.model_validate({'x': 2})
Foobar.model_validate_json('{"x": 3}')
assert calls == [{'x': 1}, {'x': 2}, {'x': 3}]
def test_type_on_annotation():
class FooBar:
pass
class Model(BaseModel):
a: type[int]
b: type[int] = int
c: type[FooBar]
d: type[FooBar] = FooBar
e: Sequence[type[FooBar]] = [FooBar]
f: Union[type[FooBar], Sequence[type[FooBar]]] = FooBar
g: Union[type[FooBar], Sequence[type[FooBar]]] = [FooBar]
model_config = {'arbitrary_types_allowed': True}
assert Model.model_fields.keys() == set('abcdefg')
def test_type_union():
class Model(BaseModel):
a: type[Union[str, bytes]]
b: type[Union[Any, str]]
m = Model(a=bytes, b=int)
assert m.model_dump() == {'a': bytes, 'b': int}
assert m.a == bytes
def test_type_on_none():
class Model(BaseModel):
a: type[None]
Model(a=type(None))
with pytest.raises(ValidationError) as exc_info:
Model(a=None)
assert exc_info.value.errors(include_url=False) == [
{
'type': 'is_subclass_of',
'loc': ('a',),
'msg': 'Input should be a subclass of NoneType',
'input': None,
'ctx': {'class': 'NoneType'},
}
]
def test_type_on_typealias():
Float = TypeAliasType('Float', float)
class MyFloat(float): ...
adapter = TypeAdapter(type[Float])
adapter.validate_python(float)
adapter.validate_python(MyFloat)
with pytest.raises(ValidationError) as exc_info:
adapter.validate_python(str)
assert exc_info.value.errors(include_url=False) == [
{
'type': 'is_subclass_of',
'loc': (),
'msg': 'Input should be a subclass of float',
'input': str,
'ctx': {'class': 'float'},
}
]
def test_type_on_annotated():
class Model(BaseModel):
a: type[Annotated[int, ...]]
Model(a=int)
with pytest.raises(ValidationError) as exc_info:
Model(a=str)
assert exc_info.value.errors(include_url=False) == [
{
'type': 'is_subclass_of',
'loc': ('a',),
'msg': 'Input should be a subclass of int',
'input': str,
'ctx': {'class': 'int'},
}
]
def test_type_on_generic_alias() -> None:
error_msg = 'Instead of using type[list[int]], use type[list].'
with pytest.raises(PydanticUserError) as exc_info:
class Model(BaseModel):
a: type[list[int]]
assert error_msg in exc_info.value.message
def test_typing_type_on_generic_alias() -> None:
error_msg = 'Instead of using type[typing.List[int]], use type[list].'
with pytest.raises(PydanticUserError) as exc_info:
# Note: this only works with typing.List, list behaves differently in Python 3.9 and sometimes 3.10,
# so thus we use typing.List here.
class Model(BaseModel):
a: type[typing.List[int]] # noqa: UP006
assert error_msg in exc_info.value.message
def test_type_assign():
class Parent:
def echo(self):
return 'parent'
class Child(Parent):
def echo(self):
return 'child'
class Different:
def echo(self):
return 'different'
class Model(BaseModel):
v: type[Parent] = Parent
assert Model(v=Parent).v().echo() == 'parent'
assert Model().v().echo() == 'parent'
assert Model(v=Child).v().echo() == 'child'
with pytest.raises(ValidationError) as exc_info:
Model(v=Different)
assert exc_info.value.errors(include_url=False) == [
{
'ctx': {'class': Parent.__qualname__},
'input': HasRepr(repr(Different)),
'loc': ('v',),
'msg': f'Input should be a subclass of {Parent.__qualname__}',
'type': 'is_subclass_of',
}
]
def test_optional_subfields():
class Model(BaseModel):
a: Optional[int]
assert Model.model_fields['a'].annotation == Optional[int]
with pytest.raises(ValidationError) as exc_info:
Model(a='foobar')
assert exc_info.value.errors(include_url=False) == [
{
'input': 'foobar',
'loc': ('a',),
'msg': 'Input should be a valid integer, unable to parse string as an integer',
'type': 'int_parsing',
}
]
with pytest.raises(ValidationError) as exc_info:
Model()
assert exc_info.value.errors(include_url=False) == [
{'input': {}, 'loc': ('a',), 'msg': 'Field required', 'type': 'missing'}
]
assert Model(a=None).a is None
assert Model(a=12).a == 12
def test_validated_optional_subfields():
class Model(BaseModel):
a: Optional[int]
@field_validator('a')
@classmethod
def check_a(cls, v):
return v
assert Model.model_fields['a'].annotation == Optional[int]
with pytest.raises(ValidationError) as exc_info:
Model(a='foobar')
assert exc_info.value.errors(include_url=False) == [
{
'input': 'foobar',
'loc': ('a',),
'msg': 'Input should be a valid integer, unable to parse string as an integer',
'type': 'int_parsing',
}
]
with pytest.raises(ValidationError) as exc_info:
Model()
assert exc_info.value.errors(include_url=False) == [
{'input': {}, 'loc': ('a',), 'msg': 'Field required', 'type': 'missing'}
]
assert Model(a=None).a is None
assert Model(a=12).a == 12
def test_optional_field_constraints():
class MyModel(BaseModel):
my_int: Optional[int] = Field(ge=3)
with pytest.raises(ValidationError) as exc_info:
MyModel(my_int=2)
assert exc_info.value.errors(include_url=False) == [
{
'ctx': {'ge': 3},
'input': 2,
'loc': ('my_int',),
'msg': 'Input should be greater than or equal to 3',
'type': 'greater_than_equal',
}
]
def test_field_str_shape():
class Model(BaseModel):
a: list[int]
assert repr(Model.model_fields['a']) == 'FieldInfo(annotation=list[int], required=True)'
assert str(Model.model_fields['a']) == 'annotation=list[int] required=True'
T1 = TypeVar('T1')
T2 = TypeVar('T2')
| CustomStr |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/visitors.py | {
"start": 19068,
"end": 21290
} | class ____(util.MemoizedSlots):
"""Base class for visitor objects which can traverse externally using
the :func:`.visitors.traverse` function.
Direct usage of the :func:`.visitors.traverse` function is usually
preferred.
"""
__slots__ = ("_visitor_dict", "_next")
__traverse_options__: Dict[str, Any] = {}
_next: Optional[ExternalTraversal]
def traverse_single(self, obj: Visitable, **kw: Any) -> Any:
for v in self.visitor_iterator:
meth = getattr(v, "visit_%s" % obj.__visit_name__, None)
if meth:
return meth(obj, **kw)
def iterate(
self, obj: Optional[ExternallyTraversible]
) -> Iterator[ExternallyTraversible]:
"""Traverse the given expression structure, returning an iterator
of all elements.
"""
return iterate(obj, self.__traverse_options__)
@overload
def traverse(self, obj: Literal[None]) -> None: ...
@overload
def traverse(
self, obj: ExternallyTraversible
) -> ExternallyTraversible: ...
def traverse(
self, obj: Optional[ExternallyTraversible]
) -> Optional[ExternallyTraversible]:
"""Traverse and visit the given expression structure."""
return traverse(obj, self.__traverse_options__, self._visitor_dict)
def _memoized_attr__visitor_dict(
self,
) -> Dict[str, _TraverseCallableType[Any]]:
visitors = {}
for name in dir(self):
if name.startswith("visit_"):
visitors[name[6:]] = getattr(self, name)
return visitors
@property
def visitor_iterator(self) -> Iterator[ExternalTraversal]:
"""Iterate through this visitor and each 'chained' visitor."""
v: Optional[ExternalTraversal] = self
while v:
yield v
v = getattr(v, "_next", None)
def chain(self: _ExtT, visitor: ExternalTraversal) -> _ExtT:
"""'Chain' an additional ExternalTraversal onto this ExternalTraversal
The chained visitor will receive all visit events after this one.
"""
tail = list(self.visitor_iterator)[-1]
tail._next = visitor
return self
| ExternalTraversal |
python | doocs__leetcode | solution/1700-1799/1721.Swapping Nodes in a Linked List/Solution.py | {
"start": 151,
"end": 496
} | class ____:
def swapNodes(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:
fast = slow = head
for _ in range(k - 1):
fast = fast.next
p = fast
while fast.next:
fast, slow = fast.next, slow.next
q = slow
p.val, q.val = q.val, p.val
return head
| Solution |
python | pydantic__pydantic | pydantic/errors.py | {
"start": 3055,
"end": 3173
} | class ____(PydanticErrorMixin, TypeError):
"""An error raised due to incorrect use of Pydantic."""
| PydanticUserError |
python | django-import-export__django-import-export | import_export/admin.py | {
"start": 23673,
"end": 32227
} | class ____(BaseExportMixin, ImportExportMixinBase):
"""
Export mixin.
This is intended to be mixed with
`ModelAdmin <https://docs.djangoproject.com/en/stable/ref/contrib/admin/>`_.
"""
#: template for change_list view
import_export_change_list_template = "admin/import_export/change_list_export.html"
#: template for export view
export_template_name = "admin/import_export/export.html"
#: export data encoding
to_encoding = None
#: Form class to use for the initial export step.
#: Assign to :class:`~import_export.forms.ExportForm` if you would
#: like to disable selectable fields feature.
export_form_class = SelectableFieldsExportForm
def get_urls(self):
urls = super().get_urls()
my_urls = [
path(
"export/",
self.admin_site.admin_view(self.export_action),
name="%s_%s_export" % self.get_model_info(),
),
]
return my_urls + urls
def has_export_permission(self, request):
"""
Returns whether a request has export permission.
"""
EXPORT_PERMISSION_CODE = getattr(
settings, "IMPORT_EXPORT_EXPORT_PERMISSION_CODE", None
)
if EXPORT_PERMISSION_CODE is None:
return True
opts = self.opts
codename = get_permission_codename(EXPORT_PERMISSION_CODE, opts)
return request.user.has_perm(f"{opts.app_label}.{codename}")
def get_export_queryset(self, request):
"""
Returns export queryset. The queryset is obtained by calling
ModelAdmin
`get_queryset()
<https://docs.djangoproject.com/en/dev/ref/contrib/admin/#django.contrib.admin.ModelAdmin.get_queryset>`_.
Default implementation respects applied search and filters.
"""
list_display = self.get_list_display(request)
list_display_links = self.get_list_display_links(request, list_display)
list_select_related = self.get_list_select_related(request)
list_filter = self.get_list_filter(request)
search_fields = self.get_search_fields(request)
if self.get_actions(request):
list_display = ["action_checkbox"] + list(list_display)
ChangeList = self.get_changelist(request)
changelist_kwargs = {
"request": request,
"model": self.model,
"list_display": list_display,
"list_display_links": list_display_links,
"list_filter": list_filter,
"date_hierarchy": self.date_hierarchy,
"search_fields": search_fields,
"list_select_related": list_select_related,
"list_per_page": self.list_per_page,
"list_max_show_all": self.list_max_show_all,
"list_editable": self.list_editable,
"model_admin": self,
"sortable_by": self.sortable_by,
}
changelist_kwargs["search_help_text"] = self.search_help_text
class ExportChangeList(ChangeList):
def get_results(self, request):
"""
Overrides ChangeList.get_results() to bypass default operations like
pagination and result counting, which are not needed for export. This
prevents executing unnecessary COUNT queries during ChangeList
initialization.
"""
pass
cl = ExportChangeList(**changelist_kwargs)
# get_queryset() is already called during initialization,
# it is enough to get its results
if hasattr(cl, "queryset"):
return cl.queryset
# Fallback in case the ChangeList doesn't have queryset attribute set
return cl.get_queryset(request)
def get_export_data(self, file_format, request, queryset, **kwargs):
"""
Returns file_format representation for given queryset.
"""
if not self.has_export_permission(request):
raise PermissionDenied
force_native_type = type(file_format) in BINARY_FORMATS
data = self.get_data_for_export(
request,
queryset,
force_native_type=force_native_type,
**kwargs,
)
export_data = file_format.export_data(data)
encoding = kwargs.get("encoding")
if not file_format.is_binary() and encoding:
export_data = export_data.encode(encoding)
return export_data
def get_export_context_data(self, **kwargs):
return self.get_context_data(**kwargs)
def get_context_data(self, **kwargs):
return {}
def get_export_form_class(self):
"""
Get the form class used to read the export format.
"""
return self.export_form_class
def export_action(self, request):
"""
Handles the default workflow for both the export form and the
export of data to file.
"""
if not self.has_export_permission(request):
raise PermissionDenied
form_type = self.get_export_form_class()
formats = self.get_export_formats()
queryset = self.get_export_queryset(request)
if self.is_skip_export_form_enabled():
return self._do_file_export(formats[0](), request, queryset)
form = form_type(
formats,
self.get_export_resource_classes(request),
data=request.POST or None,
)
if request.POST and f"{FORM_FIELD_PREFIX}export_items" in request.POST:
# this field is instantiated if the export is POSTed from the
# 'action' drop down
form.fields["export_items"] = MultipleChoiceField(
widget=MultipleHiddenInput,
required=False,
choices=[(pk, pk) for pk in queryset.values_list("pk", flat=True)],
)
if form.is_valid():
file_format = formats[int(form.cleaned_data["format"])]()
if "export_items" in form.changed_data:
# this request has arisen from an Admin UI action
# export item pks are stored in form data
# so generate the queryset from the stored pks
queryset = queryset.filter(pk__in=form.cleaned_data["export_items"])
try:
return self._do_file_export(
file_format, request, queryset, export_form=form
)
except (ValueError, FieldError) as e:
messages.error(request, str(e))
context = self.init_request_context_data(request, form)
request.current_app = self.admin_site.name
return TemplateResponse(request, [self.export_template_name], context=context)
def changelist_view(self, request, extra_context=None):
if extra_context is None:
extra_context = {}
extra_context["has_export_permission"] = self.has_export_permission(request)
return super().changelist_view(request, extra_context)
def get_export_filename(self, request, queryset, file_format):
return super().get_export_filename(file_format)
def init_request_context_data(self, request, form):
context = self.get_export_context_data()
context.update(self.admin_site.each_context(request))
context["title"] = _("Export")
context["form"] = form
context["opts"] = self.model._meta
context["fields_list"] = [
(
res.get_display_name(),
[
field.column_name
for field in res(
**self.get_export_resource_kwargs(request)
).get_user_visible_export_fields()
],
)
for res in self.get_export_resource_classes(request)
]
return context
def _do_file_export(self, file_format, request, queryset, export_form=None):
export_data = self.get_export_data(
file_format,
request,
queryset,
encoding=self.to_encoding,
export_form=export_form,
)
content_type = file_format.get_content_type()
response = HttpResponse(export_data, content_type=content_type)
response["Content-Disposition"] = 'attachment; filename="{}"'.format(
self.get_export_filename(request, queryset, file_format),
)
post_export.send(sender=None, model=self.model)
return response
| ExportMixin |
python | tensorflow__tensorflow | tensorflow/python/tpu/tests/tpu_embedding_base_test.py | {
"start": 1986,
"end": 30006
} | class ____(parameterized.TestCase, test.TestCase):
def skip_if_oss(self):
if FLAGS.project is not None or FLAGS.zone is not None:
self.skipTest(
'Skipping tests for oss as it is slow to run every test in cloud tpu.'
)
def setUp(self):
super(TPUEmbeddingBaseTest, self).setUp()
self.embedding_values = np.array(list(range(32)), dtype=np.float64)
self.initializer = init_ops_v2.Constant(self.embedding_values)
# Embedding for video initialized to
# 0 1 2 3
# 4 5 6 7
# ...
self.table_video = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=8,
dim=4,
initializer=self.initializer,
combiner='sum',
name='video')
# Embedding for user initialized to
# 0 1
# 2 3
# 4 5
# 6 7
# ...
self.table_user = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=16,
dim=2,
initializer=self.initializer,
combiner='mean',
name='user')
self.feature_config = (tpu_embedding_v2_utils.FeatureConfig(
table=self.table_video, name='watched'),
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_video, name='favorited'),
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_user, name='friends'))
self.batch_size = 2
self.data_batch_size = 4
# One (global) batch of inputs
# sparse tensor for watched:
# row 0: 0
# row 1: 0, 1
# row 2: 0, 1
# row 3: 1
self.feature_watched_indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1],
[3, 0]]
self.feature_watched_values = [0, 0, 1, 0, 1, 1]
self.feature_watched_row_lengths = [1, 2, 2, 1]
# sparse tensor for favorited:
# row 0: 0, 1
# row 1: 1
# row 2: 0
# row 3: 0, 1
self.feature_favorited_indices = [[0, 0], [0, 1], [1, 0], [2, 0], [3, 0],
[3, 1]]
self.feature_favorited_values = [0, 1, 1, 0, 0, 1]
self.feature_favorited_row_lengths = [2, 1, 1, 2]
# sparse tensor for friends:
# row 0: 3
# row 1: 0, 1, 2
# row 2: 3
# row 3: 0, 1, 2
self.feature_friends_indices = [[0, 0], [1, 0], [1, 1], [1, 2], [2, 0],
[3, 0], [3, 1], [3, 2]]
self.feature_friends_values = [3, 0, 1, 2, 3, 0, 1, 2]
self.feature_friends_row_lengths = [1, 3, 1, 3]
self.resolver = None
# Basically we are expand the dims of the old feature by 1 and repeat
# batch size times for the first dimension.
def create_hight_dimensional_indices(indices):
indices = np.array(indices, dtype=np.int32)
batch_size_index = np.repeat(
np.arange(self.data_batch_size), len(indices)).reshape(-1, 1)
repeated_indices = np.tile(indices, (self.data_batch_size, 1))
return np.concatenate([batch_size_index, repeated_indices], axis=1)
# Create high dimensional features with shape(4, 4, 2)
self.feature_watched_indices_high_dimensional = create_hight_dimensional_indices(
self.feature_watched_indices)
self.feature_watched_values_high_dimensional = self.feature_watched_values * self.data_batch_size
self.feature_watched_row_lengths_high_dimensional = self.feature_watched_row_lengths * self.data_batch_size
# Create high dimensional features with shape(4, 4, 2)
self.feature_favorited_indices_high_dimensional = create_hight_dimensional_indices(
self.feature_favorited_indices)
self.feature_favorited_values_high_dimensional = self.feature_favorited_values * self.data_batch_size
self.feature_favorited_row_lengths_high_dimensional = self.feature_favorited_row_lengths * self.data_batch_size
# Create high dimensional features with shape(4, 4, 3)
self.feature_friends_indices_high_dimensional = create_hight_dimensional_indices(
self.feature_friends_indices)
self.feature_friends_values_high_dimensional = self.feature_friends_values * self.data_batch_size
self.feature_friends_row_lengths_high_dimensional = self.feature_friends_row_lengths * self.data_batch_size
def _init_tpu_system(self):
self.resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project)
if hasattr(self.resolver, '_cloud_tpu_client'):
self.resolver._cloud_tpu_client.configure_tpu_version(
version='nightly', restart_type='always')
remote.connect_to_cluster(self.resolver)
return tpu_cluster_resolver.initialize_tpu_system(self.resolver)
def _get_strategy(self):
_ = self._init_tpu_system()
return tpu_strategy.TPUStrategy(self.resolver)
def _create_mid_level(self, optimizer=None):
# Create `TPUEmbedding` object.
if optimizer is None:
optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
return tpu_embedding_v2.TPUEmbedding(
feature_config=self.feature_config, optimizer=optimizer)
def _create_strategy_and_mid_level(self, optimizer_name) -> Tuple[
tpu_strategy.TPUStrategy, tpu_embedding_v2.TPUEmbedding,
tpu_embedding_v2_utils._Optimizer]:
strategy = self._get_strategy()
with strategy.scope():
if optimizer_name == 'sgd':
optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
elif optimizer_name == 'adagrad':
optimizer = tpu_embedding_v2_utils.Adagrad(learning_rate=0.1)
elif optimizer_name == 'adam':
optimizer = tpu_embedding_v2_utils.Adam(learning_rate=0.1)
elif optimizer_name == 'ftrl':
optimizer = tpu_embedding_v2_utils.FTRL(learning_rate=0.1)
elif optimizer_name == 'adagrad_momentum':
optimizer = tpu_embedding_v2_utils.AdagradMomentum(
learning_rate=0.1,
momentum=0.9,
use_nesterov=True,
exponent=3.0,
epsilon=0.1,
beta2=0.9)
else:
raise ValueError('optimizer is not recognized: ', optimizer_name)
mid_level_api = self._create_mid_level(optimizer=optimizer)
return strategy, mid_level_api, optimizer
def _create_sparse_data(self, include_weights, weight=0.5):
sparse_features = (sparse_tensor.SparseTensor(
indices=self.feature_watched_indices,
values=self.feature_watched_values,
dense_shape=[self.data_batch_size, 2]),
sparse_tensor.SparseTensor(
indices=self.feature_favorited_indices,
values=self.feature_favorited_values,
dense_shape=[self.data_batch_size, 2]),
sparse_tensor.SparseTensor(
indices=self.feature_friends_indices,
values=self.feature_friends_values,
dense_shape=[self.data_batch_size, 3]))
if include_weights:
weights = []
for sparse in sparse_features:
values = (
array_ops.ones_like(sparse.values, dtype=dtypes.float32) * weight)
weights.append(
sparse_tensor.SparseTensor(
indices=sparse.indices,
values=values,
dense_shape=sparse.dense_shape))
sparse_features = (sparse_features, tuple(weights))
return sparse_features
def _create_sparse_dataset(self, strategy, include_weights=False, weight=0.5):
# Create dataset for enqueue operation
sparse_features = self._create_sparse_data(include_weights, weight)
dataset = dataset_ops.DatasetV2.from_tensors(sparse_features)
# Data is batched to self.data_batch_size, rebatch to global batch size.
return dataset.unbatch().repeat().batch(
self.batch_size * strategy.num_replicas_in_sync, drop_remainder=True)
def _create_high_dimensional_sparse_dataset(self,
strategy,
include_weights=False,
weight=0.5):
sparse_features = (
sparse_tensor.SparseTensor(
indices=self.feature_watched_indices_high_dimensional,
values=self.feature_watched_values_high_dimensional,
dense_shape=[self.data_batch_size, self.data_batch_size, 2]),
sparse_tensor.SparseTensor(
indices=self.feature_favorited_indices_high_dimensional,
values=self.feature_favorited_values_high_dimensional,
dense_shape=[self.data_batch_size, self.data_batch_size, 2]),
sparse_tensor.SparseTensor(
indices=self.feature_friends_indices_high_dimensional,
values=self.feature_friends_values_high_dimensional,
dense_shape=[self.data_batch_size, self.data_batch_size, 3]))
if include_weights:
weights = []
for sparse in sparse_features:
values = (
array_ops.ones_like(sparse.values, dtype=dtypes.float32) * weight)
weights.append(
sparse_tensor.SparseTensor(
indices=sparse.indices,
values=values,
dense_shape=sparse.dense_shape))
sparse_features = (sparse_features, tuple(weights))
dataset = dataset_ops.DatasetV2.from_tensors(sparse_features)
# Data is batched to self.data_batch_size, rebatch to global batch size.
return dataset.unbatch().repeat().batch(
self.batch_size * strategy.num_replicas_in_sync, drop_remainder=True)
def _create_high_dimensional_ragged_dataset(self,
strategy,
include_weights=False,
weight=0.5):
ragged_features = (
ragged_tensor.RaggedTensor.from_row_lengths(
row_lengths=self.feature_watched_row_lengths_high_dimensional,
values=self.feature_watched_values_high_dimensional),
ragged_tensor.RaggedTensor.from_row_lengths(
row_lengths=self.feature_favorited_row_lengths_high_dimensional,
values=self.feature_favorited_values_high_dimensional),
ragged_tensor.RaggedTensor.from_row_lengths(
row_lengths=self.feature_friends_row_lengths_high_dimensional,
values=self.feature_friends_values_high_dimensional))
if include_weights:
weights = []
for ragged in ragged_features:
values = (
array_ops.ones_like(ragged.values, dtype=dtypes.float32) * weight)
weights.append(
ragged_tensor.RaggedTensor(
row_lengths=ragged.row_lengths(), values=values))
ragged_features = (ragged_features, tuple(weights))
dataset = dataset_ops.DatasetV2.from_tensors(ragged_features)
# Data is batched to self.data_batch_size, rebatch to global batch size.
return dataset.unbatch().repeat().batch(
self.batch_size * strategy.num_replicas_in_sync, drop_remainder=True)
def _create_ragged_dataset(self, strategy, include_weights=False, weight=0.5):
# Create dataset for enqueue operation
sparse_features = self._create_sparse_data(include_weights, weight)
ragged_features = nest.map_structure(ragged_tensor.RaggedTensor.from_sparse,
sparse_features)
dataset = dataset_ops.DatasetV2.from_tensors(ragged_features)
# Data is batched to self.data_batch_size, rebatch to global batch size.
return dataset.unbatch().repeat().batch(
self.batch_size * strategy.num_replicas_in_sync, drop_remainder=True)
def _create_dense_dataset(self, strategy, include_weights=False, weight=0.5):
features = (constant_op.constant(
self.feature_watched_values[:self.data_batch_size], dtype=dtypes.int32),
constant_op.constant(
self.feature_favorited_values[:self.data_batch_size],
dtype=dtypes.int32),
constant_op.constant(
self.feature_friends_values[:self.data_batch_size],
dtype=dtypes.int32))
if include_weights:
weights = [
array_ops.ones_like(t, dtype=dtypes.float32) * weight
for t in features
]
features = (features, tuple(weights))
dataset = dataset_ops.DatasetV2.from_tensors(features)
return dataset.unbatch().repeat().batch(
self.batch_size * strategy.num_replicas_in_sync, drop_remainder=True)
def _create_high_dimensional_dense_dataset(self,
strategy,
include_weights=False,
weight=0.5):
dense_size = self.data_batch_size * self.data_batch_size
features = (constant_op.constant(
self.feature_watched_values_high_dimensional[:dense_size],
shape=(self.data_batch_size, self.data_batch_size, 1),
dtype=dtypes.int32),
constant_op.constant(
self.feature_favorited_values_high_dimensional[:dense_size],
shape=(self.data_batch_size, self.data_batch_size, 1),
dtype=dtypes.int32),
constant_op.constant(
self.feature_friends_values_high_dimensional[:dense_size],
shape=(self.data_batch_size, self.data_batch_size, 1),
dtype=dtypes.int32))
if include_weights:
weights = [
array_ops.ones_like(t, dtype=dtypes.float32) * weight
for t in features
]
features = (features, tuple(weights))
dataset = dataset_ops.DatasetV2.from_tensors(features)
return dataset.unbatch().repeat().batch(
self.batch_size * strategy.num_replicas_in_sync, drop_remainder=True)
def _check_results(self, strategy, shard_out_val, training, input_data,
table_to_variable, optimizer, is_high_dimensional):
num_replicas = strategy.num_replicas_in_sync
# Unpack the values `strategy.run()` returns.
loss = self._unpack(strategy, shard_out_val[0])
activation_watched = self._unpack(strategy, shard_out_val[1])
activation_favorited = self._unpack(strategy, shard_out_val[2])
activation_friends = self._unpack(strategy, shard_out_val[3])
# Core 0:
# Calculate the values of embedding activations.
activation_watched_gold0 = np.array([[0, 1, 2, 3], [4, 6, 8, 10]])
activation_favorited_gold0 = np.array([[4, 6, 8, 10], [4, 5, 6, 7]])
# Second row of `activation_friends_gold0` is the mean of the following.
# row 0: 0 1
# row 1: 2 3
# row 2: 4 5
activation_friends_gold0 = np.array([[6, 7], [2, 3]])
loss_gold0 = self._compute_loss(activation_watched_gold0,
activation_favorited_gold0,
activation_friends_gold0)
# Add on values from other cores:
# Activations for watched are an alternating sequence of
# activation_watched_gold0 and activation_favorited_gold0.
# For favorited it is the same but in the opposite order.
activation_watched_gold = np.concatenate(
(activation_watched_gold0, activation_favorited_gold0))
activation_favorited_gold = np.concatenate(
(activation_favorited_gold0, activation_watched_gold0))
activation_friends_gold = np.concatenate(
(activation_friends_gold0, activation_friends_gold0))
if is_high_dimensional:
activation_watched_gold = np.stack([activation_watched_gold] *
self.batch_size * num_replicas)
activation_favorited_gold = np.stack([activation_favorited_gold] *
self.batch_size * num_replicas)
activation_friends_gold = np.stack([activation_friends_gold] *
self.batch_size * num_replicas)
else:
if num_replicas == 1:
activation_watched_gold = activation_watched_gold0
activation_favorited_gold = activation_favorited_gold0
activation_friends_gold = activation_friends_gold0
else:
activation_watched_gold = np.concatenate(
[activation_watched_gold] * (num_replicas // self.batch_size))
activation_favorited_gold = np.concatenate(
[activation_favorited_gold] * (num_replicas // self.batch_size))
activation_friends_gold = np.concatenate(
[activation_friends_gold] * (num_replicas // self.batch_size))
loss_gold = [loss_gold0] * num_replicas
# Test values.
self.assertAllClose(activation_watched_gold, activation_watched)
self.assertAllClose(activation_favorited_gold, activation_favorited)
self.assertAllClose(activation_friends_gold, activation_friends)
self.assertAllClose(loss_gold, loss)
embedding_table_video_before = np.copy(
np.reshape(self.embedding_values, [8, 4]))
embedding_table_user_before = np.copy(
np.reshape(self.embedding_values, [16, 2]))
if is_high_dimensional:
global_batch_size = self.batch_size * self.data_batch_size * num_replicas
else:
global_batch_size = self.batch_size * num_replicas
if training:
gradient_wrt_watched_gold = (2 * activation_watched_gold /
global_batch_size)
gradient_wrt_favorited_gold = (2 * activation_favorited_gold /
global_batch_size)
gradient_wrt_friends_gold = (2 * activation_friends_gold /
global_batch_size)
# Calculate gradients wrt embedding tables.
gradients_wrt_user = (
self._compute_gradients_wrt_embedding_table(
gradient_wrt_friends_gold, embedding_table_user_before,
input_data[2].indices.numpy(), input_data[2].values.numpy(),
self.table_user.combiner))
gradients_wrt_video = (
self._compute_gradients_wrt_embedding_table(
gradient_wrt_favorited_gold, embedding_table_video_before,
input_data[1].indices.numpy(), input_data[1].values.numpy(),
self.table_video.combiner) +
self._compute_gradients_wrt_embedding_table(
gradient_wrt_watched_gold, embedding_table_video_before,
input_data[0].indices.numpy(), input_data[0].values.numpy(),
self.table_video.combiner))
self._check_embedding_and_slot_variables(embedding_table_user_before,
gradients_wrt_user,
embedding_table_video_before,
gradients_wrt_video, optimizer,
table_to_variable)
def _check_embedding_and_slot_variables(self, embedding_table_user_before,
gradients_wrt_user,
embedding_table_video_before,
gradients_wrt_video, optimizer,
table_to_variable):
if isinstance(optimizer, tpu_embedding_v2_utils.SGD):
check_fn = self._check_embedding_and_slot_variables_for_sgd
elif isinstance(optimizer, tpu_embedding_v2_utils.Adagrad):
check_fn = self._check_embedding_and_slot_variables_for_adagrad
elif isinstance(optimizer, tpu_embedding_v2_utils.AdagradMomentum):
check_fn = self._check_embedding_and_slot_variables_for_adagrad_momentum
elif isinstance(optimizer, tpu_embedding_v2_utils.Adam):
check_fn = self._check_embedding_and_slot_variables_for_adam
elif isinstance(optimizer, tpu_embedding_v2_utils.FTRL):
check_fn = self._check_embedding_and_slot_variables_for_ftrl
else:
raise ValueError('optimizer is not recognized: ', type(optimizer))
check_fn(embedding_table_user_before, gradients_wrt_user, optimizer,
table_to_variable[self.table_user.name])
check_fn(embedding_table_video_before, gradients_wrt_video, optimizer,
table_to_variable[self.table_video.name])
def _check_embedding_and_slot_variables_for_sgd(self, embedding_table_before,
gradients, optimizer,
variables):
embedding_table = np.copy(embedding_table_before)
embedding_table -= optimizer.learning_rate * np.sum(gradients, axis=0)
self.assertAllClose(
self._get_variable(variables['parameters']).numpy(), embedding_table)
def _check_embedding_and_slot_variables_for_adagrad(self,
embedding_table_before,
gradients, optimizer,
variable):
embedding_table = np.copy(embedding_table_before)
accumulator = (
optimizer.initial_accumulator_value + np.sum(gradients, axis=0)**2)
embedding_table -= (
optimizer.learning_rate * np.sum(gradients, axis=0) /
np.sqrt(accumulator))
self.assertAllClose(
self._get_variable(variable['parameters']).numpy(), embedding_table)
self.assertAllClose(
self._get_variable(variable['accumulators']).numpy(), accumulator)
def _check_embedding_and_slot_variables_for_adagrad_momentum(
self, embedding_table_before, gradients, optimizer, variable):
embedding_table = np.copy(embedding_table_before)
accumulator = np.zeros(self._get_variable(variable['accumulators']).shape)
momenta = np.zeros(self._get_variable(variable['momenta']).shape)
gradients = np.sum(gradients, axis=0)
if optimizer.beta2 == 1.0:
accumulator += gradients**2
else:
accumulator = optimizer.beta2 * accumulator + (
1 - optimizer.beta2) * gradients**2
accumulator_power = np.power(accumulator + optimizer.epsilon,
-1.0 / optimizer.exponent)
momenta = optimizer.momentum * momenta + gradients * accumulator_power
if optimizer.use_nesterov:
update = optimizer.momentum * momenta + gradients * accumulator_power
else:
update = momenta
embedding_table -= optimizer.learning_rate * update
self.assertAllClose(
self._get_variable(variable['parameters']).numpy(),
embedding_table,
rtol=1e-3)
self.assertAllClose(
self._get_variable(variable['accumulators']).numpy(),
accumulator,
rtol=1e-3)
self.assertAllClose(
self._get_variable(variable['momenta']).numpy(), momenta, rtol=1e-3)
def _check_embedding_and_slot_variables_for_adam(self, embedding_table_before,
gradients, optimizer,
variable):
embedding_table = np.copy(embedding_table_before)
g = np.sum(gradients, axis=0)
v = g**2 * (1 - optimizer.beta_2)
m = g * (1 - optimizer.beta_1)
epsilon = optimizer.epsilon
# TPU Embeddings don't have the LR decay factor for Adam.
lr_modifier = 1
embedding_table -= (
m * optimizer.learning_rate * lr_modifier / (np.sqrt(v) + epsilon))
self.assertAllClose(
self._get_variable(variable['parameters']).numpy(),
embedding_table,
rtol=1e-4)
self.assertAllClose(
self._get_variable(variable['momenta']).numpy(), m, rtol=1e-4)
self.assertAllClose(
self._get_variable(variable['velocities']).numpy(), v, rtol=1e-4)
def _check_embedding_and_slot_variables_for_ftrl(self, embedding_table_before,
gradients, optimizer,
variable):
embedding_table = np.copy(embedding_table_before)
neg_lr_p = -optimizer.learning_rate_power
accumulator = (
optimizer.initial_accumulator_value + np.sum(gradients, axis=0)**2)
sigma = (accumulator**neg_lr_p - optimizer.initial_accumulator_value**
neg_lr_p) / optimizer.learning_rate
linear = np.sum(gradients, axis=0) - sigma * embedding_table
quadratic = accumulator**neg_lr_p / optimizer.learning_rate
embedding_table = -linear / quadratic
actual_parameters = self._get_variable(variable['parameters']).numpy()
# For entries where `linear` == 0, it is not worth comparing since the
# initial values have not been touched yet and they will not agree with what
# the actual values should be.
actual_parameters *= (linear != 0.0)
# FTRL has a bit more precision diff on parameters.
self.assertAllClose(actual_parameters, embedding_table, rtol=5e-5)
self.assertAllClose(
self._get_variable(variable['linears']).numpy(), linear, rtol=5e-4)
self.assertAllClose(
self._get_variable(variable['accumulators']).numpy(), accumulator)
def _get_replica_numpy(self, structured, strategy, replica_id):
def select_replica(x):
x = strategy.experimental_local_results(x)
if len(x) == 1:
return x.numpy()
return x[replica_id].numpy()
return nest.map_structure(select_replica, structured)
def _compute_gradients_wrt_embedding_table(self, gradient_wrt_activation,
embedding_table, feature_indices,
feature_values, combiner):
"""Compute gradients wrt embedding_table.
Args:
gradient_wrt_activation: `np.array` with shape `batch_size` by embedding
`dimension`.
embedding_table: `np.array` with shape `vocabulary_size` by embedding
`dimension`.
feature_indices: `indices` as used to construct `SparseTensor`.
feature_values: `values` as used to construct `SparseTensor`.
combiner: `String`, 'mean' or 'sum'.
Returns:
Gradients wrt `embedding_table`, an `np.array`s with shape
`batch_size` by `vocabulary_size` by
embedding `dimension`.
Raises:
ValueError: if `combiner` is not one of 'mean' or 'sum'.
"""
if combiner not in ('mean', 'sum'):
raise ValueError(
'`combiner` must be mean or sum; got {}.'.format(combiner))
grads_shape = gradient_wrt_activation.shape[:-1] + embedding_table.shape
grads = np.zeros(shape=grads_shape)
count = np.zeros(shape=grads_shape)
for feature_indice, vocabulary_id in zip(feature_indices, feature_values):
batch_index = tuple(feature_indice[:-1])
grads[batch_index][vocabulary_id] += gradient_wrt_activation[batch_index]
count[batch_index] += 1
count[count == 0] = 1
if combiner == 'mean':
grads = grads / count
return np.reshape(grads, (-1, *embedding_table.shape))
def _unpack(self, strategy, per_replica_output):
per_replica_output = strategy.experimental_local_results(per_replica_output)
per_replica_output = array_ops.concat(per_replica_output, axis=0).numpy()
return per_replica_output
def _get_total_loss_tensor(self, activations):
losses = []
for activation in activations:
losses.append(
math_ops.reduce_mean(
math_ops.reduce_sum(
gen_math_ops.squared_difference(activation, 0), axis=-1)))
total_loss = array_ops.expand_dims_v2(sum(losses), 0)
return total_loss
def _compute_loss(self, activation_watched, activation_favorited,
activation_friends):
watched_loss = np.mean(np.sum(activation_watched**2, axis=-1))
favorited_loss = np.mean(np.sum(activation_favorited**2, axis=-1))
friends_loss = np.mean(np.sum(activation_friends**2, axis=-1))
loss = watched_loss + favorited_loss + friends_loss
return loss
def _get_variable(self, variable):
if isinstance(variable, tpu_embedding_v2.TPUEmbeddingVariable):
return variable.variables[0]
return variable
def _get_tmpdir(self, name, subdir=''):
segments = [FLAGS.model_dir, name] + ([subdir] if subdir else [])
return os.path.join(*segments)
| TPUEmbeddingBaseTest |
python | scipy__scipy | scipy/fftpack/tests/test_real_transforms.py | {
"start": 20726,
"end": 24460
} | class ____:
dec = 14
dct_type = [1, 2, 3, 4]
norms = [None, 'ortho']
rstate = np.random.RandomState(1234)
shape = (32, 16)
data = rstate.randn(*shape)
@pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
(dstn, idstn)])
@pytest.mark.parametrize('axes', [None,
1, (1,), [1],
0, (0,), [0],
(0, 1), [0, 1],
(-2, -1), [-2, -1]])
@pytest.mark.parametrize('dct_type', dct_type)
@pytest.mark.parametrize('norm', ['ortho'])
def test_axes_round_trip(self, fforward, finverse, axes, dct_type, norm):
tmp = fforward(self.data, type=dct_type, axes=axes, norm=norm)
tmp = finverse(tmp, type=dct_type, axes=axes, norm=norm)
assert_array_almost_equal(self.data, tmp, decimal=12)
@pytest.mark.parametrize('fforward,fforward_ref', [(dctn, dct_2d_ref),
(dstn, dst_2d_ref)])
@pytest.mark.parametrize('dct_type', dct_type)
@pytest.mark.parametrize('norm', norms)
def test_dctn_vs_2d_reference(self, fforward, fforward_ref,
dct_type, norm):
y1 = fforward(self.data, type=dct_type, axes=None, norm=norm)
y2 = fforward_ref(self.data, type=dct_type, norm=norm)
assert_array_almost_equal(y1, y2, decimal=11)
@pytest.mark.parametrize('finverse,finverse_ref', [(idctn, idct_2d_ref),
(idstn, idst_2d_ref)])
@pytest.mark.parametrize('dct_type', dct_type)
@pytest.mark.parametrize('norm', [None, 'ortho'])
def test_idctn_vs_2d_reference(self, finverse, finverse_ref,
dct_type, norm):
fdata = dctn(self.data, type=dct_type, norm=norm)
y1 = finverse(fdata, type=dct_type, norm=norm)
y2 = finverse_ref(fdata, type=dct_type, norm=norm)
assert_array_almost_equal(y1, y2, decimal=11)
@pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
(dstn, idstn)])
def test_axes_and_shape(self, fforward, finverse):
with assert_raises(ValueError,
match="when given, axes and shape arguments"
" have to be of the same length"):
fforward(self.data, shape=self.data.shape[0], axes=(0, 1))
with assert_raises(ValueError,
match="when given, axes and shape arguments"
" have to be of the same length"):
fforward(self.data, shape=self.data.shape[0], axes=None)
with assert_raises(ValueError,
match="when given, axes and shape arguments"
" have to be of the same length"):
fforward(self.data, shape=self.data.shape, axes=0)
@pytest.mark.parametrize('fforward', [dctn, dstn])
def test_shape(self, fforward):
tmp = fforward(self.data, shape=(128, 128), axes=None)
assert_equal(tmp.shape, (128, 128))
@pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
(dstn, idstn)])
@pytest.mark.parametrize('axes', [1, (1,), [1],
0, (0,), [0]])
def test_shape_is_none_with_axes(self, fforward, finverse, axes):
tmp = fforward(self.data, shape=None, axes=axes, norm='ortho')
tmp = finverse(tmp, shape=None, axes=axes, norm='ortho')
assert_array_almost_equal(self.data, tmp, decimal=self.dec)
| Test_DCTN_IDCTN |
python | getsentry__sentry | src/sentry/models/artifactbundle.py | {
"start": 6692,
"end": 7184
} | class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
organization_id = BoundedBigIntegerField(db_index=True)
project_id = BoundedBigIntegerField()
artifact_bundle = FlexibleForeignKey("sentry.ArtifactBundle")
date_added = models.DateTimeField(default=timezone.now)
class Meta:
app_label = "sentry"
db_table = "sentry_projectartifactbundle"
indexes = (models.Index(fields=("project_id", "artifact_bundle")),)
| ProjectArtifactBundle |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_vertex_ai.py | {
"start": 52691,
"end": 53724
} | class ____:
@mock.patch(VERTEX_AI_PATH.format("dataset.Dataset.to_dict"))
@mock.patch(VERTEX_AI_PATH.format("dataset.DatasetHook"))
def test_execute(self, mock_hook, to_dict_mock):
op = CreateDatasetOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
dataset=TEST_DATASET,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.create_dataset.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
dataset=TEST_DATASET,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
| TestVertexAICreateDatasetOperator |
python | great-expectations__great_expectations | great_expectations/exceptions/resource_freshness.py | {
"start": 1024,
"end": 1116
} | class ____(ResourceFreshnessAggregateError):
pass
| CheckpointRelatedResourcesFreshnessError |
python | python__mypy | mypyc/test/test_typeops.py | {
"start": 503,
"end": 2227
} | class ____(unittest.TestCase):
def test_bit(self) -> None:
assert is_subtype(bit_rprimitive, bool_rprimitive)
assert is_subtype(bit_rprimitive, int_rprimitive)
assert is_subtype(bit_rprimitive, short_int_rprimitive)
for rt in native_int_types:
assert is_subtype(bit_rprimitive, rt)
def test_bool(self) -> None:
assert not is_subtype(bool_rprimitive, bit_rprimitive)
assert is_subtype(bool_rprimitive, int_rprimitive)
assert is_subtype(bool_rprimitive, short_int_rprimitive)
for rt in native_int_types:
assert is_subtype(bool_rprimitive, rt)
def test_int64(self) -> None:
assert is_subtype(int64_rprimitive, int64_rprimitive)
assert is_subtype(int64_rprimitive, int_rprimitive)
assert not is_subtype(int64_rprimitive, short_int_rprimitive)
assert not is_subtype(int64_rprimitive, int32_rprimitive)
assert not is_subtype(int64_rprimitive, int16_rprimitive)
def test_int32(self) -> None:
assert is_subtype(int32_rprimitive, int32_rprimitive)
assert is_subtype(int32_rprimitive, int_rprimitive)
assert not is_subtype(int32_rprimitive, short_int_rprimitive)
assert not is_subtype(int32_rprimitive, int64_rprimitive)
assert not is_subtype(int32_rprimitive, int16_rprimitive)
def test_int16(self) -> None:
assert is_subtype(int16_rprimitive, int16_rprimitive)
assert is_subtype(int16_rprimitive, int_rprimitive)
assert not is_subtype(int16_rprimitive, short_int_rprimitive)
assert not is_subtype(int16_rprimitive, int64_rprimitive)
assert not is_subtype(int16_rprimitive, int32_rprimitive)
| TestSubtype |
python | pypa__setuptools | setuptools/_vendor/wheel/vendored/packaging/requirements.py | {
"start": 587,
"end": 2933
} | class ____:
"""Parse a requirement.
Parse a given requirement string into its parts, such as name, specifier,
URL, and extras. Raises InvalidRequirement on a badly-formed requirement
string.
"""
# TODO: Can we test whether something is contained within a requirement?
# If so how do we do that? Do we need to test against the _name_ of
# the thing as well as the version? What about the markers?
# TODO: Can we normalize the name and extra name?
def __init__(self, requirement_string: str) -> None:
try:
parsed = _parse_requirement(requirement_string)
except ParserSyntaxError as e:
raise InvalidRequirement(str(e)) from e
self.name: str = parsed.name
self.url: Optional[str] = parsed.url or None
self.extras: Set[str] = set(parsed.extras or [])
self.specifier: SpecifierSet = SpecifierSet(parsed.specifier)
self.marker: Optional[Marker] = None
if parsed.marker is not None:
self.marker = Marker.__new__(Marker)
self.marker._markers = _normalize_extra_values(parsed.marker)
def _iter_parts(self, name: str) -> Iterator[str]:
yield name
if self.extras:
formatted_extras = ",".join(sorted(self.extras))
yield f"[{formatted_extras}]"
if self.specifier:
yield str(self.specifier)
if self.url:
yield f"@ {self.url}"
if self.marker:
yield " "
if self.marker:
yield f"; {self.marker}"
def __str__(self) -> str:
return "".join(self._iter_parts(self.name))
def __repr__(self) -> str:
return f"<Requirement('{self}')>"
def __hash__(self) -> int:
return hash(
(
self.__class__.__name__,
*self._iter_parts(canonicalize_name(self.name)),
)
)
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Requirement):
return NotImplemented
return (
canonicalize_name(self.name) == canonicalize_name(other.name)
and self.extras == other.extras
and self.specifier == other.specifier
and self.url == other.url
and self.marker == other.marker
)
| Requirement |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/array_ops_test.py | {
"start": 92541,
"end": 94653
} | class ____(test_lib.Benchmark):
"""Benchmark the repeat implementation."""
def run_and_time(self, op, iters=100, warmup_iters=10):
self.evaluate(variables.global_variables_initializer())
for _ in range(warmup_iters):
_ = self.evaluate(op)
t0 = time.time()
for _ in range(iters):
self.evaluate(op)
t1 = time.time()
self.report_benchmark(iters=iters, wall_time=(t1 - t0) / float(iters))
def make_variable(self, shape, dtype=dtypes.float32):
items = 1
for dim in shape:
items *= dim
var = variables.Variable(
array_ops.reshape(math_ops.linspace(1., float(items), items), shape),
dtype=dtype)
return var
def run_benchmark(self, shape, max_repeats, axis=None):
with session.Session():
var = self.make_variable(shape)
if axis is None:
axis_size = 1
for dim in shape:
axis_size *= dim
else:
axis_size = shape[axis]
repeats = constant_op.constant(
np.random.randint(max_repeats, size=[axis_size]), dtype=dtypes.int64)
repeat_op = array_ops.repeat(var, repeats, axis=axis)
# Return a scalar to reduce the device-to-host memcopy overhead.
repeat_op = repeat_op[(0,) * len(shape)]
self.run_and_time(repeat_op)
def benchmark_repeat_few_1d(self):
self.run_benchmark(shape=[1024 * 1024], max_repeats=8, axis=0)
def benchmark_repeat_many_1d(self):
self.run_benchmark(shape=[8 * 1024], max_repeats=1024, axis=0)
def benchmark_repeat_few_2d_axis0(self):
self.run_benchmark(shape=[8, 128 * 1024], max_repeats=8, axis=0)
def benchmark_repeat_many_2d_axis0(self):
self.run_benchmark(shape=[8, 1024], max_repeats=1024, axis=0)
def benchmark_repeat_many_2d_axis0_big(self):
self.run_benchmark(shape=[1024, 32], max_repeats=1024, axis=0)
def benchmark_repeat_few_2d_axis1(self):
self.run_benchmark(shape=[8, 128 * 1024], max_repeats=8, axis=1)
def benchmark_repeat_many_2d_axis1(self):
self.run_benchmark(shape=[8, 1024], max_repeats=1024, axis=1)
@test_util.run_all_in_graph_and_eager_modes
| RepeatBenchmark |
python | huggingface__transformers | src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py | {
"start": 9269,
"end": 11573
} | class ____(PreTrainedModel):
config: Phi4MultimodalVisionConfig
base_model_prefix = "phi4_vision"
input_modalities = ("image",)
supports_gradient_checkpointing = True
_no_split_modules = ["Phi4MultimodalVisionEncoderLayer"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": Phi4MultimodalVisionEncoderLayer,
"attentions": Phi4MultimodalVisionAttention,
}
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, Phi4MultimodalVisionEmbeddings):
width = (
self.config.hidden_size
if isinstance(self.config, Phi4MultimodalVisionConfig)
else self.config.hidden_size
)
init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width))
elif isinstance(module, nn.Embedding):
default_flax_embed_init(module.weight)
elif isinstance(module, Phi4MultimodalVisionAttention):
init.normal_(module.q_proj.weight)
init.normal_(module.k_proj.weight)
init.normal_(module.v_proj.weight)
init.normal_(module.out_proj.weight)
init.zeros_(module.q_proj.bias)
init.zeros_(module.k_proj.bias)
init.zeros_(module.v_proj.bias)
init.zeros_(module.out_proj.bias)
elif isinstance(module, Phi4MultimodalVisionMLP):
init.normal_(module.fc1.weight)
init.normal_(module.fc2.weight)
init.normal_(module.fc1.bias, std=1e-6)
init.normal_(module.fc2.bias, std=1e-6)
elif isinstance(module, Phi4MultimodalVisionMultiheadAttentionPoolingHead):
init.normal_(module.probe)
init.normal_(module.attention.in_proj_weight)
init.zeros_(module.attention.in_proj_bias)
elif isinstance(module, (nn.Linear, nn.Conv2d)):
lecun_normal_(module.weight)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
| Phi4MultimodalVisionPreTrainedModel |
python | apache__airflow | providers/elasticsearch/src/airflow/providers/elasticsearch/log/es_response.py | {
"start": 2339,
"end": 2871
} | class ____(AttributeDict):
"""
The Hit class is used to manage and access elements in a document.
It inherits from the AttributeDict class and provides
attribute-like access to its elements, similar to a dictionary.
"""
def __init__(self, document):
data = {}
if "_source" in document:
data = document["_source"]
if "fields" in document:
data.update(document["fields"])
super().__init__(data)
super().__setattr__("meta", HitMeta(document))
| Hit |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_run.py | {
"start": 8103,
"end": 10515
} | class ____(GoogleCloudBaseOperator):
"""
Lists jobs.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param show_deleted: If true, returns deleted (but unexpired)
resources along with active ones.
:param limit: The number of jobs to list. If left empty,
all the jobs will be returned.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"project_id",
"region",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
project_id: str,
region: str,
show_deleted: bool = False,
limit: int | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.show_deleted = show_deleted
self.limit = limit
if limit is not None and limit < 0:
raise AirflowException("The limit for the list jobs request should be greater or equal to zero")
def execute(self, context: Context):
hook: CloudRunHook = CloudRunHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
jobs = hook.list_jobs(
region=self.region, project_id=self.project_id, show_deleted=self.show_deleted, limit=self.limit
)
return [Job.to_dict(job) for job in jobs]
| CloudRunListJobsOperator |
python | tensorflow__tensorflow | tensorflow/python/ops/parallel_for/control_flow_ops_test.py | {
"start": 87294,
"end": 89649
} | class ____(PForTestCase, parameterized.TestCase):
@parameterized.parameters((None,), (3,))
def test_create_composite_inside_loop(self, parallel_iterations):
num_particles = 10
velocities = random_ops.random_uniform([num_particles])
particles = pfor_control_flow_ops.pfor(
# Build a batch of particles all with the same mass.
lambda i: Particle(mass=4., velocity=array_ops.gather(velocities, i)),
num_particles,
parallel_iterations=parallel_iterations)
particles_mass, particles_velocity, velocities = self.evaluate(
(particles.mass, particles.velocity, velocities))
self.assertAllEqual(particles_mass, 4. * np.ones([num_particles]))
self.assertAllEqual(particles_velocity, velocities)
@parameterized.parameters((None,), (3,))
def test_composite_is_converted_to_batched_tensor(
self, parallel_iterations):
particles = pfor_control_flow_ops.pfor(
lambda _: Particle(mass=random_ops.random_uniform([3]), # pylint: disable=g-long-lambda
velocity=random_ops.random_uniform([5, 3])),
4,
parallel_iterations=parallel_iterations)
# Naively batching the component shapes would give `[4, 3]` and `[4, 5, 3]`
# which have no consistent broadcast shape.
self.assertEqual(particles.mass.shape, [4, 1, 3])
self.assertAllEqual(particles.velocity.shape, [4, 5, 3])
def test_vectorized_map_gathers_composite_tensors(self):
particles = Particle(mass=[1., 2., 3., 4., 5.],
velocity=[1., 2., 3., 4., 5.])
self.assertAllEqual(
pfor_control_flow_ops.vectorized_map(
lambda x: x.mass * x.velocity, particles),
particles.mass * particles.velocity)
def test_vectorized_map_of_ragged_tensors(self):
# Vmap should be able to handle ragged Tensors as long as they're not
# *actually* ragged.
ragged = ragged_tensor.RaggedTensor.from_uniform_row_length(
ragged_tensor.RaggedTensor.from_row_lengths(
values=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
row_lengths=[3, 3, 3, 3]),
uniform_row_length=2) # Overall shape [2, 2, 3].
self.assertAllEqual(
pfor_control_flow_ops.vectorized_map(
lambda x: x.to_tensor(shape=[2, 3]), ragged),
ragged.to_tensor(shape=[2, 2, 3]))
| CompositeTensorTest |
python | walkccc__LeetCode | solutions/1267. Count Servers that Communicate/1267.py | {
"start": 0,
"end": 429
} | class ____:
def countServers(self, grid: list[list[int]]) -> int:
m = len(grid)
n = len(grid[0])
ans = 0
rows = [0] * m
cols = [0] * n
for i in range(m):
for j in range(n):
if grid[i][j] == 1:
rows[i] += 1
cols[j] += 1
for i in range(m):
for j in range(n):
if grid[i][j] == 1 and (rows[i] > 1 or cols[j] > 1):
ans += 1
return ans
| Solution |
python | pytorch__pytorch | benchmarks/functional_autograd_benchmark/torchaudio_models.py | {
"start": 6191,
"end": 7463
} | class ____(nn.Module):
def __init__(
self,
input_size,
hidden_size,
rnn_type=nn.LSTM,
bidirectional=False,
batch_norm=True,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bidirectional = bidirectional
self.batch_norm = (
SequenceWise(nn.BatchNorm1d(input_size)) if batch_norm else None
)
self.rnn = rnn_type(
input_size=input_size,
hidden_size=hidden_size,
bidirectional=bidirectional,
bias=True,
)
self.num_directions = 2 if bidirectional else 1
def flatten_parameters(self):
self.rnn.flatten_parameters()
def forward(self, x, output_lengths):
if self.batch_norm is not None:
x = self.batch_norm(x)
x = nn.utils.rnn.pack_padded_sequence(x, output_lengths, enforce_sorted=False)
x, h = self.rnn(x)
x, _ = nn.utils.rnn.pad_packed_sequence(x)
if self.bidirectional:
x = (
x.view(x.size(0), x.size(1), 2, -1)
.sum(2)
.view(x.size(0), x.size(1), -1)
) # (TxNxH*2) -> (TxNxH) by sum
return x
| BatchRNN |
python | cython__cython | Cython/Compiler/MatchCaseNodes.py | {
"start": 12538,
"end": 13292
} | class ____(PatternNode):
"""
keys list of NameNodes
value_patterns list of PatternNodes of equal length to keys
double_star_capture_target NameNode or None
"""
keys = []
value_patterns = []
double_star_capture_target = None
child_attrs = PatternNode.child_attrs + [
"keys",
"value_patterns",
"double_star_capture_target",
]
def get_main_pattern_targets(self):
targets = set()
for pattern in self.value_patterns:
self.update_targets_with_targets(targets, pattern.get_targets())
if self.double_star_capture_target:
self.add_target_to_targets(targets, self.double_star_capture_target.name)
return targets
| MatchMappingPatternNode |
python | jina-ai__jina | tests/k8s/executor-merger/exec_merger.py | {
"start": 122,
"end": 1269
} | class ____(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
from jina.logging.logger import JinaLogger
self.logger = JinaLogger(self.__class__.__name__)
@requests
def debug(self, docs_matrix: List[DocumentArray], **kwargs):
self.logger.debug(
f'Received doc matrix in exec-merger with length {len(docs_matrix)}.'
)
result = DocumentArray()
for docs in zip(*docs_matrix):
traversed_executors = [doc.tags['traversed-executors'] for doc in docs]
shard_ids = [doc.tags['shard_id'] for doc in docs]
shards = [doc.tags['shards'] for doc in docs]
parallels = [doc.tags['parallel'] for doc in docs]
traversed_executors = list(chain(*traversed_executors))
doc = Document()
doc.tags['traversed-executors'] = traversed_executors
doc.tags['shard_id'] = shard_ids
doc.tags['shards'] = shards
doc.tags['parallel'] = parallels
doc.tags['merged'] = True
result.append(doc)
return result
| ExecMerger |
python | wandb__wandb | wandb/vendor/watchdog_0_9_0/wandb_watchdog/observers/api.py | {
"start": 2324,
"end": 4163
} | class ____(BaseThread):
"""
Producer thread base class subclassed by event emitters
that generate events and populate a queue with them.
:param event_queue:
The event queue to populate with generated events.
:type event_queue:
:class:`watchdog.events.EventQueue`
:param watch:
The watch to observe and produce events for.
:type watch:
:class:`ObservedWatch`
:param timeout:
Timeout (in seconds) between successive attempts at reading events.
:type timeout:
``float``
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
BaseThread.__init__(self)
self._event_queue = event_queue
self._watch = watch
self._timeout = timeout
@property
def timeout(self):
"""
Blocking timeout for reading events.
"""
return self._timeout
@property
def watch(self):
"""
The watch associated with this emitter.
"""
return self._watch
def queue_event(self, event):
"""
Queues a single event.
:param event:
Event to be queued.
:type event:
An instance of :class:`watchdog.events.FileSystemEvent`
or a subclass.
"""
self._event_queue.put((event, self.watch))
def queue_events(self, timeout):
"""Override this method to populate the event queue with events
per interval period.
:param timeout:
Timeout (in seconds) between successive attempts at
reading events.
:type timeout:
``float``
"""
def run(self):
try:
while self.should_keep_running():
self.queue_events(self.timeout)
finally:
pass
| EventEmitter |
python | optuna__optuna | optuna/_imports.py | {
"start": 3349,
"end": 4225
} | class ____(types.ModuleType):
"""Module wrapper for lazy import.
This class wraps the specified modules and lazily imports them only when accessed.
Otherwise, `import optuna` is slowed down by importing all submodules and
dependencies even if not required.
Within this project's usage, importlib override this module's attribute on the first
access and the imported submodule is directly accessed from the second access.
Args:
name: Name of module to apply lazy import.
"""
def __init__(self, name: str) -> None:
super().__init__(name)
self._name = name
def _load(self) -> types.ModuleType:
module = importlib.import_module(self._name)
self.__dict__.update(module.__dict__)
return module
def __getattr__(self, item: str) -> Any:
return getattr(self._load(), item)
| _LazyImport |
python | modin-project__modin | versioneer.py | {
"start": 19336,
"end": 22921
} | class ____(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}
def register_vcs_handler(vcs: str, method: str) -> Callable: # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f: Callable) -> Callable:
"""Store f in HANDLERS[vcs][method]."""
HANDLERS.setdefault(vcs, {})[method] = f
return f
return decorate
def run_command(
commands: List[str],
args: List[str],
cwd: Optional[str] = None,
verbose: bool = False,
hide_stderr: bool = False,
env: Optional[Dict[str, str]] = None,
) -> Tuple[Optional[str], Optional[int]]:
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
popen_kwargs: Dict[str, Any] = {}
if sys.platform == "win32":
# This hides the console window if pythonw.exe is used
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
popen_kwargs["startupinfo"] = startupinfo
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen(
[command] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
**popen_kwargs,
)
break
except OSError as e:
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, process.returncode
return stdout, process.returncode
LONG_VERSION_PY[
"git"
] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain.
# Generated by versioneer-0.29
# https://github.com/python-versioneer/python-versioneer
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple
import functools
def get_keywords() -> Dict[str, str]:
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
| NotThisMethod |
python | django__django | django/contrib/sites/admin.py | {
"start": 102,
"end": 214
} | class ____(admin.ModelAdmin):
list_display = ("domain", "name")
search_fields = ("domain", "name")
| SiteAdmin |
python | mlflow__mlflow | mlflow/system_metrics/metrics/network_monitor.py | {
"start": 142,
"end": 1351
} | class ____(BaseMetricsMonitor):
def __init__(self):
super().__init__()
self._set_initial_metrics()
def _set_initial_metrics(self):
# Set initial network usage metrics. `psutil.net_io_counters()` counts the stats since the
# system boot, so to set network usage metrics as 0 when we start logging, we need to keep
# the initial network usage metrics.
network_usage = psutil.net_io_counters()
self._initial_receive_megabytes = network_usage.bytes_recv / 1e6
self._initial_transmit_megabytes = network_usage.bytes_sent / 1e6
def collect_metrics(self):
# Get network usage metrics.
network_usage = psutil.net_io_counters()
# Usage metrics will be the diff between current and initial metrics.
self._metrics["network_receive_megabytes"] = (
network_usage.bytes_recv / 1e6 - self._initial_receive_megabytes
)
self._metrics["network_transmit_megabytes"] = (
network_usage.bytes_sent / 1e6 - self._initial_transmit_megabytes
)
def aggregate_metrics(self):
# Network metrics don't need to be averaged.
return dict(self._metrics)
| NetworkMonitor |
python | encode__django-rest-framework | tests/test_serializer_nested.py | {
"start": 8670,
"end": 11130
} | class ____(TestCase):
# tests for rests_framework.serializers.raise_errors_on_nested_writes
def test_nested_serializer_error(self):
class ProfileSerializer(serializers.ModelSerializer):
class Meta:
model = NestedWriteProfile
fields = ['address']
class NestedProfileSerializer(serializers.ModelSerializer):
profile = ProfileSerializer()
class Meta:
model = NestedWritePerson
fields = ['profile']
serializer = NestedProfileSerializer(data={'profile': {'address': '52 festive road'}})
assert serializer.is_valid()
assert serializer.validated_data == {'profile': {'address': '52 festive road'}}
with pytest.raises(AssertionError) as exc_info:
serializer.save()
assert str(exc_info.value) == (
'The `.create()` method does not support writable nested fields by '
'default.\nWrite an explicit `.create()` method for serializer '
'`tests.test_serializer_nested.NestedProfileSerializer`, or set '
'`read_only=True` on nested serializer fields.'
)
def test_dotted_source_field_error(self):
class DottedAddressSerializer(serializers.ModelSerializer):
address = serializers.CharField(source='profile.address')
class Meta:
model = NestedWritePerson
fields = ['address']
serializer = DottedAddressSerializer(data={'address': '52 festive road'})
assert serializer.is_valid()
assert serializer.validated_data == {'profile': {'address': '52 festive road'}}
with pytest.raises(AssertionError) as exc_info:
serializer.save()
assert str(exc_info.value) == (
'The `.create()` method does not support writable dotted-source '
'fields by default.\nWrite an explicit `.create()` method for '
'serializer `tests.test_serializer_nested.DottedAddressSerializer`, '
'or set `read_only=True` on dotted-source serializer fields.'
)
if postgres_fields:
class NonRelationalPersonModel(models.Model):
"""Model declaring a postgres JSONField"""
data = postgres_fields.JSONField()
class Meta:
required_db_features = {'supports_json_field'}
@pytest.mark.skipif(not postgres_fields, reason='psycopg is not installed')
| TestNestedWriteErrors |
python | getsentry__sentry | src/sentry/integrations/source_code_management/metrics.py | {
"start": 378,
"end": 1514
} | class ____(StrEnum):
"""
SCM integration features
"""
# RepositoryIntegration
GET_STACKTRACE_LINK = "get_stacktrace_link"
GET_CODEOWNER_FILE = "get_codeowner_file"
CHECK_FILE = "check_file"
# SourceCodeIssueIntegration (SCM only)
GET_REPOSITORY_CHOICES = "get_repository_choices"
# SourceCodeSearchEndpoint
HANDLE_SEARCH_ISSUES = "handle_search_issues"
HANDLE_SEARCH_REPOSITORIES = "handle_search_repositories"
GET = "get"
# CommitContextIntegration
GET_BLAME_FOR_FILES = "get_blame_for_files"
CREATE_COMMENT = "create_comment"
UPDATE_COMMENT = "update_comment"
QUEUE_COMMENT_TASK = "queue_comment_task"
GET_PR_DIFFS = "get_pr_diffs" # open PR comments
GET_PR_COMMENTS = "get_pr_comments"
GET_ISSUE_COMMENTS = "get_issue_comments"
# Tasks
LINK_ALL_REPOS = "link_all_repos"
# GitHub only
DERIVE_CODEMAPPINGS = "derive_codemappings"
STUDENT_PACK = "student_pack"
# Releases
COMPARE_COMMITS = "compare_commits"
# Status Checks
CREATE_STATUS_CHECK = "create_status_check"
@dataclass
| SCMIntegrationInteractionType |
python | dagster-io__dagster | examples/airlift-migration-tutorial/tutorial_example/shared/load_csv_to_duckdb.py | {
"start": 107,
"end": 1081
} | class ____:
table_name: str
csv_path: Path
duckdb_path: Path
names: list[str]
duckdb_schema: str
duckdb_database_name: str
def load_csv_to_duckdb(args: LoadCsvToDuckDbArgs) -> None:
# Ensure that path exists
if not args.csv_path.exists():
raise ValueError(f"CSV file not found at {args.csv_path}")
if not args.duckdb_path.exists():
raise ValueError(f"DuckDB database not found at {args.duckdb_path}")
# Duckdb database stored in airflow home
df = pd.read_csv( # noqa: F841 # used by duckdb
args.csv_path,
names=args.names,
)
# Connect to DuckDB and create a new table
con = duckdb.connect(str(args.duckdb_path))
con.execute(f"CREATE SCHEMA IF NOT EXISTS {args.duckdb_schema}").fetchall()
con.execute(
f"CREATE TABLE IF NOT EXISTS {args.duckdb_database_name}.{args.duckdb_schema}.{args.table_name} AS SELECT * FROM df"
).fetchall()
con.close()
| LoadCsvToDuckDbArgs |
python | realpython__materials | python-313/free-threading-jit/benchmarks/gil.py | {
"start": 363,
"end": 1842
} | class ____(NamedTuple):
python: str
threads: int
seconds: float
def save(self):
empty = not CSV_PATH.exists()
with CSV_PATH.open(mode="a", encoding="utf-8", newline="") as file:
writer = DictWriter(file, Record._fields)
if empty:
writer.writeheader()
writer.writerow(self._asdict())
def parse_args():
parser = ArgumentParser()
parser.add_argument("-t", "--threads", type=int, default=cpu_count())
parser.add_argument("-n", type=int, default=DEFAULT_N)
return parser.parse_args()
def main(args):
print_details()
benchmark(args.threads, args.n)
def timed(function):
@wraps(function)
def wrapper(num_threads, n):
t1 = perf_counter()
result = function(num_threads, n)
t2 = perf_counter()
duration = t2 - t1
print(f"\b\b\b: {duration:.2f}s")
Record(python_short(), num_threads, duration).save()
return result
return wrapper
@timed
def benchmark(num_threads, n):
with ThreadPoolExecutor(max_workers=num_threads) as executor:
for _ in range(num_threads):
executor.submit(fib, n)
if num_threads > 1:
print(f"Running {num_threads} threads...", end="", flush=True)
else:
print("Running 1 thread...", end="", flush=True)
def fib(n):
return n if n < 2 else fib(n - 2) + fib(n - 1)
if __name__ == "__main__":
main(parse_args())
| Record |
python | mitmproxy__pdoc | test/testdata/misc.py | {
"start": 6698,
"end": 6958
} | class ____:
static_attr_to_class = ClassDecorator
"""this is a static attribute that point to a Class (not an instance)"""
static_attr_to_instance = ClassDecorator(None)
"""this is a static attribute that point to an instance"""
| ClassAsAttribute |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/ignore_errors_test.py | {
"start": 6168,
"end": 7041
} | class ____(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def _build_ds(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
dataset = dataset.map(lambda x: array_ops.check_numerics(x, "message"))
dataset = dataset.ignore_errors()
options = options_lib.Options()
options.experimental_external_state_policy = (
options_lib.ExternalStatePolicy.IGNORE)
return dataset.with_options(options)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations()))
def test(self, verify_fn):
verify_fn(self, self._build_ds, num_outputs=4)
if __name__ == "__main__":
test.main()
| IgnoreErrorsCheckpointTest |
python | getsentry__sentry | src/sentry/workflow_engine/migrations/0067_workflow_action_group_status_group_db_constraint.py | {
"start": 222,
"end": 1730
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = True
dependencies = [
("sentry", "0914_increase_orgmember_user_email_max_length"),
("workflow_engine", "0066_workflow_action_group_status_table"),
]
operations = [
migrations.AlterField(
model_name="workflowactiongroupstatus",
name="group",
field=sentry.db.models.fields.foreignkey.FlexibleForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="sentry.group"
),
),
]
| Migration |
python | cherrypy__cherrypy | cherrypy/test/test_states.py | {
"start": 9358,
"end": 10682
} | class ____(helper.CPWebCase):
def test_daemonize(self):
if os.name not in ['posix']:
return self.skip('skipped (not on posix) ')
self.HOST = '127.0.0.1'
self.PORT = 8081
# Spawn the process and wait, when this returns, the original process
# is finished. If it daemonized properly, we should still be able
# to access pages.
p = helper.CPProcess(
ssl=(self.scheme.lower() == 'https'),
wait=True,
daemonize=True,
socket_host='127.0.0.1',
socket_port=8081,
)
p.write_conf(extra='test_case_name: "test_daemonize"')
p.start(imports='cherrypy.test._test_states_demo')
try:
# Just get the pid of the daemonization process.
self.getPage('/pid')
self.assertStatus(200)
page_pid = int(self.body)
self.assertEqual(page_pid, p.get_pid())
finally:
# Shut down the spawned process
self.getPage('/exit')
p.join()
# Wait until here to test the exit code because we want to ensure
# that we wait for the daemon to finish running before we fail.
if p.exit_code != 0:
self.fail('Daemonized parent process failed to exit cleanly.')
| PluginTests |
python | django__django | tests/migrations/test_migrations_no_operations/0001_initial.py | {
"start": 35,
"end": 116
} | class ____(migrations.Migration):
dependencies = []
operations = []
| Migration |
python | ionelmc__pytest-benchmark | src/pytest_benchmark/utils.py | {
"start": 6849,
"end": 7083
} | class ____(RegressionCheck):
def compute(self, current, compared):
val = compared[self.field]
if not val:
return float('inf')
return current[self.field] / val * 100 - 100
| PercentageRegressionCheck |
python | kamyu104__LeetCode-Solutions | Python/minimum-cost-to-separate-sentence-into-rows.py | {
"start": 71,
"end": 1202
} | class ____(object):
def minimumCost(self, sentence, k):
"""
:type sentence: str
:type k: int
:rtype: int
"""
def lens(sentence):
j = len(sentence)-1
for i in reversed(xrange(-1, len(sentence))):
if i == -1 or sentence[i] == ' ':
yield j-i
j = i-1
word_lens, dp = [], [] # dp[i]: min cost of word_lens[-1-i:]
t = -1
for l in lens(sentence):
word_lens.append(l)
dp.append(float("inf"))
t += l+1
if t <= k:
dp[-1] = 0
continue
total = l
for j in reversed(xrange(len(dp)-1)):
dp[-1] = min(dp[-1], dp[j] + (k-total)**2)
total += (word_lens[j]+1)
if total > k:
word_lens = word_lens[j:] # minimize len(word_lens) s.t. sum(word_lens) > k
dp = dp[j:]
break
return dp[-1] if dp else 0
# Time: O(s + n * k), n is the number of the word_lens
# Space: O(n)
| Solution |
python | keras-team__keras | guides/writing_your_own_callbacks.py | {
"start": 3360,
"end": 6457
} | class ____(keras.callbacks.Callback):
def on_train_begin(self, logs=None):
keys = list(logs.keys())
print("Starting training; got log keys: {}".format(keys))
def on_train_end(self, logs=None):
keys = list(logs.keys())
print("Stop training; got log keys: {}".format(keys))
def on_epoch_begin(self, epoch, logs=None):
keys = list(logs.keys())
print(
"Start epoch {} of training; got log keys: {}".format(epoch, keys)
)
def on_epoch_end(self, epoch, logs=None):
keys = list(logs.keys())
print("End epoch {} of training; got log keys: {}".format(epoch, keys))
def on_test_begin(self, logs=None):
keys = list(logs.keys())
print("Start testing; got log keys: {}".format(keys))
def on_test_end(self, logs=None):
keys = list(logs.keys())
print("Stop testing; got log keys: {}".format(keys))
def on_predict_begin(self, logs=None):
keys = list(logs.keys())
print("Start predicting; got log keys: {}".format(keys))
def on_predict_end(self, logs=None):
keys = list(logs.keys())
print("Stop predicting; got log keys: {}".format(keys))
def on_train_batch_begin(self, batch, logs=None):
keys = list(logs.keys())
print(
"...Training: start of batch {}; got log keys: {}".format(
batch, keys
)
)
def on_train_batch_end(self, batch, logs=None):
keys = list(logs.keys())
print(
"...Training: end of batch {}; got log keys: {}".format(batch, keys)
)
def on_test_batch_begin(self, batch, logs=None):
keys = list(logs.keys())
print(
"...Evaluating: start of batch {}; got log keys: {}".format(
batch, keys
)
)
def on_test_batch_end(self, batch, logs=None):
keys = list(logs.keys())
print(
"...Evaluating: end of batch {}; got log keys: {}".format(
batch, keys
)
)
def on_predict_batch_begin(self, batch, logs=None):
keys = list(logs.keys())
print(
"...Predicting: start of batch {}; got log keys: {}".format(
batch, keys
)
)
def on_predict_batch_end(self, batch, logs=None):
keys = list(logs.keys())
print(
"...Predicting: end of batch {}; got log keys: {}".format(
batch, keys
)
)
"""
Let's try it out:
"""
model = get_model()
model.fit(
x_train,
y_train,
batch_size=128,
epochs=1,
verbose=0,
validation_split=0.5,
callbacks=[CustomCallback()],
)
res = model.evaluate(
x_test, y_test, batch_size=128, verbose=0, callbacks=[CustomCallback()]
)
res = model.predict(x_test, batch_size=128, callbacks=[CustomCallback()])
"""
### Usage of `logs` dict
The `logs` dict contains the loss value, and all the metrics at the end of a batch or
epoch. Example includes the loss and mean absolute error.
"""
| CustomCallback |
python | doocs__leetcode | solution/0600-0699/0634.Find the Derangement of An Array/Solution2.py | {
"start": 0,
"end": 205
} | class ____:
def findDerangement(self, n: int) -> int:
mod = 10**9 + 7
a, b = 1, 0
for i in range(2, n + 1):
a, b = b, ((i - 1) * (a + b)) % mod
return b
| Solution |
python | tensorflow__tensorflow | tensorflow/python/ops/weak_tensor_nn_test.py | {
"start": 6374,
"end": 8447
} | class ____(test_lib.TestCase):
def testValues(self):
np_values = np.array(
[np.linspace(-7.0, 0.0, 100),
np.linspace(0.0, 7.0, 100)],
dtype=np.float32)
tf_values = _get_weak_tensor(np_values)
actual_tf_outputs = nn_impl.swish(tf_values)
self.assertIsInstance(actual_tf_outputs, weak_tensor.WeakTensor)
expected_tf_outputs = tf_values * math_ops.sigmoid(tf_values)
actual_outputs, expected_outputs = self.evaluate(
[actual_tf_outputs, expected_tf_outputs])
self.assertAllClose(actual_outputs, expected_outputs)
def testValuesWithBeta(self):
np_values = np.array(
[np.linspace(-7.0, 0.0, 100),
np.linspace(0.0, 7.0, 100)],
dtype=np.float32)
tf_values = _get_weak_tensor(np_values)
actual_tf_outputs = nn_impl.swish(tf_values, beta=0.5)
self.assertIsInstance(actual_tf_outputs, weak_tensor.WeakTensor)
expected_tf_outputs = tf_values * math_ops.sigmoid(0.5 * tf_values)
actual_outputs, expected_outputs = self.evaluate(
[actual_tf_outputs, expected_tf_outputs])
self.assertAllClose(actual_outputs, expected_outputs)
def testGradients(self):
shape = [5, 3, 4]
sigma = 5
input_values = np.random.randn(*shape) * sigma
x_tf = _get_weak_tensor(input_values)
with self.cached_session():
def f(x): # pylint: disable=invalid-name
return nn_impl.swish(x)
theoretical, numerical = gradient_checker_v2.compute_gradient(
f, [x_tf])
self.assertAllClose(theoretical, numerical)
def testGradientsWithBeta(self):
shape = [5, 3, 4]
sigma = 5
input_values = np.random.randn(*shape) * sigma
x_tf = _get_weak_tensor(input_values)
with self.cached_session():
def f(x): # pylint: disable=invalid-name
return nn_impl.swish(x, beta=0.5)
theoretical, numerical = gradient_checker_v2.compute_gradient(
f, [x_tf])
self.assertAllClose(theoretical, numerical)
if __name__ == "__main__":
ops.set_dtype_conversion_mode("all")
test_lib.main()
| SwishTest |
python | pytorch__pytorch | torch/nn/modules/loss.py | {
"start": 791,
"end": 1147
} | class ____(Module):
reduction: str
def __init__(self, size_average=None, reduce=None, reduction: str = "mean") -> None:
super().__init__()
if size_average is not None or reduce is not None:
self.reduction: str = _Reduction.legacy_get_string(size_average, reduce)
else:
self.reduction = reduction
| _Loss |
python | marshmallow-code__marshmallow | src/marshmallow/fields.py | {
"start": 34878,
"end": 35738
} | class ____(Number[int]):
"""An integer field.
:param strict: If `True`, only integer types are valid.
Otherwise, any value castable to `int` is valid.
:param kwargs: The same keyword arguments that :class:`Number` receives.
"""
num_type = int
#: Default error messages.
default_error_messages = {"invalid": "Not a valid integer."}
def __init__(
self,
*,
strict: bool = False,
as_string: bool = False,
**kwargs: Unpack[_BaseFieldKwargs],
):
self.strict = strict
super().__init__(as_string=as_string, **kwargs)
# override Number
def _validated(self, value: typing.Any) -> int:
if self.strict and not isinstance(value, numbers.Integral):
raise self.make_error("invalid", input=value)
return super()._validated(value)
| Integer |
python | pytorch__pytorch | test/onnx/verify.py | {
"start": 228,
"end": 20961
} | class ____:
"""
An error-collecting object which supports error recovery.
It is intended to be used like a context manager:
>>> with Errors("Top-level error message") as errs:
>>> ...
"""
def __init__(self, msg, rtol=1e-3, atol=1e-5):
self.msg = msg
self.errors = []
self.context = []
self.rtol = rtol
self.atol = atol
# Allocated upon instance creation so that multiple Errors
# can be used
class ShortCircuit(Exception):
pass
self.exc_class = ShortCircuit
def requireAlmostEqual(self, x, y, msg=None):
"""
Test that x and y are nearly equal (equal within self.rtol
precision); aborts execution if they are not.
"""
self.almostEqualAndThen(x, y, msg, self.failWith)
def checkAlmostEqual(self, x, y, msg=None):
"""
Test that x and y are nearly equal (equal within self.rtol
precision), but continue execution even if they are not equal.
To prevent error cascades, you should remember to call "failIfErrs"
at some later point in time.
"""
self.almostEqualAndThen(x, y, msg, self.addErr)
def almostEqualAndThen(self, x, y, msg, k):
"""
Helper for implementing "requireAlmostEqual" and "checkAlmostEqual".
Upon failure, invokes continuation "k" with the error message.
At the moment, only tests on "numpy.ndarray" are supported.
"""
if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
np.testing.assert_allclose(
x, y, rtol=self.rtol, atol=self.atol, equal_nan=True, verbose=True
)
else:
raise RuntimeError("Unsupported almost equal test")
def requireEqual(self, x, y, msg=None):
"""
Test that x and y are equal; aborts execution if they are not.
"""
self.equalAndThen(x, y, msg, self.failWith)
def checkEqual(self, x, y, msg=None):
"""
Test that x and y are equal, but continue execution even if they are not equal.
To prevent error cascades, you should remember to call "failIfErrs"
at some later point in time.
"""
self.equalAndThen(x, y, msg, self.addErr)
# Bit-for-bit accuracy test
def equalAndThen(self, x, y, msg, k):
"""
Helper for implementing "requireEqual" and "checkEqual". Upon failure,
invokes continuation "k" with the error message.
"""
if isinstance(x, onnx.TensorProto) and isinstance(y, onnx.TensorProto):
self.equalAndThen(x.name, y.name, msg, k)
# Use numpy for the comparison
t1 = onnx.numpy_helper.to_array(x)
t2 = onnx.numpy_helper.to_array(y)
new_msg = f"{colonize(msg)}In embedded parameter '{x.name}'"
self.equalAndThen(t1, t2, new_msg, k)
elif isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
np.testing.assert_equal(x, y)
else:
if x != y:
# TODO: Better algorithm for lists
sx = str(x)
sy = str(y)
if len(sx) > 40 or len(sy) > 40 or "\n" in sx or "\n" in sy:
# long form
l = "=" * 50
k(
"\n{}The value\n{}\n{}\n{}\n\ndoes not equal\n\n{}\n{}\n{}".format(
colonize(msg, ":\n"), l, sx, l, l, sy, l
)
)
else:
k(f"{colonize(msg)}{sx} != {sy}")
def requireMultiLineEqual(self, x, y, msg=None):
"""
Test that long, multi-line strings x and y are equal;
aborts execution if they are not.
"""
self.multiLineEqualAndThen(x, y, msg, self.failWith)
def multiLineEqualAndThen(self, x, y, msg, k):
"""
Helper for implementing "requireMultiLineEqual". Upon failure,
invokes continuation "k" with the error message.
"""
if msg is None:
msg = "Strings are not equal"
if x != y:
diff = difflib.ndiff(x.splitlines(True), y.splitlines(True))
k("{}{}".format(colonize(msg, ":\n\n"), "".join(diff)))
def addErr(self, msg):
"""
Add an error to the error context, but continue executing.
"""
# TODO: instead of immediately concatenating the context in the msg,
# attach it as metadata and make a decision how to format it later.
for c in reversed(self.context):
msg += "\n\n * " + "\n ".join(c.splitlines())
self.errors.append(msg)
def fail(self):
"""
Immediately fail and short-circuit to the next recovery context.
NB: It is an error to "fail" without having added any errors to
the error context.
"""
raise self.exc_class
def failWith(self, msg):
"""
Add an error to the error context, and then short-circuit.
"""
self.addErr(msg)
self.fail()
def failIfErrs(self):
"""
If there are any errors in the error context, short-circuit.
This is used to prevent error cascades.
"""
if self.errors:
self.fail()
def recover(self):
"""
Returns a context manager which can be used to recover in case of
an error. Example usage:
>>> with errs.recover():
>>> ...
"""
parent_self = self
class Recover:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type == parent_self.exc_class:
return True
return Recover()
def addErrCtxt(self, msg):
"""
Returns a context manager which encloses a fragment of code with
an extra contextual message, e.g., where an error occurred, or a hint
applicable to all errors in the area. Example usage:
>>> with errs.addErrCtx("Some text"):
>>> ...
"""
parent_self = self
class AddContext:
def __enter__(self):
parent_self.context.append(msg)
def __exit__(self, exc_type, exc_value, traceback):
parent_self.context.pop()
return AddContext()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.errors:
errors_msg = "\n\n".join("ERROR: " + x for x in self.errors)
final_msg = "{}\n{}\n{}".format(self.msg, "-" * 70, errors_msg)
raise AssertionError(final_msg)
if exc_type == self.exc_class:
raise RuntimeError("ShortCircuit was raised, but no errors were recorded")
def verify(
model,
args,
backend,
verbose=False,
training=torch.onnx.TrainingMode.EVAL,
rtol=1e-3,
atol=1e-7,
test_args=2,
do_constant_folding=True,
opset_version=None,
keep_initializers_as_inputs=True,
add_node_names=False,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX,
input_names=None,
dynamic_axes=None,
remained_onnx_input_idx=None,
):
"""
Export a model into ONNX, import it into a specified ONNX backend, and then
on a few random inputs verify that PyTorch and the backend produced the same
results. Requires onnx to be installed.
This function may spuriously fail: some operators are implemented with
different numerical precision in an ONNX backend, in which case an unstable
network (e.g., Inception) may blow up these numerical instabilities. This
situation is less likely to happen if your model has been trained. However,
if this is not the case, you may have found a bug! Please report it to the
PyTorch developers. You can also debug the issue yourself by removing
suffixes of operators from your model until verification passes.
For reproducibility, we recommend explicitly setting PyTorch's seed before
invoking this function.
Args:
model (torch.nn.Module): the model to be exported and verified
args (tuple of arguments): the inputs to
the model, e.g., such that ``model(*args)`` is a valid
invocation of the model. Any non-Variable arguments will
be hard-coded into the exported model; any Variable arguments
will become inputs of the exported model, in the order they
occur in args. If args is a Variable, this is equivalent
to having called it with a 1-ary tuple of that Variable.
(Note: passing keyword arguments to the model is not currently
supported. Give us a shout if you need it.)
backend (onnx.backend module): ONNX backend to verify with
verbose (bool, default False): if specified, we will print out a debug
description of the trace being exported.
training (bool, default False): export the model in training mode. At
the moment, ONNX is oriented towards exporting models for inference
only, so you will generally not need to set this to True.
rtol (float, default 1e-3): relative precision required
test_args (int or iterable of args, default 2):
either an integer specifying the number
of random arguments to generate, or an iterable producing arguments
to test under.
opset_version (int, default None): the opset version of the model to
export. If not specified, the default value in symboli_helper will
be used in utils._export().
operator_export_type (enum, default OperatorExportTypes.ONNX): the operator
export type to use when exporting the model. The default value converts
all operators to ONNX ops.
input_names (list of string): list of input names.
dynamic_axes (dict of (string, list)): dynamic_axes.
remained_onnx_input_idx (list of int, default None): The remained ONNX input index.
"""
def _nested_map(condition, fn, condition_msg=None):
def _map(obj):
if condition(obj):
return fn(obj)
elif obj is None:
return None
elif isinstance(obj, (list, tuple)):
return type(obj)(_map(x) for x in obj)
else:
raise ValueError(
"Auto nesting doesn't know how to process "
"an input object of type "
+ torch.typename(obj)
+ (
". Accepted types: "
+ condition_msg
+ ", or lists/tuples of them"
if condition_msg
else ""
)
)
return _map
def _iter_filter(condition, allow_unknown=False, condition_msg=None):
def _iter(obj):
if condition(obj):
yield obj
elif obj is None:
return
elif isinstance(obj, (list, tuple)):
for o in obj:
yield from _iter(o)
elif allow_unknown:
yield obj
else:
raise ValueError(
"Auto nesting doesn't know how to process "
"an input object of type "
+ torch.typename(obj)
+ (
". Accepted types: "
+ condition_msg
+ ", or lists/tuples of them"
if condition_msg
else ""
)
)
return _iter
    def is_tensor(o):
        # Leaf predicate for the nesting helpers above: True iff `o` is a torch.Tensor.
        return isinstance(o, torch.Tensor)
    # Generator that walks a nested list/tuple structure and yields every
    # torch.Tensor in it; a non-tensor, non-None leaf raises ValueError
    # mentioning "Tensors" as the accepted type.
    _iter_tensors = _iter_filter(is_tensor, condition_msg="Tensors")
def randomize_arg(arg):
new_data = arg.data.clone()
# For now, don't try randomizing non-float tensors; these
# are likely to be things like indices, where just randomly
# spattering some longs is unlikely to work. One way we could
# make this work is to apply a random permutation or something.
if arg.is_floating_point():
new_data.uniform_()
return torch.autograd.Variable(new_data, requires_grad=arg.requires_grad)
randomize_args = _nested_map(is_tensor, randomize_arg)
def backend_args(args):
# TODO: onnx should accept iterables
return tuple(v.data.cpu().numpy() for v in _iter_tensors(args))
def load_bytes(b):
b.seek(0)
x = onnx.load(b)
# doc_string has stack traces - let's remove them to make comparison
# sane
onnx.helper.strip_doc_string(x)
return x
    # Special case for common case of passing a single Tensor
    if isinstance(args, torch.Tensor):
        args = (args,)
    # Reference export: serialize the model once and prepare the backend.
    # The nested `run` below re-exports with other inputs and compares the
    # resulting proto against this reference `proto`.
    with torch.onnx.select_model_mode_for_export(model, training):
        proto_bytes = io.BytesIO()
        torch_out = torch.onnx.utils._export(
            model,
            args,
            proto_bytes,
            verbose=verbose,
            do_constant_folding=do_constant_folding,
            opset_version=opset_version,
            keep_initializers_as_inputs=keep_initializers_as_inputs,
            add_node_names=add_node_names,
            operator_export_type=operator_export_type,
            input_names=input_names,
            dynamic_axes=dynamic_axes,
        )
        if isinstance(model, torch.jit.ScriptModule):
            # NOTE(review): apparently _export's return value is not usable as
            # the reference output for ScriptModules, so re-run the model
            # directly — confirm against torch.onnx.utils._export's behavior.
            torch_out = model(*args)
        proto = load_bytes(proto_bytes)
        prepared = backend.prepare(proto)
        def run(args, remained_onnx_input_idx):
            """Re-export `model` with *args* and verify export stability.

            Checks that (a) the re-exported ONNX graph is byte-identical to the
            reference `proto`, emitting a diagnosed error report if not, and
            (b) the backend's outputs for *args* match PyTorch's (delegated to
            `run_helper`).
            """
            alt_proto_bytes = io.BytesIO()
            torch_out = torch.onnx.utils._export(
                model,
                args,
                alt_proto_bytes,
                verbose=verbose,
                do_constant_folding=do_constant_folding,
                opset_version=opset_version,
                keep_initializers_as_inputs=keep_initializers_as_inputs,
                add_node_names=add_node_names,
                operator_export_type=operator_export_type,
                input_names=input_names,
                dynamic_axes=dynamic_axes,
            )
            if isinstance(model, torch.jit.ScriptModule):
                # Same ScriptModule special case as the reference export above.
                torch_out = model(*args)
            alt_proto = load_bytes(alt_proto_bytes)
            if proto.SerializeToString() != alt_proto.SerializeToString():
                # OK, let's try to figure out what happened.
                msg = "When I exported your model with different inputs, the result was different."
                if not verbose:
                    msg += "\n(To get more information, run torch.onnx.verify(..., verbose=True))"
                with Errors(msg, rtol=rtol, atol=atol) as errs:
                    # First, check if we have the same number of parameters, and
                    # that they're the same order. If they don't, something has *really* gone wrong.
                    initializer_order_hint = (
                        "This is really strange! The second time I exported your model,\n"
                        "it had a different set of parameters. Are you assigning Parameters\n"
                        "in the forward() of your model definition?"
                    )
                    with errs.addErrCtxt(initializer_order_hint):
                        errs.requireEqual(
                            [x.name for x in proto.graph.initializer],
                            [x.name for x in alt_proto.graph.initializer],
                            msg="Parameters list differs",
                        )
                    # Now check if the embedded parameters are actually the same
                    initializer_hint = (
                        "A difference in embedded parameters usually means that\n"
                        "your model is updating parameters/buffers even in inference\n"
                        "mode. Look for a buggy nn.Module which isn't respecting train().\n"
                    )
                    # errs.recover() lets us keep collecting further diagnostics
                    # even if this comparison fails.
                    with errs.recover(), errs.addErrCtxt(initializer_hint):
                        for x, y in zip(
                            proto.graph.initializer, alt_proto.graph.initializer
                        ):
                            errs.checkEqual(x, y)
                    # Next, check if the model structure lines up.
                    structure_hint = (
                        "A difference in model structure usually means that\n"
                        "your model has dynamic control flow. These models are not\n"
                        "currently supported by the exporter."
                    )
                    with errs.recover(), errs.addErrCtxt(structure_hint):
                        # Delete initializers since we already tested them
                        stripped_proto = onnx.ModelProto()
                        stripped_proto.CopyFrom(proto)
                        del stripped_proto.graph.initializer[:]
                        stripped_alt_proto = onnx.ModelProto()
                        stripped_alt_proto.CopyFrom(alt_proto)
                        del stripped_alt_proto.graph.initializer[:]
                        # Compare the printable graph representations first
                        errs.requireMultiLineEqual(
                            onnx.helper.printable_graph(stripped_proto.graph),
                            onnx.helper.printable_graph(stripped_alt_proto.graph),
                        )
                        # Compare the actual protobuf text formats now (not
                        # very user-friendly!)
                        errs.requireMultiLineEqual(
                            str(stripped_proto), str(stripped_alt_proto)
                        )
                        # One last ditch effort, using built-in equality on
                        # protobufs
                        errs.requireEqual(stripped_proto, stripped_alt_proto)
                    errs.failIfErrs()
                    # At this point, we should have figured out why the binary
                    # protobufs differed, and short-circuited out of this code
                    # with a helpful error message. But what if we didn't?
                    # We better still try to give a good error message in this
                    # case. We EXPECT these requires to fail. If they don't,
                    # that is a bug in verify
                    errs.requireEqual(proto, alt_proto)
                    errs.requireEqual(
                        proto_bytes.getvalue(), alt_proto_bytes.getvalue()
                    )
                # Unreachable unless the Errors machinery above failed to raise,
                # which would itself be a bug in verify.
                raise AssertionError
            # TODO: test that the traced model also returns the same thing...
            run_helper(torch_out, args, remained_onnx_input_idx)
# Factored out so we can avoid one run of the model
def run_helper(torch_out, args, remained_onnx_input_idx):
onnx_input = backend_args(args)
if remained_onnx_input_idx is not None:
input_onnx = []
for idx in remained_onnx_input_idx:
input_onnx.append(onnx_input[idx])
onnx_input = tuple(input_onnx)
backend_out = prepared.run(onnx_input)
if isinstance(torch_out, torch.Tensor):
torch_out = (torch_out,)
torch_out, _ = torch.jit._flatten(torch_out)
# NB: onnx backend NEVER returns bare numpy array
msg = "ONNX backend returned different results from PyTorch"
result_hint = (
"If you are not using trained parameters, a difference in results\n"
"could mean that your network is numerically unstable. Otherwise\n"
"it indicates a bug in PyTorch/ONNX; please file a bug report."
)
with (
Errors(msg, rtol=rtol, atol=atol) as errs,
errs.addErrCtxt(result_hint),
):
for i, (x, y) in enumerate(zip(torch_out, backend_out)):
errs.checkAlmostEqual(x.data.cpu().numpy(), y, f"In output {i}")
        # First validate the backend against the reference export with the
        # original inputs (reusing `torch_out` so the model isn't re-run).
        run_helper(torch_out, args, remained_onnx_input_idx)
        # Then repeat the full export+compare cycle with alternate inputs:
        # either `test_args` randomized variants of `args`, or each element of
        # an explicitly supplied iterable of argument sets.
        if isinstance(test_args, int):
            for _ in range(test_args):
                run(randomize_args(args), remained_onnx_input_idx)
        else:
            for test_arg in test_args:
                run(test_arg, remained_onnx_input_idx)
| Errors |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.