| language (stringclasses: 1) | repo (stringclasses: 346) | path (stringlengths: 6–201) | class_span (dict) | source (stringlengths: 21–2.38M) | target (stringlengths: 1–96) |
|---|---|---|---|---|---|
python | sympy__sympy | sympy/vector/orienters.py | {
"start": 622,
"end": 2785
} | class ____(Orienter):
"""
Class to denote an axis orienter.
"""
def __new__(cls, angle, axis):
if not isinstance(axis, sympy.vector.Vector):
raise TypeError("axis should be a Vector")
angle = sympify(angle)
obj = super().__new__(cls, angle, axis)
obj._angle = angle
obj._axis = axis
return obj
def __init__(self, angle, axis):
"""
Axis rotation is a rotation about an arbitrary axis by
some angle. The angle is supplied as a SymPy expr scalar, and
the axis is supplied as a Vector.
Parameters
==========
angle : Expr
The angle by which the new system is to be rotated
axis : Vector
The axis around which the rotation has to be performed
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import symbols
>>> q1 = symbols('q1')
>>> N = CoordSys3D('N')
>>> from sympy.vector import AxisOrienter
>>> orienter = AxisOrienter(q1, N.i + 2 * N.j)
>>> B = N.orient_new('B', (orienter, ))
"""
# Dummy initializer for docstrings
pass
@cacheit
def rotation_matrix(self, system):
"""
The rotation matrix corresponding to this orienter
instance.
Parameters
==========
system : CoordSys3D
The coordinate system wrt which the rotation matrix
is to be computed
"""
axis = sympy.vector.express(self.axis, system).normalize()
axis = axis.to_matrix(system)
theta = self.angle
parent_orient = ((eye(3) - axis * axis.T) * cos(theta) +
Matrix([[0, -axis[2], axis[1]],
[axis[2], 0, -axis[0]],
[-axis[1], axis[0], 0]]) * sin(theta) +
axis * axis.T)
parent_orient = parent_orient.T
return parent_orient
@property
def angle(self):
return self._angle
@property
def axis(self):
return self._axis
| AxisOrienter |
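A minimal sketch of how the masked class above is used once its name (AxisOrienter, the row's target) is filled in; it relies only on the docstring example and the `rotation_matrix` method shown in the row:

```python
from sympy import symbols, pi
from sympy.vector import CoordSys3D, AxisOrienter

q1 = symbols('q1')
N = CoordSys3D('N')

# Rotate about an arbitrary axis (N.i + 2*N.j) by the symbolic angle q1,
# exactly as in the row's docstring example.
orienter = AxisOrienter(q1, N.i + 2 * N.j)
B = N.orient_new('B', (orienter,))

# rotation_matrix() returns the 3x3 rotation matrix expressed with respect
# to the given coordinate system; substituting a value makes it numeric.
print(orienter.rotation_matrix(N))
print(orienter.rotation_matrix(N).subs(q1, pi / 2))
```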
python | fluentpython__example-code-2e | 15-more-types/petbox/petbox.py | {
"start": 536,
"end": 748
} | class ____(Generic[T_co]):
def __init__(self, contents: Any):
self.contents = contents
def get(self) -> Any:
return self.contents
T_contra = TypeVar('T_contra', contravariant=True)
| OutBox |
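The petbox row defines a read-only generic container over a covariant type variable. A short sketch, under the assumption that covariance is the point of the example; the `Animal`/`Cat` classes and `read_box` helper here are illustrative, not part of the original file:

```python
from typing import Any, Generic, TypeVar

T_co = TypeVar('T_co', covariant=True)

class OutBox(Generic[T_co]):
    """Items only come out of the box, so the type parameter can be covariant."""
    def __init__(self, contents: Any) -> None:
        self.contents = contents

    def get(self) -> Any:
        return self.contents

class Animal: ...
class Cat(Animal): ...

def read_box(box: OutBox[Animal]) -> Animal:
    return box.get()

# Covariance: an OutBox[Cat] is accepted where an OutBox[Animal] is expected.
# A type checker such as mypy verifies this; at runtime it simply works.
cat_box: OutBox[Cat] = OutBox(Cat())
print(read_box(cat_box))
```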
python | ipython__ipython | IPython/core/inputtransformer2.py | {
"start": 28498,
"end": 28634
} | class ____(Compile):
def __init__(self, extra_flags=0):
super().__init__()
self.flags |= extra_flags
| MaybeAsyncCompile |
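The IPython row ORs caller-supplied flags into an inherited `Compile` class. A hedged stand-in built directly on the standard library's `codeop.Compile` (IPython's own base class adds more than this) showing why that matters for top-level `await`:

```python
import ast
import codeop
import inspect

class MaybeAsyncCompile(codeop.Compile):
    """Stand-in for the IPython subclass above: extra flags are OR-ed into
    the flag set used by every subsequent compile() call."""
    def __init__(self, extra_flags: int = 0) -> None:
        super().__init__()
        self.flags |= extra_flags

compiler = MaybeAsyncCompile(extra_flags=ast.PyCF_ALLOW_TOP_LEVEL_AWAIT)

# With the top-level-await flag set, 'await' outside a coroutine compiles,
# and the resulting module code object is marked as coroutine code.
code = compiler("await asyncio.sleep(0)\n", "<input>", "exec")
print(bool(code.co_flags & inspect.CO_COROUTINE))  # True
```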
python | kubernetes-client__python | kubernetes/client/models/v1_parent_reference.py | {
"start": 383,
"end": 6219
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'group': 'str',
'name': 'str',
'namespace': 'str',
'resource': 'str'
}
attribute_map = {
'group': 'group',
'name': 'name',
'namespace': 'namespace',
'resource': 'resource'
}
def __init__(self, group=None, name=None, namespace=None, resource=None, local_vars_configuration=None): # noqa: E501
"""V1ParentReference - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._group = None
self._name = None
self._namespace = None
self._resource = None
self.discriminator = None
if group is not None:
self.group = group
self.name = name
if namespace is not None:
self.namespace = namespace
self.resource = resource
@property
def group(self):
"""Gets the group of this V1ParentReference. # noqa: E501
Group is the group of the object being referenced. # noqa: E501
:return: The group of this V1ParentReference. # noqa: E501
:rtype: str
"""
return self._group
@group.setter
def group(self, group):
"""Sets the group of this V1ParentReference.
Group is the group of the object being referenced. # noqa: E501
:param group: The group of this V1ParentReference. # noqa: E501
:type: str
"""
self._group = group
@property
def name(self):
"""Gets the name of this V1ParentReference. # noqa: E501
Name is the name of the object being referenced. # noqa: E501
:return: The name of this V1ParentReference. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1ParentReference.
Name is the name of the object being referenced. # noqa: E501
:param name: The name of this V1ParentReference. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def namespace(self):
"""Gets the namespace of this V1ParentReference. # noqa: E501
Namespace is the namespace of the object being referenced. # noqa: E501
:return: The namespace of this V1ParentReference. # noqa: E501
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""Sets the namespace of this V1ParentReference.
Namespace is the namespace of the object being referenced. # noqa: E501
:param namespace: The namespace of this V1ParentReference. # noqa: E501
:type: str
"""
self._namespace = namespace
@property
def resource(self):
"""Gets the resource of this V1ParentReference. # noqa: E501
Resource is the resource of the object being referenced. # noqa: E501
:return: The resource of this V1ParentReference. # noqa: E501
:rtype: str
"""
return self._resource
@resource.setter
def resource(self, resource):
"""Sets the resource of this V1ParentReference.
Resource is the resource of the object being referenced. # noqa: E501
:param resource: The resource of this V1ParentReference. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and resource is None: # noqa: E501
raise ValueError("Invalid value for `resource`, must not be `None`") # noqa: E501
self._resource = resource
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ParentReference):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ParentReference):
return True
return self.to_dict() != other.to_dict()
| V1ParentReference |
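The Kubernetes row is an auto-generated OpenAPI model. A small usage sketch based only on the constructor, validators, and helpers shown in the row; it assumes a kubernetes client release recent enough to ship `V1ParentReference`, and the field values are made up:

```python
from kubernetes.client.models.v1_parent_reference import V1ParentReference

# name and resource are validated as required; group and namespace are optional.
ref = V1ParentReference(
    group="networking.k8s.io",
    resource="ipaddresses",
    namespace="default",
    name="parent-object",
)

# to_dict() walks openapi_types and returns plain Python containers, which is
# what ultimately gets serialized into the API request body.
print(ref.to_dict())

# __eq__ is defined in terms of to_dict(), so differing optional fields matter.
print(ref == V1ParentReference(resource="ipaddresses", name="parent-object"))  # False
```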
python | readthedocs__readthedocs.org | readthedocs/doc_builder/tests/test_environments.py | {
"start": 104,
"end": 1089
} | class ____(TestCase):
def test_command_escape(self):
commands = [
(
["ls", ".", "; touch /tmp/test"],
"/bin/sh -c 'ls . \\;\\ touch\\ /tmp/test'",
),
(
["ls", ".", "\ntouch /tmp/test"],
"/bin/sh -c 'ls . \\\ntouch\\ /tmp/test'",
),
(
["ls", ".", "\ftouch /tmp/test"],
"/bin/sh -c 'ls . \\\ftouch\\ /tmp/test'",
),
(
["ls", ".", "\ttouch /tmp/test"],
"/bin/sh -c 'ls . \\\ttouch\\ /tmp/test'",
),
(
["ls", ".", "\vtouch /tmp/test"],
"/bin/sh -c 'ls . \\\vtouch\\ /tmp/test'",
),
]
for command, expected in commands:
build_command = DockerBuildCommand(command=command)
assert build_command.get_wrapped_command() == expected, command
| TestDockerBuildEnvironment |
python | mlflow__mlflow | dev/clint/src/clint/rules/thread_pool_executor_without_thread_name_prefix.py | {
"start": 84,
"end": 803
} | class ____(Rule):
def _message(self) -> str:
return (
"`ThreadPoolExecutor()` must be called with a `thread_name_prefix` argument to improve "
"debugging and traceability of thread-related issues."
)
@staticmethod
def check(node: ast.Call, resolver: Resolver) -> bool:
"""
Returns True if the call is ThreadPoolExecutor() without a thread_name_prefix parameter.
"""
if names := resolver.resolve(node):
return names == ["concurrent", "futures", "ThreadPoolExecutor"] and not any(
kw.arg == "thread_name_prefix" for kw in node.keywords
)
return False
| ThreadPoolExecutorWithoutThreadNamePrefix |
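The mlflow lint rule above flags `ThreadPoolExecutor()` calls that omit `thread_name_prefix`. A compliant usage sketch (the prefix string and worker function are illustrative):

```python
import threading
from concurrent.futures import ThreadPoolExecutor

def work(i: int) -> str:
    # threading.current_thread().name carries the prefix, e.g. "artifact-upload_0",
    # which makes thread dumps and log lines traceable back to this pool.
    return f"{threading.current_thread().name}: item {i}"

# Passing thread_name_prefix is exactly what the rule checks for.
with ThreadPoolExecutor(max_workers=4, thread_name_prefix="artifact-upload") as pool:
    for line in pool.map(work, range(4)):
        print(line)
```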
python | rapidsai__cudf | python/cudf/cudf/core/indexing_utils.py | {
"start": 1141,
"end": 1224
} | class ____:
"""An indexer for a slice."""
key: slice
@dataclass
| SliceIndexer |
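The cudf row is only a fragment: a dataclass field plus a decorator that belongs to the next class in the file. A hedged standalone sketch of the same pattern, a dataclass tagging a slice key for later dispatch; the `wrap_key` helper is hypothetical:

```python
from dataclasses import dataclass

@dataclass
class SliceIndexer:
    """An indexer for a slice (the shape of the cudf helper above)."""
    key: slice

def wrap_key(key):
    # Hypothetical dispatch: pick an indexer type based on the raw key.
    if isinstance(key, slice):
        return SliceIndexer(key)
    raise TypeError(f"unsupported key type: {type(key).__name__}")

idx = wrap_key(slice(1, 5, 2))
print(idx.key.indices(10))  # (1, 5, 2) resolved against a length-10 axis
```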
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 317797,
"end": 318275
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of UnlockLockable"""
__schema__ = github_schema
__field_names__ = ("lockable_id", "client_mutation_id")
lockable_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="lockableId")
"""ID of the item to be unlocked."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UnlockLockableInput |
python | apache__airflow | airflow-core/src/airflow/ti_deps/deps/base_ti_dep.py | {
"start": 1125,
"end": 5291
} | class ____:
"""
Abstract base class for task instances dependencies.
All dependencies must be satisfied in order for task instances to run.
For example, a task that can only run if a certain number of its upstream tasks succeed.
This is an abstract class and must be subclassed to be used.
"""
# If this dependency can be ignored by a context in which it is added to. Needed
# because some dependencies should never be ignorable in their contexts.
IGNORABLE = False
# Whether this dependency is not a global task instance dependency but specific
# to some tasks (e.g. depends_on_past is not specified by all tasks).
IS_TASK_DEP = False
def __eq__(self, other: object) -> bool:
"""Check if two task instance dependencies are equal by comparing their types."""
return isinstance(self, type(other))
def __hash__(self) -> int:
"""Compute the hash value based on the task instance dependency type."""
return hash(type(self))
def __repr__(self) -> str:
"""Return a string representation of the task instance dependency."""
return f"<TIDep({self.name})>"
@property
def name(self) -> str:
"""
The human-readable name for the dependency.
Use the class name as the default if ``NAME`` is not provided.
"""
return getattr(self, "NAME", self.__class__.__name__)
def _get_dep_statuses(
self,
ti: TaskInstance,
session: Session,
dep_context: DepContext,
) -> Iterator[TIDepStatus]:
"""
Abstract method that returns an iterable of TIDepStatus objects.
Each object describes whether the given task instance has this dependency met.
For example a subclass could return an iterable of TIDepStatus objects, each one
representing if each of the passed in task's upstream tasks succeeded or not.
:param ti: the task instance to get the dependency status for
:param session: database session
:param dep_context: the context for which this dependency should be evaluated for
"""
raise NotImplementedError
@provide_session
def get_dep_statuses(
self,
ti: TaskInstance,
session: Session,
dep_context: DepContext | None = None,
) -> Iterator[TIDepStatus]:
"""
Wrap around the private _get_dep_statuses method.
Contains some global checks for all dependencies.
:param ti: the task instance to get the dependency status for
:param session: database session
:param dep_context: the context for which this dependency should be evaluated for
"""
cxt = DepContext() if dep_context is None else dep_context
if self.IGNORABLE and cxt.ignore_all_deps:
yield self._passing_status(reason="Context specified all dependencies should be ignored.")
return
if self.IS_TASK_DEP and cxt.ignore_task_deps:
yield self._passing_status(reason="Context specified all task dependencies should be ignored.")
return
yield from self._get_dep_statuses(ti, session, cxt)
@provide_session
def is_met(self, ti: TaskInstance, session: Session, dep_context: DepContext | None = None) -> bool:
"""
Return whether a dependency is met for a given task instance.
A dependency is considered met if all the dependency statuses it reports
are passing. This is only used in tests.
:param ti: the task instance to see if this dependency is met for
:param session: database session
:param dep_context: The context this dependency is being checked under that stores
state that can be used by this dependency.
:meta private:
"""
return all(status.passed for status in self.get_dep_statuses(ti, session, dep_context))
def _failing_status(self, reason: str = "") -> TIDepStatus:
return TIDepStatus(self.name, False, reason)
def _passing_status(self, reason: str = "") -> TIDepStatus:
return TIDepStatus(self.name, True, reason)
| BaseTIDep |
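The Airflow row defines the abstract dependency base class. A hedged sketch of the subclass shape it implies, using only the hooks shown in the row (`_get_dep_statuses`, `_passing_status`, `_failing_status`, `NAME`, `IGNORABLE`); the weekend condition and the `ti.logical_date` attribute are illustrative assumptions, not an Airflow built-in:

```python
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep

class NotOnWeekendDep(BaseTIDep):
    """Toy dependency: passes unless the task instance's logical date
    falls on a weekend (illustration only)."""
    NAME = "Not On Weekend"
    IGNORABLE = True

    def _get_dep_statuses(self, ti, session, dep_context):
        # Yield one TIDepStatus describing whether this dependency is met.
        if ti.logical_date is not None and ti.logical_date.weekday() >= 5:
            yield self._failing_status(reason="Logical date falls on a weekend.")
        else:
            yield self._passing_status(reason="Logical date is a weekday.")
```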
python | pytorch__pytorch | test/distributed/test_functional_api.py | {
"start": 18461,
"end": 22899
} | class ____(DistributedTestBase):
@with_comms()
def test_all_gather_into_tensor_coalesced(self, device):
exit_if_lt_x_accelerators(self.world_size)
tensors = [
torch.ones([4], device=device),
torch.ones([4], device=device) + 1,
]
mesh = dt.DeviceMesh(device, torch.arange(self.world_size))
res = ft_c.all_gather_into_tensor_coalesced(tensors, mesh)
self.assertEqual(2, len(res))
self.assertEqual(torch.ones([4 * dist.get_world_size()]), res[0])
self.assertEqual(torch.ones([4 * dist.get_world_size()]) + 1, res[1])
@with_comms()
def test_all_to_all_single(self, device):
mesh = dt.DeviceMesh(device, torch.arange(self.world_size))
rank = dist.get_rank()
row = self.world_size * (rank + 1) * (self.world_size + 1) / 2
x = torch.ones(int(row), 5, device=device) * (rank + 1)
split_sizes = [(i + 1) * (rank + 1) for i in range(self.world_size)]
y = ft_c.all_to_all_single(
x, output_split_sizes=split_sizes, input_split_sizes=split_sizes, group=mesh
)
expected = []
for idx, tensor in enumerate(torch.split(x, split_sizes)):
expected.append(torch.full_like(tensor, (idx + 1)))
expected = torch.cat(expected)
self.assertEqual(y, expected)
@with_comms()
def test_all_to_all_single_1d_input(self, device):
mesh = dt.DeviceMesh(device, torch.arange(self.world_size))
rank = dist.get_rank()
row = self.world_size * (rank + 1) * (self.world_size + 1) / 2
x = torch.ones(int(row), device=device) * (rank + 1)
split_sizes = [(i + 1) * (rank + 1) for i in range(self.world_size)]
y = ft_c.all_to_all_single(
x, output_split_sizes=split_sizes, input_split_sizes=split_sizes, group=mesh
)
expected = []
for idx, tensor in enumerate(torch.split(x, split_sizes)):
expected.append(torch.full_like(tensor, (idx + 1)))
expected = torch.cat(expected)
self.assertEqual(y, expected)
@with_comms()
def test_all_to_all_single_split_sizes_none(self, device):
mesh = dt.DeviceMesh(device, torch.arange(self.world_size))
rank = dist.get_rank()
x = torch.ones(self.world_size, self.world_size, device=device) * (rank + 1)
y = ft_c.all_to_all_single(
x, output_split_sizes=None, input_split_sizes=None, group=mesh
)
expected = []
for idx, tensor in enumerate(torch.chunk(x, self.world_size)):
expected.append(torch.full_like(tensor, (idx + 1)))
expected = torch.cat(expected)
self.assertEqual(y, expected)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@requires_accelerator_dist_backend(["nccl", "xccl"])
@with_comms()
def test_tracing(self, device):
def allreduce(t, pg):
return ft_c.all_reduce(t, "sum", pg)
compiled_allreduce = torch.compile(allreduce, fullgraph=True)
compiled_allreduce(torch.randn(8, device=device), self.pg)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
def test_tracing_with_fakepg(self, device=DEVICE):
exit_if_lt_x_accelerators(self.world_size)
def allreduce(t, pg):
return ft_c.all_reduce(t, "sum", pg)
compiled_allreduce = torch.compile(allreduce, fullgraph=True) # noqa: F841
dist.init_process_group(
backend="fake",
rank=0,
world_size=8,
)
allreduce(torch.randn(8, device=device), pg=dist.group.WORLD)
dist.destroy_process_group()
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@requires_accelerator_dist_backend(["nccl", "xccl"])
@with_comms()
def test_tracing_with_dce_code(self, device):
if self.world_size > 2:
return
def func(batch, group, rank):
ret = ft_c.permute_tensor(batch, [1, 0], group)
if hasattr(ret, "wait"):
ret = ret.wait()
if rank == 0:
return ret
else:
return batch * 5
compiled_func = torch.compile(func)
compiled_func(torch.ones((100,), device=device), self.process_group, self.rank)
dist.barrier()
| TestCollectivesWithDistributedBackend |
python | pydata__xarray | xarray/core/dataarray.py | {
"start": 7091,
"end": 8159
} | class ____(Generic[T_DataArray]):
__slots__ = ("data_array",)
def __init__(self, data_array: T_DataArray):
self.data_array = data_array
def __getitem__(self, key) -> T_DataArray:
if not utils.is_dict_like(key):
# expand the indexer so we can handle Ellipsis
labels = indexing.expanded_indexer(key, self.data_array.ndim)
key = dict(zip(self.data_array.dims, labels, strict=True))
return self.data_array.sel(key)
def __setitem__(self, key, value) -> None:
if not utils.is_dict_like(key):
# expand the indexer so we can handle Ellipsis
labels = indexing.expanded_indexer(key, self.data_array.ndim)
key = dict(zip(self.data_array.dims, labels, strict=True))
dim_indexers = map_index_queries(self.data_array, key).dim_indexers
self.data_array[dim_indexers] = value
# Used as the key corresponding to a DataArray's variable when converting
# arbitrary DataArray objects to datasets
_THIS_ARRAY = ReprObject("<this-array>")
| _LocIndexer |
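The xarray row implements the label-based `.loc` indexer for `DataArray`. A short sketch of the two behaviors its `__getitem__`/`__setitem__` provide: dict keys mapping dimension names to labels, and positional keys expanded across dims:

```python
import numpy as np
import xarray as xr

da = xr.DataArray(
    np.arange(6).reshape(2, 3),
    dims=("x", "y"),
    coords={"x": ["a", "b"], "y": [10, 20, 30]},
)

# __getitem__: a dict key maps dimension names to labels and delegates to sel().
print(da.loc[{"x": "a", "y": 20}].item())   # 1

# Non-dict keys are expanded positionally across the dims ("x" first, then "y").
print(da.loc["b", 30].item())               # 5

# __setitem__: assign by label rather than by integer position.
da.loc[{"x": "a"}] = -1
print(da.sel(x="a").values)                 # [-1 -1 -1]
```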
python | ansible__ansible | test/units/_internal/templating/test_templar.py | {
"start": 4121,
"end": 7553
} | class ____(BaseTemplar, unittest.TestCase):
def test_trust_fail_raises_in_tests(self):
"""Ensure template trust check failures default to fatal for unit tests (set in units/conftest.py)"""
from ansible._internal._templating._engine import TemplateTrustCheckFailedError
assert _TemplateConfig.untrusted_template_handler.action is ErrorAction.ERROR
with pytest.raises(TemplateTrustCheckFailedError):
self.templar.template("{{ i_am_not_trusted }}")
def test_trust_fail_warning_behavior(self):
"""Validate that trust checks are non-fatal when TemplateConfig.untrusted_template_handler is set to `ErrorAction.WARNING`."""
untrusted_template = "{{ i_am_not_trusted }}"
assert hasattr(_TemplateConfig, 'untrusted_template_handler')
with (unittest.mock.patch.object(_TemplateConfig, 'untrusted_template_handler', ErrorHandler(ErrorAction.WARNING)),
unittest.mock.patch.object(Display, 'error_as_warning', return_value=None) as mock_warning):
assert self.templar.template(untrusted_template) is untrusted_template
assert mock_warning.call_count > 0
warning_value = mock_warning.call_args.kwargs['exception']
assert isinstance(warning_value, TemplateTrustCheckFailedError)
assert "Encountered untrusted template or expression" in warning_value.message
assert warning_value.obj == untrusted_template
def test_is_possible_template(self):
"""This test ensures that a broken template still gets templated"""
# Purposefully invalid jinja
self.assertRaises(AnsibleError, self.templar.template, TrustedAsTemplate().tag('{{ foo|default(False)) }}'))
def test_is_template_raw_string(self):
res = self.templar.is_template('foo')
self.assertFalse(res)
def test_is_template_none(self):
res = self.templar.is_template(None)
self.assertFalse(res)
def test_template(self):
res = self.templar.template(TrustedAsTemplate().tag('{{foo}}'))
self.assertTrue(res)
self.assertEqual(res, 'bar')
def test_template_in_data(self):
res = self.templar.template(TrustedAsTemplate().tag('{{bam}}'))
self.assertTrue(res)
self.assertEqual(res, 'bar')
def test_template_bare(self):
res = self.templar.template('bam')
self.assertTrue(res)
self.assertEqual(res, 'bam')
def test_template_to_json(self):
res = self.templar.template(TrustedAsTemplate().tag('{{bam|to_json}}'))
self.assertTrue(res)
self.assertEqual(res, '"bar"')
def test_template_untagged_string(self):
unsafe_obj = "Hello"
res = self.templar.template(unsafe_obj)
assert not TrustedAsTemplate.is_tagged_on(res)
def test_weird(self):
data = TrustedAsTemplate().tag(u"""1 2 #}huh{# %}ddfg{% }}dfdfg{{ {%what%} {{#foo#}} {%{bar}%} {#%blip%#} {{asdfsd%} 3 4 {{foo}} 5 6 7""")
self.assertRaisesRegex(AnsibleError,
'Syntax error in template',
self.templar.template,
data)
def test_template_with_error(self):
"""Check that AnsibleError is raised, fail if an unhandled exception is raised"""
self.assertRaises(AnsibleError, self.templar.template, TrustedAsTemplate().tag("{{ str_with_error }}"))
| TestTemplarTemplate |
python | numba__numba | numba/tests/npyufunc/test_caching.py | {
"start": 3508,
"end": 4680
} | class ____(UfuncCacheTest):
# Note: DUFunc doesn't support parallel target yet
def check_dufunc_usecase(self, usecase_name):
mod = self.import_module()
usecase = getattr(mod, usecase_name)
# Create dufunc
with capture_cache_log() as out:
ufunc = usecase()
self.check_cache_saved(out.getvalue(), count=0)
# Compile & cache
with capture_cache_log() as out:
ufunc(np.arange(10))
self.check_cache_saved(out.getvalue(), count=1)
self.check_cache_loaded(out.getvalue(), count=0)
# Use cached
with capture_cache_log() as out:
ufunc = usecase()
ufunc(np.arange(10))
self.check_cache_loaded(out.getvalue(), count=1)
def test_direct_dufunc_cache(self):
# We don't test for objmode because DUfunc don't support it.
self.check_dufunc_usecase('direct_dufunc_cache_usecase')
def test_indirect_dufunc_cache(self):
self.check_dufunc_usecase('indirect_dufunc_cache_usecase')
def _fix_raw_path(rstr):
if config.IS_WIN32:
rstr = rstr.replace(r'/', r'\\\\')
return rstr
| TestDUfuncCacheTest |
python | huggingface__transformers | tests/models/vitpose/test_modeling_vitpose.py | {
"start": 1392,
"end": 4948
} | class ____:
def __init__(
self,
parent,
batch_size=13,
image_size=[16 * 8, 12 * 8],
patch_size=[8, 8],
num_channels=3,
is_training=True,
use_labels=True,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
type_sequence_label_size=10,
initializer_range=0.02,
num_labels=2,
scale_factor=4,
out_indices=[-1],
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.scale_factor = scale_factor
self.out_indices = out_indices
self.scope = scope
# in VitPose, the seq length equals the number of patches
num_patches = (image_size[0] // patch_size[0]) * (image_size[1] // patch_size[1])
self.seq_length = num_patches
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return VitPoseConfig(
backbone_config=self.get_backbone_config(),
)
def get_backbone_config(self):
return VitPoseBackboneConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
num_hidden_layers=self.num_hidden_layers,
hidden_size=self.hidden_size,
intermediate_size=self.intermediate_size,
num_attention_heads=self.num_attention_heads,
hidden_act=self.hidden_act,
out_indices=self.out_indices,
)
def create_and_check_for_pose_estimation(self, config, pixel_values, labels):
model = VitPoseForPoseEstimation(config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
expected_height = (self.image_size[0] // self.patch_size[0]) * self.scale_factor
expected_width = (self.image_size[1] // self.patch_size[1]) * self.scale_factor
self.parent.assertEqual(
result.heatmaps.shape, (self.batch_size, self.num_labels, expected_height, expected_width)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
pixel_values,
labels,
) = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| VitPoseModelTester |
python | ray-project__ray | python/ray/train/torch/train_loop_utils.py | {
"start": 27880,
"end": 29137
} | class ____(Optimizer):
def __init__(self, optimizer: Optimizer, scaler: Optional[GradScaler] = None):
self.optimizer = optimizer
self.scaler = scaler
@property
def state(self):
return self.optimizer.state
@state.setter
def state(self, state):
self.optimizer.state = state
@property
def param_groups(self):
return self.optimizer.param_groups
@param_groups.setter
def param_groups(self, param_groups):
self.optimizer.param_groups = param_groups
@property
def defaults(self):
return self.optimizer.defaults
@defaults.setter
def defaults(self, defaults):
self.optimizer.defaults = defaults
def add_param_group(self, param_group):
self.optimizer.add_param_group(param_group)
def load_state_dict(self, state_dict):
self.optimizer.load_state_dict(state_dict)
def state_dict(self):
return self.optimizer.state_dict()
def zero_grad(self):
self.optimizer.zero_grad()
def step(self, closure=None):
if self.scaler is not None:
self.scaler.step(self.optimizer, closure)
self.scaler.update()
else:
self.optimizer.step(closure)
| _WrappedOptimizer |
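The Ray Train row wraps a torch optimizer so `step()` routes through an AMP `GradScaler` when one is present. A hedged standalone sketch of the scaler flow this wrapper automates; the tiny model and data are stand-ins and a CUDA device is assumed:

```python
import torch

model = torch.nn.Linear(8, 1).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = torch.cuda.amp.GradScaler()

x = torch.randn(32, 8, device="cuda")
y = torch.randn(32, 1, device="cuda")

optimizer.zero_grad()
with torch.cuda.amp.autocast():
    loss = torch.nn.functional.mse_loss(model(x), y)

# Scale the loss before backward, then step through the scaler and update it --
# this is what the wrapped optimizer's step() does internally when a scaler is set.
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
```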
python | aio-libs__aiohttp | aiohttp/helpers.py | {
"start": 20476,
"end": 23207
} | class ____(BaseTimerContext):
"""Low resolution timeout context manager"""
__slots__ = ("_loop", "_tasks", "_cancelled", "_cancelling")
def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
self._loop = loop
self._tasks: list[asyncio.Task[Any]] = []
self._cancelled = False
self._cancelling = 0
def assert_timeout(self) -> None:
"""Raise TimeoutError if timer has already been cancelled."""
if self._cancelled:
raise asyncio.TimeoutError from None
def __enter__(self) -> BaseTimerContext:
task = asyncio.current_task(loop=self._loop)
if task is None:
raise RuntimeError("Timeout context manager should be used inside a task")
if sys.version_info >= (3, 11):
# Remember if the task was already cancelling
# so when we __exit__ we can decide if we should
# raise asyncio.TimeoutError or let the cancellation propagate
self._cancelling = task.cancelling()
if self._cancelled:
raise asyncio.TimeoutError from None
self._tasks.append(task)
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool | None:
enter_task: asyncio.Task[Any] | None = None
if self._tasks:
enter_task = self._tasks.pop()
if exc_type is asyncio.CancelledError and self._cancelled:
assert enter_task is not None
# The timeout was hit, and the task was cancelled
# so we need to uncancel the last task that entered the context manager
# since the cancellation should not leak out of the context manager
if sys.version_info >= (3, 11):
# If the task was already cancelling don't raise
# asyncio.TimeoutError and instead return None
# to allow the cancellation to propagate
if enter_task.uncancel() > self._cancelling:
return None
raise asyncio.TimeoutError from exc_val
return None
def timeout(self) -> None:
if not self._cancelled:
for task in set(self._tasks):
task.cancel()
self._cancelled = True
def ceil_timeout(
delay: float | None, ceil_threshold: float = 5
) -> async_timeout.Timeout:
if delay is None or delay <= 0:
return async_timeout.timeout(None)
loop = asyncio.get_running_loop()
now = loop.time()
when = now + delay
if delay > ceil_threshold:
when = ceil(when)
return async_timeout.timeout_at(when)
| TimerContext |
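The aiohttp row ends with `ceil_timeout`, which rounds deadlines longer than `ceil_threshold` up to a whole second so the event loop can coalesce timers. A usage sketch, noting that `aiohttp.helpers` is an internal module whose layout may change between releases:

```python
import asyncio
from aiohttp.helpers import ceil_timeout

async def main() -> None:
    # Delays above ceil_threshold (default 5 s) get their absolute deadline
    # rounded up to a whole loop-time second; short delays stay exact.
    async with ceil_timeout(10):
        await asyncio.sleep(0.1)
    print("finished inside the ceiled deadline")

asyncio.run(main())
```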
python | allegroai__clearml | clearml/backend_api/services/v2_20/events.py | {
"start": 95462,
"end": 96389
} | class ____(Request):
"""
get task scalar metrics and variants
:param task: task ID
:type task: str
"""
_service = "events"
_action = "get_scalar_metrics_and_variants"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {"task": {"description": "task ID", "type": "string"}},
"required": ["task"],
"type": "object",
}
def __init__(self, task: str, **kwargs: Any) -> None:
super(GetScalarMetricsAndVariantsRequest, self).__init__(**kwargs)
self.task = task
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
| GetScalarMetricsAndVariantsRequest |
python | scipy__scipy | scipy/linalg/tests/test_special_matrices.py | {
"start": 2214,
"end": 2461
} | class ____:
def test_basic(self):
y = hankel([1, 2, 3])
assert_array_equal(y, [[1, 2, 3], [2, 3, 0], [3, 0, 0]])
y = hankel([1, 2, 3], [3, 4, 5])
assert_array_equal(y, [[1, 2, 3], [2, 3, 4], [3, 4, 5]])
| TestHankel |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_django/DJ007.py | {
"start": 111,
"end": 196
} | class ____(forms.ModelForm):
class Meta:
fields = b"__all__"
| TestModelForm2 |
python | google__jax | jax/experimental/jax2tf/tests/jax2tf_test.py | {
"start": 1739,
"end": 60228
} | class ____(JaxToTfTestCase):
def setUp(self):
super().setUp()
versions = tf.version.VERSION.split(".")
if versions < ["2", "19", "1"]:
# StableHLO changed on March 18th, 2025 ,to version 1.10.0, and this
# introduces ops like vhlo_sine_v2. These ops require a TF version
# released after this date.
self.skipTest("Need version of TensorFlow at least 2.19.1")
# One TF device of each device_type
self.tf_devices = []
for tf_device in (tf.config.list_logical_devices("TPU") +
tf.config.list_logical_devices("GPU") +
tf.config.list_logical_devices()):
if tf_device.device_type == "TPU_SYSTEM":
continue # A virtual device
if all(tf_device.device_type != d.device_type for d in self.tf_devices):
self.tf_devices.append(tf_device)
def test_empty(self):
f_jax = lambda x, y: x
self.ConvertAndCompare(f_jax, 0.7, 1)
def test_sin(self):
f_tf = jax2tf.convert(jnp.sin)
x = np.float32(.5)
sin_x = np.sin(x)
self.assertAllClose(sin_x, f_tf(x))
self.assertAllClose(sin_x, tf.function(f_tf, autograph=False,
jit_compile=True)(x))
tf_preferred_device = (
tf.config.list_logical_devices("TPU")
+ tf.config.list_logical_devices("GPU")
+ tf.config.list_logical_devices()
)[0]
logging.info("Running TF on %s", tf_preferred_device)
# The following, with jit_compile=False, fails with native serialization
# because TF executes the function where it is instantiated (For example,
# XlaCallModule op on CPU). The workaround here is that we can
# wrap it and add device assignment inside the tf.function.
@tf.function(autograph=False, jit_compile=False)
def f_tf_wrapped(x):
with tf.device(tf_preferred_device.name):
return f_tf(x)
with tf.device(tf_preferred_device.name):
self.assertAllClose(sin_x, f_tf_wrapped(x))
def test_basics(self):
f_jax = lambda x: jnp.sin(jnp.cos(x))
self.ConvertAndCompare(f_jax, 0.7)
def test_input_output_naming(self):
@jax2tf.convert
def f(xs, y):
return [jnp.add(x, y) for x in xs]
@tf.function(autograph=False)
def u(xs, y):
xs = tf.nest.map_structure(tf.convert_to_tensor, xs)
with tf.GradientTape() as tape:
tf.nest.map_structure(tape.watch, xs)
y = f(xs, y)
tape.gradient(y, xs)
return y
cf = u.get_concrete_function([1., 2., 3.], 4.)
g = cf.graph
g.get_operation_by_name("jax2tf_arg_0")
g.get_operation_by_name("jax2tf_arg_1")
g.get_operation_by_name("jax2tf_arg_2")
g.get_operation_by_name("jax2tf_arg_3")
g.get_operation_by_name("jax2tf_out")
g.get_operation_by_name("jax2tf_out_1")
g.get_operation_by_name("jax2tf_out_2")
with self.assertRaises(KeyError):
g.get_operation_by_name("jax2tf_arg_4")
with self.assertRaises(KeyError):
g.get_operation_by_name("jax2tf_out_3")
g.get_operation_by_name("jax2tf_vjp/jax2tf_arg_0")
g.get_operation_by_name("jax2tf_vjp/jax2tf_arg_1")
g.get_operation_by_name("jax2tf_vjp/jax2tf_arg_2")
g.get_operation_by_name("jax2tf_vjp/jax2tf_arg_3")
g.get_operation_by_name("jax2tf_vjp/jax2tf_out")
g.get_operation_by_name("jax2tf_vjp/jax2tf_out_1")
g.get_operation_by_name("jax2tf_vjp/jax2tf_out_2")
g.get_operation_by_name("jax2tf_vjp/jax2tf_out_3")
def test_pytrees(self):
# Take and return pytrees
def f_jax(x: tuple[float, dict[str, float]]) -> tuple[float, dict[str, float]]:
x_a, x_dict = x
return x_a * 2., {k: v * 3. for k, v in x_dict.items()}
x = (.7, {"a": .8, "b": .9})
self.ConvertAndCompare(f_jax, x)
def test_variable_input(self):
f_jax = lambda x: jnp.sin(jnp.cos(x))
f_tf = jax2tf.convert(f_jax)
v = tf.Variable(0.7, dtype=jax2tf.dtype_of_val(0.7))
self.assertIsInstance(f_tf(v), tf.Tensor)
self.assertAllClose(f_jax(0.7), f_tf(v))
def test_jit(self):
f_jax = jax.jit(lambda x: jnp.sin(jnp.cos(x)))
self.ConvertAndCompare(f_jax, 0.7)
def test_nested_jit(self):
f_jax = jax.jit(lambda x: jnp.sin(jax.jit(jnp.cos)(x)))
x = 0.7
self.ConvertAndCompare(f_jax, x)
def test_nested_jit_pytree(self):
@jax.jit
def f_jax(xy):
x, y = xy
return x + y
xy = (0.7, 0.8)
self.ConvertAndCompare(f_jax, xy)
def test_nested_jit_is_compiled(self):
# Check that nested jax.jit are compiled with tf.function(jit_compile=True)
# We do this by looking for the _XlaMustCompile attribute in the function graph
def has_xla_must_compile(f_tf, x):
f_conc = tf.function(f_tf, autograph=True).get_concrete_function(tf.convert_to_tensor(x))
for n in f_conc.graph._nodes_by_id.values():
try:
n.get_attr("_XlaMustCompile")
return True
except ValueError:
continue
return False
x = np.array(0.7)
f_no_jit = lambda x: x
self.assertFalse(has_xla_must_compile(jax2tf.convert(f_no_jit), x))
f_jit = lambda x: jax.jit(jnp.sin)(x)
# TODO(b/207464757): TF compilation is disabled
self.assertFalse(has_xla_must_compile(jax2tf.convert(f_jit), x))
def test_converts_jax_arrays(self):
f_tf = tf.function(lambda x: x)
self.assertEqual(f_tf(jnp.zeros([])).numpy(), 0.)
self.assertEqual(f_tf(jnp.ones([])).numpy(), 1.)
f_tf = tf.function(lambda x: x + x)
self.assertEqual(f_tf(jnp.ones([])).numpy(), 2.)
# Test with a PmapSharding-sharded Array.
n = jax.local_device_count()
mk_sharded = lambda f: jax.pmap(lambda x: x)(f([n]))
f_tf = tf.function(lambda x: x)
self.assertAllClose(f_tf(mk_sharded(jnp.zeros)).numpy(),
jnp.zeros([n]))
self.assertAllClose(f_tf(mk_sharded(jnp.ones)).numpy(),
jnp.ones([n]))
@jtu.skip_on_devices("gpu")
def test_bfloat16_passed_by_tf(self):
f_jax = lambda a, b: a + b
f_tf = tf.function(jax2tf.convert(f_jax), autograph=False,
input_signature=[tf.TensorSpec([512, 512], tf.bfloat16),
tf.TensorSpec([512, 512], tf.bfloat16)])
self.assertIsNotNone(f_tf.get_concrete_function())
@jtu.skip_on_devices("gpu")
def test_bfloat16_returned_by_jax(self):
f_jax = lambda a, b: (a + b).astype(jnp.bfloat16)
f_tf = jax2tf.convert(f_jax)
self.assertEqual(f_tf(1., 2.).dtype, tf.bfloat16)
@jtu.skip_on_devices("gpu")
def test_bfloat16_tf_grad(self):
f_jax = lambda a, b: a + b
def _tf_grad(a, b):
with tf.GradientTape() as tape:
tape.watch(a)
result = jax2tf.convert(f_jax)(a, b)
return result, tape.gradient(result, a)
f_tf = tf.function(_tf_grad, autograph=False,
input_signature=[tf.TensorSpec([512, 512], tf.bfloat16),
tf.TensorSpec([512, 512], tf.bfloat16)])
self.assertIsNotNone(f_tf.get_concrete_function())
@jtu.sample_product(
dtype=[np.int64, np.float64],
with_function=[True, False],
)
def test_converts_64bit(self, dtype=np.int64, with_function=False):
if not config.enable_x64.value:
self.skipTest("requires x64 mode")
big_const = np.full((5,), 2 ** 33, dtype=dtype)
self.ConvertAndCompare(jnp.sin, big_const)
f_conv = jax2tf.convert(jnp.sin)
if with_function:
f_conv = tf.function(f_conv, autograph=False)
# We check also when we pass tf.Variable or tf.Tensor into the
# converted function
self.assertAllClose(jnp.sin(big_const),
f_conv(tf.Variable(big_const)))
self.assertAllClose(jnp.sin(big_const),
f_conv(tf.constant(big_const)))
def test_64bit_behavior_enable_x64_readme(self):
# Tests some of the examples from the README
if not config.enable_x64.value:
self.skipTest("requires x64 mode")
# JAX and TF have different default float types if JAX_ENABLE_X64=1
self.assertEqual(tf.math.sin(3.14).dtype, tf.float32)
self.assertEqual(jnp.sin(3.14).dtype, jnp.float64)
# jax2tf.convert has the same behavior as JAX
self.assertEqual(jax2tf.convert(jnp.sin)(3.14).dtype, tf.float64)
# The following will compute `sin` in float64.
self.assertEqual(tf.function(jax2tf.convert(jnp.sin), autograph=False)(tf.Variable(3.14, dtype=tf.float64)).dtype, tf.float64)
# The following will compute `sin` in float32.
self.assertEqual(tf.function(jax2tf.convert(jnp.sin), autograph=False)(tf.Variable(3.14)).dtype, tf.float32)
def test_64bit_behavior_not_enable_x64_readme(self):
# Tests some of the examples from the README
if config.enable_x64.value:
self.skipTest("requires not x64 mode")
# JAX and TF have same default float types if JAX_ENABLE_X64=0
self.assertEqual(tf.math.sin(3.14).dtype, tf.float32)
self.assertEqual(jnp.sin(3.14).dtype, jnp.float32)
self.assertEqual(tf.math.sin(np.float64(3.14)).dtype, tf.float64)
# JAX forces values to 32-bit
self.assertEqual(jnp.sin(np.float64(3.14)).dtype, jnp.float32)
# jax2tf.convert has the same behavior as JAX
self.assertEqual(jax2tf.convert(jnp.sin)(3.14).dtype, tf.float32)
self.assertEqual(jax2tf.convert(jnp.sin)(np.float64(3.14)).dtype, tf.float32)
self.assertEqual(tf.function(jax2tf.convert(jnp.sin), autograph=False)(tf.Variable(3.14, dtype=tf.float64)).dtype, tf.float32)
def test_function(self):
f_jax = jax.jit(lambda x: jnp.sin(jnp.cos(x)))
self.ConvertAndCompare(f_jax, 0.7)
@jtu.sample_product(with_function=[False, True])
def test_gradients_disabled(self, with_function=False):
if tf.version.VERSION.split(".") <= ["2", "17", "0"]:
self.skipTest("This test works only with newer versions of TF")
f_tf = jax2tf.convert(jnp.tan, with_gradient=False)
if with_function:
f_tf = tf.function(f_tf, autograph=False)
x = tf.ones([])
# With tf.function the error is raised when we evaluate f_tf(x), in
# eager mode when we evaluate tape.gradient(y, x)
with self.assertRaisesRegex(LookupError,
"Gradient explicitly disabled.*The jax2tf-converted function does not support gradients"):
with tf.GradientTape() as tape:
tape.watch(x)
y = f_tf(x)
_ = tape.gradient(y, x)
@jtu.sample_product(with_function=[False, True])
def test_gradients(self, with_function=True):
def f(x, y):
return x * x, x * y
f_tf = jax2tf.convert(f, with_gradient=True)
if with_function:
f_tf = tf.function(f_tf, autograph=False)
default_float_type = jax2tf.dtype_of_val(4.)
x = tf.Variable(4., dtype=jax2tf.dtype_of_val(4.))
y = tf.Variable(5., dtype=default_float_type)
with tf.GradientTape(persistent=True) as tape:
u, v = f_tf(x, y)
self.assertAllClose(2. * 4., tape.gradient(u, x))
self.assertAllClose(0., tape.gradient(u, y))
self.assertAllClose(5., tape.gradient(v, x))
self.assertAllClose(4., tape.gradient(v, y))
def test_higher_order_gradients(self):
f = lambda x: x ** 3
f_tf = jax2tf.convert(f)
x = tf.Variable(4.0, dtype=tf.float32) # Create a Tensorflow variable initialized to 4.0
with tf.GradientTape() as t2:
with tf.GradientTape() as t1:
y = f_tf(x)
# Compute the gradient inside the outer `t2` context manager
# which means the gradient computation is differentiable as well.
dy_dx = t1.gradient(y, x)
d2y_dx2 = t2.gradient(dy_dx, x)
self.assertAllClose(np.float32(48.), dy_dx.numpy())
self.assertAllClose(np.float32(24.), d2y_dx2.numpy())
@jtu.sample_product(with_function=[False, True])
def test_gradients_pytree(self, with_function=False):
def f(xy: tuple[float, float]) -> dict[str, float]:
x, y = xy
return dict(one=x * x, two=x * y)
f_tf = jax2tf.convert(f, with_gradient=True)
if with_function:
f_tf = tf.function(f_tf, autograph=False)
default_float_dtype = jax2tf.dtype_of_val(4.)
x = tf.Variable(4., dtype=default_float_dtype)
y = tf.Variable(5., dtype=default_float_dtype)
with tf.GradientTape(persistent=True) as tape:
uv = f_tf((x, y))
self.assertAllClose(2. * 4., tape.gradient(uv["one"], x))
self.assertAllClose(0., tape.gradient(uv["one"], y))
self.assertAllClose(5., tape.gradient(uv["two"], x))
self.assertAllClose(4., tape.gradient(uv["two"], y))
def test_custom_pytree_readme(self):
# Code examples from README.md
class CustomPair:
def __init__(self, a, b):
self.a = a
self.b = b
jax.tree_util.register_pytree_node(CustomPair,
lambda x: ((x.a, x.b), None),
lambda _, ab: CustomPair(*ab))
def f_jax(pair: CustomPair):
return np.float32(2.) * pair.a + np.float32(3.) * pair.b
f_tf = jax2tf.convert(f_jax)
x = CustomPair(np.float32(4.), np.float32(5.))
res_jax = f_jax(x)
# TF execution works as long as JAX can flatten the arguments and results
res_tf = f_tf(x)
self.assertAllClose(res_jax, res_tf.numpy())
res_tf_2 = tf.function(f_tf, autograph=False, jit_compile=True)(x)
self.assertAllClose(res_jax, res_tf_2)
# wrapped TF function to use only standard containers
def f_tf_wrapped(a, b):
return f_tf(CustomPair(a, b))
# Try to put into SavedModel
my_model = tf.Module()
# Save a function that can take scalar inputs.
my_model.f = tf.function(f_tf_wrapped, autograph=False,
input_signature=[tf.TensorSpec([], tf.float32),
tf.TensorSpec([], tf.float32)])
model_dir = os.path.join(absltest.get_default_test_tmpdir(), str(id(my_model)))
tf.saved_model.save(my_model, model_dir,
options=tf.saved_model.SaveOptions(experimental_custom_gradients=True))
# Restoring (note: the restored model does *not* require JAX to run, just XLA).
restored_model = tf.saved_model.load(model_dir)
def restored_f(pair: CustomPair):
return restored_model.f(pair.a, pair.b)
res_tf_3 = restored_f(x)
self.assertAllClose(res_jax, res_tf_3)
grad_jax = jax.grad(f_jax)(x)
x_v = [tf.Variable(x.a), tf.Variable(x.b)]
with tf.GradientTape() as tape:
res = f_tf_wrapped(*x_v)
grad_tf = tape.gradient(res, x_v)
self.assertAllClose(grad_jax.a, grad_tf[0])
self.assertAllClose(grad_jax.b, grad_tf[1])
@jtu.sample_product(with_function=[False, True])
def test_gradients_with_ordered_dict_input(self, with_function=True):
def f(inputs):
out = 0.0
for v in inputs.values():
out += jnp.sum(v)
return out
f_tf = jax2tf.convert(f, with_gradient=True)
if with_function:
f_tf = tf.function(f_tf, autograph=False)
default_float_type = jax2tf.dtype_of_val(4.)
x = tf.Variable([4.], dtype=default_float_type)
y = tf.Variable([4., 5.], dtype=default_float_type)
inputs = collections.OrderedDict()
inputs['r'] = x
inputs['d'] = y
with tf.GradientTape(persistent=True) as tape:
u = f_tf(inputs)
self.assertAllClose(np.array([1.]), tape.gradient(u, x).numpy())
self.assertAllClose(np.array([1., 1.]), tape.gradient(u, y).numpy())
@jtu.sample_product(with_function=[False, True])
def test_gradients_with_custom_jvp(self, with_function=True):
"""Check gradients, for a function with custom JVP."""
@jax.custom_jvp
def f(x):
return x * x
@f.defjvp
def f_jvp(primals, tangents):
# 3 * x * x_t
x, = primals
x_dot, = tangents
primal_out = f(x)
tangent_out = 3. * x * x_dot
return primal_out, tangent_out
self.assertAllClose(4. * 4., f(4.))
self.assertAllClose(3. * 4., jax.grad(f)(4.))
f_tf = jax2tf.convert(f, with_gradient=True)
if with_function:
f_tf = tf.function(f_tf, autograph=False)
self.assertAllClose(4. * 4., f_tf(4.))
x = tf.Variable(4., dtype=jax2tf.dtype_of_val(4.))
with tf.GradientTape() as tape:
tape.watch(x)
y = f_tf(x)
self.assertAllClose(4. * 4., y)
self.assertAllClose(3. * 4., tape.gradient(y, x))
@jtu.sample_product(with_function=[False, True])
def test_gradients_with_custom_vjp(self, with_function=True):
"""Check gradients, for a function with custom VJP."""
@jax.custom_vjp
def f(x):
return x * x
# f_fwd: a -> (b, residual)
def f_fwd(x):
return f(x), 3. * x
# f_bwd: (residual, CT b) -> [CT a]
def f_bwd(residual, ct_b):
return residual * ct_b,
f.defvjp(f_fwd, f_bwd)
self.assertAllClose(4. * 4., f(4.))
self.assertAllClose(3. * 4., jax.grad(f)(4.))
f_tf = jax2tf.convert(f, with_gradient=True)
if with_function:
f_tf = tf.function(f_tf, autograph=False)
self.assertAllClose(4. * 4., f_tf(4.))
x = tf.Variable(4., dtype=jax2tf.dtype_of_val(4.))
with tf.GradientTape() as tape:
tape.watch(x)
y = f_tf(x)
self.assertAllClose(4. * 4., y)
self.assertAllClose(3. * 4., tape.gradient(y, x))
def test_gradient_with_float0_intermediate(self):
# Gradient over integer-argument functions
def f(x, y): # x is an int, y is a float
return 2 * x + y
def g(x): # x: f32
return 2. * f(3 * x.astype("int32"), x * 4.)
x = 2.
grad_g = jax.grad(g)
self.ConvertAndCompare(grad_g, x)
def test_gradient_with_float0_result(self):
# Gradient over integer-argument functions, with float0 result
def f(x, y): # x is an int, y is a float
return 2 * x + y
def g(x): # x: i32
return jnp.sum(2. * f(3 * x, 4. * jnp.array(x, jnp.dtype("float32"))))
grad_g = jax.grad(g, allow_int=True)
x = 2
d_dx_jax = grad_g(x)
d_dx_tf = jax2tf.convert(grad_g)(x)
self.assertEqual(d_dx_jax.dtype, dtypes.float0)
self.assertAllClose(jnp.zeros(np.shape(d_dx_jax), np.bool_),
d_dx_tf.numpy())
shape = (3, 4)
x = np.ones(shape, dtype=np.int32)
d_dx_jax = grad_g(x)
d_dx_tf = jax2tf.convert(grad_g)(x)
self.assertEqual(d_dx_jax.dtype, dtypes.float0)
self.assertAllClose(jnp.zeros(np.shape(d_dx_jax), np.bool_),
d_dx_tf.numpy())
@jtu.sample_product(with_function=[False, True])
def test_gradients_unused_argument_readme(self, with_function=False):
# x1 and x3 are not used. x3 has integer type.
def fn(x0, x1, x2, x3):
return x0 * 0. + x2 * 2.
xs = [tf.Variable(x) for x in [10., 11., 12., 13]]
with tf.GradientTape(persistent=True) as tape:
res = fn(*xs)
g_tf_native = tape.gradient(res, xs)
self.assertAllClose(g_tf_native[0].numpy(), np.float32(0.))
self.assertIsNone(g_tf_native[1])
self.assertAllClose(g_tf_native[2].numpy(), np.float32(2.))
self.assertIsNone(g_tf_native[3])
g_tf_native_0 = tape.gradient(res, xs,
unconnected_gradients=tf.UnconnectedGradients.ZERO)
self.assertAllClose(g_tf_native_0[0].numpy(), np.float32(0.))
self.assertAllClose(g_tf_native_0[1].numpy(), np.float32(0.))
self.assertAllClose(g_tf_native_0[2].numpy(), np.float32(2.))
self.assertAllClose(g_tf_native_0[3].numpy(), np.int32(0))
# Now with jax2tf.convert
with tf.GradientTape(persistent=True) as tape:
conv_fn = jax2tf.convert(fn, with_gradient=True)
if with_function:
conv_fn = tf.function(conv_fn, autograph=False)
res = conv_fn(*xs)
g_jax2tf = tape.gradient(res, xs)
# Returns: 0., 0., 2., None
# Note that the gradient for x1 is 0.
self.assertAllClose(g_jax2tf[0].numpy(), np.float32(0.))
self.assertAllClose(g_jax2tf[1].numpy(), np.float32(0.))
self.assertAllClose(g_jax2tf[2].numpy(), np.float32(2.))
self.assertIsNone(g_jax2tf[3])
g_jax2tf = tape.gradient(res, xs,
unconnected_gradients=tf.UnconnectedGradients.ZERO)
self.assertAllClose(g_jax2tf[0].numpy(), np.float32(0.))
self.assertAllClose(g_jax2tf[1].numpy(), np.float32(0.))
self.assertAllClose(g_jax2tf[2].numpy(), np.float32(2.))
self.assertAllClose(g_jax2tf[3].numpy(), np.int32(0))
@jtu.sample_product(with_function=[False, True])
def test_gradients_int_argument(self, with_function=False):
# https://github.com/jax-ml/jax/issues/6975
# Also issue #6975.
# An expanded version of test_gradients_unused_argument
state = dict(
float_used=np.array([0.7, 0.9], dtype=np.float32),
float_passthrough=np.float16(1.),
float_unused=np.array([1.1, 2.2, 3.3], dtype=np.float32),
int_used=np.int16(5),
int_passthrough=np.int8(7),
int_unused=np.array([1, 2, 3], dtype=np.uint32),
bool_used=np.array([True, False, False, True], dtype=np.bool_),
bool_passthrough=np.array([True, False, False, True, False], dtype=np.bool_),
bool_unused=np.array([[True, False], [False, True]], dtype=np.bool_),
)
def jax_f(state):
res = dict(state,
float_used=2. * state["float_used"],
int_used=3 * state["int_used"],
bool_used=(state["bool_used"] == state["bool_used"]))
del res["float_unused"]
del res["int_unused"]
del res["bool_unused"]
return res
args = (state,)
res_jax = jax_f(*args)
# Native JAX AD
vjp_jax_fun, args_vjp = tf_test_util.TransformJaxVJP(jax_f, args, res_jax)
grad_jax, = vjp_jax_fun(*args_vjp)
def compare_with_overrides(*, what, expected, **expected_overrides):
what_keys = set(what.keys())
expected_keys = set(expected.keys())
self.assertEqual(what_keys, expected_keys)
for k, w in what.items():
e = expected[k]
if k in expected_overrides:
if expected_overrides[k] == "ZERO":
e = np.zeros_like(w)
elif expected_overrides[k] == "ZERO_BOOL":
e = np.zeros(np.shape(w), dtype=np.bool_)
elif expected_overrides[k] == "ONE":
e = np.ones_like(w)
else:
e = expected_overrides[k]
if e is None:
self.assertIsNone(w, msg=k)
else:
self.assertIsNotNone(w, msg=k)
w = w.numpy() if isinstance(w, tf.Tensor) else e
e = e.numpy() if isinstance(e, tf.Tensor) else e
try:
self.assertAllClose(e, w, err_msg=k)
except:
print(f"Failed at {k}")
raise
# compare_with_overrides(g_jax, {},
# bool_passthrough=np.zeros(state["bool_passthrough"].shape, dtype=dtypes.float0),
# bool_unused=np.zeros(state["bool_unused"].shape, dtype=dtypes.float0),
# bool_used=np.zeros(state["bool_used"].shape, dtype=dtypes.float0),
# float_passthrough=np.ones_like(state["float_passthrough"]),
# float_unused=np.zeros_like(state["float_unused"]),
# float_used=np.ones_like(state["float_used"]) * np.array(2., dtype=state["float_used"].dtype),
# int_passthrough=np.zeros(state["int_passthrough"].shape, dtype=dtypes.float0),
# int_unused=np.zeros(state["int_unused"].shape, dtype=dtypes.float0),
# int_used=np.zeros(state["int_used"].shape, dtype=dtypes.float0))
# Now native TF gradients, only to test how native TF AD works
_, (grad_tf_0,) = tf_test_util.ComputeTfValueAndGrad(
jax_f, args, unconnected_gradients=tf.UnconnectedGradients.ZERO)
compare_with_overrides(what=grad_tf_0,
expected=grad_jax,
float_unused="ZERO",
bool_used="ZERO", bool_passthrough="ONE", bool_unused="ZERO",
int_used="ZERO", int_passthrough="ONE", int_unused="ZERO")
_, (grad_tf_None,) = tf_test_util.ComputeTfValueAndGrad(
jax_f, args,
unconnected_gradients=tf.UnconnectedGradients.NONE)
compare_with_overrides(what=grad_tf_None,
expected=grad_tf_0,
float_unused=None, int_used=None, int_unused=None,
bool_used=None, bool_unused=None)
f_tf_jax = jax2tf.convert(jax_f)
if with_function:
f_tf_jax = tf.function(f_tf_jax, autograph=False)
_, (grad_tf_jax_0,) = tf_test_util.ComputeTfValueAndGrad(f_tf_jax, args)
# Same results as TF native AD with tf.UnconnectedGradients.ZERO
compare_with_overrides(what=grad_tf_jax_0,
expected=grad_tf_0,
int_passthrough="ZERO", bool_passthrough="ZERO")
_, (grad_tf_jax_None,) = tf_test_util.ComputeTfValueAndGrad(
f_tf_jax, args,
unconnected_gradients=tf.UnconnectedGradients.NONE)
compare_with_overrides(what=grad_tf_jax_None,
expected=grad_tf_0,
int_used=None, int_passthrough=None, int_unused=None,
bool_unused=None, bool_used=None, bool_passthrough=None)
# Not convert the JAX gradient function
tf_vjp_jax_fun = jax2tf.convert(vjp_jax_fun)
grad_tf_vjp_jax, = tf_vjp_jax_fun(*args_vjp)
compare_with_overrides(what=grad_tf_vjp_jax,
expected=grad_tf_0,
bool_passthrough="ZERO_BOOL",
bool_unused="ZERO_BOOL", bool_used="ZERO_BOOL",
int_passthrough="ZERO_BOOL", int_unused="ZERO_BOOL",
int_used="ZERO_BOOL")
def test_readme_gradient_int(self):
x = np.array(2, dtype=np.int16)
def f_jax(x): # x: int16
return x.astype(np.float32) * 2.
print(jax.grad(f_jax, allow_int=True)(x))
# returns a special `float0`: array((b'',), dtype=[('float0', 'V')])
print(jax2tf.convert(jax.grad(f_jax, allow_int=True))(x))
# returns a 0 with same shape as x, but with dtype int32
def f_tf(x): # x: int16
return tf.cast(x, tf.float32) * 2.
xv = tf.Variable(x)
with tf.GradientTape(persistent=True) as tape:
print(tape.gradient(f_tf(xv), xv))
# returns None
print(tape.gradient(f_tf(xv), xv,
unconnected_gradients=tf.UnconnectedGradients.ZERO))
# returns 0 with the same shape and dtype as x
def test_convert_argument_non_callable_error(self):
with self.assertRaisesRegex(TypeError, "Expected a callable value"):
jax2tf.convert(5.)
def test_convert_argument_non_tensor_error(self):
with self.assertRaisesRegex(TypeError,
"Argument.*is not a valid JAX type"):
jax2tf.convert(lambda x: x)(lambda y: y)
def test_argument_eager_tensor(self):
x = jax2tf.convert(jnp.sin)(1.)
jax2tf.convert(jnp.cos)(x) # No error
def test_checkpoint_wrapper_types(self):
m = tf.Module()
m.a = [tf.Module(), tf.Module()]
m.b = (tf.Module(), tf.Module())
m.c = {'a': tf.Module(), 'b': tf.Module()}
self.assertNotEqual(type(m.a), list)
self.assertNotEqual(type(m.b), tuple)
self.assertNotEqual(type(m.c), dict)
self.assertLen(jax.tree_util.tree_leaves(m.a), 2)
self.assertLen(jax.tree_util.tree_leaves(m.b), 2)
self.assertLen(jax.tree_util.tree_leaves(m.c), 2)
@unittest.skip("Test fails at head")
def test_issue_10586(self):
class JaxModule(tf.Module):
def __init__(self):
self._params = {'w': tf.Variable(tf.ones([784, 10]), name='w'),
'b': tf.Variable(tf.ones([10]), name='b')}
def __call__(self, x):
return jax2tf.convert(lambda p, x: x @ p['w'] + p['b'])(self._params, x)
net = JaxModule()
images = tf.ones([1, 784])
with tf.GradientTape() as tape:
loss = tf.reduce_sum(net(images))
params = tape.watched_variables()
grads = tape.gradient(loss, params)
for var, grad in zip(params, grads):
self.assertEqual(var.shape, grad.shape, msg=var.name)
def test_custom_jvp(self):
"""Conversion of function with custom JVP"""
@jax.custom_jvp
def f(x):
return x * x
@f.defjvp
def f_jvp(primals, tangents):
x, = primals
x_dot, = tangents
primal_out = f(x)
tangent_out = 3. * x * x_dot
return primal_out, tangent_out
arg = 0.7
self.TransformConvertAndCompare(f, arg, None)
self.TransformConvertAndCompare(f, arg, "jvp")
self.TransformConvertAndCompare(f, arg, "vmap")
self.TransformConvertAndCompare(f, arg, "jvp_vmap")
self.TransformConvertAndCompare(f, arg, "grad")
self.TransformConvertAndCompare(f, arg, "grad_vmap")
def test_custom_vjp(self):
"""Conversion of function with custom VJP"""
@jax.custom_vjp
def f(x):
return x * x
# f_fwd: a -> (b, residual)
def f_fwd(x):
return f(x), 3. * x
# f_bwd: (residual, CT b) -> [CT a]
def f_bwd(residual, ct_b):
return residual * ct_b,
f.defvjp(f_fwd, f_bwd)
arg = 0.7
self.TransformConvertAndCompare(f, arg, None)
self.TransformConvertAndCompare(f, arg, "vmap")
self.TransformConvertAndCompare(f, arg, "grad")
self.TransformConvertAndCompare(f, arg, "grad_vmap")
def test_remat(self):
def f(x1):
x2 = jnp.sin(x1)
x3 = jnp.sin(x2)
x4 = jnp.sin(x3)
return x4
remat_f = ad_checkpoint.checkpoint(f)
# The computation of grad_f computes "sin" 5 times, 3 for the forward pass
# and then to rematerialize "x2" and "x3" in the backward pass.
arg = np.array(3.)
f_tf = jax2tf.convert(jax.grad(remat_f))
f_tf_hlo = self.TfToHlo(f_tf, arg)
self.assertRegex(f_tf_hlo, r"opt-barrier")
def test_remat_free_var(self):
def f(x):
y = 2 * x
@ad_checkpoint.checkpoint
def g():
return y
return g()
arg = 3.
self.TransformConvertAndCompare(f, arg, None)
self.TransformConvertAndCompare(f, arg, "grad")
def test_checkpoint_name(self):
def f_jax(x):
return ad_checkpoint.checkpoint_name(jnp.sin(x), "sin")
jax2tf.convert(f_jax)(1.) # No error.
def test_convert_of_nested_independent_jit(self):
def func(x):
def inner1(y):
return x + y
# The JIT does not have data dependency
return jax.jit(inner1)(1.)
jax2tf.convert(func)(2.)
def test_convert_of_nested_dependent_jit(self):
def func(x):
def inner1(y):
return x + y
# The JIT does have data dependency
return jax.jit(inner1)(x)
jax2tf.convert(func)(2.) # No error
def test_jit_unused(self):
def f_jax(x, y_unused):
return x * np.float32(2.)
x, y_unused = np.float32(5.), np.arange(7, dtype=np.int32)
res_tf = jax2tf.convert(jax.jit(f_jax, keep_unused=False))(x, y_unused)
self.assertAllClose(f_jax(x, None), res_tf)
@parameterized.named_parameters(
dict(testcase_name=mode, mode=mode)
for mode in ("eager", "graph", "compiled"))
def test_jit_unused_grad(self, mode="eager"):
def f_jax(x, y_unused):
return x * np.float32(2.)
x, y_unused = np.float32(5.), np.arange(7, dtype=np.int32)
res_jax = f_jax(x, y_unused)
f_tf = jax2tf.convert(jax.jit(f_jax, keep_unused=False))
x_tf, y_unused_tf = tf.constant(x), tf.constant(y_unused)
def grad_tf(x, y_unused):
with tf.GradientTape() as tape:
tape.watch(x)
tape.watch(y_unused)
res_tf = f_tf(x, y_unused)
grad_tf_x, grad_tf_y = tape.gradient(res_tf, (x, y_unused))
return res_tf, grad_tf_x, grad_tf_y
if mode == "graph":
grad_tf = tf.function(grad_tf, autograph=False)
elif mode == "compiled":
grad_tf = tf.function(grad_tf, autograph=False, jit_compile=True)
res_tf, grad_tf_x, grad_tf_y = grad_tf(x_tf, y_unused_tf)
self.assertAllClose(res_jax, res_tf)
self.assertAllClose(np.float32(2.), grad_tf_x)
self.assertIsNone(grad_tf_y)
def test_nested_convert_error(self):
def outer(y):
return jax2tf.convert(jnp.sin)(y) # Inner convert takes tracer args
with self.assertRaisesRegex(
ValueError, "convert must be used outside all JAX transformations"):
jax2tf.convert(outer)(np.ones((4,), dtype=np.float32))
def test_nested_convert_error_non_tracer(self):
"""The inner convert takes non-tracer arguments"""
def outer(y):
sin_1 = jax2tf.convert(jnp.sin)(1.) # Inner convert takes non-tracer arg
return y + sin_1
with self.assertRaisesRegex(
ValueError, "convert must be used outside all JAX transformations"):
jax2tf.convert(outer)(2.)
@jtu.sample_product(transform=["jit", "jvp", "grad", "vmap"])
def test_convert_under_transform_error(self, transform="vmap"):
def outer(y):
return jax2tf.convert(jnp.sin)(y) # Inner convert takes tracer args
with self.assertRaisesRegex(
ValueError, "convert must be used outside all JAX transformations"):
self.TransformConvertAndCompare(outer, np.ones((4,)), transform)
@jtu.sample_product(transform=["jit", "jvp", "grad", "vmap"])
def test_convert_under_transform_error_non_tracer(self, transform="vmap"):
def outer(y):
sin_1 = jax2tf.convert(jnp.sin)(1.) # Inner convert takes non-tracer arg
return y + sin_1
with self.assertRaisesRegex(
ValueError, "convert must be used outside all JAX transformations"):
self.TransformConvertAndCompare(outer, np.ones((4,)), transform)
def test_name_scope(self):
def run_tf():
@jax.named_call
def my_test_function_jax(x):
return x * x
def caller_jax(x):
return my_test_function_jax(jnp.sin(x))
out = jax2tf.convert(caller_jax, with_gradient=False)(2.)
return out
self.assertIn("my_test_function_jax/mul", self.TfToHlo(run_tf))
def test_bfloat16_constant(self):
# Re: https://github.com/jax-ml/jax/issues/3942
def jax_fn_scalar(x):
x = x.astype(jnp.bfloat16)
x *= 2.
return x
def jax_fn_array(x):
x = x.astype(jnp.bfloat16)
x *= np.array([1.5, 2.5, 3.5], jnp.bfloat16)
return x
tf_fn_scalar = jax2tf.convert(jax_fn_scalar)
self.assertAllClose(tf_fn_scalar(1.375).numpy(), jnp.bfloat16(2.750))
tf_fn_array = jax2tf.convert(jax_fn_array)
self.assertAllClose(
tf_fn_array(np.array([3, 4, 5])), np.array([4.5, 10, 17.5],
jnp.bfloat16))
def test_weak_types(self):
mul = jax.jit(jnp.multiply)
# The value `2` here should be weakly typed, and should not lead to
# promotion.
tf_fn = jax2tf.convert(lambda x: mul(x, 2.))
self.assertAllClose(tf_fn(tf.constant(1.375, tf.bfloat16)).numpy(),
jnp.bfloat16(2.750))
@jtu.sample_product(with_function=[False, True])
def test_kwargs(self, with_function=False):
# Re: https://github.com/jax-ml/jax/issues/6791
def f_jax(*, x):
return jnp.sum(x)
f_tf = jax2tf.convert(f_jax)
if with_function:
f_tf = tf.function(f_tf, autograph=False)
self.assertAllClose(
f_tf(x=np.zeros(3, dtype=np.float32)), # Call with kwargs.
np.zeros((), dtype=np.float32))
@jtu.sample_product(with_function=[False, True])
def test_grad_kwargs(self, with_function=False):
# Re: https://github.com/jax-ml/jax/issues/6791
x = (np.zeros(3, dtype=np.float32),
np.zeros(4, dtype=np.float32))
def f_jax(*, x=(1., 2.)):
return jnp.sum(x[0]) + 2. * jnp.sum(x[1])
f_tf = jax2tf.convert(f_jax)
if with_function:
f_tf = tf.function(f_tf, autograph=False)
xv = tf.nest.map_structure(tf.Variable, x)
with tf.GradientTape() as tape:
res = f_tf(x=xv)
grad_tf = tape.gradient(res, xv)
self.assertAllClose((np.full_like(x[0], fill_value=1.),
np.full_like(x[1], fill_value=2.)),
(grad_tf[0].numpy(), grad_tf[1].numpy()))
def test_device_array_arg(self):
self.ConvertAndCompare(jnp.sin, jnp.zeros((2, 3), jnp.float32))
def test_randint(self):
def randint():
return jax.random.randint(
jax.random.PRNGKey(42), shape=(), minval=0, maxval=1)
self.ConvertAndCompare(randint)
def test_op_metadata_simple(self):
self.skipTest("include_xla_op_metadata not yet enabled")
# A simple example
# The user_frame is used to compute line numbers for ops in the test.
user_frame = source_info_util.user_frame(
source_info_util.current().traceback)
def f_simple(x):
return jnp.sin(x)
x = np.ones((2, 3), np.float32)
self.CheckOpMetadata(
f_simple, x,
[tf_test_util.OpMetadataGraph(tf_type="Sin",
source_file=__file__,
source_line=user_frame.start_line + 2,
op_name="jax2tf(f_simple)/sin",
op_type="sin")
]
)
def test_op_metadata_sub_jit(self):
self.skipTest("include_xla_op_metadata not yet enabled")
# Calling a jitted-function
# The user_frame is used to compute line numbers for ops in the test.
user_frame = source_info_util.user_frame(
source_info_util.current().traceback)
def f_callee(x):
return jnp.cos(x)
def f_caller(x):
y = jnp.tanh(x)
z = jax.jit(f_callee)(y)
return jnp.sin(z)
x = np.ones((2, 3), np.float32)
self.CheckOpMetadata(
f_caller, x,
[tf_test_util.OpMetadataGraph(tf_type="Tanh",
source_file=__file__,
source_line=user_frame.start_line + 4,
op_name="jax2tf(f_caller)/tanh",
op_type="tanh"),
tf_test_util.OpMetadataGraph(tf_type="Cos",
source_file=__file__,
source_line=user_frame.start_line + 2,
op_name="jax2tf(f_caller)/jit(f_callee)/cos",
op_type="cos"),
tf_test_util.OpMetadataGraph(tf_type="Sin",
source_file=__file__,
source_line=user_frame.start_line + 6,
op_name="jax2tf(f_caller)/sin",
op_type="sin"),
]
)
def test_op_metadata_named(self):
self.skipTest("include_xla_op_metadata not yet enabled")
# Calling a jax.named_call
# The user_frame is used to compute line numbers for ops in the test.
user_frame = source_info_util.user_frame(
source_info_util.current().traceback)
def f_callee(x):
return jnp.cos(x)
def f_caller(x):
y = jnp.tanh(x)
z = jax.named_call(f_callee, name="callee")(y)
return jnp.sin(z)
x = np.ones((2, 3), np.float32)
self.CheckOpMetadata(
f_caller, x,
[tf_test_util.OpMetadataGraph(tf_type="Tanh",
source_file=__file__,
source_line=user_frame.start_line + 4,
op_name="jax2tf(f_caller)/tanh",
op_type="tanh"),
tf_test_util.OpMetadataGraph(tf_type="Cos",
source_file=__file__,
source_line=user_frame.start_line + 2,
op_name="jax2tf(f_caller)/named(callee)/cos",
op_type="cos"),
tf_test_util.OpMetadataGraph(tf_type="Sin",
source_file=__file__,
source_line=user_frame.start_line + 6,
op_name="jax2tf(f_caller)/sin",
op_type="sin"),
]
)
def test_op_metadata_while_and_cond(self):
self.skipTest("include_xla_op_metadata not yet enabled")
# An example with while and cond
# The user_frame is used to compute line numbers for ops in the test.
user_frame = source_info_util.user_frame(
source_info_util.current().traceback)
def f_while_cond(x):
def body_fun(i_acc):
i, acc = i_acc
return (i + 1,
(jnp.cos(acc) +
lax.cond(jnp.mod(i, 2) == 0,
lambda acc: jnp.sin(acc),
lambda acc: acc,
acc)))
_, acc = lax.while_loop(
lambda i_acc: i_acc[0] <= 5,
body_fun, (0, x))
return acc
x = np.ones((2, 3), np.float32)
self.CheckOpMetadata(
f_while_cond, x,
[tf_test_util.OpMetadataGraph(tf_type="Cos",
source_file=__file__,
source_line=user_frame.start_line + 5,
op_name="jax2tf(f_while_cond)/while/body/cos",
op_type="cos"),
tf_test_util.OpMetadataGraph(tf_type="Sin",
source_file=__file__,
source_line=user_frame.start_line + 7,
op_name="jax2tf(f_while_cond)/while/body/branch_1_fun/sin",
op_type="sin"),
tf_test_util.OpMetadataGraph(tf_type="FloorMod",
source_file=__file__,
source_line=user_frame.start_line + 6,
op_name="jax2tf(f_while_cond)/while/body/rem",
op_type="rem"),
]
)
def test_op_metadata_batched_while(self):
self.skipTest("include_xla_op_metadata not yet enabled")
# An example with a batched while loop
# The user_frame is used to compute line numbers for ops in the test.
user_frame = source_info_util.user_frame(
source_info_util.current().traceback)
@jax.vmap
def f_while(x):
def body_fun(carry):
new_carry = jnp.sin(carry) # We look for "sin" in the graph
return new_carry
_, carry = lax.while_loop(
lambda carry: jnp.all(carry <= x), # We look for "le" in the graph
body_fun, x)
return carry
shape = (3, 2)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
jax_comp = jax.jit(f_while).lower(x).compiler_ir('hlo')
backend = xb.get_backend()
modules = backend.compile(jax_comp).hlo_modules()
jax_opt_hlo = modules[0].to_string()
print(f"JAX OPT HLO = {jax_opt_hlo}")
self.CheckOpMetadata(
f_while, x,
[tf_test_util.OpMetadataGraph(tf_type="Sin",
source_file=__file__,
source_line=user_frame.start_line + 4,
op_name="jax2tf(f_while)/while/body/sin",
op_type="sin"),
tf_test_util.OpMetadataGraph(tf_type="LessEqual",
source_file=__file__,
source_line=user_frame.start_line + 8,
op_name="jax2tf(f_while)/while/body_pred/le",
op_type="le"),
]
)
def test_op_metadata_disabled(self):
self.skipTest("include_xla_op_metadata not yet enabled")
def f_simple(x):
return jnp.sin(x)
x = np.ones((2, 3), np.float32)
self.CheckOpMetadata(
f_simple, x,
[],
include_xla_op_metadata=False
)
def assertAllOperationStartWith(self, g: "tf.Graph", scope_name: str):
"""Assert all operations name start with ```scope_name```.
Also the scope_name only occur one time.
"""
result = g.get_operations()
if not result:
self.fail("result is empty.")
for op in result:
logging.info("tf op.name = %s", op.name)
if not op.name.startswith(scope_name):
self.fail(f"{op.name} does not start with {scope_name}.")
def test_name_scope_polymorphic(self):
if not config.dynamic_shapes.value:
self.skipTest("shape polymorphism but --jax_dynamic_shapes is not set.")
def func_jax(x, y):
return jnp.sin(x) + jnp.cos(y)
func_tf = jax2tf.convert(
func_jax, polymorphic_shapes="(b,...)", with_gradient=True)
outer_scope = "output_a"
g = tf.Graph()
with g.as_default() as g:
with tf.name_scope(outer_scope):
x = tf.Variable(
tf.zeros(shape=(1, 5), dtype=tf.dtypes.float32), name="x")
y = tf.compat.v1.placeholder(tf.dtypes.float32, (None, 5), "y")
_ = func_tf(x, y)
self.assertAllOperationStartWith(g, outer_scope)
# wrap tf.function
g2 = tf.Graph()
with g2.as_default() as g:
with tf.name_scope(outer_scope):
x = tf.Variable(
tf.zeros(shape=(1, 5), dtype=tf.dtypes.float32), name="x")
y = tf.compat.v1.placeholder(tf.dtypes.float32, (None, 5), "y")
_ = tf.function(func_tf, jit_compile=True, autograph=False)(x, y)
self.assertAllOperationStartWith(g2, outer_scope)
def test_name_scope_cond(self):
def f(x):
def f_pos(x):
with jax.named_scope("jax_f_pos"):
return lax.cond(x < 1., jnp.cos, jnp.sin, x)
with jax.named_scope("jax_f_outer"):
return lax.cond(x > 0., f_pos, lambda x: x, x)
@tf.function(jit_compile=True, autograph=False)
def outer_forward():
with tf.name_scope("tf_outer_forward"):
x = 0.5
f_tf = jax2tf.convert(f)
_ = f_tf(x)
g = outer_forward.get_concrete_function().graph
self.assertAllOperationStartWith(g, "tf_outer_forward")
for func in g._functions.values():
self.assertAllOperationStartWith(
func.graph, "tf_outer_forward/jax2tf_f_/jax_f_outer")
x = tf.Variable(0.5, name="tf_outer_back/x")
@tf.function(jit_compile=True, autograph=False)
def outer_back():
with tf.name_scope("tf_outer_back"):
f_tf = jax2tf.convert(f)
with tf.GradientTape() as tape:
res_tf = f_tf(x)
_ = tape.gradient(res_tf, x)
g = outer_back.get_concrete_function().graph
self.assertAllOperationStartWith(g, "tf_outer_back")
for func in g._functions.values():
self.assertAllOperationStartWith(func.graph, "tf_outer_back")
def test_name_scope_while_loop(self):
def f(x):
with tf.name_scope("outer_scope"):
def condition(x):
return jnp.sum(x, keepdims=False) < 100
def body(x):
return jnp.add(x, 2.0)
result = jax.lax.while_loop(condition, body, x)
return result
tf_f = tf.function(jax2tf.convert(f), jit_compile=True, autograph=False)
g = tf_f.get_concrete_function(tf.zeros((1, 3))).graph
for func in g._functions.values():
for op in func.graph.get_operations():
if op.name.count(f"outer_scope/jax2tf_{f.__name__}_/while") > 1:
self.fail(
"tf graph has repeated name issue on when converting lax.while to tf.while."
f"See op.name = : {op.name}")
@parameterized.named_parameters(
dict(testcase_name=(
f"2={transform2 if transform2 != 'none' else ''}"
f"_1={transform1 if transform1 != 'none' else ''}"
f"{'_nullary' if nullary else ''}"),
transform1=transform1, transform2=transform2, nullary=nullary)
# Test transform2(transform1(func))
for transform1 in [
"none",
"jit", "jit_in_shardings_None",
"jit_in_shardings_Sharding", "shard_map", "pmap"]
for transform2 in (
["none", "jit_in_shardings_None",
"jit_in_shardings_Sharding"]
)
# Whether the function can be nullary
for nullary in (
# To reduce the number of tests
[True, False] if transform2 == "none" else
[False])
)
def test_cross_platform(self,
transform1="jit_in_shardings_P",
transform2="jit_in_shardings_P", nullary=False):
# Tests cross-lowering for transform2(transform1(func))
if transform2 == "none" and (
transform1 == "shard_map" or
transform1 in ["jit_in_shardings_P", "jit_in_shardings_Sharding"] and nullary):
raise unittest.SkipTest("Skip because must have jit at top level")
x = np.ones((4, 6), dtype=np.float32)
mesh = sharding.Mesh(jax.devices()[:1], ("a",))
# cummax has distinctive lowering for TPU, using a reduce-window op
func = lambda x: lax.cummax(x, axis=0, reverse=False)
# For shard_map we cannot use cummax :-( because it does not have a
# replication rule. But we use lax.all_gather which on TPU is lowered with
# an all-gather op
func_shard_map = lambda x: lax.all_gather(x, "a", axis=1, tiled=True)
def apply_transform(func, transform: str):
transformed_func = dict(
none=func,
jit=jax.jit(func),
jit_in_shardings_None=jax.jit(func, in_shardings=None),
jit_in_shardings_Sharding=jax.jit(
func,
in_shardings=(sharding.NamedSharding(mesh, P("a")),),
out_shardings=sharding.NamedSharding(mesh, P("a"))),
shard_map=(
shard_map(func, mesh=mesh, in_specs=(P("a", None),),
out_specs=P("a", None))),
pmap=jax.pmap(func, in_axes=0, out_axes=0),
)[transform]
return transformed_func
transformed1_func = apply_transform(
(func_shard_map if transform1 == "shard_map" else func),
transform1)
assert transform2 not in ["shard_map"]
transformed2_func = apply_transform(transformed1_func, transform2)
if transform1 == "pmap":
x = x.reshape((1, -1)) # Since we use 1 device
if not nullary:
func_to_convert = transformed2_func
args = [x]
else:
func_to_convert = lambda: transformed2_func(jnp.ones(x.shape,
dtype=x.dtype))
args = []
if transform1 == "pmap":
if nullary:
raise unittest.SkipTest("Cannot lower nested pmap: jit-of-pmap warning")
raise unittest.SkipTest("TODO: figure out how to invoke pmap from TF")
# Run the JAX native version, to check it works, and to fill caches.
_ = func_to_convert(*args)
exported = export.export(
(jax.jit(func_to_convert) if not hasattr(func_to_convert, "trace") else func_to_convert),
platforms=("tpu",)
)(*(core.ShapedArray(a.shape, a.dtype) for a in args))
if transform1 == "shard_map":
self.assertIn("stablehlo.all_gather", str(exported.mlir_module()))
else:
self.assertIn("stablehlo.reduce_window", str(exported.mlir_module()))
def test_cross_platform_error(self):
f_tf = jax2tf.convert(jnp.sin,
native_serialization_platforms=('tpu',))
x = np.float32(.5)
if jtu.test_device_matches(["tpu"]):
self.assertAllClose(jnp.sin(x), f_tf(x))
else:
# We can construct the tf.Graph
f_tf_fun = tf.function(f_tf, jit_compile=True, autograph=False)
graph_def = f_tf_fun.get_concrete_function(x).graph.as_graph_def()
self.assertIn("XlaCallModule", str(graph_def))
with self.assertRaisesRegex(tf.errors.NotFoundError,
"The current platform .* is not among the platforms required by the module"):
f_tf(x)
def test_native_parameters_for_non_native(self):
# We can use the native_serialization_platforms even for non-native
# serialization.
f_tf = jax2tf.convert(jnp.sin,
native_serialization_platforms=('cpu',))
x = np.float32(.5)
# Run the TF code on CPU
tf_cpus = tf.config.list_logical_devices("CPU")
self.assertNotEmpty(tf_cpus)
with tf.device(tf_cpus[0]):
self.assertAllClose(jnp.sin(x), f_tf(x))
f_tf = jax2tf.convert(jnp.sin,
native_serialization_disabled_checks=(
jax2tf.DisabledSafetyCheck.platform(),))
self.assertAllClose(jnp.sin(x), f_tf(x))
def test_native_serialization_grad(self):
# Check that the grad function uses the same native serialization parameters
# as the primal function.
f_tf = jax2tf.convert(jnp.sin,
native_serialization_platforms=('tpu',))
x = np.arange(4, dtype=np.float32)
x_v = tf.Variable(x)
@tf.function(autograph=False)
def f_grad_tf(x_v):
with tf.GradientTape() as tape:
tape.watch(x_v)
res_tf = f_tf(x_v)
return tape.gradient(res_tf, x_v)
# Make sure that we have 2x XlaCallModule in the graph of the gradient
# function
f_grad_tf_fun = tf.function(f_grad_tf, autograph=False)
graph_def = f_grad_tf_fun.get_concrete_function(x).graph.as_graph_def()
logging.info("Found graph_def: %s", graph_def)
self.assertLen(re.findall(r'op:\s*"XlaCallModule"', str(graph_def)), 2)
if not jtu.test_device_matches(["tpu"]):
with self.assertRaisesRegex(
tf.errors.NotFoundError,
r"The current platform .* is not among the platforms required by the module: \[TPU\]"):
f_grad_tf(x_v)
def test_effects_error(self):
def f_jax(x):
jax.debug.print("{}", x)
return jnp.sin(x)
with self.assertRaisesRegex(NotImplementedError,
"serialization of host_callbacks is not yet implemented"):
jax2tf.convert(f_jax)(np.float32(42.))
def f_ordered_jax(x):
jax.debug.print("{}", x, ordered=True)
return jnp.sin(x)
with self.assertRaisesRegex(NotImplementedError,
"serialization of host_callbacks is not yet implemented"):
jax2tf.convert(f_ordered_jax)(np.float32(42.))
def test_tuple_args(self):
# On TPU if we have more than 2000 arguments, we pass them as a tuple.
# This is a compiler option, and should have no effect on lowering.
if not jtu.test_device_matches(["tpu"]):
raise unittest.SkipTest("Test enabled on TPU only")
def f_jax(*many_args):
acc = 0.
for a in many_args:
acc += a
return acc
many_args = [np.float32(i) for i in range(2001)]
# Test that we do set lowered.compile_args[tuple_args]
lowered = jax.jit(f_jax).lower(*many_args)
self.assertTrue(lowered._lowering.compile_args["tuple_args"])
res = jax2tf.convert(f_jax)(*many_args)
self.assertAllClose(f_jax(*many_args), res)
def test_nested_convert(self):
# Test call sequence: convert -> call_tf -> convert.
@jax.jit
def f_jax(x):
return x + 1
inputs = np.ones((10), dtype=np.float32)
res = f_jax(inputs)
f_tf = jax2tf.convert(f_jax)
self.assertAllClose(res, f_tf(inputs))
f_jax_nested = jax2tf.call_tf(f_tf)
self.assertAllClose(res, f_jax_nested(inputs))
f_tf_nested = jax2tf.convert(f_jax_nested)
self.assertAllClose(res, f_tf_nested(inputs))
def test_multi_platform(self):
if config.enable_x64.value:
self.skipTest("TODO: enable when we can handle i64 platform_index_argument")
# Checks that we dispatch from TF to the proper JAX platform lowering.
# We add a different value to it: cpu=2., tpu=3., cuda=4., rocm=5.
_testing_multi_platform_to_add = dict(cpu=2., tpu=3., cuda=4., rocm=5.)
def f_jax(x):
return x + lax.platform_dependent(
tpu=lambda: _testing_multi_platform_to_add["tpu"],
cuda=lambda: _testing_multi_platform_to_add["cuda"],
rocm=lambda: _testing_multi_platform_to_add["rocm"],
default=lambda: _testing_multi_platform_to_add["cpu"]
)
x = np.float32(.42)
f_tf = jax2tf.convert(
f_jax,
native_serialization_platforms=("cpu", "cuda", "tpu"))
for tf_device in self.tf_devices:
logging.info(
f"Running on tf_device = {tf_device} of device_type = {tf_device.device_type}")
with tf.device(tf_device):
res = f_tf(x)
tf_device_jax_platform = dict(
CPU="cpu", GPU="cuda", TPU="tpu"
)[tf_device.device_type]
self.assertAllClose(
res,
x + _testing_multi_platform_to_add[tf_device_jax_platform])
def test_dot_algorithm(self):
# ref: https://github.com/jax-ml/jax/issues/24236
if tf.version.VERSION.split(".") <= ["2", "18", "0"]:
self.skipTest("Because of an XLA bug this test segfaults with TF v2.18.0")
if jtu.test_device_matches(["tpu"]):
algorithm = "BF16_BF16_F32"
else:
algorithm = "F32_F32_F32"
def f_jax(x):
return jax.lax.dot(x, x, precision=algorithm)
f_tf = jax2tf.convert(f_jax)
f_tf(np.ones((128, 128), dtype=np.float32)) # no crash
def test_jvp_through_loop(self):
# Context: b/388929258
num_actions = 512
def tf_preprocessor(features):
features["num_c_actions"] = tf.constant(256, tf.int32)
return features
def postprocessor(prob, features):
actions = jnp.arange(num_actions, dtype=jnp.int32)
r = actions // features["num_c_actions"]
c = actions - r * features["num_c_actions"]
rr = jnp.array([0.12, 0.3])[r] * prob
rc = (jnp.arange(256) * 0.7)[c] * prob
return rr, rc
def loop_step(features, params):
features = jax2tf.call_tf(tf_preprocessor)(features)
odds = features["f1"] @ params["w1"] + features["f2"] @ params["w2"]
prob = jax.nn.sigmoid(odds)
rr, rc = postprocessor(prob, features)
new_f1 = jnp.mean(rr, keepdims=True)
new_f2 = jnp.mean(rc, keepdims=True)
return new_f1, new_f2
def loop(init_features, params):
def body(carry, unused_x):
f1, f2 = carry
return loop_step({"f1": f1, "f2": f2}, params), None
(rr, rc), _ = jax.lax.scan(
body, (init_features["f1"], init_features["f2"]), length=10
)
return rr, rc
def loss(features, params):
rr, rc = loop(features, params)
return jnp.mean((rr - rc) ** 2)
jax.grad(loss, argnums=(1,))(
{"f1": jnp.array([0.5]), "f2": jnp.array([0.7])},
{
"w1": jnp.ones((1, num_actions)) * 0.01,
"w2": jnp.ones((1, num_actions)) * 0.01,
},
)
@unittest.skipIf(tf is None, "Test requires tensorflow")
@jtu.with_config(jax_enable_custom_prng=True)
@jtu.thread_unsafe_test_class()
| Jax2TfTest |
python | django-haystack__django-haystack | test_haystack/test_indexes.py | {
"start": 4714,
"end": 22295
} | class ____(TestCase):
fixtures = ["base_data"]
def setUp(self):
super().setUp()
self.sb = connections["default"].get_backend()
self.mi = GoodMockSearchIndex()
self.cmi = GoodCustomMockSearchIndex()
self.cnmi = GoodNullableMockSearchIndex()
self.gfmsi = GoodFacetedMockSearchIndex()
# Fake the unified index.
self.old_unified_index = connections["default"]._index
self.ui = UnifiedIndex()
self.ui.build(indexes=[self.mi])
connections["default"]._index = self.ui
self.sample_docs = {
"core.mockmodel.1": {
"text": "Indexed!\n1\n",
"django_id": "1",
"django_ct": "core.mockmodel",
"extra": "Stored!\n1",
"author": "daniel1",
"pub_date": datetime.datetime(2009, 3, 17, 6, 0),
"id": "core.mockmodel.1",
},
"core.mockmodel.2": {
"text": "Indexed!\n2\n",
"django_id": "2",
"django_ct": "core.mockmodel",
"extra": "Stored!\n2",
"author": "daniel2",
"pub_date": datetime.datetime(2009, 3, 17, 7, 0),
"id": "core.mockmodel.2",
},
"core.mockmodel.3": {
"text": "Indexed!\n3\n",
"django_id": "3",
"django_ct": "core.mockmodel",
"extra": "Stored!\n3",
"author": "daniel3",
"pub_date": datetime.datetime(2009, 3, 17, 8, 0),
"id": "core.mockmodel.3",
},
}
def tearDown(self):
connections["default"]._index = self.old_unified_index
super().tearDown()
def test_no_contentfield_present(self):
self.assertRaises(SearchFieldError, BadSearchIndex1)
def test_too_many_contentfields_present(self):
self.assertRaises(SearchFieldError, BadSearchIndex2)
def test_contentfield_present(self):
try:
mi = GoodMockSearchIndex()
except:
self.fail()
def test_proper_fields(self):
self.assertEqual(len(self.mi.fields), 4)
self.assertTrue("text" in self.mi.fields)
self.assertTrue(isinstance(self.mi.fields["text"], indexes.CharField))
self.assertTrue("author" in self.mi.fields)
self.assertTrue(isinstance(self.mi.fields["author"], indexes.CharField))
self.assertTrue("pub_date" in self.mi.fields)
self.assertTrue(isinstance(self.mi.fields["pub_date"], indexes.DateTimeField))
self.assertTrue("extra" in self.mi.fields)
self.assertTrue(isinstance(self.mi.fields["extra"], indexes.CharField))
self.assertEqual(len(self.cmi.fields), 7)
self.assertTrue("text" in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields["text"], indexes.CharField))
self.assertTrue("author" in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields["author"], indexes.CharField))
self.assertTrue("author_exact" in self.cmi.fields)
self.assertTrue(
isinstance(self.cmi.fields["author_exact"], indexes.FacetCharField)
)
self.assertTrue("pub_date" in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields["pub_date"], indexes.DateTimeField))
self.assertTrue("pub_date_exact" in self.cmi.fields)
self.assertTrue(
isinstance(self.cmi.fields["pub_date_exact"], indexes.FacetDateTimeField)
)
self.assertTrue("extra" in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields["extra"], indexes.CharField))
self.assertTrue("hello" in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields["extra"], indexes.CharField))
def test_index_queryset(self):
self.assertEqual(self.cmi.index_queryset().count(), 3)
def test_read_queryset(self):
self.assertEqual(self.cmi.read_queryset().count(), 2)
def test_build_queryset(self):
# The custom SearchIndex.build_queryset returns the same records as
# the read_queryset
self.assertEqual(self.cmi.build_queryset().count(), 2)
# Store a reference to the original method
old_guf = self.mi.__class__.get_updated_field
self.mi.__class__.get_updated_field = lambda self: "pub_date"
# With an updated field, we should have filtered results
sd = datetime.datetime(2009, 3, 17, 7, 0)
self.assertEqual(len(self.mi.build_queryset(start_date=sd)), 2)
ed = datetime.datetime(2009, 3, 17, 7, 59)
self.assertEqual(len(self.mi.build_queryset(end_date=ed)), 2)
sd = datetime.datetime(2009, 3, 17, 6, 0)
ed = datetime.datetime(2009, 3, 17, 6, 59)
self.assertEqual(len(self.mi.build_queryset(start_date=sd, end_date=ed)), 1)
# Remove the updated field for the next test
del self.mi.__class__.get_updated_field
# The default should return all 3 even if we specify a start date
# because there is no updated field specified
self.assertEqual(len(self.mi.build_queryset(start_date=sd)), 3)
# Restore the original attribute
self.mi.__class__.get_updated_field = old_guf
def test_prepare(self):
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.assertEqual(len(self.mi.prepare(mock)), 7)
self.assertEqual(
sorted(self.mi.prepare(mock).keys()),
["author", "django_ct", "django_id", "extra", "id", "pub_date", "text"],
)
def test_custom_prepare(self):
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.assertEqual(len(self.cmi.prepare(mock)), 11)
self.assertEqual(
sorted(self.cmi.prepare(mock).keys()),
[
"author",
"author_exact",
"django_ct",
"django_id",
"extra",
"hello",
"id",
"pub_date",
"pub_date_exact",
"text",
"whee",
],
)
self.assertEqual(len(self.cmi.full_prepare(mock)), 11)
self.assertEqual(
sorted(self.cmi.full_prepare(mock).keys()),
[
"author",
"author_exact",
"django_ct",
"django_id",
"extra",
"hello",
"id",
"pub_date",
"pub_date_exact",
"text",
"whee",
],
)
def test_thread_safety(self):
# This is a regression test. ``SearchIndex`` used to write to
# ``self.prepared_data``, which would leak between threads if things
# went too fast.
exceptions = []
def threaded_prepare(index_queue, index, model):
try:
index.queue = index_queue
prepped = index.prepare(model)
except Exception as e:
exceptions.append(e)
raise
class ThreadedSearchIndex(GoodMockSearchIndex):
def prepare_author(self, obj):
if obj.pk == 20:
time.sleep(0.1)
else:
time.sleep(0.5)
index_queue.put(self.prepared_data["author"])
return self.prepared_data["author"]
tmi = ThreadedSearchIndex()
index_queue = queue.Queue()
mock_1 = MockModel()
mock_1.pk = 20
mock_1.author = "foo"
mock_1.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
mock_2 = MockModel()
mock_2.pk = 21
mock_2.author = "daniel%s" % mock_2.id
mock_2.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
th1 = Thread(target=threaded_prepare, args=(index_queue, tmi, mock_1))
th2 = Thread(target=threaded_prepare, args=(index_queue, tmi, mock_2))
th1.start()
th2.start()
th1.join()
th2.join()
mock_1_result = index_queue.get()
mock_2_result = index_queue.get()
self.assertEqual(mock_1_result, "foo")
self.assertEqual(mock_2_result, "daniel21")
def test_custom_prepare_author(self):
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.assertEqual(len(self.cmi.prepare(mock)), 11)
self.assertEqual(
sorted(self.cmi.prepare(mock).keys()),
[
"author",
"author_exact",
"django_ct",
"django_id",
"extra",
"hello",
"id",
"pub_date",
"pub_date_exact",
"text",
"whee",
],
)
self.assertEqual(len(self.cmi.full_prepare(mock)), 11)
self.assertEqual(
sorted(self.cmi.full_prepare(mock).keys()),
[
"author",
"author_exact",
"django_ct",
"django_id",
"extra",
"hello",
"id",
"pub_date",
"pub_date_exact",
"text",
"whee",
],
)
self.assertEqual(self.cmi.prepared_data["author"], "Hi, I'm daniel20")
self.assertEqual(self.cmi.prepared_data["author_exact"], "Hi, I'm daniel20")
def test_custom_model_attr(self):
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.assertEqual(len(self.cmi.prepare(mock)), 11)
self.assertEqual(
sorted(self.cmi.prepare(mock).keys()),
[
"author",
"author_exact",
"django_ct",
"django_id",
"extra",
"hello",
"id",
"pub_date",
"pub_date_exact",
"text",
"whee",
],
)
self.assertEqual(len(self.cmi.full_prepare(mock)), 11)
self.assertEqual(
sorted(self.cmi.full_prepare(mock).keys()),
[
"author",
"author_exact",
"django_ct",
"django_id",
"extra",
"hello",
"id",
"pub_date",
"pub_date_exact",
"text",
"whee",
],
)
self.assertEqual(self.cmi.prepared_data["hello"], "World!")
def test_custom_index_fieldname(self):
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
cofnmi = GoodOverriddenFieldNameMockSearchIndex()
self.assertEqual(len(cofnmi.prepare(mock)), 6)
self.assertEqual(
sorted(cofnmi.prepare(mock).keys()),
["django_ct", "django_id", "hello", "id", "more_content", "name_s"],
)
self.assertEqual(cofnmi.prepared_data["name_s"], "daniel20")
self.assertEqual(cofnmi.get_content_field(), "more_content")
def test_get_content_field(self):
self.assertEqual(self.mi.get_content_field(), "text")
def test_update(self):
self.sb.clear()
self.assertEqual(self.sb.search("*")["hits"], 0)
self.mi.update()
self.assertEqual(self.sb.search("*")["hits"], 3)
self.sb.clear()
def test_update_object(self):
self.sb.clear()
self.assertEqual(self.sb.search("*")["hits"], 0)
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.mi.update_object(mock)
self.assertEqual(
[(res.content_type(), res.pk) for res in self.sb.search("*")["results"]],
[("core.mockmodel", "20")],
)
self.sb.clear()
def test_remove_object(self):
self.mi.update()
self.assertEqual(self.sb.search("*")["hits"], 3)
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.mi.update_object(mock)
self.assertEqual(self.sb.search("*")["hits"], 4)
self.mi.remove_object(mock)
self.assertEqual(
[(res.content_type(), res.pk) for res in self.sb.search("*")["results"]],
[("core.mockmodel", "1"), ("core.mockmodel", "2"), ("core.mockmodel", "3")],
)
# Put it back so we can test passing kwargs.
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.mi.update_object(mock)
self.assertEqual(self.sb.search("*")["hits"], 4)
self.mi.remove_object(mock, commit=False)
self.assertEqual(
[(res.content_type(), res.pk) for res in self.sb.search("*")["results"]],
[
("core.mockmodel", "1"),
("core.mockmodel", "2"),
("core.mockmodel", "3"),
("core.mockmodel", "20"),
],
)
self.sb.clear()
def test_clear(self):
self.mi.update()
self.assertGreater(self.sb.search("*")["hits"], 0)
self.mi.clear()
self.assertEqual(self.sb.search("*")["hits"], 0)
def test_reindex(self):
self.mi.reindex()
self.assertEqual(
[(res.content_type(), res.pk) for res in self.sb.search("*")["results"]],
[("core.mockmodel", "1"), ("core.mockmodel", "2"), ("core.mockmodel", "3")],
)
self.sb.clear()
def test_inheritance(self):
try:
agmi = AltGoodMockSearchIndex()
except:
self.fail()
self.assertEqual(len(agmi.fields), 5)
self.assertTrue("text" in agmi.fields)
self.assertTrue(isinstance(agmi.fields["text"], indexes.CharField))
self.assertTrue("author" in agmi.fields)
self.assertTrue(isinstance(agmi.fields["author"], indexes.CharField))
self.assertTrue("pub_date" in agmi.fields)
self.assertTrue(isinstance(agmi.fields["pub_date"], indexes.DateTimeField))
self.assertTrue("extra" in agmi.fields)
self.assertTrue(isinstance(agmi.fields["extra"], indexes.CharField))
self.assertTrue("additional" in agmi.fields)
self.assertTrue(isinstance(agmi.fields["additional"], indexes.CharField))
def test_proper_field_resolution(self):
mrofsc = MROFieldsSearchChild()
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
mock.test_a = "This is A"
mock.test_b = "This is B"
self.assertEqual(len(mrofsc.fields), 1)
prepped_data = mrofsc.prepare(mock)
self.assertEqual(len(prepped_data), 4)
self.assertEqual(prepped_data["text"], "This is A")
def test_load_all_queryset(self):
self.assertEqual([obj.id for obj in self.cmi.load_all_queryset()], [2, 3])
def test_nullable(self):
mock = MockModel()
mock.pk = 20
mock.author = None
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
prepared_data = self.cnmi.prepare(mock)
self.assertEqual(len(prepared_data), 6)
self.assertEqual(
sorted(prepared_data.keys()),
["author", "author_exact", "django_ct", "django_id", "id", "text"],
)
prepared_data = self.cnmi.full_prepare(mock)
self.assertEqual(len(prepared_data), 4)
self.assertEqual(
sorted(prepared_data.keys()), ["django_ct", "django_id", "id", "text"]
)
def test_custom_facet_fields(self):
mock = MockModel()
mock.pk = 20
mock.author = "daniel"
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
prepared_data = self.gfmsi.prepare(mock)
self.assertEqual(len(prepared_data), 8)
self.assertEqual(
sorted(prepared_data.keys()),
[
"author",
"author_foo",
"django_ct",
"django_id",
"id",
"pub_date",
"pub_date_exact",
"text",
],
)
prepared_data = self.gfmsi.full_prepare(mock)
self.assertEqual(len(prepared_data), 8)
self.assertEqual(
sorted(prepared_data.keys()),
[
"author",
"author_foo",
"django_ct",
"django_id",
"id",
"pub_date",
"pub_date_exact",
"text",
],
)
self.assertEqual(prepared_data["author_foo"], "Hi, I'm daniel")
self.assertEqual(prepared_data["pub_date_exact"], "2010-10-26T01:54:32")
| SearchIndexTestCase |
python | django__django | django/contrib/messages/middleware.py | {
"start": 148,
"end": 986
} | class ____(MiddlewareMixin):
"""
Middleware that handles temporary messages.
"""
def process_request(self, request):
request._messages = default_storage(request)
def process_response(self, request, response):
"""
Update the storage backend (i.e., save the messages).
Raise ValueError if not all messages could be stored and DEBUG is True.
"""
# A higher middleware layer may return a request which does not contain
# messages storage, so make no assumption that it will be there.
if hasattr(request, "_messages"):
unstored_messages = request._messages.update(response)
if unstored_messages and settings.DEBUG:
raise ValueError("Not all temporary messages could be stored.")
return response
| MessageMiddleware |
python | neetcode-gh__leetcode | python/0295-find-median-from-data-stream.py | {
"start": 0,
"end": 1020
} | class ____:
def __init__(self):
"""
initialize your data structure here.
"""
# two heaps: small is a max-heap, large is a min-heap
# their sizes should differ by at most one
self.small, self.large = [], [] # maxHeap, minHeap (python default)
def addNum(self, num: int) -> None:
if self.large and num > self.large[0]:
heapq.heappush(self.large, num)
else:
heapq.heappush(self.small, -1 * num)
if len(self.small) > len(self.large) + 1:
val = -1 * heapq.heappop(self.small)
heapq.heappush(self.large, val)
if len(self.large) > len(self.small) + 1:
val = heapq.heappop(self.large)
heapq.heappush(self.small, -1 * val)
def findMedian(self) -> float:
if len(self.small) > len(self.large):
return -1 * self.small[0]
elif len(self.large) > len(self.small):
return self.large[0]
return (-1 * self.small[0] + self.large[0]) / 2.0
| MedianFinder |
python | scipy__scipy | scipy/optimize/tests/test_constraints.py | {
"start": 6178,
"end": 8016
} | class ____:
def test_repr(self):
# so that eval works
from numpy import array, inf # noqa: F401
for args in (
(-1.0, 5.0),
(-1.0, np.inf, True),
(np.array([1.0, -np.inf]), np.array([2.0, np.inf])),
(np.array([1.0, -np.inf]), np.array([2.0, np.inf]),
np.array([True, False])),
):
bounds = Bounds(*args)
bounds2 = eval(repr(Bounds(*args)))
assert_array_equal(bounds.lb, bounds2.lb)
assert_array_equal(bounds.ub, bounds2.ub)
assert_array_equal(bounds.keep_feasible, bounds2.keep_feasible)
def test_array(self):
# gh13501
b = Bounds(lb=[0.0, 0.0], ub=[1.0, 1.0])
assert isinstance(b.lb, np.ndarray)
assert isinstance(b.ub, np.ndarray)
def test_defaults(self):
b1 = Bounds()
b2 = Bounds(np.asarray(-np.inf), np.asarray(np.inf))
assert b1.lb == b2.lb
assert b1.ub == b2.ub
def test_input_validation(self):
message = "Lower and upper bounds must be dense arrays."
with pytest.raises(ValueError, match=message):
Bounds(sps.coo_array([1, 2]), [1, 2])
with pytest.raises(ValueError, match=message):
Bounds([1, 2], sps.coo_array([1, 2]))
message = "`keep_feasible` must be a dense array."
with pytest.raises(ValueError, match=message):
Bounds([1, 2], [1, 2], keep_feasible=sps.coo_array([True, True]))
message = "`lb`, `ub`, and `keep_feasible` must be broadcastable."
with pytest.raises(ValueError, match=message):
Bounds([1, 2], [1, 2, 3])
def test_residual(self):
bounds = Bounds(-2, 4)
x0 = [-1, 2]
np.testing.assert_allclose(bounds.residual(x0), ([1, 4], [5, 2]))
| TestBounds |
python | doocs__leetcode | solution/0400-0499/0473.Matchsticks to Square/Solution2.py | {
"start": 0,
"end": 618
} | class ____:
def makesquare(self, matchsticks: List[int]) -> bool:
@cache
def dfs(state, t):
if state == (1 << len(matchsticks)) - 1:
return True
for i, v in enumerate(matchsticks):
if state & (1 << i):
continue
if t + v > s:
break
if dfs(state | (1 << i), (t + v) % s):
return True
return False
s, mod = divmod(sum(matchsticks), 4)
matchsticks.sort()
if mod:
return False
return dfs(0, 0)
| Solution |
python | doocs__leetcode | solution/0000-0099/0056.Merge Intervals/Solution2.py | {
"start": 0,
"end": 324
} | class ____:
def merge(self, intervals: List[List[int]]) -> List[List[int]]:
intervals.sort()
ans = [intervals[0]]
for s, e in intervals[1:]:
if ans[-1][1] < s:
ans.append([s, e])
else:
ans[-1][1] = max(ans[-1][1], e)
return ans
| Solution |
python | gevent__gevent | src/gevent/_fileobjectcommon.py | {
"start": 16136,
"end": 19872
} | class ____(object):
"""
Internal base class to ensure a level of consistency
between :class:`~.FileObjectPosix`, :class:`~.FileObjectThread`
and :class:`~.FileObjectBlock`.
"""
# List of methods we delegate to the wrapping IO object, if they
# implement them and we do not.
_delegate_methods = (
# General methods
'flush',
'fileno',
'writable',
'readable',
'seek',
'seekable',
'tell',
# Read
'read',
'readline',
'readlines',
'read1',
'readinto',
# Write.
# Note that we do not extend WriteallMixin,
# so writeall will be copied, if it exists, and
# wrapped.
'write',
'writeall',
'writelines',
'truncate',
)
_io = None
def __init__(self, descriptor):
# type: (OpenDescriptor) -> None
self._io = descriptor.opened()
# We don't actually use this property ourselves, but we save it (and
# pass it along) for compatibility.
self._close = descriptor.closefd
self._do_delegate_methods()
io = property(lambda s: s._io,
# Historically we either hand-wrote all the delegation methods
# to use self.io, or we simply used __getattr__ to look them up at
# runtime. This meant people could change the io attribute on the fly
# and it would mostly work (subprocess.py used to do that). We don't recommend
# that, but we still support it.
lambda s, nv: setattr(s, '_io', nv) or s._do_delegate_methods())
def _do_delegate_methods(self):
for meth_name in self._delegate_methods:
meth = getattr(self._io, meth_name, None)
implemented_by_class = hasattr(type(self), meth_name)
if meth and not implemented_by_class:
setattr(self, meth_name, self._wrap_method(meth))
elif hasattr(self, meth_name) and not implemented_by_class:
delattr(self, meth_name)
def _wrap_method(self, method):
"""
Wrap a method we're copying into our dictionary from the underlying
io object to do something special or different, if necessary.
"""
return method
@property
def closed(self):
"""True if the file is closed"""
return isinstance(self._io, _ClosedIO)
def close(self):
if isinstance(self._io, _ClosedIO):
return
fobj = self._io
self._io = _ClosedIO(self._io)
try:
self._do_close(fobj, self._close)
finally:
fobj = None
# Remove delegate methods to drop remaining references to
# _io.
d = self.__dict__
for meth_name in self._delegate_methods:
d.pop(meth_name, None)
def _do_close(self, fobj, closefd):
raise NotImplementedError()
def __getattr__(self, name):
return getattr(self._io, name)
def __repr__(self):
return '<%s at 0x%x %s_fobj=%r%s>' % (
self.__class__.__name__,
id(self),
'closed' if self.closed else '',
self.io,
self._extra_repr()
)
def _extra_repr(self):
return ''
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __iter__(self):
return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
next = __next__
def __bool__(self):
return True
__nonzero__ = __bool__
| FileObjectBase |
python | ethereum__web3.py | tests/core/subscriptions/test_subscription_manager.py | {
"start": 603,
"end": 17866
} | class ____(PersistentConnectionProvider):
socket_recv = AsyncMock()
socket_send = AsyncMock()
@pytest_asyncio.fixture
async def subscription_manager():
countr = itertools.count()
w3 = AsyncWeb3(MockProvider())
w3.eth._subscribe = AsyncMock()
w3.eth._subscribe.side_effect = lambda *_: f"0x{str(next(countr))}"
w3.eth._unsubscribe = AsyncMock()
w3.eth._unsubscribe.return_value = True
yield w3.subscription_manager
def create_subscription_message(sub_id):
return cast(
RPCResponse,
{
"jsonrpc": "2.0",
"method": "eth_subscription",
"params": {"subscription": sub_id, "result": "0x0"},
},
)
@pytest.mark.asyncio
async def test_subscription_default_labels_are_unique(subscription_manager):
sub1 = NewHeadsSubscription()
sub2 = NewHeadsSubscription()
sub3 = NewHeadsSubscription()
sub4 = NewHeadsSubscription()
sub_ids = await subscription_manager.subscribe([sub1, sub2, sub3, sub4])
assert sub_ids == ["0x0", "0x1", "0x2", "0x3"]
assert sub1.label != sub2.label != sub3.label != sub4.label
assert sub1.label == "NewHeadsSubscription('newHeads',)"
assert sub2.label == "NewHeadsSubscription('newHeads',)#2"
assert sub3.label == "NewHeadsSubscription('newHeads',)#3"
assert sub4.label == "NewHeadsSubscription('newHeads',)#4"
# assert no issues unsubscribing
await subscription_manager.unsubscribe_all()
assert subscription_manager.subscriptions == []
assert subscription_manager._subscription_container.subscriptions == []
assert subscription_manager._subscription_container.subscriptions_by_id == {}
assert subscription_manager._subscription_container.subscriptions_by_label == {}
@pytest.mark.asyncio
async def test_subscription_manager_raises_for_new_subs_with_the_same_custom_label(
subscription_manager,
):
sub1 = NewHeadsSubscription(label="foo")
sub2 = LogsSubscription(label="foo")
with pytest.raises(
Web3ValueError,
match="Subscription label already exists. Subscriptions must have unique "
"labels.\n label: foo",
):
await subscription_manager.subscribe([sub1, sub2])
# make sure the subscription was subscribed to and not added to the manager
assert subscription_manager.subscriptions == [sub1]
sub_container = subscription_manager._subscription_container
assert len(sub_container) == 1
assert sub_container.subscriptions == [sub1]
assert sub_container.subscriptions_by_id == {"0x0": sub1}
assert sub_container.subscriptions_by_label == {"foo": sub1}
@pytest.mark.asyncio
async def test_subscription_manager_get_by_id(subscription_manager):
sub = NewHeadsSubscription(label="foo")
await subscription_manager.subscribe(sub)
assert subscription_manager.get_by_id("0x0") == sub
assert subscription_manager.get_by_id("0x1") is None
@pytest.mark.asyncio
async def test_subscription_manager_get_by_label(subscription_manager):
sub = NewHeadsSubscription(label="foo")
await subscription_manager.subscribe(sub)
assert subscription_manager.get_by_label("foo") == sub
assert subscription_manager.get_by_label("bar") is None
@pytest.mark.asyncio
async def test_unsubscribe_one_by_one_clears_all_subscriptions(
subscription_manager,
):
sub1 = NewHeadsSubscription(label="foo")
sub2 = PendingTxSubscription(label="bar")
await subscription_manager.subscribe(sub1)
await subscription_manager.subscribe(sub2)
await subscription_manager.unsubscribe(sub1)
assert subscription_manager.subscriptions == [sub2]
await subscription_manager.unsubscribe(sub2)
assert subscription_manager.subscriptions == []
@pytest.mark.asyncio
async def test_unsubscribe_all_clears_all_subscriptions(subscription_manager):
sub1 = NewHeadsSubscription(label="foo")
sub2 = PendingTxSubscription(label="bar")
await subscription_manager.subscribe([sub1, sub2])
assert subscription_manager.subscriptions == [sub1, sub2]
await subscription_manager.unsubscribe_all()
assert subscription_manager.subscriptions == []
sub_container = subscription_manager._subscription_container
assert len(sub_container) == 0
assert sub_container.subscriptions == []
assert sub_container.subscriptions_by_id == {}
assert sub_container.subscriptions_by_label == {}
@pytest.mark.asyncio
async def test_unsubscribe_with_one_or_multiple(subscription_manager):
sub1 = NewHeadsSubscription()
sub2 = PendingTxSubscription()
sub3 = NewHeadsSubscription()
sub4 = LogsSubscription()
sub5 = NewHeadsSubscription()
sub6 = PendingTxSubscription()
(
sub_id1,
sub_id2,
sub_id3,
sub_id4,
sub_id5,
sub_id6,
) = await subscription_manager.subscribe([sub1, sub2, sub3, sub4, sub5, sub6])
assert sub_id1 == "0x0"
assert sub_id2 == "0x1"
assert sub_id3 == "0x2"
assert sub_id4 == "0x3"
assert sub_id5 == "0x4"
assert sub_id6 == "0x5"
assert subscription_manager.subscriptions == [sub1, sub2, sub3, sub4, sub5, sub6]
# unsubscribe single by hex id
assert await subscription_manager.unsubscribe(sub_id1) is True
assert subscription_manager.subscriptions == [sub2, sub3, sub4, sub5, sub6]
# unsubscribe many by hex id
assert await subscription_manager.unsubscribe([sub_id2, sub_id3]) is True
assert subscription_manager.subscriptions == [sub4, sub5, sub6]
# unsubscribe non-existent hex id
with pytest.raises(Web3ValueError, match=f"Subscription not found|{0x7}"):
await subscription_manager.unsubscribe(HexStr("0x7"))
# unsubscribe by subscription object
assert await subscription_manager.unsubscribe(sub4) is True
assert subscription_manager.subscriptions == [sub5, sub6]
# unsubscribe many by subscription object
assert await subscription_manager.unsubscribe([sub5, sub6]) is True
assert subscription_manager.subscriptions == []
# unsubscribe non-existent subscription object
with pytest.raises(Web3ValueError, match=f"Subscription not found|{sub5.id}"):
await subscription_manager.unsubscribe(sub5)
@pytest.mark.asyncio
async def test_unsubscribe_with_subscriptions_reference_does_not_mutate_the_list(
subscription_manager,
):
sub1 = NewHeadsSubscription()
sub2 = LogsSubscription()
sub3 = PendingTxSubscription()
sub4 = NewHeadsSubscription()
await subscription_manager.subscribe([sub1, sub2, sub3, sub4])
assert subscription_manager.subscriptions == [sub1, sub2, sub3, sub4]
# assert not mutating in place
await subscription_manager.unsubscribe(subscription_manager.subscriptions)
assert subscription_manager.subscriptions == []
# via unsubscribe all
await subscription_manager.subscribe([sub1, sub2, sub3, sub4])
assert subscription_manager.subscriptions == [sub1, sub2, sub3, sub4]
await subscription_manager.unsubscribe_all()
assert subscription_manager.subscriptions == []
@pytest.mark.asyncio
async def test_high_throughput_subscription_with_parallelize(
subscription_manager,
) -> None:
provider = subscription_manager._w3.provider
num_msgs = 5_000
provider._request_processor._handler_subscription_queue = TaskReliantQueue(
maxsize=num_msgs
)
# Turn on task-based processing. This test should fail the time constraint if this
# is not set to ``True`` (not task-based processing).
subscription_manager.parallelize = True
class Counter:
val: int = 0
counter = Counter()
async def high_throughput_handler(handler_context) -> None:
# if we awaited all `num_msgs`, we would sleep at least 5 seconds total
await asyncio.sleep(5 / num_msgs)
handler_context.counter.val += 1
if handler_context.counter.val == num_msgs:
await handler_context.subscription.unsubscribe()
# build a meaningless subscription since we are fabricating the messages
sub_id = await subscription_manager.subscribe(
NewHeadsSubscription(
handler=high_throughput_handler, handler_context={"counter": counter}
),
)
provider._request_processor.cache_request_information(
request_id=sub_id,
method="eth_subscribe",
params=[],
response_formatters=((), (), ()),
)
# put `num_msgs` messages in the queue
for _ in range(num_msgs):
provider._request_processor._handler_subscription_queue.put_nowait(
create_subscription_message(sub_id)
)
start = time.time()
await subscription_manager.handle_subscriptions()
stop = time.time()
assert counter.val == num_msgs
assert subscription_manager.total_handler_calls == num_msgs
assert stop - start < 3
@pytest.mark.asyncio
async def test_parallelize_with_error_propagation(
subscription_manager,
) -> None:
provider = subscription_manager._w3.provider
subscription_manager.parallelize = True
async def high_throughput_handler(_handler_context) -> None:
raise ValueError("Test error msg.")
# build a meaningless subscription since we are fabricating the messages
sub_id = await subscription_manager.subscribe(
NewHeadsSubscription(handler=high_throughput_handler)
)
provider._request_processor.cache_request_information(
request_id=sub_id,
method="eth_subscribe",
params=[],
response_formatters=((), (), ()),
)
provider._request_processor._handler_subscription_queue.put_nowait(
create_subscription_message(sub_id)
)
with pytest.raises(
SubscriptionHandlerTaskException,
match="Test error msg.",
):
await subscription_manager.handle_subscriptions()
@pytest.mark.asyncio
async def test_subscription_parallelize_false_overrides_manager_true(
subscription_manager,
) -> None:
provider = subscription_manager._w3.provider
subscription_manager.parallelize = True # manager parallelizing
async def test_handler(context) -> None:
# assert not parallelized
assert context.subscription.parallelize is False
assert subscription_manager._tasks == set()
await context.subscription.unsubscribe()
sub_id = await subscription_manager.subscribe(
# parallelize=False overrides manager's parallelization setting
NewHeadsSubscription(handler=test_handler, parallelize=False)
)
provider._request_processor.cache_request_information(
request_id=sub_id,
method="eth_subscribe",
params=[],
response_formatters=((), (), ()),
)
provider._request_processor._handler_subscription_queue.put_nowait(
create_subscription_message(sub_id)
)
await subscription_manager.handle_subscriptions()
assert subscription_manager.total_handler_calls == 1
@pytest.mark.asyncio
async def test_subscription_parallelize_true_overrides_manager_default_false(
subscription_manager,
) -> None:
provider = subscription_manager._w3.provider
assert subscription_manager.parallelize is False
async def test_handler(context) -> None:
# check that the subscription is parallelized
assert context.subscription.parallelize is True
assert len(context.async_w3.subscription_manager._tasks) == 1
assert asyncio.current_task() in context.async_w3.subscription_manager._tasks
await context.subscription.unsubscribe()
sub_id = await subscription_manager.subscribe(
NewHeadsSubscription(handler=test_handler, parallelize=True)
)
provider._request_processor.cache_request_information(
request_id=sub_id,
method="eth_subscribe",
params=[],
response_formatters=((), (), ()),
)
provider._request_processor._handler_subscription_queue.put_nowait(
create_subscription_message(sub_id)
)
await subscription_manager.handle_subscriptions()
assert subscription_manager.total_handler_calls == 1
assert len(subscription_manager._tasks) == 0 # assert cleaned up
@pytest.mark.asyncio
async def test_mixed_subscription_parallelization_settings(
subscription_manager,
) -> None:
provider = subscription_manager._w3.provider
subscription_manager.parallelize = True # manager wants parallel
completion_order = []
async def fast_parallel_handler(_ctx) -> None:
await asyncio.sleep(0.05)
completion_order.append("fast_parallel")
assert asyncio.current_task() in subscription_manager._tasks
async def slow_sequential_handler(_ctx) -> None:
await asyncio.sleep(0.15)
completion_order.append("slow_sequential")
assert asyncio.current_task() not in subscription_manager._tasks
async def medium_default_handler(_ctx) -> None:
await asyncio.sleep(0.10)
completion_order.append("medium_default")
assert asyncio.current_task() in subscription_manager._tasks
# we assume this should be the last task to complete so unsubscribe only here
await subscription_manager.unsubscribe_all()
# subscriptions with different settings
fast_sub_id = await subscription_manager.subscribe(
NewHeadsSubscription(handler=fast_parallel_handler, parallelize=True)
)
slow_sub_id = await subscription_manager.subscribe(
NewHeadsSubscription(handler=slow_sequential_handler, parallelize=False)
)
medium_sub_id = await subscription_manager.subscribe(
# uses the manager default parallelization setting (True)
NewHeadsSubscription(handler=medium_default_handler)
)
for sub_id in {slow_sub_id, fast_sub_id, medium_sub_id}:
provider._request_processor.cache_request_information(
request_id=sub_id,
method="eth_subscribe",
params=[],
response_formatters=((), (), ()),
)
# send messages in order of slow (but main loop), fast (parallel), medium (parallel)
for sub_id in [slow_sub_id, fast_sub_id, medium_sub_id]:
provider._request_processor._handler_subscription_queue.put_nowait(
create_subscription_message(sub_id)
)
await subscription_manager.handle_subscriptions()
# `slow_sequential` should complete first despite taking longest because it
# blocks the main loop. The next two run in parallel after, so the fastest of the
# two should complete next, leaving the `medium_default` last.
assert len(completion_order) == 3
assert completion_order[0] == "slow_sequential"
assert "fast_parallel" == completion_order[1]
assert "medium_default" == completion_order[2]
@pytest.mark.asyncio
async def test_performance_difference_with_subscription_overrides(
subscription_manager,
) -> None:
provider = subscription_manager._w3.provider
assert subscription_manager.parallelize is False
manager_tasks = subscription_manager._tasks
async def parallel_handler(_ctx) -> None:
await asyncio.sleep(0.1)
assert asyncio.current_task() in manager_tasks
if len(manager_tasks) >= 3:
await subscription_manager.unsubscribe_all()
# create 3 subscriptions, override all to parallel despite manager default False
for _ in range(3):
sub_id = await subscription_manager.subscribe(
NewHeadsSubscription(handler=parallel_handler, parallelize=True)
)
provider._request_processor.cache_request_information(
request_id=sub_id,
method="eth_subscribe",
params=[],
response_formatters=((), (), ()),
)
provider._request_processor._handler_subscription_queue.put_nowait(
create_subscription_message(sub_id)
)
await subscription_manager.handle_subscriptions()
assert subscription_manager.total_handler_calls == 3
assert len(manager_tasks) == 0 # all tasks cleaned up
@pytest.mark.asyncio
async def test_eth_subscribe_api_call_with_all_kwargs(subscription_manager):
async_w3 = subscription_manager._w3
provider = subscription_manager._w3.provider
label = "test_subscription"
test_ctx = "test context"
async def parallel_handler(context) -> None:
assert asyncio.current_task() in subscription_manager._tasks
assert context.test_ctx == test_ctx
sub = subscription_manager.get_by_id(context.subscription.id)
assert sub.label == label
await context.subscription.unsubscribe()
sub_id = await async_w3.eth.subscribe(
"newHeads",
handler=parallel_handler,
handler_context={"test_ctx": test_ctx},
label=label,
parallelize=True,
)
provider._request_processor.cache_request_information(
request_id=sub_id,
method="eth_subscribe",
params=[],
response_formatters=((), (), ()),
)
provider._request_processor._handler_subscription_queue.put_nowait(
create_subscription_message(sub_id)
)
await subscription_manager.handle_subscriptions()
assert subscription_manager.total_handler_calls == 1
assert len(async_w3.subscription_manager._tasks) == 0
| MockProvider |
python | pypa__hatch | tests/backend/builders/test_binary.py | {
"start": 2008,
"end": 4836
} | class ____:
def test_unset(self, isolation):
config = {"project": {"name": "My.App", "version": "0.1.0"}}
builder = BinaryBuilder(str(isolation), config=config)
assert builder.config.scripts == builder.config.scripts == []
def test_default(self, isolation):
config = {
"project": {
"name": "My.App",
"version": "0.1.0",
"scripts": {"b": "foo.bar.baz:cli", "a": "baz.bar.foo:cli"},
}
}
builder = BinaryBuilder(str(isolation), config=config)
assert builder.config.scripts == ["a", "b"]
def test_specific(self, isolation):
config = {
"project": {
"name": "My.App",
"version": "0.1.0",
"scripts": {"b": "foo.bar.baz:cli", "a": "baz.bar.foo:cli"},
},
"tool": {"hatch": {"build": {"targets": {"binary": {"scripts": ["a", "a"]}}}}},
}
builder = BinaryBuilder(str(isolation), config=config)
assert builder.config.scripts == ["a"]
def test_not_array(self, isolation):
config = {
"project": {
"name": "My.App",
"version": "0.1.0",
"scripts": {"b": "foo.bar.baz:cli", "a": "baz.bar.foo:cli"},
},
"tool": {"hatch": {"build": {"targets": {"binary": {"scripts": 9000}}}}},
}
builder = BinaryBuilder(str(isolation), config=config)
with pytest.raises(TypeError, match="Field `tool.hatch.build.targets.binary.scripts` must be an array"):
_ = builder.config.scripts
def test_script_not_string(self, isolation):
config = {
"project": {
"name": "My.App",
"version": "0.1.0",
"scripts": {"b": "foo.bar.baz:cli", "a": "baz.bar.foo:cli"},
},
"tool": {"hatch": {"build": {"targets": {"binary": {"scripts": [9000]}}}}},
}
builder = BinaryBuilder(str(isolation), config=config)
with pytest.raises(
TypeError, match="Script #1 of field `tool.hatch.build.targets.binary.scripts` must be a string"
):
_ = builder.config.scripts
def test_unknown_script(self, isolation):
config = {
"project": {
"name": "My.App",
"version": "0.1.0",
"scripts": {"b": "foo.bar.baz:cli", "a": "baz.bar.foo:cli"},
},
"tool": {"hatch": {"build": {"targets": {"binary": {"scripts": ["c"]}}}}},
}
builder = BinaryBuilder(str(isolation), config=config)
with pytest.raises(ValueError, match="Unknown script in field `tool.hatch.build.targets.binary.scripts`: c"):
_ = builder.config.scripts
| TestScripts |
python | scikit-image__scikit-image | src/skimage/util/_map_array.py | {
"start": 2588,
"end": 6686
} | class ____:
"""Class designed to mimic mapping by NumPy array indexing.
This class is designed to replicate the use of NumPy arrays for mapping
values with indexing:
>>> values = np.array([0.25, 0.5, 1.0])
>>> indices = np.array([[0, 0, 1], [2, 2, 1]])
>>> values[indices]
array([[0.25, 0.25, 0.5 ],
[1. , 1. , 0.5 ]])
The issue with this indexing is that you need a very large ``values``
array if the values in the ``indices`` array are large.
>>> values = np.array([0.25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0])
>>> indices = np.array([[0, 0, 10], [0, 10, 10]])
>>> values[indices]
array([[0.25, 0.25, 1. ],
[0.25, 1. , 1. ]])
Using this class, the approach is similar, but there is no need to
create a large values array:
>>> in_indices = np.array([0, 10])
>>> out_values = np.array([0.25, 1.0])
>>> values = ArrayMap(in_indices, out_values)
>>> values
ArrayMap(array([ 0, 10]), array([0.25, 1. ]))
>>> print(values)
ArrayMap:
0 → 0.25
10 → 1.0
>>> indices = np.array([[0, 0, 10], [0, 10, 10]])
>>> values[indices]
array([[0.25, 0.25, 1. ],
[0.25, 1. , 1. ]])
Parameters
----------
in_values : array of int, shape (K,)
The source values from which to map.
out_values : array, shape (K,)
        The destination values to which to map.
"""
def __init__(self, in_values, out_values):
self.in_values = in_values
self.out_values = out_values
self._max_str_lines = 4
self._array = None
def __len__(self):
"""Return one more than the maximum label value being remapped."""
return np.max(self.in_values) + 1
def __array__(self, dtype=None, copy=None):
"""Return an array that behaves like the arraymap when indexed.
This array can be very large: it is the size of the largest value
        in the ``in_values`` array, plus one.
"""
if dtype is None:
dtype = self.out_values.dtype
output = np.zeros(np.max(self.in_values) + 1, dtype=dtype)
output[self.in_values] = self.out_values
return output
@property
def dtype(self):
return self.out_values.dtype
def __repr__(self):
return f'ArrayMap({repr(self.in_values)}, {repr(self.out_values)})'
def __str__(self):
if len(self.in_values) <= self._max_str_lines + 1:
rows = range(len(self.in_values))
string = '\n'.join(
['ArrayMap:']
+ [f' {self.in_values[i]} → {self.out_values[i]}' for i in rows]
)
else:
rows0 = list(range(0, self._max_str_lines // 2))
rows1 = list(range(-self._max_str_lines // 2, 0))
string = '\n'.join(
['ArrayMap:']
+ [f' {self.in_values[i]} → {self.out_values[i]}' for i in rows0]
+ [' ...']
+ [f' {self.in_values[i]} → {self.out_values[i]}' for i in rows1]
)
return string
def __call__(self, arr):
return self.__getitem__(arr)
def __getitem__(self, index):
scalar = np.isscalar(index)
if scalar:
index = np.array([index])
elif isinstance(index, slice):
start = index.start or 0 # treat None or 0 the same way
stop = index.stop if index.stop is not None else len(self)
step = index.step
index = np.arange(start, stop, step)
if index.dtype == bool:
index = np.flatnonzero(index)
out = map_array(
index,
self.in_values.astype(index.dtype, copy=False),
self.out_values,
)
if scalar:
out = out[0]
return out
def __setitem__(self, indices, values):
if self._array is None:
self._array = self.__array__()
self._array[indices] = values
self.in_values = np.flatnonzero(self._array)
self.out_values = self._array[self.in_values]
| ArrayMap |
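A small supplement to the doctests in the docstring above: the in-place update path (`__setitem__`) is not exercised there. A sketch, assuming `ArrayMap` is importable from the (private) module path shown in this row:

import numpy as np

from skimage.util._map_array import ArrayMap  # private module path taken from this row

m = ArrayMap(np.array([1, 2, 5]), np.array([0.1, 0.2, 0.5]))
m[2] = 0.9                       # materializes a dense array once, then rebuilds in/out values
print(m[np.array([1, 2, 5])])    # [0.1 0.9 0.5]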
python | redis__redis-py | redis/multidb/failure_detector.py | {
"start": 338,
"end": 841
} | class ____(ABC):
@abstractmethod
def register_failure(self, exception: Exception, cmd: tuple) -> None:
"""Register a failure that occurred during command execution."""
pass
@abstractmethod
def register_command_execution(self, cmd: tuple) -> None:
"""Register a command execution."""
pass
@abstractmethod
def set_command_executor(self, command_executor) -> None:
"""Set the command executor for this failure."""
pass
| FailureDetector |
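A minimal concrete subclass sketch of the abstract interface above; the counting logic is illustrative only and is not redis-py's actual detector implementation:

class CountingFailureDetector(FailureDetector):
    """Toy detector that only counts executions and failures."""

    def __init__(self) -> None:
        self._executions = 0
        self._failures = 0
        self._command_executor = None

    def register_failure(self, exception: Exception, cmd: tuple) -> None:
        self._failures += 1

    def register_command_execution(self, cmd: tuple) -> None:
        self._executions += 1

    def set_command_executor(self, command_executor) -> None:
        self._command_executor = command_executor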
python | donnemartin__interactive-coding-challenges | recursion_dynamic/coin_change/test_coin_change.py | {
"start": 18,
"end": 479
} | class ____(unittest.TestCase):
def test_coin_change(self):
coin_changer = CoinChanger()
self.assertEqual(coin_changer.make_change([1, 2], 0), 0)
self.assertEqual(coin_changer.make_change([1, 2, 3], 5), 5)
self.assertEqual(coin_changer.make_change([1, 5, 25, 50], 10), 3)
print('Success: test_coin_change')
def main():
test = Challenge()
test.test_coin_change()
if __name__ == '__main__':
main()
| Challenge |
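The `CoinChanger` class under test is not included in this row. One possible implementation consistent with the expected values above (counting coin combinations, with the zero-total case special-cased to 0) is:

class CoinChanger:

    def make_change(self, coins, total):
        if total == 0:
            return 0  # the first assertion above expects 0 for a zero total
        # ways[t] = number of coin combinations that sum to t
        ways = [1] + [0] * total
        for coin in coins:
            for t in range(coin, total + 1):
                ways[t] += ways[t - coin]
        return ways[total]

Iterating coins in the outer loop counts combinations rather than permutations, which matches make_change([1, 2, 3], 5) == 5.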
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1149550,
"end": 1149816
} | class ____(VegaLiteSchema):
"""ScaleInvalidDataShowAsxOffset schema wrapper."""
_schema = {"$ref": '#/definitions/ScaleInvalidDataShowAs<"xOffset">'}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| ScaleInvalidDataShowAsxOffset |
python | sphinx-doc__sphinx | sphinx/ext/autodoc/_legacy_class_based/_documenters.py | {
"start": 82761,
"end": 93748
} | class ____(DocstringSignatureMixin, ClassLevelDocumenter): # type: ignore[misc]
"""Specialized Documenter subclass for methods (normal, static and class)."""
objtype = 'method'
directivetype = 'method'
member_order = 50
priority = 1 # must be more than FunctionDocumenter
@classmethod
def can_document_member(
cls: type[Documenter], member: Any, membername: str, isattr: bool, parent: Any
) -> bool:
return inspect.isroutine(member) and not isinstance(parent, ModuleDocumenter)
def import_object(self, raiseerror: bool = False) -> bool:
ret = super().import_object(raiseerror)
if not ret:
return ret
# to distinguish classmethod/staticmethod
obj = self.parent.__dict__.get(self.object_name, self.object)
if inspect.isstaticmethod(obj, cls=self.parent, name=self.object_name):
# document static members before regular methods
self.member_order -= 1 # type: ignore[misc]
elif inspect.isclassmethod(obj):
# document class methods before static methods as
# they usually behave as alternative constructors
self.member_order -= 2 # type: ignore[misc]
return ret
def format_args(self, **kwargs: Any) -> str:
if self.config.autodoc_typehints in {'none', 'description'}:
kwargs.setdefault('show_annotation', False)
if self.config.autodoc_typehints_format == 'short':
kwargs.setdefault('unqualified_typehints', True)
if self.config.python_display_short_literal_types:
kwargs.setdefault('short_literals', True)
try:
if self.object == object.__init__ and self.parent != object: # NoQA: E721
# Classes not having own __init__() method are shown as no arguments.
#
# Note: The signature of object.__init__() is (self, /, *args, **kwargs).
# But it makes users confused.
args = '()'
else:
if inspect.isstaticmethod(
self.object, cls=self.parent, name=self.object_name
):
self._events.emit(
'autodoc-before-process-signature', self.object, False
)
sig = inspect.signature(
self.object,
bound_method=False,
type_aliases=self.config.autodoc_type_aliases,
)
else:
self._events.emit(
'autodoc-before-process-signature', self.object, True
)
sig = inspect.signature(
self.object,
bound_method=True,
type_aliases=self.config.autodoc_type_aliases,
)
args = stringify_signature(sig, **kwargs)
except TypeError as exc:
logger.warning(
__('Failed to get a method signature for %s: %s'), self.fullname, exc
)
return ''
except ValueError:
args = ''
if self.config.strip_signature_backslash:
# escape backslashes for reST
args = args.replace('\\', '\\\\')
return args
def add_directive_header(self, sig: str) -> None:
super().add_directive_header(sig)
sourcename = self.get_sourcename()
obj = self.parent.__dict__.get(self.object_name, self.object)
if inspect.isabstractmethod(obj):
self.add_line(' :abstractmethod:', sourcename)
if inspect.iscoroutinefunction(obj) or inspect.isasyncgenfunction(obj):
self.add_line(' :async:', sourcename)
if (
inspect.is_classmethod_like(obj)
or inspect.is_singledispatch_method(obj)
and inspect.is_classmethod_like(obj.func)
):
self.add_line(' :classmethod:', sourcename)
if inspect.isstaticmethod(obj, cls=self.parent, name=self.object_name):
self.add_line(' :staticmethod:', sourcename)
if self.analyzer and '.'.join(self.objpath) in self.analyzer.finals:
self.add_line(' :final:', sourcename)
def document_members(self, all_members: bool = False) -> None:
pass
def format_signature(self, **kwargs: Any) -> str:
if self.config.autodoc_typehints_format == 'short':
kwargs.setdefault('unqualified_typehints', True)
if self.config.python_display_short_literal_types:
kwargs.setdefault('short_literals', True)
sigs = []
if (
self.analyzer
and '.'.join(self.objpath) in self.analyzer.overloads
and self.config.autodoc_typehints != 'none'
):
# Use signatures for overloaded methods instead of the implementation method.
overloaded = True
else:
overloaded = False
sig = super().format_signature(**kwargs)
sigs.append(sig)
meth = self.parent.__dict__.get(self.objpath[-1])
if inspect.is_singledispatch_method(meth):
# append signature of singledispatch'ed functions
for typ, func in meth.dispatcher.registry.items():
if typ is object:
pass # default implementation. skipped.
else:
if inspect.isclassmethod(func):
func = func.__func__
dispatchmeth = self.annotate_to_first_argument(func, typ)
if dispatchmeth:
documenter = MethodDocumenter(self.directive, '')
documenter.parent = self.parent
documenter.object = dispatchmeth
documenter.objpath = ['']
sigs.append(documenter.format_signature())
if overloaded and self.analyzer is not None:
if inspect.isstaticmethod(
self.object, cls=self.parent, name=self.object_name
):
actual = inspect.signature(
self.object,
bound_method=False,
type_aliases=self.config.autodoc_type_aliases,
)
else:
actual = inspect.signature(
self.object,
bound_method=True,
type_aliases=self.config.autodoc_type_aliases,
)
__globals__ = safe_getattr(self.object, '__globals__', {})
for overload in self.analyzer.overloads['.'.join(self.objpath)]:
overload = self.merge_default_value(actual, overload)
overload = evaluate_signature(
overload, __globals__, self.config.autodoc_type_aliases
)
if not inspect.isstaticmethod(
self.object, cls=self.parent, name=self.object_name
):
parameters = list(overload.parameters.values())
overload = overload.replace(parameters=parameters[1:])
sig = stringify_signature(overload, **kwargs)
sigs.append(sig)
return '\n'.join(sigs)
def merge_default_value(self, actual: Signature, overload: Signature) -> Signature:
"""Merge default values of actual implementation to the overload variants."""
parameters = list(overload.parameters.values())
for i, param in enumerate(parameters):
actual_param = actual.parameters.get(param.name)
if actual_param and param.default == '...':
parameters[i] = param.replace(default=actual_param.default)
return overload.replace(parameters=parameters)
def annotate_to_first_argument(
self, func: Callable[..., Any], typ: type
) -> Callable[..., Any] | None:
"""Annotate type hint to the first argument of function if needed."""
try:
sig = inspect.signature(func, type_aliases=self.config.autodoc_type_aliases)
except TypeError as exc:
logger.warning(
__('Failed to get a method signature for %s: %s'), self.fullname, exc
)
return None
except ValueError:
return None
if len(sig.parameters) == 1:
return None
def dummy(): # type: ignore[no-untyped-def] # NoQA: ANN202
pass
params = list(sig.parameters.values())
if params[1].annotation is Parameter.empty:
params[1] = params[1].replace(annotation=typ)
try:
dummy.__signature__ = sig.replace( # type: ignore[attr-defined]
parameters=params
)
return dummy
except (AttributeError, TypeError):
# failed to update signature (ex. built-in or extension types)
return None
return func
def get_doc(self) -> list[list[str]] | None:
if self._new_docstrings is not None:
# docstring already returned previously, then modified by
# `DocstringSignatureMixin`. Just return the previously-computed
# result, so that we don't lose the processing done by
# `DocstringSignatureMixin`.
return self._new_docstrings
if self.objpath[-1] == '__init__':
docstring = getdoc(
self.object,
self.get_attr,
self.config.autodoc_inherit_docstrings,
self.parent,
self.object_name,
)
if docstring is not None and (
docstring == object.__init__.__doc__ # for pypy
or docstring.strip() == object.__init__.__doc__ # for !pypy
):
docstring = None
if docstring:
tab_width = self.directive.state.document.settings.tab_width
return [prepare_docstring(docstring, tabsize=tab_width)]
else:
return []
elif self.objpath[-1] == '__new__':
docstring = getdoc(
self.object,
self.get_attr,
self.config.autodoc_inherit_docstrings,
self.parent,
self.object_name,
)
if docstring is not None and (
docstring == object.__new__.__doc__ # for pypy
or docstring.strip() == object.__new__.__doc__ # for !pypy
):
docstring = None
if docstring:
tab_width = self.directive.state.document.settings.tab_width
return [prepare_docstring(docstring, tabsize=tab_width)]
else:
return []
else:
return super().get_doc()
| MethodDocumenter |
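The documenter above reads several autodoc settings (`autodoc_typehints`, `autodoc_typehints_format`, `autodoc_type_aliases`, `python_display_short_literal_types`). A sketch of how these might be set in a Sphinx conf.py, using only option names visible in the code; the chosen values are illustrative and should be checked against the Sphinx documentation:

# conf.py (sketch)
extensions = ["sphinx.ext.autodoc"]

autodoc_typehints = "description"      # move type hints out of the signature
autodoc_typehints_format = "short"     # unqualified type names in signatures
autodoc_type_aliases = {"ArrayLike": "numpy.typing.ArrayLike"}  # illustrative alias
python_display_short_literal_types = True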
python | PrefectHQ__prefect | tests/server/concurrency/test_filesystem_lease_storage.py | {
"start": 393,
"end": 20585
} | class ____:
@pytest.fixture
def temp_dir(self):
with tempfile.TemporaryDirectory() as temp_dir:
yield Path(temp_dir)
@pytest.fixture
def storage(self, temp_dir: Path) -> ConcurrencyLeaseStorage:
return ConcurrencyLeaseStorage(storage_path=temp_dir)
@pytest.fixture
def sample_resource_ids(self) -> list[UUID]:
return [uuid4(), uuid4()]
@pytest.fixture
def sample_metadata(self) -> ConcurrencyLimitLeaseMetadata:
return ConcurrencyLimitLeaseMetadata(slots=5)
@pytest.fixture
def sample_metadata_with_holder(self) -> ConcurrencyLimitLeaseMetadata:
return ConcurrencyLimitLeaseMetadata(
slots=3,
holder=ConcurrencyLeaseHolder(type="task_run", id=uuid4()),
)
async def test_create_lease_without_metadata(
self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
):
ttl = timedelta(minutes=5)
lease = await storage.create_lease(sample_resource_ids, ttl)
assert lease.resource_ids == sample_resource_ids
assert lease.metadata is None
# Verify lease file was created (excluding expiration index)
lease_files = [
f
for f in storage.storage_path.glob("*.json")
if f.name != "expirations.json"
]
assert len(lease_files) == 1
async def test_create_lease_with_metadata(
self,
storage: ConcurrencyLeaseStorage,
sample_resource_ids: list[UUID],
sample_metadata: ConcurrencyLimitLeaseMetadata,
):
ttl = timedelta(minutes=5)
lease = await storage.create_lease(sample_resource_ids, ttl, sample_metadata)
assert lease.resource_ids == sample_resource_ids
assert lease.metadata == sample_metadata
# Verify lease file was created with correct data (excluding expiration index)
lease_files = [
f
for f in storage.storage_path.glob("*.json")
if f.name != "expirations.json"
]
assert len(lease_files) == 1
with open(lease_files[0], "r") as f:
data = json.load(f)
assert data["metadata"]["slots"] == 5
assert len(data["resource_ids"]) == 2
async def test_create_lease_with_holder(
self,
storage: ConcurrencyLeaseStorage,
sample_resource_ids: list[UUID],
sample_metadata_with_holder: ConcurrencyLimitLeaseMetadata,
):
ttl = timedelta(minutes=5)
lease = await storage.create_lease(
sample_resource_ids, ttl, sample_metadata_with_holder
)
assert lease.resource_ids == sample_resource_ids
assert lease.metadata is not None
assert lease.metadata == sample_metadata_with_holder
assert lease.metadata.holder is not None
assert lease.metadata.holder.model_dump() == {
"type": "task_run",
"id": lease.metadata.holder.id,
}
# Verify lease file was created with correct data
lease_files = [
f
for f in storage.storage_path.glob("*.json")
if f.name != "expirations.json"
]
assert len(lease_files) == 1
with open(lease_files[0], "r") as f:
data = json.load(f)
assert data["metadata"]["slots"] == 3
assert data["metadata"]["holder"] == {
"type": "task_run",
"id": str(lease.metadata.holder.id),
}
async def test_read_lease_existing(
self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
):
ttl = timedelta(minutes=5)
await storage.create_lease(sample_resource_ids, ttl)
# Get the lease ID from the file (excluding expiration index)
lease_files = [
f
for f in storage.storage_path.glob("*.json")
if f.name != "expirations.json"
]
lease_id = UUID(lease_files[0].stem)
read_lease = await storage.read_lease(lease_id)
assert read_lease is not None
assert read_lease.resource_ids == sample_resource_ids
assert read_lease.metadata is None
async def test_read_lease_with_holder(
self,
storage: ConcurrencyLeaseStorage,
sample_resource_ids: list[UUID],
sample_metadata_with_holder: ConcurrencyLimitLeaseMetadata,
):
ttl = timedelta(minutes=5)
created_lease = await storage.create_lease(
sample_resource_ids, ttl, sample_metadata_with_holder
)
read_lease = await storage.read_lease(created_lease.id)
assert read_lease is not None
assert read_lease.resource_ids == sample_resource_ids
assert read_lease.metadata is not None
assert read_lease.metadata.slots == 3
assert read_lease.metadata.holder is not None
assert read_lease.metadata.holder.model_dump() == {
"type": "task_run",
"id": read_lease.metadata.holder.id,
}
async def test_read_lease_non_existing(self, storage: ConcurrencyLeaseStorage):
non_existing_id = uuid4()
lease = await storage.read_lease(non_existing_id)
assert lease is None
async def test_read_lease_expired(
self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
):
# Create an expired lease
expired_ttl = timedelta(seconds=-1)
await storage.create_lease(sample_resource_ids, expired_ttl)
# Get the lease ID from the file (excluding expiration index)
lease_files = [
f
for f in storage.storage_path.glob("*.json")
if f.name != "expirations.json"
]
lease_id = UUID(lease_files[0].stem)
        # Reading an expired lease should still return the lease
read_lease = await storage.read_lease(lease_id)
assert read_lease is not None
assert read_lease.expiration < datetime.now(timezone.utc)
async def test_read_lease_corrupted_file(
self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
):
# Create a valid lease first
ttl = timedelta(minutes=5)
await storage.create_lease(sample_resource_ids, ttl)
# Get the lease ID and corrupt the file (excluding expiration index)
lease_files = [
f
for f in storage.storage_path.glob("*.json")
if f.name != "expirations.json"
]
lease_id = UUID(lease_files[0].stem)
with open(lease_files[0], "w") as f:
f.write("invalid json content")
# Reading should return None and clean up the corrupted file
read_lease = await storage.read_lease(lease_id)
assert read_lease is None
# File should be cleaned up (excluding expiration index)
lease_files = [
f
for f in storage.storage_path.glob("*.json")
if f.name != "expirations.json"
]
assert len(lease_files) == 0
async def test_renew_lease(
self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
):
ttl = timedelta(minutes=5)
await storage.create_lease(sample_resource_ids, ttl)
# Get the lease ID and original expiration (excluding expiration index)
lease_files = [
f
for f in storage.storage_path.glob("*.json")
if f.name != "expirations.json"
]
lease_id = UUID(lease_files[0].stem)
with open(lease_files[0], "r") as f:
original_data = json.load(f)
original_expiration = datetime.fromisoformat(original_data["expiration"])
# Renew the lease
new_ttl = timedelta(minutes=10)
renewed = await storage.renew_lease(lease_id, new_ttl)
assert renewed is True
# Check that expiration was updated
with open(lease_files[0], "r") as f:
updated_data = json.load(f)
new_expiration = datetime.fromisoformat(updated_data["expiration"])
assert new_expiration > original_expiration
async def test_renew_lease_non_existing(self, storage: ConcurrencyLeaseStorage):
non_existing_id = uuid4()
renewed = await storage.renew_lease(non_existing_id, timedelta(minutes=5))
assert renewed is False
async def test_renew_lease_corrupted_file(
self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
):
# Create a valid lease first
ttl = timedelta(minutes=5)
await storage.create_lease(sample_resource_ids, ttl)
# Get the lease ID and corrupt the file (excluding expiration index)
lease_files = [
f
for f in storage.storage_path.glob("*.json")
if f.name != "expirations.json"
]
lease_id = UUID(lease_files[0].stem)
with open(lease_files[0], "w") as f:
f.write("invalid json content")
# Renewing should clean up the corrupted file and return False
renewed = await storage.renew_lease(lease_id, timedelta(minutes=10))
assert renewed is False
# File should be cleaned up (excluding expiration index)
lease_files = [
f
for f in storage.storage_path.glob("*.json")
if f.name != "expirations.json"
]
assert len(lease_files) == 0
async def test_revoke_lease(
self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
):
ttl = timedelta(minutes=5)
await storage.create_lease(sample_resource_ids, ttl)
# Verify file exists (excluding expiration index)
lease_files = [
f
for f in storage.storage_path.glob("*.json")
if f.name != "expirations.json"
]
assert len(lease_files) == 1
lease_id = UUID(lease_files[0].stem)
# Release the lease
await storage.revoke_lease(lease_id)
# File should be deleted (excluding expiration index)
lease_files = [
f
for f in storage.storage_path.glob("*.json")
if f.name != "expirations.json"
]
assert len(lease_files) == 0
async def test_revoke_lease_non_existing(self, storage: ConcurrencyLeaseStorage):
non_existing_id = uuid4()
# Should not raise an exception
await storage.revoke_lease(non_existing_id)
async def test_read_expired_lease_ids_no_expired(
self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
):
ttl = timedelta(minutes=5)
await storage.create_lease(sample_resource_ids, ttl)
expired_ids = await storage.read_expired_lease_ids()
assert expired_ids == []
async def test_read_expired_lease_ids_with_expired(
self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
):
expired_ttl = timedelta(seconds=-1)
await storage.create_lease(sample_resource_ids, expired_ttl)
expired_ids = await storage.read_expired_lease_ids()
assert len(expired_ids) == 1
# Verify the lease ID is correct
lease_files = [
f
for f in storage.storage_path.glob("*.json")
if f.name != "expirations.json"
]
assert len(lease_files) == 1
expected_lease_id = UUID(lease_files[0].stem)
assert expired_ids[0] == expected_lease_id
async def test_read_expired_lease_ids_with_limit(
self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
):
expired_ttl = timedelta(seconds=-1)
await storage.create_lease(sample_resource_ids, expired_ttl)
await storage.create_lease(sample_resource_ids, expired_ttl)
await storage.create_lease(sample_resource_ids, expired_ttl)
expired_ids = await storage.read_expired_lease_ids(limit=2)
assert len(expired_ids) == 2
async def test_read_expired_lease_ids_mixed_expiration(
self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
):
expired_ttl = timedelta(seconds=-1)
valid_ttl = timedelta(minutes=5)
await storage.create_lease(sample_resource_ids, expired_ttl)
await storage.create_lease(sample_resource_ids, valid_ttl)
await storage.create_lease(sample_resource_ids, expired_ttl)
expired_ids = await storage.read_expired_lease_ids()
assert len(expired_ids) == 2
async def test_read_expired_lease_ids_corrupted_files(
self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
):
# Create a valid lease and a corrupted file
ttl = timedelta(minutes=5)
await storage.create_lease(sample_resource_ids, ttl)
# Create a corrupted file
corrupted_file = storage.storage_path / f"{uuid4()}.json"
with open(corrupted_file, "w") as f:
f.write("invalid json content")
# Should return no expired leases (corrupted files are ignored)
expired_ids = await storage.read_expired_lease_ids()
assert expired_ids == []
# Corrupted file still exists (only cleaned up when accessed)
assert corrupted_file.exists()
async def test_storage_path_creation(self, temp_dir: Path):
# Test that storage path is created only when needed
storage_path = temp_dir / "nested" / "path"
assert not storage_path.exists()
# Creating the storage instance should not create the directory
storage = ConcurrencyLeaseStorage(storage_path=storage_path)
assert not storage_path.exists()
# Creating a lease should create the directory
sample_resource_ids = [uuid4(), uuid4()]
ttl = timedelta(minutes=5)
await storage.create_lease(sample_resource_ids, ttl)
assert storage_path.exists()
assert storage_path.is_dir()
async def test_multiple_leases_persistence(
self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
):
# Create multiple leases
ttl = timedelta(minutes=5)
await storage.create_lease(sample_resource_ids, ttl)
await storage.create_lease(sample_resource_ids, ttl)
await storage.create_lease(sample_resource_ids, ttl)
# Verify all files exist (excluding expiration index)
lease_files = [
f
for f in storage.storage_path.glob("*.json")
if f.name != "expirations.json"
]
assert len(lease_files) == 3
# Verify we can read all leases
lease_ids = [UUID(f.stem) for f in lease_files]
for lease_id in lease_ids:
read_lease = await storage.read_lease(lease_id)
assert read_lease is not None
assert read_lease.resource_ids == sample_resource_ids
async def test_list_holders_for_limit_empty(self, storage: ConcurrencyLeaseStorage):
limit_id = uuid4()
holders = await storage.list_holders_for_limit(limit_id)
assert holders == []
async def test_list_holders_for_limit_no_holders(
self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
):
# Create a lease without a holder
ttl = timedelta(minutes=5)
metadata = ConcurrencyLimitLeaseMetadata(slots=2)
await storage.create_lease(sample_resource_ids, ttl, metadata)
holders = await storage.list_holders_for_limit(sample_resource_ids[0])
assert holders == []
async def test_list_holders_for_limit_with_holders(
self, storage: ConcurrencyLeaseStorage
):
limit_id = uuid4()
# Create leases with different holders
holder1 = ConcurrencyLeaseHolder(type="task_run", id=uuid4())
holder2 = ConcurrencyLeaseHolder(type="flow_run", id=uuid4())
metadata1 = ConcurrencyLimitLeaseMetadata(slots=2, holder=holder1)
metadata2 = ConcurrencyLimitLeaseMetadata(slots=1, holder=holder2)
ttl = timedelta(minutes=5)
await storage.create_lease([limit_id], ttl, metadata1)
await storage.create_lease([limit_id], ttl, metadata2)
# Create a lease for a different limit to ensure it's not included
other_limit_id = uuid4()
metadata3 = ConcurrencyLimitLeaseMetadata(
slots=1, holder=ConcurrencyLeaseHolder(type="task_run", id=uuid4())
)
await storage.create_lease([other_limit_id], ttl, metadata3)
holders_with_leases = await storage.list_holders_for_limit(limit_id)
assert len(holders_with_leases) == 2
holders = [holder for _, holder in holders_with_leases]
assert holder1 in holders
assert holder2 in holders
async def test_list_holders_for_limit_expired_leases(
self, storage: ConcurrencyLeaseStorage
):
limit_id = uuid4()
# Create an expired lease with a holder
expired_ttl = timedelta(seconds=-1)
holder = ConcurrencyLeaseHolder(type="task_run", id=uuid4())
metadata = ConcurrencyLimitLeaseMetadata(slots=1, holder=holder)
await storage.create_lease([limit_id], expired_ttl, metadata)
# Create an active lease with a holder
active_ttl = timedelta(minutes=5)
active_holder = ConcurrencyLeaseHolder(type="flow_run", id=uuid4())
active_metadata = ConcurrencyLimitLeaseMetadata(slots=1, holder=active_holder)
active_lease = await storage.create_lease(
[limit_id], active_ttl, active_metadata
)
holders = await storage.list_holders_for_limit(limit_id)
assert len(holders) == 1
lease_id, holder = holders[0]
assert lease_id == active_lease.id
assert holder == active_holder
async def test_read_active_lease_ids_with_pagination(
self, storage: ConcurrencyLeaseStorage
):
# Create 10 active leases
active_ttl = timedelta(minutes=5)
lease_ids: list[UUID] = []
for _ in range(10):
lease = await storage.create_lease([uuid4()], active_ttl)
lease_ids.append(lease.id)
# Test getting first page
first_page = await storage.read_active_lease_ids(limit=3, offset=0)
assert len(first_page) == 3
assert all(lid in lease_ids for lid in first_page)
# Test getting second page
second_page = await storage.read_active_lease_ids(limit=3, offset=3)
assert len(second_page) == 3
assert all(lid in lease_ids for lid in second_page)
# Ensure no overlap between pages
assert set(first_page).isdisjoint(set(second_page))
# Test getting third page
third_page = await storage.read_active_lease_ids(limit=3, offset=6)
assert len(third_page) == 3
assert all(lid in lease_ids for lid in third_page)
# Test getting partial last page
fourth_page = await storage.read_active_lease_ids(limit=3, offset=9)
assert len(fourth_page) == 1
assert all(lid in lease_ids for lid in fourth_page)
# Test offset beyond available items
empty_page = await storage.read_active_lease_ids(limit=3, offset=100)
assert empty_page == []
async def test_read_active_lease_ids_default_pagination(
self, storage: ConcurrencyLeaseStorage
):
# Create 150 active leases (more than default limit)
active_ttl = timedelta(minutes=5)
lease_ids: list[UUID] = []
for _ in range(150):
lease = await storage.create_lease([uuid4()], active_ttl)
lease_ids.append(lease.id)
# Test default limit of 100
default_page = await storage.read_active_lease_ids()
assert len(default_page) == 100
assert all(lid in lease_ids for lid in default_page)
# Test with offset
offset_page = await storage.read_active_lease_ids(offset=100)
assert len(offset_page) == 50 # remaining leases
assert all(lid in lease_ids for lid in offset_page)
# Ensure no overlap with first page
assert set(default_page).isdisjoint(set(offset_page))
| TestFilesystemConcurrencyLeaseStorage |
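Condensing the lifecycle exercised by the tests above into one sketch; the import path is an assumption based on this test module's location, and only the class name and its methods are confirmed by the row:

import asyncio
from datetime import timedelta
from pathlib import Path
from tempfile import TemporaryDirectory
from uuid import uuid4

from prefect.server.concurrency.lease_storage.filesystem import (  # assumed import path
    ConcurrencyLeaseStorage,
)

async def demo(storage_path: Path) -> None:
    storage = ConcurrencyLeaseStorage(storage_path=storage_path)
    lease = await storage.create_lease([uuid4()], timedelta(minutes=5))
    assert await storage.read_lease(lease.id) is not None
    assert await storage.renew_lease(lease.id, timedelta(minutes=10)) is True
    await storage.revoke_lease(lease.id)
    assert await storage.read_lease(lease.id) is None

with TemporaryDirectory() as tmp:
    asyncio.run(demo(Path(tmp)))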
python | django__django | django/contrib/gis/forms/widgets.py | {
"start": 259,
"end": 2291
} | class ____(Widget):
"""
The base class for rich geometry widgets.
Render a map using the WKT of the geometry.
"""
base_layer = None
geom_type = "GEOMETRY"
map_srid = 4326
display_raw = False
supports_3d = False
template_name = "" # set on subclasses
def __init__(self, attrs=None):
self.attrs = {
key: getattr(self, key)
for key in ("base_layer", "geom_type", "map_srid", "display_raw")
}
if attrs:
self.attrs.update(attrs)
def serialize(self, value):
return value.wkt if value else ""
def deserialize(self, value):
try:
return GEOSGeometry(value)
except (GEOSException, ValueError, TypeError) as err:
logger.error("Error creating geometry from value '%s' (%s)", value, err)
return None
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
# If a string reaches here (via a validation error on another
# field) then just reconstruct the Geometry.
if value and isinstance(value, str):
value = self.deserialize(value)
if value:
# Check that srid of value and map match
if value.srid and value.srid != self.map_srid:
try:
ogr = value.ogr
ogr.transform(self.map_srid)
value = ogr
except gdal.GDALException as err:
logger.error(
"Error transforming geometry from srid '%s' to srid '%s' (%s)",
value.srid,
self.map_srid,
err,
)
context["serialized"] = self.serialize(value)
geom_type = gdal.OGRGeomType(self.attrs["geom_type"]).name
context["widget"]["attrs"]["geom_name"] = (
"Geometry" if geom_type == "Unknown" else geom_type
)
return context
| BaseGeometryWidget |
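A minimal subclass sketch of the widget above, assuming a configured GeoDjango environment with the GEOS library available; the template name is a placeholder:

from django.contrib.gis.forms.widgets import BaseGeometryWidget

class PlainPointWidget(BaseGeometryWidget):
    geom_type = "POINT"
    map_srid = 4326
    template_name = "myapp/point_widget.html"  # hypothetical template

widget = PlainPointWidget()
geom = widget.deserialize("SRID=4326;POINT(1 2)")  # GEOSGeometry, or None on parse errors
wkt = widget.serialize(geom)                        # "POINT (1 2)"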
python | dagster-io__dagster | python_modules/dagster/dagster/_core/workspace/load_target.py | {
"start": 7281,
"end": 7793
} | class ____(WorkspaceLoadTarget):
module_name: str
attribute: Optional[str]
working_directory: Optional[str]
location_name: Optional[str]
def create_origins(self) -> Sequence[ManagedGrpcPythonEnvCodeLocationOrigin]:
return [
location_origin_from_module_name(
self.module_name,
self.attribute,
self.working_directory,
location_name=self.location_name,
)
]
@record(kw_only=False)
| ModuleTarget |
python | conda__conda | conda/env/specs/yaml_file.py | {
"start": 543,
"end": 2701
} | class ____(EnvironmentSpecBase):
# Do not use this plugin for in the environment spec detection process.
# Users must specify using `environment.yaml` with the `--environment-specifier`
# option.
detection_supported = False
_environment = None
extensions = {".yaml", ".yml"}
def __init__(self, filename=None, **kwargs):
self.filename = filename
self.msg = None
def can_handle(self):
"""
        Validates that this loader can process the environment definition.
This can handle if:
* the provided file exists
* the provided file ends in the supported file extensions (.yaml or .yml)
* the yaml file can be loaded and is not empty
:return: True or False
"""
if not self.filename:
return False
# Extract the file extension (e.g., '.txt' or '' if no extension)
_, file_ext = os.path.splitext(self.filename)
# Check if the file has a supported extension and exists
if not any(spec_ext == file_ext for spec_ext in YamlFileSpec.extensions):
return False
try:
yamlstr = env.load_file(self.filename)
data = yaml_safe_load(yamlstr)
if data is None:
return False
except Exception:
log.debug("Failed to load %s as a YAML.", self.filename, exc_info=True)
return False
return True
@property
@deprecated("26.3", "26.9", addendum="This method is not used anymore, use 'env'")
def environment(self) -> EnvironmentYaml:
if not self._environment:
if not self.can_handle():
raise CondaValueError(f"Cannot handle environment file: {self.msg}")
self._environment = env.from_file(self.filename)
if self._environment is None:
raise CondaValueError("Environment could not be loaded")
return self._environment
@property
def env(self) -> Environment:
if not self._environment:
self._environment = env.from_file(self.filename)
return self._environment.to_environment_model()
| YamlFileSpec |
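A short usage sketch of the `can_handle` check above, assuming conda is installed and the module path shown in this row is importable:

from pathlib import Path
from tempfile import TemporaryDirectory

from conda.env.specs.yaml_file import YamlFileSpec  # path taken from this row

with TemporaryDirectory() as tmp:
    env_file = Path(tmp) / "environment.yml"
    env_file.write_text("name: demo\ndependencies:\n  - python=3.11\n")
    print(YamlFileSpec(filename=str(env_file)).can_handle())             # True: .yml, non-empty YAML
    print(YamlFileSpec(filename=str(Path(tmp) / "x.txt")).can_handle())  # False: wrong extension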
python | sqlalchemy__sqlalchemy | test/orm/test_query.py | {
"start": 182186,
"end": 184142
} | class ____(_fixtures.FixtureTest):
run_inserts = "once"
run_setup_mappers = "once"
run_deletes = None
@classmethod
def setup_mappers(cls):
User = cls.classes.User
users = cls.tables.users
cls.mapper_registry.map_imperatively(User, users)
@classmethod
def fixtures(cls):
rows = [(i, "user %d" % (i)) for i in range(1, 21)]
return dict(users=(("id", "name"),) + tuple(rows))
@testing.combinations(
(0,),
(1,),
(5,),
(20,),
argnames="num_rows",
)
@testing.combinations(
("all",),
("allquery",),
("fetchone",),
("iter",),
("iterquery",),
("iternosavequery",),
argnames="method",
)
@testing.combinations((1,), (10,), (30,), argnames="yield_per")
def test_iter_combinations(self, num_rows, method, yield_per):
User = self.classes.User
s = fixture_session()
if method.endswith("query"):
q = s.query(User).limit(num_rows)
if yield_per is not None:
q = q.yield_per(yield_per)
else:
q = select(User).limit(num_rows)
if yield_per is not None:
q = q.execution_options(yield_per=yield_per)
result = s.execute(q)
if method == "allquery":
rows = q.all()
elif method == "iterquery":
rows = [row for row in q]
elif method == "iternosavequery":
rows = [None for row in q]
elif method == "all":
rows = result.all()
elif method == "fetchone":
rows = []
while True:
row = result.fetchone()
if row is None:
break
else:
rows.append(row)
elif method == "iter":
rows = [r for r in result]
eq_(len(rows), num_rows)
| YieldIterationTest |
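Outside the test matrix above, the streaming pattern being exercised looks roughly like this; a sketch assuming an already-configured Session (`session`) and a mapped `User` class:

from sqlalchemy import select

def stream_users(session, User, batch_size=100):
    # yield_per fetches rows in batches instead of loading everything up front,
    # so iteration stays memory-bounded
    stmt = select(User).execution_options(yield_per=batch_size)
    for user in session.execute(stmt).scalars():
        yield user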
python | geekcomputers__Python | venv/Lib/site-packages/pip/_internal/index/collector.py | {
"start": 12788,
"end": 16265
} | class ____:
"""
Responsible for collecting Link objects from all configured locations,
making network requests as needed.
The class's main method is its collect_sources() method.
"""
def __init__(
self,
session: PipSession,
search_scope: SearchScope,
) -> None:
self.search_scope = search_scope
self.session = session
@classmethod
def create(
cls,
session: PipSession,
options: Values,
suppress_no_index: bool = False,
) -> "LinkCollector":
"""
:param session: The Session to use to make requests.
:param suppress_no_index: Whether to ignore the --no-index option
when constructing the SearchScope object.
"""
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index and not suppress_no_index:
logger.debug(
"Ignoring indexes: %s",
",".join(redact_auth_from_url(url) for url in index_urls),
)
index_urls = []
# Make sure find_links is a list before passing to create().
find_links = options.find_links or []
search_scope = SearchScope.create(
find_links=find_links,
index_urls=index_urls,
no_index=options.no_index,
)
link_collector = LinkCollector(
session=session,
search_scope=search_scope,
)
return link_collector
@property
def find_links(self) -> List[str]:
return self.search_scope.find_links
def fetch_response(self, location: Link) -> Optional[IndexContent]:
"""
Fetch an HTML page containing package links.
"""
return _get_index_content(location, session=self.session)
def collect_sources(
self,
project_name: str,
candidates_from_page: CandidatesFromPage,
) -> CollectedSources:
# The OrderedDict calls deduplicate sources by URL.
index_url_sources = collections.OrderedDict(
build_source(
loc,
candidates_from_page=candidates_from_page,
page_validator=self.session.is_secure_origin,
expand_dir=False,
cache_link_parsing=False,
project_name=project_name,
)
for loc in self.search_scope.get_index_urls_locations(project_name)
).values()
find_links_sources = collections.OrderedDict(
build_source(
loc,
candidates_from_page=candidates_from_page,
page_validator=self.session.is_secure_origin,
expand_dir=True,
cache_link_parsing=True,
project_name=project_name,
)
for loc in self.find_links
).values()
if logger.isEnabledFor(logging.DEBUG):
lines = [
f"* {s.link}"
for s in itertools.chain(find_links_sources, index_url_sources)
if s is not None and s.link is not None
]
lines = [
f"{len(lines)} location(s) to search "
f"for versions of {project_name}:"
] + lines
logger.debug("\n".join(lines))
return CollectedSources(
find_links=list(find_links_sources),
index_urls=list(index_url_sources),
)
| LinkCollector |
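A hedged usage sketch of the collector above, built only from the constructors and methods visible in this row plus pip's internal `PipSession` and `Link` classes (internal APIs, import locations assumed, subject to change between pip releases):

from pip._internal.index.collector import LinkCollector      # path taken from this row
from pip._internal.models.link import Link                   # assumed internal location
from pip._internal.models.search_scope import SearchScope    # assumed internal location
from pip._internal.network.session import PipSession         # assumed internal location

session = PipSession()
search_scope = SearchScope.create(
    find_links=[],
    index_urls=["https://pypi.org/simple"],
    no_index=False,
)
collector = LinkCollector(session=session, search_scope=search_scope)
index_content = collector.fetch_response(Link("https://pypi.org/simple/requests/"))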
python | doocs__leetcode | lcof2/剑指 Offer II 068. 查找插入位置/Solution.py | {
"start": 0,
"end": 315
} | class ____:
def searchInsert(self, nums: List[int], target: int) -> int:
left, right = 0, len(nums)
while left < right:
mid = (left + right) >> 1
if nums[mid] >= target:
right = mid
else:
left = mid + 1
return left
| Solution |
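A quick worked check of the lower-bound binary search above (note the row omits the `from typing import List` import that the annotations require):

sol = Solution()
print(sol.searchInsert([1, 3, 5, 6], 5))   # 2: target found at index 2
print(sol.searchInsert([1, 3, 5, 6], 2))   # 1: would be inserted before 3
print(sol.searchInsert([1, 3, 5, 6], 7))   # 4: would be appended at the end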
python | apache__airflow | providers/databricks/tests/unit/databricks/operators/test_databricks.py | {
"start": 20627,
"end": 42805
} | class ____:
def test_init_with_notebook_task_named_parameters(self):
"""
Test the initializer with the named parameters.
"""
op = DatabricksSubmitRunOperator(
task_id=TASK_ID, new_cluster=NEW_CLUSTER, notebook_task=NOTEBOOK_TASK
)
expected = utils.normalise_json_content(
{"new_cluster": NEW_CLUSTER, "notebook_task": NOTEBOOK_TASK, "run_name": TASK_ID}
)
assert expected == utils.normalise_json_content(op.json)
def test_init_with_spark_python_task_named_parameters(self):
"""
Test the initializer with the named parameters.
"""
op = DatabricksSubmitRunOperator(
task_id=TASK_ID, new_cluster=NEW_CLUSTER, spark_python_task=SPARK_PYTHON_TASK
)
expected = utils.normalise_json_content(
{"new_cluster": NEW_CLUSTER, "spark_python_task": SPARK_PYTHON_TASK, "run_name": TASK_ID}
)
assert expected == utils.normalise_json_content(op.json)
def test_init_with_pipeline_name_task_named_parameters(self):
"""
Test the initializer with the named parameters.
"""
op = DatabricksSubmitRunOperator(task_id=TASK_ID, pipeline_task=PIPELINE_NAME_TASK)
expected = utils.normalise_json_content({"pipeline_task": PIPELINE_NAME_TASK, "run_name": TASK_ID})
assert expected == utils.normalise_json_content(op.json)
def test_init_with_pipeline_id_task_named_parameters(self):
"""
Test the initializer with the named parameters.
"""
op = DatabricksSubmitRunOperator(task_id=TASK_ID, pipeline_task=PIPELINE_ID_TASK)
expected = utils.normalise_json_content({"pipeline_task": PIPELINE_ID_TASK, "run_name": TASK_ID})
assert expected == utils.normalise_json_content(op.json)
def test_init_with_spark_submit_task_named_parameters(self):
"""
Test the initializer with the named parameters.
"""
op = DatabricksSubmitRunOperator(
task_id=TASK_ID, new_cluster=NEW_CLUSTER, spark_submit_task=SPARK_SUBMIT_TASK
)
expected = utils.normalise_json_content(
{"new_cluster": NEW_CLUSTER, "spark_submit_task": SPARK_SUBMIT_TASK, "run_name": TASK_ID}
)
assert expected == utils.normalise_json_content(op.json)
def test_init_with_dbt_task_named_parameters(self):
"""
Test the initializer with the named parameters.
"""
git_source = {
"git_url": "https://github.com/dbt-labs/jaffle_shop",
"git_provider": "github",
"git_branch": "main",
}
op = DatabricksSubmitRunOperator(
task_id=TASK_ID, new_cluster=NEW_CLUSTER, dbt_task=DBT_TASK, git_source=git_source
)
expected = utils.normalise_json_content(
{"new_cluster": NEW_CLUSTER, "dbt_task": DBT_TASK, "git_source": git_source, "run_name": TASK_ID}
)
assert expected == utils.normalise_json_content(op.json)
def test_init_with_dbt_task_mixed_parameters(self):
"""
Test the initializer with mixed parameters.
"""
git_source = {
"git_url": "https://github.com/dbt-labs/jaffle_shop",
"git_provider": "github",
"git_branch": "main",
}
json = {"git_source": git_source}
op = DatabricksSubmitRunOperator(
task_id=TASK_ID, new_cluster=NEW_CLUSTER, dbt_task=DBT_TASK, json=json
)
expected = utils.normalise_json_content(
{"new_cluster": NEW_CLUSTER, "dbt_task": DBT_TASK, "git_source": git_source, "run_name": TASK_ID}
)
assert expected == utils.normalise_json_content(op.json)
def test_init_with_dbt_task_without_git_source_raises_error(self):
"""
Test the initializer without the necessary git_source for dbt_task raises error.
"""
exception_message = "git_source is required for dbt_task"
with pytest.raises(AirflowException, match=exception_message):
DatabricksSubmitRunOperator(task_id=TASK_ID, new_cluster=NEW_CLUSTER, dbt_task=DBT_TASK)
def test_init_with_dbt_task_json_without_git_source_raises_error(self):
"""
Test the initializer without the necessary git_source for dbt_task raises error.
"""
json = {"dbt_task": DBT_TASK, "new_cluster": NEW_CLUSTER}
exception_message = "git_source is required for dbt_task"
with pytest.raises(AirflowException, match=exception_message):
DatabricksSubmitRunOperator(task_id=TASK_ID, json=json)
def test_init_with_json(self):
"""
Test the initializer with json data.
"""
json = {"new_cluster": NEW_CLUSTER, "notebook_task": NOTEBOOK_TASK}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=json)
expected = utils.normalise_json_content(
{"new_cluster": NEW_CLUSTER, "notebook_task": NOTEBOOK_TASK, "run_name": TASK_ID}
)
assert expected == utils.normalise_json_content(op.json)
def test_init_with_tasks(self):
tasks = [{"task_key": 1, "new_cluster": NEW_CLUSTER, "notebook_task": NOTEBOOK_TASK}]
op = DatabricksSubmitRunOperator(task_id=TASK_ID, tasks=tasks)
expected = utils.normalise_json_content({"run_name": TASK_ID, "tasks": tasks})
assert expected == utils.normalise_json_content(op.json)
def test_init_with_specified_run_name(self):
"""
Test the initializer with a specified run_name.
"""
json = {"new_cluster": NEW_CLUSTER, "notebook_task": NOTEBOOK_TASK, "run_name": RUN_NAME}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=json)
expected = utils.normalise_json_content(
{"new_cluster": NEW_CLUSTER, "notebook_task": NOTEBOOK_TASK, "run_name": RUN_NAME}
)
assert expected == utils.normalise_json_content(op.json)
def test_pipeline_task(self):
"""
Test the initializer with a pipeline task.
"""
pipeline_task = {"pipeline_id": "test-dlt"}
json = {"new_cluster": NEW_CLUSTER, "run_name": RUN_NAME, "pipeline_task": pipeline_task}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=json)
expected = utils.normalise_json_content(
{"new_cluster": NEW_CLUSTER, "pipeline_task": pipeline_task, "run_name": RUN_NAME}
)
assert expected == utils.normalise_json_content(op.json)
def test_init_with_merging(self):
"""
Test the initializer when json and other named parameters are both
provided. The named parameters should override top level keys in the
json dict.
"""
override_new_cluster = {"workers": 999}
json = {
"new_cluster": NEW_CLUSTER,
"notebook_task": NOTEBOOK_TASK,
}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=json, new_cluster=override_new_cluster)
expected = utils.normalise_json_content(
{
"new_cluster": override_new_cluster,
"notebook_task": NOTEBOOK_TASK,
"run_name": TASK_ID,
}
)
assert expected == utils.normalise_json_content(op.json)
def test_init_with_templating(self):
json = {
"new_cluster": NEW_CLUSTER,
"notebook_task": TEMPLATED_NOTEBOOK_TASK,
}
dag = DAG("test", schedule=None, start_date=datetime.now())
op = DatabricksSubmitRunOperator(dag=dag, task_id=TASK_ID, json=json)
op.render_template_fields(context={"ds": DATE})
expected = utils.normalise_json_content(
{
"new_cluster": NEW_CLUSTER,
"notebook_task": RENDERED_TEMPLATED_NOTEBOOK_TASK,
"run_name": TASK_ID,
}
)
assert expected == utils.normalise_json_content(op.json)
def test_init_with_git_source(self):
json = {"new_cluster": NEW_CLUSTER, "notebook_task": NOTEBOOK_TASK, "run_name": RUN_NAME}
git_source = {
"git_url": "https://github.com/apache/airflow",
"git_provider": "github",
"git_branch": "main",
}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, git_source=git_source, json=json)
expected = utils.normalise_json_content(
{
"new_cluster": NEW_CLUSTER,
"notebook_task": NOTEBOOK_TASK,
"run_name": RUN_NAME,
"git_source": git_source,
}
)
assert expected == utils.normalise_json_content(op.json)
def test_init_with_bad_type(self):
json = {"test": datetime.now()}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=json)
# Looks a bit weird since we have to escape regex reserved symbols.
exception_message = (
r"Type \<(type|class) \'datetime.datetime\'\> used "
r"for parameter json\[test\] is not a number or a string"
)
with pytest.raises(AirflowException, match=exception_message):
utils.normalise_json_content(op.json)
@mock.patch("airflow.providers.databricks.operators.databricks.DatabricksHook")
def test_exec_success(self, db_mock_class):
"""
Test the execute function in case where the run is successful.
"""
run = {
"new_cluster": NEW_CLUSTER,
"notebook_task": NOTEBOOK_TASK,
}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.submit_run.return_value = RUN_ID
db_mock.get_run = make_run_with_state_mock("TERMINATED", "SUCCESS")
op.execute(None)
expected = utils.normalise_json_content(
{"new_cluster": NEW_CLUSTER, "notebook_task": NOTEBOOK_TASK, "run_name": TASK_ID}
)
db_mock_class.assert_called_once_with(
DEFAULT_CONN_ID,
retry_limit=op.databricks_retry_limit,
retry_delay=op.databricks_retry_delay,
retry_args=None,
caller="DatabricksSubmitRunOperator",
)
db_mock.submit_run.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
db_mock.get_run.assert_called_once_with(RUN_ID)
assert op.run_id == RUN_ID
@mock.patch("airflow.providers.databricks.operators.databricks.DatabricksHook")
def test_exec_pipeline_name(self, db_mock_class):
"""
Test the execute function when provided a pipeline name.
"""
run = {"pipeline_task": {"pipeline_name": "This is a test pipeline"}}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.find_pipeline_id_by_name.return_value = PIPELINE_ID_TASK["pipeline_id"]
db_mock.submit_run.return_value = RUN_ID
db_mock.get_run = make_run_with_state_mock("TERMINATED", "SUCCESS")
op.execute(None)
expected = utils.normalise_json_content({"pipeline_task": PIPELINE_ID_TASK, "run_name": TASK_ID})
db_mock_class.assert_called_once_with(
DEFAULT_CONN_ID,
retry_limit=op.databricks_retry_limit,
retry_delay=op.databricks_retry_delay,
retry_args=None,
caller="DatabricksSubmitRunOperator",
)
db_mock.find_pipeline_id_by_name.assert_called_once_with("This is a test pipeline")
db_mock.submit_run.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
db_mock.get_run.assert_called_once_with(RUN_ID)
assert op.run_id == RUN_ID
@mock.patch("airflow.providers.databricks.operators.databricks.DatabricksHook")
def test_exec_failure(self, db_mock_class):
"""
Test the execute function in case where the run failed.
"""
run = {
"new_cluster": NEW_CLUSTER,
"notebook_task": NOTEBOOK_TASK,
}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.submit_run.return_value = RUN_ID
db_mock.get_run = make_run_with_state_mock("TERMINATED", "FAILED")
with pytest.raises(AirflowException):
op.execute(None)
expected = utils.normalise_json_content(
{
"new_cluster": NEW_CLUSTER,
"notebook_task": NOTEBOOK_TASK,
"run_name": TASK_ID,
}
)
db_mock_class.assert_called_once_with(
DEFAULT_CONN_ID,
retry_limit=op.databricks_retry_limit,
retry_delay=op.databricks_retry_delay,
retry_args=None,
caller="DatabricksSubmitRunOperator",
)
db_mock.submit_run.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
db_mock.get_run.assert_called_once_with(RUN_ID)
assert op.run_id == RUN_ID
@mock.patch("airflow.providers.databricks.operators.databricks.DatabricksHook")
def test_on_kill(self, db_mock_class):
run = {
"new_cluster": NEW_CLUSTER,
"notebook_task": NOTEBOOK_TASK,
}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=run)
db_mock = db_mock_class.return_value
op.run_id = RUN_ID
op.on_kill()
db_mock.cancel_run.assert_called_once_with(RUN_ID)
@mock.patch("airflow.providers.databricks.operators.databricks.DatabricksHook")
def test_wait_for_termination(self, db_mock_class):
run = {
"new_cluster": NEW_CLUSTER,
"notebook_task": NOTEBOOK_TASK,
}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.submit_run.return_value = RUN_ID
db_mock.get_run = make_run_with_state_mock("TERMINATED", "SUCCESS")
assert op.wait_for_termination
op.execute(None)
expected = utils.normalise_json_content(
{"new_cluster": NEW_CLUSTER, "notebook_task": NOTEBOOK_TASK, "run_name": TASK_ID}
)
db_mock_class.assert_called_once_with(
DEFAULT_CONN_ID,
retry_limit=op.databricks_retry_limit,
retry_delay=op.databricks_retry_delay,
retry_args=None,
caller="DatabricksSubmitRunOperator",
)
db_mock.submit_run.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
db_mock.get_run.assert_called_once_with(RUN_ID)
@mock.patch("airflow.providers.databricks.operators.databricks.DatabricksHook")
def test_no_wait_for_termination(self, db_mock_class):
run = {
"new_cluster": NEW_CLUSTER,
"notebook_task": NOTEBOOK_TASK,
}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, wait_for_termination=False, json=run)
db_mock = db_mock_class.return_value
db_mock.submit_run.return_value = RUN_ID
assert not op.wait_for_termination
op.execute(None)
expected = utils.normalise_json_content(
{"new_cluster": NEW_CLUSTER, "notebook_task": NOTEBOOK_TASK, "run_name": TASK_ID}
)
db_mock_class.assert_called_once_with(
DEFAULT_CONN_ID,
retry_limit=op.databricks_retry_limit,
retry_delay=op.databricks_retry_delay,
retry_args=None,
caller="DatabricksSubmitRunOperator",
)
db_mock.submit_run.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
db_mock.get_run.assert_not_called()
@mock.patch("airflow.providers.databricks.operators.databricks.DatabricksHook")
def test_execute_task_deferred(self, db_mock_class):
"""
Test the execute function in case where the run is successful.
"""
run = {
"new_cluster": NEW_CLUSTER,
"notebook_task": NOTEBOOK_TASK,
}
op = DatabricksSubmitRunOperator(deferrable=True, task_id=TASK_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.submit_run.return_value = RUN_ID
db_mock.get_run = make_run_with_state_mock("RUNNING", "RUNNING")
with pytest.raises(TaskDeferred) as exc:
op.execute(None)
assert isinstance(exc.value.trigger, DatabricksExecutionTrigger)
assert exc.value.method_name == "execute_complete"
expected = utils.normalise_json_content(
{"new_cluster": NEW_CLUSTER, "notebook_task": NOTEBOOK_TASK, "run_name": TASK_ID}
)
db_mock_class.assert_called_once_with(
DEFAULT_CONN_ID,
retry_limit=op.databricks_retry_limit,
retry_delay=op.databricks_retry_delay,
retry_args=None,
caller="DatabricksSubmitRunOperator",
)
db_mock.submit_run.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
assert op.run_id == RUN_ID
def test_execute_complete_success(self):
"""
Test `execute_complete` function in case the Trigger has returned a successful completion event.
"""
run = {
"new_cluster": NEW_CLUSTER,
"notebook_task": NOTEBOOK_TASK,
}
event = {
"run_id": RUN_ID,
"run_page_url": RUN_PAGE_URL,
"run_state": RunState("TERMINATED", "SUCCESS", "").to_json(),
"errors": [],
}
op = DatabricksSubmitRunOperator(deferrable=True, task_id=TASK_ID, json=run)
assert op.execute_complete(context=None, event=event) is None
@mock.patch("airflow.providers.databricks.operators.databricks.DatabricksHook")
def test_execute_complete_failure(self, db_mock_class):
"""
Test `execute_complete` function in case the Trigger has returned a failure completion event.
"""
run_state_failed = RunState("TERMINATED", "FAILED", "")
run = {
"new_cluster": NEW_CLUSTER,
"notebook_task": NOTEBOOK_TASK,
}
event = {
"run_id": RUN_ID,
"run_page_url": RUN_PAGE_URL,
"run_state": run_state_failed.to_json(),
"repair_run": False,
"errors": [],
}
op = DatabricksSubmitRunOperator(deferrable=True, task_id=TASK_ID, json=run)
with pytest.raises(AirflowException):
op.execute_complete(context=None, event=event)
db_mock = db_mock_class.return_value
db_mock.submit_run.return_value = RUN_ID
db_mock.get_run = make_run_with_state_mock("TERMINATED", "FAILED")
with pytest.raises(AirflowException, match=f"Job run failed with terminal state: {run_state_failed}"):
op.execute_complete(context=None, event=event)
def test_execute_complete_incorrect_event_validation_failure(self):
event = {"event_id": "no such column"}
op = DatabricksSubmitRunOperator(deferrable=True, task_id=TASK_ID)
with pytest.raises(AirflowException):
op.execute_complete(context=None, event=event)
@mock.patch("airflow.providers.databricks.operators.databricks.DatabricksHook")
@mock.patch("airflow.providers.databricks.operators.databricks.DatabricksSubmitRunOperator.defer")
    def test_databricks_submit_run_deferrable_operator_failed_before_defer(self, mock_defer, db_mock_class):
        """Asserts that a task is not deferred when it fails"""
run = {
"new_cluster": NEW_CLUSTER,
"notebook_task": NOTEBOOK_TASK,
}
op = DatabricksSubmitRunOperator(deferrable=True, task_id=TASK_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.submit_run.return_value = RUN_ID
db_mock.get_run = make_run_with_state_mock("TERMINATED", "FAILED")
op.execute(None)
expected = utils.normalise_json_content(
{"new_cluster": NEW_CLUSTER, "notebook_task": NOTEBOOK_TASK, "run_name": TASK_ID}
)
db_mock_class.assert_called_once_with(
DEFAULT_CONN_ID,
retry_limit=op.databricks_retry_limit,
retry_delay=op.databricks_retry_delay,
retry_args=None,
caller="DatabricksSubmitRunOperator",
)
db_mock.submit_run.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
assert op.run_id == RUN_ID
assert not mock_defer.called
@mock.patch("airflow.providers.databricks.operators.databricks.DatabricksHook")
@mock.patch("airflow.providers.databricks.operators.databricks.DatabricksSubmitRunOperator.defer")
def test_databricks_submit_run_deferrable_operator_success_before_defer(self, mock_defer, db_mock_class):
"""Asserts that a task is not deferred when it succeeds"""
run = {
"new_cluster": NEW_CLUSTER,
"notebook_task": NOTEBOOK_TASK,
}
op = DatabricksSubmitRunOperator(deferrable=True, task_id=TASK_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.submit_run.return_value = RUN_ID
db_mock.get_run = make_run_with_state_mock("TERMINATED", "SUCCESS")
op.execute(None)
expected = utils.normalise_json_content(
{"new_cluster": NEW_CLUSTER, "notebook_task": NOTEBOOK_TASK, "run_name": TASK_ID}
)
db_mock_class.assert_called_once_with(
DEFAULT_CONN_ID,
retry_limit=op.databricks_retry_limit,
retry_delay=op.databricks_retry_delay,
retry_args=None,
caller="DatabricksSubmitRunOperator",
)
db_mock.submit_run.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
assert op.run_id == RUN_ID
assert not mock_defer.called
| TestDatabricksSubmitRunOperator |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/urllib3/connectionpool.py | {
"start": 2909,
"end": 33074
} | class ____(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`http.client.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`http.client.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`http.client.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to False, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.ProxyManager`
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.ProxyManager`
:param \\**conn_kw:
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
:class:`urllib3.connection.HTTPSConnection` instances.
"""
scheme = "http"
ConnectionCls = HTTPConnection
ResponseCls = HTTPResponse
def __init__(
self,
host,
port=None,
strict=False,
timeout=Timeout.DEFAULT_TIMEOUT,
maxsize=1,
block=False,
headers=None,
retries=None,
_proxy=None,
_proxy_headers=None,
_proxy_config=None,
**conn_kw
):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
self.strict = strict
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
if retries is None:
retries = Retry.DEFAULT
self.timeout = timeout
self.retries = retries
self.pool = self.QueueCls(maxsize)
self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
self.proxy_config = _proxy_config
# Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize):
self.pool.put(None)
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
self.conn_kw = conn_kw
if self.proxy:
# Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
# We cannot know if the user has added default socket options, so we cannot replace the
# list.
self.conn_kw.setdefault("socket_options", [])
self.conn_kw["proxy"] = self.proxy
self.conn_kw["proxy_config"] = self.proxy_config
# Do not pass 'self' as callback to 'finalize'.
# Then the 'finalize' would keep an endless living (leak) to self.
# By just passing a reference to the pool allows the garbage collector
# to free self if nobody else has a reference to it.
pool = self.pool
# Close all the HTTPConnections in the pool before the
# HTTPConnectionPool object is garbage collected.
weakref_finalize(self, _close_pool_connections, pool)
def _new_conn(self):
"""
Return a fresh :class:`HTTPConnection`.
"""
self.num_connections += 1
log.debug(
"Starting new HTTP connection (%d): %s:%s",
self.num_connections,
self.host,
self.port or "80",
)
conn = self.ConnectionCls(
host=self.host,
port=self.port,
timeout=self.timeout.connect_timeout,
strict=self.strict,
**self.conn_kw
)
return conn
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.")
except queue.Empty:
if self.block:
raise EmptyPoolError(
self,
"Pool reached maximum size and no more connections are allowed.",
)
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.debug("Resetting dropped connection: %s", self.host)
conn.close()
if getattr(conn, "auto_open", 1) == 0:
# This is a proxied connection that has been mutated by
# http.client._tunnel() and cannot be reused (since it would
# attempt to bypass the proxy)
conn = None
return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except queue.Full:
# This should never happen if self.block == True
log.warning(
"Connection pool is full, discarding connection: %s. Connection pool size: %s",
self.host,
self.pool.qsize(),
)
# Connection never got put back into the pool, close it.
if conn:
conn.close()
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
pass
def _prepare_proxy(self, conn):
# Nothing to do for HTTP connections.
pass
def _get_timeout(self, timeout):
"""Helper that always returns a :class:`urllib3.util.Timeout`"""
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
def _raise_timeout(self, err, url, timeout_value):
"""Is the error actually a timeout? Will raise a ReadTimeout or pass"""
if isinstance(err, SocketTimeout):
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % timeout_value
)
# See the above comment about EAGAIN in Python 3. In Python 2 we have
# to specifically catch it and throw the timeout error
if hasattr(err, "errno") and err.errno in _blocking_errnos:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % timeout_value
)
# Catch possible read timeouts thrown as SSL errors. If not the
# case, rethrow the original. We need to do this because of:
# http://bugs.python.org/issue10272
if "timed out" in str(err) or "did not complete (read)" in str(
err
): # Python < 2.7.4
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % timeout_value
)
def _make_request(
self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw
):
"""
Perform a request on a given urllib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param timeout:
Socket timeout in seconds for the request. This can be a
float or integer, which will set the same timeout value for
the socket connect and the socket read, or an instance of
:class:`urllib3.util.Timeout`, which gives you more fine-grained
control over your timeouts.
"""
self.num_requests += 1
timeout_obj = self._get_timeout(timeout)
timeout_obj.start_connect()
conn.timeout = Timeout.resolve_default_timeout(timeout_obj.connect_timeout)
# Trigger any extra validation we need to do.
try:
self._validate_conn(conn)
except (SocketTimeout, BaseSSLError) as e:
# Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
raise
# conn.request() calls http.client.*.request, not the method in
# urllib3.request. It also calls makefile (recv) on the socket.
try:
if chunked:
conn.request_chunked(method, url, **httplib_request_kw)
else:
conn.request(method, url, **httplib_request_kw)
# We are swallowing BrokenPipeError (errno.EPIPE) since the server is
# legitimately able to close the connection after sending a valid response.
# With this behaviour, the received response is still readable.
except BrokenPipeError:
# Python 3
pass
except IOError as e:
# Python 2 and macOS/Linux
# EPIPE and ESHUTDOWN are BrokenPipeError on Python 2, and EPROTOTYPE is needed on macOS
# https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
if e.errno not in {
errno.EPIPE,
errno.ESHUTDOWN,
errno.EPROTOTYPE,
}:
raise
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
# App Engine doesn't have a sock attr
if getattr(conn, "sock", None):
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % read_timeout
)
if read_timeout is Timeout.DEFAULT_TIMEOUT:
conn.sock.settimeout(socket.getdefaulttimeout())
else: # None or a value
conn.sock.settimeout(read_timeout)
# Receive the response from the server
try:
try:
# Python 2.7, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError:
# Python 3
try:
httplib_response = conn.getresponse()
except BaseException as e:
# Remove the TypeError from the exception chain in
# Python 3 (including for exceptions like SystemExit).
# Otherwise it looks like a bug in the code.
six.raise_from(e, None)
except (SocketTimeout, BaseSSLError, SocketError) as e:
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
raise
# AppEngine doesn't have a version attr.
http_version = getattr(conn, "_http_vsn_str", "HTTP/?")
log.debug(
'%s://%s:%s "%s %s %s" %s %s',
self.scheme,
self.host,
self.port,
method,
url,
http_version,
httplib_response.status,
httplib_response.length,
)
try:
assert_header_parsing(httplib_response.msg)
except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3
log.warning(
"Failed to parse headers (url=%s): %s",
self._absolute_url(url),
hpe,
exc_info=True,
)
return httplib_response
def _absolute_url(self, path):
return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
def close(self):
"""
Close all pooled connections and disable the pool.
"""
if self.pool is None:
return
# Disable access to the pool
old_pool, self.pool = self.pool, None
# Close all the HTTPConnections in the pool.
_close_pool_connections(old_pool)
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith("/"):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
if host is not None:
host = _normalize_host(host, scheme=scheme)
# Use explicit default port for comparison when none is given
if self.port and not port:
port = port_by_scheme.get(scheme)
elif not self.port and port == port_by_scheme.get(scheme):
port = None
return (scheme, host, port) == (self.scheme, self.host, self.port)
def urlopen(
self,
method,
url,
body=None,
headers=None,
retries=None,
redirect=True,
assert_same_host=True,
timeout=_Default,
pool_timeout=None,
release_conn=None,
chunked=False,
body_pos=None,
**response_kw
):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param url:
The URL to perform the request on.
:param body:
Data to send in the request body, either :class:`str`, :class:`bytes`,
an iterable of :class:`str`/:class:`bytes`, or a file-like object.
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When ``False``, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param chunked:
If True, urllib3 will send the body using chunked transfer
encoding. Otherwise, urllib3 will send the body using the standard
content-length form. Defaults to False.
:param int body_pos:
Position to seek to in file-like body in the event of a retry or
redirect. Typically this won't need to be set because urllib3 will
auto-populate the value when needed.
:param \\**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
"""
parsed_url = parse_url(url)
destination_scheme = parsed_url.scheme
if headers is None:
headers = self.headers
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
if release_conn is None:
release_conn = response_kw.get("preload_content", True)
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries)
# Ensure that the URL we're connecting to is properly encoded
if url.startswith("/"):
url = six.ensure_str(_encode_target(url))
else:
url = six.ensure_str(parsed_url.url)
conn = None
# Track whether `conn` needs to be released before
# returning/raising/recursing. Update this variable if necessary, and
# leave `release_conn` constant throughout the function. That way, if
# the function recurses, the original value of `release_conn` will be
# passed down into the recursive call, and its value will be respected.
#
# See issue #651 [1] for details.
#
# [1] <https://github.com/urllib3/urllib3/issues/651>
release_this_conn = release_conn
http_tunnel_required = connection_requires_http_tunnel(
self.proxy, self.proxy_config, destination_scheme
)
# Merge the proxy headers. Only done when not using HTTP CONNECT. We
# have to copy the headers dict so we can safely change it without those
# changes being reflected in anyone else's copy.
if not http_tunnel_required:
headers = headers.copy()
headers.update(self.proxy_headers)
# Must keep the exception bound to a separate variable or else Python 3
# complains about UnboundLocalError.
err = None
# Keep track of whether we cleanly exited the except block. This
# ensures we do proper cleanup in finally.
clean_exit = False
# Rewind body position, if needed. Record current position
# for future rewinds in the event of a redirect/retry.
body_pos = set_file_position(body, body_pos)
try:
# Request a connection from the queue.
timeout_obj = self._get_timeout(timeout)
conn = self._get_conn(timeout=pool_timeout)
conn.timeout = timeout_obj.connect_timeout
is_new_proxy_conn = self.proxy is not None and not getattr(
conn, "sock", None
)
if is_new_proxy_conn and http_tunnel_required:
self._prepare_proxy(conn)
# Make the request on the httplib connection object.
httplib_response = self._make_request(
conn,
method,
url,
timeout=timeout_obj,
body=body,
headers=headers,
chunked=chunked,
)
# If we're going to release the connection in ``finally:``, then
# the response doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = conn if not release_conn else None
# Pass method to Response for length checking
response_kw["request_method"] = method
# Import httplib's response into our own wrapper object
response = self.ResponseCls.from_httplib(
httplib_response,
pool=self,
connection=response_conn,
retries=retries,
**response_kw
)
# Everything went great!
clean_exit = True
except EmptyPoolError:
# Didn't get a connection from the pool, no need to clean up
clean_exit = True
release_this_conn = False
raise
except (
TimeoutError,
HTTPException,
SocketError,
ProtocolError,
BaseSSLError,
SSLError,
CertificateError,
) as e:
# Discard the connection for these exceptions. It will be
# replaced during the next _get_conn() call.
clean_exit = False
def _is_ssl_error_message_from_http_proxy(ssl_error):
# We're trying to detect the message 'WRONG_VERSION_NUMBER' but
# SSLErrors are kinda all over the place when it comes to the message,
# so we try to cover our bases here!
message = " ".join(re.split("[^a-z]", str(ssl_error).lower()))
return (
"wrong version number" in message or "unknown protocol" in message
)
# Try to detect a common user error with proxies which is to
# set an HTTP proxy to be HTTPS when it should be 'http://'
# (ie {'http': 'http://proxy', 'https': 'https://proxy'})
# Instead we add a nice error message and point to a URL.
if (
isinstance(e, BaseSSLError)
and self.proxy
and _is_ssl_error_message_from_http_proxy(e)
and conn.proxy
and conn.proxy.scheme == "https"
):
e = ProxyError(
"Your proxy appears to only use HTTP and not HTTPS, "
"try changing your proxy URL to be HTTP. See: "
"https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
"#https-proxy-error-http-proxy",
SSLError(e),
)
elif isinstance(e, (BaseSSLError, CertificateError)):
e = SSLError(e)
elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
e = ProxyError("Cannot connect to proxy.", e)
elif isinstance(e, (SocketError, HTTPException)):
e = ProtocolError("Connection aborted.", e)
retries = retries.increment(
method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
)
retries.sleep()
# Keep track of the error for the retry warning.
err = e
finally:
if not clean_exit:
# We hit some kind of exception, handled or otherwise. We need
# to throw the connection away unless explicitly told not to.
# Close the connection, set the variable to None, and make sure
# we put the None back in the pool to avoid leaking it.
conn = conn and conn.close()
release_this_conn = True
if release_this_conn:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if not conn:
# Try again
log.warning(
"Retrying (%r) after connection broken by '%r': %s", retries, err, url
)
return self.urlopen(
method,
url,
body,
headers,
retries,
redirect,
assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
**response_kw
)
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
if response.status == 303:
# Change the method according to RFC 9110, Section 15.4.4.
method = "GET"
# And lose the body not to transfer anything sensitive.
body = None
headers = HTTPHeaderDict(headers)._prepare_for_method_change()
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
response.drain_conn()
raise
return response
response.drain_conn()
retries.sleep_for_retry(response)
log.debug("Redirecting %s -> %s", url, redirect_location)
return self.urlopen(
method,
redirect_location,
body,
headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
**response_kw
)
# Check if we should retry the HTTP response.
has_retry_after = bool(response.headers.get("Retry-After"))
if retries.is_retry(method, response.status, has_retry_after):
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_status:
response.drain_conn()
raise
return response
response.drain_conn()
retries.sleep(response)
log.debug("Retry: %s", url)
return self.urlopen(
method,
url,
body,
headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
**response_kw
)
return response
| HTTPConnectionPool |
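Illustrative usage sketch for the row above (not part of the dataset entry): a minimal way to drive this pool through its public request path, assuming urllib3 1.26.x is installed and the host is reachable.

from urllib3 import HTTPConnectionPool, Retry

# One pool per host; maxsize=2 keeps up to two reusable connections and
# block=True makes callers wait for a free one instead of opening extras.
pool = HTTPConnectionPool("example.org", port=80, maxsize=2, block=True)

# request() comes from RequestMethods and funnels into the urlopen() shown
# above, so retries, redirects and timeouts follow that docstring.
response = pool.request(
    "GET",
    "/",
    retries=Retry(total=3, backoff_factor=0.5),
    timeout=5.0,
)
print(response.status, len(response.data))
pool.close()  # closes all pooled connections and disables the pool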
python | tensorflow__tensorflow | tensorflow/python/framework/registry.py | {
"start": 1123,
"end": 3157
} | class ____(object):
"""Provides a registry for saving objects."""
__slots__ = ["_name", "_registry"]
def __init__(self, name):
"""Creates a new registry."""
self._name = name
self._registry = {}
def register(self, candidate, name=None):
"""Registers a Python object "candidate" for the given "name".
Args:
candidate: The candidate object to add to the registry.
name: An optional string specifying the registry key for the candidate.
If None, candidate.__name__ will be used.
Raises:
KeyError: If same name is used twice.
"""
if not name:
name = candidate.__name__
if name in self._registry:
frame = self._registry[name][_LOCATION_TAG]
raise KeyError(
"Registering two %s with name '%s'! "
"(Previous registration was in %s %s:%d)" %
(self._name, name, frame.name, frame.filename, frame.lineno))
logging.vlog(1, "Registering %s (%s) in %s.", name, candidate, self._name)
# stack trace is [this_function, Register(), user_function,...]
# so the user function is #2.
stack = traceback.extract_stack(limit=3)
stack_index = min(2, len(stack) - 1)
if stack_index >= 0:
location_tag = stack[stack_index]
else:
location_tag = ("UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN")
self._registry[name] = {_TYPE_TAG: candidate, _LOCATION_TAG: location_tag}
def list(self):
"""Lists registered items.
Returns:
A list of names of registered objects.
"""
return self._registry.keys()
def lookup(self, name):
"""Looks up "name".
Args:
name: a string specifying the registry key for the candidate.
Returns:
Registered object if found
Raises:
LookupError: if "name" has not been registered.
"""
name = compat.as_str(name)
if name in self._registry:
return self._registry[name][_TYPE_TAG]
else:
raise LookupError(
"%s registry has no entry for: %s" % (self._name, name))
| Registry |
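A short sketch of the registry API in the row above (illustration only, assuming the Registry class from tensorflow.python.framework.registry is importable): it behaves like a dict that records where each entry was registered and refuses duplicate names.

from tensorflow.python.framework.registry import Registry

ops = Registry("custom_ops")

def relu(x):
    return max(x, 0)

ops.register(relu)                      # key defaults to candidate.__name__
ops.register(lambda x: x * 2, name="double")

print(sorted(ops.list()))               # ['double', 'relu']
print(ops.lookup("relu")(-3))           # 0

try:
    ops.register(relu)                  # registering the same name twice
except KeyError as err:
    print("duplicate registration:", err)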
python | pyparsing__pyparsing | examples/adventureEngine.py | {
"start": 6265,
"end": 6591
} | class ____(Command):
def __init__(self, quals):
super().__init__("INV", "taking inventory")
@staticmethod
def help_description():
return "INVENTORY or INV or I - lists what items you have"
def _do_command(self, player):
print(f"You have {enumerate_items(player.inv)}.")
| InventoryCommand |
python | allegroai__clearml | clearml/backend_api/services/v2_23/events.py | {
"start": 148950,
"end": 153287
} | class ____(Request):
"""
Used to compare scalar stats histogram of multiple tasks
:param tasks: List of task Task IDs. Maximum amount of tasks is 100
:type tasks: Sequence[str]
:param samples: The amount of histogram points to return. Optional, the default
value is 6000
:type samples: int
:param key: Histogram x axis to use: iter - iteration number iso_time - event
time as ISO formatted string timestamp - event timestamp as milliseconds since
epoch
:type key: ScalarKeyEnum
    :param model_events: If set, model events are retrieved. Otherwise task
    events are retrieved
:type model_events: bool
"""
_service = "events"
_action = "multi_task_scalar_metrics_iter_histogram"
_version = "2.23"
_schema = {
"definitions": {
"scalar_key_enum": {
"enum": ["iter", "timestamp", "iso_time"],
"type": "string",
}
},
"properties": {
"key": {
"$ref": "#/definitions/scalar_key_enum",
"description": "\n Histogram x axis to use:\n iter - iteration number\n iso_time - event time as ISO formatted string\n timestamp - event timestamp as milliseconds since epoch\n ",
},
"model_events": {
"default": False,
"description": "If set then the retrieving model events. Otherwise task events",
"type": "boolean",
},
"samples": {
"description": "The amount of histogram points to return. Optional, the default value is 6000",
"type": "integer",
},
"tasks": {
"description": "List of task Task IDs. Maximum amount of tasks is 100",
"items": {"description": "Task ID", "type": "string"},
"type": "array",
},
},
"required": ["tasks"],
"type": "object",
}
def __init__(
self,
tasks: List[str],
samples: Optional[int] = None,
key: Any = None,
model_events: Optional[bool] = False,
**kwargs: Any
) -> None:
super(MultiTaskScalarMetricsIterHistogramRequest, self).__init__(**kwargs)
self.tasks = tasks
self.samples = samples
self.key = key
self.model_events = model_events
@schema_property("tasks")
def tasks(self) -> List[str]:
return self._property_tasks
@tasks.setter
def tasks(self, value: List[str]) -> None:
if value is None:
self._property_tasks = None
return
self.assert_isinstance(value, "tasks", (list, tuple))
self.assert_isinstance(value, "tasks", six.string_types, is_array=True)
self._property_tasks = value
@schema_property("samples")
def samples(self) -> Optional[int]:
return self._property_samples
@samples.setter
def samples(self, value: Optional[int]) -> None:
if value is None:
self._property_samples = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "samples", six.integer_types)
self._property_samples = value
@schema_property("key")
def key(self) -> Any:
return self._property_key
@key.setter
def key(self, value: Any) -> None:
if value is None:
self._property_key = None
return
if isinstance(value, six.string_types):
try:
value = ScalarKeyEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "key", enum.Enum)
self._property_key = value
@schema_property("model_events")
def model_events(self) -> Optional[bool]:
return self._property_model_events
@model_events.setter
def model_events(self, value: Optional[bool]) -> None:
if value is None:
self._property_model_events = None
return
self.assert_isinstance(value, "model_events", (bool,))
self._property_model_events = value
| MultiTaskScalarMetricsIterHistogramRequest |
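A sketch of instantiating the generated request class above (illustration only; the task IDs are hypothetical and the ClearML session that would actually send the request is omitted).

req = MultiTaskScalarMetricsIterHistogramRequest(
    tasks=["task_id_1", "task_id_2"],   # hypothetical IDs, at most 100 allowed
    samples=1000,
    key="iter",                         # the setter coerces strings into ScalarKeyEnum
    model_events=False,
)
print(req.tasks, req.samples, req.key)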
python | getsentry__sentry | tests/sentry/seer/autofix/test_autofix_utils.py | {
"start": 7629,
"end": 9453
} | class ____(TestCase):
def test_autofix_state_validate_parses_nested_structures(self):
state_data = {
"run_id": 1,
"request": {
"project_id": 42,
"organization_id": 123,
"issue": {"id": 999, "title": "Something broke"},
"repos": [
{
"provider": "github",
"owner": "getsentry",
"name": "sentry",
"external_id": "123",
}
],
},
"updated_at": "2025-08-25T12:34:56.000Z",
"status": "PROCESSING",
"codebases": {
"123": {
"repo_external_id": "123",
"file_changes": [],
"is_readable": True,
"is_writeable": False,
}
},
"steps": [{"key": "root_cause_analysis", "id": "rca"}],
"coding_agents": {
"agent-1": {
"id": "agent-1",
"status": "completed",
"name": "Autofixer",
"provider": "cursor_background_agent",
"started_at": "2025-08-25T12:00:00.000Z",
"results": [],
}
},
}
state = AutofixState.validate(state_data)
        # Check that nested fields are parsed into typed objects
assert state.run_id == 1
assert state.status == AutofixStatus.PROCESSING
codebase = state.codebases["123"]
assert codebase.repo_external_id == "123"
# Top-level coding_agents map is parsed with enum status
assert state.coding_agents["agent-1"].status == CodingAgentStatus.COMPLETED
| TestAutofixStateParsing |
python | kamyu104__LeetCode-Solutions | Python/count-special-subsequences.py | {
"start": 628,
"end": 1284
} | class ____(object):
def numberOfSubsequences(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def gcd(a, b):
while b:
a, b = b, a%b
return a
cnt = collections.defaultdict(int)
result = 0
for r in xrange(4, len(nums)-2):
q = r-2
for p in xrange((q-2)+1):
g = gcd(nums[p], nums[q])
cnt[nums[p]//g, nums[q]//g] += 1
for s in xrange(r+2, len(nums)):
g = gcd(nums[s], nums[r])
result += cnt[nums[s]//g, nums[r]//g]
return result
| Solution2 |
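A quick check of the solution in the row above (illustration only). For nums = [1, 2, 3, 4, 3, 6, 1] the only index quadruple (p, q, r, s) with gaps of at least two between consecutive indices is (0, 2, 4, 6), and nums[0] * nums[4] == nums[2] * nums[6] == 3, so the expected count is 1. The class is written in Python 2 style, so a small shim is needed when it is defined in the same module under Python 3.

import collections             # the class body above relies on collections.defaultdict
xrange = range                 # shim so the Python 2-style xrange calls above run on Python 3

print(Solution2().numberOfSubsequences([1, 2, 3, 4, 3, 6, 1]))  # 1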
python | PrefectHQ__prefect | scripts/generate_example_pages.py | {
"start": 797,
"end": 6618
} | class ____(TypedDict, total=False):
title: str
description: str
icon: str
keywords: list[str]
order: int
async def get_examples() -> list[Example]:
examples: list[Example] = []
async for file in _EXAMPLES_DIR.iterdir():
if await file.is_file() and file.suffix == ".py":
text = await file.read_text()
example_frontmatter = extract_front_matter(text)
if not example_frontmatter:
continue
examples.append(
Example(
path=file,
title=example_frontmatter.get("title", ""),
description=example_frontmatter.get("description", ""),
icon=example_frontmatter.get("icon", ""),
keywords=example_frontmatter.get("keywords", []),
order=example_frontmatter.get("order"),
)
)
# Sort examples by order field (if present), then by filename
examples.sort(
key=lambda ex: (
ex.order if ex.order is not None else float("inf"),
ex.path.name,
)
)
return examples
async def convert_example_to_mdx_page(example: Example) -> str:
"""Render a Python code example to Markdown documentation format."""
content = await example.path.read_text()
# Extract frontmatter to check for custom github_url
example_frontmatter = extract_front_matter(content)
lines = _RE_NEWLINE.split(content)
markdown: list[str] = []
code: list[str] = []
for line in lines:
if line == "#" or line.startswith("# "):
if code:
markdown.extend(["```python", *code, "```", ""])
code = []
markdown.append(line[2:])
else:
if markdown and markdown[-1]:
markdown.append("")
if code or line:
code.append(line)
if code:
markdown.extend(["```python", *code, "```", ""])
text = "\n".join(markdown)
if _RE_FRONTMATTER.match(text):
# Strip out frontmatter from text.
if match := _RE_FRONTMATTER.search(text, 4):
# Use custom github_url if provided, otherwise use default
github_url = example_frontmatter.get(
"github_url",
f"{_GITHUB_BASE_URL}{example.path.relative_to(_REPO_ROOT)}",
)
# Use custom link text if this is an external URL
link_text = (
"View full project on GitHub"
if example_frontmatter.get("github_url")
else "View on GitHub"
)
# Using raw HTML for precise placement; most Markdown/MDX renderers will
# preserve the styling while allowing fallback to a plain link if HTML
# is stripped.
github_button = (
f'<a href="{github_url}" target="_blank">{link_text}</a>\n\n'
)
frontmatter = "---\n"
for line in text[: match.end()].split("\n"):
if line.startswith(("title:", "description:", "icon:", "keywords:")):
frontmatter += line + "\n"
frontmatter += "---\n\n"
# Insert the button at the very top of the document.
text = (
frontmatter
+ _AUTOGENERATED_NOTE
+ github_button
+ text[match.end() + 1 :]
)
return text
def extract_front_matter(text: str) -> Frontmatter:
# find the block between the first two "# ---" lines
m = re.search(
r"^(?:# ---\s*\n)(.*?)(?:\n# ---)", text, flags=re.DOTALL | re.MULTILINE
)
if not m:
return {}
# strip leading "# " from each line
fm = "\n".join(line.lstrip("# ").rstrip() for line in m.group(1).splitlines())
return yaml.safe_load(fm)
async def generate_index_page(examples: list[Example]) -> str:
TOP = textwrap.dedent(f"""---
title: Overview
icon: bars
---
{_AUTOGENERATED_NOTE}
Have an example to share? Check out our [contributing guide](/contribute/docs-contribute#contributing-examples) to get started.
<CardGroup cols={{3}}>
""")
BOTTOM = textwrap.dedent("""
</CardGroup>
""")
return (
TOP
+ "\n".join(
f"""
<Card title="{example.title}" icon="{example.icon}" href="/v3/examples/{slugify(example.path.stem)}">
{example.description}
</Card>
"""
for example in examples
)
+ BOTTOM
)
async def update_docs_json(examples: list[Example]) -> None:
docs_json_path = _REPO_ROOT / "docs" / "docs.json"
docs_json = await docs_json_path.read_text()
docs_json = json.loads(docs_json)
tabs = docs_json["navigation"]["tabs"]
for tab in tabs:
if tab["tab"] == "Examples":
tab["pages"] = [
"v3/examples/index",
{
"group": "Examples",
"pages": [
f"v3/examples/{slugify(example.path.stem)}"
for example in examples
],
},
]
break
docs_json["navigation"]["tabs"] = tabs
await docs_json_path.write_text(json.dumps(docs_json, indent=2))
async def main() -> None:
examples = await get_examples()
for example in examples:
text = await convert_example_to_mdx_page(example)
destination_path = _MDX_EXAMPLES_DIR / f"{slugify(example.path.stem)}.mdx"
await destination_path.write_text(text)
index_page = await generate_index_page(examples)
await (_MDX_EXAMPLES_DIR / "index.mdx").write_text(index_page)
await update_docs_json(examples)
if __name__ == "__main__":
anyio.run(main)
| Frontmatter |
python | huggingface__transformers | src/transformers/models/clap/feature_extraction_clap.py | {
"start": 1110,
"end": 18768
} | class ____(SequenceFeatureExtractor):
r"""
Constructs a CLAP feature extractor.
This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
most of the main methods. Users should refer to this superclass for more information regarding those methods.
This class extracts mel-filter bank features from raw speech using a custom numpy implementation of the *Short Time
Fourier Transform* (STFT) which should match pytorch's `torch.stft` equivalent.
Args:
feature_size (`int`, *optional*, defaults to 64):
The feature dimension of the extracted Mel spectrograms. This corresponds to the number of mel filters
(`n_mels`).
sampling_rate (`int`, *optional*, defaults to 48000):
The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). This only serves
to warn users if the audio fed to the feature extractor does not have the same sampling rate.
hop_length (`int`,*optional*, defaults to 480):
Length of the overlapping windows for the STFT used to obtain the Mel Spectrogram. The audio will be split
in smaller `frames` with a step of `hop_length` between each frame.
max_length_s (`int`, *optional*, defaults to 10):
The maximum input length of the model in seconds. This is used to pad the audio.
fft_window_size (`int`, *optional*, defaults to 1024):
Size of the window (in samples) on which the Fourier transform is applied. This controls the frequency
            resolution of the spectrogram. For example, 1024 means that the Fourier transform is computed on windows of 1024 samples.
padding_value (`float`, *optional*, defaults to 0.0):
Padding value used to pad the audio. Should correspond to silences.
return_attention_mask (`bool`, *optional*, defaults to `False`):
Whether or not the model should return the attention masks corresponding to the input.
frequency_min (`float`, *optional*, defaults to 0):
The lowest frequency of interest. The STFT will not be computed for values below this.
frequency_max (`float`, *optional*, defaults to 14000):
The highest frequency of interest. The STFT will not be computed for values above this.
top_db (`float`, *optional*):
The highest decibel value used to convert the mel spectrogram to the log scale. For more details see the
`audio_utils.power_to_db` function
truncation (`str`, *optional*, defaults to `"fusion"`):
Truncation pattern for long audio inputs. Two patterns are available:
- `fusion` will use `_random_mel_fusion`, which stacks 3 random crops from the mel spectrogram and a
downsampled version of the entire mel spectrogram.
            If `config.fusion` is set to True, shorter audios also need to return 4 mels, which will just be a copy
of the original mel obtained from the padded audio.
- `rand_trunc` will select a random crop of the mel spectrogram.
padding (`str`, *optional*, defaults to `"repeatpad"`):
Padding pattern for shorter audio inputs. Three patterns were originally implemented:
- `repeatpad`: the audio is repeated, and then padded to fit the `max_length`.
- `repeat`: the audio is repeated and then cut to fit the `max_length`
- `pad`: the audio is padded.
"""
model_input_names = ["input_features", "is_longer"]
def __init__(
self,
feature_size=64,
sampling_rate=48_000,
hop_length=480,
max_length_s=10,
fft_window_size=1024,
padding_value=0.0,
return_attention_mask=False, # pad inputs to max length with silence token (zero) and no attention mask
frequency_min: float = 0,
frequency_max: float = 14_000,
top_db: Optional[int] = None,
truncation: str = "fusion",
padding: str = "repeatpad",
**kwargs,
):
super().__init__(
feature_size=feature_size,
sampling_rate=sampling_rate,
padding_value=padding_value,
return_attention_mask=return_attention_mask,
**kwargs,
)
self.top_db = top_db
self.truncation = truncation
self.padding = padding
self.fft_window_size = fft_window_size
self.nb_frequency_bins = (fft_window_size >> 1) + 1
self.hop_length = hop_length
self.max_length_s = max_length_s
self.nb_max_samples = max_length_s * sampling_rate
self.sampling_rate = sampling_rate
self.frequency_min = frequency_min
self.frequency_max = frequency_max
self.mel_filters = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins,
num_mel_filters=feature_size,
min_frequency=frequency_min,
max_frequency=frequency_max,
sampling_rate=sampling_rate,
norm=None,
mel_scale="htk",
)
self.mel_filters_slaney = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins,
num_mel_filters=feature_size,
min_frequency=frequency_min,
max_frequency=frequency_max,
sampling_rate=sampling_rate,
norm="slaney",
mel_scale="slaney",
)
def to_dict(self) -> dict[str, Any]:
"""
Serializes this instance to a Python dictionary.
Returns:
`dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance, except for the
mel filter banks, which do not need to be saved or printed as they are too long.
"""
output = copy.deepcopy(self.__dict__)
output["feature_extractor_type"] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _np_extract_fbank_features(self, waveform: np.ndarray, mel_filters: Optional[np.ndarray] = None) -> np.ndarray:
"""
Compute the log-mel spectrogram of the provided `waveform` using the Hann window. In CLAP, two different filter
banks are used depending on the truncation pattern:
- `self.mel_filters`: they correspond to the default parameters of `torchaudio` which can be obtained from
calling `torchaudio.transforms.MelSpectrogram().mel_scale.fb`. These filters are used when `truncation`
is set to `"fusion"`.
        - `self.mel_filters_slaney` : they correspond to the default parameters of `librosa` which used
`librosa.filters.mel` when computing the mel spectrogram. These filters were only used in the original
implementation when the truncation mode is not `"fusion"`.
"""
log_mel_spectrogram = spectrogram(
waveform,
window_function(self.fft_window_size, "hann"),
frame_length=self.fft_window_size,
hop_length=self.hop_length,
power=2.0,
mel_filters=mel_filters,
log_mel="dB",
)
return log_mel_spectrogram.T
def _random_mel_fusion(self, mel, total_frames, chunk_frames):
ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
if len(ranges[1]) == 0:
# if the audio is too short, we just use the first chunk
ranges[1] = [0]
if len(ranges[2]) == 0:
# if the audio is too short, we just use the first chunk
ranges[2] = [0]
# randomly choose index for each part
idx_front = np.random.choice(ranges[0])
idx_middle = np.random.choice(ranges[1])
idx_back = np.random.choice(ranges[2])
mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
mel = torch.tensor(mel[None, None, :])
mel_shrink = torch.nn.functional.interpolate(
mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
)
mel_shrink = mel_shrink[0][0].numpy()
mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
return mel_fusion
def _get_input_mel(self, waveform: np.ndarray, max_length, truncation, padding) -> np.ndarray:
"""
Extracts the mel spectrogram and prepares it for the mode based on the `truncation` and `padding` arguments.
Four different path are possible:
- `truncation="fusion"` and the length of the waveform is greater than the max length: the mel spectrogram
            will be computed on the entire audio. 3 random crops and a downsampled version of the full mel spectrogram
are then stacked together. They will later be used for `feature_fusion`.
- `truncation="rand_trunc"` and the length of the waveform is smaller than the max length: the audio is
padded based on `padding`.
- `truncation="fusion"` and the length of the waveform is smaller than the max length: the audio is padded
based on `padding`, and is repeated `4` times.
- `truncation="rand_trunc"` and the length of the waveform is greater than the max length: the mel
spectrogram will be computed on a random crop of the waveform.
"""
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
longer = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
overflow = len(waveform) - max_length
idx = np.random.randint(0, overflow + 1)
waveform = waveform[idx : idx + max_length]
input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
elif truncation == "fusion":
mel = self._np_extract_fbank_features(waveform, self.mel_filters)
chunk_frames = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
total_frames = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
input_mel = np.stack([mel, mel, mel, mel], axis=0)
longer = False
else:
input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
longer = True
else:
raise NotImplementedError(f"data_truncating {truncation} not implemented")
else:
longer = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
n_repeat = int(max_length / len(waveform))
waveform = np.tile(waveform, n_repeat + 1)[:max_length]
if padding == "repeatpad":
n_repeat = int(max_length / len(waveform))
waveform = np.tile(waveform, n_repeat)
waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
if truncation == "fusion":
input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
else:
input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
return input_mel, longer
def __call__(
self,
raw_speech: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]],
truncation: Optional[str] = None,
padding: Optional[str] = None,
max_length: Optional[int] = None,
sampling_rate: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs,
) -> BatchFeature:
"""
Main method to featurize and prepare for the model one or several sequence(s).
Args:
raw_speech (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`):
The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
stereo, i.e. single float per timestep.
truncation (`str`, *optional*):
Truncation pattern for long audio inputs. Two patterns are available:
- `fusion` will use `_random_mel_fusion`, which stacks 3 random crops from the mel spectrogram and
a downsampled version of the entire mel spectrogram.
                If `config.fusion` is set to True, shorter audios also need to return 4 mels, which will just be a
copy of the original mel obtained from the padded audio.
- `rand_trunc` will select a random crop of the mel spectrogram.
padding (`str`, *optional*):
Padding pattern for shorter audio inputs. Three patterns were originally implemented:
- `repeatpad`: the audio is repeated, and then padded to fit the `max_length`.
- `repeat`: the audio is repeated and then cut to fit the `max_length`
- `pad`: the audio is padded.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
sampling_rate (`int`, *optional*):
The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
`sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition
pipeline.
"""
truncation = truncation if truncation is not None else self.truncation
padding = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}."
)
else:
logger.warning(
f"It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. "
"Failing to do so can result in silent errors that might be hard to debug."
)
is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}")
is_batched = is_batched_numpy or (
isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
)
if is_batched:
raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
elif not is_batched and not isinstance(raw_speech, np.ndarray):
raw_speech = np.asarray(raw_speech, dtype=np.float64)
elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
raw_speech = raw_speech.astype(np.float64)
# always return batch
if not is_batched:
raw_speech = [np.asarray(raw_speech)]
# convert to mel spectrogram, truncate and pad if needed.
padded_inputs = [
self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
for waveform in raw_speech
]
input_mel = []
is_longer = []
for mel, longer in padded_inputs:
input_mel.append(mel)
is_longer.append(longer)
if truncation == "fusion" and sum(is_longer) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
rand_idx = np.random.randint(0, len(input_mel))
is_longer[rand_idx] = True
if isinstance(input_mel[0], list):
input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
# is_longer is a list of bool
is_longer = [[longer] for longer in is_longer]
input_features = {"input_features": input_mel, "is_longer": is_longer}
input_features = BatchFeature(input_features)
if return_tensors is not None:
input_features = input_features.convert_to_tensors(return_tensors)
return input_features
__all__ = ["ClapFeatureExtractor"]
| ClapFeatureExtractor |
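An illustrative call of the feature extractor in the row above (a sketch only, assuming a transformers version with CLAP support is installed): a 3 second mono clip goes through the default "repeatpad" padding and "fusion" truncation paths described in the docstrings.

import numpy as np
from transformers import ClapFeatureExtractor

feature_extractor = ClapFeatureExtractor()        # 48 kHz, 10 s max by default
audio = np.zeros(3 * 48_000, dtype=np.float64)    # 3 s of silence, mono

inputs = feature_extractor(audio, sampling_rate=48_000, return_tensors="np")
print(inputs["input_features"].shape)             # (1, 4, num_frames, 64): 4 stacked mels
print(inputs["is_longer"])                        # forced True for one clip when none exceed 10 s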
python | arrow-py__arrow | tests/test_locales.py | {
"start": 3756,
"end": 6464
} | class ____:
def test_describe(self):
assert self.locale.describe("now", only_distance=True) == "instantly"
assert self.locale.describe("now", only_distance=False) == "just now"
def test_format_timeframe(self):
assert self.locale._format_timeframe("hours", 2) == "2 hours"
assert self.locale._format_timeframe("hour", 0) == "an hour"
def test_format_relative_now(self):
result = self.locale._format_relative("just now", "now", 0)
assert result == "just now"
def test_format_relative_past(self):
result = self.locale._format_relative("an hour", "hour", 1)
assert result == "in an hour"
def test_format_relative_future(self):
result = self.locale._format_relative("an hour", "hour", -1)
assert result == "an hour ago"
def test_ordinal_number(self):
assert self.locale.ordinal_number(0) == "0th"
assert self.locale.ordinal_number(1) == "1st"
assert self.locale.ordinal_number(2) == "2nd"
assert self.locale.ordinal_number(3) == "3rd"
assert self.locale.ordinal_number(4) == "4th"
assert self.locale.ordinal_number(10) == "10th"
assert self.locale.ordinal_number(11) == "11th"
assert self.locale.ordinal_number(12) == "12th"
assert self.locale.ordinal_number(13) == "13th"
assert self.locale.ordinal_number(14) == "14th"
assert self.locale.ordinal_number(21) == "21st"
assert self.locale.ordinal_number(22) == "22nd"
assert self.locale.ordinal_number(23) == "23rd"
assert self.locale.ordinal_number(24) == "24th"
assert self.locale.ordinal_number(100) == "100th"
assert self.locale.ordinal_number(101) == "101st"
assert self.locale.ordinal_number(102) == "102nd"
assert self.locale.ordinal_number(103) == "103rd"
assert self.locale.ordinal_number(104) == "104th"
assert self.locale.ordinal_number(110) == "110th"
assert self.locale.ordinal_number(111) == "111th"
assert self.locale.ordinal_number(112) == "112th"
assert self.locale.ordinal_number(113) == "113th"
assert self.locale.ordinal_number(114) == "114th"
assert self.locale.ordinal_number(121) == "121st"
assert self.locale.ordinal_number(122) == "122nd"
assert self.locale.ordinal_number(123) == "123rd"
assert self.locale.ordinal_number(124) == "124th"
def test_meridian_invalid_token(self):
assert self.locale.meridian(7, None) is None
assert self.locale.meridian(7, "B") is None
assert self.locale.meridian(7, "NONSENSE") is None
@pytest.mark.usefixtures("lang_locale")
| TestEnglishLocale |
python | coleifer__peewee | tests/pwiz_integration.py | {
"start": 3083,
"end": 3472
} | class ____(ModelTestCase):
database = db
requires = []
def setUp(self):
if not self.database.is_closed():
self.database.close()
if os.path.exists(self.database.database):
os.unlink(self.database.database)
super(BasePwizTestCase, self).setUp()
self.introspector = Introspector.from_database(self.database)
| BasePwizTestCase |
python | conda__conda | conda/__init__.py | {
"start": 3312,
"end": 4515
} | class ____(CondaError):
def __init__(self, errors: Iterable[CondaError]):
self.errors = errors
super().__init__(None)
def __repr__(self) -> str:
errs = []
for e in self.errors:
if isinstance(e, EnvironmentError) and not isinstance(e, CondaError):
errs.append(str(e))
else:
# We avoid Python casting this back to a str()
# by using e.__repr__() instead of repr(e)
# https://github.com/scrapy/cssselect/issues/34
errs.append(e.__repr__())
res = "\n".join(errs)
return res
def __str__(self) -> str:
return "\n".join(str(e) for e in self.errors) + "\n"
def dump_map(self) -> dict[str, str | tuple[str, ...]]:
return dict(
exception_type=str(type(self)),
exception_name=self.__class__.__name__,
errors=tuple(error.dump_map() for error in self.errors),
error="Multiple Errors Encountered.",
)
def contains(self, exception_class: BaseException | tuple[BaseException]) -> bool:
return any(isinstance(e, exception_class) for e in self.errors)
| CondaMultiError |
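A small sketch exercising the helpers defined above (illustration only, assuming a conda installation is importable; real code receives these errors from conda internals rather than constructing them by hand).

from conda import CondaError, CondaMultiError

errors = [
    CondaError("channel 'defaults' is unreachable"),
    CondaError("package cache is corrupted"),
]
multi = CondaMultiError(errors)

print(multi)                                # one line per wrapped error
print(multi.contains(CondaError))           # True
print(multi.dump_map()["exception_name"])   # CondaMultiError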
python | pytest-dev__pytest | testing/_py/test_local.py | {
"start": 17885,
"end": 28590
} | class ____(CommonFSTests):
def test_join_normpath(self, tmpdir):
assert tmpdir.join(".") == tmpdir
p = tmpdir.join(f"../{tmpdir.basename}")
assert p == tmpdir
p = tmpdir.join(f"..//{tmpdir.basename}/")
assert p == tmpdir
@skiponwin32
def test_dirpath_abs_no_abs(self, tmpdir):
p = tmpdir.join("foo")
assert p.dirpath("/bar") == tmpdir.join("bar")
assert tmpdir.dirpath("/bar", abs=True) == local("/bar")
def test_gethash(self, tmpdir):
from hashlib import md5
from hashlib import sha1 as sha
fn = tmpdir.join("testhashfile")
data = b"hello"
fn.write(data, mode="wb")
assert fn.computehash("md5") == md5(data).hexdigest()
assert fn.computehash("sha1") == sha(data).hexdigest()
with pytest.raises(ValueError):
fn.computehash("asdasd")
def test_remove_removes_readonly_file(self, tmpdir):
readonly_file = tmpdir.join("readonly").ensure()
readonly_file.chmod(0)
readonly_file.remove()
assert not readonly_file.check(exists=1)
def test_remove_removes_readonly_dir(self, tmpdir):
readonly_dir = tmpdir.join("readonlydir").ensure(dir=1)
readonly_dir.chmod(int("500", 8))
readonly_dir.remove()
assert not readonly_dir.check(exists=1)
def test_remove_removes_dir_and_readonly_file(self, tmpdir):
readonly_dir = tmpdir.join("readonlydir").ensure(dir=1)
readonly_file = readonly_dir.join("readonlyfile").ensure()
readonly_file.chmod(0)
readonly_dir.remove()
assert not readonly_dir.check(exists=1)
def test_remove_routes_ignore_errors(self, tmpdir, monkeypatch):
lst = []
monkeypatch.setattr("shutil.rmtree", lambda *args, **kwargs: lst.append(kwargs))
tmpdir.remove()
assert not lst[0]["ignore_errors"]
for val in (True, False):
lst[:] = []
tmpdir.remove(ignore_errors=val)
assert lst[0]["ignore_errors"] == val
def test_initialize_curdir(self):
assert str(local()) == os.getcwd()
@skiponwin32
def test_chdir_gone(self, path1):
p = path1.ensure("dir_to_be_removed", dir=1)
p.chdir()
p.remove()
pytest.raises(error.ENOENT, local)
assert path1.chdir() is None
assert os.getcwd() == str(path1)
with pytest.raises(error.ENOENT):
with p.as_cwd():
raise NotImplementedError
@skiponwin32
def test_chdir_gone_in_as_cwd(self, path1):
p = path1.ensure("dir_to_be_removed", dir=1)
p.chdir()
p.remove()
with path1.as_cwd() as old:
assert old is None
def test_as_cwd(self, path1):
dir = path1.ensure("subdir", dir=1)
old = local()
with dir.as_cwd() as x:
assert x == old
assert local() == dir
assert os.getcwd() == str(old)
def test_as_cwd_exception(self, path1):
old = local()
dir = path1.ensure("subdir", dir=1)
with pytest.raises(ValueError):
with dir.as_cwd():
raise ValueError()
assert old == local()
def test_initialize_reldir(self, path1):
with path1.as_cwd():
p = local("samplefile")
assert p.check()
def test_tilde_expansion(self, monkeypatch, tmpdir):
monkeypatch.setenv("HOME", str(tmpdir))
p = local("~", expanduser=True)
assert p == os.path.expanduser("~")
@pytest.mark.skipif(
not sys.platform.startswith("win32"), reason="case-insensitive only on windows"
)
def test_eq_hash_are_case_insensitive_on_windows(self):
a = local("/some/path")
b = local("/some/PATH")
assert a == b
assert hash(a) == hash(b)
assert a in {b}
assert a in {b: "b"}
def test_eq_with_strings(self, path1):
path1 = path1.join("sampledir")
path2 = str(path1)
assert path1 == path2
assert path2 == path1
path3 = path1.join("samplefile")
assert path3 != path2
assert path2 != path3
def test_eq_with_none(self, path1):
assert path1 != None # noqa: E711
def test_eq_non_ascii_unicode(self, path1):
path2 = path1.join("temp")
path3 = path1.join("ação")
path4 = path1.join("ディレクトリ")
assert path2 != path3
assert path2 != path4
assert path4 != path3
def test_gt_with_strings(self, path1):
path2 = path1.join("sampledir")
path3 = str(path1.join("ttt"))
assert path3 > path2
assert path2 < path3
assert path2 < "ttt"
assert "ttt" > path2
path4 = path1.join("aaa")
lst = [path2, path4, path3]
assert sorted(lst) == [path4, path2, path3]
def test_open_and_ensure(self, path1):
p = path1.join("sub1", "sub2", "file")
with p.open("w", ensure=1, encoding="utf-8") as f:
f.write("hello")
assert p.read_text(encoding="utf-8") == "hello"
def test_write_and_ensure(self, path1):
p = path1.join("sub1", "sub2", "file")
p.write_text("hello", ensure=1, encoding="utf-8")
assert p.read_text(encoding="utf-8") == "hello"
@pytest.mark.parametrize("bin", (False, True))
def test_dump(self, tmpdir, bin):
path = tmpdir.join(f"dumpfile{int(bin)}")
try:
d = {"answer": 42}
path.dump(d, bin=bin)
f = path.open("rb+")
import pickle
dnew = pickle.load(f)
assert d == dnew
finally:
f.close()
def test_setmtime(self):
import tempfile
fd, name = tempfile.mkstemp()
os.close(fd)
try:
# Do not use _pytest.timing here, as we do not want time mocking to affect this test.
mtime = int(time.time()) - 100
path = local(name)
assert path.mtime() != mtime
path.setmtime(mtime)
assert path.mtime() == mtime
path.setmtime()
assert path.mtime() != mtime
finally:
os.remove(name)
def test_normpath(self, path1):
new1 = path1.join("/otherdir")
new2 = path1.join("otherdir")
assert str(new1) == str(new2)
def test_mkdtemp_creation(self):
d = local.mkdtemp()
try:
assert d.check(dir=1)
finally:
d.remove(rec=1)
def test_tmproot(self):
d = local.mkdtemp()
tmproot = local.get_temproot()
try:
assert d.check(dir=1)
assert d.dirpath() == tmproot
finally:
d.remove(rec=1)
def test_chdir(self, tmpdir):
old = local()
try:
res = tmpdir.chdir()
assert str(res) == str(old)
assert os.getcwd() == str(tmpdir)
finally:
old.chdir()
def test_ensure_filepath_withdir(self, tmpdir):
newfile = tmpdir.join("test1", "test")
newfile.ensure()
assert newfile.check(file=1)
newfile.write_text("42", encoding="utf-8")
newfile.ensure()
s = newfile.read_text(encoding="utf-8")
assert s == "42"
def test_ensure_filepath_withoutdir(self, tmpdir):
newfile = tmpdir.join("test1file")
t = newfile.ensure()
assert t == newfile
assert newfile.check(file=1)
def test_ensure_dirpath(self, tmpdir):
newfile = tmpdir.join("test1", "testfile")
t = newfile.ensure(dir=1)
assert t == newfile
assert newfile.check(dir=1)
def test_ensure_non_ascii_unicode(self, tmpdir):
newfile = tmpdir.join("ação", "ディレクトリ")
t = newfile.ensure(dir=1)
assert t == newfile
assert newfile.check(dir=1)
@pytest.mark.xfail(run=False, reason="unreliable est for long filenames")
def test_long_filenames(self, tmpdir):
if sys.platform == "win32":
pytest.skip("win32: work around needed for path length limit")
# see http://codespeak.net/pipermail/py-dev/2008q2/000922.html
# testing paths > 260 chars (which is Windows' limitation, but
# depending on how the paths are used), but > 4096 (which is the
# Linux' limitation) - the behaviour of paths with names > 4096 chars
# is undetermined
newfilename = "/test" * 60 # type:ignore[unreachable,unused-ignore]
l1 = tmpdir.join(newfilename)
l1.ensure(file=True)
l1.write_text("foo", encoding="utf-8")
l2 = tmpdir.join(newfilename)
assert l2.read_text(encoding="utf-8") == "foo"
def test_visit_depth_first(self, tmpdir):
tmpdir.ensure("a", "1")
tmpdir.ensure("b", "2")
p3 = tmpdir.ensure("breadth")
lst = list(tmpdir.visit(lambda x: x.check(file=1)))
assert len(lst) == 3
# check that breadth comes last
assert lst[2] == p3
def test_visit_rec_fnmatch(self, tmpdir):
p1 = tmpdir.ensure("a", "123")
tmpdir.ensure(".b", "345")
lst = list(tmpdir.visit("???", rec="[!.]*"))
assert len(lst) == 1
# check that breadth comes last
assert lst[0] == p1
def test_fnmatch_file_abspath(self, tmpdir):
b = tmpdir.join("a", "b")
assert b.fnmatch(os.sep.join("ab"))
pattern = os.sep.join([str(tmpdir), "*", "b"])
assert b.fnmatch(pattern)
def test_sysfind(self):
name = (sys.platform == "win32" and "cmd") or "test"
x = local.sysfind(name)
assert x.check(file=1)
assert local.sysfind("jaksdkasldqwe") is None
assert local.sysfind(name, paths=[]) is None
x2 = local.sysfind(name, paths=[x.dirpath()])
assert x2 == x
def test_fspath_protocol_other_class(self, fake_fspath_obj):
# py.path is always absolute
py_path = local(fake_fspath_obj)
str_path = fake_fspath_obj.__fspath__()
assert py_path.check(endswith=str_path)
assert py_path.join(fake_fspath_obj).strpath == os.path.join(
py_path.strpath, str_path
)
@pytest.mark.xfail(
reason="#11603", raises=(error.EEXIST, error.ENOENT), strict=False
)
def test_make_numbered_dir_multiprocess_safe(self, tmpdir):
# https://github.com/pytest-dev/py/issues/30
with multiprocessing.Pool() as pool:
results = [
pool.apply_async(batch_make_numbered_dirs, [tmpdir, 100])
for _ in range(20)
]
for r in results:
assert r.get()
| TestLocalPath |
python | walkccc__LeetCode | solutions/1455. Check If a Word Occurs As a Prefix of Any Word in a Sentence/1455.py | {
"start": 0,
"end": 224
} | class ____:
def isPrefixOfWord(self, sentence: str, searchWord: str) -> int:
words = sentence.split()
for i, word in enumerate(words):
if word.startswith(searchWord):
return i + 1
return -1
| Solution |
python | pennersr__django-allauth | allauth/account/forms.py | {
"start": 20626,
"end": 22561
} | class ____(UserForm):
email = EmailField(required=True)
def clean_email(self):
from allauth.account import signals
value = self.cleaned_data["email"].lower()
adapter = get_adapter()
value = adapter.clean_email(value)
users = filter_users_by_email(value)
on_this_account = [u for u in users if u.pk == self.user.pk]
on_diff_account = [u for u in users if u.pk != self.user.pk]
if on_this_account:
raise adapter.validation_error("duplicate_email")
if (
# Email is taken by a different account...
on_diff_account
# We care about not having duplicate emails
and app_settings.UNIQUE_EMAIL
# Enumeration prevention is turned off.
and (not app_settings.PREVENT_ENUMERATION)
):
raise adapter.validation_error("email_taken")
if not EmailAddress.objects.can_add_email(self.user):
raise adapter.validation_error(
"max_email_addresses", app_settings.MAX_EMAIL_ADDRESSES
)
signals._add_email.send(
sender=self.user.__class__,
email=value,
user=self.user,
)
return value
def save(self, request):
if app_settings.EMAIL_VERIFICATION_BY_CODE_ENABLED:
email_address = EmailAddress(
user=self.user, email=self.cleaned_data["email"]
)
flows.email_verification.send_verification_email_to_address(
request, email_address
)
return email_address
elif app_settings.CHANGE_EMAIL:
return EmailAddress.objects.add_new_email(
request, self.user, self.cleaned_data["email"]
)
return EmailAddress.objects.add_email(
request, self.user, self.cleaned_data["email"], confirm=True
)
| AddEmailForm |
python | django__django | tests/gis_tests/test_gis_tests_utils.py | {
"start": 991,
"end": 1558
} | class ____(FuncTestMixin, SimpleTestCase):
@test_mutation()
def test_mutated_attribute(func):
func.attribute = "mutated"
@test_mutation()
def test_mutated_expressions(func):
func.source_expressions.clear()
@test_mutation()
def test_mutated_expression(func):
func.source_expressions[0].name = "mutated"
@test_mutation()
def test_mutated_expression_deep(func):
func.source_expressions[1].value[0] = "mutated"
@test_mutation(raises=False)
def test_not_mutated(func):
pass
| FuncTestMixinTests |
python | great-expectations__great_expectations | tests/integration/metrics/column/test_values_match_regex_values.py | {
"start": 782,
"end": 2856
} | class ____:
@parameterize_batch_for_data_sources(
data_source_configs=SQL_DATA_SOURCES_EXCEPT_SNOWFLAKE,
data=DATA_FRAME,
)
def test_partial_match_characters(self, batch_for_datasource: Batch) -> None:
metric = ColumnValuesMatchRegexValues(column=COLUMN_NAME, regex="ab")
metric_result = batch_for_datasource.compute_metrics(metric)
assert isinstance(metric_result, ColumnValuesMatchRegexValuesResult)
assert sorted(metric_result.value) == ["1ab2", "abc"]
@parameterize_batch_for_data_sources(
data_source_configs=SQL_DATA_SOURCES,
data=DATA_FRAME,
)
def test_special_characters(self, batch_for_datasource: Batch) -> None:
metric = ColumnValuesMatchRegexValues(column=COLUMN_NAME, regex="^(a|d).+")
metric_result = batch_for_datasource.compute_metrics(metric)
assert isinstance(metric_result, ColumnValuesMatchRegexValuesResult)
assert sorted(metric_result.value) == ["abc", "def"]
@parameterize_batch_for_data_sources(
data_source_configs=SQL_DATA_SOURCES,
data=DATA_FRAME_WITH_LOTS_OF_VALUES,
)
def test_default_limit(self, batch_for_datasource: Batch) -> None:
metric = ColumnValuesMatchRegexValues(column=COLUMN_NAME, regex=MATCH_ALL_REGEX)
metric_result = batch_for_datasource.compute_metrics(metric)
assert isinstance(metric_result, ColumnValuesMatchRegexValuesResult)
assert len(metric_result.value) == 20
@parameterize_batch_for_data_sources(
data_source_configs=SQL_DATA_SOURCES,
data=DATA_FRAME_WITH_LOTS_OF_VALUES,
)
def test_custom_limit(self, batch_for_datasource: Batch) -> None:
limit = 7
metric = ColumnValuesMatchRegexValues(
column=COLUMN_NAME, regex=MATCH_ALL_REGEX, limit=limit
)
metric_result = batch_for_datasource.compute_metrics(metric)
assert isinstance(metric_result, ColumnValuesMatchRegexValuesResult)
assert len(metric_result.value) == limit
| TestColumnValuesMatchRegexValues |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor17.py | {
"start": 548,
"end": 698
} | class ____(Generic[T]):
def __new__(cls, *args, **kwargs):
return super().__new__(cls, *args, **kwargs)
def __init__(self, a: T): ...
| D |
python | modin-project__modin | modin/config/envvars.py | {
"start": 44252,
"end": 45208
} | class ____(EnvironmentVariable, type=bool):
"""
Whether to consider all active backends when performing a pre-operation switch for join operations.
Only used when AutoSwitchBackend is active.
By default, only backends already present in the arguments of a join operation are considered when
switching backends. Enabling this flag will allow join operations that are registered
as pre-op switches to consider backends other than those directly present in the arguments.
"""
varname = "MODIN_BACKEND_JOIN_CONSIDER_ALL_BACKENDS"
default = True
@classmethod
def enable(cls) -> None:
"""Enable casting in place when performing a merge operation betwen two different compilers."""
cls.put(True)
@classmethod
def disable(cls) -> None:
"""Disable casting in place when performing a merge operation betwen two different compilers."""
cls.put(False)
| BackendJoinConsiderAllBackends |
python | huggingface__transformers | src/transformers/models/tapas/tokenization_tapas.py | {
"start": 2054,
"end": 5855
} | class ____:
tokens: list[str]
column_ids: list[int]
row_ids: list[int]
segment_ids: list[int]
def _is_inner_wordpiece(token: str):
return token.startswith("##")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip("\n")
vocab[token] = index
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
add_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to encode the sequences with the special tokens relative to their model.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`TapasTruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'drop_rows_to_fit'`: Truncate to a maximum length specified with the argument `max_length`
or to the maximum acceptable input length for the model if that argument is not provided. This will
truncate row by row, removing rows from the table.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
is_split_into_words (`bool`, *optional*, defaults to `False`):
Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
which it will tokenize. This is useful for NER or token classification.
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
"""
| SerializedExample |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 18305,
"end": 18461
} | class ____(models.Model):
name = models.CharField(max_length=50)
history = HistoricalRecords(inherit=True, table_name="base_places_history")
| BasePlace |
python | sqlalchemy__sqlalchemy | test/orm/test_subquery_relations.py | {
"start": 105392,
"end": 108005
} | class ____(
fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL
):
__dialect__ = "default"
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Foo(Base):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
type = Column(String(50))
foo_id = Column(Integer, ForeignKey("foo.id"))
foo = relationship(
lambda: Foo, foreign_keys=foo_id, remote_side=id
)
__mapper_args__ = {
"polymorphic_on": type,
"polymorphic_identity": "foo",
}
class Bar(Foo):
__mapper_args__ = {"polymorphic_identity": "bar"}
@classmethod
def insert_data(cls, connection):
Foo, Bar = cls.classes("Foo", "Bar")
session = Session(connection)
target = Bar(id=1)
b1 = Bar(id=2, foo=Foo(id=3, foo=target))
session.add(b1)
session.commit()
def test_twolevel_subquery_w_polymorphic(self):
Foo, Bar = self.classes("Foo", "Bar")
r = with_polymorphic(Foo, "*", aliased=True)
attr1 = Foo.foo.of_type(r)
attr2 = r.foo
s = fixture_session()
q = (
s.query(Foo)
.filter(Foo.id == 2)
.options(subqueryload(attr1).subqueryload(attr2))
)
self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT foo.id AS foo_id_1, foo.type AS foo_type, "
"foo.foo_id AS foo_foo_id FROM foo WHERE foo.id = :id_1",
[{"id_1": 2}],
),
CompiledSQL(
"SELECT foo_1.id AS foo_1_id, foo_1.type AS foo_1_type, "
"foo_1.foo_id AS foo_1_foo_id, "
"anon_1.foo_foo_id AS anon_1_foo_foo_id "
"FROM (SELECT DISTINCT foo.foo_id AS foo_foo_id "
"FROM foo WHERE foo.id = :id_1) AS anon_1 "
"JOIN foo AS foo_1 ON foo_1.id = anon_1.foo_foo_id",
{"id_1": 2},
),
CompiledSQL(
"SELECT foo.id AS foo_id_1, foo.type AS foo_type, "
"foo.foo_id AS foo_foo_id, foo_1.foo_id AS foo_1_foo_id "
"FROM (SELECT DISTINCT foo.foo_id AS foo_foo_id FROM foo "
"WHERE foo.id = :id_1) AS anon_1 "
"JOIN foo AS foo_1 ON foo_1.id = anon_1.foo_foo_id "
"JOIN foo ON foo.id = foo_1.foo_id",
{"id_1": 2},
),
)
| SelfRefInheritanceAliasedTest |
python | sympy__sympy | sympy/vector/operators.py | {
"start": 1737,
"end": 8565
} | class ____(Expr):
"""
Represents unevaluated Curl.
Examples
========
>>> from sympy.vector import CoordSys3D, Curl
>>> R = CoordSys3D('R')
>>> v = R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k
>>> Curl(v)
Curl(R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k)
"""
def __new__(cls, expr):
expr = sympify(expr)
obj = Expr.__new__(cls, expr)
obj._expr = expr
return obj
def doit(self, **hints):
return curl(self._expr, doit=True)
def curl(vect, doit=True):
"""
Returns the curl of a vector field computed wrt the base scalars
of the given coordinate system.
Parameters
==========
vect : Vector
The vector operand
doit : bool
If True, the result is returned after calling .doit() on
each component. Else, the returned expression contains
Derivative instances
Examples
========
>>> from sympy.vector import CoordSys3D, curl
>>> R = CoordSys3D('R')
>>> v1 = R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k
>>> curl(v1)
0
>>> v2 = R.x*R.y*R.z*R.i
>>> curl(v2)
R.x*R.y*R.j + (-R.x*R.z)*R.k
"""
coord_sys = _get_coord_systems(vect)
if len(coord_sys) == 0:
return Vector.zero
elif len(coord_sys) == 1:
coord_sys = next(iter(coord_sys))
i, j, k = coord_sys.base_vectors()
x, y, z = coord_sys.base_scalars()
h1, h2, h3 = coord_sys.lame_coefficients()
vectx = vect.dot(i)
vecty = vect.dot(j)
vectz = vect.dot(k)
outvec = Vector.zero
outvec += (Derivative(vectz * h3, y) -
Derivative(vecty * h2, z)) * i / (h2 * h3)
outvec += (Derivative(vectx * h1, z) -
Derivative(vectz * h3, x)) * j / (h1 * h3)
outvec += (Derivative(vecty * h2, x) -
Derivative(vectx * h1, y)) * k / (h2 * h1)
if doit:
return outvec.doit()
return outvec
else:
if isinstance(vect, (Add, VectorAdd)):
from sympy.vector import express
try:
cs = next(iter(coord_sys))
args = [express(i, cs, variables=True) for i in vect.args]
except ValueError:
args = vect.args
return VectorAdd.fromiter(curl(i, doit=doit) for i in args)
elif isinstance(vect, (Mul, VectorMul)):
vector = [i for i in vect.args if isinstance(i, (Vector, Cross, Gradient))][0]
scalar = Mul.fromiter(i for i in vect.args if not isinstance(i, (Vector, Cross, Gradient)))
res = Cross(gradient(scalar), vector).doit() + scalar*curl(vector, doit=doit)
if doit:
return res.doit()
return res
elif isinstance(vect, (Cross, Curl, Gradient)):
return Curl(vect)
else:
raise ValueError("Invalid argument for curl")
def divergence(vect, doit=True):
"""
Returns the divergence of a vector field computed wrt the base
scalars of the given coordinate system.
Parameters
==========
vector : Vector
The vector operand
doit : bool
If True, the result is returned after calling .doit() on
each component. Else, the returned expression contains
Derivative instances
Examples
========
>>> from sympy.vector import CoordSys3D, divergence
>>> R = CoordSys3D('R')
>>> v1 = R.x*R.y*R.z * (R.i+R.j+R.k)
>>> divergence(v1)
R.x*R.y + R.x*R.z + R.y*R.z
>>> v2 = 2*R.y*R.z*R.j
>>> divergence(v2)
2*R.z
"""
coord_sys = _get_coord_systems(vect)
if len(coord_sys) == 0:
return S.Zero
elif len(coord_sys) == 1:
if isinstance(vect, (Cross, Curl, Gradient)):
return Divergence(vect)
        # TODO: in case of many coord systems, this gets a random one:
coord_sys = next(iter(coord_sys))
i, j, k = coord_sys.base_vectors()
x, y, z = coord_sys.base_scalars()
h1, h2, h3 = coord_sys.lame_coefficients()
vx = _diff_conditional(vect.dot(i), x, h2, h3) \
/ (h1 * h2 * h3)
vy = _diff_conditional(vect.dot(j), y, h3, h1) \
/ (h1 * h2 * h3)
vz = _diff_conditional(vect.dot(k), z, h1, h2) \
/ (h1 * h2 * h3)
res = vx + vy + vz
if doit:
return res.doit()
return res
else:
if isinstance(vect, (Add, VectorAdd)):
return Add.fromiter(divergence(i, doit=doit) for i in vect.args)
elif isinstance(vect, (Mul, VectorMul)):
vector = [i for i in vect.args if isinstance(i, (Vector, Cross, Gradient))][0]
scalar = Mul.fromiter(i for i in vect.args if not isinstance(i, (Vector, Cross, Gradient)))
res = Dot(vector, gradient(scalar)) + scalar*divergence(vector, doit=doit)
if doit:
return res.doit()
return res
elif isinstance(vect, (Cross, Curl, Gradient)):
return Divergence(vect)
else:
raise ValueError("Invalid argument for divergence")
def gradient(scalar_field, doit=True):
"""
Returns the vector gradient of a scalar field computed wrt the
base scalars of the given coordinate system.
Parameters
==========
scalar_field : SymPy Expr
The scalar field to compute the gradient of
doit : bool
If True, the result is returned after calling .doit() on
each component. Else, the returned expression contains
Derivative instances
Examples
========
>>> from sympy.vector import CoordSys3D, gradient
>>> R = CoordSys3D('R')
>>> s1 = R.x*R.y*R.z
>>> gradient(s1)
R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k
>>> s2 = 5*R.x**2*R.z
>>> gradient(s2)
10*R.x*R.z*R.i + 5*R.x**2*R.k
"""
coord_sys = _get_coord_systems(scalar_field)
if len(coord_sys) == 0:
return Vector.zero
elif len(coord_sys) == 1:
coord_sys = next(iter(coord_sys))
h1, h2, h3 = coord_sys.lame_coefficients()
i, j, k = coord_sys.base_vectors()
x, y, z = coord_sys.base_scalars()
vx = Derivative(scalar_field, x) / h1
vy = Derivative(scalar_field, y) / h2
vz = Derivative(scalar_field, z) / h3
if doit:
return (vx * i + vy * j + vz * k).doit()
return vx * i + vy * j + vz * k
else:
if isinstance(scalar_field, (Add, VectorAdd)):
return VectorAdd.fromiter(gradient(i) for i in scalar_field.args)
if isinstance(scalar_field, (Mul, VectorMul)):
s = _split_mul_args_wrt_coordsys(scalar_field)
return VectorAdd.fromiter(scalar_field / i * gradient(i) for i in s)
return Gradient(scalar_field)
| Curl |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels03.py | {
"start": 315,
"end": 1601
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_data_labels03.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "bar"})
chart.axis_ids = [45693952, 45762816]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$5",
"data_labels": {"value": 1, "position": "outside_end"},
}
)
chart.add_series(
{
"values": "=Sheet1!$B$1:$B$5",
"data_labels": {"value": 1, "position": "inside_base"},
}
)
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | explosion__spaCy | spacy/lang/hr/__init__.py | {
"start": 152,
"end": 251
} | class ____(Language):
lang = "hr"
Defaults = CroatianDefaults
__all__ = ["Croatian"]
| Croatian |
python | pytorch__pytorch | test/functorch/attn_positional.py | {
"start": 255,
"end": 4806
} | class ____(nn.Module):
def __init__(
self,
hidden_size,
num_attention_heads,
attention_probs_dropout_prob,
position_embedding_type=None,
max_position_embeddings=None,
):
super().__init__()
if hidden_size % num_attention_heads != 0:
raise ValueError(
f"The hidden size ({hidden_size}) is not a multiple of the number of attention "
f"heads ({num_attention_heads})"
)
self.num_attention_heads = num_attention_heads
self.attention_head_size = int(hidden_size / num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(hidden_size, self.all_head_size)
self.key = nn.Linear(hidden_size, self.all_head_size)
self.value = nn.Linear(hidden_size, self.all_head_size)
self.dropout = nn.Dropout(attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type
if self.position_embedding_type is not None:
assert max_position_embeddings is not None
self.max_position_embeddings = max_position_embeddings
self.distance_embedding = nn.Embedding(
2 * max_position_embeddings - 1, self.attention_head_size
)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (
self.num_attention_heads,
self.attention_head_size,
)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
past_key_value=None,
):
q = self.query(hidden_states)
k = self.key(hidden_states)
v = self.value(hidden_states)
q = self.transpose_for_scores(q)
k = self.transpose_for_scores(k)
v = self.transpose_for_scores(v)
if past_key_value is not None:
k = torch.cat([past_key_value[0], k], dim=2)
v = torch.cat([past_key_value[1], v], dim=2)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(q, k.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if self.position_embedding_type is not None:
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(
seq_length, dtype=torch.long, device=hidden_states.device
).view(-1, 1)
position_ids_r = torch.arange(
seq_length, dtype=torch.long, device=hidden_states.device
).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(
distance + self.max_position_embeddings - 1
)
positional_embedding = positional_embedding.to(
dtype=q.dtype
) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum(
"bhld,lrd->bhlr", q, positional_embedding
)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum(
"bhld,lrd->bhlr", q, positional_embedding
)
relative_position_scores_key = torch.einsum(
"bhrd,lrd->bhlr", k, positional_embedding
)
attention_scores = (
attention_scores
+ relative_position_scores_query
+ relative_position_scores_key
)
attention_probs = attention_scores
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# # This is actually dropping out entire tokens to attend to, which might
# # seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, v)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
| BertSelfAttention |
python | walkccc__LeetCode | solutions/3282. Reach End of Array With Max Score/3282.py | {
"start": 0,
"end": 174
} | class ____:
# Similar to 3205. Maximum Array Hopping Score I
def findMaximumScore(self, nums: list[int]) -> int:
return sum(itertools.accumulate(nums[:-1], max))
| Solution |
python | realpython__materials | python-built-in-functions/point_v1.py | {
"start": 91,
"end": 548
} | class ____:
def __init__(self, x, y):
self.set_x(x)
self.set_y(y)
def get_x(self):
return self._x
def set_x(self, x):
self._x = self.validate(x)
def get_y(self):
return self._y
def set_y(self, y):
self._y = self.validate(y)
def validate(self, value):
if not isinstance(value, int | float):
raise ValueError("coordinates must be numbers")
return value
| Point |
python | falconry__falcon | falcon/errors.py | {
"start": 67248,
"end": 69477
} | class ____(HTTPError):
"""500 Internal Server Error.
The server encountered an unexpected condition that prevented it
from fulfilling the request.
(See also: RFC 7231, Section 6.6.1)
All the arguments are defined as keyword-only.
Keyword Args:
title (str): Error title (default '500 Internal Server Error').
description (str): Human-friendly description of the error, along with
a helpful suggestion or two.
headers (dict or list): A ``dict`` of header names and values
to set, or a ``list`` of (*name*, *value*) tuples. Both *name* and
*value* must be of type ``str`` or ``StringType``, and only
character values 0x00 through 0xFF may be used on platforms that
use wide characters.
Note:
The Content-Type header, if present, will be overridden. If
you wish to return custom error messages, you can create
your own HTTP error class, and install an error handler
to convert it into an appropriate HTTP response for the
client
Note:
Falcon can process a list of ``tuple`` slightly faster
than a ``dict``.
href (str): A URL someone can visit to find out more information
(default ``None``). Unicode characters are percent-encoded.
href_text (str): If href is given, use this as the friendly
title/description for the link (default 'API documentation
for this error').
code (int): An internal code that customers can reference in their
support request or to help them when searching for knowledge
base articles related to this error (default ``None``).
"""
def __init__(
self,
*,
title: str | None = None,
description: str | None = None,
headers: HeaderArg | None = None,
**kwargs: HTTPErrorKeywordArguments,
):
super().__init__(
status.HTTP_500,
title=title,
description=description,
headers=headers,
**kwargs, # type: ignore[arg-type]
)
| HTTPInternalServerError |
python | jupyterlab__jupyterlab | jupyterlab/labextensions.py | {
"start": 11627,
"end": 12387
} | class ____(BaseExtensionApp):
description = """
Link local npm packages that are not lab extensions.
Links a package to the JupyterLab build process. A linked
package is manually re-installed from its source location when
`jupyter lab build` is run.
"""
should_build = Bool(True, config=True, help="Whether to build the app after the action")
def run_task(self):
self.extra_args = self.extra_args or [os.getcwd()]
options = AppOptions(
app_dir=self.app_dir,
logger=self.log,
labextensions_path=self.labextensions_path,
core_config=self.core_config,
)
return any(link_package(arg, app_options=options) for arg in self.extra_args)
| LinkLabExtensionApp |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType28.py | {
"start": 2727,
"end": 2819
} | class ____(Co_TA[Contra_TA[T_contra]]): ...
# This should generate an error.
| ContraToCo_WithTA |
python | keras-team__keras | keras/src/saving/saving_lib_test.py | {
"start": 2445,
"end": 2870
} | class ____(keras.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dense1 = MyDense(1, name="my_dense_1")
self.dense2 = MyDense(1, name="my_dense_2")
def call(self, inputs):
out = self.dense1(inputs)
return self.dense2(out)
def one(self):
return 1
@keras.saving.register_keras_serializable(package="my_custom_package")
| CustomModelX |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/hooks/batch_client.py | {
"start": 4951,
"end": 21972
} | class ____(AwsBaseHook):
"""
Interact with AWS Batch.
Provide thick wrapper around :external+boto3:py:class:`boto3.client("batch") <Batch.Client>`.
:param max_retries: exponential back-off retries, 4200 = 48 hours;
polling is only used when waiters is None
:param status_retries: number of HTTP retries to get job status, 10;
polling is only used when waiters is None
.. note::
Several methods use a default random delay to check or poll for job status, i.e.
``random.uniform(DEFAULT_DELAY_MIN, DEFAULT_DELAY_MAX)``
Using a random interval helps to avoid AWS API throttle limits
when many concurrent tasks request job-descriptions.
To modify the global defaults for the range of jitter allowed when a
random delay is used to check Batch job status, modify these defaults, e.g.:
.. code-block::
BatchClient.DEFAULT_DELAY_MIN = 0
BatchClient.DEFAULT_DELAY_MAX = 5
When explicit delay values are used, a 1 second random jitter is applied to the
delay (e.g. a delay of 0 sec will be a ``random.uniform(0, 1)`` delay. It is
generally recommended that random jitter is added to API requests. A
convenience method is provided for this, e.g. to get a random delay of
10 sec +/- 5 sec: ``delay = BatchClient.add_jitter(10, width=5, minima=0)``
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
- https://docs.aws.amazon.com/general/latest/gr/api-retries.html
- https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
"""
MAX_RETRIES = 4200
STATUS_RETRIES = 10
# delays are in seconds
DEFAULT_DELAY_MIN = 1
DEFAULT_DELAY_MAX = 10
FAILURE_STATE = "FAILED"
SUCCESS_STATE = "SUCCEEDED"
RUNNING_STATE = "RUNNING"
INTERMEDIATE_STATES = (
"SUBMITTED",
"PENDING",
"RUNNABLE",
"STARTING",
RUNNING_STATE,
)
COMPUTE_ENVIRONMENT_TERMINAL_STATUS = ("VALID", "DELETED")
COMPUTE_ENVIRONMENT_INTERMEDIATE_STATUS = ("CREATING", "UPDATING", "DELETING")
JOB_QUEUE_TERMINAL_STATUS = ("VALID", "DELETED")
JOB_QUEUE_INTERMEDIATE_STATUS = ("CREATING", "UPDATING", "DELETING")
def __init__(
self, *args, max_retries: int | None = None, status_retries: int | None = None, **kwargs
) -> None:
# https://github.com/python/mypy/issues/6799 hence type: ignore
super().__init__(client_type="batch", *args, **kwargs) # type: ignore
self.max_retries = max_retries or self.MAX_RETRIES
self.status_retries = status_retries or self.STATUS_RETRIES
@property
def client(self) -> BatchProtocol | botocore.client.BaseClient:
"""
An AWS API client for Batch services.
:return: a boto3 'batch' client for the ``.region_name``
"""
return self.conn
def terminate_job(self, job_id: str, reason: str) -> dict:
"""
Terminate a Batch job.
:param job_id: a job ID to terminate
:param reason: a reason to terminate job ID
:return: an API response
"""
response = self.get_conn().terminate_job(jobId=job_id, reason=reason)
self.log.info(response)
return response
def check_job_success(self, job_id: str) -> bool:
"""
Check the final status of the Batch job.
Return True if the job 'SUCCEEDED', else raise an AirflowException.
:param job_id: a Batch job ID
:raises: AirflowException
"""
job = self.get_job_description(job_id)
job_status = job.get("status")
if job_status == self.SUCCESS_STATE:
self.log.info("AWS Batch job (%s) succeeded: %s", job_id, job)
return True
if job_status == self.FAILURE_STATE:
raise AirflowException(f"AWS Batch job ({job_id}) failed: {job}")
if job_status in self.INTERMEDIATE_STATES:
raise AirflowException(f"AWS Batch job ({job_id}) is not complete: {job}")
raise AirflowException(f"AWS Batch job ({job_id}) has unknown status: {job}")
def wait_for_job(
self,
job_id: str,
delay: int | float | None = None,
get_batch_log_fetcher: Callable[[str], AwsTaskLogFetcher | None] | None = None,
) -> None:
"""
Wait for Batch job to complete.
:param job_id: a Batch job ID
:param delay: a delay before polling for job status
:param get_batch_log_fetcher : a method that returns batch_log_fetcher
:raises: AirflowException
"""
self.delay(delay)
self.poll_for_job_running(job_id, delay)
batch_log_fetcher = None
try:
if get_batch_log_fetcher:
batch_log_fetcher = get_batch_log_fetcher(job_id)
if batch_log_fetcher:
batch_log_fetcher.start()
self.poll_for_job_complete(job_id, delay)
finally:
if batch_log_fetcher:
batch_log_fetcher.stop()
batch_log_fetcher.join()
self.log.info("AWS Batch job (%s) has completed", job_id)
def poll_for_job_running(self, job_id: str, delay: int | float | None = None) -> None:
"""
Poll for job running.
The status that indicates a job is running or already complete are: 'RUNNING'|'SUCCEEDED'|'FAILED'.
So the status options that this will wait for are the transitions from:
'SUBMITTED'>'PENDING'>'RUNNABLE'>'STARTING'>'RUNNING'|'SUCCEEDED'|'FAILED'
The completed status options are included for cases where the status
changes too quickly for polling to detect a RUNNING status that moves
quickly from STARTING to RUNNING to completed (often a failure).
:param job_id: a Batch job ID
:param delay: a delay before polling for job status
:raises: AirflowException
"""
self.delay(delay)
running_status = [self.RUNNING_STATE, self.SUCCESS_STATE, self.FAILURE_STATE]
self.poll_job_status(job_id, running_status)
def poll_for_job_complete(self, job_id: str, delay: int | float | None = None) -> None:
"""
Poll for job completion.
The status that indicates job completion are: 'SUCCEEDED'|'FAILED'.
So the status options that this will wait for are the transitions from:
'SUBMITTED'>'PENDING'>'RUNNABLE'>'STARTING'>'RUNNING'>'SUCCEEDED'|'FAILED'
:param job_id: a Batch job ID
:param delay: a delay before polling for job status
:raises: AirflowException
"""
self.delay(delay)
complete_status = [self.SUCCESS_STATE, self.FAILURE_STATE]
self.poll_job_status(job_id, complete_status)
def poll_job_status(self, job_id: str, match_status: list[str]) -> bool:
"""
Poll for job status using an exponential back-off strategy (with max_retries).
:param job_id: a Batch job ID
:param match_status: a list of job status to match; the Batch job status are:
'SUBMITTED'|'PENDING'|'RUNNABLE'|'STARTING'|'RUNNING'|'SUCCEEDED'|'FAILED'
:raises: AirflowException
"""
for retries in range(1 + self.max_retries):
if retries:
pause = self.exponential_delay(retries)
self.log.info(
"AWS Batch job (%s) status check (%d of %d) in the next %.2f seconds",
job_id,
retries,
self.max_retries,
pause,
)
self.delay(pause)
job = self.get_job_description(job_id)
job_status = job.get("status")
self.log.info(
"AWS Batch job (%s) check status (%s) in %s",
job_id,
job_status,
match_status,
)
if job_status in match_status:
return True
raise AirflowException(f"AWS Batch job ({job_id}) status checks exceed max_retries")
def get_job_description(self, job_id: str) -> dict:
"""
Get job description (using status_retries).
:param job_id: a Batch job ID
:return: an API response for describe jobs
:raises: AirflowException
"""
for retries in range(self.status_retries):
if retries:
pause = self.exponential_delay(retries)
self.log.info(
"AWS Batch job (%s) description retry (%d of %d) in the next %.2f seconds",
job_id,
retries,
self.status_retries,
pause,
)
self.delay(pause)
try:
response = self.get_conn().describe_jobs(jobs=[job_id])
return self.parse_job_description(job_id, response)
except AirflowException as err:
self.log.warning(err)
except botocore.exceptions.ClientError as err:
# Allow it to retry in case of exceeded quota limit of requests to AWS API
if err.response.get("Error", {}).get("Code") != "TooManyRequestsException":
raise
self.log.warning(
"Ignored TooManyRequestsException error, original message: %r. "
"Please consider to setup retries mode in boto3, "
"check Amazon Provider AWS Connection documentation for more details.",
str(err),
)
raise AirflowException(
f"AWS Batch job ({job_id}) description error: exceeded status_retries ({self.status_retries})"
)
@staticmethod
def parse_job_description(job_id: str, response: dict) -> dict:
"""
Parse job description to extract description for job_id.
:param job_id: a Batch job ID
:param response: an API response for describe jobs
:return: an API response to describe job_id
:raises: AirflowException
"""
jobs = response.get("jobs", [])
matching_jobs = [job for job in jobs if job.get("jobId") == job_id]
if len(matching_jobs) != 1:
raise AirflowException(f"AWS Batch job ({job_id}) description error: response: {response}")
return matching_jobs[0]
def get_job_awslogs_info(self, job_id: str) -> dict[str, str] | None:
all_info = self.get_job_all_awslogs_info(job_id)
if not all_info:
return None
if len(all_info) > 1:
self.log.warning(
"AWS Batch job (%s) has more than one log stream, only returning the first one.", job_id
)
return all_info[0]
def get_job_all_awslogs_info(self, job_id: str) -> list[dict[str, str]]:
"""
Parse job description to extract AWS CloudWatch information.
:param job_id: AWS Batch Job ID
"""
job_desc = self.get_job_description(job_id=job_id)
job_node_properties = job_desc.get("nodeProperties", {})
job_container_desc = job_desc.get("container", {})
if job_node_properties:
# one log config per node
log_configs = [
p.get("container", {}).get("logConfiguration", {})
for p in job_node_properties.get("nodeRangeProperties", {})
]
# one stream name per attempt
stream_names = [a.get("container", {}).get("logStreamName") for a in job_desc.get("attempts", [])]
elif job_container_desc:
log_configs = [job_container_desc.get("logConfiguration", {})]
stream_name = job_container_desc.get("logStreamName")
stream_names = [stream_name] if stream_name is not None else []
else:
raise AirflowException(
f"AWS Batch job ({job_id}) is not a supported job type. "
"Supported job types: container, array, multinode."
)
        # If the user selected a logDriver other than "awslogs", then CloudWatch logging is disabled.
if any(c.get("logDriver", "awslogs") != "awslogs" for c in log_configs):
self.log.warning(
"AWS Batch job (%s) uses non-aws log drivers. AWS CloudWatch logging disabled.", job_id
)
return []
if not stream_names:
# If this method is called very early after starting the AWS Batch job,
# there is a possibility that the AWS CloudWatch Stream Name would not exist yet.
# This can also happen in case of misconfiguration.
self.log.warning("AWS Batch job (%s) doesn't have any AWS CloudWatch Stream.", job_id)
return []
# Try to get user-defined log configuration options
log_options = [c.get("options", {}) for c in log_configs]
# cross stream names with options (i.e. attempts X nodes) to generate all log infos
result = []
for stream, option in itertools.product(stream_names, log_options):
result.append(
{
"awslogs_stream_name": stream,
# If the user did not specify anything, the default settings are:
# awslogs-group = /aws/batch/job
# awslogs-region = `same as AWS Batch Job region`
"awslogs_group": option.get("awslogs-group", "/aws/batch/job"),
"awslogs_region": option.get("awslogs-region", self.conn_region_name),
}
)
return result
@staticmethod
def add_jitter(delay: int | float, width: int | float = 1, minima: int | float = 0) -> float:
"""
Use delay +/- width for random jitter.
Adding jitter to status polling can help to avoid
AWS Batch API limits for monitoring Batch jobs with
a high concurrency in Airflow tasks.
:param delay: number of seconds to pause;
delay is assumed to be a positive number
:param width: delay +/- width for random jitter;
width is assumed to be a positive number
:param minima: minimum delay allowed;
minima is assumed to be a non-negative number
:return: uniform(delay - width, delay + width) jitter
and it is a non-negative number
"""
delay = abs(delay)
width = abs(width)
minima = abs(minima)
lower = max(minima, delay - width)
upper = delay + width
return random.uniform(lower, upper)
@staticmethod
def delay(delay: int | float | None = None) -> None:
"""
Pause execution for ``delay`` seconds.
:param delay: a delay to pause execution using ``time.sleep(delay)``;
a small 1 second jitter is applied to the delay.
.. note::
This method uses a default random delay, i.e.
``random.uniform(DEFAULT_DELAY_MIN, DEFAULT_DELAY_MAX)``;
using a random interval helps to avoid AWS API throttle limits
when many concurrent tasks request job-descriptions.
"""
if delay is None:
delay = random.uniform(BatchClientHook.DEFAULT_DELAY_MIN, BatchClientHook.DEFAULT_DELAY_MAX)
else:
delay = BatchClientHook.add_jitter(delay)
time.sleep(delay)
@staticmethod
def exponential_delay(tries: int) -> float:
"""
Apply an exponential back-off delay, with random jitter.
There is a maximum interval of 10 minutes (with random jitter between 3 and 10 minutes).
This is used in the :py:meth:`.poll_for_job_status` method.
Examples of behavior:
.. code-block:: python
def exp(tries):
max_interval = 600.0 # 10 minutes in seconds
delay = 1 + pow(tries * 0.6, 2)
delay = min(max_interval, delay)
print(delay / 3, delay)
for tries in range(10):
exp(tries)
# 0.33 1.0
# 0.45 1.35
# 0.81 2.44
# 1.41 4.23
# 2.25 6.76
# 3.33 10.00
# 4.65 13.95
# 6.21 18.64
# 8.01 24.04
# 10.05 30.15
.. seealso::
- https://docs.aws.amazon.com/general/latest/gr/api-retries.html
- https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
:param tries: Number of tries
"""
max_interval = 600.0 # results in 3 to 10 minute delay
delay = 1 + pow(tries * 0.6, 2)
delay = min(max_interval, delay)
return random.uniform(delay / 3, delay)
| BatchClientHook |
python | pytorch__pytorch | torch/distributed/checkpoint/hf_storage.py | {
"start": 1143,
"end": 7760
} | class ____(FileSystemWriter):
"""
A writer that writes to storage in the huggingface safetensors format.
"""
def __init__(
self,
path: str,
fqn_to_index_mapping: Optional[dict[str, int]] = None,
thread_count: int = 1,
save_distributed: bool = False,
enable_consolidation: bool = False,
thread_count_consolidation: int = 1,
) -> None:
"""
Initialize the huggingface writer pointing to path.
Args:
path: directory where the checkpoint will be read from.
fqn_to_index_mapping: A mapping from tensor FQN to the index of the file that the tensor should be written to.
Indices are from 1 to N, where N is the number of files. If not provided,
the tensors will be written to a single file. If none, then all the tensors on the
same rank will be written to the same file.
thread_count: Number of threads to use to write distributed checkpoint. Default to 1.
save_distributed: If True, save the checkpoint using distributed APIs where every rank saves its own shard.
Default is False which assumes rank-0 checkpointing of the full state_dict.
enable_consolidation: If True, consolidate the sharded checkpoint after saving. The sharded tensors will be
saved to path/sharded and the full tensors will be saved to path. Default to False.
thread_count_consolidation: Number of threads to use for parallel processing of saving data
to consolidated output files. Default to 1.
"""
super().__init__(
path=path,
serialization_format=SerializationFormat.SAFETENSORS,
thread_count=thread_count,
)
self.fqn_to_index_mapping: Optional[dict[str, int]] = fqn_to_index_mapping
self.save_distributed: bool = save_distributed
self.enable_consolidation: bool = enable_consolidation
self.consolidated_output_path: Optional[str] = None
if self.enable_consolidation:
self.consolidated_output_path = str(self.path)
self.path = self.fs.concat_path(self.path, SHARDED_DIR_NAME)
self.thread_count_consolidation = thread_count_consolidation
def prepare_global_plan(self, plans: list[SavePlan]) -> list[SavePlan]:
new_plans = []
for i, plan in enumerate(plans, start=1):
storage_data: dict[str, Any] = {}
if self.fqn_to_index_mapping is not None:
storage_data["fqn_to_index_mapping"] = self.fqn_to_index_mapping
if self.save_distributed:
storage_data["shard_index"] = i
new_plans.append(dataclasses.replace(plan, storage_data=storage_data))
return new_plans
def write_data(
self,
plan: SavePlan,
planner: SavePlanner,
) -> Future[list[WriteResult]]:
if len(plan.items) == 0:
fut: Future = Future()
fut.set_result([])
return fut
# storage_plan is a map from key to file index
storage_data: dict[str, Any] = plan.storage_data
storage_plan: Optional[dict[str, int]] = None
shard_index: Optional[int] = None
if "fqn_to_index_mapping" in storage_data:
storage_plan = storage_data["fqn_to_index_mapping"]
if "shard_index" in storage_data:
shard_index = storage_data["shard_index"]
buckets = self._split_by_storage_plan(storage_plan, plan.items)
highest_index = max(storage_plan.values()) if storage_plan is not None else 1
file_queue: queue.Queue = queue.Queue()
for file_index, write_items in buckets.items():
file_name = _gen_file_name(file_index, highest_index, shard_index)
file_queue.put(
(self.fs.concat_path(self.path, file_name), file_name, write_items)
)
return super()._write_data(planner, file_queue)
def finish(self, metadata: Metadata, results: list[list[WriteResult]]) -> None:
if self.save_distributed and not self.enable_consolidation:
# if we are saving distributed, without consolidating,
# then we have no metadata to write because a metadata
# file with fqn to file mapping doesn't make sense
# in this case, because fqns will be in multiple files
logger.info("Not consolidating sharded checkpoint in finish step.")
return
if self.save_distributed:
fqn_to_index_mapping: dict[str, int] = (
self.fqn_to_index_mapping
if self.fqn_to_index_mapping is not None
else dict.fromkeys(metadata.state_dict_metadata.keys(), 1)
)
return consolidate_safetensors_files(
input_dir=str(self.path),
output_dir=self.consolidated_output_path, # type: ignore[arg-type]
num_threads=self.thread_count_consolidation,
fqn_to_index_mapping=fqn_to_index_mapping,
)
# writing a model.index.safetensors.json file with fqn to file mapping
# for the rank-0 checkpointing case
metadata_to_write = {}
storage_md = {}
total_size = 0
for wr_list in results:
storage_md.update(
{wr.index.fqn: wr.storage_data.relative_path for wr in wr_list}
)
total_size += sum([wr.storage_data.length for wr in wr_list])
metadata_to_write["metadata"] = {"total_size": total_size}
metadata_to_write["weight_map"] = storage_md
metadata_path = self.fs.concat_path(self.path, f"{_metadata_fn}")
with self.fs.create_stream(metadata_path, "w") as metadata_file:
json.dump(metadata_to_write, metadata_file, indent=2)
def _split_by_storage_plan(
self, storage_plan: Optional[dict[str, int]], items: list[WriteItem]
) -> dict[int, list[WriteItem]]:
# storage_plan is a map from key to index
if storage_plan is None:
return {1: items}
buckets = {}
for item in items:
key = item.index.fqn
idx = storage_plan[key]
if idx not in buckets:
buckets[idx] = [item]
else:
buckets[idx].append(item)
return buckets
@property
def metadata_path(self) -> str:
return _metadata_fn
| HuggingFaceStorageWriter |
python | viewflow__viewflow | viewflow/forms/renderers.py | {
"start": 23920,
"end": 24594
} | class ____(LayoutNode):
def __init__(self, text, **kwargs):
self.text = text
        super().__init__(**kwargs)
def append(self, layout: FormLayout, form: forms.Form, root: ElementTree.Element):
wrapper = ElementTree.SubElement(
root,
"div",
{
"class": "vf-form-column mdc-layout-grid__cell mdc-layout-grid__cell--span-12"
},
)
ElementTree.SubElement(
wrapper,
"h6",
{
"class": "mdc-typography--caption",
"style": "margin-bottom: 16px;margin-top: 16px;",
},
).text = self.text
| Caption |
python | Textualize__textual | src/textual/messages.py | {
"start": 275,
"end": 379
} | class ____(Message, verbose=True):
"""Requests message pump to close."""
@rich.repr.auto
| CloseMessages |
python | tensorflow__tensorflow | tensorflow/python/autograph/converters/call_trees.py | {
"start": 1428,
"end": 2600
} | class ____(object):
"""Constructs a tuple representing the positional arguments in a call.
Example (yes, it's legal Python 3):
f(*args1, b, *args2, c, d) -> args1 + (b,) + args2 + (c, d)
"""
def __init__(self):
self._arg_accumulator = []
self._argspec = []
self._finalized = False
def _consume_args(self):
if self._arg_accumulator:
self._argspec.append(
gast.Tuple(elts=self._arg_accumulator, ctx=gast.Load()))
self._arg_accumulator = []
def add_arg(self, a):
self._arg_accumulator.append(a)
def add_stararg(self, a):
self._consume_args()
self._argspec.append(
gast.Call(
gast.Name(
'tuple', ctx=gast.Load(), annotation=None, type_comment=None),
args=[a],
keywords=()))
def finalize(self):
self._consume_args()
self._finalized = True
def to_ast(self):
assert self._finalized
if self._argspec:
result = self._argspec[0]
for i in range(1, len(self._argspec)):
result = gast.BinOp(result, gast.Add(), self._argspec[i])
return result
return gast.Tuple([], gast.Load())
| _ArgTemplateBuilder |
python | django__django | tests/admin_views/test_templatetags.py | {
"start": 563,
"end": 5581
} | class ____(AdminViewBasicTestCase):
request_factory = RequestFactory()
def test_submit_row(self):
"""
submit_row template tag should pass whole context.
"""
request = self.request_factory.get(
reverse("admin:auth_user_change", args=[self.superuser.pk])
)
request.user = self.superuser
admin = UserAdmin(User, site)
extra_context = {"extra": True}
response = admin.change_view(
request, str(self.superuser.pk), extra_context=extra_context
)
template_context = submit_row(response.context_data)
self.assertIs(template_context["extra"], True)
self.assertIs(template_context["show_save"], True)
def test_submit_row_save_as_new_add_permission_required(self):
change_user = User.objects.create_user(
username="change_user", password="secret", is_staff=True
)
change_user.user_permissions.add(
get_perm(User, get_permission_codename("change", User._meta)),
)
request = self.request_factory.get(
reverse("admin:auth_user_change", args=[self.superuser.pk])
)
request.user = change_user
admin = UserAdmin(User, site)
admin.save_as = True
response = admin.change_view(request, str(self.superuser.pk))
template_context = submit_row(response.context_data)
self.assertIs(template_context["show_save_as_new"], False)
add_user = User.objects.create_user(
username="add_user", password="secret", is_staff=True
)
add_user.user_permissions.add(
get_perm(User, get_permission_codename("add", User._meta)),
get_perm(User, get_permission_codename("change", User._meta)),
)
request = self.request_factory.get(
reverse("admin:auth_user_change", args=[self.superuser.pk])
)
request.user = add_user
response = admin.change_view(request, str(self.superuser.pk))
template_context = submit_row(response.context_data)
self.assertIs(template_context["show_save_as_new"], True)
def test_override_show_save_and_add_another(self):
request = self.request_factory.get(
reverse("admin:auth_user_change", args=[self.superuser.pk]),
)
request.user = self.superuser
admin = UserAdmin(User, site)
for extra_context, expected_flag in (
({}, True), # Default.
({"show_save_and_add_another": False}, False),
):
with self.subTest(show_save_and_add_another=expected_flag):
response = admin.change_view(
request,
str(self.superuser.pk),
extra_context=extra_context,
)
template_context = submit_row(response.context_data)
self.assertIs(
template_context["show_save_and_add_another"], expected_flag
)
def test_override_change_form_template_tags(self):
"""
admin_modify template tags follow the standard search pattern
admin/app_label/model/template.html.
"""
article = Article.objects.all()[0]
request = self.request_factory.get(
reverse("admin:admin_views_article_change", args=[article.pk])
)
request.user = self.superuser
admin = ArticleAdmin(Article, site)
extra_context = {"show_publish": True, "extra": True}
response = admin.change_view(
request, str(article.pk), extra_context=extra_context
)
response.render()
self.assertIs(response.context_data["show_publish"], True)
self.assertIs(response.context_data["extra"], True)
self.assertContains(response, 'name="_save"')
self.assertContains(response, 'name="_publish"')
self.assertContains(response, "override-change_form_object_tools")
self.assertContains(response, "override-prepopulated_fields_js")
def test_override_change_list_template_tags(self):
"""
admin_list template tags follow the standard search pattern
admin/app_label/model/template.html.
"""
request = self.request_factory.get(
reverse("admin:admin_views_article_changelist")
)
request.user = self.superuser
admin = ArticleAdmin(Article, site)
admin.date_hierarchy = "date"
admin.search_fields = ("title", "content")
response = admin.changelist_view(request)
response.render()
self.assertContains(response, "override-actions")
self.assertContains(response, "override-change_list_object_tools")
self.assertContains(response, "override-change_list_results")
self.assertContains(response, "override-date_hierarchy")
self.assertContains(response, "override-pagination")
self.assertContains(response, "override-search_form")
| AdminTemplateTagsTest |
python | keras-team__keras | keras/src/utils/dtype_utils_test.py | {
"start": 144,
"end": 1137
} | class ____(test_case.TestCase):
def test_bfloat16_dtype_size(self):
self.assertEqual(dtype_utils.dtype_size("bfloat16"), 16)
def test_float16_dtype_size(self):
self.assertEqual(dtype_utils.dtype_size("float16"), 16)
def test_float32_dtype_size(self):
self.assertEqual(dtype_utils.dtype_size("float32"), 32)
def test_int32_dtype_size(self):
self.assertEqual(dtype_utils.dtype_size("int32"), 32)
def test_float64_dtype_size(self):
self.assertEqual(dtype_utils.dtype_size("float64"), 64)
def test_int64_dtype_size(self):
self.assertEqual(dtype_utils.dtype_size("int64"), 64)
def test_uint8_dtype_size(self):
self.assertEqual(dtype_utils.dtype_size("uint8"), 8)
def test_bool_dtype_size(self):
self.assertEqual(dtype_utils.dtype_size("bool"), 1)
def test_invalid_dtype_size(self):
with self.assertRaises(ValueError):
dtype_utils.dtype_size("unknown_dtype")
| DtypeSizeTests |
python | ray-project__ray | python/ray/util/state/common.py | {
"start": 21302,
"end": 22952
} | class ____(StateSchema):
"""Node State"""
#: The id of the node.
node_id: str = state_column(filterable=True)
#: The ip address of the node.
node_ip: str = state_column(filterable=True)
#: If this is a head node.
is_head_node: bool = state_column(filterable=True)
#: The state of the node.
#:
#: ALIVE: The node is alive.
#: DEAD: The node is dead.
state: TypeNodeStatus = state_column(filterable=True)
#: The state message of the node.
#: This provides more detailed information about the node's state.
state_message: Optional[str] = state_column(filterable=False)
#: The name of the node if it is given by the name argument.
node_name: str = state_column(filterable=True)
#: The total resources of the node.
resources_total: dict = state_column(
filterable=False, format_fn=Humanify.node_resources
)
#: The labels of the node.
labels: dict = state_column(filterable=False)
#: The time when the node (raylet) starts.
start_time_ms: Optional[int] = state_column(
filterable=False, detail=True, format_fn=Humanify.timestamp
)
#: The time when the node exits. The timestamp could be delayed
#: if the node is dead unexpectedly (could be delayed
# up to 30 seconds).
end_time_ms: Optional[int] = state_column(
filterable=False, detail=True, format_fn=Humanify.timestamp
)
# NOTE: Declaring this as dataclass would make __init__ not being called properly.
# NOTE: `JobDetails` will be `None` in the minimal install because Pydantic is not
# installed. Inheriting from `None` raises an exception.
| NodeState |
python | arrow-py__arrow | tests/test_locales.py | {
"start": 109597,
"end": 110630
} | class ____:
def test_format_timeframe(self):
assert self.locale._format_timeframe("now", 0) == "agora"
assert self.locale._format_timeframe("second", 1) == "um segundo"
assert self.locale._format_timeframe("seconds", 30) == "30 segundos"
assert self.locale._format_timeframe("minute", 1) == "um minuto"
assert self.locale._format_timeframe("minutes", 40) == "40 minutos"
assert self.locale._format_timeframe("hour", 1) == "uma hora"
assert self.locale._format_timeframe("hours", 23) == "23 horas"
assert self.locale._format_timeframe("day", 1) == "um dia"
assert self.locale._format_timeframe("days", 12) == "12 dias"
assert self.locale._format_timeframe("month", 1) == "um mês"
assert self.locale._format_timeframe("months", 11) == "11 meses"
assert self.locale._format_timeframe("year", 1) == "um ano"
assert self.locale._format_timeframe("years", 12) == "12 anos"
@pytest.mark.usefixtures("lang_locale")
| TestPortugueseLocale |
python | openai__openai-python | src/openai/resources/beta/threads/runs/runs.py | {
"start": 150544,
"end": 152083
} | class ____:
def __init__(self, runs: AsyncRuns) -> None:
self._runs = runs
self.create = ( # pyright: ignore[reportDeprecated]
_legacy_response.async_to_raw_response_wrapper(
runs.create, # pyright: ignore[reportDeprecated],
)
)
self.retrieve = ( # pyright: ignore[reportDeprecated]
_legacy_response.async_to_raw_response_wrapper(
runs.retrieve, # pyright: ignore[reportDeprecated],
)
)
self.update = ( # pyright: ignore[reportDeprecated]
_legacy_response.async_to_raw_response_wrapper(
runs.update, # pyright: ignore[reportDeprecated],
)
)
self.list = ( # pyright: ignore[reportDeprecated]
_legacy_response.async_to_raw_response_wrapper(
runs.list, # pyright: ignore[reportDeprecated],
)
)
self.cancel = ( # pyright: ignore[reportDeprecated]
_legacy_response.async_to_raw_response_wrapper(
runs.cancel, # pyright: ignore[reportDeprecated],
)
)
self.submit_tool_outputs = ( # pyright: ignore[reportDeprecated]
_legacy_response.async_to_raw_response_wrapper(
runs.submit_tool_outputs, # pyright: ignore[reportDeprecated],
)
)
@cached_property
def steps(self) -> AsyncStepsWithRawResponse:
return AsyncStepsWithRawResponse(self._runs.steps)
| AsyncRunsWithRawResponse |
python | Textualize__textual | src/textual/css/tokenizer.py | {
"start": 4435,
"end": 4573
} | class ____(NamedTuple):
name: str
location: tuple[int, int]
length: int
code: str
@rich.repr.auto(angular=True)
| ReferencedBy |
python | google__jax | tests/x64_context_test.py | {
"start": 1292,
"end": 8307
} | class ____(jtu.JaxTestCase):
@jtu.sample_product(jit=jtu.JIT_IMPLEMENTATION)
def test_make_array(self, jit):
func = jit(lambda: jnp.array(np.float64(0)))
dtype_start = func().dtype
with enable_x64():
self.assertEqual(func().dtype, "float64")
with disable_x64():
self.assertEqual(func().dtype, "float32")
self.assertEqual(func().dtype, dtype_start)
@jtu.sample_product(
jit=jtu.JIT_IMPLEMENTATION,
enable_or_disable=[enable_x64, disable_x64],
)
def test_correctly_capture_default(self, jit, enable_or_disable):
# The fact we defined a jitted function with a block with a different value
# of `jax.config.enable_x64` has no impact on the output.
with enable_or_disable():
func = jit(lambda: jnp.array(np.float64(0)))
func()
expected_dtype = "float64" if jax.config._read("jax_enable_x64") else "float32"
self.assertEqual(func().dtype, expected_dtype)
with enable_x64():
self.assertEqual(func().dtype, "float64")
with disable_x64():
self.assertEqual(func().dtype, "float32")
@jtu.sample_product(jit=jtu.JIT_IMPLEMENTATION)
@jtu.run_on_devices("cpu") # Test presumes CPU precision
def test_near_singular_inverse(self, jit):
rng = jtu.rand_default(self.rng())
@partial(jit, static_argnums=1)
def near_singular_inverse(N=5, eps=1E-40):
X = rng((N, N), dtype='float64')
X = jnp.asarray(X)
X = X.at[-1].mul(eps)
return jnp.linalg.inv(X)
with enable_x64():
result_64 = near_singular_inverse()
self.assertTrue(jnp.all(jnp.isfinite(result_64)))
with disable_x64():
result_32 = near_singular_inverse()
self.assertTrue(jnp.all(~jnp.isfinite(result_32)))
@jtu.sample_product(jit=jtu.JIT_IMPLEMENTATION)
def test_while_loop(self, jit):
@jit
def count_to(N):
return lax.while_loop(lambda x: x < N, lambda x: x + 1.0, 0.0)
with enable_x64():
self.assertArraysEqual(count_to(10), jnp.float64(10), check_dtypes=True)
with disable_x64():
self.assertArraysEqual(count_to(10), jnp.float32(10), check_dtypes=True)
def test_thread_safety(self):
def func_x32():
with disable_x64():
time.sleep(0.1)
return jnp.array(np.int64(0)).dtype
def func_x64():
with enable_x64():
time.sleep(0.1)
return jnp.array(np.int64(0)).dtype
with concurrent.futures.ThreadPoolExecutor() as executor:
x32 = executor.submit(func_x32)
x64 = executor.submit(func_x64)
self.assertEqual(x64.result(), jnp.int64)
self.assertEqual(x32.result(), jnp.int32)
@jax.legacy_prng_key('allow')
@jax.debug_key_reuse(False)
@jtu.ignore_warning(category=UserWarning,
message="Explicitly requested dtype float64 is not available")
def test_jit_cache(self):
if jtu.test_device_matches(["tpu"]):
self.skipTest("64-bit random not available on TPU")
if config.explicit_x64_dtypes.value == config.ExplicitX64Mode.ERROR:
self.skipTest("Test uses float64 which is not available")
f = partial(random.uniform, random.PRNGKey(0), (1,), 'float64', -1, 1)
with disable_x64():
for _ in range(2):
f()
with enable_x64():
for _ in range(2):
f()
def test_convert_element_type(self):
# Regression test for part of https://github.com/jax-ml/jax/issues/5982
with enable_x64():
x = jnp.int64(1)
self.assertEqual(x.dtype, jnp.int64)
y = x.astype(jnp.int32)
self.assertEqual(y.dtype, jnp.int32)
z = jax.jit(lambda x: x.astype(jnp.int32))(x)
self.assertEqual(z.dtype, jnp.int32)
def test_python_scalar(self):
@jax.jit
def f(a):
with enable_x64():
return 2 + a
self.assertEqual(f(1).dtype, jnp.int64)
def test_grad(self):
def fun(x):
with enable_x64(True):
return jnp.sin(x)
self.assertEqual(
jax.grad(fun)(0.5).dtype,
jnp.float64 if jax.config.x64_enabled else jnp.float32,
)
def test_sin(self):
def fun(x):
with enable_x64(True):
x = jnp.asarray(x, dtype=jnp.float64)
return lax.sin(x)
self.assertEqual(fun(0.5).dtype, jnp.float64)
def test_mul(self):
def fun(x, y):
with enable_x64(True):
x = jnp.asarray(x, dtype=jnp.float64)
y = jnp.asarray(y, dtype=jnp.float64)
return lax.mul(x, y)
self.assertEqual(fun(0.5, 1.5).dtype, jnp.float64)
@jtu.sample_product(disable_jit=[True, False])
def test_scan_with_contextmanager(self, disable_jit):
with jax.disable_jit(disable_jit):
def f(a):
def body(carry, _):
with enable_x64():
y = (carry + a).astype(jnp.int64)
assert y.dtype == jnp.int64
z = y.astype(jnp.int32)
return carry, (z, y)
return lax.scan(body, jnp.int32(2), jnp.arange(4))
carry_out, ys_out = f(3)
self.assertEqual(carry_out.dtype, jnp.int32)
self.assertEqual(ys_out[0].dtype, jnp.int32)
self.assertEqual(ys_out[1].dtype, jnp.int64)
def test_custom_jvp(self):
@custom_jvp
def f(x):
return x ** 2.
@f.defjvp
def f_jvp(xs, ts):
x, = xs
t, = ts
self.assertTrue(jax.config.x64_enabled)
return f(x), t * jnp.sin(x)
def g(x):
with enable_x64():
x = jnp.array(x, jnp.float64)
return f(x)
self.assertEqual(g(5.).dtype, jnp.float64)
out_primal, out_tangent = jvp(g, (5.,), (1.,))
self.assertEqual(out_primal.dtype, jnp.float64)
self.assertEqual(out_tangent.dtype, jnp.float64)
with enable_x64(False):
self.assertEqual(g(5.).dtype, jnp.float64)
self.assertEqual(grad(g)(5.).dtype, jnp.float32)
self.assertEqual(grad(grad(g))(5.).dtype, jnp.float32)
self.assertEqual(grad(grad(grad(g)))(5.).dtype, jnp.float32)
with enable_x64(True):
self.assertEqual(g(5.).dtype, jnp.float64)
self.assertEqual(grad(g)(5.).dtype, jnp.float64)
self.assertEqual(grad(grad(g))(5.).dtype, jnp.float64)
self.assertEqual(grad(grad(grad(g)))(5.).dtype, jnp.float64)
def test_custom_vjp(self):
@custom_vjp
def f(x):
return x ** 2.
def f_fwd(x):
return f(x), jnp.sin(x)
def f_bwd(res, t):
return (res * t,)
f.defvjp(f_fwd, f_bwd)
def g(x):
with enable_x64():
x = jnp.array(x, jnp.float64)
return f(x)
with enable_x64(False):
self.assertEqual(g(5.).dtype, jnp.float64)
self.assertEqual(grad(g)(5.).dtype, jnp.float32)
self.assertEqual(grad(grad(g))(5.).dtype, jnp.float32)
self.assertEqual(grad(grad(grad(g)))(5.).dtype, jnp.float32)
with enable_x64(True):
self.assertEqual(g(5.).dtype, jnp.float64)
self.assertEqual(grad(g)(5.).dtype, jnp.float64)
self.assertEqual(grad(grad(g))(5.).dtype, jnp.float64)
self.assertEqual(grad(grad(grad(g)))(5.).dtype, jnp.float64)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| X64ContextTests |
python | astropy__astropy | astropy/utils/xml/tests/test_iterparse.py | {
"start": 2719,
"end": 4758
} | class ____:
def __init__(self, fd, **kwargs):
self._file = fd
self._z = zlib.decompressobj(16 + zlib.MAX_WBITS)
def read(self, requested_length):
# emulate network buffering dynamics by clamping the read size
clamped_length = max(1, min(1 << 24, requested_length))
compressed = self._file.read(clamped_length)
plaintext = self._z.decompress(compressed)
# Only for real local files---just for the testcase
if len(compressed) == 0:
self.close()
return plaintext
def __getattr__(self, attr):
return getattr(self._file, attr)
# test_iterparser_over_read_simple() is a very cut down test,
# of the original more flexible test-case, but without external
# dependencies. The plaintext is compressed and then decompressed
# to provide a better emulation of the original situation where
# the bug was observed.
#
# If a dependency upon 'zlib' is not desired, it would be possible to
# simplify this testcase by replacing the compress/decompress with a
# read() method emulation that always returned more from a buffer
# that was requested.
def test_iterparser_over_read_simple():
# Take the plaintext of 512 tags, and compression it with a
# Gzip-style header (+16), to most closely emulate the behavior
# of most HTTP servers.
zlib_GZIP_STYLE_HEADER = 16
compo = zlib.compressobj(
zlib.Z_BEST_COMPRESSION, zlib.DEFLATED, zlib.MAX_WBITS + zlib_GZIP_STYLE_HEADER
)
# Bytes vs. String .encode()/.decode() for compatibility with Python 3.5.
s = compo.compress(VOTABLE_XML.encode())
s = s + compo.flush()
fd = io.BytesIO(s)
fd.seek(0)
# Finally setup the test of the C-based '_fast_iterparse()' iterator
# and a situation in which it can be called a-la the VOTable Parser.
MINIMUM_REQUESTABLE_BUFFER_SIZE = 1024
uncompressed_fd = UngzipFileWrapper(fd)
iterable = _fast_iterparse(uncompressed_fd.read, MINIMUM_REQUESTABLE_BUFFER_SIZE)
list(iterable)
| UngzipFileWrapper |
python | pytorch__pytorch | torch/_numpy/_dtypes.py | {
"start": 891,
"end": 937
} | class ____(number):
name = "inexact"
| inexact |
python | langchain-ai__langchain | libs/core/langchain_core/agents.py | {
"start": 4879,
"end": 8411
} | class ____(Serializable):
"""Final return value of an `ActionAgent`.
Agents return an `AgentFinish` when they have reached a stopping condition.
"""
return_values: dict
"""Dictionary of return values."""
log: str
"""Additional information to log about the return value.
This is used to pass along the full LLM prediction, not just the parsed out
return value.
For example, if the full LLM prediction was `Final Answer: 2` you may want to just
return `2` as a return value, but pass along the full string as a `log` (for
debugging or observability purposes).
"""
type: Literal["AgentFinish"] = "AgentFinish"
def __init__(self, return_values: dict, log: str, **kwargs: Any):
"""Override init to support instantiation by position for backward compat."""
super().__init__(return_values=return_values, log=log, **kwargs)
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return `True` as this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
Returns:
`["langchain", "schema", "agent"]`
"""
return ["langchain", "schema", "agent"]
@property
def messages(self) -> Sequence[BaseMessage]:
"""Messages that correspond to this observation."""
return [AIMessage(content=self.log)]
def _convert_agent_action_to_messages(
agent_action: AgentAction,
) -> Sequence[BaseMessage]:
"""Convert an agent action to a message.
This code is used to reconstruct the original AI message from the agent action.
Args:
agent_action: Agent action to convert.
Returns:
AIMessage that corresponds to the original tool invocation.
"""
if isinstance(agent_action, AgentActionMessageLog):
return agent_action.message_log
return [AIMessage(content=agent_action.log)]
def _convert_agent_observation_to_messages(
agent_action: AgentAction, observation: Any
) -> Sequence[BaseMessage]:
"""Convert an agent action to a message.
This code is used to reconstruct the original AI message from the agent action.
Args:
agent_action: Agent action to convert.
observation: Observation to convert to a message.
Returns:
`AIMessage` that corresponds to the original tool invocation.
"""
if isinstance(agent_action, AgentActionMessageLog):
return [_create_function_message(agent_action, observation)]
content = observation
if not isinstance(observation, str):
try:
content = json.dumps(observation, ensure_ascii=False)
except Exception:
content = str(observation)
return [HumanMessage(content=content)]
def _create_function_message(
agent_action: AgentAction, observation: Any
) -> FunctionMessage:
"""Convert agent action and observation into a function message.
Args:
agent_action: the tool invocation request from the agent.
observation: the result of the tool invocation.
Returns:
`FunctionMessage` that corresponds to the original tool invocation.
"""
if not isinstance(observation, str):
try:
content = json.dumps(observation, ensure_ascii=False)
except Exception:
content = str(observation)
else:
content = observation
return FunctionMessage(
name=agent_action.tool,
content=content,
)
| AgentFinish |
python | Textualize__textual | docs/examples/guide/layout/vertical_layout.py | {
"start": 80,
"end": 401
} | class ____(App):
CSS_PATH = "vertical_layout.tcss"
def compose(self) -> ComposeResult:
yield Static("One", classes="box")
yield Static("Two", classes="box")
yield Static("Three", classes="box")
if __name__ == "__main__":
app = VerticalLayoutExample()
app.run()
| VerticalLayoutExample |
python | apache__airflow | helm-tests/tests/helm_tests/airflow_core/test_triggerer.py | {
"start": 29524,
"end": 33607
} | class ____:
"""Tests triggerer keda autoscaler."""
def test_should_add_component_specific_labels(self):
docs = render_chart(
values={
"triggerer": {
"keda": {"enabled": True},
"labels": {"test_label": "test_label_value"},
},
},
show_only=["templates/triggerer/triggerer-kedaautoscaler.yaml"],
)
assert "test_label" in jmespath.search("metadata.labels", docs[0])
assert jmespath.search("metadata.labels", docs[0])["test_label"] == "test_label_value"
def test_should_remove_replicas_field(self):
docs = render_chart(
values={
"triggerer": {
"keda": {"enabled": True},
},
},
show_only=["templates/triggerer/triggerer-deployment.yaml"],
)
assert "replicas" not in jmespath.search("spec", docs[0])
@pytest.mark.parametrize(
"executor", ["CeleryExecutor", "CeleryKubernetesExecutor", "CeleryExecutor,KubernetesExecutor"]
)
def test_include_event_source_container_name_in_scaled_object_for_triggerer(self, executor):
docs = render_chart(
values={
"triggerer": {
"keda": {"enabled": True},
},
},
show_only=["templates/triggerer/triggerer-kedaautoscaler.yaml"],
)
assert jmespath.search("spec.scaleTargetRef.envSourceContainerName", docs[0]) == "triggerer"
@pytest.mark.parametrize(
("query", "expected_query"),
[
# default query
(
None,
"SELECT ceil(COUNT(*)::decimal / 1000) FROM trigger",
),
# test custom static query
(
"SELECT ceil(COUNT(*)::decimal / 2000) FROM trigger",
"SELECT ceil(COUNT(*)::decimal / 2000) FROM trigger",
),
# test custom template query
(
'SELECT ceil(COUNT(*)::decimal / {{ mul (include "triggerer.capacity" . | int) 2 }})'
" FROM trigger",
"SELECT ceil(COUNT(*)::decimal / 2000) FROM trigger",
),
],
)
def test_should_use_keda_query(self, query, expected_query):
docs = render_chart(
values={
"triggerer": {
"enabled": True,
"keda": {"enabled": True, **({"query": query} if query else {})},
},
},
show_only=["templates/triggerer/triggerer-kedaautoscaler.yaml"],
)
assert expected_query == jmespath.search("spec.triggers[0].metadata.query", docs[0])
def test_mysql_db_backend_keda_default_value(self):
docs = render_chart(
values={
"data": {"metadataConnection": {"protocol": "mysql"}},
"triggerer": {
"enabled": True,
"keda": {"enabled": True},
},
},
show_only=["templates/triggerer/triggerer-kedaautoscaler.yaml"],
)
assert jmespath.search("spec.triggerers[0].metadata.keda.usePgBouncer", docs[0]) is None
def test_mysql_db_backend_keda(self):
docs = render_chart(
values={
"data": {"metadataConnection": {"protocol": "mysql"}},
"triggerer": {
"enabled": True,
"keda": {"enabled": True},
},
},
show_only=["templates/triggerer/triggerer-kedaautoscaler.yaml"],
)
assert jmespath.search("spec.triggers[0].metadata.queryValue", docs[0]) == "1"
assert jmespath.search("spec.triggers[0].metadata.targetQueryValue", docs[0]) is None
assert jmespath.search("spec.triggers[0].metadata.connectionStringFromEnv", docs[0]) == "KEDA_DB_CONN"
assert jmespath.search("spec.triggers[0].metadata.connectionFromEnv", docs[0]) is None
| TestTriggererKedaAutoScaler |
python | celery__celery | t/unit/app/test_schedules.py | {
"start": 2841,
"end": 3356
} | class ____:
def test_ne(self):
s1 = schedule(10, app=self.app)
s2 = schedule(12, app=self.app)
s3 = schedule(10, app=self.app)
assert s1 == s3
assert s1 != s2
def test_pickle(self):
s1 = schedule(10, app=self.app)
fun, args = s1.__reduce__()
s2 = fun(*args)
assert s1 == s2
# This is needed for test_crontab_parser because datetime.utcnow doesn't pickle
# in python 2
def utcnow():
return datetime.now(timezone.utc)
| test_schedule |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 193462,
"end": 193869
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(
sgqlc.types.non_null(VerifiableDomainOrderField), graphql_name="field"
)
direction = sgqlc.types.Field(
sgqlc.types.non_null(OrderDirection), graphql_name="direction"
)
| VerifiableDomainOrder |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/remote_origin.py | {
"start": 7479,
"end": 11197
} | class ____(IHaveNew, LegacyNamedTupleMixin, CodeLocationOrigin):
"""Identifies a repository location in a Python environment. Dagster creates a gRPC server
for these repository locations on startup.
"""
loadable_target_origin: LoadableTargetOrigin # pyright: ignore[reportIncompatibleMethodOverride]
location_name: str # pyright: ignore[reportIncompatibleMethodOverride]
def __new__(
cls, loadable_target_origin: LoadableTargetOrigin, location_name: Optional[str] = None
):
return super().__new__(
cls,
loadable_target_origin=loadable_target_origin,
location_name=location_name
if location_name
else _assign_loadable_target_origin_name(loadable_target_origin),
)
def get_display_metadata(self) -> Mapping[str, str]:
metadata = {
"python_file": self.loadable_target_origin.python_file,
"module_name": self.loadable_target_origin.module_name,
"autoload_defs_module_name": self.loadable_target_origin.autoload_defs_module_name,
"working_directory": self.loadable_target_origin.working_directory,
"attribute": self.loadable_target_origin.attribute,
"package_name": self.loadable_target_origin.package_name,
"executable_path": self.loadable_target_origin.executable_path,
}
return {key: value for key, value in metadata.items() if value is not None}
def create_location(self, instance: "DagsterInstance") -> NoReturn:
raise DagsterInvariantViolationError(
"A ManagedGrpcPythonEnvCodeLocationOrigin needs a GrpcServerRegistry"
" in order to create a code location."
)
def reload_location(self, instance: "DagsterInstance") -> NoReturn:
raise DagsterInvariantViolationError(
"A ManagedGrpcPythonEnvCodeLocationOrigin needs a GrpcServerRegistry"
" in order to reload a code location."
)
@contextmanager
def create_single_location(
self,
instance: "DagsterInstance",
) -> Iterator["GrpcServerCodeLocation"]:
from dagster._core.remote_representation.code_location import GrpcServerCodeLocation
from dagster._core.remote_representation.grpc_server_registry import GrpcServerRegistry
from dagster._core.workspace.context import WEBSERVER_GRPC_SERVER_HEARTBEAT_TTL
from dagster._grpc.server import GrpcServerCommand
with GrpcServerRegistry(
instance_ref=instance.get_ref(),
server_command=GrpcServerCommand.API_GRPC,
heartbeat_ttl=WEBSERVER_GRPC_SERVER_HEARTBEAT_TTL,
startup_timeout=(
instance.code_server_process_startup_timeout
if instance
else DEFAULT_LOCAL_CODE_SERVER_STARTUP_TIMEOUT
),
wait_for_processes_on_shutdown=instance.wait_for_local_code_server_processes_on_shutdown,
) as grpc_server_registry:
endpoint = grpc_server_registry.get_grpc_endpoint(self)
with GrpcServerCodeLocation(
origin=self,
port=endpoint.port,
socket=endpoint.socket,
host=endpoint.host,
heartbeat=True,
watch_server=False,
grpc_server_registry=grpc_server_registry,
instance=instance,
) as location:
yield location
# Different storage name for backcompat
@whitelist_for_serdes(
storage_name="GrpcServerRepositoryLocationOrigin",
skip_when_empty_fields={"use_ssl", "additional_metadata"},
)
@record_custom
| ManagedGrpcPythonEnvCodeLocationOrigin |
python | tensorflow__tensorflow | tensorflow/lite/python/lite_flex_test.py | {
"start": 5472,
"end": 8804
} | class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
@test_util.run_v2_only
def testFloat(self, enable_mlir):
input_data = constant_op.constant(1., shape=[1])
root = autotrackable.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
root)
converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])
converter.experimental_new_converter = enable_mlir
tflite_model = converter.convert()
# Check the model works with TensorFlow ops.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
test_input = np.array([4.0], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
output_details = interpreter.get_output_details()
expected_output = np.array([24.0], dtype=np.float32)
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertTrue((expected_output == output_data).all())
# Ensure that input TFLite buffer is not reused for ops such as
# `TensorListSetItem`. The example model has a while loop, and the while body
# has a `TensorListSetItem` op which takes the output from a `Where` op.
@test_util.run_v2_only
def testDisableFlexTensorMemoryReusing(self):
@tf.function(input_signature=[
tf.TensorSpec(shape=[2, 3], dtype=tf.float32, name='x')
])
def model(x):
l = list_ops.tensor_list_reserve(
element_dtype=tf.int64, element_shape=[None, 1], num_elements=2)
init_state = (0, x, l)
condition = lambda i, x, l: i < 2
def body(i, x, l):
element = tf.where(x[i])
l = list_ops.tensor_list_set_item(l, i, element)
return i + 1, x, l
_, _, l_final = tf.while_loop(condition, body, init_state)
return list_ops.tensor_list_stack(l_final, element_dtype=tf.int64)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions(
[model.get_concrete_function()])
converter.target_spec.supported_ops = set(
[lite.OpsSet.TFLITE_BUILTINS, lite.OpsSet.SELECT_TF_OPS])
tflite_model = converter.convert()
# Check the model produces correct result.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
test_input = np.array([[1.0, 2.0, 0.0], [0.0, 5.0, 6.0]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
output_details = interpreter.get_output_details()
expected_output = np.array([0, 1, 1, 2], dtype=np.int64)
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertTrue((expected_output == np.ndarray.flatten(output_data)).all())
| FromConcreteFunctionTest |
python | huggingface__transformers | src/transformers/models/phi4_multimodal/image_processing_phi4_multimodal_fast.py | {
"start": 1076,
"end": 1371
} | class ____(ImagesKwargs, total=False):
r"""
patch_size (`int`, *optional*):
The size of the patch.
dynamic_hd (`int`, *optional*):
The maximum number of crops per image.
"""
patch_size: int
dynamic_hd: int
@auto_docstring
| Phi4MultimodalImageProcessorKwargs |
python | huggingface__transformers | src/transformers/models/falcon_h1/modular_falcon_h1.py | {
"start": 2900,
"end": 7919
} | class ____(HybridMambaAttentionDynamicCache):
"""
A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache
(which has a constant shape regardless of seq_len).
This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states`
and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. The expected shape for each tensor
For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`,
while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors).
For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors),
while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`,
and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`.
"""
def __init__(
self,
config: FalconH1Config,
batch_size: int,
dtype: torch.dtype = torch.float16,
devices: Optional[list[str]] = None,
):
self.seqlen_offset = 0
self.dtype = dtype
self.has_previous_state = False
self.conv_kernel_size = config.mamba_d_conv
self.intermediate_size = (
config.mamba_d_ssm if config.mamba_d_ssm is not None else int(config.mamba_expand * config.hidden_size)
)
self.conv_states = {
i: torch.zeros(
batch_size,
self.intermediate_size + 2 * config.mamba_n_groups * config.mamba_d_state,
self.conv_kernel_size,
device=devices[i],
dtype=dtype,
)
for i in range(config.num_hidden_layers)
}
self.ssm_states = {
i: torch.zeros(
batch_size,
config.mamba_n_heads,
config.mamba_d_head,
config.mamba_d_state,
device=devices[i],
dtype=dtype,
)
for i in range(config.num_hidden_layers)
}
self.transformer_layers = []
for i in range(config.num_hidden_layers):
self.transformer_layers.append(i)
self.key_cache: list[torch.Tensor] = []
self.value_cache: list[torch.Tensor] = []
def update(
self,
key_states: torch.Tensor,
value_states: torch.Tensor,
layer_idx: int,
cache_kwargs: Optional[dict[str, Any]] = None,
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
Parameters:
key_states (`torch.Tensor`):
The new key states to cache.
value_states (`torch.Tensor`):
The new value states to cache.
layer_idx (`int`):
The index of the layer to cache the states for.
cache_kwargs (`dict[str, Any]`, `optional`):
Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`.
Return:
A tuple containing the updated key and value states.
"""
# Update the cache
if len(self.key_cache) <= layer_idx:
# There may be skipped layers, fill them with empty lists
for _ in range(len(self.key_cache), layer_idx):
self.key_cache.append([])
self.value_cache.append([])
self.key_cache.append(key_states)
self.value_cache.append(value_states)
elif len(self.key_cache[layer_idx]) == 0: # fills previously skipped layers; checking for tensor causes errors
self.key_cache[layer_idx] = key_states
self.value_cache[layer_idx] = value_states
else:
self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
return self.key_cache[layer_idx], self.value_cache[layer_idx]
def update_conv_state(
self,
layer_idx: int,
new_conv_state: torch.Tensor,
cache_position: torch.LongTensor,
) -> torch.Tensor:
conv_state = self.conv_states[layer_idx]
cache_position = cache_position.clamp(0, self.conv_kernel_size - 1)
conv_state = conv_state.roll(shifts=-1, dims=-1)
if len(cache_position) > 1:
conv_state[:, :, :] = new_conv_state.to(conv_state.device)
else:
conv_state[:, :, -1] = new_conv_state[:, :, -1].to(conv_state.device)
self.conv_states[layer_idx].zero_()
self.conv_states[layer_idx] += conv_state
return self.conv_states[layer_idx]
def reset(self):
self.conv_states.zero_()
self.ssm_states.zero_()
| FalconHybridMambaAttentionDynamicCache |
python | scrapy__scrapy | tests/test_feedexport.py | {
"start": 23048,
"end": 27127
} | class ____(ABC):
mockserver: MockServer
class MyItem(scrapy.Item):
foo = scrapy.Field()
egg = scrapy.Field()
baz = scrapy.Field()
class MyItem2(scrapy.Item):
foo = scrapy.Field()
hello = scrapy.Field()
def _random_temp_filename(self, inter_dir="") -> Path:
chars = [random.choice(ascii_letters + digits) for _ in range(15)]
filename = "".join(chars)
return Path(self.temp_dir, inter_dir, filename)
@classmethod
def setup_class(cls):
cls.mockserver = MockServer()
cls.mockserver.__enter__()
@classmethod
def teardown_class(cls):
cls.mockserver.__exit__(None, None, None)
def setup_method(self):
self.temp_dir = tempfile.mkdtemp()
def teardown_method(self):
shutil.rmtree(self.temp_dir, ignore_errors=True)
async def exported_data(
self, items: Iterable[Any], settings: dict[str, Any]
) -> dict[str, Any]:
"""
Return exported data which a spider yielding ``items`` would return.
"""
class TestSpider(scrapy.Spider):
name = "testspider"
def parse(self, response):
yield from items
return await self.run_and_export(TestSpider, settings)
async def exported_no_data(self, settings: dict[str, Any]) -> dict[str, Any]:
"""
Return exported data which a spider yielding no ``items`` would return.
"""
class TestSpider(scrapy.Spider):
name = "testspider"
def parse(self, response):
pass
return await self.run_and_export(TestSpider, settings)
async def assertExported(
self,
items: Iterable[Any],
header: Iterable[str],
rows: Iterable[dict[str, Any]],
settings: dict[str, Any] | None = None,
) -> None:
await self.assertExportedCsv(items, header, rows, settings)
await self.assertExportedJsonLines(items, rows, settings)
await self.assertExportedXml(items, rows, settings)
await self.assertExportedPickle(items, rows, settings)
await self.assertExportedMarshal(items, rows, settings)
await self.assertExportedMultiple(items, rows, settings)
async def assertExportedCsv( # noqa: B027
self,
items: Iterable[Any],
header: Iterable[str],
rows: Iterable[dict[str, Any]],
settings: dict[str, Any] | None = None,
) -> None:
pass
async def assertExportedJsonLines( # noqa: B027
self,
items: Iterable[Any],
rows: Iterable[dict[str, Any]],
settings: dict[str, Any] | None = None,
) -> None:
pass
async def assertExportedXml( # noqa: B027
self,
items: Iterable[Any],
rows: Iterable[dict[str, Any]],
settings: dict[str, Any] | None = None,
) -> None:
pass
async def assertExportedMultiple( # noqa: B027
self,
items: Iterable[Any],
rows: Iterable[dict[str, Any]],
settings: dict[str, Any] | None = None,
) -> None:
pass
async def assertExportedPickle( # noqa: B027
self,
items: Iterable[Any],
rows: Iterable[dict[str, Any]],
settings: dict[str, Any] | None = None,
) -> None:
pass
async def assertExportedMarshal( # noqa: B027
self,
items: Iterable[Any],
rows: Iterable[dict[str, Any]],
settings: dict[str, Any] | None = None,
) -> None:
pass
@abstractmethod
async def run_and_export(
self, spider_cls: type[Spider], settings: dict[str, Any]
) -> dict[str, Any]:
pass
def _load_until_eof(self, data, load_func):
result = []
with tempfile.TemporaryFile() as temp:
temp.write(data)
temp.seek(0)
while True:
try:
result.append(load_func(temp))
except EOFError:
break
return result
| TestFeedExportBase |
python | readthedocs__readthedocs.org | readthedocs/core/management/commands/contact_owners.py | {
"start": 481,
"end": 6851
} | class ____(BaseCommand):
"""
Send an email or sticky notification from a file (markdown) to all owners.
Usage examples
--------------
Email all owners of the site::
django-admin contact_owners --email email.md
Email and send an ephemeral (disappears after shown once) notification
to all owners of the "readthedocs" organization::
django-admin contact_owners
--email email.md
--notification notification.md
--organization readthedocs
Send a sticky notifications to multiple users::
django-admin contact_owners
--notification notification.md
--sticky
--usernames usernames.txt
* ``usernames.txt`` is a text file containing one username per line.
* ``notifications.md`` is a Markdown file containing the message
to be included in the notification.
* ``email.md`` is a Markdown file with the first line as the subject,
and the rest is the content.
The context available is:
* ``user``
* ``production_uri``
.. code:: markdown
Read the Docs deprecated option, action required
Dear {{ user.firstname }},
Greetings from [Read the Docs]({{ production_uri }}).
.. note::
By default the command won't send the email/notification (dry-run mode),
add the ``--production`` flag to actually send the email/notification.
.. note::
If you need to extend the behavior or add a new use case,
we recommend creating a simple script file that re-use the methods
and functions from this command.
This is an example to contact Domain owners:
https://gist.github.com/humitos/3e08ed4763a9312f5c0a9a997ea95a42
"""
help = "Send an email or sticky notification from a file (Markdown) to users."
def add_arguments(self, parser):
parser.add_argument(
"--production",
action="store_true",
dest="production",
default=False,
help=(
"Send the email/notification for real, "
"otherwise we only logs the notification in the console (dryrun)."
),
)
parser.add_argument(
"--email",
help=(
"Path to a file with the email content in markdown. "
"The first line would be the subject."
),
)
parser.add_argument(
"--notification",
help="Path to a file with the notification content in markdown.",
)
parser.add_argument(
"--sticky",
action="store_true",
dest="sticky",
default=False,
help=("Make the notification sticky (the notification stays until the user closes it)"),
)
parser.add_argument(
"--organization",
help="Organization slug to filter by.",
)
parser.add_argument(
"--project",
help="Project slug to filter by.",
)
parser.add_argument(
"--usernames",
help="Path to a file with one username per line to filter by.",
)
def handle(self, *args, **options):
if not options["email"] and not options["notification"]:
print("--email or --notification is required.")
sys.exit(1)
project = options["project"]
organization = options["organization"]
usernames = options["usernames"]
if len([item for item in [project, organization, usernames] if bool(item)]) >= 2:
print("--project, --organization and --usernames can't be used together.")
sys.exit(1)
if project:
project = Project.objects.get(slug=project)
users = AdminPermission.owners(project)
elif organization:
organization = Organization.objects.get(slug=organization)
users = AdminPermission.owners(organization)
elif usernames:
file = Path(usernames)
with file.open(encoding="utf8") as f:
usernames = f.readlines()
# remove "\n" from lines
usernames = [line.strip() for line in usernames]
users = User.objects.filter(username__in=usernames)
elif settings.RTD_ALLOW_ORGANIZATIONS:
users = User.objects.filter(organizationowner__organization__disabled=False).distinct()
else:
users = User.objects.filter(projects__skip=False).distinct()
log.info(
"Command arguments.",
n_owners=users.count(),
production=bool(options["production"]),
email_filepath=options["email"],
notification_filepath=options["notification"],
sticky=options["sticky"],
)
if input("Continue? y/N: ") != "y":
print("Aborting run.")
return
notification_content = ""
if options["notification"]:
file = Path(options["notification"])
with file.open(encoding="utf8") as f:
notification_content = f.read()
email_subject = ""
email_content = ""
if options["email"]:
file = Path(options["email"])
with file.open(encoding="utf8") as f:
content = f.read().split("\n")
email_subject = content[0].strip()
email_content = "\n".join(content[1:]).strip()
resp = contact_users(
users=users,
email_subject=email_subject,
email_content=email_content,
notification_content=notification_content,
sticky_notification=options["sticky"],
dryrun=not options["production"],
)
email = resp["email"]
log.info(
"Sending emails finished.",
total=len(email["sent"]),
total_failed=len(email["failed"]),
sent_emails=email["sent"],
failed_emails=email["failed"],
)
notification = resp["notification"]
log.info(
"Sending notifications finished.",
total=len(notification["sent"]),
total_failed=len(notification["failed"]),
sent_notifications=notification["sent"],
failed_notifications=notification["failed"],
)
| Command |
python | sqlalchemy__sqlalchemy | test/dialect/sqlite/test_reflection.py | {
"start": 40289,
"end": 46305
} | class ____(fixtures.TestBase):
__only_on__ = "sqlite"
__backend__ = True
def _fixed_lookup_fixture(self):
return [
(sqltypes.String(), sqltypes.VARCHAR()),
(sqltypes.String(1), sqltypes.VARCHAR(1)),
(sqltypes.String(3), sqltypes.VARCHAR(3)),
(sqltypes.Text(), sqltypes.TEXT()),
(sqltypes.Unicode(), sqltypes.VARCHAR()),
(sqltypes.Unicode(1), sqltypes.VARCHAR(1)),
(sqltypes.UnicodeText(), sqltypes.TEXT()),
(sqltypes.CHAR(3), sqltypes.CHAR(3)),
(sqltypes.NUMERIC, sqltypes.NUMERIC()),
(sqltypes.NUMERIC(10, 2), sqltypes.NUMERIC(10, 2)),
(sqltypes.Numeric, sqltypes.NUMERIC()),
(sqltypes.Numeric(10, 2), sqltypes.NUMERIC(10, 2)),
(sqltypes.DECIMAL, sqltypes.DECIMAL()),
(sqltypes.DECIMAL(10, 2), sqltypes.DECIMAL(10, 2)),
(sqltypes.INTEGER, sqltypes.INTEGER()),
(sqltypes.BIGINT, sqltypes.BIGINT()),
(sqltypes.Float, sqltypes.FLOAT()),
(sqltypes.TIMESTAMP, sqltypes.TIMESTAMP()),
(sqltypes.DATETIME, sqltypes.DATETIME()),
(sqltypes.DateTime, sqltypes.DATETIME()),
(sqltypes.DateTime(), sqltypes.DATETIME()),
(sqltypes.DATE, sqltypes.DATE()),
(sqltypes.Date, sqltypes.DATE()),
(sqltypes.TIME, sqltypes.TIME()),
(sqltypes.Time, sqltypes.TIME()),
(sqltypes.BOOLEAN, sqltypes.BOOLEAN()),
(sqltypes.Boolean, sqltypes.BOOLEAN()),
(
sqlite.DATE(storage_format="%(year)04d%(month)02d%(day)02d"),
sqltypes.DATE(),
),
(
sqlite.TIME(
storage_format="%(hour)02d%(minute)02d%(second)02d"
),
sqltypes.TIME(),
),
(
sqlite.DATETIME(
storage_format="%(year)04d%(month)02d%(day)02d"
"%(hour)02d%(minute)02d%(second)02d"
),
sqltypes.DATETIME(),
),
]
def _unsupported_args_fixture(self):
return [
("INTEGER(5)", sqltypes.INTEGER()),
("DATETIME(6, 12)", sqltypes.DATETIME()),
]
def _type_affinity_fixture(self):
return [
("LONGTEXT", sqltypes.TEXT()),
("TINYINT", sqltypes.INTEGER()),
("MEDIUMINT", sqltypes.INTEGER()),
("INT2", sqltypes.INTEGER()),
("UNSIGNED BIG INT", sqltypes.INTEGER()),
("INT8", sqltypes.INTEGER()),
("CHARACTER(20)", sqltypes.TEXT()),
("CLOB", sqltypes.TEXT()),
("CLOBBER", sqltypes.TEXT()),
("VARYING CHARACTER(70)", sqltypes.TEXT()),
("NATIVE CHARACTER(70)", sqltypes.TEXT()),
("BLOB", sqltypes.BLOB()),
("BLOBBER", sqltypes.NullType()),
("DOUBLE PRECISION", sqltypes.REAL()),
("FLOATY", sqltypes.REAL()),
("SOMETHING UNKNOWN", sqltypes.NUMERIC()),
]
def _fixture_as_string(self, fixture):
for from_, to_ in fixture:
if isinstance(from_, sqltypes.TypeEngine):
from_ = str(from_.compile())
elif isinstance(from_, type):
from_ = str(from_().compile())
yield from_, to_
def _test_lookup_direct(self, fixture, warnings=False):
dialect = sqlite.dialect()
for from_, to_ in self._fixture_as_string(fixture):
if warnings:
def go():
return dialect._resolve_type_affinity(from_)
final_type = testing.assert_warnings(
go, ["Could not instantiate"], regex=True
)
else:
final_type = dialect._resolve_type_affinity(from_)
expected_type = type(to_)
is_(type(final_type), expected_type)
def _test_round_trip(self, fixture, warnings=False):
from sqlalchemy import inspect
for from_, to_ in self._fixture_as_string(fixture):
with testing.db.begin() as conn:
inspector = inspect(conn)
conn.exec_driver_sql("CREATE TABLE foo (data %s)" % from_)
try:
if warnings:
def go():
return inspector.get_columns("foo")[0]
col_info = testing.assert_warnings(
go, ["Could not instantiate"], regex=True
)
else:
col_info = inspector.get_columns("foo")[0]
expected_type = type(to_)
is_(type(col_info["type"]), expected_type)
# test args
for attr in ("scale", "precision", "length"):
if getattr(to_, attr, None) is not None:
eq_(
getattr(col_info["type"], attr),
getattr(to_, attr, None),
)
finally:
conn.exec_driver_sql("DROP TABLE foo")
def test_lookup_direct_lookup(self):
self._test_lookup_direct(self._fixed_lookup_fixture())
def test_lookup_direct_unsupported_args(self):
self._test_lookup_direct(
self._unsupported_args_fixture(), warnings=True
)
def test_lookup_direct_type_affinity(self):
self._test_lookup_direct(self._type_affinity_fixture())
def test_round_trip_direct_lookup(self):
self._test_round_trip(self._fixed_lookup_fixture())
def test_round_trip_direct_unsupported_args(self):
self._test_round_trip(self._unsupported_args_fixture(), warnings=True)
def test_round_trip_direct_type_affinity(self):
self._test_round_trip(self._type_affinity_fixture())
| TypeReflectionTest |
python | google__jax | tests/mosaic/gpu_torch_test.py | {
"start": 1412,
"end": 3406
} | class ____(parameterized.TestCase):
def setUp(self):
if (not jtu.test_device_matches(["cuda"]) or
not jtu.is_cuda_compute_capability_at_least("9.0")):
self.skipTest("Only works on GPU with capability >= sm90")
super().setUp()
self.prng = np.random.default_rng(1234)
self.context = mlir.make_ir_context()
mgpu_dialect.register_dialect(self.context)
self.enter_context(config.traceback_filtering("off"))
self.enter_context(self.context)
self.enter_context(ir.Location.unknown())
if torch is None:
raise unittest.SkipTest("Test requires PyTorch")
def test_basic(self):
def kernel(ctx, i_gmem, o_gmem, _):
x = mgpu.FragmentedArray.load_strided(i_gmem)
(x + x).store_untiled(o_gmem)
ty = jax.ShapeDtypeStruct((128, 128), jnp.float32)
x = torch.randn((128, 128), dtype=torch.float, device='cuda')
f = mgpu.as_torch_gpu_kernel(kernel, (1, 1, 1), (128, 1, 1), ty, ty, ())
y = f(x)
np.testing.assert_allclose(y.cpu(), x.cpu() * 2)
del y # Make sure the destructor runs successfully.
def test_inout(self):
def kernel(ctx, src, inout, dst, smem):
val = memref.load(inout, [])
gpu.barrier()
new_val = arith.constant(ir.IntegerType.get_signless(32), 42)
memref.store(new_val, inout, [])
x = mgpu.FragmentedArray.load_strided(src, is_signed=True)
(x + val).store_untiled(dst)
x = torch.arange(128, dtype=torch.int32, device='cuda')
y = torch.tensor(2.0, dtype=torch.int32, device='cuda')
x_ty = jax.ShapeDtypeStruct((128,), jnp.int32)
y_ty = jax.ShapeDtypeStruct((), jnp.int32)
kernel = mgpu.as_torch_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), x_ty, x_ty, (), inout_shape=y_ty,
)
xo, yo = kernel(x, y)
np.testing.assert_array_equal(xo.cpu(), x.cpu() + 2.0)
np.testing.assert_array_equal(yo.cpu(), torch.tensor(42, dtype=torch.int32))
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| TorchTest |