language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | airbytehq__airbyte | airbyte-integrations/connectors/source-appsflyer/source_appsflyer/source.py | {
"start": 6870,
"end": 7537
} | class ____:
additional_fields = additional_fields.raw_data
def request_params(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
params = super().request_params(stream_state, stream_slice, next_page_token)
params["from"] = stream_slice.get(self.cursor_field).to_datetime_string()
params["to"] = stream_slice.get(self.cursor_field + "_end").to_datetime_string()
# use currency set in the app settings to align with aggregate api currency.
params["currency"] = "preferred"
return params
| RawDataMixin |
python | skorch-dev__skorch | skorch/tests/test_setter.py | {
"start": 90,
"end": 2493
} | class ____:
@pytest.fixture
def net_dummy(self):
from skorch import NeuralNet
net = Mock(spec=NeuralNet)
net.lr = 0.01
return net
@pytest.fixture
def optimizer_dummy(self):
from torch.optim import Optimizer
optim = Mock(spec=Optimizer)
optim.param_groups = [
{'lr': 0.01, 'momentum': 0.9},
{'lr': 0.02, 'momentum': 0.9}
]
return optim
@pytest.fixture(scope='function')
def net_optim_dummy(self, net_dummy, optimizer_dummy):
net_dummy.optimizer_ = optimizer_dummy
return net_dummy
@pytest.fixture
def setter(self):
from skorch.setter import optimizer_setter
return optimizer_setter
def test_lr_attribute_is_updated(self, setter, net_optim_dummy):
new_lr = net_optim_dummy.lr + 1
setter(net_optim_dummy, 'lr', new_lr)
assert net_optim_dummy.lr == new_lr
def test_wrong_name_raises(self, setter, net_optim_dummy):
# should be 'param_groups' instead
param = 'optimizer__param_group__0__lr'
value = 0.1
with pytest.raises(AttributeError) as e:
setter(net_optim_dummy, param, value)
assert e.value.args[0] == (
'Invalid parameter "{param}" for optimizer "optimizer"'
.format(param=param)
)
@pytest.mark.parametrize('group', [0, 1])
@pytest.mark.parametrize('sub_param, value', [
('momentum', 0.1),
('lr', 0.3),
])
def test_only_specific_param_group_updated(self, setter, net_optim_dummy,
group, sub_param, value):
pgroups = net_optim_dummy.optimizer_.param_groups
param = 'optimizer__param_groups__{}__{}'.format(group, sub_param)
updated_group_pre = [g for i, g in enumerate(pgroups) if i == group]
static_groups_pre = [g for i, g, in enumerate(pgroups) if i != group]
assert len(updated_group_pre) == 1
setter(net_optim_dummy, param, value)
updated_group_new = [g for i, g in enumerate(pgroups) if i == group]
static_groups_new = [g for i, g, in enumerate(pgroups) if i != group]
assert updated_group_new[0][sub_param] == value
assert all(old[sub_param] == new[sub_param] for old, new in zip(
static_groups_pre, static_groups_new))
| TestOptimizerSetter |
python | mahmoud__glom | glom/core.py | {
"start": 67649,
"end": 84137
} | class ____:
'''
responsible for registration of target types for iteration
and attribute walking
'''
def __init__(self, register_default_types=True):
self._op_type_map = {}
self._op_type_tree = {} # see _register_fuzzy_type for details
self._type_cache = {}
self._op_auto_map = OrderedDict() # op name to function that returns handler function
self._register_builtin_ops()
if register_default_types:
self._register_default_types()
return
def get_handler(self, op, obj, path=None, raise_exc=True):
"""for an operation and object **instance**, obj, return the
closest-matching handler function, raising UnregisteredTarget
if no handler can be found for *obj* (or False if
raise_exc=False)
"""
ret = False
obj_type = type(obj)
cache_key = (obj_type, op)
if cache_key not in self._type_cache:
type_map = self.get_type_map(op)
if type_map:
try:
ret = type_map[obj_type]
except KeyError:
type_tree = self._op_type_tree.get(op, {})
closest = self._get_closest_type(obj, type_tree=type_tree)
if closest is None:
ret = False
else:
ret = type_map[closest]
if ret is False and raise_exc:
raise UnregisteredTarget(op, obj_type, type_map=type_map, path=path)
self._type_cache[cache_key] = ret
return self._type_cache[cache_key]
def get_type_map(self, op):
try:
return self._op_type_map[op]
except KeyError:
return OrderedDict()
def _get_closest_type(self, obj, type_tree):
default = None
for cur_type, sub_tree in type_tree.items():
if isinstance(obj, cur_type):
sub_type = self._get_closest_type(obj, type_tree=sub_tree)
ret = cur_type if sub_type is None else sub_type
return ret
return default
def _register_default_types(self):
self.register(object)
self.register(dict, get=operator.getitem)
self.register(dict, keys=dict.keys)
self.register(list, get=_get_sequence_item)
self.register(tuple, get=_get_sequence_item)
self.register(OrderedDict, get=operator.getitem)
self.register(OrderedDict, keys=OrderedDict.keys)
self.register(_AbstractIterable, iterate=iter)
self.register(_ObjStyleKeys, keys=_ObjStyleKeys.get_keys)
def _register_fuzzy_type(self, op, new_type, _type_tree=None):
"""Build a "type tree", an OrderedDict mapping registered types to
their subtypes
The type tree's invariant is that a key in the mapping is a
valid parent type of all its children.
Order is preserved such that non-overlapping parts of the
subtree take precedence by which was most recently added.
"""
if _type_tree is None:
try:
_type_tree = self._op_type_tree[op]
except KeyError:
_type_tree = self._op_type_tree[op] = OrderedDict()
registered = False
for cur_type, sub_tree in list(_type_tree.items()):
if issubclass(cur_type, new_type):
sub_tree = _type_tree.pop(cur_type) # mutation for recursion brevity
try:
_type_tree[new_type][cur_type] = sub_tree
except KeyError:
_type_tree[new_type] = OrderedDict({cur_type: sub_tree})
registered = True
elif issubclass(new_type, cur_type):
_type_tree[cur_type] = self._register_fuzzy_type(op, new_type, _type_tree=sub_tree)
registered = True
if not registered:
_type_tree[new_type] = OrderedDict()
return _type_tree
def register(self, target_type, **kwargs):
if not isinstance(target_type, type):
raise TypeError(f'register expected a type, not an instance: {target_type!r}')
exact = kwargs.pop('exact', None)
new_op_map = dict(kwargs)
for op_name in sorted(set(self._op_auto_map.keys()) | set(new_op_map.keys())):
cur_type_map = self._op_type_map.setdefault(op_name, OrderedDict())
if op_name in new_op_map:
handler = new_op_map[op_name]
elif target_type in cur_type_map:
handler = cur_type_map[target_type]
else:
try:
handler = self._op_auto_map[op_name](target_type)
except Exception as e:
raise TypeError('error while determining support for operation'
' "%s" on target type: %s (got %r)'
% (op_name, target_type.__name__, e))
if handler is not False and not callable(handler):
raise TypeError('expected handler for op "%s" to be'
' callable or False, not: %r' % (op_name, handler))
new_op_map[op_name] = handler
for op_name, handler in new_op_map.items():
self._op_type_map[op_name][target_type] = handler
if not exact:
for op_name in new_op_map:
self._register_fuzzy_type(op_name, target_type)
self._type_cache = {} # reset type cache
return
def register_op(self, op_name, auto_func=None, exact=False):
"""add operations beyond the builtins ('get' and 'iterate' at the time
of writing).
auto_func is a function that when passed a type, returns a
handler associated with op_name if it's supported, or False if
it's not.
See glom.core.register_op() for the global version used by
extensions.
"""
if not isinstance(op_name, basestring):
raise TypeError(f'expected op_name to be a text name, not: {op_name!r}')
if auto_func is None:
auto_func = lambda t: False
elif not callable(auto_func):
raise TypeError(f'expected auto_func to be callable, not: {auto_func!r}')
# determine support for any previously known types
known_types = set(sum([list(m.keys()) for m
in self._op_type_map.values()], []))
type_map = self._op_type_map.get(op_name, OrderedDict())
type_tree = self._op_type_tree.get(op_name, OrderedDict())
for t in sorted(known_types, key=lambda t: t.__name__):
if t in type_map:
continue
try:
handler = auto_func(t)
except Exception as e:
raise TypeError('error while determining support for operation'
' "%s" on target type: %s (got %r)'
% (op_name, t.__name__, e))
if handler is not False and not callable(handler):
raise TypeError('expected handler for op "%s" to be'
' callable or False, not: %r' % (op_name, handler))
type_map[t] = handler
if not exact:
for t in known_types:
self._register_fuzzy_type(op_name, t, _type_tree=type_tree)
self._op_type_map[op_name] = type_map
self._op_type_tree[op_name] = type_tree
self._op_auto_map[op_name] = auto_func
def _register_builtin_ops(self):
def _get_iterable_handler(type_obj):
return iter if callable(getattr(type_obj, '__iter__', None)) else False
self.register_op('iterate', _get_iterable_handler)
self.register_op('get', lambda _: getattr)
_DEFAULT_SCOPE = ChainMap({})
def glom(target, spec, **kwargs):
"""Access or construct a value from a given *target* based on the
specification declared by *spec*.
Accessing nested data, aka deep-get:
>>> target = {'a': {'b': 'c'}}
>>> glom(target, 'a.b')
'c'
Here the *spec* was just a string denoting a path,
``'a.b'``. As simple as it should be. You can also use
:mod:`glob`-like wildcard selectors:
>>> target = {'a': [{'k': 'v1'}, {'k': 'v2'}]}
>>> glom(target, 'a.*.k')
['v1', 'v2']
In addition to ``*``, you can also use ``**`` for recursive access:
>>> target = {'a': [{'k': 'v3'}, {'k': 'v4'}], 'k': 'v0'}
>>> glom(target, '**.k')
['v0', 'v3', 'v4']
The next example shows how to use nested data to
access many fields at once, and make a new nested structure.
Constructing, or restructuring more-complicated nested data:
>>> target = {'a': {'b': 'c', 'd': 'e'}, 'f': 'g', 'h': [0, 1, 2]}
>>> spec = {'a': 'a.b', 'd': 'a.d', 'h': ('h', [lambda x: x * 2])}
>>> output = glom(target, spec)
>>> pprint(output)
{'a': 'c', 'd': 'e', 'h': [0, 2, 4]}
``glom`` also takes a keyword-argument, *default*. When set,
if a ``glom`` operation fails with a :exc:`GlomError`, the
*default* will be returned, very much like
:meth:`dict.get()`:
>>> glom(target, 'a.xx', default='nada')
'nada'
The *skip_exc* keyword argument controls which errors should
be ignored.
>>> glom({}, lambda x: 100.0 / len(x), default=0.0, skip_exc=ZeroDivisionError)
0.0
Args:
target (object): the object on which the glom will operate.
spec (object): Specification of the output object in the form
of a dict, list, tuple, string, other glom construct, or
any composition of these.
default (object): An optional default to return in the case
an exception, specified by *skip_exc*, is raised.
skip_exc (Exception): An optional exception or tuple of
exceptions to ignore and return *default* (None if
omitted). If *skip_exc* and *default* are both not set,
glom raises errors through.
scope (dict): Additional data that can be accessed
via S inside the glom-spec. Read more: :ref:`scope`.
It's a small API with big functionality, and glom's power is
only surpassed by its intuitiveness. Give it a whirl!
"""
# TODO: check spec up front
default = kwargs.pop('default', None if 'skip_exc' in kwargs else _MISSING)
skip_exc = kwargs.pop('skip_exc', () if default is _MISSING else GlomError)
glom_debug = kwargs.pop('glom_debug', GLOM_DEBUG)
scope = _DEFAULT_SCOPE.new_child({
Path: kwargs.pop('path', []),
Inspect: kwargs.pop('inspector', None),
MODE: AUTO,
MIN_MODE: None,
CHILD_ERRORS: [],
'globals': ScopeVars({}, {}),
})
scope[UP] = scope
scope[ROOT] = scope
scope[T] = target
scope.update(kwargs.pop('scope', {}))
err = None
if kwargs:
raise TypeError('unexpected keyword args: %r' % sorted(kwargs.keys()))
try:
try:
ret = _glom(target, spec, scope)
except skip_exc:
if default is _MISSING:
raise
ret = default # should this also be arg_val'd?
except Exception as e:
if glom_debug:
raise
if isinstance(e, GlomError):
# need to change id or else py3 seems to not let us truncate the
# stack trace with the explicit "raise err" below
err = copy.copy(e)
err._set_wrapped(e)
else:
err = GlomError.wrap(e)
if isinstance(err, GlomError):
err._finalize(scope[LAST_CHILD_SCOPE])
else: # wrapping failed, fall back to default behavior
raise
if err:
raise err
return ret
def chain_child(scope):
"""
used for specs like Auto(tuple), Switch(), etc
that want to chain their child scopes together
returns a new scope that can be passed to
the next recursive glom call, e.g.
scope[glom](target, spec, chain_child(scope))
"""
if LAST_CHILD_SCOPE not in scope.maps[0]:
return scope # no children yet, nothing to do
# NOTE: an option here is to drill down on LAST_CHILD_SCOPE;
# this would have some interesting consequences for scoping
# of tuples
nxt_in_chain = scope[LAST_CHILD_SCOPE]
nxt_in_chain.maps[0][NO_PYFRAME] = True
# previous failed branches are forgiven as the
# scope is re-wired into a new stack
del nxt_in_chain.maps[0][CHILD_ERRORS][:]
return nxt_in_chain
unbound_methods = {type(str.__len__)} #, type(Ref.glomit)])
def _has_callable_glomit(obj):
glomit = getattr(obj, 'glomit', None)
return callable(glomit) and not isinstance(obj, type)
def _glom(target, spec, scope):
parent = scope
pmap = parent.maps[0]
scope = scope.new_child({
T: target,
Spec: spec,
UP: parent,
CHILD_ERRORS: [],
MODE: pmap[MODE],
MIN_MODE: pmap[MIN_MODE],
})
pmap[LAST_CHILD_SCOPE] = scope
try:
if type(spec) is TType: # must go first, due to callability
scope[MIN_MODE] = None # None is tombstone
return _t_eval(target, spec, scope)
elif _has_callable_glomit(spec):
scope[MIN_MODE] = None
return spec.glomit(target, scope)
return (scope.maps[0][MIN_MODE] or scope.maps[0][MODE])(target, spec, scope)
except Exception as e:
scope.maps[1][CHILD_ERRORS].append(scope)
scope.maps[0][CUR_ERROR] = e
if NO_PYFRAME in scope.maps[1]:
cur_scope = scope[UP]
while NO_PYFRAME in cur_scope.maps[0]:
cur_scope.maps[1][CHILD_ERRORS].append(cur_scope)
cur_scope.maps[0][CUR_ERROR] = e
cur_scope = cur_scope[UP]
raise
def AUTO(target, spec, scope):
if type(spec) is str: # shortcut to make deep-get use case faster
return _t_eval(target, Path.from_text(spec).path_t, scope)
if isinstance(spec, dict):
return _handle_dict(target, spec, scope)
elif isinstance(spec, list):
return _handle_list(target, spec, scope)
elif isinstance(spec, tuple):
return _handle_tuple(target, spec, scope)
elif isinstance(spec, basestring):
return Path.from_text(spec).glomit(target, scope)
elif callable(spec):
return spec(target)
raise TypeError('expected spec to be dict, list, tuple, callable, string,'
' or other Spec-like type, not: %r' % (spec,))
_DEFAULT_SCOPE.update({
glom: _glom,
TargetRegistry: TargetRegistry(register_default_types=True),
})
def register(target_type, **kwargs):
"""Register *target_type* so :meth:`~Glommer.glom()` will
know how to handle instances of that type as targets.
Here's an example of adding basic iterabile support for Django's ORM:
.. code-block:: python
import glom
import django.db.models
glom.register(django.db.models.Manager, iterate=lambda m: m.all())
glom.register(django.db.models.QuerySet, iterate=lambda qs: qs.all())
Args:
target_type (type): A type expected to appear in a glom()
call target
get (callable): A function which takes a target object and
a name, acting as a default accessor. Defaults to
:func:`getattr`.
iterate (callable): A function which takes a target object
and returns an iterator. Defaults to :func:`iter` if
*target_type* appears to be iterable.
exact (bool): Whether or not to match instances of subtypes
of *target_type*.
.. note::
The module-level :func:`register()` function affects the
module-level :func:`glom()` function's behavior. If this
global effect is undesirable for your application, or
you're implementing a library, consider instantiating a
:class:`Glommer` instance, and using the
:meth:`~Glommer.register()` and :meth:`Glommer.glom()`
methods instead.
"""
_DEFAULT_SCOPE[TargetRegistry].register(target_type, **kwargs)
return
def register_op(op_name, **kwargs):
"""For extension authors needing to add operations beyond the builtin
'get', 'iterate', 'keys', 'assign', and 'delete' to the default scope.
See TargetRegistry for more details.
"""
_DEFAULT_SCOPE[TargetRegistry].register_op(op_name, **kwargs)
return
| TargetRegistry |
python | joblib__joblib | examples/parallel_generator.py | {
"start": 1596,
"end": 11033
} | class ____(Thread):
"""Monitor the memory usage in MB in a separate thread.
Note that this class is good enough to highlight the memory profile of
Parallel in this example, but is not a general purpose profiler fit for
all cases.
"""
def __init__(self):
super().__init__()
self.stop = False
self.memory_buffer = []
self.start()
def get_memory(self):
"Get memory of a process and its children."
p = Process()
memory = p.memory_info().rss
for c in p.children():
memory += c.memory_info().rss
return memory
def run(self):
memory_start = self.get_memory()
while not self.stop:
self.memory_buffer.append(self.get_memory() - memory_start)
time.sleep(0.2)
def join(self):
self.stop = True
super().join()
##############################################################################
# Save memory by consuming the outputs of the tasks as fast as possible
##############################################################################
##############################################################################
# We create a task whose output takes about 15MB of RAM.
#
import numpy as np
def return_big_object(i):
time.sleep(0.1)
return i * np.ones((10000, 200), dtype=np.float64)
##############################################################################
# We create a reduce step. The input will be a generator on big objects
# generated in parallel by several instances of ``return_big_object``.
def accumulator_sum(generator):
result = 0
for value in generator:
result += value
print(".", end="", flush=True)
print("")
return result
##############################################################################
# We process many of the tasks in parallel. If ``return_as="list"`` (default),
# we should expect a usage of more than 2GB in RAM. Indeed, all the results
# are computed and stored in ``res`` before being processed by
# `accumulator_sum` and collected by the gc.
from joblib import Parallel, delayed
monitor = MemoryMonitor()
print("Running tasks with return_as='list'...")
res = Parallel(n_jobs=2, return_as="list")(
delayed(return_big_object)(i) for i in range(150)
)
print("Accumulate results:", end="")
res = accumulator_sum(res)
print("All tasks completed and reduced successfully.")
# Report memory usage
del res # we clean the result to avoid memory border effects
monitor.join()
peak = max(monitor.memory_buffer) / 1e9
print(f"Peak memory usage: {peak:.2f}GB")
##############################################################################
# If we use ``return_as="generator"``, ``res`` is simply a generator on the
# results that are ready. Here we consume the results as soon as they arrive
# with the ``accumulator_sum`` and once they have been used, they are collected
# by the gc. The memory footprint is thus reduced, typically around 300MB.
monitor_gen = MemoryMonitor()
print("Create result generator with return_as='generator'...")
res = Parallel(n_jobs=2, return_as="generator")(
delayed(return_big_object)(i) for i in range(150)
)
print("Accumulate results:", end="")
res = accumulator_sum(res)
print("All tasks completed and reduced successfully.")
# Report memory usage
del res # we clean the result to avoid memory border effects
monitor_gen.join()
peak = max(monitor_gen.memory_buffer) / 1e6
print(f"Peak memory usage: {peak:.2f}MB")
##############################################################################
# We can then report the memory usage across time of the two runs using the
# MemoryMonitor.
#
# In the first case, as the results accumulate in ``res``, the memory grows
# linearly and it is freed once the ``accumulator_sum`` function finishes.
#
# In the second case, the results are processed by the accumulator as soon as
# they arrive, and the memory does not need to be able to contain all
# the results.
import matplotlib.pyplot as plt
plt.figure(0)
plt.semilogy(np.maximum.accumulate(monitor.memory_buffer), label='return_as="list"')
plt.semilogy(
np.maximum.accumulate(monitor_gen.memory_buffer), label='return_as="generator"'
)
plt.xlabel("Time")
plt.xticks([], [])
plt.ylabel("Memory usage")
plt.yticks([1e7, 1e8, 1e9], ["10MB", "100MB", "1GB"])
plt.legend()
plt.show()
##############################################################################
# It is important to note that with ``return_as="generator"``, the results are
# still accumulated in RAM after computation. But as we asynchronously process
# them, they can be freed sooner. However, if the generator is not consumed
# the memory still grows linearly.
##############################################################################
# Further memory efficiency for commutative aggregation
##############################################################################
##############################################################################
# There is still room for improving the relief on memory allocation we get
# using ``return_as="generator"``. Indeed, notice how the generator of the
# previous example respects the order the tasks have been submitted with. This
# behavior can cause a build up in memory of results waiting to be consumed,
# in case some tasks finished before other tasks despite being submitted
# later. The corresponding results will be kept in memory until the slower
# tasks submitted earlier are done and have been iterated over.
#
# In case the downstream consumer of the results is reliant on the assumption
# that the results are yielded in the same order that the tasks were submitted,
# it can't be helped. But in our example, since the `+` operator is
# commutative, the function ``accumulator_sum`` does not need the generator to
# return the results with any particular order. In this case it's safe to use
# the option ``return_as="generator_unordered"``, so that the results are
# returned as soon as a task is completed, ignoring the order of task
# submission.
#
# Beware that the downstream consumer of the results must not expect them be
# returned with any deterministic or predictable order at all, since the
# progress of the tasks can depend on the availability of the workers, which
# can be affected by external events, such as system load, implementation
# details in the backend, etc.
##############################################################################
# To better highlight improvements in memory usage when using the parameter
# ``return_as="generator_unordered"``, let's explicitly add delay in some of
# the submitted tasks.
def return_big_object_delayed(i):
if (i + 20) % 60:
time.sleep(0.1)
else:
time.sleep(5)
return i * np.ones((10000, 200), dtype=np.float64)
##############################################################################
# Let's check memory usage when using ``return_as="generator"``...
monitor_delayed_gen = MemoryMonitor()
print("Create result generator on delayed tasks with return_as='generator'...")
res = Parallel(n_jobs=2, return_as="generator")(
delayed(return_big_object_delayed)(i) for i in range(150)
)
print("Accumulate results:", end="")
res = accumulator_sum(res)
print("All tasks completed and reduced successfully.")
# Report memory usage
del res # we clean the result to avoid memory border effects
monitor_delayed_gen.join()
peak = max(monitor_delayed_gen.memory_buffer) / 1e6
print(f"Peak memory usage: {peak:.2f}MB")
##############################################################################
# If we use ``return_as="generator_unordered"``, ``res`` will not enforce any
# order when returning the results, and will simply enable iterating on the
# results as soon as it's available. The peak memory usage is now controlled
# to an even lower level, since that results can be consumed immediately
# rather than being delayed by the compute of slower tasks that have been
# submitted earlier.
monitor_delayed_gen_unordered = MemoryMonitor()
print(
"Create result generator on delayed tasks with return_as='generator_unordered'..."
)
res = Parallel(n_jobs=2, return_as="generator_unordered")(
delayed(return_big_object_delayed)(i) for i in range(150)
)
print("Accumulate results:", end="")
res = accumulator_sum(res)
print("All tasks completed and reduced successfully.")
# Report memory usage
del res # we clean the result to avoid memory border effects
monitor_delayed_gen_unordered.join()
peak = max(monitor_delayed_gen_unordered.memory_buffer) / 1e6
print(f"Peak memory usage: {peak:.2f}MB")
##############################################################################
# Notice how the plot for ``'return_as="generator'`` now shows a high memory
# usage plateau when slow jobs cause a congestion of intermediate results
# waiting in RAM before in-order aggregation. This high memory usage is never
# observed when using ``'return_as="generator_unordered"``.
plt.figure(1)
plt.semilogy(
np.maximum.accumulate(monitor_delayed_gen.memory_buffer),
label='return_as="generator"',
)
plt.semilogy(
np.maximum.accumulate(monitor_delayed_gen_unordered.memory_buffer),
label='return_as="generator_unordered"',
)
plt.xlabel("Time")
plt.xticks([], [])
plt.ylabel("Memory usage")
plt.yticks([1e7, 1e8, 1e9], ["10MB", "100MB", "1GB"])
plt.legend()
plt.show()
| MemoryMonitor |
python | PrefectHQ__prefect | src/prefect/events/schemas/deployment_triggers.py | {
"start": 2626,
"end": 2842
} | class ____(BaseDeploymentTrigger, MetricTrigger):
"""
A trigger that fires based on the results of a metric query.
"""
trigger_type: ClassVar[Type[TriggerTypes]] = MetricTrigger
| DeploymentMetricTrigger |
python | huggingface__transformers | src/transformers/models/bert/modeling_bert.py | {
"start": 9154,
"end": 12652
} | class ____(nn.Module):
def __init__(self, config, is_causal=False, layer_idx=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.scaling = self.attention_head_size**-0.5
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.is_causal = is_causal
self.layer_idx = layer_idx
def forward(
self,
hidden_states: torch.Tensor,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[EncoderDecoderCache] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
# determine input shapes
bsz, tgt_len = hidden_states.shape[:-1]
src_len = encoder_hidden_states.shape[1]
q_input_shape = (bsz, tgt_len, -1, self.attention_head_size)
kv_input_shape = (bsz, src_len, -1, self.attention_head_size)
# get query proj
query_layer = self.query(hidden_states).view(*q_input_shape).transpose(1, 2)
is_updated = past_key_values.is_updated.get(self.layer_idx) if past_key_values is not None else False
if past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_layer = past_key_values.cross_attention_cache.layers[self.layer_idx].keys
value_layer = past_key_values.cross_attention_cache.layers[self.layer_idx].values
else:
key_layer = self.key(encoder_hidden_states).view(*kv_input_shape).transpose(1, 2)
value_layer = self.value(encoder_hidden_states).view(*kv_input_shape).transpose(1, 2)
if past_key_values is not None:
# save all states to the cache
key_layer, value_layer = past_key_values.cross_attention_cache.update(
key_layer, value_layer, self.layer_idx
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
past_key_values.is_updated[self.layer_idx] = True
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_layer,
key_layer,
value_layer,
attention_mask,
dropout=0.0 if not self.training else self.dropout.p,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
return attn_output, attn_weights
| BertCrossAttention |
python | astropy__astropy | astropy/coordinates/baseframe.py | {
"start": 91942,
"end": 93042
} | class ____(BaseCoordinateFrame):
"""
A frame object that can't store data but can hold any arbitrary frame
attributes. Mostly useful as a utility for the high-level class to store
intermediate frame attributes.
Parameters
----------
frame_attrs : dict
A dictionary of attributes to be used as the frame attributes for this
frame.
"""
name = None # it's not a "real" frame so it doesn't have a name
def __init__(self, frame_attrs):
self.frame_attributes = {}
for name, default in frame_attrs.items():
self.frame_attributes[name] = Attribute(default)
setattr(self, "_" + name, default)
super().__init__(None)
def __getattr__(self, name):
if "_" + name in self.__dict__:
return getattr(self, "_" + name)
else:
raise AttributeError(f"no {name}")
def __setattr__(self, name, value):
if name in self.frame_attributes:
raise AttributeError(f"can't set frame attribute '{name}'")
super().__setattr__(name, value)
| GenericFrame |
python | arrow-py__arrow | tests/test_arrow.py | {
"start": 55282,
"end": 68624
} | class ____:
def test_span_attribute(self):
with pytest.raises(ValueError):
self.arrow.span("span")
def test_span_year(self):
floor, ceil = self.arrow.span("year")
assert floor == datetime(2013, 1, 1, tzinfo=tz.tzutc())
assert ceil == datetime(2013, 12, 31, 23, 59, 59, 999999, tzinfo=tz.tzutc())
def test_span_quarter(self):
floor, ceil = self.arrow.span("quarter")
assert floor == datetime(2013, 1, 1, tzinfo=tz.tzutc())
assert ceil == datetime(2013, 3, 31, 23, 59, 59, 999999, tzinfo=tz.tzutc())
def test_span_quarter_count(self):
floor, ceil = self.arrow.span("quarter", 2)
assert floor == datetime(2013, 1, 1, tzinfo=tz.tzutc())
assert ceil == datetime(2013, 6, 30, 23, 59, 59, 999999, tzinfo=tz.tzutc())
def test_span_year_count(self):
floor, ceil = self.arrow.span("year", 2)
assert floor == datetime(2013, 1, 1, tzinfo=tz.tzutc())
assert ceil == datetime(2014, 12, 31, 23, 59, 59, 999999, tzinfo=tz.tzutc())
def test_span_month(self):
floor, ceil = self.arrow.span("month")
assert floor == datetime(2013, 2, 1, tzinfo=tz.tzutc())
assert ceil == datetime(2013, 2, 28, 23, 59, 59, 999999, tzinfo=tz.tzutc())
def test_span_week(self):
"""
>>> self.arrow.format("YYYY-MM-DD") == "2013-02-15"
>>> self.arrow.isoweekday() == 5 # a Friday
"""
# span week from Monday to Sunday
floor, ceil = self.arrow.span("week")
assert floor == datetime(2013, 2, 11, tzinfo=tz.tzutc())
assert ceil == datetime(2013, 2, 17, 23, 59, 59, 999999, tzinfo=tz.tzutc())
# span week from Tuesday to Monday
floor, ceil = self.arrow.span("week", week_start=2)
assert floor == datetime(2013, 2, 12, tzinfo=tz.tzutc())
assert ceil == datetime(2013, 2, 18, 23, 59, 59, 999999, tzinfo=tz.tzutc())
# span week from Saturday to Friday
floor, ceil = self.arrow.span("week", week_start=6)
assert floor == datetime(2013, 2, 9, tzinfo=tz.tzutc())
assert ceil == datetime(2013, 2, 15, 23, 59, 59, 999999, tzinfo=tz.tzutc())
# span week from Sunday to Saturday
floor, ceil = self.arrow.span("week", week_start=7)
assert floor == datetime(2013, 2, 10, tzinfo=tz.tzutc())
assert ceil == datetime(2013, 2, 16, 23, 59, 59, 999999, tzinfo=tz.tzutc())
def test_span_day(self):
floor, ceil = self.arrow.span("day")
assert floor == datetime(2013, 2, 15, tzinfo=tz.tzutc())
assert ceil == datetime(2013, 2, 15, 23, 59, 59, 999999, tzinfo=tz.tzutc())
def test_span_hour(self):
floor, ceil = self.arrow.span("hour")
assert floor == datetime(2013, 2, 15, 3, tzinfo=tz.tzutc())
assert ceil == datetime(2013, 2, 15, 3, 59, 59, 999999, tzinfo=tz.tzutc())
def test_span_minute(self):
floor, ceil = self.arrow.span("minute")
assert floor == datetime(2013, 2, 15, 3, 41, tzinfo=tz.tzutc())
assert ceil == datetime(2013, 2, 15, 3, 41, 59, 999999, tzinfo=tz.tzutc())
def test_span_second(self):
floor, ceil = self.arrow.span("second")
assert floor == datetime(2013, 2, 15, 3, 41, 22, tzinfo=tz.tzutc())
assert ceil == datetime(2013, 2, 15, 3, 41, 22, 999999, tzinfo=tz.tzutc())
def test_span_microsecond(self):
floor, ceil = self.arrow.span("microsecond")
assert floor == datetime(2013, 2, 15, 3, 41, 22, 8923, tzinfo=tz.tzutc())
assert ceil == datetime(2013, 2, 15, 3, 41, 22, 8923, tzinfo=tz.tzutc())
def test_floor(self):
floor, ceil = self.arrow.span("month")
assert floor == self.arrow.floor("month")
assert ceil == self.arrow.ceil("month")
def test_floor_week_start(self):
"""
Test floor method with week_start parameter for different week starts.
"""
# Test with default week_start=1 (Monday)
floor_default = self.arrow.floor("week")
floor_span_default, _ = self.arrow.span("week")
assert floor_default == floor_span_default
# Test with week_start=1 (Monday) - explicit
floor_monday = self.arrow.floor("week", week_start=1)
floor_span_monday, _ = self.arrow.span("week", week_start=1)
assert floor_monday == floor_span_monday
# Test with week_start=7 (Sunday)
floor_sunday = self.arrow.floor("week", week_start=7)
floor_span_sunday, _ = self.arrow.span("week", week_start=7)
assert floor_sunday == floor_span_sunday
# Test with week_start=6 (Saturday)
floor_saturday = self.arrow.floor("week", week_start=6)
floor_span_saturday, _ = self.arrow.span("week", week_start=6)
assert floor_saturday == floor_span_saturday
# Test with week_start=2 (Tuesday)
floor_tuesday = self.arrow.floor("week", week_start=2)
floor_span_tuesday, _ = self.arrow.span("week", week_start=2)
assert floor_tuesday == floor_span_tuesday
def test_ceil_week_start(self):
"""
Test ceil method with week_start parameter for different week starts.
"""
# Test with default week_start=1 (Monday)
ceil_default = self.arrow.ceil("week")
_, ceil_span_default = self.arrow.span("week")
assert ceil_default == ceil_span_default
# Test with week_start=1 (Monday) - explicit
ceil_monday = self.arrow.ceil("week", week_start=1)
_, ceil_span_monday = self.arrow.span("week", week_start=1)
assert ceil_monday == ceil_span_monday
# Test with week_start=7 (Sunday)
ceil_sunday = self.arrow.ceil("week", week_start=7)
_, ceil_span_sunday = self.arrow.span("week", week_start=7)
assert ceil_sunday == ceil_span_sunday
# Test with week_start=6 (Saturday)
ceil_saturday = self.arrow.ceil("week", week_start=6)
_, ceil_span_saturday = self.arrow.span("week", week_start=6)
assert ceil_saturday == ceil_span_saturday
# Test with week_start=2 (Tuesday)
ceil_tuesday = self.arrow.ceil("week", week_start=2)
_, ceil_span_tuesday = self.arrow.span("week", week_start=2)
assert ceil_tuesday == ceil_span_tuesday
def test_floor_ceil_week_start_values(self):
"""
Test specific date values for floor and ceil with different week_start values.
The test arrow is 2013-02-15 (Friday, isoweekday=5).
"""
# Test Monday start (week_start=1)
# Friday should floor to previous Monday (2013-02-11)
floor_mon = self.arrow.floor("week", week_start=1)
assert floor_mon == datetime(2013, 2, 11, tzinfo=tz.tzutc())
# Friday should ceil to next Sunday (2013-02-17)
ceil_mon = self.arrow.ceil("week", week_start=1)
assert ceil_mon == datetime(2013, 2, 17, 23, 59, 59, 999999, tzinfo=tz.tzutc())
# Test Sunday start (week_start=7)
# Friday should floor to previous Sunday (2013-02-10)
floor_sun = self.arrow.floor("week", week_start=7)
assert floor_sun == datetime(2013, 2, 10, tzinfo=tz.tzutc())
# Friday should ceil to next Saturday (2013-02-16)
ceil_sun = self.arrow.ceil("week", week_start=7)
assert ceil_sun == datetime(2013, 2, 16, 23, 59, 59, 999999, tzinfo=tz.tzutc())
# Test Saturday start (week_start=6)
# Friday should floor to previous Saturday (2013-02-09)
floor_sat = self.arrow.floor("week", week_start=6)
assert floor_sat == datetime(2013, 2, 9, tzinfo=tz.tzutc())
# Friday should ceil to next Friday (2013-02-15)
ceil_sat = self.arrow.ceil("week", week_start=6)
assert ceil_sat == datetime(2013, 2, 15, 23, 59, 59, 999999, tzinfo=tz.tzutc())
def test_floor_ceil_week_start_backward_compatibility(self):
"""
Test that floor and ceil methods maintain backward compatibility
when called without the week_start parameter.
"""
# Test that calling floor/ceil without parameters works the same as before
floor_old = self.arrow.floor("week")
floor_new = self.arrow.floor("week", week_start=1) # default value
assert floor_old == floor_new
ceil_old = self.arrow.ceil("week")
ceil_new = self.arrow.ceil("week", week_start=1) # default value
assert ceil_old == ceil_new
def test_floor_ceil_week_start_ignored_for_non_week_frames(self):
"""
Test that week_start parameter is ignored for non-week frames.
"""
# Test that week_start parameter is ignored for different frames
for frame in ["hour", "day", "month", "year"]:
# floor should work the same with or without week_start for non-week frames
floor_without = self.arrow.floor(frame)
floor_with = self.arrow.floor(frame, week_start=7) # should be ignored
assert floor_without == floor_with
# ceil should work the same with or without week_start for non-week frames
ceil_without = self.arrow.ceil(frame)
ceil_with = self.arrow.ceil(frame, week_start=7) # should be ignored
assert ceil_without == ceil_with
def test_floor_ceil_week_start_validation(self):
"""
Test that week_start parameter validation works correctly for week frames.
"""
# Valid values should work for week frames
for week_start in range(1, 8):
self.arrow.floor("week", week_start=week_start)
self.arrow.ceil("week", week_start=week_start)
# Invalid values should raise ValueError for week frames
with pytest.raises(
ValueError, match="week_start argument must be between 1 and 7"
):
self.arrow.floor("week", week_start=0)
with pytest.raises(
ValueError, match="week_start argument must be between 1 and 7"
):
self.arrow.floor("week", week_start=8)
with pytest.raises(
ValueError, match="week_start argument must be between 1 and 7"
):
self.arrow.ceil("week", week_start=0)
with pytest.raises(
ValueError, match="week_start argument must be between 1 and 7"
):
self.arrow.ceil("week", week_start=8)
# Invalid week_start values should be ignored for non-week frames (no validation)
# This ensures the parameter doesn't cause errors for other frames
for frame in ["hour", "day", "month", "year"]:
# These should not raise errors even though week_start is invalid
self.arrow.floor(frame, week_start=0)
self.arrow.floor(frame, week_start=8)
self.arrow.ceil(frame, week_start=0)
self.arrow.ceil(frame, week_start=8)
def test_span_inclusive_inclusive(self):
floor, ceil = self.arrow.span("hour", bounds="[]")
assert floor == datetime(2013, 2, 15, 3, tzinfo=tz.tzutc())
assert ceil == datetime(2013, 2, 15, 4, tzinfo=tz.tzutc())
def test_span_exclusive_inclusive(self):
floor, ceil = self.arrow.span("hour", bounds="(]")
assert floor == datetime(2013, 2, 15, 3, 0, 0, 1, tzinfo=tz.tzutc())
assert ceil == datetime(2013, 2, 15, 4, tzinfo=tz.tzutc())
def test_span_exclusive_exclusive(self):
floor, ceil = self.arrow.span("hour", bounds="()")
assert floor == datetime(2013, 2, 15, 3, 0, 0, 1, tzinfo=tz.tzutc())
assert ceil == datetime(2013, 2, 15, 3, 59, 59, 999999, tzinfo=tz.tzutc())
def test_bounds_are_validated(self):
with pytest.raises(ValueError):
floor, ceil = self.arrow.span("hour", bounds="][")
def test_exact(self):
result_floor, result_ceil = self.arrow.span("hour", exact=True)
expected_floor = datetime(2013, 2, 15, 3, 41, 22, 8923, tzinfo=tz.tzutc())
expected_ceil = datetime(2013, 2, 15, 4, 41, 22, 8922, tzinfo=tz.tzutc())
assert result_floor == expected_floor
assert result_ceil == expected_ceil
def test_exact_inclusive_inclusive(self):
floor, ceil = self.arrow.span("minute", bounds="[]", exact=True)
assert floor == datetime(2013, 2, 15, 3, 41, 22, 8923, tzinfo=tz.tzutc())
assert ceil == datetime(2013, 2, 15, 3, 42, 22, 8923, tzinfo=tz.tzutc())
def test_exact_exclusive_inclusive(self):
floor, ceil = self.arrow.span("day", bounds="(]", exact=True)
assert floor == datetime(2013, 2, 15, 3, 41, 22, 8924, tzinfo=tz.tzutc())
assert ceil == datetime(2013, 2, 16, 3, 41, 22, 8923, tzinfo=tz.tzutc())
def test_exact_exclusive_exclusive(self):
floor, ceil = self.arrow.span("second", bounds="()", exact=True)
assert floor == datetime(2013, 2, 15, 3, 41, 22, 8924, tzinfo=tz.tzutc())
assert ceil == datetime(2013, 2, 15, 3, 41, 23, 8922, tzinfo=tz.tzutc())
def test_all_parameters_specified(self):
floor, ceil = self.arrow.span("week", bounds="()", exact=True, count=2)
assert floor == datetime(2013, 2, 15, 3, 41, 22, 8924, tzinfo=tz.tzutc())
assert ceil == datetime(2013, 3, 1, 3, 41, 22, 8922, tzinfo=tz.tzutc())
@pytest.mark.usefixtures("time_2013_01_01")
| TestArrowSpan |
python | numba__numba | numba/core/byteflow.py | {
"start": 69087,
"end": 78250
} | class ____(object):
"""State of the trace
"""
def __init__(self, bytecode, pc, nstack, blockstack, nullvals=()):
"""
Parameters
----------
bytecode : numba.bytecode.ByteCode
function bytecode
pc : int
program counter
nstack : int
stackdepth at entry
blockstack : Sequence[Dict]
A sequence of dictionary denoting entries on the blockstack.
"""
self._bytecode = bytecode
self._pc_initial = pc
self._pc = pc
self._nstack_initial = nstack
self._stack = []
self._blockstack_initial = tuple(blockstack)
self._blockstack = list(blockstack)
self._temp_registers = []
self._insts = []
self._outedges = []
self._terminated = False
self._phis = {}
self._outgoing_phis = UniqueDict()
self._used_regs = set()
for i in range(nstack):
if i in nullvals:
phi = self.make_temp("null$")
else:
phi = self.make_temp("phi")
self._phis[phi] = i
self.push(phi)
def __repr__(self):
return "State(pc_initial={} nstack_initial={})".format(
self._pc_initial, self._nstack_initial
)
def get_identity(self):
return (self._pc_initial, self._nstack_initial)
def __hash__(self):
return hash(self.get_identity())
def __lt__(self, other):
return self.get_identity() < other.get_identity()
def __eq__(self, other):
return self.get_identity() == other.get_identity()
@property
def pc_initial(self):
"""The starting bytecode offset of this State.
The PC given to the constructor.
"""
return self._pc_initial
@property
def instructions(self):
"""The list of instructions information as a 2-tuple of
``(pc : int, register_map : Dict)``
"""
return self._insts
@property
def outgoing_edges(self):
"""The list of outgoing edges.
Returns
-------
edges : List[State]
"""
return self._outedges
@property
def outgoing_phis(self):
"""The dictionary of outgoing phi nodes.
The keys are the name of the PHI nodes.
The values are the outgoing states.
"""
return self._outgoing_phis
@property
def blockstack_initial(self):
"""A copy of the initial state of the blockstack
"""
return self._blockstack_initial
@property
def stack_depth(self):
"""The current size of the stack
Returns
-------
res : int
"""
return len(self._stack)
def find_initial_try_block(self):
"""Find the initial *try* block.
"""
for blk in reversed(self._blockstack_initial):
if blk['kind'] == BlockKind('TRY'):
return blk
def has_terminated(self):
return self._terminated
def get_inst(self):
return self._bytecode[self._pc]
def advance_pc(self):
inst = self.get_inst()
self._pc = inst.next
def make_temp(self, prefix=""):
if not prefix:
name = "${prefix}{offset}{opname}.{tempct}".format(
prefix=prefix,
offset=self._pc,
opname=self.get_inst().opname.lower(),
tempct=len(self._temp_registers),
)
else:
name = "${prefix}{offset}.{tempct}".format(
prefix=prefix,
offset=self._pc,
tempct=len(self._temp_registers),
)
self._temp_registers.append(name)
return name
def append(self, inst, **kwargs):
"""Append new inst"""
self._insts.append((inst.offset, kwargs))
self._used_regs |= set(_flatten_inst_regs(kwargs.values()))
def get_tos(self):
return self.peek(1)
def peek(self, k):
"""Return the k'th element on the stack
"""
return self._stack[-k]
def push(self, item):
"""Push to stack"""
self._stack.append(item)
def pop(self):
"""Pop the stack"""
return self._stack.pop()
def swap(self, idx):
"""Swap stack[idx] with the tos"""
s = self._stack
s[-1], s[-idx] = s[-idx], s[-1]
def push_block(self, synblk):
"""Push a block to blockstack
"""
assert 'stack_depth' in synblk
self._blockstack.append(synblk)
def reset_stack(self, depth):
"""Reset the stack to the given stack depth.
Returning the popped items.
"""
self._stack, popped = self._stack[:depth], self._stack[depth:]
return popped
def make_block(self, kind, end, reset_stack=True, handler=None):
"""Make a new block
"""
d = {
'kind': BlockKind(kind),
'end': end,
'entry_stack': len(self._stack),
}
if reset_stack:
d['stack_depth'] = len(self._stack)
else:
d['stack_depth'] = None
d['handler'] = handler
return d
def pop_block(self):
"""Pop a block and unwind the stack
"""
b = self._blockstack.pop()
self.reset_stack(b['stack_depth'])
return b
def pop_block_and_above(self, blk):
"""Find *blk* in the blockstack and remove it and all blocks above it
from the stack.
"""
idx = self._blockstack.index(blk)
assert 0 <= idx < len(self._blockstack)
self._blockstack = self._blockstack[:idx]
def get_top_block(self, kind):
"""Find the first block that matches *kind*
"""
kind = BlockKind(kind)
for bs in reversed(self._blockstack):
if bs['kind'] == kind:
return bs
def get_top_block_either(self, *kinds):
"""Find the first block that matches *kind*
"""
kinds = {BlockKind(kind) for kind in kinds}
for bs in reversed(self._blockstack):
if bs['kind'] in kinds:
return bs
def has_active_try(self):
"""Returns a boolean indicating if the top-block is a *try* block
"""
return self.get_top_block('TRY') is not None
def get_varname(self, inst):
"""Get referenced variable name from the instruction's oparg
"""
return self.get_varname_by_arg(inst.arg)
def get_varname_by_arg(self, oparg: int):
"""Get referenced variable name from the oparg
"""
return self._bytecode.co_varnames[oparg]
def terminate(self):
"""Mark block as terminated
"""
self._terminated = True
def fork(self, pc, npop=0, npush=0, extra_block=None):
"""Fork the state
"""
# Handle changes on the stack
stack = list(self._stack)
if npop:
assert 0 <= npop <= len(self._stack)
nstack = len(self._stack) - npop
stack = stack[:nstack]
if npush:
assert 0 <= npush
for i in range(npush):
stack.append(self.make_temp())
# Handle changes on the blockstack
blockstack = list(self._blockstack)
if PYVERSION in ((3, 11), (3, 12), (3, 13), (3, 14)):
# pop expired block in destination pc
while blockstack:
top = blockstack[-1]
end = top.get('end_offset') or top['end']
if pc >= end:
blockstack.pop()
else:
break
elif PYVERSION in ((3, 10),):
pass # intentionally bypass
else:
raise NotImplementedError(PYVERSION)
if extra_block:
blockstack.append(extra_block)
self._outedges.append(Edge(
pc=pc, stack=tuple(stack), npush=npush,
blockstack=tuple(blockstack),
))
self.terminate()
def split_new_block(self):
"""Split the state
"""
self.fork(pc=self._pc)
def get_outgoing_states(self):
"""Get states for each outgoing edges
"""
# Should only call once
assert not self._outgoing_phis
ret = []
for edge in self._outedges:
state = State(bytecode=self._bytecode, pc=edge.pc,
nstack=len(edge.stack), blockstack=edge.blockstack,
nullvals=[i for i, v in enumerate(edge.stack)
if _is_null_temp_reg(v)])
ret.append(state)
# Map outgoing_phis
for phi, i in state._phis.items():
self._outgoing_phis[phi] = edge.stack[i]
return ret
def get_outgoing_edgepushed(self):
"""
Returns
-------
Dict[int, int]
where keys are the PC
values are the edge-pushed stack values
"""
return {edge.pc: tuple(edge.stack[-edge.npush:])
for edge in self._outedges}
| _State |
python | kamyu104__LeetCode-Solutions | Python/number-of-pairs-satisfying-inequality.py | {
"start": 121,
"end": 586
} | class ____(object):
def numberOfPairs(self, nums1, nums2, diff):
"""
:type nums1: List[int]
:type nums2: List[int]
:type diff: int
:rtype: int
"""
sl = SortedList()
result = 0
for x, y in itertools.izip(nums1, nums2):
result += sl.bisect_right((x-y)+diff)
sl.add(x-y)
return result
# Time: O(nlogn)
# Space: O(n)
import itertools
import bisect
| Solution |
python | getsentry__responses | responses/__init__.py | {
"start": 10650,
"end": 17166
} | class ____:
passthrough: bool = False
content_type: Optional[str] = None
headers: Optional[Mapping[str, str]] = None
stream: Optional[bool] = False
def __init__(
self,
method: str,
url: "_URLPatternType",
match_querystring: Union[bool, object] = None,
match: "_MatcherIterable" = (),
*,
passthrough: bool = False,
) -> None:
self.method: str = method
# ensure the url has a default path set if the url is a string
self.url: "_URLPatternType" = _ensure_url_default_path(url)
if self._should_match_querystring(match_querystring):
match = tuple(match) + (
_query_string_matcher(urlsplit(self.url).query), # type: ignore[arg-type]
)
self.match: "_MatcherIterable" = match
self._calls: CallList = CallList()
self.passthrough = passthrough
self.status: int = 200
self.body: "_Body" = ""
def __eq__(self, other: Any) -> bool:
if not isinstance(other, BaseResponse):
return False
if self.method != other.method:
return False
# Can't simply do an equality check on the objects directly here since __eq__ isn't
# implemented for regex. It might seem to work as regex is using a cache to return
# the same regex instances, but it doesn't in all cases.
self_url = self.url.pattern if isinstance(self.url, Pattern) else self.url
other_url = other.url.pattern if isinstance(other.url, Pattern) else other.url
return self_url == other_url
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def _should_match_querystring(
self, match_querystring_argument: Union[bool, object]
) -> Union[bool, object]:
if isinstance(self.url, Pattern):
# the old default from <= 0.9.0
return False
if match_querystring_argument is not None:
if not isinstance(match_querystring_argument, FalseBool):
warn(
(
"Argument 'match_querystring' is deprecated. "
"Use 'responses.matchers.query_param_matcher' or "
"'responses.matchers.query_string_matcher'"
),
DeprecationWarning,
)
return match_querystring_argument
return bool(urlsplit(self.url).query)
def _url_matches(self, url: "_URLPatternType", other: str) -> bool:
"""Compares two URLs.
Compares only scheme, netloc and path. If 'url' is a re.Pattern, then checks that
'other' matches the pattern.
Parameters
----------
url : Union["Pattern[str]", str]
Reference URL or Pattern to compare.
other : str
URl that should be compared.
Returns
-------
bool
True, if URLs are identical or 'other' matches the pattern.
"""
if isinstance(url, str):
if _has_unicode(url):
url = _clean_unicode(url)
return _get_url_and_path(url) == _get_url_and_path(other)
elif isinstance(url, Pattern) and url.match(other):
return True
else:
return False
@staticmethod
def _req_attr_matches(
match: "_MatcherIterable", request: "PreparedRequest"
) -> Tuple[bool, str]:
for matcher in match:
valid, reason = matcher(request)
if not valid:
return False, reason
return True, ""
def get_headers(self) -> HTTPHeaderDict:
headers = HTTPHeaderDict() # Duplicate headers are legal
# Add Content-Type if it exists and is not already in headers
if self.content_type and (
not self.headers or "Content-Type" not in self.headers
):
headers["Content-Type"] = self.content_type
# Extend headers if they exist
if self.headers:
headers.extend(self.headers)
return headers
def get_response(self, request: "PreparedRequest") -> HTTPResponse:
raise NotImplementedError
def matches(self, request: "PreparedRequest") -> Tuple[bool, str]:
if request.method != self.method:
return False, "Method does not match"
if not self._url_matches(self.url, str(request.url)):
return False, "URL does not match"
valid, reason = self._req_attr_matches(self.match, request)
if not valid:
return False, reason
return True, ""
@property
def call_count(self) -> int:
return len(self._calls)
@property
def calls(self) -> CallList:
return self._calls
def _form_response(
body: Union[BufferedReader, BytesIO],
headers: Optional[Mapping[str, str]],
status: int,
request_method: Optional[str],
) -> HTTPResponse:
"""
Function to generate `urllib3.response.HTTPResponse` object.
The cookie handling functionality of the `requests` library relies on the response object
having an original response object with the headers stored in the `msg` attribute.
Instead of supplying a file-like object of type `HTTPMessage` for the headers, we provide
the headers directly. This approach eliminates the need to parse the headers into a file-like
object and then rely on the library to unparse it back. These additional conversions can
introduce potential errors.
"""
data = BytesIO()
data.close()
"""
The type `urllib3.response.HTTPResponse` is incorrect; we should
use `http.client.HTTPResponse` instead. However, changing this requires opening
a real socket to imitate the object. This may not be desired, as some users may
want to completely restrict network access in their tests.
See https://github.com/getsentry/responses/issues/691
"""
orig_response = HTTPResponse(
body=data, # required to avoid "ValueError: Unable to determine whether fp is closed."
msg=headers, # type: ignore[arg-type]
preload_content=False,
)
return HTTPResponse(
status=status,
reason=client.responses.get(status, None),
body=body,
headers=headers,
original_response=orig_response, # type: ignore[arg-type] # See comment above
preload_content=False,
request_method=request_method,
)
| BaseResponse |
python | dask__distributed | distributed/worker.py | {
"start": 4887,
"end": 4947
} | class ____(TypedDict):
status: Literal["busy"]
| GetDataBusy |
python | scrapy__scrapy | tests/test_contracts.py | {
"start": 16782,
"end": 16955
} | class ____(Contract):
name = "test_contract"
def post_process(self, response):
raise KeyboardInterrupt("Post-process exception")
| CustomFailContractPostProcess |
python | anthropics__anthropic-sdk-python | src/anthropic/types/tool_use_block_param.py | {
"start": 322,
"end": 623
} | class ____(TypedDict, total=False):
id: Required[str]
input: Required[Dict[str, object]]
name: Required[str]
type: Required[Literal["tool_use"]]
cache_control: Optional[CacheControlEphemeralParam]
"""Create a cache control breakpoint at this content block."""
| ToolUseBlockParam |
python | getsentry__sentry | src/sentry/flags/providers.py | {
"start": 2159,
"end": 2851
} | class ____(Exception):
"""An unsupported provider type was specified."""
...
def get_provider(
organization_id: int, provider_name: str, headers: HttpHeaders
) -> ProviderProtocol[dict[str, Any]] | None:
match provider_name:
case "launchdarkly":
return LaunchDarklyProvider(organization_id, signature=headers.get("X-LD-Signature"))
case "generic":
return GenericProvider(organization_id, signature=headers.get("X-Sentry-Signature"))
case "unleash":
return UnleashProvider(organization_id, signature=headers.get("Authorization"))
case _:
return None
"""LaunchDarkly provider."""
| InvalidProvider |
python | donnemartin__interactive-coding-challenges | graphs_trees/graph/graph.py | {
"start": 139,
"end": 1148
} | class ____:
def __init__(self, key):
self.key = key
self.visit_state = State.unvisited
self.incoming_edges = 0
self.adj_nodes = {} # Key = key, val = Node
self.adj_weights = {} # Key = key, val = weight
def __repr__(self):
return str(self.key)
def __lt__(self, other):
return self.key < other.key
def add_neighbor(self, neighbor, weight=0):
if neighbor is None or weight is None:
raise TypeError('neighbor or weight cannot be None')
neighbor.incoming_edges += 1
self.adj_weights[neighbor.key] = weight
self.adj_nodes[neighbor.key] = neighbor
def remove_neighbor(self, neighbor):
if neighbor is None:
raise TypeError('neighbor cannot be None')
if neighbor.key not in self.adj_nodes:
raise KeyError('neighbor not found')
neighbor.incoming_edges -= 1
del self.adj_weights[neighbor.key]
del self.adj_nodes[neighbor.key]
| Node |
python | django__django | tests/servers/test_basehttp.py | {
"start": 8044,
"end": 9217
} | class ____(SimpleTestCase):
request_factory = RequestFactory()
def test_broken_pipe_errors(self):
"""WSGIServer handles broken pipe errors."""
request = WSGIRequest(self.request_factory.get("/").environ)
client_address = ("192.168.2.0", 8080)
msg = f"- Broken pipe from {client_address}"
tests = [
BrokenPipeError,
ConnectionAbortedError,
ConnectionResetError,
]
for exception in tests:
with self.subTest(exception=exception):
try:
server = WSGIServer(("localhost", 0), WSGIRequestHandler)
try:
raise exception()
except Exception:
with captured_stderr() as err:
with self.assertLogs("django.server", "INFO") as cm:
server.handle_error(request, client_address)
self.assertEqual(err.getvalue(), "")
self.assertEqual(cm.records[0].getMessage(), msg)
finally:
server.server_close()
| WSGIServerTestCase |
python | scipy__scipy | scipy/stats/tests/test_resampling.py | {
"start": 92213,
"end": 92510
} | class ____:
def test_rvs_and_random_state(self):
message = "Use of `rvs` and `rng` are mutually exclusive."
rng = np.random.default_rng(34982345)
with pytest.raises(ValueError, match=message):
stats.MonteCarloMethod(rvs=rng.random, rng=rng)
| TestMonteCarloMethod |
python | patrys__httmock | tests.py | {
"start": 4497,
"end": 4918
} | class ____(unittest.TestCase):
@with_httmock(any_mock)
def test_decorator(self):
r = requests.get('http://example.com/')
self.assertEqual(r.content, b'Hello from example.com')
@with_httmock(any_mock)
def test_iter_lines(self):
r = requests.get('http://example.com/')
self.assertEqual(list(r.iter_lines()),
[b'Hello from example.com'])
| DecoratorTest |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_compat.py | {
"start": 3772,
"end": 3915
} | class ____:
msg: str
def test_add_note_fails_gracefully_on_frozen_instance():
add_note(ImmutableError("msg"), "some note")
| ImmutableError |
python | kamyu104__LeetCode-Solutions | Python/reverse-words-in-a-string-ii.py | {
"start": 27,
"end": 532
} | class ____(object):
def reverseWords(self, s):
"""
:type s: a list of 1 length strings (List[str])
:rtype: nothing
"""
def reverse(s, begin, end):
for i in xrange((end - begin) / 2):
s[begin + i], s[end - 1 - i] = s[end - 1 - i], s[begin + i]
reverse(s, 0, len(s))
i = 0
for j in xrange(len(s) + 1):
if j == len(s) or s[j] == ' ':
reverse(s, i, j)
i = j + 1
| Solution |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_build.py | {
"start": 12163,
"end": 16022
} | class ____(GoogleCloudBaseOperator):
"""
Creates a new BuildTrigger.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildCreateBuildTriggerOperator`
:param trigger: The BuildTrigger to create. If a dict is provided, it must be of the same form
as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.BuildTrigger`
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param location: The location of the project.
"""
template_fields: Sequence[str] = ("project_id", "trigger", "gcp_conn_id", "location")
operator_extra_links = (
CloudBuildTriggersListLink(),
CloudBuildTriggerDetailsLink(),
)
def __init__(
self,
*,
trigger: dict | BuildTrigger,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
location: str = "global",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.trigger = trigger
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.location = location
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"region": self.location,
}
def execute(self, context: Context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
result = hook.create_build_trigger(
trigger=self.trigger,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
location=self.location,
)
context["task_instance"].xcom_push(key="id", value=result.id)
project_id = self.project_id or hook.project_id
if project_id:
CloudBuildTriggerDetailsLink.persist(
context=context,
project_id=project_id,
trigger_id=result.id,
)
CloudBuildTriggersListLink.persist(
context=context,
project_id=project_id,
)
return BuildTrigger.to_dict(result)
| CloudBuildCreateBuildTriggerOperator |
python | django__django | tests/middleware_exceptions/middleware.py | {
"start": 2307,
"end": 2534
} | class ____(BaseMiddleware):
def process_template_response(self, request, response):
response.context_data["mw"].append(self.__class__.__name__)
return response
@async_only_middleware
| TemplateResponseMiddleware |
python | fluentpython__example-code-2e | 02-array-seq/lispy/py3.10/lis.py | {
"start": 5557,
"end": 6576
} | class ____:
"A user-defined Scheme procedure."
def __init__( # <1>
self, parms: list[Symbol], body: list[Expression], env: Environment
):
self.parms = parms # <2>
self.body = body
self.env = env
def __call__(self, *args: Expression) -> Any: # <3>
local_env = dict(zip(self.parms, args)) # <4>
env = Environment(local_env, self.env) # <5>
for exp in self.body: # <6>
result = evaluate(exp, env)
return result # <7>
# end::PROCEDURE[]
################ command-line interface
def run(source: str) -> Any:
global_env = Environment({}, standard_env())
tokens = tokenize(source)
while tokens:
exp = read_from_tokens(tokens)
result = evaluate(exp, global_env)
return result
def main(args: list[str]) -> None:
if len(args) == 1:
with open(args[0]) as fp:
run(fp.read())
else:
repl()
if __name__ == '__main__':
import sys
main(sys.argv[1:])
| Procedure |
python | scrapy__scrapy | tests/test_contracts.py | {
"start": 1154,
"end": 1355
} | class ____(Contract):
name = "custom_form"
request_cls = FormRequest
def adjust_request_args(self, args):
args["formdata"] = {"name": "scrapy"}
return args
| CustomFormContract |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 3531,
"end": 3600
} | class ____(PydanticErrorMixin, ValueError):
pass
| PydanticValueError |
python | wireservice__csvkit | csvkit/utilities/csvgrep.py | {
"start": 171,
"end": 3554
} | class ____(CSVKitUtility):
description = 'Search CSV files. Like the Unix "grep" command, but for tabular data.'
override_flags = ['L', 'I']
def add_arguments(self):
self.argparser.add_argument(
'-n', '--names', dest='names_only', action='store_true',
help='Display column names and indices from the input CSV and exit.')
self.argparser.add_argument(
'-c', '--columns', dest='columns',
help='A comma-separated list of column indices, names or ranges to be searched, e.g. "1,id,3-5".')
self.argparser.add_argument(
'-m', '--match', dest="pattern", action='store',
help='A string to search for.')
self.argparser.add_argument(
'-r', '--regex', dest='regex', action='store',
help='A regular expression to match.')
self.argparser.add_argument(
'-f', '--file', dest='matchfile', type=FileType('r'), action='store',
help='A path to a file. For each row, if any line in the file (stripped of line separators) is an exact '
'match of the cell value, the row matches.')
self.argparser.add_argument(
'-i', '--invert-match', dest='inverse', action='store_true',
help='Select non-matching rows, instead of matching rows.')
self.argparser.add_argument(
'-a', '--any-match', dest='any_match', action='store_true',
help='Select rows in which any column matches, instead of all columns.')
def main(self):
if self.args.names_only:
self.print_column_names()
return
if self.additional_input_expected():
sys.stderr.write('No input file or piped data provided. Waiting for standard input:\n')
if not self.args.columns:
self.argparser.error('You must specify at least one column to search using the -c option.')
if self.args.regex is None and self.args.pattern is None and self.args.matchfile is None:
self.argparser.error('One of -r, -m or -f must be specified, unless using the -n option.')
reader_kwargs = self.reader_kwargs
writer_kwargs = self.writer_kwargs
# Move the line_numbers option from the writer to the reader.
if writer_kwargs.pop('line_numbers', False):
reader_kwargs['line_numbers'] = True
rows, column_names, column_ids = self.get_rows_and_column_names_and_column_ids(**reader_kwargs)
if self.args.regex:
pattern = re.compile(self.args.regex)
elif self.args.matchfile:
lines = {line.rstrip() for line in self.args.matchfile}
self.args.matchfile.close()
def pattern(x):
return x in lines
else:
pattern = self.args.pattern
patterns = {column_id: pattern for column_id in column_ids}
filter_reader = FilteringCSVReader(rows, header=False, patterns=patterns,
inverse=self.args.inverse, any_match=self.args.any_match)
output = agate.csv.writer(self.output_file, **writer_kwargs)
output.writerow(column_names)
for row in filter_reader:
output.writerow(row)
def launch_new_instance():
utility = CSVGrep()
utility.run()
if __name__ == '__main__':
launch_new_instance()
| CSVGrep |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_cond_format04.py | {
"start": 345,
"end": 2938
} | class ____(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with conditional formatting."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.write("A1", 10)
worksheet.write("A2", 20)
worksheet.write("A3", 30)
worksheet.write("A4", 40)
worksheet.conditional_format(
"A1:A4",
{
"type": "duplicate",
"format": None,
},
)
worksheet.conditional_format(
"A1:A4",
{
"type": "unique",
"format": None,
},
)
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="A1:A4"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="1" spans="1:1">
<c r="A1">
<v>10</v>
</c>
</row>
<row r="2" spans="1:1">
<c r="A2">
<v>20</v>
</c>
</row>
<row r="3" spans="1:1">
<c r="A3">
<v>30</v>
</c>
</row>
<row r="4" spans="1:1">
<c r="A4">
<v>40</v>
</c>
</row>
</sheetData>
<conditionalFormatting sqref="A1:A4">
<cfRule type="duplicateValues" priority="1"/>
<cfRule type="uniqueValues" priority="2"/>
</conditionalFormatting>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleWorksheet |
python | python__mypy | mypy/plugin.py | {
"start": 8476,
"end": 8717
} | class ____(NamedTuple):
type: UnboundType # Type to analyze
context: Context # Relevant location context (e.g. for error messages)
api: TypeAnalyzerPluginInterface
@mypyc_attr(allow_interpreted_subclasses=True)
| AnalyzeTypeContext |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_events_trace_metrics.py | {
"start": 192,
"end": 15431
} | class ____(OrganizationEventsEndpointTestBase):
dataset = "tracemetrics"
def test_simple_with_explicit_filter(self) -> None:
trace_metrics = [
self.create_trace_metric("foo", 1, "counter"),
self.create_trace_metric("bar", 2, "counter"),
]
self.store_trace_metrics(trace_metrics)
response = self.do_request(
{
"field": ["metric.name", "value"],
"query": "metric.name:foo metric.type:counter",
"orderby": "value",
"dataset": self.dataset,
}
)
assert response.status_code == 200, response.content
assert response.data["data"] == [
{
"id": mock.ANY,
"project.name": self.project.slug,
"metric.name": "foo",
"value": 1,
},
]
def test_simple_aggregation_with_explicit_filter(self) -> None:
trace_metrics = [
self.create_trace_metric("foo", 1, "counter"),
self.create_trace_metric("bar", 2, "counter"),
]
self.store_trace_metrics(trace_metrics)
response = self.do_request(
{
"field": ["metric.name", "sum(value)"],
"query": "metric.name:foo metric.type:counter",
"orderby": "sum(value)",
"dataset": self.dataset,
}
)
assert response.status_code == 200, response.content
assert response.data["data"] == [
{
"metric.name": "foo",
"sum(value)": 1,
},
]
def test_simple(self) -> None:
trace_metrics = [
self.create_trace_metric("foo", 1, "counter"),
self.create_trace_metric("bar", 2, "counter"),
]
self.store_trace_metrics(trace_metrics)
response = self.do_request(
{
"metricName": "foo",
"metricType": "counter",
"field": ["metric.name", "value"],
"orderby": "value",
"dataset": self.dataset,
}
)
assert response.status_code == 200, response.content
assert response.data["data"] == [
{
"id": mock.ANY,
"project.name": self.project.slug,
"metric.name": "foo",
"value": 1,
},
]
def test_simple_aggregation(self) -> None:
trace_metrics = [
self.create_trace_metric("foo", 1, "counter"),
self.create_trace_metric("bar", 2, "counter"),
]
self.store_trace_metrics(trace_metrics)
response = self.do_request(
{
"metricName": "foo",
"metricType": "counter",
"field": ["metric.name", "sum(value)"],
"orderby": "sum(value)",
"dataset": self.dataset,
}
)
assert response.status_code == 200, response.content
assert response.data["data"] == [
{
"metric.name": "foo",
"sum(value)": 1,
},
]
def test_sum(self):
self.store_trace_metrics(
[self.create_trace_metric("test_metric", i + 1, "counter") for i in range(6)]
)
response = self.do_request(
{
"metricName": "test_metric",
"metricType": "counter",
"field": ["sum(value)"],
"project": self.project.id,
"dataset": self.dataset,
"statsPeriod": "10m",
}
)
assert response.status_code == 200, response.content
data = response.data["data"]
meta = response.data["meta"]
assert len(data) == 1
assert data[0]["sum(value)"] == 21
assert meta["fields"]["sum(value)"] == "number"
assert meta["dataset"] == "tracemetrics"
def test_sum_with_counter_metric_type(self):
counter_metrics = [
self.create_trace_metric("request_count", 5.0, "counter"),
self.create_trace_metric("request_count", 3.0, "counter"),
]
self.store_trace_metrics(counter_metrics)
response = self.do_request(
{
"metricName": "request_count",
"metricType": "counter",
"field": ["sum(value,request_count,counter,-)"],
"project": self.project.id,
"dataset": self.dataset,
"statsPeriod": "10m",
}
)
assert response.status_code == 200, response.content
data = response.data["data"]
meta = response.data["meta"]
assert len(data) == 1
assert data[0]["sum(value,request_count,counter,-)"] == 8
assert meta["fields"]["sum(value,request_count,counter,-)"] == "number"
assert meta["dataset"] == "tracemetrics"
def test_sum_with_distribution_metric_type(self):
gauge_metrics = [
self.create_trace_metric("request_duration", 75.0, "distribution"),
self.create_trace_metric("request_duration", 80.0, "distribution"),
]
self.store_trace_metrics(gauge_metrics)
response = self.do_request(
{
"metricName": "request_duration",
"metricType": "distribution",
"field": [
"sum(value, request_duration, distribution, -)"
], # Trying space in the formula here to make sure it works.
"project": self.project.id,
"dataset": self.dataset,
"statsPeriod": "10m",
}
)
assert response.status_code == 200, response.content
data = response.data["data"]
assert data[0] == {
"sum(value, request_duration, distribution, -)": 155,
}
def test_per_minute_formula(self) -> None:
# Store 6 trace metrics over a 10 minute period
self.store_trace_metrics(
[self.create_trace_metric("test_metric", 1.0, "counter") for _ in range(6)]
)
response = self.do_request(
{
"metricName": "test_metric",
"metricType": "counter",
"field": ["per_minute(value)"],
"project": self.project.id,
"dataset": self.dataset,
"statsPeriod": "10m",
}
)
assert response.status_code == 200, response.content
data = response.data["data"]
meta = response.data["meta"]
assert len(data) == 1
assert data[0]["per_minute(value)"] == 0.6
assert meta["fields"]["per_minute(value)"] == "rate"
assert meta["dataset"] == "tracemetrics"
def test_per_second_formula(self) -> None:
# Store 6 trace metrics over a 10 minute period
self.store_trace_metrics(
[self.create_trace_metric("test_metric", 1.0, "counter") for _ in range(6)]
)
response = self.do_request(
{
"metricName": "test_metric",
"metricType": "counter",
"field": ["per_second(value)"],
"project": self.project.id,
"dataset": self.dataset,
"statsPeriod": "10m",
}
)
assert response.status_code == 200, response.content
data = response.data["data"]
meta = response.data["meta"]
assert len(data) == 1
assert (
data[0]["per_second(value)"] == 0.01
) # Over ten minute period, 6 events / 600 seconds = 0.01 events per second
assert meta["fields"]["per_second(value)"] == "rate"
assert meta["dataset"] == "tracemetrics"
def test_per_second_formula_with_counter_metric_type(self) -> None:
counter_metrics = [
self.create_trace_metric("request_count", 5.0, "counter"),
self.create_trace_metric("request_count", 3.0, "counter"),
]
self.store_trace_metrics(counter_metrics)
response = self.do_request(
{
"metricName": "request_count",
"metricType": "counter",
"field": ["per_second(value,request_count,counter,-)"],
"project": self.project.id,
"dataset": self.dataset,
"statsPeriod": "10m",
}
)
assert response.status_code == 200, response.content
data = response.data["data"]
assert data[0] == {
"per_second(value,request_count,counter,-)": pytest.approx(8 / 600, abs=0.001)
}
def test_per_second_formula_with_gauge_metric_type(self) -> None:
gauge_metrics = [
self.create_trace_metric("cpu_usage", 75.0, "gauge"),
self.create_trace_metric("cpu_usage", 80.0, "gauge"),
]
self.store_trace_metrics(gauge_metrics)
response = self.do_request(
{
"metricName": "cpu_usage",
"metricType": "gauge",
"field": [
"per_second(value, cpu_usage, gauge, -)"
], # Trying space in the formula here to make sure it works.
"project": self.project.id,
"dataset": self.dataset,
"statsPeriod": "10m",
}
)
assert response.status_code == 200, response.content
data = response.data["data"]
assert data[0] == {
"per_second(value, cpu_usage, gauge, -)": pytest.approx(2 / 600, abs=0.001)
}
def test_per_second_formula_with_gauge_metric_type_without_top_level_metric_type(self) -> None:
gauge_metrics = [
self.create_trace_metric("cpu_usage", 75.0, "gauge"),
self.create_trace_metric("cpu_usage", 80.0, "gauge"),
]
self.store_trace_metrics(gauge_metrics)
response = self.do_request(
{
"field": [
"per_second(value, cpu_usage, gauge, -)"
], # Trying space in the formula here to make sure it works.
"query": "metric.name:cpu_usage",
"project": self.project.id,
"dataset": self.dataset,
"statsPeriod": "10m",
}
)
assert response.status_code == 200, response.content
data = response.data["data"]
assert data[0] == {
"per_second(value, cpu_usage, gauge, -)": pytest.approx(2 / 600, abs=0.001)
}
def test_list_metrics(self):
trace_metrics = [
*[self.create_trace_metric("foo", 1, "counter") for _ in range(1)],
*[self.create_trace_metric("bar", 1, "gauge") for _ in range(2)],
*[self.create_trace_metric("baz", 1, "distribution") for _ in range(3)],
*[self.create_trace_metric("qux", 1, "distribution", "millisecond") for _ in range(4)],
]
self.store_trace_metrics(trace_metrics)
# this query does not filter on any metrics, so scan all metrics
response = self.do_request(
{
"field": ["metric.name", "metric.type", "metric.unit", "count(metric.name)"],
"orderby": "metric.name",
"dataset": self.dataset,
}
)
assert response.status_code == 200, response.content
assert response.data["data"] == [
{
"metric.name": "bar",
"metric.type": "gauge",
"metric.unit": None,
"count(metric.name)": 2,
},
{
"metric.name": "baz",
"metric.type": "distribution",
"metric.unit": None,
"count(metric.name)": 3,
},
{
"metric.name": "foo",
"metric.type": "counter",
"metric.unit": None,
"count(metric.name)": 1,
},
{
"metric.name": "qux",
"metric.type": "distribution",
"metric.unit": "millisecond",
"count(metric.name)": 4,
},
]
def test_aggregation_embedded_metric_name(self):
trace_metrics = [
self.create_trace_metric("foo", 1, "counter"),
self.create_trace_metric("foo", 1, "counter"),
self.create_trace_metric("bar", 2, "counter"),
]
self.store_trace_metrics(trace_metrics)
response = self.do_request(
{
"field": ["count(value,foo,counter,-)"],
"dataset": self.dataset,
}
)
assert response.status_code == 200, response.content
assert response.data["data"] == [
{"count(value,foo,counter,-)": 2},
]
def test_aggregation_embedded_metric_name_formula(self):
trace_metrics = [
*[self.create_trace_metric("foo", 1, "counter") for _ in range(6)],
self.create_trace_metric("bar", 594, "counter"),
]
self.store_trace_metrics(trace_metrics)
response = self.do_request(
{
"field": ["per_second(value,foo,counter,-)"],
"dataset": self.dataset,
"statsPeriod": "10m",
}
)
assert response.status_code == 200, response.content
assert response.data["data"] == [
# Over ten minute period, 6 events / 600 seconds = 0.01 events per second
{"per_second(value,foo,counter,-)": 0.01},
]
def test_aggregation_multiple_embedded_same_metric_name(self):
trace_metrics = [
self.create_trace_metric("foo", 1, "distribution"),
self.create_trace_metric("foo", 2, "distribution"),
self.create_trace_metric("bar", 2, "counter"),
]
self.store_trace_metrics(trace_metrics)
response = self.do_request(
{
"field": [
"min(value,foo,distribution,-)",
"max(value,foo,distribution,-)",
],
"dataset": self.dataset,
}
)
assert response.status_code == 200, response.content
assert response.data["data"] == [
{
"min(value,foo,distribution,-)": 1,
"max(value,foo,distribution,-)": 2,
},
]
def test_aggregation_multiple_embedded_different_metric_name(self):
response = self.do_request(
{
"field": [
"count(value,foo,counter,-)",
"count(value,bar,counter,-)",
],
"dataset": self.dataset,
"project": self.project.id,
}
)
assert response.status_code == 400, response.content
assert response.data == {
"detail": ErrorDetail(
"Cannot aggregate multiple metrics in 1 query.", code="parse_error"
)
}
| OrganizationEventsTraceMetricsEndpointTest |
python | pytorch__pytorch | test/test_functional_optim.py | {
"start": 2581,
"end": 6358
} | class ____(TestCase):
def _validate_parameters(self, params_1, params_2):
for p1, p2 in zip(params_1, params_2):
self.assertEqual(p1, p2)
# Dynamo fails at compiling this for python 3.8/3.11
# Since it passes while compiling the actual code under test
# we disable dynamo here.
@torch._disable_dynamo(recursive=False)
def _test_functional_optim_parity(self, optim_cls, *args, **kwargs):
module_optim = MyModule()
module_functional = MyModule()
optim_params = module_optim.parameters()
optim = optim_cls(optim_params, *args, **kwargs)
functional_optim_cls = functional_optim_map.get(optim_cls, None)
if not functional_optim_cls:
raise ValueError(f"Functional optimizer not implemented for {optim_cls}")
optim_functional = functional_optim_cls(
[], *args, **kwargs, _allow_empty_param_list=True
)
if not hasattr(optim_functional, "step_param"):
raise ValueError(
f"Functional optimizer class {optim_functional} must implement step_param method."
)
# Initial weights should match
self._validate_parameters(
module_optim.parameters(), module_functional.parameters()
)
# Save old parameters to verify optimizer modifies them.
old_module_optim_params = [
param.detach().clone() for param in module_optim.parameters()
]
old_module_functional_params = [
param.detach().clone() for param in module_functional.parameters()
]
t1 = torch.randn(3, 3)
for _ in range(10):
module_optim.zero_grad()
module_functional.zero_grad()
# Forward + Backward
optim_out = module_optim(t1).sum()
functional_out = module_functional(t1).sum()
optim_out.backward()
functional_out.backward()
# Optimizer step
optim.step()
# Functional optimizer step_param
for param in module_functional.parameters():
grad = param.grad
optim_functional.step_param(param, grad)
# Validate parameters are equal
for optim_param, functional_param in zip(
module_optim.parameters(), module_functional.parameters()
):
self.assertEqual(optim_param, functional_param)
# Validate parameters are modified.
for i, (optim_param, functional_param) in enumerate(
zip(module_optim.parameters(), module_functional.parameters())
):
self.assertNotEqual(old_module_optim_params[i], optim_param)
self.assertNotEqual(old_module_functional_params[i], functional_param)
def _test_functional_optim_registration(self):
fn_map_key = "MyDummyFnOptimizer"
fn_optim = MyDummyFnOptimizer
register_functional_optim(fn_map_key, fn_optim)
functional_optim_cls = functional_optim_map.get(fn_map_key, None)
if not functional_optim_cls:
raise ValueError(f"Functional optimizer not registered for {fn_map_key}")
def test_functional_optim_registration(self):
self._test_functional_optim_registration()
def test_functional_optim_parity_sgd(self):
self._test_functional_optim_parity(SGD, 1e-2, momentum=0.9, weight_decay=0.01)
def test_functional_optim_parity_adam(self):
self._test_functional_optim_parity(Adam, 1e-2, betas=(0.9, 0.999), eps=1e-6)
def test_functional_optim_parity_adam_w(self):
self._test_functional_optim_parity(AdamW, 1e-2, betas=(0.9, 0.999), eps=1e-6)
if __name__ == "__main__":
run_tests()
| TestFunctionalOptimParity |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 74779,
"end": 75057
} | class ____(_PrintableStructure):
_fields_ = [
('version', c_uint),
('value', c_uint),
]
def __init__(self):
super(c_nvmlDeviceAddressingMode_t, self).__init__(version=nvmlDeviceAddressingMode_v1)
## Event structures
| c_nvmlDeviceAddressingMode_t |
python | walkccc__LeetCode | solutions/663. Equal Tree Partition/663.py | {
"start": 0,
"end": 409
} | class ____:
def checkEqualTree(self, root: TreeNode | None) -> bool:
if not root:
return False
seen = set()
def dfs(root: TreeNode | None) -> int:
if not root:
return 0
summ = root.val + dfs(root.left) + dfs(root.right)
seen.add(summ)
return summ
summ = root.val + dfs(root.left) + dfs(root.right)
return summ % 2 == 0 and summ // 2 in seen
| Solution |
python | gevent__gevent | src/greentest/3.9/test_socket.py | {
"start": 187636,
"end": 192894
} | class ____(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
| FileObjectClassTestCase |
python | davidhalter__jedi | jedi/api/classes.py | {
"start": 27437,
"end": 28585
} | class ____(BaseSignature):
"""
A full signature object is the return value of
:meth:`.Script.get_signatures`.
"""
def __init__(self, inference_state, signature, call_details):
super().__init__(inference_state, signature)
self._call_details = call_details
self._signature = signature
@property
def index(self):
"""
Returns the param index of the current cursor position.
Returns None if the index cannot be found in the curent call.
:rtype: int
"""
return self._call_details.calculate_index(
self._signature.get_param_names(resolve_stars=True)
)
@property
def bracket_start(self):
"""
Returns a line/column tuple of the bracket that is responsible for the
last function call. The first line is 1 and the first column 0.
:rtype: int, int
"""
return self._call_details.bracket_leaf.start_pos
def __repr__(self):
return '<%s: index=%r %s>' % (
type(self).__name__,
self.index,
self._signature.to_string(),
)
| Signature |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/abstractClass2.py | {
"start": 234,
"end": 313
} | class ____(InterfaceA):
def a(self) -> None:
print("MixinA.a")
| MixinA |
python | numpy__numpy | numpy/tests/test_configtool.py | {
"start": 559,
"end": 1812
} | class ____:
def check_numpyconfig(self, arg):
p = subprocess.run(['numpy-config', arg], capture_output=True, text=True)
p.check_returncode()
return p.stdout.strip()
def test_configtool_version(self):
stdout = self.check_numpyconfig('--version')
assert stdout == np.__version__
def test_configtool_cflags(self):
stdout = self.check_numpyconfig('--cflags')
assert f'-I{os.fspath(INCLUDE_DIR)}' in stdout
def test_configtool_pkgconfigdir(self):
stdout = self.check_numpyconfig('--pkgconfigdir')
assert pathlib.Path(stdout) == PKG_CONFIG_DIR.resolve()
@pytest.mark.skipif(not IS_INSTALLED,
reason="numpy must be installed to check its entrypoints")
def test_pkg_config_entrypoint():
(entrypoint,) = importlib.metadata.entry_points(group='pkg_config', name='numpy')
assert entrypoint.value == numpy._core.lib.pkgconfig.__name__
@pytest.mark.skipif(not IS_INSTALLED,
reason="numpy.pc is only available when numpy is installed")
@pytest.mark.skipif(IS_EDITABLE, reason="editable installs don't have a numpy.pc")
def test_pkg_config_config_exists():
assert PKG_CONFIG_DIR.joinpath('numpy.pc').is_file()
| TestNumpyConfig |
python | ansible__ansible | lib/ansible/modules/package_facts.py | {
"start": 12714,
"end": 13350
} | class ____(CLIMgr):
CLI = 'apk'
def list_installed(self):
rc, out, err = module.run_command([self._cli, 'info', '-v'])
if rc != 0:
raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
return out.splitlines()
def get_package_details(self, package):
raw_pkg_details = {'name': package, 'version': '', 'release': ''}
nvr = package.rsplit('-', 2)
try:
return {
'name': nvr[0],
'version': nvr[1],
'release': nvr[2],
}
except IndexError:
return raw_pkg_details
| APK |
python | PrefectHQ__prefect | src/prefect/events/actions.py | {
"start": 5875,
"end": 7032
} | class ____(Action):
"""Base class for Actions that operate on Work Queues and need to infer them from
events"""
source: Literal["selected", "inferred"] = Field(
"selected",
description=(
"Whether this Action applies to a specific selected "
"work queue (given by `work_queue_id`), or to a work queue that is "
"inferred from the triggering event. If the source is 'inferred', "
"the `work_queue_id` may not be set. If the source is 'selected', the "
"`work_queue_id` must be set."
),
)
work_queue_id: Optional[UUID] = Field(
None, description="The identifier of the work queue to pause"
)
@model_validator(mode="after")
def selected_work_queue_requires_id(self) -> Self:
wants_selected_work_queue = self.source == "selected"
has_work_queue_id = bool(self.work_queue_id)
if wants_selected_work_queue != has_work_queue_id:
raise ValueError(
"work_queue_id is "
+ ("not allowed" if has_work_queue_id else "required")
)
return self
| WorkQueueAction |
python | numba__numba | numba/tests/test_typingerror.py | {
"start": 6666,
"end": 7235
} | class ____(unittest.TestCase):
def test_readonly_array(self):
@jit("(f8[:],)", nopython=True)
def inner(x):
return x
@jit(nopython=True)
def outer():
return inner(gvalues)
gvalues = np.ones(10, dtype=np.float64)
with self.assertRaises(TypingError) as raises:
outer()
got = str(raises.exception)
pat = r"Invalid use of.*readonly array\(float64, 1d, C\)"
self.assertIsNotNone(re.search(pat, got))
if __name__ == '__main__':
unittest.main()
| TestCallError |
python | pytest-dev__pytest-rerunfailures | src/pytest_rerunfailures.py | {
"start": 14183,
"end": 15466
} | class ____(SocketDB):
def __init__(self):
super().__init__()
self.sock.bind(("127.0.0.1", 0))
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.rerunfailures_db = {}
t = threading.Thread(target=self.run_server, daemon=True)
t.start()
@property
def sock_port(self):
return self.sock.getsockname()[1]
def run_server(self):
self.sock.listen()
while True:
conn, _ = self.sock.accept()
t = threading.Thread(target=self.run_connection, args=(conn,), daemon=True)
t.start()
def run_connection(self, conn):
with suppress(ConnectionError):
while True:
op, i, k, v = self._sock_recv(conn).split("|")
if op == "set":
self._set(i, k, int(v))
elif op == "get":
self._sock_send(conn, str(self._get(i, k)))
def _set(self, i: str, k: str, v: int):
if i not in self.rerunfailures_db:
self.rerunfailures_db[i] = {}
self.rerunfailures_db[i][k] = v
def _get(self, i: str, k: str) -> int:
try:
return self.rerunfailures_db[i][k]
except KeyError:
return 0
| ServerStatusDB |
python | huggingface__transformers | src/transformers/models/t5/modeling_t5.py | {
"start": 36231,
"end": 43361
} | class ____(T5PreTrainedModel):
_keys_to_ignore_on_load_unexpected = [
"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight",
]
_tied_weights_keys = {
"encoder.embed_tokens.weight": "shared.weight",
"decoder.embed_tokens.weight": "shared.weight",
}
def __init__(self, config: T5Config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.tie_encoder_decoder = False
self.encoder = T5Stack(encoder_config)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.tie_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(decoder_config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.BoolTensor] = None,
encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.Tensor] = None,
decoder_inputs_embeds: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[tuple[torch.FloatTensor], Seq2SeqModelOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for detail.
[What are input IDs?](../glossary#input-ids)
To know more on how to prepare `input_ids` for pretraining take a look a [T5 Training](./t5#training).
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5
Training](./t5#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
Example:
```python
>>> from transformers import AutoTokenizer, T5Model
>>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
>>> model = T5Model.from_pretrained("google-t5/t5-small")
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
... ).input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> # preprocess: Prepend decoder_input_ids with start token which is pad token for T5Model.
>>> # This is not needed for torch's T5ForConditionalGeneration as it does this internally using labels arg.
>>> decoder_input_ids = model._shift_right(decoder_input_ids)
>>> # forward pass
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
>>> last_hidden_states = outputs.last_hidden_state
```"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@auto_docstring(
custom_intro="""
T5 Model with a `language modeling` head on top.
"""
)
| T5Model |
python | bottlepy__bottle | bottle.py | {
"start": 8465,
"end": 17214
} | class ____:
""" A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and a HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
"""
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf: (_re_flatten(conf or self.default_pattern),
None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)
}
def add_filter(self, name, func):
""" Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. """
self.filters[name] = func
rule_syntax = re.compile('(\\\\*)'
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'
'(?::((?:\\\\.|[^\\\\>])+)?)?)?>))')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if g[2] is not None:
depr(0, 13, "Use of old route syntax.",
"Use <name> instead of :name in routes.",
stacklevel=4)
if len(g[0]) % 2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix + rule[offset:], None, None
def add(self, rule, method, target, name=None):
""" Add a new rule or replace the target for an existing rule. """
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error as e:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, e))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning, stacklevel=3)
self.dyna_routes[method][
self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x + maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
""" Build an URL by filling the wildcards in a rule. """
builder = self.builder.get(_name)
if not builder:
raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons):
query['anon%d' % i] = value
url = ''.join([f(query.pop(n)) if n else f for (n, f) in builder])
return url if not query else url + '?' + urlencode(query, doseq=True)
except KeyError as E:
raise RouteBuildError('Missing URL argument: %r' % E.args[0])
def match(self, environ):
""" Return a (target, url_args) tuple or raise HTTPError(400/404/405). """
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
methods = ('PROXY', 'HEAD', 'GET', 'ANY') if verb == 'HEAD' else ('PROXY', verb, 'ANY')
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
allowed.add(method)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
| Router |
python | spack__spack | lib/spack/spack/stage.py | {
"start": 48244,
"end": 48352
} | class ____(spack.error.SpackError):
"""Superclass for all errors encountered during staging."""
| StageError |
python | django__django | tests/modeladmin/test_checks.py | {
"start": 12492,
"end": 15025
} | class ____(CheckTestCase):
def test_not_iterable(self):
class TestModelAdmin(ModelAdmin):
filter_horizontal = 10
self.assertIsInvalid(
TestModelAdmin,
ValidationTestModel,
"The value of 'filter_horizontal' must be a list or tuple.",
"admin.E018",
)
def test_missing_field(self):
class TestModelAdmin(ModelAdmin):
filter_horizontal = ("non_existent_field",)
self.assertIsInvalid(
TestModelAdmin,
ValidationTestModel,
"The value of 'filter_horizontal[0]' refers to 'non_existent_field', "
"which is not a field of 'modeladmin.ValidationTestModel'.",
"admin.E019",
)
def test_invalid_field_type(self):
class TestModelAdmin(ModelAdmin):
filter_horizontal = ("name",)
self.assertIsInvalid(
TestModelAdmin,
ValidationTestModel,
"The value of 'filter_horizontal[0]' must be a many-to-many field.",
"admin.E020",
)
@isolate_apps("modeladmin")
def test_invalid_reverse_m2m_field_with_related_name(self):
class Contact(Model):
pass
class Customer(Model):
contacts = ManyToManyField("Contact", related_name="customers")
class TestModelAdmin(ModelAdmin):
filter_horizontal = ["customers"]
self.assertIsInvalid(
TestModelAdmin,
Contact,
"The value of 'filter_horizontal[0]' must be a many-to-many field.",
"admin.E020",
)
@isolate_apps("modeladmin")
def test_invalid_m2m_field_with_through(self):
class Artist(Model):
bands = ManyToManyField("Band", through="BandArtist")
class BandArtist(Model):
artist = ForeignKey("Artist", on_delete=CASCADE)
band = ForeignKey("Band", on_delete=CASCADE)
class TestModelAdmin(ModelAdmin):
filter_horizontal = ["bands"]
self.assertIsInvalid(
TestModelAdmin,
Artist,
"The value of 'filter_horizontal[0]' cannot include the ManyToManyField "
"'bands', because that field manually specifies a relationship model.",
"admin.E013",
)
def test_valid_case(self):
class TestModelAdmin(ModelAdmin):
filter_horizontal = ("users",)
self.assertIsValid(TestModelAdmin, ValidationTestModel)
| FilterHorizontalCheckTests |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_task_instances.py | {
"start": 23913,
"end": 36419
} | class ____:
@pytest.fixture(autouse=True)
def setup_attrs(self) -> None:
self.default_time = DEFAULT_DATETIME_1
self.ti_init = {
"logical_date": self.default_time,
"state": State.RUNNING,
}
self.ti_extras = {
"start_date": self.default_time + dt.timedelta(days=1),
"end_date": self.default_time + dt.timedelta(days=2),
"pid": 100,
"duration": 10000,
"pool": "default_pool",
"queue": "default_queue",
}
clear_db_runs()
clear_rendered_ti_fields()
def create_dag_runs_with_mapped_tasks(self, dag_maker, session, dags=None):
for dag_id, dag in (dags or {}).items():
count = dag["success"] + dag["running"]
with dag_maker(
session=session, dag_id=dag_id, start_date=DEFAULT_DATETIME_1, serialized=True
) as sdag:
task1 = BaseOperator(task_id="op1")
mapped = MockOperator.partial(task_id="task_2", executor="default").expand(arg2=task1.output)
dr = dag_maker.create_dagrun(
run_id=f"run_{dag_id}",
logical_date=DEFAULT_DATETIME_1,
data_interval=(DEFAULT_DATETIME_1, DEFAULT_DATETIME_2),
)
dag_version = DagVersion.get_latest_version(dag_id)
session.add(
TaskMap(
dag_id=dr.dag_id,
task_id=task1.task_id,
run_id=dr.run_id,
map_index=-1,
length=count,
keys=None,
)
)
if count:
# Remove the map_index=-1 TI when we're creating other TIs
session.query(TaskInstance).filter(
TaskInstance.dag_id == mapped.dag_id,
TaskInstance.task_id == mapped.task_id,
TaskInstance.run_id == dr.run_id,
).delete()
for index, state in enumerate(
itertools.chain(
itertools.repeat(TaskInstanceState.SUCCESS, dag["success"]),
itertools.repeat(TaskInstanceState.FAILED, dag["failed"]),
itertools.repeat(TaskInstanceState.RUNNING, dag["running"]),
)
):
ti = TaskInstance(
mapped, run_id=dr.run_id, map_index=index, state=state, dag_version_id=dag_version.id
)
setattr(ti, "start_date", DEFAULT_DATETIME_1)
session.add(ti)
DagBundlesManager().sync_bundles_to_db()
dagbag = DagBag(os.devnull, include_examples=False)
dagbag.dags = {dag_id: dag_maker.dag}
sync_bag_to_db(dagbag, "dags-folder", None)
session.flush()
TaskMap.expand_mapped_task(sdag.task_dict[mapped.task_id], dr.run_id, session=session)
@pytest.fixture
def one_task_with_mapped_tis(self, dag_maker, session):
self.create_dag_runs_with_mapped_tasks(
dag_maker,
session,
dags={
"mapped_tis": {
"success": 3,
"failed": 0,
"running": 0,
},
},
)
@pytest.fixture
def one_task_with_single_mapped_ti(self, dag_maker, session):
self.create_dag_runs_with_mapped_tasks(
dag_maker,
session,
dags={
"mapped_tis": {
"success": 1,
"failed": 0,
"running": 0,
},
},
)
@pytest.fixture
def one_task_with_many_mapped_tis(self, dag_maker, session):
self.create_dag_runs_with_mapped_tasks(
dag_maker,
session,
dags={
"mapped_tis": {
"success": 5,
"failed": 20,
"running": 85,
},
},
)
@pytest.fixture
def one_task_with_zero_mapped_tis(self, dag_maker, session):
self.create_dag_runs_with_mapped_tasks(
dag_maker,
session,
dags={
"mapped_tis": {
"success": 0,
"failed": 0,
"running": 0,
},
},
)
def test_should_respond_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.get(
"/dags/mapped_tis/dagRuns/run_mapped_tis/taskInstances/task_2/listMapped",
)
assert response.status_code == 401
def test_should_respond_403(self, unauthorized_test_client):
response = unauthorized_test_client.get(
"/dags/mapped_tis/dagRuns/run_mapped_tis/taskInstances/task_2/listMapped",
)
assert response.status_code == 403
def test_should_respond_404(self, test_client):
response = test_client.get(
"/dags/mapped_tis/dagRuns/run_mapped_tis/taskInstances/task_2/listMapped",
)
assert response.status_code == 404
assert response.json() == {"detail": "The Dag with ID: `mapped_tis` was not found"}
def test_should_respond_200(self, one_task_with_many_mapped_tis, test_client):
with assert_queries_count(4):
response = test_client.get(
"/dags/mapped_tis/dagRuns/run_mapped_tis/taskInstances/task_2/listMapped",
)
assert response.status_code == 200
assert response.json()["total_entries"] == 110
assert len(response.json()["task_instances"]) == 50
def test_offset_limit(self, test_client, one_task_with_many_mapped_tis):
response = test_client.get(
"/dags/mapped_tis/dagRuns/run_mapped_tis/taskInstances/task_2/listMapped",
params={"offset": 4, "limit": 10},
)
assert response.status_code == 200
body = response.json()
assert body["total_entries"] == 110
assert len(body["task_instances"]) == 10
assert list(range(4, 14)) == [ti["map_index"] for ti in body["task_instances"]]
@pytest.mark.parametrize(
("params", "expected_map_indexes"),
[
({"order_by": "map_index", "limit": 100}, list(range(100))),
({"order_by": "-map_index", "limit": 100}, list(range(109, 9, -1))),
(
{"order_by": "state", "limit": 108},
list(range(5, 25)) + list(range(25, 110)) + list(range(3)),
),
(
{"order_by": "-state", "limit": 100},
list(range(5)[::-1]) + list(range(25, 110)[::-1]) + list(range(15, 25)[::-1]),
),
({"order_by": "logical_date", "limit": 100}, list(range(100))),
({"order_by": "-logical_date", "limit": 100}, list(range(109, 9, -1))),
({"order_by": "data_interval_start", "limit": 100}, list(range(100))),
({"order_by": "-data_interval_start", "limit": 100}, list(range(109, 9, -1))),
],
)
def test_mapped_instances_order(
self, test_client, session, params, expected_map_indexes, one_task_with_many_mapped_tis
):
with assert_queries_count(4):
response = test_client.get(
"/dags/mapped_tis/dagRuns/run_mapped_tis/taskInstances/task_2/listMapped",
params=params,
)
assert response.status_code == 200
body = response.json()
assert body["total_entries"] == 110
assert len(body["task_instances"]) == params["limit"]
assert expected_map_indexes == [ti["map_index"] for ti in body["task_instances"]]
@pytest.mark.parametrize(
("params", "expected_map_indexes"),
[
({"order_by": "rendered_map_index", "limit": 108}, [0] + list(range(1, 108))), # Asc
({"order_by": "-rendered_map_index", "limit": 100}, [0] + list(range(11, 110)[::-1])), # Desc
],
)
def test_rendered_map_index_order(
self, test_client, session, params, expected_map_indexes, one_task_with_many_mapped_tis
):
ti = (
session.query(TaskInstance)
.where(TaskInstance.task_id == "task_2", TaskInstance.map_index == 0)
.first()
)
ti._rendered_map_index = "a"
session.commit()
with assert_queries_count(4):
response = test_client.get(
"/dags/mapped_tis/dagRuns/run_mapped_tis/taskInstances/task_2/listMapped",
params=params,
)
assert response.status_code == 200
body = response.json()
assert body["total_entries"] == 110
assert len(body["task_instances"]) == params["limit"]
assert expected_map_indexes == [ti["map_index"] for ti in body["task_instances"]]
def test_with_date(self, test_client, one_task_with_mapped_tis):
response = test_client.get(
"/dags/mapped_tis/dagRuns/run_mapped_tis/taskInstances/task_2/listMapped",
params={"start_date_gte": DEFAULT_DATETIME_1},
)
assert response.status_code == 200
body = response.json()
assert body["total_entries"] == 3
assert len(body["task_instances"]) == 3
response = test_client.get(
"/dags/mapped_tis/dagRuns/run_mapped_tis/taskInstances/task_2/listMapped",
params={"start_date_gte": DEFAULT_DATETIME_2},
)
assert response.status_code == 200
body = response.json()
assert body["total_entries"] == 0
assert body["task_instances"] == []
def test_with_logical_date(self, test_client, one_task_with_mapped_tis):
response = test_client.get(
"/dags/mapped_tis/dagRuns/run_mapped_tis/taskInstances/task_2/listMapped",
params={"logical_date_gte": DEFAULT_DATETIME_1},
)
assert response.status_code == 200
body = response.json()
assert body["total_entries"] == 3
assert len(body["task_instances"]) == 3
response = test_client.get(
"/dags/mapped_tis/dagRuns/run_mapped_tis/taskInstances/task_2/listMapped",
params={"logical_date_gte": DEFAULT_DATETIME_2},
)
assert response.status_code == 200
body = response.json()
assert body["total_entries"] == 0
assert body["task_instances"] == []
@pytest.mark.parametrize(
("query_params", "expected_total_entries", "expected_task_instance_count"),
[
({"state": "success"}, 3, 3),
({"state": "running"}, 0, 0),
({"pool": "default_pool"}, 3, 3),
({"pool": "test_pool"}, 0, 0),
({"queue": "default"}, 3, 3),
({"queue": "test_queue"}, 0, 0),
({"executor": "default"}, 3, 3),
({"executor": "no_exec"}, 0, 0),
({"map_index": [0, 1]}, 2, 2),
({"map_index": [5]}, 0, 0),
],
)
def test_mapped_task_instances_filters(
self,
test_client,
one_task_with_mapped_tis,
query_params,
expected_total_entries,
expected_task_instance_count,
):
response = test_client.get(
"/dags/mapped_tis/dagRuns/run_mapped_tis/taskInstances/task_2/listMapped",
params=query_params,
)
assert response.status_code == 200
body = response.json()
assert body["total_entries"] == expected_total_entries
assert len(body["task_instances"]) == expected_task_instance_count
def test_with_zero_mapped(self, test_client, one_task_with_zero_mapped_tis, session):
response = test_client.get(
"/dags/mapped_tis/dagRuns/run_mapped_tis/taskInstances/task_2/listMapped",
)
assert response.status_code == 200
body = response.json()
assert body["total_entries"] == 0
assert body["task_instances"] == []
def test_should_raise_404_not_found_for_nonexistent_task(
self, one_task_with_zero_mapped_tis, test_client
):
response = test_client.get(
"/dags/mapped_tis/dagRuns/run_mapped_tis/taskInstances/nonexistent_task/listMapped",
)
assert response.status_code == 404
assert response.json()["detail"] == "Task id nonexistent_task not found"
| TestGetMappedTaskInstances |
python | pandas-dev__pandas | pandas/core/arrays/datetimes.py | {
"start": 4051,
"end": 100299
} | class ____(dtl.TimelikeOps, dtl.DatelikeOps):
"""
Pandas ExtensionArray for tz-naive or tz-aware datetime data.
.. warning::
DatetimeArray is currently experimental, and its API may change
without warning. In particular, :attr:`DatetimeArray.dtype` is
expected to change to always be an instance of an ``ExtensionDtype``
subclass.
Parameters
----------
data : Series, Index, DatetimeArray, ndarray
The datetime data.
For DatetimeArray `values` (or a Series or Index boxing one),
`dtype` and `freq` will be extracted from `values`.
dtype : numpy.dtype or DatetimeTZDtype
Note that the only NumPy dtype allowed is 'datetime64[ns]'.
freq : str or Offset, optional
The frequency.
copy : bool, default False
Whether to copy the underlying array of values.
Attributes
----------
None
Methods
-------
None
See Also
--------
DatetimeIndex : Immutable Index for datetime-like data.
Series : One-dimensional labeled array capable of holding datetime-like data.
Timestamp : Pandas replacement for python datetime.datetime object.
to_datetime : Convert argument to datetime.
period_range : Return a fixed frequency PeriodIndex.
Examples
--------
>>> pd.arrays.DatetimeArray._from_sequence(
... pd.DatetimeIndex(["2023-01-01", "2023-01-02"], freq="D")
... )
<DatetimeArray>
['2023-01-01 00:00:00', '2023-01-02 00:00:00']
Length: 2, dtype: datetime64[us]
"""
_typ = "datetimearray"
_internal_fill_value = np.datetime64("NaT", "ns")
_recognized_scalars = (datetime, np.datetime64)
_is_recognized_dtype: Callable[[DtypeObj], bool] = lambda x: lib.is_np_dtype(
x, "M"
) or isinstance(x, DatetimeTZDtype)
_infer_matches = ("datetime", "datetime64", "date")
@property
def _scalar_type(self) -> type[Timestamp]:
return Timestamp
# define my properties & methods for delegation
_bool_ops: list[str] = [
"is_month_start",
"is_month_end",
"is_quarter_start",
"is_quarter_end",
"is_year_start",
"is_year_end",
"is_leap_year",
]
_field_ops: list[str] = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"weekday",
"dayofweek",
"day_of_week",
"dayofyear",
"day_of_year",
"quarter",
"days_in_month",
"daysinmonth",
"microsecond",
"nanosecond",
]
_other_ops: list[str] = ["date", "time", "timetz"]
_datetimelike_ops: list[str] = (
_field_ops + _bool_ops + _other_ops + ["unit", "freq", "tz"]
)
_datetimelike_methods: list[str] = [
"to_period",
"tz_localize",
"tz_convert",
"normalize",
"strftime",
"round",
"floor",
"ceil",
"month_name",
"day_name",
"as_unit",
]
# ndim is inherited from ExtensionArray, must exist to ensure
# Timestamp.__richcmp__(DateTimeArray) operates pointwise
# ensure that operations with numpy arrays defer to our implementation
__array_priority__ = 1000
# -----------------------------------------------------------------
# Constructors
_dtype: np.dtype[np.datetime64] | DatetimeTZDtype
_freq: BaseOffset | None = None
@classmethod
def _validate_dtype(cls, values, dtype):
# used in TimeLikeOps.__init__
dtype = _validate_dt64_dtype(dtype)
_validate_dt64_dtype(values.dtype)
if isinstance(dtype, np.dtype):
if values.dtype != dtype:
raise ValueError("Values resolution does not match dtype.")
else:
vunit = np.datetime_data(values.dtype)[0]
if vunit != dtype.unit:
raise ValueError("Values resolution does not match dtype.")
return dtype
# error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
@classmethod
def _simple_new( # type: ignore[override]
cls,
values: npt.NDArray[np.datetime64],
freq: BaseOffset | None = None,
dtype: np.dtype[np.datetime64] | DatetimeTZDtype = DT64NS_DTYPE,
) -> Self:
assert isinstance(values, np.ndarray)
assert dtype.kind == "M"
if isinstance(dtype, np.dtype):
assert dtype == values.dtype
assert not is_unitless(dtype)
else:
# DatetimeTZDtype. If we have e.g. DatetimeTZDtype[us, UTC],
# then values.dtype should be M8[us].
assert dtype._creso == get_unit_from_dtype(values.dtype)
result = super()._simple_new(values, dtype)
result._freq = freq
return result
@classmethod
def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False) -> Self:
return cls._from_sequence_not_strict(scalars, dtype=dtype, copy=copy)
@classmethod
def _from_sequence_not_strict(
cls,
data,
*,
dtype=None,
copy: bool = False,
tz=lib.no_default,
freq: str | BaseOffset | lib.NoDefault | None = lib.no_default,
dayfirst: bool = False,
yearfirst: bool = False,
ambiguous: TimeAmbiguous = "raise",
) -> Self:
"""
A non-strict version of _from_sequence, called from DatetimeIndex.__new__.
"""
# if the user either explicitly passes tz=None or a tz-naive dtype, we
# disallows inferring a tz.
explicit_tz_none = tz is None
if tz is lib.no_default:
tz = None
else:
tz = timezones.maybe_get_tz(tz)
dtype = _validate_dt64_dtype(dtype)
# if dtype has an embedded tz, capture it
tz = _validate_tz_from_dtype(dtype, tz, explicit_tz_none)
unit = None
if dtype is not None:
unit = dtl.dtype_to_unit(dtype)
data, copy = dtl.ensure_arraylike_for_datetimelike(
data, copy, cls_name="DatetimeArray"
)
inferred_freq = None
if isinstance(data, DatetimeArray):
inferred_freq = data.freq
subarr, tz = _sequence_to_dt64(
data,
copy=copy,
tz=tz,
dayfirst=dayfirst,
yearfirst=yearfirst,
ambiguous=ambiguous,
out_unit=unit,
)
# We have to call this again after possibly inferring a tz above
_validate_tz_from_dtype(dtype, tz, explicit_tz_none)
if tz is not None and explicit_tz_none:
raise ValueError(
"Passed data is timezone-aware, incompatible with 'tz=None'. "
"Use obj.tz_localize(None) instead."
)
data_unit = np.datetime_data(subarr.dtype)[0]
data_unit = cast("TimeUnit", data_unit)
data_dtype = tz_to_dtype(tz, data_unit)
result = cls._simple_new(subarr, freq=inferred_freq, dtype=data_dtype)
if unit is not None and unit != result.unit:
# If unit was specified in user-passed dtype, cast to it here
# error: Argument 1 to "as_unit" of "TimelikeOps" has
# incompatible type "str"; expected "Literal['s', 'ms', 'us', 'ns']"
# [arg-type]
result = result.as_unit(unit) # type: ignore[arg-type]
validate_kwds = {"ambiguous": ambiguous}
result._maybe_pin_freq(freq, validate_kwds)
return result
@classmethod
def _generate_range(
cls,
start,
end,
periods: int | None,
freq,
tz=None,
normalize: bool = False,
ambiguous: TimeAmbiguous = "raise",
nonexistent: TimeNonexistent = "raise",
inclusive: IntervalClosedType = "both",
*,
unit: TimeUnit = "ns",
) -> Self:
periods = dtl.validate_periods(periods)
if freq is None and any(x is None for x in [periods, start, end]):
raise ValueError("Must provide freq argument if no data is supplied")
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError(
"Of the four parameters: start, end, periods, "
"and freq, exactly three must be specified"
)
freq = to_offset(freq)
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if start is NaT or end is NaT:
raise ValueError("Neither `start` nor `end` can be NaT")
if unit is not None:
if unit not in ["s", "ms", "us", "ns"]:
raise ValueError("'unit' must be one of 's', 'ms', 'us', 'ns'")
else:
unit = "ns"
if start is not None:
start = start.as_unit(unit, round_ok=False)
if end is not None:
end = end.as_unit(unit, round_ok=False)
left_inclusive, right_inclusive = validate_inclusive(inclusive)
start, end = _maybe_normalize_endpoints(start, end, normalize)
tz = _infer_tz_from_endpoints(start, end, tz)
if tz is not None:
# Localize the start and end arguments
start = _maybe_localize_point(start, freq, tz, ambiguous, nonexistent)
end = _maybe_localize_point(end, freq, tz, ambiguous, nonexistent)
if freq is not None:
# Offset handling:
# Ticks (fixed-duration like hours/minutes): keep tz; do absolute-time math.
# Other calendar offsets: drop tz; do naive wall time; localize once later
# so `ambiguous`/`nonexistent` are applied correctly.
if not isinstance(freq, Tick):
if start is not None and start.tz is not None:
start = start.tz_localize(None)
if end is not None and end.tz is not None:
end = end.tz_localize(None)
if isinstance(freq, (Tick, Day)):
i8values = generate_regular_range(start, end, periods, freq, unit=unit)
else:
xdr = _generate_range(
start=start, end=end, periods=periods, offset=freq, unit=unit
)
i8values = np.array([x._value for x in xdr], dtype=np.int64)
endpoint_tz = start.tz if start is not None else end.tz
if tz is not None and endpoint_tz is None:
if not timezones.is_utc(tz):
# short-circuit tz_localize_to_utc which would make
# an unnecessary copy with UTC but be a no-op.
creso = abbrev_to_npy_unit(unit)
i8values = tzconversion.tz_localize_to_utc(
i8values,
tz,
ambiguous=ambiguous,
nonexistent=nonexistent,
creso=creso,
)
# i8values is localized datetime64 array -> have to convert
# start/end as well to compare
if start is not None:
start = start.tz_localize(tz, ambiguous, nonexistent)
if end is not None:
end = end.tz_localize(tz, ambiguous, nonexistent)
else:
# Create a linearly spaced date_range in local time
# Nanosecond-granularity timestamps aren't always correctly
# representable with doubles, so we limit the range that we
# pass to np.linspace as much as possible
periods = cast(int, periods)
i8values = (
np.linspace(0, end._value - start._value, periods, dtype="int64")
+ start._value
)
if i8values.dtype != "i8":
# 2022-01-09 I (brock) am not sure if it is possible for this
# to overflow and cast to e.g. f8, but if it does we need to cast
i8values = i8values.astype("i8")
if start == end:
if not left_inclusive and not right_inclusive:
i8values = i8values[1:-1]
else:
start_i8 = Timestamp(start)._value
end_i8 = Timestamp(end)._value
if not left_inclusive or not right_inclusive:
if not left_inclusive and len(i8values) and i8values[0] == start_i8:
i8values = i8values[1:]
if not right_inclusive and len(i8values) and i8values[-1] == end_i8:
i8values = i8values[:-1]
dt64_values = i8values.view(f"datetime64[{unit}]")
dtype = tz_to_dtype(tz, unit=unit)
return cls._simple_new(dt64_values, freq=freq, dtype=dtype)
# -----------------------------------------------------------------
# DatetimeLike Interface
def _unbox_scalar(self, value) -> np.datetime64:
if not isinstance(value, self._scalar_type) and value is not NaT:
raise ValueError("'value' should be a Timestamp.")
self._check_compatible_with(value)
if value is NaT:
return np.datetime64(value._value, self.unit)
else:
return value.as_unit(self.unit, round_ok=False).asm8
def _scalar_from_string(self, value) -> Timestamp | NaTType:
return Timestamp(value, tz=self.tz)
def _check_compatible_with(self, other) -> None:
if other is NaT:
return
self._assert_tzawareness_compat(other)
# -----------------------------------------------------------------
# Descriptive Properties
def _box_func(self, x: np.datetime64) -> Timestamp | NaTType:
# GH#42228
value = x.view("i8")
ts = Timestamp._from_value_and_reso(value, reso=self._creso, tz=self.tz)
return ts
    @property
    # error: Return type "Union[dtype, DatetimeTZDtype]" of "dtype"
    # incompatible with return type "ExtensionDtype" in supertype
    # "ExtensionArray"
    def dtype(self) -> np.dtype[np.datetime64] | DatetimeTZDtype:  # type: ignore[override]
        """
        The dtype for the DatetimeArray.
        .. warning::
            A future version of pandas will change dtype to never be a
            ``numpy.dtype``. Instead, :attr:`DatetimeArray.dtype` will
            always be an instance of an ``ExtensionDtype`` subclass.
        Returns
        -------
        numpy.dtype or DatetimeTZDtype
            If the values are tz-naive, then ``np.dtype('datetime64[ns]')``
            is returned.
            If the values are tz-aware, then the ``DatetimeTZDtype``
            is returned.
        """
        # Simple attribute access: backed by the private ``_dtype``
        # (np.dtype for tz-naive, DatetimeTZDtype for tz-aware).
        return self._dtype
    @property
    def tz(self) -> tzinfo | None:
        """
        Return the timezone.
        Returns
        -------
        zoneinfo.ZoneInfo, datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None
            Returns None when the array is tz-naive.
        See Also
        --------
        DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a
            given time zone, or remove timezone from a tz-aware DatetimeIndex.
        DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from
            one time zone to another.
        Examples
        --------
        For Series:
        >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
        >>> s = pd.to_datetime(s)
        >>> s
        0   2020-01-01 10:00:00+00:00
        1   2020-02-01 11:00:00+00:00
        dtype: datetime64[us, UTC]
        >>> s.dt.tz
        datetime.timezone.utc
        For DatetimeIndex:
        >>> idx = pd.DatetimeIndex(
        ...     ["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]
        ... )
        >>> idx.tz
        datetime.timezone.utc
        """  # noqa: E501
        # GH 18595
        # tz lives on the dtype for tz-aware data; np.dtype has no ``tz``.
        return getattr(self.dtype, "tz", None)
@tz.setter
def tz(self, value):
# GH 3746: Prevent localizing or converting the index by setting tz
raise AttributeError(
"Cannot directly set timezone. Use tz_localize() "
"or tz_convert() as appropriate"
)
    @property
    def tzinfo(self) -> tzinfo | None:
        """
        Alias for the ``tz`` attribute.
        Returns None when the array is tz-naive.
        """
        return self.tz
    @property  # NB: override with cache_readonly in immutable subclasses
    def is_normalized(self) -> bool:
        """
        Returns True if all of the dates are at midnight ("no time")
        """
        # Delegates to the cython helper; ``self.tz`` is passed so the
        # midnight check is performed on local wall-clock times.
        return is_date_array_normalized(self.asi8, self.tz, reso=self._creso)
    @property  # NB: override with cache_readonly in immutable subclasses
    def _resolution_obj(self) -> Resolution:
        # Resolution of the stored values as determined by the cython
        # ``get_resolution`` helper (computed on local times when tz-aware).
        return get_resolution(self.asi8, self.tz, reso=self._creso)
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
    def __array__(self, dtype=None, copy=None) -> np.ndarray:
        # numpy array-protocol hook (np.asarray / np.array support).
        if dtype is None and self.tz:
            # The default for tz-aware is object, to preserve tz info
            dtype = object
        return super().__array__(dtype=dtype, copy=copy)
def __iter__(self) -> Iterator:
"""
Return an iterator over the boxed values
Yields
------
tstamp : Timestamp
"""
if self.ndim > 1:
for i in range(len(self)):
yield self[i]
else:
# convert in chunks of 10k for efficiency
data = self.asi8
length = len(self)
chunksize = _ITER_CHUNKSIZE
chunks = (length // chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, length)
converted = ints_to_pydatetime(
data[start_i:end_i],
tz=self.tz,
box="timestamp",
reso=self._creso,
)
yield from converted
    def astype(self, dtype, copy: bool = True):
        """
        Cast the array to ``dtype``.
        Datetime-specific conversions (unit changes, tz-aware <-> tz-naive
        restrictions, Period) are handled here; everything else is delegated
        to ``DatetimeLikeArrayMixin.astype``.
        """
        # We handle
        #   --> datetime
        #   --> period
        # DatetimeLikeArrayMixin Super handles the rest.
        dtype = pandas_dtype(dtype)
        if dtype == self.dtype:
            # No-op cast; honor the ``copy`` flag.
            if copy:
                return self.copy()
            return self
        elif isinstance(dtype, ExtensionDtype):
            if not isinstance(dtype, DatetimeTZDtype):
                # e.g. Sparse[datetime64[ns]]
                return super().astype(dtype, copy=copy)
            elif self.tz is None:
                # pre-2.0 this did self.tz_localize(dtype.tz), which did not match
                # the Series behavior which did
                #  values.tz_localize("UTC").tz_convert(dtype.tz)
                raise TypeError(
                    "Cannot use .astype to convert from timezone-naive dtype to "
                    "timezone-aware dtype. Use obj.tz_localize instead or "
                    "series.dt.tz_localize instead"
                )
            else:
                # tzaware unit conversion e.g. datetime64[s, UTC]
                np_dtype = np.dtype(dtype.str)
                res_values = astype_overflowsafe(self._ndarray, np_dtype, copy=copy)
                return type(self)._simple_new(res_values, dtype=dtype, freq=self.freq)
        elif (
            self.tz is None
            and lib.is_np_dtype(dtype, "M")
            and not is_unitless(dtype)
            and is_supported_dtype(dtype)
        ):
            # unit conversion e.g. datetime64[s]
            res_values = astype_overflowsafe(self._ndarray, dtype, copy=True)
            return type(self)._simple_new(res_values, dtype=res_values.dtype)
            # TODO: preserve freq?
        elif self.tz is not None and lib.is_np_dtype(dtype, "M"):
            # pre-2.0 behavior for DTA/DTI was
            #  values.tz_convert("UTC").tz_localize(None), which did not match
            #  the Series behavior
            raise TypeError(
                "Cannot use .astype to convert from timezone-aware dtype to "
                "timezone-naive dtype. Use obj.tz_localize(None) or "
                "obj.tz_convert('UTC').tz_localize(None) instead."
            )
        elif (
            self.tz is None
            and lib.is_np_dtype(dtype, "M")
            and dtype != self.dtype
            and is_unitless(dtype)
        ):
            raise TypeError(
                "Casting to unit-less dtype 'datetime64' is not supported. "
                "Pass e.g. 'datetime64[ns]' instead."
            )
        elif isinstance(dtype, PeriodDtype):
            return self.to_period(freq=dtype.freq)
        return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy)
# -----------------------------------------------------------------
# Rendering Methods
    def _format_native_types(
        self, *, na_rep: str | float = "NaT", date_format=None, **kwargs
    ) -> npt.NDArray[np.object_]:
        # Render the values as an object-dtype ndarray of strings; used by
        # the repr/to_csv formatting machinery.
        if date_format is None and self._is_dates_only:
            # Only dates and no timezone: provide a default format
            date_format = "%Y-%m-%d"
        return tslib.format_array_from_datetime(
            self.asi8, tz=self.tz, format=date_format, na_rep=na_rep, reso=self._creso
        )
# -----------------------------------------------------------------
# Comparison Methods
    def _assert_tzawareness_compat(self, other) -> None:
        # adapted from _Timestamp._assert_tzawareness_compat
        # Raise if exactly one of (self, other) is tz-aware.
        other_tz = getattr(other, "tzinfo", None)
        other_dtype = getattr(other, "dtype", None)
        if isinstance(other_dtype, DatetimeTZDtype):
            # Get tzinfo from Series dtype
            other_tz = other.dtype.tz
        if other is NaT:
            # pd.NaT quacks both aware and naive
            pass
        elif self.tz is None:
            if other_tz is not None:
                raise TypeError(
                    "Cannot compare tz-naive and tz-aware datetime-like objects."
                )
        elif other_tz is None:
            # NOTE(review): this message lacks the trailing period the branch
            # above has — presumably unintentional; confirm no tests match the
            # exact text before unifying.
            raise TypeError(
                "Cannot compare tz-naive and tz-aware datetime-like objects"
            )
# -----------------------------------------------------------------
# Arithmetic Methods
    def _add_offset(self, offset: BaseOffset) -> Self:
        # Apply a non-Tick DateOffset elementwise.  Strategy: strip the tz,
        # apply the offset on naive wall-clock values, then re-localize at
        # the end.
        assert not isinstance(offset, Tick)
        if self.tz is not None:
            values = self.tz_localize(None)
        else:
            values = self
        try:
            res_values = offset._apply_array(values._ndarray)
            if res_values.dtype.kind == "i":
                # error: Argument 1 to "view" of "ndarray" has
                # incompatible type
                # "dtype[datetime64[date | int | None]] | DatetimeTZDtype";
                # expected "dtype[Any] | _HasDType[dtype[Any]]"  [arg-type]
                res_values = res_values.view(values.dtype)  # type: ignore[arg-type]
        except NotImplementedError:
            # Offset has no vectorized path; fall back to applying it
            # object-by-object (slow), warning if enabled.
            if get_option("performance_warnings"):
                warnings.warn(
                    "Non-vectorized DateOffset being applied to Series or "
                    "DatetimeIndex.",
                    PerformanceWarning,
                    stacklevel=find_stack_level(),
                )
            res_values = self.astype("O") + offset
            # TODO(GH#55564): as_unit will be unnecessary
            result = type(self)._from_sequence(res_values, dtype=self.dtype)
        else:
            result = type(self)._simple_new(res_values, dtype=res_values.dtype)
            if offset.normalize:
                result = result.normalize()
                result._freq = None
            if self.tz is not None:
                result = result.tz_localize(self.tz)
        return result
# -----------------------------------------------------------------
# Timezone Conversion and Localization Methods
def _local_timestamps(self) -> npt.NDArray[np.int64]:
"""
Convert to an i8 (unix-like nanosecond timestamp) representation
while keeping the local timezone and not using UTC.
This is used to calculate time-of-day information as if the timestamps
were timezone-naive.
"""
if self.tz is None or timezones.is_utc(self.tz):
# Avoid the copy that would be made in tzconversion
return self.asi8
return tz_convert_from_utc(self.asi8, self.tz, reso=self._creso)
    def tz_convert(self, tz) -> Self:
        """
        Convert tz-aware Datetime Array/Index from one time zone to another.
        Parameters
        ----------
        tz : str, zoneinfo.ZoneInfo, pytz.timezone, dateutil.tz.tzfile, datetime.tzinfo or None
            Time zone for time. Corresponding timestamps would be converted
            to this time zone of the Datetime Array/Index. A `tz` of None will
            convert to UTC and remove the timezone information.
        Returns
        -------
        Array or Index
            Datetime Array/Index with target `tz`.
        Raises
        ------
        TypeError
            If Datetime Array/Index is tz-naive.
        See Also
        --------
        DatetimeIndex.tz : A timezone that has a variable offset from UTC.
        DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a
            given time zone, or remove timezone from a tz-aware DatetimeIndex.
        Examples
        --------
        With the `tz` parameter, we can change the DatetimeIndex
        to other time zones:
        >>> dti = pd.date_range(
        ...     start="2014-08-01 09:00", freq="h", periods=3, tz="Europe/Berlin"
        ... )
        >>> dti
        DatetimeIndex(['2014-08-01 09:00:00+02:00',
                       '2014-08-01 10:00:00+02:00',
                       '2014-08-01 11:00:00+02:00'],
                      dtype='datetime64[ns, Europe/Berlin]', freq='h')
        >>> dti.tz_convert("US/Central")
        DatetimeIndex(['2014-08-01 02:00:00-05:00',
                       '2014-08-01 03:00:00-05:00',
                       '2014-08-01 04:00:00-05:00'],
                      dtype='datetime64[ns, US/Central]', freq='h')
        With the ``tz=None``, we can remove the timezone (after converting
        to UTC if necessary):
        >>> dti = pd.date_range(
        ...     start="2014-08-01 09:00", freq="h", periods=3, tz="Europe/Berlin"
        ... )
        >>> dti
        DatetimeIndex(['2014-08-01 09:00:00+02:00',
                       '2014-08-01 10:00:00+02:00',
                       '2014-08-01 11:00:00+02:00'],
                      dtype='datetime64[us, Europe/Berlin]', freq='h')
        >>> dti.tz_convert(None)
        DatetimeIndex(['2014-08-01 07:00:00',
                       '2014-08-01 08:00:00',
                       '2014-08-01 09:00:00'],
                      dtype='datetime64[us]', freq='h')
        """  # noqa: E501
        tz = timezones.maybe_get_tz(tz)
        if self.tz is None:
            # tz naive, use tz_localize
            raise TypeError(
                "Cannot convert tz-naive timestamps, use tz_localize to localize"
            )
        # No conversion since timestamps are all UTC to begin with;
        # only the dtype (and hence the rendered wall times) changes.
        dtype = tz_to_dtype(tz, unit=self.unit)
        new_freq = None
        if isinstance(self.freq, Tick):
            # Fixed-duration freqs survive a tz change; calendar-based ones
            # (non-Tick) may not, so they are dropped.
            new_freq = self.freq
        return self._simple_new(self._ndarray, dtype=dtype, freq=new_freq)
    @dtl.ravel_compat
    def tz_localize(
        self,
        tz,
        ambiguous: TimeAmbiguous = "raise",
        nonexistent: TimeNonexistent = "raise",
    ) -> Self:
        """
        Localize tz-naive Datetime Array/Index to tz-aware Datetime Array/Index.
        This method takes a time zone (tz) naive Datetime Array/Index object
        and makes this time zone aware. It does not move the time to another
        time zone.
        This method can also be used to do the inverse -- to create a time
        zone unaware object from an aware object. To that end, pass `tz=None`.
        Parameters
        ----------
        tz : str, zoneinfo.ZoneInfo, pytz.timezone, dateutil.tz.tzfile, datetime.tzinfo or None
            Time zone to convert timestamps to. Passing ``None`` will
            remove the time zone information preserving local time.
        ambiguous : 'infer', 'NaT', bool array, default 'raise'
            When clocks moved backward due to DST, ambiguous times may arise.
            For example in Central European Time (UTC+01), when going from
            03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
            00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
            `ambiguous` parameter dictates how ambiguous times should be
            handled.
            - 'infer' will attempt to infer fall dst-transition hours based on
              order
            - bool-ndarray where True signifies a DST time, False signifies a
              non-DST time (note that this flag is only applicable for
              ambiguous times)
            - 'NaT' will return NaT where there are ambiguous times
            - 'raise' will raise a ValueError if there are ambiguous
              times.
        nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
default 'raise'
            A nonexistent time does not exist in a particular timezone
            where clocks moved forward due to DST.
            - 'shift_forward' will shift the nonexistent time forward to the
              closest existing time
            - 'shift_backward' will shift the nonexistent time backward to the
              closest existing time
            - 'NaT' will return NaT where there are nonexistent times
            - timedelta objects will shift nonexistent times by the timedelta
            - 'raise' will raise a ValueError if there are
              nonexistent times.
        Returns
        -------
        Same type as self
            Array/Index converted to the specified time zone.
        Raises
        ------
        TypeError
            If the Datetime Array/Index is tz-aware and tz is not None.
        See Also
        --------
        DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from
            one time zone to another.
        Examples
        --------
        >>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3)
        >>> tz_naive
        DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
                       '2018-03-03 09:00:00'],
                      dtype='datetime64[us]', freq='D')
        Localize DatetimeIndex in US/Eastern time zone:
        >>> tz_aware = tz_naive.tz_localize(tz='US/Eastern')
        >>> tz_aware
        DatetimeIndex(['2018-03-01 09:00:00-05:00',
                       '2018-03-02 09:00:00-05:00',
                       '2018-03-03 09:00:00-05:00'],
                      dtype='datetime64[us, US/Eastern]', freq=None)
        With the ``tz=None``, we can remove the time zone information
        while keeping the local time (not converted to UTC):
        >>> tz_aware.tz_localize(None)
        DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
                       '2018-03-03 09:00:00'],
                      dtype='datetime64[us]', freq=None)
        Be careful with DST changes. When there is sequential data, pandas can
        infer the DST time:
        >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00',
        ...                               '2018-10-28 02:00:00',
        ...                               '2018-10-28 02:30:00',
        ...                               '2018-10-28 02:00:00',
        ...                               '2018-10-28 02:30:00',
        ...                               '2018-10-28 03:00:00',
        ...                               '2018-10-28 03:30:00']))
        >>> s.dt.tz_localize('CET', ambiguous='infer')
        0   2018-10-28 01:30:00+02:00
        1   2018-10-28 02:00:00+02:00
        2   2018-10-28 02:30:00+02:00
        3   2018-10-28 02:00:00+01:00
        4   2018-10-28 02:30:00+01:00
        5   2018-10-28 03:00:00+01:00
        6   2018-10-28 03:30:00+01:00
        dtype: datetime64[s, CET]
        In some cases, inferring the DST is impossible. In such cases, you can
        pass an ndarray to the ambiguous parameter to set the DST explicitly
        >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00',
        ...                               '2018-10-28 02:36:00',
        ...                               '2018-10-28 03:46:00']))
        >>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False]))
        0   2018-10-28 01:20:00+02:00
        1   2018-10-28 02:36:00+02:00
        2   2018-10-28 03:46:00+01:00
        dtype: datetime64[s, CET]
        If the DST transition causes nonexistent times, you can shift these
        dates forward or backwards with a timedelta object or `'shift_forward'`
        or `'shift_backwards'`.
        >>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00',
        ...                               '2015-03-29 03:30:00'], dtype="M8[ns]"))
        >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
        0   2015-03-29 03:00:00+02:00
        1   2015-03-29 03:30:00+02:00
        dtype: datetime64[ns, Europe/Warsaw]
        >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
        0   2015-03-29 01:59:59.999999999+01:00
        1   2015-03-29 03:30:00+02:00
        dtype: datetime64[ns, Europe/Warsaw]
        >>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1h'))
        0   2015-03-29 03:30:00+02:00
        1   2015-03-29 03:30:00+02:00
        dtype: datetime64[ns, Europe/Warsaw]
        """  # noqa: E501
        nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
        if nonexistent not in nonexistent_options and not isinstance(
            nonexistent, timedelta
        ):
            raise ValueError(
                "The nonexistent argument must be one of 'raise', "
                "'NaT', 'shift_forward', 'shift_backward' or "
                "a timedelta object"
            )
        if self.tz is not None:
            # Already aware: only de-localizing (tz=None) is allowed here.
            if tz is None:
                new_dates = tz_convert_from_utc(self.asi8, self.tz, reso=self._creso)
            else:
                raise TypeError("Already tz-aware, use tz_convert to convert.")
        else:
            tz = timezones.maybe_get_tz(tz)
            # Convert to UTC
            new_dates = tzconversion.tz_localize_to_utc(
                self.asi8,
                tz,
                ambiguous=ambiguous,
                nonexistent=nonexistent,
                creso=self._creso,
            )
        new_dates_dt64 = new_dates.view(f"M8[{self.unit}]")
        dtype = tz_to_dtype(tz, unit=self.unit)
        freq = None
        if timezones.is_utc(tz) or (len(self) == 1 and not isna(new_dates_dt64[0])):
            # we can preserve freq
            # TODO: Also for fixed-offsets
            freq = self.freq
        elif tz is None and self.tz is None:
            # no-op
            freq = self.freq
        return self._simple_new(new_dates_dt64, dtype=dtype, freq=freq)
# ----------------------------------------------------------------
# Conversion Methods - Vectorized analogues of Timestamp methods
    def to_pydatetime(self) -> npt.NDArray[np.object_]:
        """
        Return an ndarray of ``datetime.datetime`` objects.
        Returns
        -------
        numpy.ndarray
            An ndarray of ``datetime.datetime`` objects.
        See Also
        --------
        DatetimeIndex.to_julian_date : Converts Datetime Array to float64 ndarray
            of Julian Dates.
        Examples
        --------
        >>> idx = pd.date_range("2018-02-27", periods=3)
        >>> idx.to_pydatetime()
        array([datetime.datetime(2018, 2, 27, 0, 0),
               datetime.datetime(2018, 2, 28, 0, 0),
               datetime.datetime(2018, 3, 1, 0, 0)], dtype=object)
        """
        # Box the raw i8 values in C; self.tz is passed down so the results
        # reflect this array's timezone.
        return ints_to_pydatetime(self.asi8, tz=self.tz, reso=self._creso)
    def normalize(self) -> Self:
        """
        Convert times to midnight.
        The time component of the date-time is converted to midnight i.e.
        00:00:00. This is useful in cases, when the time does not matter.
        Length is unaltered. The timezones are unaffected.
        This method is available on Series with datetime values under
        the ``.dt`` accessor, and directly on Datetime Array/Index.
        Returns
        -------
        DatetimeArray, DatetimeIndex or Series
            The same type as the original data. Series will have the same
            name and index. DatetimeIndex will have the same name.
        See Also
        --------
        floor : Floor the datetimes to the specified freq.
        ceil : Ceil the datetimes to the specified freq.
        round : Round the datetimes to the specified freq.
        Examples
        --------
        >>> idx = pd.date_range(
        ...     start="2014-08-01 10:00", freq="h", periods=3, tz="Asia/Calcutta"
        ... )
        >>> idx
        DatetimeIndex(['2014-08-01 10:00:00+05:30',
                       '2014-08-01 11:00:00+05:30',
                       '2014-08-01 12:00:00+05:30'],
                      dtype='datetime64[us, Asia/Calcutta]', freq='h')
        >>> idx.normalize()
        DatetimeIndex(['2014-08-01 00:00:00+05:30',
                       '2014-08-01 00:00:00+05:30',
                       '2014-08-01 00:00:00+05:30'],
                      dtype='datetime64[us, Asia/Calcutta]', freq=None)
        """
        new_values = normalize_i8_timestamps(self.asi8, self.tz, reso=self._creso)
        dt64_values = new_values.view(self._ndarray.dtype)
        # Rebuild as tz-naive first, infer the (possibly changed) freq, then
        # re-attach the timezone.
        dta = type(self)._simple_new(dt64_values, dtype=dt64_values.dtype)
        dta = dta._with_freq("infer")
        if self.tz is not None:
            dta = dta.tz_localize(self.tz)
        return dta
    def to_period(self, freq=None) -> PeriodArray:
        """
        Cast to PeriodArray/PeriodIndex at a particular frequency.
        Converts DatetimeArray/Index to PeriodArray/PeriodIndex.
        Parameters
        ----------
        freq : str or Period, optional
            One of pandas' :ref:`period aliases <timeseries.period_aliases>`
            or a Period object. Will be inferred by default.
        Returns
        -------
        PeriodArray/PeriodIndex
            Immutable ndarray holding ordinal values at a particular frequency.
        Raises
        ------
        ValueError
            When converting a DatetimeArray/Index with non-regular values,
            so that a frequency cannot be inferred.
        See Also
        --------
        PeriodIndex: Immutable ndarray holding ordinal values.
        DatetimeIndex.to_pydatetime: Return DatetimeIndex as object.
        Examples
        --------
        >>> df = pd.DataFrame(
        ...     {"y": [1, 2, 3]},
        ...     index=pd.to_datetime(
        ...         [
        ...             "2000-03-31 00:00:00",
        ...             "2000-05-31 00:00:00",
        ...             "2000-08-31 00:00:00",
        ...         ]
        ...     ),
        ... )
        >>> df.index.to_period("M")
        PeriodIndex(['2000-03', '2000-05', '2000-08'],
                    dtype='period[M]')
        Infer the daily frequency
        >>> idx = pd.date_range("2017-01-01", periods=2)
        >>> idx.to_period()
        PeriodIndex(['2017-01-01', '2017-01-02'],
                    dtype='period[D]')
        """
        from pandas.core.arrays import PeriodArray
        if self.tz is not None:
            # Periods have no timezone concept; warn rather than raise.
            warnings.warn(
                "Converting to PeriodArray/Index representation "
                "will drop timezone information.",
                UserWarning,
                stacklevel=find_stack_level(),
            )
        if freq is None:
            # Prefer the explicit freq, falling back to an inferred one.
            freq = self.freqstr or self.inferred_freq
            if isinstance(self.freq, BaseOffset) and hasattr(
                self.freq, "_period_dtype_code"
            ):
                freq = PeriodDtype(self.freq)._freqstr
            if freq is None:
                raise ValueError(
                    "You must pass a freq argument as current index has none."
                )
            res = get_period_alias(freq)
            #  https://github.com/pandas-dev/pandas/issues/33358
            if res is None:
                res = freq
            freq = res
        return PeriodArray._from_datetime64(self._ndarray, freq, tz=self.tz)
# -----------------------------------------------------------------
# Properties - Vectorized Timestamp Properties/Methods
    def month_name(self, locale=None) -> npt.NDArray[np.object_]:
        """
        Return the month names with specified locale.
        Parameters
        ----------
        locale : str, optional
            Locale determining the language in which to return the month name.
            Default is English locale (``'en_US.utf8'``). Use the command
            ``locale -a`` on your terminal on Unix systems to find your locale
            language code.
        Returns
        -------
        Series or Index
            Series or Index of month names.
        See Also
        --------
        DatetimeIndex.day_name : Return the day names with specified locale.
        Examples
        --------
        >>> s = pd.Series(pd.date_range(start="2018-01", freq="ME", periods=3))
        >>> s
        0   2018-01-31
        1   2018-02-28
        2   2018-03-31
        dtype: datetime64[us]
        >>> s.dt.month_name()
        0     January
        1    February
        2       March
        dtype: str
        >>> idx = pd.date_range(start="2018-01", freq="ME", periods=3)
        >>> idx
        DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
                      dtype='datetime64[us]', freq='ME')
        >>> idx.month_name()
        Index(['January', 'February', 'March'], dtype='str')
        Using the ``locale`` parameter you can set a different locale language,
        for example: ``idx.month_name(locale='pt_BR.utf8')`` will return month
        names in Brazilian Portuguese language.
        >>> idx = pd.date_range(start="2018-01", freq="ME", periods=3)
        >>> idx
        DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
                      dtype='datetime64[us]', freq='ME')
        >>> idx.month_name(locale="pt_BR.utf8")  # doctest: +SKIP
        Index(['Janeiro', 'Fevereiro', 'Março'], dtype='str')
        """
        # Use local wall-clock values so names match the displayed dates.
        values = self._local_timestamps()
        result = fields.get_date_name_field(
            values, "month_name", locale=locale, reso=self._creso
        )
        result = self._maybe_mask_results(result, fill_value=None)
        if using_string_dtype():
            # NOTE(review): this branch returns a pandas string array, not the
            # annotated ndarray (see the type: ignore below) — annotation is
            # stale.
            from pandas import (
                StringDtype,
                array as pd_array,
            )
            return pd_array(result, dtype=StringDtype(na_value=np.nan))  # type: ignore[return-value]
        return result
    def day_name(self, locale=None) -> npt.NDArray[np.object_]:
        """
        Return the day names with specified locale.
        Parameters
        ----------
        locale : str, optional
            Locale determining the language in which to return the day name.
            Default is English locale (``'en_US.utf8'``). Use the command
            ``locale -a`` on your terminal on Unix systems to find your locale
            language code.
        Returns
        -------
        Series or Index
            Series or Index of day names.
        See Also
        --------
        DatetimeIndex.month_name : Return the month names with specified locale.
        Examples
        --------
        >>> s = pd.Series(pd.date_range(start="2018-01-01", freq="D", periods=3))
        >>> s
        0   2018-01-01
        1   2018-01-02
        2   2018-01-03
        dtype: datetime64[us]
        >>> s.dt.day_name()
        0       Monday
        1      Tuesday
        2    Wednesday
        dtype: str
        >>> idx = pd.date_range(start="2018-01-01", freq="D", periods=3)
        >>> idx
        DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
                      dtype='datetime64[us]', freq='D')
        >>> idx.day_name()
        Index(['Monday', 'Tuesday', 'Wednesday'], dtype='str')
        Using the ``locale`` parameter you can set a different locale language,
        for example: ``idx.day_name(locale='pt_BR.utf8')`` will return day
        names in Brazilian Portuguese language.
        >>> idx = pd.date_range(start="2018-01-01", freq="D", periods=3)
        >>> idx
        DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
                      dtype='datetime64[us]', freq='D')
        >>> idx.day_name(locale="pt_BR.utf8")  # doctest: +SKIP
        Index(['Segunda', 'Terça', 'Quarta'], dtype='str')
        """
        # Use local wall-clock values so names match the displayed dates.
        values = self._local_timestamps()
        result = fields.get_date_name_field(
            values, "day_name", locale=locale, reso=self._creso
        )
        result = self._maybe_mask_results(result, fill_value=None)
        if using_string_dtype():
            # TODO: no tests that check for dtype of result as of 2024-08-15
            # NOTE(review): this branch returns a pandas string array, not the
            # annotated ndarray (see the type: ignore below).
            from pandas import (
                StringDtype,
                array as pd_array,
            )
            return pd_array(result, dtype=StringDtype(na_value=np.nan))  # type: ignore[return-value]
        return result
    @property
    def time(self) -> npt.NDArray[np.object_]:
        """
        Returns numpy array of :class:`datetime.time` objects.
        The time part of the Timestamps.
        See Also
        --------
        DatetimeIndex.timetz : Returns numpy array of :class:`datetime.time`
            objects with timezones. The time part of the Timestamps.
        DatetimeIndex.date : Returns numpy array of python :class:`datetime.date`
            objects. Namely, the date part of Timestamps without time and timezone
            information.
        Examples
        --------
        For Series:
        >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
        >>> s = pd.to_datetime(s)
        >>> s
        0   2020-01-01 10:00:00+00:00
        1   2020-02-01 11:00:00+00:00
        dtype: datetime64[us, UTC]
        >>> s.dt.time
        0    10:00:00
        1    11:00:00
        dtype: object
        For DatetimeIndex:
        >>> idx = pd.DatetimeIndex(
        ...     ["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]
        ... )
        >>> idx.time
        array([datetime.time(10, 0), datetime.time(11, 0)], dtype=object)
        """
        # If the Timestamps have a timezone that is not UTC,
        # convert them into their i8 representation while
        # keeping their timezone and not using UTC
        timestamps = self._local_timestamps()
        # box="time" yields datetime.time objects (date component dropped).
        return ints_to_pydatetime(timestamps, box="time", reso=self._creso)
    @property
    def timetz(self) -> npt.NDArray[np.object_]:
        """
        Returns numpy array of :class:`datetime.time` objects with timezones.
        The time part of the Timestamps.
        See Also
        --------
        DatetimeIndex.time : Returns numpy array of :class:`datetime.time` objects.
            The time part of the Timestamps.
        DatetimeIndex.tz : Return the timezone.
        Examples
        --------
        For Series:
        >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
        >>> s = pd.to_datetime(s)
        >>> s
        0   2020-01-01 10:00:00+00:00
        1   2020-02-01 11:00:00+00:00
        dtype: datetime64[us, UTC]
        >>> s.dt.timetz
        0    10:00:00+00:00
        1    11:00:00+00:00
        dtype: object
        For DatetimeIndex:
        >>> idx = pd.DatetimeIndex(
        ...     ["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]
        ... )
        >>> idx.timetz
        array([datetime.time(10, 0, tzinfo=datetime.timezone.utc),
        datetime.time(11, 0, tzinfo=datetime.timezone.utc)], dtype=object)
        """
        # Unlike ``time``, self.tz is passed through so the resulting
        # datetime.time objects keep their tzinfo.
        return ints_to_pydatetime(self.asi8, self.tz, box="time", reso=self._creso)
    @property
    def date(self) -> npt.NDArray[np.object_]:
        """
        Returns numpy array of python :class:`datetime.date` objects.
        Namely, the date part of Timestamps without time and
        timezone information.
        See Also
        --------
        DatetimeIndex.time : Returns numpy array of :class:`datetime.time` objects.
            The time part of the Timestamps.
        DatetimeIndex.year : The year of the datetime.
        DatetimeIndex.month : The month as January=1, December=12.
        DatetimeIndex.day : The day of the datetime.
        Examples
        --------
        For Series:
        >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
        >>> s = pd.to_datetime(s)
        >>> s
        0   2020-01-01 10:00:00+00:00
        1   2020-02-01 11:00:00+00:00
        dtype: datetime64[us, UTC]
        >>> s.dt.date
        0    2020-01-01
        1    2020-02-01
        dtype: object
        For DatetimeIndex:
        >>> idx = pd.DatetimeIndex(
        ...     ["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]
        ... )
        >>> idx.date
        array([datetime.date(2020, 1, 1), datetime.date(2020, 2, 1)], dtype=object)
        """
        # If the Timestamps have a timezone that is not UTC,
        # convert them into their i8 representation while
        # keeping their timezone and not using UTC
        timestamps = self._local_timestamps()
        # box="date" yields datetime.date objects (time-of-day discarded).
        return ints_to_pydatetime(timestamps, box="date", reso=self._creso)
    def isocalendar(self) -> DataFrame:
        """
        Calculate year, week, and day according to the ISO 8601 standard.
        Returns
        -------
        DataFrame
            With columns year, week and day.
        See Also
        --------
        Timestamp.isocalendar : Function return a 3-tuple containing ISO year,
            week number, and weekday for the given Timestamp object.
        datetime.date.isocalendar : Return a named tuple object with
            three components: year, week and weekday.
        Examples
        --------
        >>> idx = pd.date_range(start="2019-12-29", freq="D", periods=4)
        >>> idx.isocalendar()
                    year  week  day
        2019-12-29  2019    52    7
        2019-12-30  2020     1    1
        2019-12-31  2020     1    2
        2020-01-01  2020     1    3
        >>> idx.isocalendar().week
        2019-12-29    52
        2019-12-30     1
        2019-12-31     1
        2020-01-01     1
        Freq: D, Name: week, dtype: UInt32
        """
        from pandas import DataFrame
        # Compute on local wall-clock values so the calendar fields match
        # the displayed dates.
        values = self._local_timestamps()
        sarray = fields.build_isocalendar_sarray(values, reso=self._creso)
        iso_calendar_df = DataFrame(
            sarray, columns=["year", "week", "day"], dtype="UInt32"
        )
        if self._hasna:
            # Mask rows where the underlying timestamp is NaT.
            iso_calendar_df.iloc[self._isnan] = None
        return iso_calendar_df
year = _field_accessor(
"year",
"Y",
"""
The year of the datetime.
See Also
--------
DatetimeIndex.month: The month as January=1, December=12.
DatetimeIndex.day: The day of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="YE")
... )
>>> datetime_series
0 2000-12-31
1 2001-12-31
2 2002-12-31
dtype: datetime64[us]
>>> datetime_series.dt.year
0 2000
1 2001
2 2002
dtype: int32
""",
)
month = _field_accessor(
"month",
"M",
"""
The month as January=1, December=12.
See Also
--------
DatetimeIndex.year: The year of the datetime.
DatetimeIndex.day: The day of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="ME")
... )
>>> datetime_series
0 2000-01-31
1 2000-02-29
2 2000-03-31
dtype: datetime64[us]
>>> datetime_series.dt.month
0 1
1 2
2 3
dtype: int32
""",
)
day = _field_accessor(
"day",
"D",
"""
The day of the datetime.
See Also
--------
DatetimeIndex.year: The year of the datetime.
DatetimeIndex.month: The month as January=1, December=12.
DatetimeIndex.hour: The hours of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="D")
... )
>>> datetime_series
0 2000-01-01
1 2000-01-02
2 2000-01-03
dtype: datetime64[us]
>>> datetime_series.dt.day
0 1
1 2
2 3
dtype: int32
""",
)
hour = _field_accessor(
"hour",
"h",
"""
The hours of the datetime.
See Also
--------
DatetimeIndex.day: The day of the datetime.
DatetimeIndex.minute: The minutes of the datetime.
DatetimeIndex.second: The seconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="h")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 01:00:00
2 2000-01-01 02:00:00
dtype: datetime64[us]
>>> datetime_series.dt.hour
0 0
1 1
2 2
dtype: int32
""",
)
minute = _field_accessor(
"minute",
"m",
"""
The minutes of the datetime.
See Also
--------
DatetimeIndex.hour: The hours of the datetime.
DatetimeIndex.second: The seconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="min")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 00:01:00
2 2000-01-01 00:02:00
dtype: datetime64[us]
>>> datetime_series.dt.minute
0 0
1 1
2 2
dtype: int32
""",
)
second = _field_accessor(
"second",
"s",
"""
The seconds of the datetime.
See Also
--------
DatetimeIndex.minute: The minutes of the datetime.
DatetimeIndex.microsecond: The microseconds of the datetime.
DatetimeIndex.nanosecond: The nanoseconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="s")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 00:00:01
2 2000-01-01 00:00:02
dtype: datetime64[us]
>>> datetime_series.dt.second
0 0
1 1
2 2
dtype: int32
""",
)
microsecond = _field_accessor(
"microsecond",
"us",
"""
The microseconds of the datetime.
See Also
--------
DatetimeIndex.second: The seconds of the datetime.
DatetimeIndex.nanosecond: The nanoseconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="us")
... )
>>> datetime_series
0 2000-01-01 00:00:00.000000
1 2000-01-01 00:00:00.000001
2 2000-01-01 00:00:00.000002
dtype: datetime64[us]
>>> datetime_series.dt.microsecond
0 0
1 1
2 2
dtype: int32
""",
)
nanosecond = _field_accessor(
"nanosecond",
"ns",
"""
The nanoseconds of the datetime.
See Also
--------
DatetimeIndex.second: The seconds of the datetime.
DatetimeIndex.microsecond: The microseconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="ns")
... )
>>> datetime_series
0 2000-01-01 00:00:00.000000000
1 2000-01-01 00:00:00.000000001
2 2000-01-01 00:00:00.000000002
dtype: datetime64[ns]
>>> datetime_series.dt.nanosecond
0 0
1 1
2 2
dtype: int32
""",
)
_dayofweek_doc = """
The day of the week with Monday=0, Sunday=6.
Return the day of the week. It is assumed the week starts on
Monday, which is denoted by 0 and ends on Sunday which is denoted
by 6. This method is available on both Series with datetime
values (using the `dt` accessor) or DatetimeIndex.
Returns
-------
Series or Index
Containing integers indicating the day number.
See Also
--------
Series.dt.dayofweek : Alias.
Series.dt.weekday : Alias.
Series.dt.day_name : Returns the name of the day of the week.
Examples
--------
>>> s = pd.date_range('2016-12-31', '2017-01-08', freq='D').to_series()
>>> s.dt.dayofweek
2016-12-31 5
2017-01-01 6
2017-01-02 0
2017-01-03 1
2017-01-04 2
2017-01-05 3
2017-01-06 4
2017-01-07 5
2017-01-08 6
Freq: D, dtype: int32
"""
day_of_week = _field_accessor("day_of_week", "dow", _dayofweek_doc)
dayofweek = day_of_week
weekday = day_of_week
day_of_year = _field_accessor(
"dayofyear",
"doy",
"""
The ordinal day of the year.
See Also
--------
DatetimeIndex.dayofweek : The day of the week with Monday=0, Sunday=6.
DatetimeIndex.day : The day of the datetime.
Examples
--------
For Series:
>>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
>>> s = pd.to_datetime(s)
>>> s
0 2020-01-01 10:00:00+00:00
1 2020-02-01 11:00:00+00:00
dtype: datetime64[us, UTC]
>>> s.dt.dayofyear
0 1
1 32
dtype: int32
For DatetimeIndex:
>>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00",
... "2/1/2020 11:00:00+00:00"])
>>> idx.dayofyear
Index([1, 32], dtype='int32')
""",
)
dayofyear = day_of_year
quarter = _field_accessor(
"quarter",
"q",
"""
The quarter of the date.
See Also
--------
DatetimeIndex.snap : Snap time stamps to nearest occurring frequency.
DatetimeIndex.time : Returns numpy array of datetime.time objects.
The time part of the Timestamps.
Examples
--------
For Series:
>>> s = pd.Series(["1/1/2020 10:00:00+00:00", "4/1/2020 11:00:00+00:00"])
>>> s = pd.to_datetime(s)
>>> s
0 2020-01-01 10:00:00+00:00
1 2020-04-01 11:00:00+00:00
dtype: datetime64[us, UTC]
>>> s.dt.quarter
0 1
1 2
dtype: int32
For DatetimeIndex:
>>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00",
... "2/1/2020 11:00:00+00:00"])
>>> idx.quarter
Index([1, 1], dtype='int32')
""",
)
days_in_month = _field_accessor(
"days_in_month",
"dim",
"""
The number of days in the month.
See Also
--------
Series.dt.day : Return the day of the month.
Series.dt.is_month_end : Return a boolean indicating if the
date is the last day of the month.
Series.dt.is_month_start : Return a boolean indicating if the
date is the first day of the month.
Series.dt.month : Return the month as January=1 through December=12.
Examples
--------
>>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
>>> s = pd.to_datetime(s)
>>> s
0 2020-01-01 10:00:00+00:00
1 2020-02-01 11:00:00+00:00
dtype: datetime64[us, UTC]
>>> s.dt.daysinmonth
0 31
1 29
dtype: int32
""",
)
daysinmonth = days_in_month
_is_month_doc = """
Indicates whether the date is the {first_or_last} day of the month.
Returns
-------
Series or array
For Series, returns a Series with boolean values.
For DatetimeIndex, returns a boolean array.
See Also
--------
is_month_start : Return a boolean indicating whether the date
is the first day of the month.
is_month_end : Return a boolean indicating whether the date
is the last day of the month.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> s = pd.Series(pd.date_range("2018-02-27", periods=3))
>>> s
0 2018-02-27
1 2018-02-28
2 2018-03-01
dtype: datetime64[us]
>>> s.dt.is_month_start
0 False
1 False
2 True
dtype: bool
>>> s.dt.is_month_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2018-02-27", periods=3)
>>> idx.is_month_start
array([False, False, True])
>>> idx.is_month_end
array([False, True, False])
"""
is_month_start = _field_accessor(
"is_month_start", "is_month_start", _is_month_doc.format(first_or_last="first")
)
is_month_end = _field_accessor(
"is_month_end", "is_month_end", _is_month_doc.format(first_or_last="last")
)
is_quarter_start = _field_accessor(
"is_quarter_start",
"is_quarter_start",
"""
Indicator for whether the date is the first day of a quarter.
Returns
-------
is_quarter_start : Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
quarter : Return the quarter of the date.
is_quarter_end : Similar property for indicating the quarter end.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df.assign(quarter=df.dates.dt.quarter,
... is_quarter_start=df.dates.dt.is_quarter_start)
dates quarter is_quarter_start
0 2017-03-30 1 False
1 2017-03-31 1 False
2 2017-04-01 2 True
3 2017-04-02 2 False
>>> idx = pd.date_range('2017-03-30', periods=4)
>>> idx
DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
dtype='datetime64[us]', freq='D')
>>> idx.is_quarter_start
array([False, False, True, False])
""",
)
is_quarter_end = _field_accessor(
"is_quarter_end",
"is_quarter_end",
"""
Indicator for whether the date is the last day of a quarter.
Returns
-------
is_quarter_end : Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
quarter : Return the quarter of the date.
is_quarter_start : Similar property indicating the quarter start.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df.assign(quarter=df.dates.dt.quarter,
... is_quarter_end=df.dates.dt.is_quarter_end)
dates quarter is_quarter_end
0 2017-03-30 1 False
1 2017-03-31 1 True
2 2017-04-01 2 False
3 2017-04-02 2 False
>>> idx = pd.date_range('2017-03-30', periods=4)
>>> idx
DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
dtype='datetime64[us]', freq='D')
>>> idx.is_quarter_end
array([False, True, False, False])
""",
)
is_year_start = _field_accessor(
"is_year_start",
"is_year_start",
"""
Indicate whether the date is the first day of a year.
Returns
-------
Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
is_year_end : Similar property indicating the last day of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[us]
>>> dates.dt.is_year_start
0 False
1 False
2 True
dtype: bool
>>> idx = pd.date_range("2017-12-30", periods=3)
>>> idx
DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[us]', freq='D')
>>> idx.is_year_start
array([False, False, True])
This method, when applied to Series with datetime values under
the ``.dt`` accessor, will lose information about Business offsets.
>>> dates = pd.Series(pd.date_range("2020-10-30", periods=4, freq="BYS"))
>>> dates
0 2021-01-01
1 2022-01-03
2 2023-01-02
3 2024-01-01
dtype: datetime64[us]
>>> dates.dt.is_year_start
0 True
1 False
2 False
3 True
dtype: bool
>>> idx = pd.date_range("2020-10-30", periods=4, freq="BYS")
>>> idx
DatetimeIndex(['2021-01-01', '2022-01-03', '2023-01-02', '2024-01-01'],
dtype='datetime64[us]', freq='BYS-JAN')
>>> idx.is_year_start
array([ True, True, True, True])
""",
)
is_year_end = _field_accessor(
"is_year_end",
"is_year_end",
"""
Indicate whether the date is the last day of the year.
Returns
-------
Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
is_year_start : Similar property indicating the start of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[us]
>>> dates.dt.is_year_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2017-12-30", periods=3)
>>> idx
DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[us]', freq='D')
>>> idx.is_year_end
array([False, True, False])
""",
)
is_leap_year = _field_accessor(
"is_leap_year",
"is_leap_year",
"""
Boolean indicator if the date belongs to a leap year.
A leap year is a year, which has 366 days (instead of 365) including
29th of February as an intercalary day.
Leap years are years which are multiples of four with the exception
of years divisible by 100 but not by 400.
Returns
-------
Series or ndarray
Booleans indicating if dates belong to a leap year.
See Also
--------
DatetimeIndex.is_year_end : Indicate whether the date is the
last day of the year.
DatetimeIndex.is_year_start : Indicate whether the date is the first
day of a year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> idx = pd.date_range("2012-01-01", "2015-01-01", freq="YE")
>>> idx
DatetimeIndex(['2012-12-31', '2013-12-31', '2014-12-31'],
dtype='datetime64[us]', freq='YE-DEC')
>>> idx.is_leap_year
array([ True, False, False])
>>> dates_series = pd.Series(idx)
>>> dates_series
0 2012-12-31
1 2013-12-31
2 2014-12-31
dtype: datetime64[us]
>>> dates_series.dt.is_leap_year
0 True
1 False
2 False
dtype: bool
""",
)
    def to_julian_date(self) -> npt.NDArray[np.float64]:
        """
        Convert TimeStamp to a Julian Date.

        This method returns the number of days as a float since noon January 1, 4713 BC.

        https://en.wikipedia.org/wiki/Julian_day

        Returns
        -------
        ndarray or Index
            Float values that represent each date in Julian Calendar.

        See Also
        --------
        Timestamp.to_julian_date : Equivalent method on ``Timestamp`` objects.

        Examples
        --------
        >>> idx = pd.DatetimeIndex(["2028-08-12 00:54", "2028-08-12 02:06"])
        >>> idx.to_julian_date()
        Index([2461995.5375, 2461995.5875], dtype='float64')
        """
        # http://mysite.verizon.net/aesir_research/date/jdalg2.htm
        # np.asarray copies are safe to mutate: the field accessors return
        # freshly-built arrays rather than views of self._ndarray.
        year = np.asarray(self.year)
        month = np.asarray(self.month)
        day = np.asarray(self.day)
        # Standard Julian-date trick: treat Jan/Feb as months 13/14 of the
        # previous year so that the leap day falls at the end of the cycle.
        testarr = month < 3
        year[testarr] -= 1
        month[testarr] += 12
        return (
            day
            + np.trunc((153 * month - 457) / 5)
            + 365 * year
            + np.floor(year / 4)
            - np.floor(year / 100)
            + np.floor(year / 400)
            # 1_721_118.5 is the epoch offset used by the linked algorithm to
            # shift from the Gregorian day count to the Julian Day Number.
            + 1_721_118.5
            + (
                self.hour
                + self.minute / 60
                + self.second / 3600
                + self.microsecond / 3600 / 10**6
                + self.nanosecond / 3600 / 10**9
            )
            / 24
        )
# -----------------------------------------------------------------
# Reductions
def _reduce(
self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
):
result = super()._reduce(name, skipna=skipna, keepdims=keepdims, **kwargs)
if keepdims and isinstance(result, np.ndarray):
if name == "std":
from pandas.core.arrays import TimedeltaArray
return TimedeltaArray._from_sequence(result)
else:
return self._from_sequence(result, dtype=self.dtype)
return result
    def std(
        self,
        axis=None,
        dtype=None,
        out=None,
        ddof: int = 1,
        keepdims: bool = False,
        skipna: bool = True,
    ) -> Timedelta:
        """
        Return sample standard deviation over requested axis.

        Normalized by `N-1` by default. This can be changed using ``ddof``.

        Parameters
        ----------
        axis : int, optional
            Axis for the function to be applied on. For :class:`pandas.Series`
            this parameter is unused and defaults to ``None``.
        dtype : dtype, optional, default None
            Type to use in computing the standard deviation. For arrays of
            integer type the default is float64, for arrays of float types
            it is the same as the array type.
        out : ndarray, optional, default None
            Alternative output array in which to place the result. It must have
            the same shape as the expected output but the type (of the
            calculated values) will be cast if necessary.
        ddof : int, default 1
            Degrees of Freedom. The divisor used in calculations is `N - ddof`,
            where `N` represents the number of elements.
        keepdims : bool, optional
            If this is set to True, the axes which are reduced are left in the
            result as dimensions with size one. With this option, the result
            will broadcast correctly against the input array. If the default
            value is passed, then keepdims will not be passed through to the
            std method of sub-classes of ndarray, however any non-default value
            will be. If the sub-class method does not implement keepdims any
            exceptions will be raised.
        skipna : bool, default True
            Exclude NA/null values. If an entire row/column is ``NA``, the result
            will be ``NA``.

        Returns
        -------
        Timedelta
            Standard deviation over requested axis.

        See Also
        --------
        numpy.ndarray.std : Returns the standard deviation of the array elements
            along given axis.
        Series.std : Return sample standard deviation over requested axis.

        Examples
        --------
        For :class:`pandas.DatetimeIndex`:

        >>> idx = pd.date_range("2001-01-01 00:00", periods=3)
        >>> idx
        DatetimeIndex(['2001-01-01', '2001-01-02', '2001-01-03'],
                      dtype='datetime64[us]', freq='D')
        >>> idx.std()
        Timedelta('1 days 00:00:00')
        """
        # Because std is translation-invariant, we can get self.std
        # by calculating (self - Timestamp(0)).std, and we can do it
        # without creating a copy by using a view on self._ndarray
        from pandas.core.arrays import TimedeltaArray

        # Find the td64 dtype with the same resolution as our dt64 dtype
        dtype_str = self._ndarray.dtype.name.replace("datetime64", "timedelta64")
        dtype = np.dtype(dtype_str)
        # Zero-copy reinterpretation: same bytes, timedelta semantics.
        tda = TimedeltaArray._simple_new(self._ndarray.view(dtype), dtype=dtype)
        # Delegate to TimedeltaArray.std; NaT handling follows `skipna`.
        return tda.std(axis=axis, out=out, ddof=ddof, keepdims=keepdims, skipna=skipna)
# -------------------------------------------------------------------
# Constructor Helpers
def _sequence_to_dt64(
    data: ArrayLike,
    *,
    copy: bool = False,
    tz: tzinfo | None = None,
    dayfirst: bool = False,
    yearfirst: bool = False,
    ambiguous: TimeAmbiguous = "raise",
    out_unit: str | None = None,
) -> tuple[np.ndarray, tzinfo | None]:
    """
    Parameters
    ----------
    data : np.ndarray or ExtensionArray
        dtl.ensure_arraylike_for_datetimelike has already been called.
    copy : bool, default False
    tz : tzinfo or None, default None
    dayfirst : bool, default False
    yearfirst : bool, default False
    ambiguous : str, bool, or arraylike, default 'raise'
        See pandas._libs.tslibs.tzconversion.tz_localize_to_utc.
    out_unit : str or None, default None
        Desired output resolution.

    Returns
    -------
    result : numpy.ndarray
        The sequence converted to a numpy array with dtype ``datetime64[unit]``.
        Where `unit` is "ns" unless specified otherwise by `out_unit`.
    tz : tzinfo or None
        Either the user-provided tzinfo or one inferred from the data.

    Raises
    ------
    TypeError : PeriodDType data is passed
    """
    # By this point we are assured to have either a numpy array or Index
    data, copy = maybe_convert_dtype(data, copy, tz=tz)
    data_dtype = getattr(data, "dtype", None)

    out_dtype = DT64NS_DTYPE
    if out_unit is not None:
        out_dtype = np.dtype(f"M8[{out_unit}]")

    if data_dtype == object or is_string_dtype(data_dtype):
        # TODO: We do not have tests specific to string-dtypes,
        #  also complex or categorical or other extension
        data = cast(np.ndarray, data)
        copy = False
        if lib.infer_dtype(data, skipna=False) == "integer":
            # Much more performant than going through array_to_datetime
            # (epoch integers stored in an object array).
            data = data.astype(np.int64)
        elif tz is not None and ambiguous == "raise":
            # Fast path: parse directly into the target tz in one C call.
            obj_data = np.asarray(data, dtype=object)
            result = tslib.array_to_datetime_with_tz(
                obj_data,
                tz=tz,
                dayfirst=dayfirst,
                yearfirst=yearfirst,
                creso=abbrev_to_npy_unit(out_unit),
            )
            return result, tz
        else:
            # General object parsing; may also infer a tz from the data.
            converted, inferred_tz = objects_to_datetime64(
                data,
                dayfirst=dayfirst,
                yearfirst=yearfirst,
                allow_object=False,
                out_unit=out_unit,
            )
            copy = False
            if tz and inferred_tz:
                #  two timezones: convert to intended from base UTC repr
                # GH#42505 by convention, these are _already_ UTC
                result = converted

            elif inferred_tz:
                tz = inferred_tz
                result = converted

            else:
                # Wall times with no tz in the data: localize if tz was given.
                result, _ = _construct_from_dt64_naive(
                    converted, tz=tz, copy=copy, ambiguous=ambiguous
                )
            return result, tz

        data_dtype = data.dtype

    # `data` may have originally been a Categorical[datetime64[ns, tz]],
    # so we need to handle these types.
    if isinstance(data_dtype, DatetimeTZDtype):
        # DatetimeArray -> ndarray
        data = cast(DatetimeArray, data)
        tz = _maybe_infer_tz(tz, data.tz)
        result = data._ndarray

    elif lib.is_np_dtype(data_dtype, "M"):
        # tz-naive DatetimeArray or ndarray[datetime64]
        if isinstance(data, DatetimeArray):
            data = data._ndarray

        data = cast(np.ndarray, data)
        result, copy = _construct_from_dt64_naive(
            data, tz=tz, copy=copy, ambiguous=ambiguous
        )

    else:
        # must be integer dtype otherwise
        # assume this data are epoch timestamps
        if data.dtype != INT64_DTYPE:
            data = data.astype(np.int64, copy=False)
            copy = False
        data = cast(np.ndarray, data)
        result = data.view(out_dtype)

    if copy:
        result = result.copy()

    assert isinstance(result, np.ndarray), type(result)
    assert result.dtype.kind == "M"
    assert result.dtype != "M8"
    assert is_supported_dtype(result.dtype)
    return result, tz
def _construct_from_dt64_naive(
    data: np.ndarray, *, tz: tzinfo | None, copy: bool, ambiguous: TimeAmbiguous
) -> tuple[np.ndarray, bool]:
    """
    Convert datetime64 data to a supported dtype, localizing if necessary.

    Parameters
    ----------
    data : np.ndarray
        Array with a numpy datetime64 ("M8") dtype; the caller is responsible
        for ensuring this.
    tz : tzinfo or None
        If not None, the values in ``data`` are treated as wall times in this
        timezone and converted to their UTC representation.
    copy : bool
        Whether the caller still wants a defensive copy made downstream.
    ambiguous : str, bool, or arraylike
        See pandas._libs.tslibs.tzconversion.tz_localize_to_utc.

    Returns
    -------
    result : np.ndarray
        datetime64 array with a supported resolution and native byte order.
    copy : bool
        Updated copy flag; False whenever this function already allocated a
        fresh array, making a further copy unnecessary.
    """
    # Caller is responsible for ensuring
    #  lib.is_np_dtype(data.dtype)
    new_dtype = data.dtype
    if not is_supported_dtype(new_dtype):
        # Cast to the nearest supported unit, generally "s"
        new_dtype = get_supported_dtype(new_dtype)
        data = astype_overflowsafe(data, dtype=new_dtype, copy=False)
        copy = False

    if data.dtype.byteorder == ">":
        # TODO: better way to handle this?  non-copying alternative?
        #  without this, test_constructor_datetime64_bigendian fails
        data = data.astype(data.dtype.newbyteorder("<"))
        new_dtype = data.dtype
        copy = False

    if tz is not None:
        # Convert tz-naive to UTC
        # TODO: if tz is UTC, are there situations where we *don't* want a
        #  copy?  tz_localize_to_utc always makes one.
        shape = data.shape
        if data.ndim > 1:
            # tz_localize_to_utc operates on 1-D i8 data; flatten then restore.
            data = data.ravel()

        data_unit = get_unit_from_dtype(new_dtype)
        data = tzconversion.tz_localize_to_utc(
            data.view("i8"), tz, ambiguous=ambiguous, creso=data_unit
        )
        data = data.view(new_dtype)
        data = data.reshape(shape)

    assert data.dtype == new_dtype, data.dtype
    result = data

    return result, copy
def objects_to_datetime64(
    data: np.ndarray,
    dayfirst,
    yearfirst,
    utc: bool = False,
    errors: DateTimeErrorChoices = "raise",
    allow_object: bool = False,
    out_unit: str | None = None,
) -> tuple[np.ndarray, tzinfo | None]:
    """
    Convert data to array of timestamps.

    Parameters
    ----------
    data : np.ndarray[object]
    dayfirst : bool
    yearfirst : bool
    utc : bool, default False
        Whether to convert/localize timestamps to UTC.
    errors : {'raise', 'coerce'}
    allow_object : bool
        Whether to return an object-dtype ndarray instead of raising if the
        data contains more than one timezone.
    out_unit : str or None, default None
        None indicates we should do resolution inference.

    Returns
    -------
    result : ndarray
        np.datetime64[out_unit] if returned values represent wall times or UTC
        timestamps.
        object if mixed timezones
    inferred_tz : tzinfo or None
        If not None, then the datetime64 values in `result` denote UTC timestamps.

    Raises
    ------
    ValueError : if data cannot be converted to datetimes
    TypeError  : When a type cannot be converted to datetime
    """
    assert errors in ["raise", "coerce"]

    # if str-dtype, convert to object dtype so the C parser accepts it
    data = np.asarray(data, dtype=np.object_)

    result, tz_parsed = tslib.array_to_datetime(
        data,
        errors=errors,
        utc=utc,
        dayfirst=dayfirst,
        yearfirst=yearfirst,
        creso=abbrev_to_npy_unit(out_unit),
    )

    if tz_parsed is not None:
        # We can take a shortcut since the datetime64 numpy array
        #  is in UTC
        return result, tz_parsed
    elif result.dtype.kind == "M":
        # tz-naive result: no timezone to report.
        return result, tz_parsed
    elif result.dtype == object:
        # GH#23675 when called via `pd.to_datetime`, returning an object-dtype
        #  array is allowed.  When called via `pd.DatetimeIndex`, we can
        #  only accept datetime64 dtype, so raise TypeError if object-dtype
        #  is returned, as that indicates the values can be recognized as
        #  datetimes but they have conflicting timezones/awareness
        if allow_object:
            return result, tz_parsed
        raise TypeError("DatetimeIndex has mixed timezones")
    else:  # pragma: no cover
        # GH#23675 this TypeError should never be hit, whereas the TypeError
        #  in the object-dtype branch above is reachable.
        raise TypeError(result)
def maybe_convert_dtype(data, copy: bool, tz: tzinfo | None = None):
    """
    Convert data based on dtype conventions, issuing
    errors where appropriate.

    Parameters
    ----------
    data : np.ndarray or pd.Index
    copy : bool
    tz : tzinfo or None, default None

    Returns
    -------
    data : np.ndarray or pd.Index
    copy : bool

    Raises
    ------
    TypeError : PeriodDType data is passed
    """
    if not hasattr(data, "dtype"):
        # e.g. collections.deque
        return data, copy

    if is_float_dtype(data.dtype):
        # pre-2.0 we treated these as wall-times, inconsistent with ints
        #  GH#23675, GH#45573 deprecated to treat symmetrically with integer dtypes.
        #  Note: data.astype(np.int64) fails ARM tests, see
        #  https://github.com/pandas-dev/pandas/issues/49468.
        data = data.astype(DT64NS_DTYPE).view("i8")
        copy = False

    elif lib.is_np_dtype(data.dtype, "m") or is_bool_dtype(data.dtype):
        # GH#29794 enforcing deprecation introduced in GH#23539
        # timedelta/bool data cannot be reinterpreted as datetimes.
        raise TypeError(f"dtype {data.dtype} cannot be converted to datetime64[ns]")
    elif isinstance(data.dtype, PeriodDtype):
        # Note: without explicitly raising here, PeriodIndex
        #  test_setops.test_join_does_not_recur fails
        raise TypeError(
            "Passing PeriodDtype data is invalid. Use `data.to_timestamp()` instead"
        )

    elif isinstance(data.dtype, ExtensionDtype) and not isinstance(
        data.dtype, DatetimeTZDtype
    ):
        # TODO: We have no tests for these
        # Fall back to object dtype for other extension types (e.g. masked).
        data = np.array(data, dtype=np.object_)
        copy = False

    return data, copy
# -------------------------------------------------------------------
# Validation and Inference
def _maybe_infer_tz(tz: tzinfo | None, inferred_tz: tzinfo | None) -> tzinfo | None:
"""
If a timezone is inferred from data, check that it is compatible with
the user-provided timezone, if any.
Parameters
----------
tz : tzinfo or None
inferred_tz : tzinfo or None
Returns
-------
tz : tzinfo or None
Raises
------
TypeError : if both timezones are present but do not match
"""
if tz is None:
tz = inferred_tz
elif inferred_tz is None:
pass
elif not timezones.tz_compare(tz, inferred_tz):
raise TypeError(
f"data is already tz-aware {inferred_tz}, unable to set specified tz: {tz}"
)
return tz
def _validate_dt64_dtype(dtype):
    """
    Check that a dtype, if passed, represents either a numpy datetime64[ns]
    dtype or a pandas DatetimeTZDtype.

    Parameters
    ----------
    dtype : object

    Returns
    -------
    dtype : None, numpy.dtype, or DatetimeTZDtype

    Raises
    ------
    ValueError : invalid dtype

    Notes
    -----
    Unlike _validate_tz_from_dtype, this does _not_ allow non-existent
    tz errors to go through
    """
    if dtype is not None:
        dtype = pandas_dtype(dtype)
        if dtype == np.dtype("M8"):
            # no precision, disallowed GH#24806
            msg = (
                "Passing in 'datetime64' dtype with no precision is not allowed. "
                "Please pass in 'datetime64[ns]' instead."
            )
            raise ValueError(msg)

        if (
            isinstance(dtype, np.dtype)
            and (dtype.kind != "M" or not is_supported_dtype(dtype))
        ) or not isinstance(dtype, (np.dtype, DatetimeTZDtype)):
            # Reject anything that is neither a supported numpy "M8" dtype
            # nor a pandas DatetimeTZDtype.
            raise ValueError(
                f"Unexpected value for 'dtype': '{dtype}'. "
                "Must be 'datetime64[s]', 'datetime64[ms]', 'datetime64[us]', "
                "'datetime64[ns]' or DatetimeTZDtype'."
            )

        if getattr(dtype, "tz", None):
            # https://github.com/pandas-dev/pandas/issues/18595
            # Ensure that we have a standard timezone for pytz objects.
            # Without this, things like adding an array of timedeltas and
            # a  tz-aware Timestamp (with a tz specific to its datetime) will
            # be incorrect(ish?) for the array as a whole
            dtype = cast(DatetimeTZDtype, dtype)
            dtype = DatetimeTZDtype(
                unit=dtype.unit, tz=timezones.tz_standardize(dtype.tz)
            )

    return dtype
def _validate_tz_from_dtype(
    dtype, tz: tzinfo | None, explicit_tz_none: bool = False
) -> tzinfo | None:
    """
    If the given dtype is a DatetimeTZDtype, extract the implied
    tzinfo object from it and check that it does not conflict with the given
    tz.

    Parameters
    ----------
    dtype : dtype, str
    tz : None, tzinfo
    explicit_tz_none : bool, default False
        Whether tz=None was passed explicitly, as opposed to lib.no_default.

    Returns
    -------
    tz : consensus tzinfo

    Raises
    ------
    ValueError : on tzinfo mismatch
    """
    if dtype is not None:
        if isinstance(dtype, str):
            try:
                dtype = DatetimeTZDtype.construct_from_string(dtype)
            except TypeError:
                # Things like `datetime64[ns]`, which is OK for the
                # constructors, but also nonsense, which should be validated
                # but not by us. We *do* allow non-existent tz errors to
                # go through
                pass
        dtz = getattr(dtype, "tz", None)
        if dtz is not None:
            if tz is not None and not timezones.tz_compare(tz, dtz):
                raise ValueError("cannot supply both a tz and a dtype with a tz")
            if explicit_tz_none:
                raise ValueError("Cannot pass both a timezone-aware dtype and tz=None")
            tz = dtz

        if tz is not None and lib.is_np_dtype(dtype, "M"):
            # We also need to check for the case where the user passed a
            #  tz-naive dtype (i.e. datetime64[ns])
            # NOTE(review): on this path dtz is None (the dtype is tz-naive),
            # so tz_compare presumably returns False and this always raises
            # when a tz was supplied -- confirm the inner check is redundant.
            if tz is not None and not timezones.tz_compare(tz, dtz):
                raise ValueError(
                    "cannot supply both a tz and a "
                    "timezone-naive dtype (i.e. datetime64[ns])"
                )

    return tz
def _infer_tz_from_endpoints(
    start: Timestamp, end: Timestamp, tz: tzinfo | None
) -> tzinfo | None:
    """
    If a timezone is not explicitly given via `tz`, see if one can
    be inferred from the `start` and `end` endpoints.  If more than one
    of these inputs provides a timezone, require that they all agree.

    Parameters
    ----------
    start : Timestamp
    end : Timestamp
    tz : tzinfo or None

    Returns
    -------
    tz : tzinfo or None

    Raises
    ------
    TypeError : if start and end timezones do not agree
    """
    try:
        derived = timezones.infer_tzinfo(start, end)
    except AssertionError as err:
        # infer_tzinfo raises AssertionError when the endpoints carry
        # mismatched timezones; re-raise as the public TypeError.
        raise TypeError(
            "Start and end cannot both be tz-aware with different timezones"
        ) from err

    # Normalize both candidates to canonical tzinfo objects before comparing.
    derived = timezones.maybe_get_tz(derived)
    tz = timezones.maybe_get_tz(tz)

    if tz is None:
        # No explicit tz: use whatever the endpoints implied (possibly None).
        return derived
    if derived is not None and not timezones.tz_compare(derived, tz):
        raise AssertionError("Inferred time zone not equal to passed time zone")
    return tz
def _maybe_normalize_endpoints(
start: _TimestampNoneT1, end: _TimestampNoneT2, normalize: bool
) -> tuple[_TimestampNoneT1, _TimestampNoneT2]:
if normalize:
if start is not None:
start = start.normalize()
if end is not None:
end = end.normalize()
return start, end
def _maybe_localize_point(
ts: Timestamp | None, freq, tz, ambiguous, nonexistent
) -> Timestamp | None:
"""
Localize a start or end Timestamp to the timezone of the corresponding
start or end Timestamp
Parameters
----------
ts : start or end Timestamp to potentially localize
freq : Tick, DateOffset, or None
tz : str, timezone object or None
ambiguous: str, localization behavior for ambiguous times
nonexistent: str, localization behavior for nonexistent times
Returns
-------
ts : Timestamp
"""
# Make sure start and end are timezone localized if:
# 1) freq = a Timedelta-like frequency (Tick)
# 2) freq = None i.e. generating a linspaced range
if ts is not None and ts.tzinfo is None:
# Note: We can't ambiguous='infer' a singular ambiguous time; however,
# we have historically defaulted ambiguous=False
ambiguous = ambiguous if ambiguous != "infer" else False
localize_args = {"ambiguous": ambiguous, "nonexistent": nonexistent, "tz": None}
if isinstance(freq, Tick) or freq is None:
localize_args["tz"] = tz
ts = ts.tz_localize(**localize_args)
return ts
def _generate_range(
    start: Timestamp | None,
    end: Timestamp | None,
    periods: int | None,
    offset: BaseOffset,
    *,
    unit: TimeUnit,
) -> Generator[Timestamp]:
    """
    Generates a sequence of dates corresponding to the specified time
    offset. Similar to dateutil.rrule except uses pandas DateOffset
    objects to represent time increments.

    Parameters
    ----------
    start : Timestamp or None
        First candidate endpoint; rolled onto the offset if not already
        on it.
    end : Timestamp or None
        Last candidate endpoint; serves as the stopping bound.
    periods : int or None
        Number of values; used to derive whichever endpoint is missing.
    offset : DateOffset
        Increment applied between successive yielded values; its sign
        (``offset.n``) determines the direction of iteration.
    unit : str
        Resolution each yielded Timestamp is cast to via ``as_unit``.

    Notes
    -----
    * This method is faster for generating weekdays than dateutil.rrule
    * At least two of (start, end, periods) must be specified.
    * If both start and end are specified, the returned dates will
    satisfy start <= date <= end.

    Returns
    -------
    dates : generator object
    """
    offset = to_offset(offset)
    # Timestamp(None) returns NaT, which is how a missing endpoint is
    # detected below before being replaced with None.
    # Argument 1 to "Timestamp" has incompatible type "Optional[Timestamp]";
    # expected "Union[integer[Any], float, str, date, datetime64]"
    start = Timestamp(start)  # type: ignore[arg-type]
    if start is not NaT:
        start = start.as_unit(unit)
    else:
        start = None
    # Argument 1 to "Timestamp" has incompatible type "Optional[Timestamp]";
    # expected "Union[integer[Any], float, str, date, datetime64]"
    end = Timestamp(end)  # type: ignore[arg-type]
    if end is not NaT:
        end = end.as_unit(unit)
    else:
        end = None
    if start and not offset.is_on_offset(start):
        # Roll start onto the offset grid, in the direction of iteration.
        # Incompatible types in assignment (expression has type "datetime",
        # variable has type "Optional[Timestamp]")
        # GH #56147 account for negative direction and range bounds
        if offset.n >= 0:
            start = offset.rollforward(start)  # type: ignore[assignment]
        else:
            start = offset.rollback(start)  # type: ignore[assignment]
    # end < start with a forward offset means an empty range was requested;
    # force periods=0 and recompute end from start below.
    # Unsupported operand types for < ("Timestamp" and "None")
    if periods is None and end < start and offset.n >= 0:  # type: ignore[operator]
        end = None
        periods = 0
    if end is None:
        # error: No overload variant of "__radd__" of "BaseOffset" matches
        # argument type "None"
        end = start + (periods - 1) * offset  # type: ignore[operator]
    if start is None:
        # error: No overload variant of "__radd__" of "BaseOffset" matches
        # argument type "None"
        start = end - (periods - 1) * offset  # type: ignore[operator]
    # By this point both endpoints are concrete Timestamps; the casts only
    # inform the type checker.
    start = cast(Timestamp, start)
    end = cast(Timestamp, end)
    cur = start
    if offset.n >= 0:
        # Forward iteration: yield while cur has not passed end.
        while cur <= end:
            yield cur
            if cur == end:
                # GH#24252 avoid overflows by not performing the addition
                # in offset.apply unless we have to
                break
            # faster than cur + offset
            next_date = offset._apply(cur)
            next_date = next_date.as_unit(unit)
            if next_date <= cur:
                raise ValueError(f"Offset {offset} did not increment date")
            cur = next_date
    else:
        # Backward iteration (negative offset): yield while cur >= end.
        while cur >= end:
            yield cur
            if cur == end:
                # GH#24252 avoid overflows by not performing the addition
                # in offset.apply unless we have to
                break
            # faster than cur + offset
            next_date = offset._apply(cur)
            next_date = next_date.as_unit(unit)
            if next_date >= cur:
                raise ValueError(f"Offset {offset} did not decrement date")
            cur = next_date
| DatetimeArray |
python | catalyst-team__catalyst | catalyst/settings.py | {
"start": 3501,
"end": 10370
} | class ____(FrozenClass):
"""Catalyst settings."""
def __init__( # noqa: D107
self,
# [subpackages]
cv_required: Optional[bool] = None,
ml_required: Optional[bool] = None,
# [integrations]
optuna_required: Optional[bool] = None,
# [dl-extras]
onnx_required: Optional[bool] = None,
pruning_required: Optional[bool] = None,
quantization_required: Optional[bool] = None,
# [logging]
comet_required: Optional[bool] = None,
mlflow_required: Optional[bool] = None,
neptune_required: Optional[bool] = None,
wandb_required: Optional[bool] = None,
# [extras]
use_lz4: Optional[bool] = None,
use_pyarrow: Optional[bool] = None,
use_libjpeg_turbo: Optional[bool] = None,
log_batch_metrics: Optional[bool] = None,
log_epoch_metrics: Optional[bool] = None,
compute_per_class_metrics: Optional[bool] = None,
# [versions]
is_torch_1_7_0: Optional[bool] = None,
):
# True – use the package
# None – use the package if available
# False - block the package
# [subpackages]
self.cv_required: bool = _get_optional_value(
cv_required,
_is_cv_available,
"catalyst[cv] is not available, "
"to install it, run `pip install catalyst[cv]`.",
)
self.ml_required: bool = _get_optional_value(
ml_required,
_is_ml_available,
"catalyst[ml] is not available, "
"to install it, run `pip install catalyst[ml]`.",
)
# [integrations]
self.optuna_required: bool = _get_optional_value(
optuna_required,
_is_optuna_available,
"catalyst[optuna] is not available, "
"to install it, run `pip install catalyst[optuna]`.",
)
# [engines]
self.amp_required: bool = _get_optional_value(
None,
_is_amp_available,
"catalyst[amp] is not available, "
"to install it, run `pip install catalyst[amp]`.",
)
self.apex_required: bool = _get_optional_value(
None,
_is_apex_avalilable,
"catalyst[apex] is not available, "
"to install it, run `pip install catalyst[apex]`.",
)
self.xla_required: bool = _get_optional_value(
None,
_is_xla_available,
"catalyst[xla] is not available, "
"to install it, run `pip install catalyst[xla]`.",
)
self.fairscale_required: bool = _get_optional_value(
None,
_is_fairscale_available,
"catalyst[fairscale] is not available, "
"to install it, run `pip install catalyst[fairscale]`.",
)
self.deepspeed_required: bool = _get_optional_value(
None,
_is_deepspeed_available,
"catalyst[deepspeed] is not available, "
"to install it, run `pip install catalyst[deepspeed]`.",
)
# [dl-extras]
self.onnx_required: bool = _get_optional_value(
onnx_required,
_is_onnx_available,
"catalyst[onnx] is not available, to install it, "
"run `pip install catalyst[onnx]` or `pip install catalyst[onnx-gpu]`.",
)
self.pruning_required: bool = _get_optional_value(
pruning_required,
_is_pruning_available,
"catalyst[pruning] is not available, to install it, "
"run `pip install catalyst[pruning]`.",
)
self.quantization_required: bool = _get_optional_value(
quantization_required,
_is_quantization_available,
"catalyst[quantization] is not available, to install it, "
"run `pip install catalyst[quantization]`.",
)
# [logging]
# self.alchemy_required: bool = alchemy_required
self.comet_required: bool = _get_optional_value(
comet_required,
_is_comet_available,
"comet is not available, to install, run 'pip install comet_ml'.",
)
self.mlflow_required: bool = _get_optional_value(
mlflow_required,
_is_mlflow_available,
"catalyst[mlflow] is not available, to install it, "
"run `pip install catalyst[mlflow]`.",
)
self.neptune_required: bool = _get_optional_value(
neptune_required,
_is_neptune_available,
"neptune is not available, to install it, run `pip install neptune-client`.",
)
self.wandb_required: bool = _get_optional_value(
wandb_required,
_is_wandb_available,
"wandb is not available, to install it, " "run `pip install wandb`.",
)
# [extras]
self.yaml_required: bool = _get_optional_value(
None,
_is_yaml_available,
"yaml is not available, to install it, " "run `pip install PyYAML>=5.1`.",
)
self.use_lz4: bool = use_lz4 or os.environ.get("CATALYST_USE_LZ4", "0") == "1"
self.use_pyarrow: bool = (
use_pyarrow or os.environ.get("CATALYST_USE_PYARROW", "0") == "1"
)
self.use_libjpeg_turbo: bool = (
use_libjpeg_turbo or os.environ.get("CATALYST_USE_LIBJPEG_TURBO", "0") == "1"
)
self.log_batch_metrics: bool = (
log_batch_metrics or os.environ.get("CATALYST_LOG_BATCH_METRICS", "0") == "1"
)
self.log_epoch_metrics: bool = (
log_epoch_metrics or os.environ.get("CATALYST_LOG_EPOCH_METRICS", "1") == "1"
)
self.compute_per_class_metrics: bool = (
compute_per_class_metrics
or os.environ.get("CATALYST_COMPUTE_PER_CLASS_METRICS", "0") == "1"
)
# [versions]
self.is_torch_1_7_0: bool = _get_optional_value(
is_torch_1_7_0, _is_torch_1_7_0, "upgrade to torch >= 1.7.0."
)
@staticmethod
def _optional_value(value, default):
return value if value is not None else default
@staticmethod
def parse() -> "Settings":
"""Parse and return the settings.
Returns:
Settings: Dictionary of the parsed and merged Settings.
"""
kwargrs = MergedConfigParser(ConfigFileFinder("catalyst")).parse()
return Settings(**kwargrs)
def type_hint(self, key: str):
"""Returns type hint for the specified ``key``.
Args:
key: key of interest
Returns:
type hint for the specified key
"""
# return get_type_hints(self).get(key, None)
return type(getattr(self, key, None))
DEFAULT_SETTINGS = Settings()
| Settings |
python | ipython__ipython | IPython/lib/pretty.py | {
"start": 17415,
"end": 18664
} | class ____:
""" Object which emits a line-wrapped call expression in the form `__name(*args, **kwargs)` """
def __init__(__self, __name, *args, **kwargs):
# dunders are to avoid clashes with kwargs, as python's name managing
# will kick in.
self = __self
self.name = __name
self.args = args
self.kwargs = kwargs
@classmethod
def factory(cls, name):
def inner(*args, **kwargs):
return cls(name, *args, **kwargs)
return inner
def _repr_pretty_(self, p, cycle):
# dunders are to avoid clashes with kwargs, as python's name managing
# will kick in.
started = False
def new_item():
nonlocal started
if started:
p.text(",")
p.breakable()
started = True
prefix = self.name + "("
with p.group(len(prefix), prefix, ")"):
for arg in self.args:
new_item()
p.pretty(arg)
for arg_name, arg in self.kwargs.items():
new_item()
arg_prefix = arg_name + "="
with p.group(len(arg_prefix), arg_prefix):
p.pretty(arg)
| CallExpression |
python | pandas-dev__pandas | asv_bench/benchmarks/reshape.py | {
"start": 1120,
"end": 1514
} | class ____:
def setup(self):
arrays = [np.arange(100).repeat(100), np.roll(np.tile(np.arange(100), 100), 25)]
index = MultiIndex.from_arrays(arrays)
self.df = DataFrame(np.random.randn(10000, 4), index=index)
self.udf = self.df.unstack(1)
def time_stack(self):
self.udf.stack()
def time_unstack(self):
self.df.unstack(1)
| SimpleReshape |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/integrations/looker/customize_upstream_dependencies.py | {
"start": 409,
"end": 1292
} | class ____(DagsterLookerApiTranslator):
def get_asset_spec(
self, looker_structure: LookerApiTranslatorStructureData
) -> dg.AssetSpec:
# We create the default asset spec using super()
default_spec = super().get_asset_spec(looker_structure)
# We customize upstream dependencies for the Looker view named `my_looker_view`
return default_spec.replace_attributes(
deps=["my_upstream_asset"]
if looker_structure.structure_type == LookerStructureType.VIEW
and looker_structure.data.view_name == "my_looker_view"
else ...
)
looker_specs = load_looker_asset_specs(
looker_resource, dagster_looker_translator=CustomDagsterLookerApiTranslator()
)
# end_upstream_asset
defs = dg.Definitions(assets=[*looker_specs], resources={"looker": looker_resource})
| CustomDagsterLookerApiTranslator |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/lib/metadata_service/registry.py | {
"start": 1393,
"end": 14730
} | class ____(json.JSONDecoder):
"""A JSON decoder that converts "null" strings to None."""
def __init__(self, *args, **kwargs):
super().__init__(object_hook=self.object_hook, *args, **kwargs)
def object_hook(self, obj):
return {k: (None if v == "null" else v) for k, v in obj.items()}
def _apply_metrics_to_registry_entry(registry_entry_dict: dict, connector_type: ConnectorTypes, latest_metrics_dict: dict) -> dict:
"""Apply the metrics to the registry entry.
Args:
registry_entry_dict (dict): The registry entry.
latest_metrics_dict (dict): The metrics.
Returns:
dict: The registry entry with metrics.
"""
connector_id = registry_entry_dict[ConnectorTypePrimaryKey[connector_type.value]]
metrics = latest_metrics_dict.get(connector_id, {})
# Safely add metrics to ["generated"]["metrics"], knowing that the key may not exist, or might be None
registry_entry_dict = set_with(registry_entry_dict, "generated.metrics", metrics, default_none_to_dict)
return registry_entry_dict
def _apply_release_candidate_entries(registry_entry_dict: dict, docker_repository_to_rc_registry_entry: dict) -> dict:
"""Apply the optionally existing release candidate entries to the registry entry.
We need both the release candidate metadata entry and the release candidate registry entry because the metadata entry contains the rollout configuration, and the registry entry contains the actual RC registry entry.
Args:
registry_entry_dict (dict): The registry entry.
docker_repository_to_rc_registry_entry (dict): Mapping of docker repository to release candidate registry entry.
Returns:
dict: The registry entry with release candidates applied.
"""
registry_entry_dict = copy.deepcopy(registry_entry_dict)
if registry_entry_dict["dockerRepository"] in docker_repository_to_rc_registry_entry:
release_candidate_registry_entry = docker_repository_to_rc_registry_entry[registry_entry_dict["dockerRepository"]]
registry_entry_dict = _apply_release_candidates(registry_entry_dict, release_candidate_registry_entry)
return registry_entry_dict
def _apply_release_candidates(
latest_registry_entry: dict,
release_candidate_registry_entry: PolymorphicRegistryEntry,
) -> dict:
"""Apply the release candidate entries to the registry entry.
Args:
latest_registry_entry (dict): The latest registry entry.
release_candidate_registry_entry (PolymorphicRegistryEntry): The release candidate registry entry.
Returns:
dict: The registry entry with release candidates applied.
"""
try:
if not release_candidate_registry_entry.releases.rolloutConfiguration.enableProgressiveRollout:
return latest_registry_entry
# Handle if releases or rolloutConfiguration is not present in the release candidate registry entry
except AttributeError:
return latest_registry_entry
# If the relase candidate is older than the latest registry entry, don't apply the release candidate and return the latest registry entry
if semver.Version.parse(release_candidate_registry_entry.dockerImageTag) < semver.Version.parse(
latest_registry_entry["dockerImageTag"]
):
return latest_registry_entry
updated_registry_entry = copy.deepcopy(latest_registry_entry)
updated_registry_entry.setdefault("releases", {})
updated_registry_entry["releases"]["releaseCandidates"] = {
release_candidate_registry_entry.dockerImageTag: to_json_sanitized_dict(release_candidate_registry_entry)
}
return updated_registry_entry
def _build_connector_registry(
latest_registry_entries: list[PolymorphicRegistryEntry], latest_connector_metrics: dict, docker_repository_to_rc_registry_entry: dict
) -> ConnectorRegistryV0:
registry_dict = {"sources": [], "destinations": []}
for latest_registry_entry in latest_registry_entries:
connector_type = _get_connector_type_from_registry_entry(latest_registry_entry)
plural_connector_type = f"{connector_type.value}s"
registry_entry_dict = to_json_sanitized_dict(latest_registry_entry)
enriched_registry_entry_dict = _apply_metrics_to_registry_entry(registry_entry_dict, connector_type, latest_connector_metrics)
enriched_registry_entry_dict = _apply_release_candidate_entries(
enriched_registry_entry_dict, docker_repository_to_rc_registry_entry
)
registry_dict[plural_connector_type].append(enriched_registry_entry_dict)
return ConnectorRegistryV0.parse_obj(registry_dict)
def _convert_json_to_metrics_dict(jsonl_string: str) -> dict:
"""Convert the jsonl string to a metrics dict.
Args:
jsonl_string (str): The jsonl string.
Returns:
dict: The metrics dict.
"""
metrics_dict = defaultdict(dict)
jsonl_lines = jsonl_string.splitlines()
for line in jsonl_lines:
data = json.loads(line, cls=StringNullJsonDecoder)
connector_data = data["_airbyte_data"]
connector_definition_id = connector_data["connector_definition_id"]
airbyte_platform = connector_data["airbyte_platform"]
metrics_dict[connector_definition_id][airbyte_platform] = connector_data
return metrics_dict
def _get_connector_type_from_registry_entry(registry_entry: PolymorphicRegistryEntry) -> ConnectorTypes:
"""Get the connector type from the registry entry.
Args:
registry_entry (PolymorphicRegistryEntry): The registry entry.
Returns:
ConnectorTypes: The connector type.
"""
if hasattr(registry_entry, ConnectorTypePrimaryKey.SOURCE):
return ConnectorTypes.SOURCE
elif hasattr(registry_entry, ConnectorTypePrimaryKey.DESTINATION):
return ConnectorTypes.DESTINATION
else:
raise ValueError("Registry entry is not a source or destination")
@sentry_sdk.trace
def _get_latest_registry_entries(bucket: storage.Bucket, registry_type: str) -> list[PolymorphicRegistryEntry]:
"""Get the latest registry entries from the GCS bucket.
Args:
bucket (storage.Bucket): The GCS bucket.
registry_type (str): The registry type.
Returns:
list[PolymorphicRegistryEntry]: The latest registry entries.
"""
registry_type_file_name = f"{registry_type}.json"
try:
logger.info(f"Listing blobs in the latest folder: {METADATA_FOLDER}/**/latest/{registry_type_file_name}")
blobs = bucket.list_blobs(match_glob=f"{METADATA_FOLDER}/**/latest/{registry_type_file_name}")
except Exception as e:
logger.error(f"Error listing blobs in the latest folder: {str(e)}")
return []
latest_registry_entries = []
for blob in blobs:
logger.info(f"Reading blob: {blob.name}")
registry_dict = json.loads(safe_read_gcs_file(blob))
try:
if registry_dict.get(ConnectorTypePrimaryKey.SOURCE.value):
registry_model = ConnectorRegistrySourceDefinition.parse_obj(registry_dict)
elif registry_dict.get(ConnectorTypePrimaryKey.DESTINATION.value):
registry_model = ConnectorRegistryDestinationDefinition.parse_obj(registry_dict)
else:
logger.warning(f"Failed to parse registry model for {blob.name}. Skipping.")
continue
except Exception as e:
logger.error(f"Error parsing registry model for {blob.name}: {str(e)}")
continue
latest_registry_entries.append(registry_model)
return latest_registry_entries
@sentry_sdk.trace
def _get_release_candidate_registry_entries(bucket: storage.Bucket, registry_type: str) -> list[PolymorphicRegistryEntry]:
"""Get the release candidate registry entries from the GCS bucket.
Args:
bucket (storage.Bucket): The GCS bucket.
registry_type (str): The registry type.
Returns:
list[PolymorphicRegistryEntry]: The release candidate registry entries.
"""
blobs = bucket.list_blobs(match_glob=f"{METADATA_FOLDER}/**/release_candidate/{registry_type}.json")
release_candidate_registry_entries = []
for blob in blobs:
logger.info(f"Reading blob: {blob.name}")
registry_dict = json.loads(safe_read_gcs_file(blob))
try:
if "/source-" in blob.name:
registry_model = ConnectorRegistrySourceDefinition.parse_obj(registry_dict)
else:
registry_model = ConnectorRegistryDestinationDefinition.parse_obj(registry_dict)
except Exception as e:
logger.error(f"Error parsing registry model for {blob.name}: {str(e)}")
continue
release_candidate_registry_entries.append(registry_model)
return release_candidate_registry_entries
@sentry_sdk.trace
def _get_latest_connector_metrics(bucket: storage.Bucket) -> dict:
"""Get the latest connector metrics from the GCS bucket.
Args:
bucket (storage.Bucket): The GCS bucket.
Returns:
dict: The latest connector metrics.
"""
try:
logger.info(f"Getting blobs in the analytics folder: {ANALYTICS_FOLDER}")
blobs = bucket.list_blobs(prefix=f"{ANALYTICS_FOLDER}/")
except Exception as e:
logger.error(f"Unexpected error listing blobs at {ANALYTICS_FOLDER}: {str(e)}")
return {}
if not blobs:
raise ValueError("No blobs found in the analytics folder")
# Sort blobs by updated time (most recent first)
most_recent_blob = max(blobs, key=lambda blob: blob.updated)
latest_metrics_jsonl = safe_read_gcs_file(most_recent_blob)
if latest_metrics_jsonl is None:
logger.warning(f"No metrics found for {most_recent_blob.name}")
return {}
try:
latest_metrics_dict = _convert_json_to_metrics_dict(latest_metrics_jsonl)
except Exception as e:
logger.error(f"Error converting json to metrics dict: {str(e)}")
return {}
return latest_metrics_dict
@sentry_sdk.trace
def _persist_registry(registry: ConnectorRegistryV0, registry_name: str, bucket: storage.Bucket) -> None:
"""Persist the registry to a json file on GCS bucket
Args:
registry (ConnectorRegistryV0): The registry.
registry_name (str): The name of the registry. One of "cloud" or "oss".
bucket (storage.Bucket): The GCS bucket.
Returns:
None
"""
registry_file_name = f"{registry_name}_registry.json"
registry_file_path = f"{REGISTRIES_FOLDER}/{registry_file_name}"
registry_json = registry.json(exclude_none=True)
registry_json = json.dumps(json.loads(registry_json), sort_keys=True)
try:
logger.info(f"Uploading {registry_name} registry to {registry_file_path}")
blob = bucket.blob(registry_file_path)
# In cloud, airbyte-cron polls the registry frequently, to enable faster connector updates.
# We should set a lower cache duration on the blob so that the cron receives an up-to-date view of the registry.
# However, OSS polls the registry much less frequently, so the default cache setting (1hr max-age) is fine.
if registry_name == "cloud":
blob.cache_control = "public, max-age=120"
blob.upload_from_string(registry_json.encode("utf-8"), content_type="application/json")
logger.info(f"Successfully uploaded {registry_name} registry to {registry_file_path}")
return
except Exception as e:
logger.error(f"Error persisting {registry_file_name} to json: {str(e)}")
raise e
def generate_and_persist_connector_registry(bucket_name: str, registry_type: str) -> None:
"""Generate and persist the registry to a json file on GCS bucket.
Args:
bucket_name (str): The name of the GCS bucket.
registry_type (str): The type of the registry.
Returns:
tuple[bool, Optional[str]]: A tuple containing a boolean indicating success and an optional error message.
"""
if registry_type not in VALID_REGISTRIES:
raise ValueError(f"Invalid registry type: {registry_type}. Valid types are: {', '.join(VALID_REGISTRIES)}.")
gcs_client = get_gcs_storage_client()
registry_bucket = gcs_client.bucket(bucket_name)
analytics_bucket = gcs_client.bucket(ANALYTICS_BUCKET)
latest_registry_entries = _get_latest_registry_entries(registry_bucket, registry_type)
release_candidate_registry_entries = _get_release_candidate_registry_entries(registry_bucket, registry_type)
docker_repository_to_rc_registry_entry = {
release_candidate_registry_entries.dockerRepository: release_candidate_registry_entries
for release_candidate_registry_entries in release_candidate_registry_entries
}
latest_connector_metrics = _get_latest_connector_metrics(analytics_bucket)
connector_registry = _build_connector_registry(
latest_registry_entries, latest_connector_metrics, docker_repository_to_rc_registry_entry
)
try:
_persist_registry(connector_registry, registry_type, registry_bucket)
except Exception as e:
message = f"*🤖 🔴 _Registry Generation_ FAILED*:\nFailed to generate and persist {registry_type} registry."
send_slack_message(PUBLISH_UPDATE_CHANNEL, message)
raise e
| StringNullJsonDecoder |
python | spack__spack | share/spack/qa/flake8_formatter.py | {
"start": 1439,
"end": 4173
} | class ____(Pylint):
def __init__(self, options):
self.spack_errors = {}
self.error_seen = False
super().__init__(options)
def after_init(self) -> None:
"""Overriding to keep format string from being unset in Default"""
pass
def beginning(self, filename):
self.filename = filename
self.file_lines = None
self.spack_errors = defaultdict(list)
for file_pattern, errors in pattern_exemptions.items():
if file_pattern.search(filename):
for code, pat_arr in errors.items():
self.spack_errors[code].extend(pat_arr)
def handle(self, error: Violation) -> None:
"""Handle an error reported by Flake8.
This defaults to calling :meth:`format`, :meth:`show_source`, and
then :meth:`write`. This version implements the pattern-based ignore
behavior from `spack flake8` as a native flake8 plugin.
:param error:
This will be an instance of
:class:`~flake8.style_guide.Violation`.
"""
# print(error.code)
# print(error.physical_line)
# get list of patterns for this error code
pats = self.spack_errors.get(error.code, None)
# if any pattern matches, skip line
if pats is not None and any((pat.search(error.physical_line) for pat in pats)):
return
# Special F811 handling
# Prior to Python 3.8, `noqa: F811` needed to be placed on the `@when`
# line
# Starting with Python 3.8, it must be placed on the `def` line
# https://gitlab.com/pycqa/flake8/issues/583
# we can only determine if F811 should be ignored given the previous
# line, so get the previous line and check it
if self.spack_errors.get("F811", False) and error.code == "F811" and error.line_number > 1:
if self.file_lines is None:
if self.filename in {"stdin", "-", "(none)", None}:
self.file_lines = pycodestyle.stdin_get_value().splitlines(True)
else:
self.file_lines = pycodestyle.readlines(self.filename)
for pat in self.spack_errors["F811"]:
if pat.search(self.file_lines[error.line_number - 2]):
return
self.error_seen = True
line = self.format(error)
source = self.show_source(error)
self.write(line, source)
def stop(self):
"""Override stop to check whether any errors we consider to be errors
were reported.
This is a hack, but it makes flake8 behave the desired way.
"""
if not self.error_seen:
sys.exit(0)
| SpackFormatter |
python | huggingface__transformers | tests/models/hiera/test_modeling_hiera.py | {
"start": 26112,
"end": 26354
} | class ____(unittest.TestCase, BackboneTesterMixin):
all_model_classes = (HieraBackbone,) if is_torch_available() else ()
config_class = HieraConfig
def setUp(self):
self.model_tester = HieraModelTester(self)
| HieraBackboneTest |
python | TheAlgorithms__Python | ciphers/onepad_cipher.py | {
"start": 16,
"end": 1855
} | class ____:
@staticmethod
def encrypt(text: str) -> tuple[list[int], list[int]]:
"""
Function to encrypt text using pseudo-random numbers
>>> Onepad().encrypt("")
([], [])
>>> Onepad().encrypt([])
([], [])
>>> random.seed(1)
>>> Onepad().encrypt(" ")
([6969], [69])
>>> random.seed(1)
>>> Onepad().encrypt("Hello")
([9729, 114756, 4653, 31309, 10492], [69, 292, 33, 131, 61])
>>> Onepad().encrypt(1)
Traceback (most recent call last):
...
TypeError: 'int' object is not iterable
>>> Onepad().encrypt(1.1)
Traceback (most recent call last):
...
TypeError: 'float' object is not iterable
"""
plain = [ord(i) for i in text]
key = []
cipher = []
for i in plain:
k = random.randint(1, 300)
c = (i + k) * k
cipher.append(c)
key.append(k)
return cipher, key
@staticmethod
def decrypt(cipher: list[int], key: list[int]) -> str:
"""
Function to decrypt text using pseudo-random numbers.
>>> Onepad().decrypt([], [])
''
>>> Onepad().decrypt([35], [])
''
>>> Onepad().decrypt([], [35])
Traceback (most recent call last):
...
IndexError: list index out of range
>>> random.seed(1)
>>> Onepad().decrypt([9729, 114756, 4653, 31309, 10492], [69, 292, 33, 131, 61])
'Hello'
"""
plain = []
for i in range(len(key)):
p = int((cipher[i] - (key[i]) ** 2) / key[i])
plain.append(chr(p))
return "".join(plain)
if __name__ == "__main__":
c, k = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
| Onepad |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1229876,
"end": 1230141
} | class ____(sgqlc.types.Type, Node, AuditEntry, EnterpriseAuditEntryData, OrganizationAuditEntryData):
"""Audit log entry for a members_can_delete_repos.enable event."""
__schema__ = github_schema
__field_names__ = ()
| MembersCanDeleteReposEnableAuditEntry |
python | ray-project__ray | python/ray/train/v2/_internal/exceptions.py | {
"start": 3294,
"end": 3773
} | class ____(RayTrainError):
"""Exception raised when the checkpoint manager fails to initialize from a snapshot.
Example scenarios:
1. The checkpoint manager snapshot version is old and
incompatible with the current version of Ray Train.
2. The checkpoint manager snapshot JSON file is corrupted.
3. The checkpoint manager snapshot references checkpoints that cannot be found
in the run storage path.
"""
| CheckpointManagerInitializationError |
python | pandas-dev__pandas | asv_bench/benchmarks/array.py | {
"start": 1331,
"end": 1537
} | class ____:
def setup(self):
N = 10_000
self.tuples = [(i, i + 1) for i in range(N)]
def time_from_tuples(self):
pd.arrays.IntervalArray.from_tuples(self.tuples)
| IntervalArray |
python | tensorflow__tensorflow | tensorflow/lite/tools/flatbuffer_utils_test.py | {
"start": 3233,
"end": 5407
} | class ____(test_util.TensorFlowTestCase):
def testStripStrings(self):
# 1. SETUP
# Define the initial model
initial_model = test_utils.build_mock_model()
final_model = copy.deepcopy(initial_model)
# 2. INVOKE
# Invoke the strip_strings function
flatbuffer_utils.strip_strings(final_model)
# 3. VALIDATE
# Validate that the initial and final models are the same except strings
# Validate the description
self.assertIsNotNone(initial_model.description)
self.assertIsNone(final_model.description)
self.assertIsNotNone(initial_model.signatureDefs)
self.assertIsNone(final_model.signatureDefs)
# Validate the main subgraph's name, inputs, outputs, operators and tensors
initial_subgraph = initial_model.subgraphs[0]
final_subgraph = final_model.subgraphs[0]
self.assertIsNotNone(initial_model.subgraphs[0].name)
self.assertIsNone(final_model.subgraphs[0].name)
for i in range(len(initial_subgraph.inputs)):
self.assertEqual(initial_subgraph.inputs[i], final_subgraph.inputs[i])
for i in range(len(initial_subgraph.outputs)):
self.assertEqual(initial_subgraph.outputs[i], final_subgraph.outputs[i])
for i in range(len(initial_subgraph.operators)):
self.assertEqual(initial_subgraph.operators[i].opcodeIndex,
final_subgraph.operators[i].opcodeIndex)
initial_tensors = initial_subgraph.tensors
final_tensors = final_subgraph.tensors
for i in range(len(initial_tensors)):
self.assertIsNotNone(initial_tensors[i].name)
self.assertIsNone(final_tensors[i].name)
self.assertEqual(initial_tensors[i].type, final_tensors[i].type)
self.assertEqual(initial_tensors[i].buffer, final_tensors[i].buffer)
for j in range(len(initial_tensors[i].shape)):
self.assertEqual(initial_tensors[i].shape[j], final_tensors[i].shape[j])
# Validate the first valid buffer (index 0 is always None)
initial_buffer = initial_model.buffers[1].data
final_buffer = final_model.buffers[1].data
for i in range(initial_buffer.size):
self.assertEqual(initial_buffer.data[i], final_buffer.data[i])
| StripStringsTest |
python | weaviate__weaviate-python-client | weaviate/collections/queries/near_object/query/async_.py | {
"start": 312,
"end": 461
} | class ____(
Generic[Properties, References],
_NearObjectQueryExecutor[ConnectionAsync, Properties, References],
):
pass
| _NearObjectQueryAsync |
python | ray-project__ray | doc/source/ray-overview/examples/e2e-multimodal-ai-workloads/doggos/doggos/embed.py | {
"start": 265,
"end": 3427
} | class ____(object):
def __init__(self, model_id, device):
# Load CLIP model and processor
self.processor = CLIPProcessor.from_pretrained(model_id)
self.model = CLIPModel.from_pretrained(model_id)
self.model.to(device)
self.device = device
def __call__(self, batch):
# Load and preprocess images
images = [
Image.fromarray(np.uint8(img)).convert("RGB") for img in batch["image"]
]
inputs = self.processor(images=images, return_tensors="pt", padding=True).to(
self.device
)
# Generate embeddings
with torch.inference_mode():
batch["embedding"] = self.model.get_image_features(**inputs).cpu().numpy()
return batch
def get_top_matches(query_embedding, embeddings_ds, class_filters=None, n=4):
rows = embeddings_ds.take_all()
if class_filters:
class_filters = set(class_filters)
rows = [r for r in rows if r["class"] in class_filters]
if not rows:
return []
# Vectorise
embeddings = np.vstack([r["embedding"] for r in rows]).astype(np.float32)
sims = 1 - cdist([query_embedding], embeddings, metric="cosine")[0]
# Stable top N in NumPy
k = min(n, sims.size)
idx = np.argpartition(-sims, k - 1)[:k]
idx = idx[np.argsort(-sims[idx])]
# Package results
return [
{
"class": rows[i]["class"],
"path": rows[i]["path"],
"similarity": float(sims[i]),
}
for i in idx
]
def display_top_matches(url, matches):
fig, axes = plt.subplots(1, len(matches) + 1, figsize=(15, 5))
# Display query image
axes[0].imshow(url_to_array(url=url))
axes[0].axis("off")
axes[0].set_title("Query image")
# Display matches
for i, match in enumerate(matches):
bucket = match["path"].split("/")[0]
key = "/".join(match["path"].split("/")[1:])
url = f"https://{bucket}.s3.us-west-2.amazonaws.com/{key}"
image = url_to_array(url=url)
axes[i + 1].imshow(image)
axes[i + 1].axis("off")
axes[i + 1].set_title(f"{match['class']} ({match['similarity']:.2f})")
plt.tight_layout()
plt.show()
if __name__ == "__main__":
# Load data
ds = ray.data.read_images(
"s3://doggos-dataset/train",
include_paths=True,
shuffle="files",
)
ds = ds.map(add_class)
# Batch embedding generation
embeddings_ds = ds.map_batches(
EmbedImages,
fn_constructor_kwargs={
"model_id": "openai/clip-vit-base-patch32",
"device": "cuda",
}, # class kwargs
fn_kwargs={},
compute=ray.data.ActorPoolStrategy(size=4),
batch_size=64,
num_gpus=1,
accelerator_type="T4",
)
embeddings_ds = embeddings_ds.drop_columns(["image"]) # remove image column
# Save to artifact storage
embeddings_path = os.path.join("/mnt/user_storage", "doggos/embeddings")
if os.path.exists(embeddings_path):
shutil.rmtree(embeddings_path) # clean up
embeddings_ds.write_parquet(embeddings_path)
| EmbedImages |
python | allegroai__clearml | clearml/backend_api/services/v2_9/tasks.py | {
"start": 87208,
"end": 88074
} | class ____(Response):
"""
Response of tasks.clone endpoint.
:param id: ID of the new task
:type id: str
"""
_service = "tasks"
_action = "clone"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {"id": {"description": "ID of the new task", "type": ["string", "null"]}},
"type": "object",
}
def __init__(self, id: Optional[str] = None, **kwargs: Any) -> None:
super(CloneResponse, self).__init__(**kwargs)
self.id = id
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
| CloneResponse |
python | automl__auto-sklearn | test/test_pipeline/components/feature_preprocessing/test_NoPreprocessing.py | {
"start": 211,
"end": 1025
} | class ____(PreprocessingTestCase):
def test_default_configuration(self):
transformation, original = _test_preprocessing(NoPreprocessing)
self.assertEqual(transformation.shape[0], original.shape[0])
self.assertEqual(transformation.shape[1], original.shape[1])
self.assertFalse((transformation == 0).all())
self.assertEqual(np.sum(original), np.sum(transformation))
self.assertEqual(np.min(original), np.min(transformation))
self.assertEqual(np.max(original), np.max(transformation))
self.assertEqual(np.std(original), np.std(transformation))
self.assertEqual(np.mean(original), np.mean(transformation))
def test_preprocessing_dtype(self):
super(NoneComponentTest, self)._test_preprocessing_dtype(NoPreprocessing)
| NoneComponentTest |
python | django__django | tests/model_fields/models.py | {
"start": 1985,
"end": 2925
} | class ____(models.Model):
class Suit(models.IntegerChoices):
DIAMOND = 1, "Diamond"
SPADE = 2, "Spade"
HEART = 3, "Heart"
CLUB = 4, "Club"
def get_choices():
return [(i, str(i)) for i in range(3)]
no_choices = models.IntegerField(null=True)
empty_choices = models.IntegerField(choices=(), null=True)
with_choices = models.IntegerField(choices=[(1, "A")], null=True)
with_choices_dict = models.IntegerField(choices={1: "A"}, null=True)
with_choices_nested_dict = models.IntegerField(
choices={"Thing": {1: "A"}}, null=True
)
empty_choices_bool = models.BooleanField(choices=())
empty_choices_text = models.TextField(choices=())
choices_from_enum = models.IntegerField(choices=Suit)
choices_from_iterator = models.IntegerField(choices=((i, str(i)) for i in range(3)))
choices_from_callable = models.IntegerField(choices=get_choices)
| Choiceful |
python | django__django | tests/admin_views/admin.py | {
"start": 13270,
"end": 13334
} | class ____(admin.StackedInline):
model = Grommet
| GrommetInline |
python | wandb__wandb | wandb/vendor/pygments/lexers/trafficscript.py | {
"start": 430,
"end": 1546
} | class ____(RegexLexer):
"""
For `Riverbed Stingray Traffic Manager <http://www.riverbed.com/stingray>`_
.. versionadded:: 2.1
"""
name = 'TrafficScript'
aliases = ['rts','trafficscript']
filenames = ['*.rts']
tokens = {
'root' : [
(r"'(\\\\|\\[^\\]|[^'\\])*'", String),
(r'"', String, 'escapable-string'),
(r'(0x[0-9a-fA-F]+|\d+)', Number),
(r'\d+\.\d+', Number.Float),
(r'\$[a-zA-Z](\w|_)*', Name.Variable),
(r'(if|else|for(each)?|in|while|do|break|sub|return|import)', Keyword),
(r'[a-zA-Z][\w.]*', Name.Function),
(r'[-+*/%=,;(){}<>^.!~|&\[\]\?\:]', Operator),
(r'(>=|<=|==|!=|'
r'&&|\|\||'
r'\+=|.=|-=|\*=|/=|%=|<<=|>>=|&=|\|=|\^=|'
r'>>|<<|'
r'\+\+|--|=>)', Operator),
(r'[ \t\r]+', Text),
(r'#[^\n]*', Comment),
],
'escapable-string' : [
(r'\\[tsn]', String.Escape),
(r'[^"]', String),
(r'"', String, '#pop'),
],
}
| RtsLexer |
python | numba__numba | numba/tests/cfunc_cache_usecases.py | {
"start": 715,
"end": 1608
} | class ____(TestCase):
"""
Tests for functionality of this module's cfuncs.
Note this does not define any "test_*" method, instead check_module()
should be called by hand.
"""
def check_module(self, mod):
f = mod.add_usecase
self.assertPreciseEqual(f.ctypes(2.0, 3.0), 6.0)
f = mod.add_nocache_usecase
self.assertPreciseEqual(f.ctypes(2.0, 3.0), 6.0)
f = mod.outer
self.assertPreciseEqual(f.ctypes(5.0, 2.0), 4.0)
f = mod.div_usecase
with captured_stderr() as err:
self.assertPreciseEqual(f.ctypes(7, 2), 3.5)
self.assertEqual(err.getvalue(), "")
with captured_stderr() as err:
f.ctypes(7, 0)
err = err.getvalue()
self.assertIn("ZeroDivisionError", err)
def self_test():
mod = sys.modules[__name__]
_TestModule().check_module(mod)
| _TestModule |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1585542,
"end": 1585734
} | class ____(sgqlc.types.Union):
"""Types which can be actors for `BranchActorAllowance` objects."""
__schema__ = github_schema
__types__ = (App, Team, User)
| BranchActorAllowanceActor |
python | huggingface__transformers | src/transformers/models/granitemoehybrid/modeling_granitemoehybrid.py | {
"start": 64579,
"end": 72073
} | class ____(GraniteMoeHybridPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config: GraniteMoeHybridConfig):
super().__init__(config)
self.model = GraniteMoeHybridModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.router_aux_loss_coef = config.router_aux_loss_coef
self.num_experts = config.num_local_experts
self.num_experts_per_tok = config.num_experts_per_tok
self.logits_scaling = config.logits_scaling
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
@can_return_tuple
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_router_logits: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> Union[tuple, MoeCausalLMOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, GraniteMoeHybridForCausalLM
>>> model = GraniteMoeHybridForCausalLM.from_pretrained("ibm/PowerMoE-3b")
>>> tokenizer = AutoTokenizer.from_pretrained("ibm/PowerMoE-3b")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
output_router_logits = (
output_router_logits if output_router_logits is not None else self.config.output_router_logits
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
cache_position=cache_position,
**kwargs,
)
# Only compute necessary logits
hidden_states = outputs.last_hidden_state
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
logits = logits / self.config.logits_scaling
loss = None
if labels is not None:
# Upcast to float if we need to compute the loss to avoid potential precision issues
logits = logits.float()
# Flatten the tokens
loss = self.loss_function(
logits,
labels,
vocab_size=self.config.vocab_size,
**kwargs,
)
aux_loss = None
if output_router_logits:
aux_loss = load_balancing_loss_func(
outputs.router_logits,
self.num_experts,
self.num_experts_per_tok,
attention_mask,
)
if labels is not None:
loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device
return MoeCausalLMOutputWithPast(
loss=loss,
aux_loss=aux_loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
router_logits=outputs.router_logits,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
attention_mask=None,
inputs_embeds=None,
cache_position=None,
position_ids=None,
use_cache=True,
**kwargs,
):
# Overwritten -- has a unique cache type, `HybridMambaAttentionDynamicCache`
empty_past_kv = past_key_values is None
# If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
# Exception 1: when passing input_embeds, input_ids may be missing entries
# Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
# Exception 3: with synced GPUs cache_position may go out of bounds, but we only want dummy token in that case.
# (we can't check exception 3 while compiling)
if not empty_past_kv:
if (
inputs_embeds is not None # Exception 1
or cache_position[-1] >= input_ids.shape[1] # Exception 3
):
input_ids = input_ids[:, -cache_position.shape[0] :]
elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2)
input_ids = input_ids[:, cache_position]
elif use_cache:
past_key_values = HybridMambaAttentionDynamicCache(
self.config, input_ids.shape[0], self.dtype, device=self.device
)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if not empty_past_kv:
position_ids = position_ids[:, -input_ids.shape[1] :]
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
if inputs_embeds is not None and empty_past_kv:
model_inputs = {"inputs_embeds": inputs_embeds}
else:
model_inputs = {"input_ids": input_ids.contiguous()} # `contiguous()` needed for compilation use cases
model_inputs.update(
{
"position_ids": position_ids,
"past_key_values": past_key_values,
"use_cache": use_cache,
"attention_mask": attention_mask,
"cache_position": cache_position,
}
)
# Forward ALL kwargs that are uninitialized (e.g. `use_cache`).
for key, value in kwargs.items():
if key not in model_inputs:
model_inputs[key] = value
return model_inputs
__all__ = ["GraniteMoeHybridForCausalLM", "GraniteMoeHybridModel", "GraniteMoeHybridPreTrainedModel"]
| GraniteMoeHybridForCausalLM |
python | MongoEngine__mongoengine | tests/fields/test_map_field.py | {
"start": 100,
"end": 4155
} | class ____(MongoDBTestCase):
def test_mapfield(self):
"""Ensure that the MapField handles the declared type."""
class Simple(Document):
mapping = MapField(IntField())
Simple.drop_collection()
e = Simple()
e.mapping["someint"] = 1
e.save()
with pytest.raises(ValidationError):
e.mapping["somestring"] = "abc"
e.save()
with pytest.raises(ValidationError):
class NoDeclaredType(Document):
mapping = MapField()
def test_complex_mapfield(self):
"""Ensure that the MapField can handle complex declared types."""
class SettingBase(EmbeddedDocument):
meta = {"allow_inheritance": True}
class StringSetting(SettingBase):
value = StringField()
class IntegerSetting(SettingBase):
value = IntField()
class Extensible(Document):
mapping = MapField(EmbeddedDocumentField(SettingBase))
Extensible.drop_collection()
e = Extensible()
e.mapping["somestring"] = StringSetting(value="foo")
e.mapping["someint"] = IntegerSetting(value=42)
e.save()
e2 = Extensible.objects.get(id=e.id)
assert isinstance(e2.mapping["somestring"], StringSetting)
assert isinstance(e2.mapping["someint"], IntegerSetting)
with pytest.raises(ValidationError):
e.mapping["someint"] = 123
e.save()
def test_embedded_mapfield_db_field(self):
class Embedded(EmbeddedDocument):
number = IntField(default=0, db_field="i")
class Test(Document):
my_map = MapField(field=EmbeddedDocumentField(Embedded), db_field="x")
Test.drop_collection()
test = Test()
test.my_map["DICTIONARY_KEY"] = Embedded(number=1)
test.save()
Test.objects.update_one(inc__my_map__DICTIONARY_KEY__number=1)
test = Test.objects.get()
assert test.my_map["DICTIONARY_KEY"].number == 2
doc = self.db.test.find_one()
assert doc["x"]["DICTIONARY_KEY"]["i"] == 2
def test_mapfield_numerical_index(self):
"""Ensure that MapField accept numeric strings as indexes."""
class Embedded(EmbeddedDocument):
name = StringField()
class Test(Document):
my_map = MapField(EmbeddedDocumentField(Embedded))
Test.drop_collection()
test = Test()
test.my_map["1"] = Embedded(name="test")
test.save()
test.my_map["1"].name = "test updated"
test.save()
def test_map_field_lookup(self):
"""Ensure MapField lookups succeed on Fields without a lookup
method.
"""
class Action(EmbeddedDocument):
operation = StringField()
object = StringField()
class Log(Document):
name = StringField()
visited = MapField(DateTimeField())
actions = MapField(EmbeddedDocumentField(Action))
Log.drop_collection()
Log(
name="wilson",
visited={"friends": datetime.datetime.now()},
actions={"friends": Action(operation="drink", object="beer")},
).save()
assert 1 == Log.objects(visited__friends__exists=True).count()
assert (
1
== Log.objects(
actions__friends__operation="drink", actions__friends__object="beer"
).count()
)
def test_map_field_unicode(self):
class Info(EmbeddedDocument):
description = StringField()
value_list = ListField(field=StringField())
class BlogPost(Document):
info_dict = MapField(field=EmbeddedDocumentField(Info))
BlogPost.drop_collection()
tree = BlogPost(info_dict={"éééé": {"description": "VALUE: éééé"}})
tree.save()
assert (
BlogPost.objects.get(id=tree.id).info_dict["éééé"].description
== "VALUE: éééé"
)
| TestMapField |
python | PyCQA__pylint | pylint/checkers/base/name_checker/naming_style.py | {
"start": 2896,
"end": 3318
} | class ____(NamingStyle):
"""Regex rules for UPPER_CASE naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]*$")
MOD_NAME_RGX = CLASS_NAME_RGX
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]*|__.*__)$")
COMP_VAR_RGX = CLASS_NAME_RGX
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]*|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\Wa-z]*$")
| UpperCaseStyle |
python | pytorch__pytorch | test/onnx/exporter/test_verification.py | {
"start": 235,
"end": 3305
} | class ____(common_utils.TestCase):
def test_from_tensors(self):
# Test with tensors
expected = torch.tensor([1.0, 2.0, 3.0])
actual = torch.tensor([1.0, 2.0, 3.0])
verification_info = _verification.VerificationInfo.from_tensors(
"test_tensor", expected, actual
)
self.assertEqual(verification_info.name, "test_tensor")
self.assertEqual(verification_info.max_abs_diff, 0)
self.assertEqual(verification_info.max_rel_diff, 0)
torch.testing.assert_close(
verification_info.abs_diff_hist[0], torch.tensor([3.0] + [0.0] * 8)
)
torch.testing.assert_close(
verification_info.rel_diff_hist[0], torch.tensor([3.0] + [0.0] * 8)
)
self.assertEqual(verification_info.expected_dtype, torch.float32)
self.assertEqual(verification_info.actual_dtype, torch.float32)
def test_from_tensors_int(self):
# Test with int tensors
expected = torch.tensor([1])
actual = 1
verification_info = _verification.VerificationInfo.from_tensors(
"test_tensor_int", expected, actual
)
self.assertEqual(verification_info.name, "test_tensor_int")
self.assertEqual(verification_info.max_abs_diff, 0)
self.assertEqual(verification_info.max_rel_diff, 0)
torch.testing.assert_close(
verification_info.abs_diff_hist[0], torch.tensor([1.0] + [0.0] * 8)
)
torch.testing.assert_close(
verification_info.rel_diff_hist[0], torch.tensor([1.0] + [0.0] * 8)
)
self.assertEqual(verification_info.expected_dtype, torch.int64)
self.assertEqual(verification_info.actual_dtype, torch.int64)
def test_asdict(self):
# Test the asdict method
expected = torch.tensor([1.0, 2.0, 3.0])
actual = torch.tensor([1.0, 2.0, 3.0])
verification_info = _verification.VerificationInfo.from_tensors(
"test_tensor", expected, actual
)
asdict_result = verification_info.asdict()
self.assertEqual(asdict_result["name"], "test_tensor")
self.assertEqual(asdict_result["max_abs_diff"], 0)
self.assertEqual(asdict_result["max_rel_diff"], 0)
self.assertEqual(
asdict_result["abs_diff_hist"],
[
[3.0] + [0.0] * 8,
[0.0, 1e-06, 1e-05, 0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 1000000.0],
],
)
self.assertEqual(
asdict_result["rel_diff_hist"],
[
[3.0] + [0.0] * 8,
[0.0, 1e-06, 1e-05, 0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 1000000.0],
],
)
self.assertEqual(asdict_result["expected_dtype"], "torch.float32")
self.assertEqual(asdict_result["actual_dtype"], "torch.float32")
# Ensure it can be round tripped as json
json_str = json.dumps(asdict_result)
loaded_dict = json.loads(json_str)
self.assertEqual(loaded_dict, asdict_result)
| VerificationInfoTest |
python | mlflow__mlflow | examples/llama_index/workflow/workflow/events.py | {
"start": 573,
"end": 785
} | class ____(Event):
"""Event to send retrieval result from each retriever to the gather step."""
nodes: list[NodeWithScore]
retriever: Literal["vector_search", "bm25", "web_search"]
| RetrievalResultEvent |
python | pandas-dev__pandas | asv_bench/benchmarks/rolling.py | {
"start": 1055,
"end": 1631
} | class ____:
params = (
["DataFrame", "Series"],
[3, 300],
["int", "float"],
[sum, np.sum, lambda x: np.sum(x) + 5],
[True, False],
)
param_names = ["constructor", "window", "dtype", "function", "raw"]
def setup(self, constructor, window, dtype, function, raw):
N = 10**3
arr = (100 * np.random.random(N)).astype(dtype)
self.roll = getattr(pd, constructor)(arr).rolling(window)
def time_rolling(self, constructor, window, dtype, function, raw):
self.roll.apply(function, raw=raw)
| Apply |
python | huggingface__transformers | src/transformers/models/blenderbot/modeling_blenderbot.py | {
"start": 10711,
"end": 13541
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: BlenderbotConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = BlenderbotAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
config=config,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
output_attentions: bool = False,
) -> torch.Tensor:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if hidden_states.dtype == torch.float16:
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
return hidden_states, attn_weights
# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Blenderbot, MBART->BLENDERBOT
| BlenderbotEncoderLayer |
python | getsentry__sentry | src/social_auth/exceptions.py | {
"start": 1262,
"end": 1433
} | class ____(AuthException):
"""Auth process was canceled by user."""
def __str__(self) -> str:
return gettext("Authentication process canceled")
| AuthCanceled |
python | kamyu104__LeetCode-Solutions | Python/check-if-one-string-swap-can-make-strings-equal.py | {
"start": 48,
"end": 492
} | class ____(object):
def areAlmostEqual(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: bool
"""
diff = []
for a, b in itertools.izip(s1, s2):
if a == b:
continue
if len(diff) == 2:
return False
diff.append([a, b] if not diff else [b, a])
return not diff or (len(diff) == 2 and diff[0] == diff[1])
| Solution |
python | pandas-dev__pandas | pandas/tests/series/test_formats.py | {
"start": 8878,
"end": 17404
} | class ____:
def test_categorical_repr_unicode(self):
# see gh-21002
class County:
name = "San Sebastián"
state = "PR"
def __repr__(self) -> str:
return self.name + ", " + self.state
cat = Categorical([County() for _ in range(61)])
idx = Index(cat)
ser = idx.to_series()
repr(ser)
str(ser)
def test_categorical_repr(self, using_infer_string):
a = Series(Categorical([1, 2, 3, 4]))
exp = (
"0 1\n1 2\n2 3\n3 4\n"
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]"
)
assert exp == a.__str__()
a = Series(Categorical(["a", "b"] * 25))
exp = (
"0 a\n1 b\n"
" ..\n"
"48 a\n49 b\n"
"Length: 50, dtype: category\nCategories (2, object): ['a', 'b']"
)
if using_infer_string:
exp = exp.replace("object", "str")
with option_context("display.max_rows", 5):
assert exp == repr(a)
levs = list("abcdefghijklmnopqrstuvwxyz")
a = Series(Categorical(["a", "b"], categories=levs, ordered=True))
exp = (
"0 a\n1 b\n"
"dtype: category\n"
"Categories (26, object): ['a' < 'b' < 'c' < 'd' ... "
"'w' < 'x' < 'y' < 'z']"
)
if using_infer_string:
exp = exp.replace("object", "str")
assert exp == a.__str__()
def test_categorical_series_repr(self):
s = Series(Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
assert repr(s) == exp
s = Series(Categorical(np.arange(10)))
exp = f"""0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, {np.dtype(int)}): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
assert repr(s) == exp
def test_categorical_series_repr_ordered(self):
s = Series(Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
assert repr(s) == exp
s = Series(Categorical(np.arange(10), ordered=True))
exp = f"""0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, {np.dtype(int)}): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
assert repr(s) == exp
def test_categorical_series_repr_datetime(self):
idx = date_range("2011-01-01 09:00", freq="h", periods=5, unit="ns")
s = Series(Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]""" # noqa: E501
assert repr(s) == exp
idx = date_range(
"2011-01-01 09:00", freq="h", periods=5, tz="US/Eastern", unit="ns"
)
s = Series(Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]""" # noqa: E501
assert repr(s) == exp
def test_categorical_series_repr_datetime_ordered(self):
idx = date_range("2011-01-01 09:00", freq="h", periods=5, unit="ns")
s = Series(Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa: E501
assert repr(s) == exp
idx = date_range(
"2011-01-01 09:00", freq="h", periods=5, tz="US/Eastern", unit="ns"
)
s = Series(Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa: E501
assert repr(s) == exp
def test_categorical_series_repr_period(self):
idx = period_range("2011-01-01 09:00", freq="h", periods=5)
s = Series(Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period[h]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]""" # noqa: E501
assert repr(s) == exp
idx = period_range("2011-01", freq="M", periods=5)
s = Series(Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
assert repr(s) == exp
def test_categorical_series_repr_period_ordered(self):
idx = period_range("2011-01-01 09:00", freq="h", periods=5)
s = Series(Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period[h]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]""" # noqa: E501
assert repr(s) == exp
idx = period_range("2011-01", freq="M", periods=5)
s = Series(Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
assert repr(s) == exp
def test_categorical_series_repr_timedelta(self):
idx = timedelta_range("1 days", periods=5)
s = Series(Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
assert repr(s) == exp
idx = timedelta_range("1 hours", periods=10)
s = Series(Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]""" # noqa: E501
assert repr(s) == exp
def test_categorical_series_repr_timedelta_ordered(self):
idx = timedelta_range("1 days", periods=5)
s = Series(Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
assert repr(s) == exp
idx = timedelta_range("1 hours", periods=10)
s = Series(Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]""" # noqa: E501
assert repr(s) == exp
| TestCategoricalRepr |
python | walkccc__LeetCode | solutions/250. Count Univalue Subtrees/250.py | {
"start": 0,
"end": 396
} | class ____:
def countUnivalSubtrees(self, root: TreeNode | None) -> int:
ans = 0
def isUnival(root: TreeNode | None, val: int) -> bool:
nonlocal ans
if not root:
return True
if isUnival(root.left, root.val) & isUnival(root.right, root.val):
ans += 1
return root.val == val
return False
isUnival(root, math.inf)
return ans
| Solution |
python | tensorflow__tensorflow | tensorflow/python/ops/bincount_ops_test.py | {
"start": 13785,
"end": 18895
} | class ____(test.TestCase, parameterized.TestCase):
def testSparseCountSparseOutputBadIndicesShape(self):
indices = [[[0], [0]], [[0], [1]], [[1], [0]], [[1], [2]]]
values = [1, 1, 1, 10]
weights = [1, 2, 4, 6]
dense_shape = [2, 3]
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Input indices must be a 2-dimensional tensor"):
self.evaluate(
gen_count_ops.SparseCountSparseOutput(
indices=indices,
values=values,
dense_shape=dense_shape,
weights=weights,
binary_output=False))
def testSparseCountSparseOutputBadWeightsShape(self):
indices = [[0, 0], [0, 1], [1, 0], [1, 2]]
values = [1, 1, 1, 10]
weights = [1, 2, 4]
dense_shape = [2, 3]
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Weights and values must have the same shape"):
self.evaluate(
gen_count_ops.SparseCountSparseOutput(
indices=indices,
values=values,
dense_shape=dense_shape,
weights=weights,
binary_output=False))
def testSparseCountSparseOutputBadNumberOfValues(self):
indices = [[0, 0], [0, 1], [1, 0]]
values = [1, 1, 1, 10]
weights = [1, 2, 4, 6]
dense_shape = [2, 3]
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"Number of values must match first dimension of indices"):
self.evaluate(
gen_count_ops.SparseCountSparseOutput(
indices=indices,
values=values,
dense_shape=dense_shape,
weights=weights,
binary_output=False))
def testSparseCountSparseOutputNegativeValue(self):
indices = [[0, 0], [0, 1], [1, 0], [1, 2]]
values = [1, 1, -1, 10]
dense_shape = [2, 3]
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Input values must all be non-negative"):
self.evaluate(
gen_count_ops.SparseCountSparseOutput(
indices=indices,
values=values,
dense_shape=dense_shape,
binary_output=False))
def testRaggedCountSparseOutput(self):
splits = [0, 4, 7]
values = [1, 1, 2, 1, 2, 10, 5]
weights = [1, 2, 3, 4, 5, 6, 7]
output_indices, output_values, output_shape = self.evaluate(
gen_count_ops.RaggedCountSparseOutput(
splits=splits, values=values, weights=weights, binary_output=False))
self.assertAllEqual([[0, 1], [0, 2], [1, 2], [1, 5], [1, 10]],
output_indices)
self.assertAllEqual([7, 3, 5, 7, 6], output_values)
self.assertAllEqual([2, 11], output_shape)
def testRaggedCountSparseOutputBadWeightsShape(self):
splits = [0, 4, 7]
values = [1, 1, 2, 1, 2, 10, 5]
weights = [1, 2, 3, 4, 5, 6]
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Weights and values must have the same shape"):
self.evaluate(
gen_count_ops.RaggedCountSparseOutput(
splits=splits,
values=values,
weights=weights,
binary_output=False))
def testRaggedCountSparseOutputEmptySplits(self):
splits = []
values = [1, 1, 2, 1, 2, 10, 5]
weights = [1, 2, 3, 4, 5, 6, 7]
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"Must provide at least 2 elements for the splits argument"):
self.evaluate(
gen_count_ops.RaggedCountSparseOutput(
splits=splits,
values=values,
weights=weights,
binary_output=False))
def testRaggedCountSparseOutputBadSplitsStart(self):
splits = [1, 7]
values = [1, 1, 2, 1, 2, 10, 5]
weights = [1, 2, 3, 4, 5, 6, 7]
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Splits must start with 0"):
self.evaluate(
gen_count_ops.RaggedCountSparseOutput(
splits=splits,
values=values,
weights=weights,
binary_output=False))
def testRaggedCountSparseOutputBadSplitsEnd(self):
splits = [0, 5]
values = [1, 1, 2, 1, 2, 10, 5]
weights = [1, 2, 3, 4, 5, 6, 7]
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Splits must end with the number of values"):
self.evaluate(
gen_count_ops.RaggedCountSparseOutput(
splits=splits,
values=values,
weights=weights,
binary_output=False))
def testRaggedCountSparseOutputNegativeValue(self):
splits = [0, 4, 7]
values = [1, 1, 2, 1, -2, 10, 5]
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Input values must all be non-negative"):
self.evaluate(
gen_count_ops.RaggedCountSparseOutput(
splits=splits, values=values, binary_output=False))
if __name__ == "__main__":
test.main()
| RawOpsTest |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_domains.py | {
"start": 417,
"end": 1255
} | class ____(TestCase):
def setUp(self):
self.project = get(Project, slug="kong")
def test_save_parsing(self):
domain = get(Domain, domain="google.com")
self.assertEqual(domain.domain, "google.com")
domain.domain = "google.com"
self.assertEqual(domain.domain, "google.com")
domain.domain = "https://google.com"
domain.save()
self.assertEqual(domain.domain, "google.com")
domain.domain = "www.google.com"
domain.save()
self.assertEqual(domain.domain, "www.google.com")
# We are using random domain names to test the form validation,
# so we are mocking the DNS resolver to avoid making real DNS queries.
@mock.patch(
"readthedocs.projects.forms.dns.resolver.resolve",
new=mock.MagicMock(side_effect=dns.resolver.NoAnswer),
)
| ModelTests |
python | doocs__leetcode | solution/2900-2999/2998.Minimum Number of Operations to Make X and Y Equal/Solution.py | {
"start": 0,
"end": 481
} | class ____:
def minimumOperationsToMakeEqual(self, x: int, y: int) -> int:
@cache
def dfs(x: int) -> int:
if y >= x:
return y - x
ans = x - y
ans = min(ans, x % 5 + 1 + dfs(x // 5))
ans = min(ans, 5 - x % 5 + 1 + dfs(x // 5 + 1))
ans = min(ans, x % 11 + 1 + dfs(x // 11))
ans = min(ans, 11 - x % 11 + 1 + dfs(x // 11 + 1))
return ans
return dfs(x)
| Solution |
python | tensorflow__tensorflow | tensorflow/python/ops/batch_norm_benchmark.py | {
"start": 4473,
"end": 10657
} | class ____(test.Benchmark):
"""Benchmark batch normalization."""
def _run_graph(self, device, input_shape, axes, num_layers, mode, scale,
train, num_iters):
"""Run the graph and print its execution time.
Args:
device: string, the device to run on.
input_shape: shape of the input tensor.
axes: axes that are to be normalized across.
num_layers: number of batch normalization layers in the graph.
mode: "op", "py" or "slow" depending on the implementation.
scale: scale after normalization.
train: if true, also run backprop.
num_iters: number of steps to run.
Returns:
The duration of the run in seconds.
"""
graph = ops.Graph()
with graph.as_default():
outputs = build_graph(device, input_shape, axes, num_layers, mode, scale,
train)
with session_lib.Session(graph=graph) as session:
variables.global_variables_initializer().run()
_ = session.run([out.op for out in outputs]) # warm up.
start_time = time.time()
for _ in range(num_iters):
_ = session.run([out.op for out in outputs])
duration = time.time() - start_time
print("%s shape:%d/%d #layers:%d mode:%s scale:%r train:%r - %f secs" %
(device, len(input_shape), len(axes), num_layers, mode, scale, train,
duration / num_iters))
name_template = (
"batch_norm_{device}_input_shape_{shape}_axes_{axes}_mode_{mode}_"
"layers_{num_layers}_scale_{scale}_"
"train_{train}")
self.report_benchmark(
name=name_template.format(
device=device,
mode=mode,
num_layers=num_layers,
scale=scale,
train=train,
shape=str(input_shape).replace(" ", ""),
axes=str(axes)).replace(" ", ""),
iters=num_iters,
wall_time=duration / num_iters)
return duration
def benchmark_batch_norm(self):
print("Forward convolution (lower layers).")
shape = [8, 128, 128, 32]
axes = [0, 1, 2]
t1 = self._run_graph("cpu", shape, axes, 10, "op", True, False, 5)
t2 = self._run_graph("cpu", shape, axes, 10, "py", True, False, 5)
t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, False, 5)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "op", True, False, 50)
t2 = self._run_graph("gpu", shape, axes, 10, "py", True, False, 50)
t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, False, 50)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
print("Forward/backward convolution (lower layers).")
t1 = self._run_graph("cpu", shape, axes, 10, "op", True, True, 5)
t2 = self._run_graph("cpu", shape, axes, 10, "py", True, True, 5)
t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, True, 5)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "op", True, True, 50)
t2 = self._run_graph("gpu", shape, axes, 10, "py", True, True, 50)
t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, True, 50)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
print("Forward convolution (higher layers).")
shape = [256, 17, 17, 32]
axes = [0, 1, 2]
t1 = self._run_graph("cpu", shape, axes, 10, "op", True, False, 5)
t2 = self._run_graph("cpu", shape, axes, 10, "py", True, False, 5)
t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, False, 5)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "op", True, False, 50)
t2 = self._run_graph("gpu", shape, axes, 10, "py", True, False, 50)
t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, False, 50)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
print("Forward/backward convolution (higher layers).")
t1 = self._run_graph("cpu", shape, axes, 10, "op", True, True, 5)
t2 = self._run_graph("cpu", shape, axes, 10, "py", True, True, 5)
t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, True, 5)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "op", True, True, 50)
t2 = self._run_graph("gpu", shape, axes, 10, "py", True, True, 50)
t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, True, 50)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
print("Forward fully-connected.")
shape = [1024, 32]
axes = [0]
t1 = self._run_graph("cpu", shape, axes, 10, "py", True, False, 5)
t2 = self._run_graph("cpu", shape, axes, 10, "slow", True, False, 5)
print_difference("py vs slow", t1, t2)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "py", True, False, 50)
t2 = self._run_graph("gpu", shape, axes, 10, "slow", True, False, 50)
print_difference("py vs slow", t1, t2)
print("Forward/backward fully-connected.")
t1 = self._run_graph("cpu", shape, axes, 10, "py", True, True, 50)
t2 = self._run_graph("cpu", shape, axes, 10, "slow", True, True, 50)
print_difference("py vs slow", t1, t2)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "py", True, True, 5)
t2 = self._run_graph("gpu", shape, axes, 10, "slow", True, True, 5)
print_difference("py vs slow", t1, t2)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--use_gpu",
type="bool",
nargs="?",
const=True,
default=True,
help="Run GPU benchmarks."
)
global FLAGS # pylint:disable=global-at-module-level
FLAGS, unparsed = parser.parse_known_args()
test.main(argv=[sys.argv[0]] + unparsed)
| BatchNormBenchmark |
python | Lightning-AI__lightning | src/lightning/pytorch/strategies/launchers/multiprocessing.py | {
"start": 1829,
"end": 12575
} | class ____(_Launcher):
r"""Launches processes that run a given function in parallel, and joins them all at the end.
The main process in which this launcher is invoked creates N so-called worker processes (using
:func:`torch.multiprocessing.start_processes`) that run the given function.
Worker processes have a rank that ranges from 0 to N - 1.
Note:
- This launcher requires all objects to be pickleable.
- It is important that the entry point to the program/script is guarded by ``if __name__ == "__main__"``.
- With start method 'fork' the user must ensure that no CUDA context gets created in the main process before
the launcher is invoked. E.g., one should avoid creating cuda tensors or calling ``torch.cuda.*`` functions
before calling ``Trainer.fit``.
Args:
strategy: A reference to the strategy that is used together with this launcher.
start_method: The method how to start the processes.
- 'spawn': The default start method. Requires all objects to be pickleable.
- 'fork': Preferable for IPython/Jupyter environments where 'spawn' is not available. Not available on
the Windows platform for example.
- 'forkserver': Alternative implementation to 'fork'.
"""
def __init__(
self, strategy: "pl.strategies.ParallelStrategy", start_method: Literal["spawn", "fork", "forkserver"] = "spawn"
) -> None:
self._strategy = strategy
self._start_method = start_method
if start_method not in mp.get_all_start_methods():
raise ValueError(
f"The start method '{self._start_method}' is not available on this platform. Available methods are:"
f" {', '.join(mp.get_all_start_methods())}"
)
self.procs: list[mp.Process] = []
self._already_fit = False
@property
@override
def is_interactive_compatible(self) -> bool:
# The start method 'spawn' is not supported in interactive environments
# The start method 'fork' is the only one supported in Jupyter environments, with constraints around CUDA
# initialization. For more context, see https://github.com/Lightning-AI/pytorch-lightning/issues/7550
return self._start_method == "fork"
@override
def launch(self, function: Callable, *args: Any, trainer: Optional["pl.Trainer"] = None, **kwargs: Any) -> Any:
"""Launches processes that run the given function in parallel.
The function is allowed to have a return value. However, when all processes join, only the return value
of worker process 0 gets returned from this `launch` method in the main process.
Arguments:
function: The entry point for all launched processes.
*args: Optional positional arguments to be passed to the given function.
trainer: Optional reference to the :class:`~lightning.pytorch.trainer.trainer.Trainer` for which
a selected set of attributes get restored in the main process after processes join.
**kwargs: Optional keyword arguments to be passed to the given function.
"""
if self._start_method in ("fork", "forkserver"):
_check_bad_cuda_fork()
if self._start_method == "spawn":
_check_missing_main_guard()
if self._already_fit and trainer is not None and trainer.state.fn == TrainerFn.FITTING:
# resolving https://github.com/Lightning-AI/pytorch-lightning/issues/18775 will lift this restriction
raise NotImplementedError(
"Calling `trainer.fit()` twice on the same Trainer instance using a spawn-based strategy is not"
" supported. You can work around this limitation by creating a new Trainer instance and passing the"
" `fit(ckpt_path=...)` argument."
)
# The default cluster environment in Lightning chooses a random free port number
# This needs to be done in the main process here before starting processes to ensure each rank will connect
# through the same port
assert self._strategy.cluster_environment is not None
os.environ["MASTER_PORT"] = str(self._strategy.cluster_environment.main_port)
context = mp.get_context(self._start_method)
return_queue = context.SimpleQueue()
if self._start_method == "spawn":
global_states = _GlobalStateSnapshot.capture()
process_args = [trainer, function, args, kwargs, return_queue, global_states]
else:
process_args = [trainer, function, args, kwargs, return_queue]
process_context = mp.start_processes(
self._wrapping_function,
args=process_args,
nprocs=self._strategy.num_processes,
start_method=self._start_method,
join=False, # we will join ourselves to get the process references
)
self.procs = process_context.processes
while not process_context.join():
pass
worker_output = return_queue.get()
if trainer is None:
return worker_output
self._already_fit |= trainer.state.fn == TrainerFn.FITTING
self._recover_results_in_main_process(worker_output, trainer)
return worker_output.trainer_results
def _wrapping_function(
self,
process_idx: int,
trainer: Optional["pl.Trainer"],
function: Callable,
args: Any,
kwargs: Any,
return_queue: Union[mp.SimpleQueue, queue.Queue],
global_states: Optional["_GlobalStateSnapshot"] = None,
) -> None:
if global_states:
global_states.restore()
if self._start_method == "spawn" and isinstance(self._strategy.accelerator, CPUAccelerator):
args, kwargs = _disable_module_memory_sharing((args, kwargs))
_set_num_threads_if_needed(num_processes=self._strategy.num_processes)
os.environ["LOCAL_RANK"] = str(process_idx)
results = function(*args, **kwargs)
if trainer is not None:
results = self._collect_rank_zero_results(trainer, results)
if process_idx == 0:
return_queue.put(move_data_to_device(results, "cpu"))
def _recover_results_in_main_process(self, worker_output: "_WorkerOutput", trainer: "pl.Trainer") -> None:
# transfer back the best path to the trainer
if trainer.checkpoint_callback and hasattr(trainer.checkpoint_callback, "best_model_path"):
trainer.checkpoint_callback.best_model_path = str(worker_output.best_model_path)
# TODO: pass also best score
# load last weights
if worker_output.weights_path is not None:
ckpt = self._strategy.checkpoint_io.load_checkpoint(worker_output.weights_path)
# choose non-strict loading of parameters on the main process, because the model's composition
# could have changed in the worker process (layers added or removed)
trainer.lightning_module.load_state_dict(ckpt, strict=False)
self._strategy.checkpoint_io.remove_checkpoint(worker_output.weights_path)
trainer.state = worker_output.trainer_state
# get the `callback_metrics` and set it to the trainer
self.update_main_process_results(trainer, worker_output.extra)
def _collect_rank_zero_results(self, trainer: "pl.Trainer", results: Any) -> Optional["_WorkerOutput"]:
rank_zero_debug("Collecting results from rank 0 process.")
checkpoint_callback = trainer.checkpoint_callback
best_model_path = (
checkpoint_callback.best_model_path
if checkpoint_callback and hasattr(checkpoint_callback, "best_model_path")
else None
)
# requires to compute the state_dict on all processes in case Metrics are present
state_dict = trainer.lightning_module.state_dict()
if self._strategy.local_rank != 0:
return None
# save the last weights
weights_path = None
if trainer.state.fn == TrainerFn.FITTING:
# use tempdir here to avoid race conditions because the filesystem may be shared between nodes
weights_path = os.path.join(tempfile.mkdtemp(), ".temp.ckpt")
self._strategy.checkpoint_io.save_checkpoint(state_dict, weights_path)
# add extra result data from trainer to send to main process
extra = self.get_extra_results(trainer)
return _WorkerOutput(best_model_path, weights_path, trainer.state, results, extra)
def get_extra_results(self, trainer: "pl.Trainer") -> dict[str, Any]:
"""Gather extra state from the Trainer and return it as a dictionary for sending back to the main process. To
avoid issues with memory sharing, we convert tensors to bytes.
Args:
trainer: reference to the Trainer.
Returns:
A dictionary with items to send back to the main process where :meth:`update_main_process_results` will
process this output.
"""
callback_metrics = apply_to_collection(trainer.callback_metrics, Tensor, lambda t: t.cpu())
buffer = io.BytesIO()
torch.save(callback_metrics, buffer)
# send tensors as bytes to avoid issues with memory sharing
return {"callback_metrics_bytes": buffer.getvalue()}
def update_main_process_results(self, trainer: "pl.Trainer", extra: dict[str, Any]) -> None:
"""Retrieve the :attr:`trainer.callback_metrics` dictionary from the given queue. To preserve consistency, we
convert bytes back to ``torch.Tensor``.
Args:
trainer: reference to the Trainer.
extra: A dictionary with trainer state that was sent from the worker process and needs to be restored
on the current trainer.
"""
# NOTE: `get_extra_results` needs to be called before
callback_metrics_bytes = extra["callback_metrics_bytes"]
callback_metrics = torch.load(io.BytesIO(callback_metrics_bytes), weights_only=True)
trainer.callback_metrics.update(callback_metrics)
@override
def kill(self, signum: _SIGNUM) -> None:
for proc in self.procs:
if proc.is_alive() and proc.pid is not None:
log.debug(f"Process {os.getpid()} is terminating {proc.pid} with {signum}")
with suppress(ProcessLookupError):
os.kill(proc.pid, signum)
def __getstate__(self) -> dict:
state = self.__dict__.copy()
state["procs"] = [] # SpawnProcess can't be pickled
return state
| _MultiProcessingLauncher |
python | huggingface__transformers | tests/models/flava/test_modeling_flava.py | {
"start": 42001,
"end": 43184
} | class ____(FlavaModelTest):
all_model_classes = (FlavaForPreTraining,) if is_torch_available() else ()
class_for_tester = FlavaForPreTrainingTester
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
# We will verify our results on an image of cute cats
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url, stream=True).raw)
return im
@require_vision
@require_torch
| FlavaForPreTrainingTest |
python | pyca__cryptography | src/cryptography/hazmat/primitives/_asymmetric.py | {
"start": 335,
"end": 532
} | class ____(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def name(self) -> str:
"""
A string naming this padding (e.g. "PSS", "PKCS1").
"""
| AsymmetricPadding |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 538364,
"end": 538788
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of CreateIssue"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "issue")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
issue = sgqlc.types.Field("Issue", graphql_name="issue")
"""The new issue."""
| CreateIssuePayload |
python | falconry__falcon | tests/test_after_hooks.py | {
"start": 2590,
"end": 3011
} | class ____:
@falcon.after(serialize_body)
@falcon.after(validate_output)
def on_get(self, req, resp):
self.req = req
self.resp = resp
@falcon.after(serialize_body)
def on_put(self, req, resp):
self.req = req
self.resp = resp
resp.text = {'animal': 'falcon'}
@falcon.after(Smartness())
def on_post(self, req, resp):
pass
| WrappedRespondersResource |
python | numba__numba | numba/core/callconv.py | {
"start": 36072,
"end": 36473
} | class ____(object):
def __init__(self, call_conv):
self.call_conv = call_conv
def fp_zero_division(self, builder, exc_args=None, loc=None):
if self.raise_on_fp_zero_division:
self.call_conv.return_user_exc(builder, ZeroDivisionError, exc_args,
loc)
return True
else:
return False
| ErrorModel |
python | encode__django-rest-framework | rest_framework/utils/serializer_helpers.py | {
"start": 156,
"end": 1329
} | class ____(dict):
"""
Return object from `serializer.data` for the `Serializer` class.
Includes a backlink to the serializer instance for renderers
to use if they need richer field information.
"""
def __init__(self, *args, **kwargs):
self.serializer = kwargs.pop('serializer')
super().__init__(*args, **kwargs)
def copy(self):
return ReturnDict(self, serializer=self.serializer)
def __repr__(self):
return dict.__repr__(self)
def __reduce__(self):
# Pickling these objects will drop the .serializer backlink,
# but preserve the raw data.
return (dict, (dict(self),))
# These are basically copied from OrderedDict, with `serializer` added.
def __or__(self, other):
if not isinstance(other, dict):
return NotImplemented
new = self.__class__(self, serializer=self.serializer)
new.update(other)
return new
def __ror__(self, other):
if not isinstance(other, dict):
return NotImplemented
new = self.__class__(other, serializer=self.serializer)
new.update(self)
return new
| ReturnDict |
python | Netflix__metaflow | metaflow/_vendor/importlib_metadata/__init__.py | {
"start": 9704,
"end": 10900
} | class ____:
"""
Compatibility add-in for mapping to indicate that
mapping behavior is deprecated.
>>> recwarn = getfixture('recwarn')
>>> class DeprecatedDict(Deprecated, dict): pass
>>> dd = DeprecatedDict(foo='bar')
>>> dd.get('baz', None)
>>> dd['foo']
'bar'
>>> list(dd)
['foo']
>>> list(dd.keys())
['foo']
>>> 'foo' in dd
True
>>> list(dd.values())
['bar']
>>> len(recwarn)
1
"""
_warn = functools.partial(
warnings.warn,
"SelectableGroups dict interface is deprecated. Use select.",
DeprecationWarning,
stacklevel=pypy_partial(2),
)
def __getitem__(self, name):
self._warn()
return super().__getitem__(name)
def get(self, name, default=None):
self._warn()
return super().get(name, default)
def __iter__(self):
self._warn()
return super().__iter__()
def __contains__(self, *args):
self._warn()
return super().__contains__(*args)
def keys(self):
self._warn()
return super().keys()
def values(self):
self._warn()
return super().values()
| Deprecated |
python | scrapy__scrapy | tests/test_downloader_handlers_http_base.py | {
"start": 23481,
"end": 24656
} | class ____(TestHttp11Base):
is_secure = True
tls_log_message = (
'SSL connection certificate: issuer "/C=IE/O=Scrapy/CN=localhost", '
'subject "/C=IE/O=Scrapy/CN=localhost"'
)
@deferred_f_from_coro_f
async def test_tls_logging(self, mockserver: MockServer) -> None:
crawler = get_crawler(
settings_dict={"DOWNLOADER_CLIENT_TLS_VERBOSE_LOGGING": True}
)
download_handler = build_from_crawler(self.download_handler_cls, crawler)
try:
with LogCapture() as log_capture:
request = Request(mockserver.url("/text", is_secure=self.is_secure))
response = await maybe_deferred_to_future(
download_handler.download_request(request, DefaultSpider())
)
assert response.body == b"Works"
log_capture.check_present(
("scrapy.core.downloader.tls", "DEBUG", self.tls_log_message)
)
finally:
d = download_handler.close() # type: ignore[attr-defined]
if d is not None:
await maybe_deferred_to_future(d)
| TestHttps11Base |
python | sphinx-doc__sphinx | sphinx/addnodes.py | {
"start": 13716,
"end": 13848
} | class ____(nodes.Element):
"""Node for "only" directives (conditional inclusion based on tags)."""
# meta-information nodes
| only |
python | keras-team__keras | keras/src/layers/preprocessing/image_preprocessing/aug_mix.py | {
"start": 846,
"end": 11221
} | class ____(BaseImagePreprocessingLayer):
"""Performs the AugMix data augmentation technique.
AugMix aims to produce images with variety while preserving the image
semantics and local statistics. During the augmentation process,
the same augmentation is applied across all images in the batch
in num_chains different ways, with each chain consisting of
chain_depth augmentations.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
References:
- [AugMix paper](https://arxiv.org/pdf/1912.02781)
- [Official Code](https://github.com/google-research/augmix)
Args:
value_range: the range of values the incoming images will have.
Represented as a two number tuple written (low, high).
This is typically either `(0, 1)` or `(0, 255)` depending
on how your preprocessing pipeline is set up.
num_chains: an integer representing the number of different chains to
be mixed, defaults to 3.
chain_depth: an integer representing the maximum number of
transformations to be applied in each chain. The actual number
of transformations in each chain will be sampled randomly
from the range `[0, `chain_depth`]`. Defaults to 3.
factor: The strength of the augmentation as a normalized value
between 0 and 1. Default is 0.3.
alpha: a float value used as the probability coefficients for the
Beta and Dirichlet distributions, defaults to 1.0.
all_ops: Use all operations (including random_brightness,
random_color_degeneration, random_contrast and random_sharpness).
Default is True.
interpolation: The interpolation method to use for resizing operations.
Options include `"nearest"`, `"bilinear"`. Default is `"bilinear"`.
seed: Integer. Used to create a random seed.
"""
_USE_BASE_FACTOR = False
_FACTOR_BOUNDS = (0, 1)
def __init__(
self,
value_range=(0, 255),
num_chains=3,
chain_depth=3,
factor=0.3,
alpha=1.0,
all_ops=True,
interpolation="bilinear",
seed=None,
data_format=None,
**kwargs,
):
super().__init__(data_format=data_format, **kwargs)
self.value_range = value_range
self.num_chains = num_chains
self.chain_depth = chain_depth
self._set_factor(factor)
self.alpha = alpha
self.all_ops = all_ops
self.interpolation = interpolation
self.seed = seed
self.generator = SeedGenerator(seed)
if self.all_ops:
self._augment_layers = AUGMENT_LAYERS_ALL
else:
self._augment_layers = AUGMENT_LAYERS
self.random_shear = layers.RandomShear(
x_factor=self.factor,
y_factor=self.factor,
interpolation=interpolation,
seed=self.seed,
data_format=data_format,
**kwargs,
)
self.random_translation = layers.RandomTranslation(
height_factor=self.factor,
width_factor=self.factor,
interpolation=interpolation,
seed=self.seed,
data_format=data_format,
**kwargs,
)
self.random_rotation = layers.RandomRotation(
factor=self.factor,
interpolation=interpolation,
seed=self.seed,
data_format=data_format,
**kwargs,
)
self.solarization = layers.Solarization(
addition_factor=self.factor,
threshold_factor=self.factor,
value_range=self.value_range,
seed=self.seed,
data_format=data_format,
**kwargs,
)
self.random_posterization = layers.RandomPosterization(
factor=max(1, int(8 * self.factor[1])),
value_range=self.value_range,
seed=self.seed,
data_format=data_format,
**kwargs,
)
self.auto_contrast = layers.AutoContrast(
value_range=self.value_range, data_format=data_format, **kwargs
)
self.equalization = layers.Equalization(
value_range=self.value_range, data_format=data_format, **kwargs
)
if self.all_ops:
self.random_brightness = layers.RandomBrightness(
factor=self.factor,
value_range=self.value_range,
seed=self.seed,
data_format=data_format,
**kwargs,
)
self.random_color_degeneration = layers.RandomColorDegeneration(
factor=self.factor,
value_range=self.value_range,
seed=self.seed,
data_format=data_format,
**kwargs,
)
self.random_contrast = layers.RandomContrast(
factor=self.factor,
value_range=self.value_range,
seed=self.seed,
data_format=data_format,
**kwargs,
)
self.random_sharpness = layers.RandomSharpness(
factor=self.factor,
value_range=self.value_range,
seed=self.seed,
data_format=data_format,
**kwargs,
)
def build(self, input_shape):
for layer_name in self._augment_layers:
augmentation_layer = getattr(self, layer_name)
augmentation_layer.build(input_shape)
def _sample_from_dirichlet(self, shape, alpha, seed):
gamma_sample = self.backend.random.gamma(
shape=shape,
alpha=alpha,
seed=seed,
)
return gamma_sample / self.backend.numpy.sum(
gamma_sample, axis=-1, keepdims=True
)
def get_random_transformation(self, data, training=True, seed=None):
if not training:
return None
if backend_utils.in_tf_graph():
self.backend.set_backend("tensorflow")
for layer_name in self._augment_layers:
augmentation_layer = getattr(self, layer_name)
augmentation_layer.backend.set_backend("tensorflow")
seed = seed or self._get_seed_generator(self.backend._backend)
chain_mixing_weights = self._sample_from_dirichlet(
[self.num_chains], self.alpha, seed
)
weight_sample = self.backend.random.beta(
shape=(),
alpha=self.alpha,
beta=self.alpha,
seed=seed,
)
chain_transforms = []
for _ in range(self.num_chains):
depth_transforms = []
for _ in range(self.chain_depth):
layer_name = py_random.choice(self._augment_layers + [None])
if layer_name is None:
continue
augmentation_layer = getattr(self, layer_name)
depth_transforms.append(
{
"layer_name": layer_name,
"transformation": (
augmentation_layer.get_random_transformation(
data,
seed=self._get_seed_generator(
self.backend._backend
),
)
),
}
)
chain_transforms.append(depth_transforms)
transformation = {
"chain_mixing_weights": chain_mixing_weights,
"weight_sample": weight_sample,
"chain_transforms": chain_transforms,
}
return transformation
def transform_images(self, images, transformation, training=True):
if training:
images = self.backend.cast(images, self.compute_dtype)
chain_mixing_weights = self.backend.cast(
transformation["chain_mixing_weights"], dtype=self.compute_dtype
)
weight_sample = self.backend.cast(
transformation["weight_sample"], dtype=self.compute_dtype
)
chain_transforms = transformation["chain_transforms"]
aug_images = self.backend.numpy.zeros_like(images)
for idx, chain_transform in enumerate(chain_transforms):
copied_images = self.backend.numpy.copy(images)
for depth_transform in chain_transform:
layer_name = depth_transform["layer_name"]
layer_transform = depth_transform["transformation"]
augmentation_layer = getattr(self, layer_name)
copied_images = augmentation_layer.transform_images(
copied_images, layer_transform
)
aug_images += copied_images * chain_mixing_weights[idx]
images = weight_sample * images + (1 - weight_sample) * aug_images
images = self.backend.numpy.clip(
images, self.value_range[0], self.value_range[1]
)
images = self.backend.cast(images, self.compute_dtype)
return images
def transform_labels(self, labels, transformation, training=True):
return labels
def transform_bounding_boxes(
self,
bounding_boxes,
transformation,
training=True,
):
return bounding_boxes
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return self.transform_images(
segmentation_masks, transformation, training=training
)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
"value_range": self.value_range,
"num_chains": self.chain_depth,
"chain_depth": self.num_chains,
"factor": self.factor,
"alpha": self.alpha,
"all_ops": self.all_ops,
"interpolation": self.interpolation,
"seed": self.seed,
}
base_config = super().get_config()
return {**base_config, **config}
| AugMix |
python | tiangolo__fastapi | docs_src/security/tutorial005_an.py | {
"start": 1541,
"end": 5498
} | class ____(User):
hashed_password: str
password_hash = PasswordHash.recommended()
oauth2_scheme = OAuth2PasswordBearer(
tokenUrl="token",
scopes={"me": "Read information about the current user.", "items": "Read items."},
)
app = FastAPI()
def verify_password(plain_password, hashed_password):
return password_hash.verify(plain_password, hashed_password)
def get_password_hash(password):
return password_hash.hash(password)
def get_user(db, username: str):
if username in db:
user_dict = db[username]
return UserInDB(**user_dict)
def authenticate_user(fake_db, username: str, password: str):
user = get_user(fake_db, username)
if not user:
return False
if not verify_password(password, user.hashed_password):
return False
return user
def create_access_token(data: dict, expires_delta: Union[timedelta, None] = None):
to_encode = data.copy()
if expires_delta:
expire = datetime.now(timezone.utc) + expires_delta
else:
expire = datetime.now(timezone.utc) + timedelta(minutes=15)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
async def get_current_user(
security_scopes: SecurityScopes, token: Annotated[str, Depends(oauth2_scheme)]
):
if security_scopes.scopes:
authenticate_value = f'Bearer scope="{security_scopes.scope_str}"'
else:
authenticate_value = "Bearer"
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": authenticate_value},
)
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
username = payload.get("sub")
if username is None:
raise credentials_exception
scope: str = payload.get("scope", "")
token_scopes = scope.split(" ")
token_data = TokenData(scopes=token_scopes, username=username)
except (InvalidTokenError, ValidationError):
raise credentials_exception
user = get_user(fake_users_db, username=token_data.username)
if user is None:
raise credentials_exception
for scope in security_scopes.scopes:
if scope not in token_data.scopes:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Not enough permissions",
headers={"WWW-Authenticate": authenticate_value},
)
return user
async def get_current_active_user(
current_user: Annotated[User, Security(get_current_user, scopes=["me"])],
):
if current_user.disabled:
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
@app.post("/token")
async def login_for_access_token(
form_data: Annotated[OAuth2PasswordRequestForm, Depends()],
) -> Token:
user = authenticate_user(fake_users_db, form_data.username, form_data.password)
if not user:
raise HTTPException(status_code=400, detail="Incorrect username or password")
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(
data={"sub": user.username, "scope": " ".join(form_data.scopes)},
expires_delta=access_token_expires,
)
return Token(access_token=access_token, token_type="bearer")
@app.get("/users/me/", response_model=User)
async def read_users_me(
current_user: Annotated[User, Depends(get_current_active_user)],
):
return current_user
@app.get("/users/me/items/")
async def read_own_items(
current_user: Annotated[User, Security(get_current_active_user, scopes=["items"])],
):
return [{"item_id": "Foo", "owner": current_user.username}]
@app.get("/status/")
async def read_system_status(current_user: Annotated[User, Depends(get_current_user)]):
return {"status": "ok"}
| UserInDB |
python | PyCQA__pyflakes | pyflakes/test/test_imports.py | {
"start": 28250,
"end": 33939
} | class ____(TestCase):
"""
Tests for suppression of unused import warnings by C{__all__}.
"""
def test_ignoredInFunction(self):
"""
An C{__all__} definition does not suppress unused import warnings in a
function scope.
"""
self.flakes('''
def foo():
import bar
__all__ = ["bar"]
''', m.UnusedImport, m.UnusedVariable)
def test_ignoredInClass(self):
"""
An C{__all__} definition in a class does not suppress unused import warnings.
"""
self.flakes('''
import bar
class foo:
__all__ = ["bar"]
''', m.UnusedImport)
def test_ignored_when_not_directly_assigned(self):
self.flakes('''
import bar
(__all__,) = ("foo",)
''', m.UnusedImport)
def test_warningSuppressed(self):
"""
If a name is imported and unused but is named in C{__all__}, no warning
is reported.
"""
self.flakes('''
import foo
__all__ = ["foo"]
''')
self.flakes('''
import foo
__all__ = ("foo",)
''')
def test_augmentedAssignment(self):
"""
The C{__all__} variable is defined incrementally.
"""
self.flakes('''
import a
import c
__all__ = ['a']
__all__ += ['b']
if 1 < 3:
__all__ += ['c', 'd']
''', m.UndefinedExport, m.UndefinedExport)
def test_list_concatenation_assignment(self):
"""
The C{__all__} variable is defined through list concatenation.
"""
self.flakes('''
import sys
__all__ = ['a'] + ['b'] + ['c']
''', m.UndefinedExport, m.UndefinedExport, m.UndefinedExport, m.UnusedImport)
def test_tuple_concatenation_assignment(self):
"""
The C{__all__} variable is defined through tuple concatenation.
"""
self.flakes('''
import sys
__all__ = ('a',) + ('b',) + ('c',)
''', m.UndefinedExport, m.UndefinedExport, m.UndefinedExport, m.UnusedImport)
def test_all_with_attributes(self):
self.flakes('''
from foo import bar
__all__ = [bar.__name__]
''')
def test_all_with_names(self):
# not actually valid, but shouldn't produce a crash
self.flakes('''
from foo import bar
__all__ = [bar]
''')
def test_all_with_attributes_added(self):
self.flakes('''
from foo import bar
from bar import baz
__all__ = [bar.__name__] + [baz.__name__]
''')
def test_all_mixed_attributes_and_strings(self):
self.flakes('''
from foo import bar
from foo import baz
__all__ = ['bar', baz.__name__]
''')
def test_unboundExported(self):
"""
If C{__all__} includes a name which is not bound, a warning is emitted.
"""
self.flakes('''
__all__ = ["foo"]
''', m.UndefinedExport)
# Skip this in __init__.py though, since the rules there are a little
# different.
for filename in ["foo/__init__.py", "__init__.py"]:
self.flakes('''
__all__ = ["foo"]
''', filename=filename)
def test_importStarExported(self):
"""
Report undefined if import * is used
"""
self.flakes('''
from math import *
__all__ = ['sin', 'cos']
csc(1)
''', m.ImportStarUsed, m.ImportStarUsage, m.ImportStarUsage, m.ImportStarUsage)
def test_importStarNotExported(self):
"""Report unused import when not needed to satisfy __all__."""
self.flakes('''
from foolib import *
a = 1
__all__ = ['a']
''', m.ImportStarUsed, m.UnusedImport)
def test_usedInGenExp(self):
"""
Using a global in a generator expression results in no warnings.
"""
self.flakes('import fu; (fu for _ in range(1))')
self.flakes('import fu; (1 for _ in range(1) if fu)')
def test_redefinedByGenExp(self):
"""
Re-using a global name as the loop variable for a generator
expression results in a redefinition warning.
"""
self.flakes('import fu; (1 for fu in range(1))',
m.RedefinedWhileUnused, m.UnusedImport)
def test_usedAsDecorator(self):
"""
Using a global name in a decorator statement results in no warnings,
but using an undefined name in a decorator statement results in an
undefined name warning.
"""
self.flakes('''
from interior import decorate
@decorate
def f():
return "hello"
''')
self.flakes('''
from interior import decorate
@decorate('value')
def f():
return "hello"
''')
self.flakes('''
@decorate
def f():
return "hello"
''', m.UndefinedName)
def test_usedAsClassDecorator(self):
"""
Using an imported name as a class decorator results in no warnings,
but using an undefined name as a class decorator results in an
undefined name warning.
"""
self.flakes('''
from interior import decorate
@decorate
class foo:
pass
''')
self.flakes('''
from interior import decorate
@decorate("foo")
class bar:
pass
''')
self.flakes('''
@decorate
class foo:
pass
''', m.UndefinedName)
| TestSpecialAll |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/concepts/ops_jobs_graphs/jobs_with_config_mapping.py | {
"start": 291,
"end": 807
} | class ____(Config):
simplified_param: str
@config_mapping
def simplified_config(val: SimplifiedConfig) -> RunConfig:
return RunConfig(
ops={"do_something": DoSomethingConfig(config_param=val.simplified_param)}
)
@job(config=simplified_config)
def do_it_all_with_simplified_config():
do_something()
if __name__ == "__main__":
# Will log "config_param: stuff"
do_it_all_with_simplified_config.execute_in_process(
run_config={"simplified_param": "stuff"}
)
| SimplifiedConfig |
python | getsentry__sentry | src/sentry/core/endpoints/project_details.py | {
"start": 18469,
"end": 18822
} | class ____(ProjectPermission):
scope_map = {
"GET": ["project:read", "project:write", "project:admin"],
"POST": ["project:write", "project:admin"],
# PUT checks for permissions based on fields
"PUT": ["project:read", "project:write", "project:admin"],
"DELETE": ["project:admin"],
}
| RelaxedProjectPermission |
python | huggingface__transformers | tests/models/marian/test_modeling_marian.py | {
"start": 15190,
"end": 17742
} | class ____(unittest.TestCase):
src = "en"
tgt = "de"
src_text = [
"I am a small frog.",
"Now I can forget the 100 words of german that I know.",
"Tom asked his teacher for advice.",
"That's how I would do it.",
"Tom really admired Mary's courage.",
"Turn around and close your eyes.",
]
expected_text = [
"Ich bin ein kleiner Frosch.",
"Jetzt kann ich die 100 Wörter des Deutschen vergessen, die ich kenne.",
"Tom bat seinen Lehrer um Rat.",
"So würde ich das machen.",
"Tom bewunderte Marias Mut wirklich.",
"Drehen Sie sich um und schließen Sie die Augen.",
]
# ^^ actual C++ output differs slightly: (1) des Deutschen removed, (2) ""-> "O", (3) tun -> machen
@classmethod
def setUpClass(cls) -> None:
cls.model_name = f"Helsinki-NLP/opus-mt-{cls.src}-{cls.tgt}"
return cls
@cached_property
def tokenizer(self):
return AutoTokenizer.from_pretrained(self.model_name)
@property
def eos_token_id(self) -> int:
return self.tokenizer.eos_token_id
@cached_property
def model(self):
model: MarianMTModel = AutoModelWithLMHead.from_pretrained(self.model_name).to(torch_device)
c = model.config
self.assertListEqual(c.bad_words_ids, [[c.pad_token_id]])
self.assertEqual(c.max_length, 512)
self.assertEqual(c.decoder_start_token_id, c.pad_token_id)
if torch_device == "cuda":
return model.half()
else:
return model
def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
generated_words = self.translate_src_text(**tokenizer_kwargs)
self.assertListEqual(self.expected_text, generated_words)
def translate_src_text(self, **tokenizer_kwargs):
model_inputs = self.tokenizer(self.src_text, padding=True, return_tensors="pt", **tokenizer_kwargs).to(
torch_device
)
self.assertEqual(self.model.device, model_inputs.input_ids.device)
generated_ids = self.model.generate(
model_inputs.input_ids,
attention_mask=model_inputs.attention_mask,
num_beams=2,
max_length=128,
renormalize_logits=True, # Marian should always renormalize its logits. See #25459
)
generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
return generated_words
@require_sentencepiece
@require_tokenizers
| MarianIntegrationTest |
python | mlflow__mlflow | mlflow/types/schema.py | {
"start": 25286,
"end": 26737
} | class ____(BaseType):
def __init__(self):
"""
AnyType can store any json-serializable data including None values.
For example:
.. code-block::python
from mlflow.types.schema import AnyType, Schema, ColSpec
schema = Schema([ColSpec(type=AnyType(), name="id")])
.. Note::
AnyType should be used when the field is None, the type is not known
at the time of data creation, or the field can have multiple types.
e.g. for GenAI flavors, the model output could contain `None` values,
and `AnyType` can be used to represent them.
AnyType has no data validation at all, please be aware of this when
using it.
"""
def __repr__(self) -> str:
return "Any"
def __eq__(self, other) -> bool:
return isinstance(other, AnyType)
def to_dict(self):
return {"type": ANY_TYPE}
def _merge(self, other: BaseType) -> BaseType:
if self == other:
return deepcopy(self)
if isinstance(other, DataType):
return other
if not isinstance(other, BaseType):
raise MlflowException(
f"Can't merge AnyType with {type(other).__name__}, "
"it must be a BaseType or DataType"
)
# Merging AnyType with another type makes the other type optional
return other._merge(self)
| AnyType |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.