language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/go.py
|
{
"start": 427,
"end": 3701
}
|
class ____(RegexLexer):
    """
    For `Go <http://golang.org>`_ source.

    .. versionadded:: 1.2
    """
    name = 'Go'
    filenames = ['*.go']
    aliases = ['go']
    mimetypes = ['text/x-gosrc']

    # UNICODE so \w / \s in the rules below also match non-ASCII identifiers.
    flags = re.MULTILINE | re.UNICODE

    # NOTE: rule order matters — the first matching pattern wins, so more
    # specific rules (comments, keywords, literals) precede the catch-alls.
    tokens = {
        'root': [
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuations
            (r'//(.*?)\n', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'(import|package)\b', Keyword.Namespace),
            (r'(var|func|struct|map|chan|type|interface|const)\b',
             Keyword.Declaration),
            (words((
                'break', 'default', 'select', 'case', 'defer', 'go',
                'else', 'goto', 'switch', 'fallthrough', 'if', 'range',
                'continue', 'for', 'return'), suffix=r'\b'),
             Keyword),
            (r'(true|false|iota|nil)\b', Keyword.Constant),
            # It seems the builtin types aren't actually keywords, but
            # can be used as functions. So we need two declarations.
            (words((
                'uint', 'uint8', 'uint16', 'uint32', 'uint64',
                'int', 'int8', 'int16', 'int32', 'int64',
                'float', 'float32', 'float64',
                'complex64', 'complex128', 'byte', 'rune',
                'string', 'bool', 'error', 'uintptr',
                'print', 'println', 'panic', 'recover', 'close', 'complex',
                'real', 'imag', 'len', 'cap', 'append', 'copy', 'delete',
                'new', 'make'), suffix=r'\b(\()'),
             bygroups(Name.Builtin, Punctuation)),
            (words((
                'uint', 'uint8', 'uint16', 'uint32', 'uint64',
                'int', 'int8', 'int16', 'int32', 'int64',
                'float', 'float32', 'float64',
                'complex64', 'complex128', 'byte', 'rune',
                'string', 'bool', 'error', 'uintptr'), suffix=r'\b'),
             Keyword.Type),
            # imaginary_lit
            (r'\d+i', Number),
            (r'\d+\.\d*([Ee][-+]\d+)?i', Number),
            (r'\.\d+([Ee][-+]\d+)?i', Number),
            (r'\d+[Ee][-+]\d+i', Number),
            # float_lit
            (r'\d+(\.\d+[eE][+\-]?\d+|'
             r'\.\d*|[eE][+\-]?\d+)', Number.Float),
            (r'\.\d+([eE][+\-]?\d+)?', Number.Float),
            # int_lit
            # -- octal_lit
            (r'0[0-7]+', Number.Oct),
            # -- hex_lit
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            # -- decimal_lit
            (r'(0|[1-9][0-9]*)', Number.Integer),
            # char_lit
            (r"""'(\\['"\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
             r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|[^\\])'""",
             String.Char),
            # StringLiteral
            # -- raw_string_lit
            (r'`[^`]*`', String),
            # -- interpreted_string_lit
            (r'"(\\\\|\\"|[^"])*"', String),
            # Tokens -- longest operators listed first so e.g. '<<=' is
            # matched before '<<' and '<'.
            (r'(<<=|>>=|<<|>>|<=|>=|&\^=|&\^|\+=|-=|\*=|/=|%=|&=|\|=|&&|\|\|'
             r'|<-|\+\+|--|==|!=|:=|\.\.\.|[+\-*/%&])', Operator),
            (r'[|^<>=!()\[\]{}.,;:]', Punctuation),
            # identifier
            (r'[^\W\d]\w*', Name.Other),
        ]
    }
|
GoLexer
|
python
|
ray-project__ray
|
python/ray/_private/thirdparty/pynvml/pynvml.py
|
{
"start": 101308,
"end": 102760
}
|
class ____(Structure):
    # ctypes mirror of the NVML dynamic-pstates info struct: a flags word
    # followed by one utilization entry per domain
    # (NVML_MAX_GPU_UTILIZATIONS elements).
    _fields_ = [("flags", c_uint),
                ("utilization", c_nvmlGpuDynamicPstatesUtilization_t * NVML_MAX_GPU_UTILIZATIONS)]
# Thermal constants — names mirror the NVML C API's thermal enums.
NVML_MAX_THERMAL_SENSORS_PER_GPU = 3

# Thermal target values (NVML_THERMAL_TARGET_*).
NVML_THERMAL_TARGET_NONE = 0
NVML_THERMAL_TARGET_GPU = 1
NVML_THERMAL_TARGET_MEMORY = 2
NVML_THERMAL_TARGET_POWER_SUPPLY = 4
NVML_THERMAL_TARGET_BOARD = 8
NVML_THERMAL_TARGET_VCD_BOARD = 9
NVML_THERMAL_TARGET_VCD_INLET = 10
NVML_THERMAL_TARGET_VCD_OUTLET = 11
NVML_THERMAL_TARGET_ALL = 15
NVML_THERMAL_TARGET_UNKNOWN = -1

# Thermal controller values (NVML_THERMAL_CONTROLLER_*).
NVML_THERMAL_CONTROLLER_NONE = 0
NVML_THERMAL_CONTROLLER_GPU_INTERNAL = 1
NVML_THERMAL_CONTROLLER_ADM1032 = 2
NVML_THERMAL_CONTROLLER_ADT7461 = 3
NVML_THERMAL_CONTROLLER_MAX6649 = 4
NVML_THERMAL_CONTROLLER_MAX1617 = 5
NVML_THERMAL_CONTROLLER_LM99 = 6
NVML_THERMAL_CONTROLLER_LM89 = 7
NVML_THERMAL_CONTROLLER_LM64 = 8
NVML_THERMAL_CONTROLLER_G781 = 9
NVML_THERMAL_CONTROLLER_ADT7473 = 10
NVML_THERMAL_CONTROLLER_SBMAX6649 = 11
NVML_THERMAL_CONTROLLER_VBIOSEVT = 12
NVML_THERMAL_CONTROLLER_OS = 13
NVML_THERMAL_CONTROLLER_NVSYSCON_CANOAS = 14
NVML_THERMAL_CONTROLLER_NVSYSCON_E551 = 15
NVML_THERMAL_CONTROLLER_MAX6649R = 16
NVML_THERMAL_CONTROLLER_ADT7473S = 17
NVML_THERMAL_CONTROLLER_UNKNOWN = -1
|
c_nvmlGpuDynamicPstatesInfo_t
|
python
|
kamyu104__LeetCode-Solutions
|
Python/make-array-strictly-increasing.py
|
{
"start": 73,
"end": 851
}
|
class ____(object):
    def makeArrayIncreasing(self, arr1, arr2):
        """
        Minimum number of replacements (values drawn from arr2) needed to
        make arr1 strictly increasing, or -1 if impossible.

        :type arr1: List[int]
        :type arr2: List[int]
        :rtype: int
        """
        # Deduplicate and sort candidates so we can binary-search for the
        # smallest replacement strictly greater than the current value.
        arr2 = sorted(set(arr2))
        # dp maps a replacement count to the smallest value the processed
        # prefix of arr1 can end with using exactly that many replacements.
        dp = {0: -1}  # dp[min_cost] = end_with_val
        for val1 in arr1:
            next_dp = collections.defaultdict(lambda: float("inf"))
            # Use .items() (not the Python-2-only iteritems()) so this code
            # runs on both Python 2 and Python 3.
            for cost, val in dp.items():
                if val < val1:
                    # Keep arr1's own value: no extra replacement needed.
                    next_dp[cost] = min(next_dp[cost], val1)
                # Or replace the current element with the smallest arr2
                # value strictly greater than the previous ending value.
                k = bisect.bisect_right(arr2, val)
                if k == len(arr2):
                    continue
                next_dp[cost+1] = min(next_dp[cost+1], arr2[k])
            dp = next_dp
            if not dp:
                # No valid ending value exists for any cost: impossible.
                return -1
        # min over the dict iterates its keys (works on Python 2 and 3,
        # unlike the original iterkeys()).
        return min(dp)
|
Solution
|
python
|
pytorch__pytorch
|
torch/ao/quantization/quantizer/quantizer.py
|
{
"start": 847,
"end": 2188
}
|
class ____(QuantizationSpecBase):
    """Quantization spec for common operators that allows user to specify how to
    quantize a Tensor, this includes dtype, quant_min, quant_max etc.
    """

    dtype: torch.dtype
    # observer or fake_quantize constructor such as
    # MinMaxObserver, PerChannelHistogramObserver etc.
    # or we can attach some custom args to them
    # e.g. MinMaxObserver.with_args(eps=eps)
    observer_or_fake_quant_ctr: _ObserverOrFakeQuantizeConstructor
    quant_min: int | None = None
    quant_max: int | None = None
    qscheme: torch.qscheme | None = None
    ch_axis: int | None = None
    is_dynamic: bool = False

    def __post_init__(self):
        # TODO: add init for quant_min/quant_max
        # The quantization range must be well ordered when both ends are set.
        qmin, qmax = self.quant_min, self.quant_max
        if qmin is not None and qmax is not None:
            if qmin > qmax:
                raise ValueError(
                    f"quant_min {self.quant_min} must be <= quant_max {self.quant_max}."
                )
        # ch_axis should also be below the channel count, but that is not
        # knowable here — only reject negative values.
        if self.ch_axis is not None and self.ch_axis < 0:
            raise ValueError("Ch_axis is < 0.")
@dataclass(eq=True, frozen=True)
|
QuantizationSpec
|
python
|
scipy__scipy
|
scipy/optimize/_differentiable_functions.py
|
{
"start": 447,
"end": 1495
}
|
class ____:
    """Callable wrapper around a user-supplied gradient function or a
    finite-difference approximation of it, tracking evaluation counts.
    """

    def __init__(self, grad, fun=None, args=None, finite_diff_options=None):
        self.grad = grad
        self.fun = fun
        self.args = [] if args is None else args
        self.finite_diff_options = finite_diff_options
        # gradient evaluations performed through this wrapper
        self.ngev = 0
        # number of function evaluations consumed by finite difference
        self.nfev = 0

    def __call__(self, x, f0=None, **kwds):
        if callable(self.grad):
            # Pass a copy: the user callable may overwrite its argument, and
            # the caller's `x` must remain unchanged.
            g = np.atleast_1d(self.grad(np.copy(x), *self.args))
        elif self.grad in FD_METHODS:
            # Approximate the gradient numerically and account for the
            # function evaluations that consumed.
            g, dct = approx_derivative(
                self.fun, x, f0=f0, **self.finite_diff_options,
            )
            self.nfev += dct['nfev']
        self.ngev += 1
        return g
|
_ScalarGradWrapper
|
python
|
mahmoud__glom
|
glom/core.py
|
{
"start": 39035,
"end": 46498
}
|
class ____:
    """Specifier type designed for easy invocation of callables from glom.

    Args:
       func (callable): A function or other callable object.

    ``Invoke`` is similar to :func:`functools.partial`, but with the
    ability to set up a "templated" call which interleaves constants and
    glom specs.

    For example, the following creates a spec which can be used to
    check if targets are integers:

    >>> is_int = Invoke(isinstance).specs(T).constants(int)
    >>> glom(5, is_int)
    True

    And this composes like any other glom spec:

    >>> target = [7, object(), 9]
    >>> glom(target, [is_int])
    [True, False, True]

    Another example, mixing positional and keyword arguments:

    >>> spec = Invoke(sorted).specs(T).constants(key=int, reverse=True)
    >>> target = ['10', '5', '20', '1']
    >>> glom(target, spec)
    ['20', '10', '5', '1']

    Invoke also helps with evaluating zero-argument functions:

    >>> glom(target={}, spec=Invoke(int))
    0

    (A trivial example, but from timestamps to UUIDs, zero-arg calls do come up!)

    .. note::

       ``Invoke`` is mostly for functions, object construction, and callable
       objects. For calling methods, consider the :attr:`~glom.T` object.
    """
    def __init__(self, func):
        if not callable(func) and not _is_spec(func, strict=True):
            raise TypeError('expected func to be a callable or Spec instance,'
                            ' not: %r' % (func,))
        self.func = func
        # _args is a flat tuple of (op, args, kwargs) triples, one triple per
        # constants()/specs()/star() call, consumed three at a time below.
        self._args = ()
        # a registry of every known kwarg to its freshest value as set
        # by the methods below. the **kw dict is used as a unique marker.
        self._cur_kwargs = {}

    @classmethod
    def specfunc(cls, spec):
        """Creates an :class:`Invoke` instance where the function is
        indicated by a spec.

        >>> spec = Invoke.specfunc('func').constants(5)
        >>> glom({'func': range}, (spec, list))
        [0, 1, 2, 3, 4]
        """
        return cls(Spec(spec))

    def constants(self, *a, **kw):
        """Returns a new :class:`Invoke` spec, with the provided positional
        and keyword argument values stored for passing to the
        underlying function.

        >>> spec = Invoke(T).constants(5)
        >>> glom(range, (spec, list))
        [0, 1, 2, 3, 4]

        Subsequent positional arguments are appended:

        >>> spec = Invoke(T).constants(2).constants(10, 2)
        >>> glom(range, (spec, list))
        [2, 4, 6, 8]

        Keyword arguments also work as one might expect:

        >>> round_2 = Invoke(round).constants(ndigits=2).specs(T)
        >>> glom(3.14159, round_2)
        3.14

        :meth:`~Invoke.constants()` and other :class:`Invoke`
        methods may be called multiple times, just remember that every
        call returns a new spec.
        """
        ret = self.__class__(self.func)
        ret._args = self._args + ('C', a, kw)
        ret._cur_kwargs = dict(self._cur_kwargs)
        # Map each kwarg name to THIS call's kw dict; the dict's identity
        # later tells glomit()/__repr__ which call set a kwarg most recently.
        ret._cur_kwargs.update({k: kw for k, _ in kw.items()})
        return ret

    def specs(self, *a, **kw):
        """Returns a new :class:`Invoke` spec, with the provided positional
        and keyword arguments stored to be interpreted as specs, with
        the results passed to the underlying function.

        >>> spec = Invoke(range).specs('value')
        >>> glom({'value': 5}, (spec, list))
        [0, 1, 2, 3, 4]

        Subsequent positional arguments are appended:

        >>> spec = Invoke(range).specs('start').specs('end', 'step')
        >>> target = {'start': 2, 'end': 10, 'step': 2}
        >>> glom(target, (spec, list))
        [2, 4, 6, 8]

        Keyword arguments also work as one might expect:

        >>> multiply = lambda x, y: x * y
        >>> times_3 = Invoke(multiply).constants(y=3).specs(x='value')
        >>> glom({'value': 5}, times_3)
        15

        :meth:`~Invoke.specs()` and other :class:`Invoke`
        methods may be called multiple times, just remember that every
        call returns a new spec.
        """
        ret = self.__class__(self.func)
        ret._args = self._args + ('S', a, kw)
        ret._cur_kwargs = dict(self._cur_kwargs)
        # Same freshest-kwarg identity bookkeeping as in constants().
        ret._cur_kwargs.update({k: kw for k, _ in kw.items()})
        return ret

    def star(self, args=None, kwargs=None):
        """Returns a new :class:`Invoke` spec, with *args* and/or *kwargs*
        specs set to be "starred" or "star-starred" (respectively)

        >>> spec = Invoke(zip).star(args='lists')
        >>> target = {'lists': [[1, 2], [3, 4], [5, 6]]}
        >>> list(glom(target, spec))
        [(1, 3, 5), (2, 4, 6)]

        Args:
           args (spec): A spec to be evaluated and "starred" into the
              underlying function.
           kwargs (spec): A spec to be evaluated and "star-starred" into
              the underlying function.

        One or both of the above arguments should be set.

        The :meth:`~Invoke.star()`, like other :class:`Invoke`
        methods, may be called multiple times. The *args* and *kwargs*
        will be stacked in the order in which they are provided.
        """
        if args is None and kwargs is None:
            raise TypeError('expected one or both of args/kwargs to be passed')
        ret = self.__class__(self.func)
        ret._args = self._args + ('*', args, kwargs)
        ret._cur_kwargs = dict(self._cur_kwargs)
        return ret

    def __repr__(self):
        base_fname = self.__class__.__name__
        fname_map = {'C': 'constants', 'S': 'specs', '*': 'star'}
        if type(self.func) is Spec:
            base_fname += '.specfunc'
            args = (self.func.spec,)
        else:
            args = (self.func,)
        chunks = [format_invocation(base_fname, args, repr=bbrepr)]

        # Replay the stored (op, args, kwargs) triples as chained method
        # calls, echoing how this spec was originally built.
        for i in range(len(self._args) // 3):
            op, args, _kwargs = self._args[i * 3: i * 3 + 3]
            fname = fname_map[op]
            if op in ('C', 'S'):
                # only show kwargs that this call still "owns" (identity
                # check against the freshest-value registry)
                kwargs = [(k, v) for k, v in _kwargs.items()
                          if self._cur_kwargs[k] is _kwargs]
            else:
                kwargs = {}
                if args:
                    kwargs['args'] = args
                if _kwargs:
                    kwargs['kwargs'] = _kwargs
                args = ()
            chunks.append('.' + format_invocation(fname, args, kwargs, repr=bbrepr))
        return ''.join(chunks)

    def glomit(self, target, scope):
        all_args = []
        all_kwargs = {}

        recurse = lambda spec: scope[glom](target, spec, scope)
        func = recurse(self.func) if _is_spec(self.func, strict=True) else self.func

        # Assemble the call: constants are used as-is, specs are evaluated
        # against the target, star/star-star specs are expanded.
        for i in range(len(self._args) // 3):
            op, args, kwargs = self._args[i * 3: i * 3 + 3]
            if op == 'C':
                all_args.extend(args)
                all_kwargs.update({k: v for k, v in kwargs.items()
                                   if self._cur_kwargs[k] is kwargs})
            elif op == 'S':
                all_args.extend([recurse(arg) for arg in args])
                all_kwargs.update({k: recurse(v) for k, v in kwargs.items()
                                   if self._cur_kwargs[k] is kwargs})
            elif op == '*':
                if args is not None:
                    all_args.extend(recurse(args))
                if kwargs is not None:
                    all_kwargs.update(recurse(kwargs))

        return func(*all_args, **all_kwargs)
|
Invoke
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/download/utils.py
|
{
"start": 4400,
"end": 4806
}
|
class ____:
    """Context manager that temporarily switches the working directory.

    Entering chdirs into the given path (with ``~`` expanded); exiting
    returns to whatever directory was current at entry time.
    """

    def __init__(self, new_path: str):
        # Expand a leading ~/~user so home-relative paths work.
        self.new_path = os.path.expanduser(new_path)

    def __enter__(self) -> None:
        # Remember where we were so __exit__ can restore it.
        self.saved_path = os.getcwd()
        os.chdir(self.new_path)

    def __exit__(self, etype: object, value: object, traceback: object) -> None:
        # Always restore, even when the body raised.
        os.chdir(self.saved_path)
|
ChangeDirectory
|
python
|
networkx__networkx
|
networkx/classes/reportviews.py
|
{
"start": 12224,
"end": 15277
}
|
class ____:
    """A View class for degree of nodes in a NetworkX Graph

    The functionality is like dict.items() with (node, degree) pairs.
    Additional functionality includes read-only lookup of node degree,
    and calling with optional features nbunch (for only a subset of nodes)
    and weight (use edge weights to compute degree).

    Parameters
    ==========
    graph : NetworkX graph-like class
    nbunch : node, container of nodes, or None meaning all nodes (default=None)
    weight : bool or string (default=None)

    Notes
    -----
    DegreeView can still lookup any node even if nbunch is specified.

    Examples
    --------
    >>> G = nx.path_graph(3)
    >>> DV = G.degree()
    >>> assert DV[2] == 1
    >>> assert sum(deg for n, deg in DV) == 4

    >>> DVweight = G.degree(weight="span")
    >>> G.add_edge(1, 2, span=34)
    >>> DVweight[2]
    34
    >>> DVweight[0]  # default edge weight is 1
    1
    >>> sum(span for n, span in DVweight)  # sum weighted degrees
    70

    >>> DVnbunch = G.degree(nbunch=(1, 2))
    >>> assert len(list(DVnbunch)) == 2  # iteration over nbunch only
    """

    def __init__(self, G, nbunch=None, weight=None):
        self._graph = G
        # Undirected graphs expose only _adj; fall back to it for both
        # directions so in/out sums below still work.
        self._succ = G._succ if hasattr(G, "_succ") else G._adj
        self._pred = G._pred if hasattr(G, "_pred") else G._adj
        self._nodes = self._succ if nbunch is None else list(G.nbunch_iter(nbunch))
        self._weight = weight

    def __call__(self, nbunch=None, weight=None):
        same_weight = weight == self._weight
        if nbunch is None:
            # Re-use this view when nothing changes; otherwise re-weight it.
            return self if same_weight else self.__class__(self._graph, None, weight)
        try:
            if nbunch in self._nodes:
                # A single node: return its degree rather than a view.
                if same_weight:
                    return self[nbunch]
                return self.__class__(self._graph, None, weight)[nbunch]
        except TypeError:
            # unhashable nbunch -> treat it as a container of nodes below
            pass
        return self.__class__(self._graph, nbunch, weight)

    def __getitem__(self, n):
        succs = self._succ[n]
        preds = self._pred[n]
        weight = self._weight
        if weight is None:
            return len(succs) + len(preds)
        total = sum(dd.get(weight, 1) for dd in succs.values())
        total += sum(dd.get(weight, 1) for dd in preds.values())
        return total

    def __iter__(self):
        weight = self._weight
        for n in self._nodes:
            succs = self._succ[n]
            preds = self._pred[n]
            if weight is None:
                yield (n, len(succs) + len(preds))
            else:
                deg = sum(dd.get(weight, 1) for dd in succs.values()) + sum(
                    dd.get(weight, 1) for dd in preds.values()
                )
                yield (n, deg)

    def __len__(self):
        return len(self._nodes)

    def __str__(self):
        return str(list(self))

    def __repr__(self):
        return f"{self.__class__.__name__}({dict(self)})"
|
DiDegreeView
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_expr.py
|
{
"start": 95119,
"end": 96887
}
|
class ____(Expr):
    """Mixin class for partition filtering

    A ``PartitionsFiltered`` subclass must define a ``_partitions`` parameter. When
    ``_partitions`` is defined, the following expressions must produce the same output
    for :cls:`PartitionsFiltered`:

    - ``cls(expr: Expr, ..., _partitions)``
    - ``Partitions(cls(expr: Expr, ...), _partitions)``

    In order to leverage the default ``Expr._layer`` method, subclasses should define
    ``_filtered_task`` instead of ``_task``.
    """

    @property
    def _filtered(self) -> bool:
        """Whether output partitions have been filtered"""
        return self.operand("_partitions") is not None

    @property
    def _partitions(self) -> list | tuple | range:
        """Selected partition indices"""
        if not self._filtered:
            # Unfiltered: every partition is selected.
            return range(self.npartitions)
        return self.operand("_partitions")

    @functools.cached_property
    def divisions(self):
        full_divisions = super().divisions
        # Common case: no filter, so the full divisions apply directly.
        if not self._filtered:
            return full_divisions
        # Specific partitions were selected: keep each one's start and
        # stop division, in selection order.
        return tuple(
            div
            for part in self._partitions
            for div in (full_divisions[part], full_divisions[part + 1])
        )

    @property
    def npartitions(self):
        return len(self._partitions) if self._filtered else super().npartitions

    def _task(self, name: Key, index: int) -> Task:
        # Translate the output index into the selected partition's index.
        return self._filtered_task(name, self._partitions[index])

    def _filtered_task(self, name: Key, index: int) -> Task:
        raise NotImplementedError()
|
PartitionsFiltered
|
python
|
crytic__slither
|
slither/detectors/attributes/const_functions_state.py
|
{
"start": 440,
"end": 3411
}
|
class ____(AbstractDetector):
    """
    Constant function detector
    """

    ARGUMENT = "constant-function-state"  # run the detector with slither.py --ARGUMENT
    HELP = "Constant functions changing the state"  # help information
    IMPACT = DetectorClassification.MEDIUM
    CONFIDENCE = DetectorClassification.MEDIUM

    WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#constant-functions-changing-the-state"

    WIKI_TITLE = "Constant functions changing the state"

    # region wiki_description
    WIKI_DESCRIPTION = """
Functions declared as `constant`/`pure`/`view` change the state.

`constant`/`pure`/`view` was not enforced prior to Solidity 0.5.
Starting from Solidity 0.5, a call to a `constant`/`pure`/`view` function uses the `STATICCALL` opcode, which reverts in case of state modification.

As a result, a call to an [incorrectly labeled function may trap a contract compiled with Solidity 0.5](https://solidity.readthedocs.io/en/develop/050-breaking-changes.html#interoperability-with-older-contracts)."""
    # endregion wiki_description

    # region wiki_exploit_scenario
    WIKI_EXPLOIT_SCENARIO = """
```solidity
contract Constant{
    uint counter;
    function get() public view returns(uint){
       counter = counter +1;
       return counter
    }
}
```
`Constant` was deployed with Solidity 0.4.25. Bob writes a smart contract that interacts with `Constant` in Solidity 0.5.0. 
All the calls to `get` revert, breaking Bob's smart contract execution."""
    # endregion wiki_exploit_scenario

    WIKI_RECOMMENDATION = (
        "Ensure that attributes of contracts compiled prior to Solidity 0.5.0 are correct."
    )

    VULNERABLE_SOLC_VERSIONS = ALL_SOLC_VERSIONS_04

    def _detect(self) -> List[Output]:
        """Detect the constant function changing the state

        Recursively visit the calls
        Returns:
            list: {'vuln', 'filename,'contract','func','#varsWritten'}
        """
        results = []
        for contract in self.contracts:
            for function in contract.functions:
                # Only report functions on the contract that declares them,
                # and only those labeled view/pure.
                if function.contract_declarer != contract:
                    continue
                if not (function.view or function.pure):
                    continue
                variables_written = function.all_state_variables_written()
                if not variables_written:
                    continue
                attr = "view" if function.view else "pure"
                info: DETECTOR_INFO = [
                    function,
                    f" is declared {attr} but changes state variables:\n",
                ]
                for variable_written in variables_written:
                    info += ["\t- ", variable_written, "\n"]
                res = self.generate_result(info, {"contains_assembly": False})
                results.append(res)
        return results

    @staticmethod
    def _format(slither: SlitherCompilationUnit, result: Dict) -> None:
        custom_format(slither, result)
|
ConstantFunctionsState
|
python
|
coleifer__peewee
|
examples/analytics/app.py
|
{
"start": 1952,
"end": 4355
}
|
class ____(BaseModel):
    """A single recorded page-view (one beacon hit) tied to an Account."""

    account = ForeignKeyField(Account, backref='pageviews')
    url = TextField()
    timestamp = DateTimeField(default=datetime.datetime.now)
    title = TextField(default='')
    ip = CharField(default='')
    referrer = TextField(default='')
    headers = BinaryJSONField()
    params = BinaryJSONField()

    @classmethod
    def create_from_request(cls, account, request):
        """Build and persist a page-view row from an incoming beacon request.

        The tracked page's address arrives in the ``url`` query parameter;
        its own query string is unpacked into ``params``.
        """
        parsed = urlparse(request.args['url'])
        params = dict(parse_qsl(parsed.query))

        # Use `cls` rather than a hard-coded class name so the classmethod
        # also works for subclasses (and doesn't depend on the module-level
        # name of this class).
        return cls.create(
            account=account,
            url=parsed.path,
            title=request.args.get('t') or '',
            # honor reverse proxies that forward the client address
            ip=request.headers.get('x-forwarded-for', request.remote_addr),
            referrer=request.args.get('ref') or '',
            headers=dict(request.headers),
            params=params)
@app.route('/a.gif')
def analyze():
    """Beacon endpoint: validate the request, record one page view, and
    respond with the 1px tracking GIF."""
    # Make sure an account id and url were specified.
    if not request.args.get('id') or not request.args.get('url'):
        abort(404)

    # Ensure the account id is valid.
    try:
        account = Account.get(Account.id == request.args['id'])
    except Account.DoesNotExist:
        abort(404)

    # Ensure the account id matches the domain of the URL we wish to record.
    if not account.verify_url(request.args['url']):
        abort(403)

    # Store the page-view data in the database.
    PageView.create_from_request(account, request)

    # Return a 1px gif.
    response = Response(app.config['BEACON'], mimetype='image/gif')
    # Forbid caching so every page load re-fires the beacon.
    response.headers['Cache-Control'] = 'private, no-cache'
    return response
@app.route('/a.js')
def script():
    """Serve the embeddable tracking script, personalized per account id."""
    account_id = request.args.get('id')
    # Without an account id there is nothing to personalize: empty script.
    if not account_id:
        return Response('', mimetype='text/javascript')
    return Response(
        app.config['JAVASCRIPT'] % (app.config['DOMAIN'], account_id),
        mimetype='text/javascript')
@app.errorhandler(404)
def not_found(e):
    # Minimal HTML body for any 404 response.
    return Response('<h3>Not found.</h3>')
# Request handlers -- these two hooks are provided by flask and we will use them
# to create and tear down a database connection on each request.
@app.before_request
def before_request():
    # Stash the database on flask's per-request `g` and open a connection.
    g.db = database
    g.db.connection()
@app.after_request
def after_request(response):
    # Close the connection opened in before_request(), then pass the
    # response through unchanged.
    g.db.close()
    return response
if __name__ == '__main__':
    # safe=True: creating the tables is a no-op when they already exist.
    database.create_tables([Account, PageView], safe=True)
    app.run(debug=True)
|
PageView
|
python
|
openai__openai-python
|
src/openai/resources/containers/files/files.py
|
{
"start": 19111,
"end": 19790
}
|
class ____:
    """Wrapper exposing raw-response variants of each ``AsyncFiles``
    method (``create``/``retrieve``/``list``/``delete``)."""

    def __init__(self, files: AsyncFiles) -> None:
        self._files = files

        self.create = _legacy_response.async_to_raw_response_wrapper(
            files.create,
        )
        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
            files.retrieve,
        )
        self.list = _legacy_response.async_to_raw_response_wrapper(
            files.list,
        )
        self.delete = _legacy_response.async_to_raw_response_wrapper(
            files.delete,
        )

    @cached_property
    def content(self) -> AsyncContentWithRawResponse:
        # cached_property: built on first access, then memoized.
        return AsyncContentWithRawResponse(self._files.content)
|
AsyncFilesWithRawResponse
|
python
|
huggingface__transformers
|
tests/models/pixtral/test_processing_pixtral.py
|
{
"start": 972,
"end": 13015
}
|
class ____(ProcessorTesterMixin, unittest.TestCase):
    processor_class = PixtralProcessor
    model_id = "mistral-community/pixtral-12b"

    # Fixture images: random uint8 pixel data (presumably channels-first
    # (C, H, W) given the leading 3 — TODO confirm) plus URL variants.
    url_0 = url_to_local_path(
        "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
    )
    image_0 = np.random.randint(255, size=(3, 876, 1300), dtype=np.uint8)
    url_1 = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_1 = np.random.randint(255, size=(3, 480, 640), dtype=np.uint8)
    image_2 = np.random.randint(255, size=(3, 1024, 1024), dtype=np.uint8)

    @parameterized.expand([(1, "pt"), (2, "pt")])
    @unittest.skip("Not tested before, to investigate")
    def test_apply_chat_template_image(self, batch_size, return_tensors):
        pass

    def test_image_token_filling(self):
        """The processor must expand the single [IMG] placeholder into the
        expected number of image tokens for a non-square image."""
        processor = self.processor_class.from_pretrained(self.tmpdirname)
        # Important to check with non square image
        image = torch.randint(0, 2, (3, 500, 316))
        expected_image_tokens = 640
        image_token_index = 10

        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image"},
                    {"type": "text", "text": "What is shown in this image?"},
                ],
            },
        ]
        inputs = processor(
            text=[processor.apply_chat_template(messages)],
            images=[image],
            return_tensors="pt",
        )
        # Count how many placeholder tokens ended up in the encoded prompt.
        image_tokens = (inputs["input_ids"] == image_token_index).sum().item()
        self.assertEqual(expected_image_tokens, image_tokens)

    def test_processor_with_single_image(self):
        """One prompt + one image, passed as array, URL, list, and nested
        list — all must encode identically."""
        processor = self.processor_class.from_pretrained(self.tmpdirname)
        prompt_string = "USER: [IMG]\nWhat's the content of the image? ASSISTANT:"

        # Make small for checking image token expansion
        processor.image_processor.size = {"longest_edge": 30}
        processor.image_processor.patch_size = {"height": 2, "width": 2}

        # Test passing in an image
        inputs_image = processor(text=prompt_string, images=self.image_0, return_tensors="pt")
        self.assertIn("input_ids", inputs_image)
        self.assertTrue(len(inputs_image["input_ids"]) == 1)
        self.assertIsInstance(inputs_image["input_ids"], torch.Tensor)
        self.assertIsInstance(inputs_image["pixel_values"], torch.Tensor)
        self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([1, 3, 32, 32]))

        # fmt: off
        input_ids = inputs_image["input_ids"]
        self.assertEqual(
            input_ids[0].tolist(),
            # Equivalent to "USER: [IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END]\nWhat's the content of the image? ASSISTANT:"
            [21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 4701, 1307, 1278, 3937, 1063, 1349, 4290, 16002, 41150, 1058]
        )
        # fmt: on

        # Test passing in a url
        inputs_url = processor(text=prompt_string, images=self.url_0, return_tensors="pt")
        self.assertIn("input_ids", inputs_url)
        self.assertTrue(len(inputs_url["input_ids"]) == 1)
        self.assertIsInstance(inputs_url["input_ids"], torch.Tensor)
        self.assertIsInstance(inputs_image["pixel_values"], torch.Tensor)
        self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([1, 3, 32, 32]))

        # fmt: off
        input_ids = inputs_url["input_ids"]
        self.assertEqual(
            input_ids[0].tolist(),
            # Equivalent to "USER: [IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END]\nWhat's the content of the image? ASSISTANT:"
            [21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 4701, 1307, 1278, 3937, 1063, 1349, 4290, 16002, 41150, 1058]
        )
        # fmt: on

        # Test passing inputs as a single list
        inputs_image = processor(text=prompt_string, images=[self.image_0], return_tensors="pt")
        self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([1, 3, 32, 32]))
        # fmt: off
        self.assertEqual(
            inputs_image["input_ids"][0].tolist(),
            [21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 4701, 1307, 1278, 3937, 1063, 1349, 4290, 16002, 41150, 1058]
        )
        # fmt: on

        # Test as nested single list
        inputs_image = processor(text=prompt_string, images=[[self.image_0]], return_tensors="pt")
        self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([1, 3, 32, 32]))
        # fmt: off
        self.assertEqual(
            inputs_image["input_ids"][0].tolist(),
            [21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 4701, 1307, 1278, 3937, 1063, 1349, 4290, 16002, 41150, 1058]
        )
        # fmt: on

    def test_processor_with_multiple_images_single_list(self):
        """One prompt with two [IMG] placeholders and two images, in array,
        URL, and nested-list forms."""
        processor = self.processor_class.from_pretrained(self.tmpdirname)
        prompt_string = "USER: [IMG][IMG]\nWhat's the difference between these two images? ASSISTANT:"

        # Make small for checking image token expansion
        processor.image_processor.size = {"longest_edge": 30}
        processor.image_processor.patch_size = {"height": 2, "width": 2}

        # Test passing in an image
        inputs_image = processor(text=prompt_string, images=[self.image_0, self.image_1], return_tensors="pt")
        self.assertIn("input_ids", inputs_image)
        self.assertTrue(len(inputs_image["input_ids"]) == 1)
        self.assertIsInstance(inputs_image["input_ids"], torch.Tensor)
        self.assertIsInstance(inputs_image["pixel_values"], torch.Tensor)
        self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([2, 3, 32, 32]))
        # fmt: off
        input_ids = inputs_image["input_ids"]
        self.assertEqual(
            input_ids[0].tolist(),
            # Equivalent to ["USER: [IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END][IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END]\nWhat's the difference between these two images? ASSISTANT:"]
            [21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 6592, 2396, 2576, 2295, 8061, 1063, 1349, 4290, 16002, 41150, 1058]
        )
        # fmt: on

        # Test passing in a url
        inputs_url = processor(text=prompt_string, images=[self.url_0, self.url_1], return_tensors="pt")
        self.assertIn("input_ids", inputs_url)
        self.assertTrue(len(inputs_url["input_ids"]) == 1)
        self.assertIsInstance(inputs_url["input_ids"], torch.Tensor)
        self.assertIsInstance(inputs_image["pixel_values"], torch.Tensor)
        self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([2, 3, 32, 32]))
        # fmt: off
        input_ids = inputs_url["input_ids"]
        self.assertEqual(
            input_ids[0].tolist(),
            # Equivalent to ["USER: [IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END][IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END]\nWhat's the difference between these two images? ASSISTANT:"]
            [21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 6592, 2396, 2576, 2295, 8061, 1063, 1349, 4290, 16002, 41150, 1058]
        )
        # fmt: on

        # Test passing in as a nested list
        inputs_url = processor(text=prompt_string, images=[[self.image_0, self.image_1]], return_tensors="pt")
        self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([2, 3, 32, 32]))
        # fmt: off
        self.assertEqual(
            inputs_url["input_ids"][0].tolist(),
            [21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 6592, 2396, 2576, 2295, 8061, 1063, 1349, 4290, 16002, 41150, 1058]
        )
        # fmt: on

    def test_processor_with_multiple_images_multiple_lists(self):
        """Batched prompts with per-prompt image lists, plus the flat-list
        equivalent."""
        processor = self.processor_class.from_pretrained(self.tmpdirname)
        prompt_string = [
            "USER: [IMG][IMG]\nWhat's the difference between these two images? ASSISTANT:",
            "USER: [IMG]\nWhat's the content of the image? ASSISTANT:",
        ]
        processor.tokenizer.pad_token = "</s>"
        image_inputs = [[self.image_0, self.image_1], [self.image_2]]

        # Make small for checking image token expansion
        processor.image_processor.size = {"longest_edge": 30}
        processor.image_processor.patch_size = {"height": 2, "width": 2}

        # Test passing in an image
        inputs_image = processor(text=prompt_string, images=image_inputs, return_tensors="pt", padding=True)
        self.assertIn("input_ids", inputs_image)
        self.assertTrue(len(inputs_image["input_ids"]) == 2)
        self.assertIsInstance(inputs_image["input_ids"], torch.Tensor)
        self.assertIsInstance(inputs_image["pixel_values"], torch.Tensor)
        self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([3, 3, 32, 32]))
        # fmt: off
        input_ids = inputs_image["input_ids"]
        self.assertEqual(
            input_ids[0].tolist(),
            # Equivalent to ["USER: [IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END][IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END]\nWhat's the difference between these two images? ASSISTANT:"]
            [21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 6592, 2396, 2576, 2295, 8061, 1063, 1349, 4290, 16002, 41150, 1058]
        )
        # fmt: on

        # Test passing in a url
        inputs_url = processor(text=prompt_string, images=image_inputs, return_tensors="pt", padding=True)
        self.assertIn("input_ids", inputs_url)
        self.assertTrue(len(inputs_url["input_ids"]) == 2)
        self.assertIsInstance(inputs_url["input_ids"], torch.Tensor)
        self.assertIsInstance(inputs_image["pixel_values"], torch.Tensor)
        self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([3, 3, 32, 32]))
        # fmt: off
        input_ids = inputs_url["input_ids"]
        self.assertEqual(
            input_ids[0].tolist(),
            # Equivalent to ["USER: [IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END][IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END]\nWhat's the difference between these two images? ASSISTANT:"]
            [21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 6592, 2396, 2576, 2295, 8061, 1063, 1349, 4290, 16002, 41150, 1058]
        )
        # fmt: on

        # Test passing as a single flat list
        inputs_image = processor(
            text=prompt_string, images=[self.image_0, self.image_1, self.image_2], return_tensors="pt", padding=True
        )
        self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([3, 3, 32, 32]))
        # fmt: off
        self.assertEqual(
            inputs_image["input_ids"][0].tolist(),
            [21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 6592, 2396, 2576, 2295, 8061, 1063, 1349, 4290, 16002, 41150, 1058]
        )
        # fmt: on

    def test_processor_returns_full_length_batches(self):
        # to avoid https://github.com/huggingface/transformers/issues/34204
        processor = self.processor_class.from_pretrained(self.tmpdirname)
        prompt_string = [
            "USER: [IMG]\nWhat's the content of the image? ASSISTANT:",
        ] * 5
        processor.tokenizer.pad_token = "</s>"
        image_inputs = [[self.image_0]] * 5

        # Make small for checking image token expansion
        processor.image_processor.size = {"longest_edge": 30}
        processor.image_processor.patch_size = {"height": 2, "width": 2}

        # Test passing in an image
        inputs_image = processor(text=prompt_string, images=image_inputs, return_tensors="pt", padding=True)
        self.assertIn("input_ids", inputs_image)
        # The batch must not be silently truncated.
        self.assertTrue(len(inputs_image["input_ids"]) == 5)
        self.assertTrue(len(inputs_image["pixel_values"]) == 5)
|
PixtralProcessorTest
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/algebra.py
|
{
"start": 6167,
"end": 7201
}
|
class ____(RegexLexer):
"""
A `BC <https://www.gnu.org/software/bc/>`_ lexer.
.. versionadded:: 2.1
"""
name = 'BC'
aliases = ['bc']
filenames = ['*.bc']
tokens = {
'root': [
(r'/\*', Comment.Multiline, 'comment'),
(r'"(?:[^"\\]|\\.)*"', String),
(r'[{}();,]', Punctuation),
(words(('if', 'else', 'while', 'for', 'break', 'continue',
'halt', 'return', 'define', 'auto', 'print', 'read',
'length', 'scale', 'sqrt', 'limits', 'quit',
'warranty'), suffix=r'\b'), Keyword),
(r'\+\+|--|\|\||&&|'
r'([-<>+*%\^/!=])=?', Operator),
# bc doesn't support exponential
(r'[0-9]+(\.[0-9]*)?', Number),
(r'\.[0-9]+', Number),
(r'.', Text)
],
'comment': [
(r'[^*/]+', Comment.Multiline),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
}
|
BCLexer
|
python
|
ray-project__ray
|
rllib/examples/envs/classes/cartpole_with_protobuf_observation_space.py
|
{
"start": 208,
"end": 2973
}
|
class ____(CartPoleEnv):
"""CartPole gym environment that has a protobuf observation space.
Sometimes, it is more performant for an environment to publish its observations
as a protobuf message (instead of a heavily nested Dict).
The protobuf message used here is originally defined in the
`./utils/cartpole_observations.proto` file. We converted this file into a python
importable module by compiling it with:
`protoc --python_out=. cartpole_observations.proto`
.. which yielded the `cartpole_observations_proto.py` file in the same directory
(we import this file's `CartPoleObservation` message here).
The new observation space is a (binary) Box(0, 255, ([len of protobuf],), uint8).
A ConnectorV2 pipeline or simpler gym.Wrapper will have to be used to convert this
observation format into an NN-readable (e.g. float32) 1D tensor.
"""
def __init__(self, config=None):
super().__init__()
dummy_obs = self._convert_observation_to_protobuf(
np.array([1.0, 1.0, 1.0, 1.0])
)
bin_length = len(dummy_obs)
self.observation_space = gym.spaces.Box(0, 255, (bin_length,), np.uint8)
def step(self, action):
observation, reward, terminated, truncated, info = super().step(action)
proto_observation = self._convert_observation_to_protobuf(observation)
return proto_observation, reward, terminated, truncated, info
def reset(self, **kwargs):
observation, info = super().reset(**kwargs)
proto_observation = self._convert_observation_to_protobuf(observation)
return proto_observation, info
def _convert_observation_to_protobuf(self, observation):
x_pos, x_veloc, angle_pos, angle_veloc = observation
# Create the Protobuf message
cartpole_observation = CartPoleObservation()
cartpole_observation.x_pos = x_pos
cartpole_observation.x_veloc = x_veloc
cartpole_observation.angle_pos = angle_pos
cartpole_observation.angle_veloc = angle_veloc
# Serialize to binary string.
return np.frombuffer(cartpole_observation.SerializeToString(), np.uint8)
if __name__ == "__main__":
env = CartPoleWithProtobufObservationSpace()
obs, info = env.reset()
# Test loading a protobuf object with data from the obs binary string
# (uint8 ndarray).
byte_str = obs.tobytes()
obs_protobuf = CartPoleObservation()
obs_protobuf.ParseFromString(byte_str)
print(obs_protobuf)
terminated = truncated = False
while not terminated and not truncated:
action = env.action_space.sample()
obs, reward, terminated, truncated, info = env.step(action)
print(obs)
|
CartPoleWithProtobufObservationSpace
|
python
|
pypa__pipenv
|
pipenv/vendor/click/core.py
|
{
"start": 111658,
"end": 114142
}
|
class ____(Parameter):
"""Arguments are positional parameters to a command. They generally
provide fewer features than options but can have infinite ``nargs``
and are required by default.
All parameters are passed onwards to the constructor of :class:`Parameter`.
"""
param_type_name = "argument"
def __init__(
self,
param_decls: t.Sequence[str],
required: t.Optional[bool] = None,
**attrs: t.Any,
) -> None:
if required is None:
if attrs.get("default") is not None:
required = False
else:
required = attrs.get("nargs", 1) > 0
if "multiple" in attrs:
raise TypeError("__init__() got an unexpected keyword argument 'multiple'.")
super().__init__(param_decls, required=required, **attrs)
if __debug__:
if self.default is not None and self.nargs == -1:
raise TypeError("'default' is not supported for nargs=-1.")
@property
def human_readable_name(self) -> str:
if self.metavar is not None:
return self.metavar
return self.name.upper() # type: ignore
def make_metavar(self) -> str:
if self.metavar is not None:
return self.metavar
var = self.type.get_metavar(self)
if not var:
var = self.name.upper() # type: ignore
if not self.required:
var = f"[{var}]"
if self.nargs != 1:
var += "..."
return var
def _parse_decls(
self, decls: t.Sequence[str], expose_value: bool
) -> t.Tuple[t.Optional[str], t.List[str], t.List[str]]:
if not decls:
if not expose_value:
return None, [], []
raise TypeError("Could not determine name for argument")
if len(decls) == 1:
name = arg = decls[0]
name = name.replace("-", "_").lower()
else:
raise TypeError(
"Arguments take exactly one parameter declaration, got"
f" {len(decls)}."
)
return name, [arg], []
def get_usage_pieces(self, ctx: Context) -> t.List[str]:
return [self.make_metavar()]
def get_error_hint(self, ctx: Context) -> str:
return f"'{self.make_metavar()}'"
def add_to_parser(self, parser: OptionParser, ctx: Context) -> None:
parser.add_argument(dest=self.name, nargs=self.nargs, obj=self)
|
Argument
|
python
|
keras-team__keras
|
keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/formats.py
|
{
"start": 2829,
"end": 3377
}
|
class ____:
"""REL_YXYX contains axis indices for the REL_YXYX format.
REL_YXYX is like YXYX, but each value is relative to the width and height of
the origin image. Values are percentages of the origin images' width and
height respectively.
The REL_YXYX format consists of the following required indices:
- TOP: top of the bounding box
- LEFT: left of the bounding box
- BOTTOM: bottom of the bounding box
- RIGHT: right of the bounding box
"""
TOP = 0
LEFT = 1
BOTTOM = 2
RIGHT = 3
|
REL_YXYX
|
python
|
spack__spack
|
lib/spack/spack/vendor/macholib/mach_o.py
|
{
"start": 31182,
"end": 31420
}
|
class ____(Structure):
_fields_ = (("offset", p_uint32), ("nhints", p_uint32))
def describe(self):
s = {}
s["offset"] = int(self.offset)
s["nhints"] = int(self.nhints)
return s
|
twolevel_hints_command
|
python
|
astropy__astropy
|
astropy/table/column.py
|
{
"start": 41330,
"end": 50645
}
|
class ____(BaseColumn):
"""Define a data column for use in a Table object.
Parameters
----------
data : list, ndarray, or None
Column data values
name : str
Column name and key for reference within Table
dtype : `~numpy.dtype`-like
Data type for column
shape : tuple or ()
Dimensions of a single row element in the column data
length : int or 0
Number of row elements in column data
description : str or None
Full description of column
unit : str or None
Physical unit
format : str, None, or callable
Format string for outputting column values. This can be an
"old-style" (``format % value``) or "new-style" (`str.format`)
format specification string or a function or any callable object that
accepts a single value and returns a string.
meta : dict-like or None
Meta-data associated with the column
Examples
--------
A Column can be created in two different ways:
- Provide a ``data`` value but not ``shape`` or ``length`` (which are
inferred from the data).
Examples::
col = Column(data=[1, 2], name='name') # shape=(2,)
col = Column(data=[[1, 2], [3, 4]], name='name') # shape=(2, 2)
col = Column(data=[1, 2], name='name', dtype=float)
col = Column(data=np.array([1, 2]), name='name')
col = Column(data=['hello', 'world'], name='name')
The ``dtype`` argument can be any value which is an acceptable
fixed-size data-type initializer for the numpy.dtype() method. See
`<https://numpy.org/doc/stable/reference/arrays.dtypes.html>`_.
Examples include:
- Python non-string type (float, int, bool)
- Numpy non-string type (e.g. np.float32, np.int64, np.bool\\_)
- Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')
If no ``dtype`` value is provide then the type is inferred using
``np.array(data)``.
- Provide ``length`` and optionally ``shape``, but not ``data``
Examples::
col = Column(name='name', length=5)
col = Column(name='name', dtype=int, length=10, shape=(3,4))
The default ``dtype`` is ``np.float64``. The ``shape`` argument is the
array shape of a single cell in the column.
To access the ``Column`` data as a raw `numpy.ndarray` object, you can use
one of the ``data`` or ``value`` attributes (which are equivalent)::
col.data
col.value
"""
def __new__(
cls,
data=None,
name=None,
dtype=None,
shape=(),
length=0,
description=None,
unit=None,
format=None,
meta=None,
copy=COPY_IF_NEEDED,
copy_indices=True,
):
if isinstance(data, MaskedColumn) and np.any(data.mask):
raise TypeError(
"Cannot convert a MaskedColumn with masked value to a Column"
)
self = super().__new__(
cls,
data=data,
name=name,
dtype=dtype,
shape=shape,
length=length,
description=description,
unit=unit,
format=format,
meta=meta,
copy=copy,
copy_indices=copy_indices,
)
return self
def __setattr__(self, item, value):
if not isinstance(self, MaskedColumn) and item == "mask":
raise AttributeError(
"cannot set mask value to a column in non-masked Table"
)
super().__setattr__(item, value)
if item == "unit" and issubclass(self.dtype.type, np.number):
try:
converted = self.parent_table._convert_col_for_table(self)
except AttributeError: # Either no parent table or parent table is None
pass
else:
if converted is not self:
self.parent_table.replace_column(self.name, converted)
def _base_repr_(self, html=False):
# If scalar then just convert to correct numpy type and use numpy repr
if self.ndim == 0:
return repr(self.item())
descr_vals = [self.__class__.__name__]
unit = None if self.unit is None else str(self.unit)
shape = None if self.ndim <= 1 else self.shape[1:]
for attr, val in (
("name", self.name),
("dtype", dtype_info_name(self.dtype)),
("shape", shape),
("unit", unit),
("format", self.format),
("description", self.description),
("length", len(self)),
):
if val is not None:
descr_vals.append(f"{attr}={val!r}")
descr = "<" + " ".join(descr_vals) + ">\n"
if html:
from astropy.utils.xml.writer import xml_escape
descr = xml_escape(descr)
data_lines, outs = self._formatter._pformat_col(
self, show_name=False, show_unit=False, show_length=False, html=html
)
out = descr + "\n".join(data_lines)
return out
def _repr_html_(self):
return self._base_repr_(html=True)
def __repr__(self):
return self._base_repr_(html=False)
def __str__(self):
# If scalar then just convert to correct numpy type and use numpy repr
if self.ndim == 0:
return str(self.item())
lines, outs = self._formatter._pformat_col(self)
return "\n".join(lines)
def __bytes__(self):
return str(self).encode("utf-8")
def _check_string_truncate(self, value):
"""
Emit a warning if any elements of ``value`` will be truncated when
``value`` is assigned to self.
"""
# Convert input ``value`` to the string dtype of this column and
# find the length of the longest string in the array.
value = np.asanyarray(value, dtype=self.dtype.type)
if value.size == 0:
return
value_str_len = np.char.str_len(value).max()
# Parse the array-protocol typestring (e.g. '|U15') of self.dtype which
# has the character repeat count on the right side.
self_str_len = dtype_bytes_or_chars(self.dtype)
if value_str_len > self_str_len:
warnings.warn(
f"truncated right side string(s) longer than {self_str_len} "
"character(s) during assignment",
StringTruncateWarning,
stacklevel=3,
)
def __setitem__(self, index, value):
if self.dtype.char == "S":
value = self._encode_str(value)
# Issue warning for string assignment that truncates ``value``
if issubclass(self.dtype.type, np.character):
self._check_string_truncate(value)
# update indices
self.info.adjust_indices(index, value, len(self))
# Set items using a view of the underlying data, as it gives an
# order-of-magnitude speed-up. [#2994]
self.data[index] = value
__eq__ = _make_compare("__eq__")
__ne__ = _make_compare("__ne__")
__gt__ = _make_compare("__gt__")
__lt__ = _make_compare("__lt__")
__ge__ = _make_compare("__ge__")
__le__ = _make_compare("__le__")
def insert(self, obj, values, axis=0):
"""
Insert values before the given indices in the column and return
a new `~astropy.table.Column` object.
Parameters
----------
obj : int, slice or sequence of int
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
Value(s) to insert. If the type of ``values`` is different from
that of the column, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the column array is flattened before insertion. Default is 0,
which will insert a row.
Returns
-------
out : `~astropy.table.Column`
A copy of column with ``values`` and ``mask`` inserted. Note that the
insertion does not occur in-place: a new column is returned.
"""
if self.dtype.kind == "O":
# Even if values is array-like (e.g. [1,2,3]), insert as a single
# object. Numpy.insert instead inserts each element in an array-like
# input individually.
data = np.insert(self, obj, None, axis=axis)
data[obj] = values
else:
self_for_insert = _expand_string_array_for_values(self, values)
data = np.insert(self_for_insert, obj, values, axis=axis)
out = data.view(self.__class__)
out.__array_finalize__(self)
return out
# We do this to make the methods show up in the API docs
name = BaseColumn.name
unit = BaseColumn.unit
copy = BaseColumn.copy
more = BaseColumn.more
pprint = BaseColumn.pprint
pformat = BaseColumn.pformat
convert_unit_to = BaseColumn.convert_unit_to
quantity = BaseColumn.quantity
to = BaseColumn.to
|
Column
|
python
|
getsentry__sentry
|
tests/sentry/relocation/tasks/test_process.py
|
{
"start": 45032,
"end": 49447
}
|
class ____(RelocationTaskTestCase):
def setUp(self) -> None:
super().setUp()
self.relocation.step = Relocation.Step.PREPROCESSING.value
self.relocation.latest_task = OrderedTask.PREPROCESSING_BASELINE_CONFIG.name
self.relocation.want_usernames = ["a", "b", "c"]
self.relocation.save()
self.create_user("c")
self.create_user("d")
self.create_user("e")
self.relocation_storage = get_relocation_storage()
def test_success(
self, preprocessing_complete_mock: Mock, fake_message_builder: Mock, fake_kms_client: Mock
):
self.mock_message_builder(fake_message_builder)
self.mock_kms_client(fake_kms_client)
preprocessing_colliding_users(self.uuid)
assert fake_kms_client.return_value.asymmetric_decrypt.call_count == 0
assert fake_kms_client.return_value.get_public_key.call_count == 1
assert fake_message_builder.call_count == 0
assert preprocessing_complete_mock.call_count == 1
(_, files) = self.relocation_storage.listdir(f"runs/{self.uuid}/in")
assert len(files) == 1
assert "colliding-users.tar" in files
with self.relocation_storage.open(f"runs/{self.uuid}/in/colliding-users.tar") as fp:
json_models = json.loads(
decrypt_encrypted_tarball(fp, LocalFileDecryptor.from_bytes(self.priv_key_pem))
)
assert len(json_models) > 0
# Only user `c` was colliding, so only they should be exported.
for json_model in json_models:
if NormalizedModelName(json_model["model"]) == get_model_name(User):
assert json_model["fields"]["username"] == "c"
def test_retry_if_attempts_left(
self, preprocessing_complete_mock: Mock, fake_message_builder: Mock, fake_kms_client: Mock
):
RelocationFile.objects.filter(relocation=self.relocation).delete()
self.mock_message_builder(fake_message_builder)
self.mock_kms_client(fake_kms_client)
# An exception being raised will trigger a retry task.
with pytest.raises(Exception):
fake_kms_client.return_value.get_public_key.side_effect = Exception("Test")
preprocessing_colliding_users(self.uuid)
assert fake_kms_client.return_value.asymmetric_decrypt.call_count == 0
assert fake_kms_client.return_value.get_public_key.call_count == 1
assert fake_message_builder.call_count == 0
assert preprocessing_complete_mock.call_count == 0
relocation = Relocation.objects.get(uuid=self.uuid)
assert relocation.status == Relocation.Status.IN_PROGRESS.value
assert relocation.latest_notified != Relocation.EmailKind.FAILED.value
assert not relocation.failure_reason
def test_fail_if_no_attempts_left(
self, preprocessing_complete_mock: Mock, fake_message_builder: Mock, fake_kms_client: Mock
):
self.relocation.latest_task = OrderedTask.PREPROCESSING_COLLIDING_USERS.name
self.relocation.latest_task_attempts = MAX_FAST_TASK_RETRIES
self.relocation.save()
RelocationFile.objects.filter(relocation=self.relocation).delete()
self.mock_message_builder(fake_message_builder)
self.mock_kms_client(fake_kms_client)
fake_kms_client.return_value.get_public_key.side_effect = Exception("Test")
with pytest.raises(Exception):
preprocessing_colliding_users(self.uuid)
assert fake_kms_client.return_value.asymmetric_decrypt.call_count == 0
assert fake_kms_client.return_value.get_public_key.call_count == 1
assert fake_message_builder.call_count == 1
assert fake_message_builder.call_args.kwargs["type"] == "relocation.failed"
fake_message_builder.return_value.send_async.assert_called_once_with(
to=[self.owner.email, self.superuser.email]
)
assert preprocessing_complete_mock.call_count == 0
relocation = Relocation.objects.get(uuid=self.uuid)
assert relocation.status == Relocation.Status.FAILURE.value
assert relocation.latest_notified == Relocation.EmailKind.FAILED.value
assert relocation.failure_reason == ERR_PREPROCESSING_INTERNAL
@patch("sentry.relocation.utils.MessageBuilder")
@patch("sentry.relocation.tasks.process.validating_start.apply_async")
|
PreprocessingCollidingUsersTest
|
python
|
lepture__mistune
|
tests/test_directives.py
|
{
"start": 1211,
"end": 2294
}
|
class ____(BaseTestCase):
def test_rst_toc(self):
md = create_markdown(
escape=False,
plugins=[
RSTDirective([CustomizeTableOfContents()]),
],
)
html = md("# h1\n\n.. toc::\n")
self.assertIn('<h1 id="t-1">h1</h1>', html)
self.assertIn('<a href="#t-1">h1</a>', html)
def test_fenced_toc(self):
md = create_markdown(
escape=False,
plugins=[
FencedDirective([CustomizeTableOfContents()]),
],
)
html = md("# h1\n\n```{toc}\n```\n")
self.assertIn('<h1 id="t-1">h1</h1>', html)
self.assertIn('<a href="#t-1">h1</a>', html)
def test_colon_fenced_toc(self):
md = create_markdown(
escape=False,
plugins=[
FencedDirective([CustomizeTableOfContents()], ":"),
],
)
html = md("# h1\n\n:::{toc}\n:::\n")
self.assertIn('<h1 id="t-1">h1</h1>', html)
self.assertIn('<a href="#t-1">h1</a>', html)
|
TestCustomizeToc
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airbyte/dagster_airbyte/legacy_resources.py
|
{
"start": 1146,
"end": 1353
}
|
class ____:
def __init__(self) -> None:
self.request_cache: dict[str, Optional[Mapping[str, object]]] = {}
# Int in case we nest contexts
self.cache_enabled = 0
|
AirbyteResourceState
|
python
|
pytest-dev__pytest
|
testing/python/approx.py
|
{
"start": 38588,
"end": 39128
}
|
class ____: # incomplete
"""sequence like"""
_x: int
_y: int
_z: int
def __init__(self, x: int, y: int, z: int):
self._x, self._y, self._z = x, y, z
def __repr__(self) -> str:
return f"<MyVec3 {self._x} {self._y} {self._z}>"
def __len__(self) -> int:
return 3
def __getitem__(self, key: int) -> int:
if key == 0:
return self._x
if key == 1:
return self._y
if key == 2:
return self._z
raise IndexError(key)
|
MyVec3
|
python
|
cherrypy__cherrypy
|
cherrypy/lib/encoding.py
|
{
"start": 2302,
"end": 18361
}
|
class ____:
"""An HTTP response payload encoder."""
default_encoding = 'utf-8'
failmsg = 'Response body could not be encoded with %r.'
encoding = None
errors = 'strict'
text_only = True
add_charset = True
debug = False
def __init__(self, **kwargs):
"""Initialize HTTP response payload encoder."""
for k, v in kwargs.items():
setattr(self, k, v)
self.attempted_charsets = set()
request = cherrypy.serving.request
if request.handler is not None:
# Replace request.handler with self
if self.debug:
cherrypy.log('Replacing request.handler', 'TOOLS.ENCODE')
self.oldhandler = request.handler
request.handler = self
def encode_stream(self, encoding):
"""Encode a streaming response body.
Use a generator wrapper, and just pray it works as the stream is
being written out.
"""
if encoding in self.attempted_charsets:
return False
self.attempted_charsets.add(encoding)
def encoder(body):
for chunk in body:
if isinstance(chunk, str):
chunk = chunk.encode(encoding, self.errors)
yield chunk
self.body = encoder(self.body)
return True
def encode_string(self, encoding):
"""Encode a buffered response body."""
if encoding in self.attempted_charsets:
return False
self.attempted_charsets.add(encoding)
body = []
for chunk in self.body:
if isinstance(chunk, str):
try:
chunk = chunk.encode(encoding, self.errors)
except (LookupError, UnicodeError):
return False
body.append(chunk)
self.body = body
return True
def find_acceptable_charset(self):
"""Deduce acceptable charset for HTTP response."""
request = cherrypy.serving.request
response = cherrypy.serving.response
if self.debug:
cherrypy.log(
'response.stream %r' % response.stream,
'TOOLS.ENCODE',
)
if response.stream:
encoder = self.encode_stream
else:
encoder = self.encode_string
if 'Content-Length' in response.headers:
# Delete Content-Length header so finalize() recalcs it.
# Encoded strings may be of different lengths from their
# unicode equivalents, and even from each other. For example:
# >>> t = u"\u7007\u3040"
# >>> len(t)
# 2
# >>> len(t.encode("UTF-8"))
# 6
# >>> len(t.encode("utf7"))
# 8
del response.headers['Content-Length']
# Parse the Accept-Charset request header, and try to provide one
# of the requested charsets (in order of user preference).
encs = request.headers.elements('Accept-Charset')
charsets = [enc.value.lower() for enc in encs]
if self.debug:
cherrypy.log('charsets %s' % repr(charsets), 'TOOLS.ENCODE')
if self.encoding is not None:
# If specified, force this encoding to be used, or fail.
encoding = self.encoding.lower()
if self.debug:
cherrypy.log(
'Specified encoding %r' % encoding,
'TOOLS.ENCODE',
)
if (not charsets) or '*' in charsets or encoding in charsets:
if self.debug:
cherrypy.log(
'Attempting encoding %r' % encoding,
'TOOLS.ENCODE',
)
if encoder(encoding):
return encoding
else:
if not encs:
if self.debug:
cherrypy.log(
'Attempting default encoding %r'
% self.default_encoding,
'TOOLS.ENCODE',
)
# Any character-set is acceptable.
if encoder(self.default_encoding):
return self.default_encoding
else:
raise cherrypy.HTTPError(
500,
self.failmsg % self.default_encoding,
)
else:
for element in encs:
if element.qvalue > 0:
if element.value == '*':
# Matches any charset. Try our default.
if self.debug:
cherrypy.log(
'Attempting default encoding due '
'to %r' % element,
'TOOLS.ENCODE',
)
if encoder(self.default_encoding):
return self.default_encoding
else:
encoding = element.value
if self.debug:
cherrypy.log(
'Attempting encoding %s (qvalue >'
'0)' % element,
'TOOLS.ENCODE',
)
if encoder(encoding):
return encoding
if '*' not in charsets:
# If no "*" is present in an Accept-Charset field, then all
# character sets not explicitly mentioned get a quality
# value of 0, except for ISO-8859-1, which gets a quality
# value of 1 if not explicitly mentioned.
iso = 'iso-8859-1'
if iso not in charsets:
if self.debug:
cherrypy.log(
'Attempting ISO-8859-1 encoding',
'TOOLS.ENCODE',
)
if encoder(iso):
return iso
# No suitable encoding found.
ac = request.headers.get('Accept-Charset')
if ac is None:
msg = 'Your client did not send an Accept-Charset header.'
else:
msg = 'Your client sent this Accept-Charset header: %s.' % ac
_charsets = ', '.join(sorted(self.attempted_charsets))
msg += ' We tried these charsets: %s.' % (_charsets,)
raise cherrypy.HTTPError(406, msg)
def __call__(self, *args, **kwargs):
"""Set up encoding for the HTTP response."""
response = cherrypy.serving.response
self.body = self.oldhandler(*args, **kwargs)
self.body = prepare_iter(self.body)
ct = response.headers.elements('Content-Type')
if self.debug:
cherrypy.log(
'Content-Type: %r' % [str(h) for h in ct],
'TOOLS.ENCODE',
)
if ct and self.add_charset:
ct = ct[0]
if self.text_only:
if ct.value.lower().startswith('text/'):
if self.debug:
cherrypy.log(
'Content-Type %s starts with "text/"' % ct,
'TOOLS.ENCODE',
)
do_find = True
else:
if self.debug:
cherrypy.log(
'Not finding because Content-Type %s '
'does not start with "text/"' % ct,
'TOOLS.ENCODE',
)
do_find = False
else:
if self.debug:
cherrypy.log(
'Finding because not text_only',
'TOOLS.ENCODE',
)
do_find = True
if do_find:
# Set "charset=..." param on response Content-Type header
ct.params['charset'] = self.find_acceptable_charset()
if self.debug:
cherrypy.log(
'Setting Content-Type %s' % ct,
'TOOLS.ENCODE',
)
response.headers['Content-Type'] = str(ct)
return self.body
def prepare_iter(value):
"""Ensure response body is iterable and resolves to False when empty."""
if isinstance(value, text_or_bytes):
# strings get wrapped in a list because iterating over a single
# item list is much faster than iterating over every character
# in a long string.
if value:
value = [value]
else:
# [''] doesn't evaluate to False, so replace it with [].
value = []
# Don't use isinstance here; io.IOBase which has an ABC takes
# 1000 times as long as, say, isinstance(value, str)
elif hasattr(value, 'read'):
value = file_generator(value)
elif value is None:
value = []
return value
# GZIP
def compress(body, compress_level):
"""Compress 'body' at the given compress_level."""
import zlib
# See https://tools.ietf.org/html/rfc1952
yield b'\x1f\x8b' # ID1 and ID2: gzip marker
yield b'\x08' # CM: compression method
yield b'\x00' # FLG: none set
# MTIME: 4 bytes
yield struct.pack('<L', int(time.time()) & int('FFFFFFFF', 16))
# RFC 1952, section 2.3.1:
#
# XFL (eXtra FLags)
# These flags are available for use by specific compression
# methods. The "deflate" method (CM = 8) sets these flags as
# follows:
#
# XFL = 2 - compressor used maximum compression,
# slowest algorithm
# XFL = 4 - compressor used fastest algorithm
if compress_level == _COMPRESSION_LEVEL_BEST:
yield b'\x02' # XFL: max compression, slowest algo
elif compress_level == _COMPRESSION_LEVEL_FAST:
yield b'\x04' # XFL: min compression, fastest algo
else:
yield b'\x00' # XFL: compression unset/tradeoff
yield b'\xff' # OS: unknown
crc = zlib.crc32(b'')
size = 0
zobj = zlib.compressobj(
compress_level,
zlib.DEFLATED,
-zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL,
0,
)
for line in body:
size += len(line)
crc = zlib.crc32(line, crc)
yield zobj.compress(line)
yield zobj.flush()
# CRC32: 4 bytes
yield struct.pack('<L', crc & int('FFFFFFFF', 16))
# ISIZE: 4 bytes
yield struct.pack('<L', size & int('FFFFFFFF', 16))
def decompress(body):
"""Decompress a blob of bytes."""
import gzip
zbuf = io.BytesIO()
zbuf.write(body)
zbuf.seek(0)
zfile = gzip.GzipFile(mode='rb', fileobj=zbuf)
data = zfile.read()
zfile.close()
return data
def gzip(
compress_level=5,
mime_types=['text/html', 'text/plain'],
debug=False,
):
"""Try to gzip the response body if Content-Type in mime_types.
cherrypy.response.headers['Content-Type'] must be set to one of the
values in the mime_types arg before calling this function.
The provided list of mime-types must be of one of the following form:
* `type/subtype`
* `type/*`
* `type/*+subtype`
No compression is performed if any of the following hold:
* The client sends no Accept-Encoding request header
* No 'gzip' or 'x-gzip' is present in the Accept-Encoding header
* No 'gzip' or 'x-gzip' with a qvalue > 0 is present
* The 'identity' value is given with a qvalue > 0.
"""
request = cherrypy.serving.request
response = cherrypy.serving.response
set_vary_header(response, 'Accept-Encoding')
if not response.body:
# Response body is empty (might be a 304 for instance)
if debug:
cherrypy.log('No response body', context='TOOLS.GZIP')
return
# If returning cached content (which should already have been gzipped),
# don't re-zip.
if getattr(request, 'cached', False):
if debug:
cherrypy.log('Not gzipping cached response', context='TOOLS.GZIP')
return
acceptable = request.headers.elements('Accept-Encoding')
if not acceptable:
# If no Accept-Encoding field is present in a request,
# the server MAY assume that the client will accept any
# content coding. In this case, if "identity" is one of
# the available content-codings, then the server SHOULD use
# the "identity" content-coding, unless it has additional
# information that a different content-coding is meaningful
# to the client.
if debug:
cherrypy.log('No Accept-Encoding', context='TOOLS.GZIP')
return
ct = response.headers.get('Content-Type', '').split(';')[0]
for coding in acceptable:
if coding.value == 'identity' and coding.qvalue != 0:
if debug:
cherrypy.log(
'Non-zero identity qvalue: %s' % coding,
context='TOOLS.GZIP',
)
return
if coding.value in ('gzip', 'x-gzip'):
if coding.qvalue == 0:
if debug:
cherrypy.log(
'Zero gzip qvalue: %s' % coding,
context='TOOLS.GZIP',
)
return
if ct not in mime_types:
# If the list of provided mime-types contains tokens
# such as 'text/*' or 'application/*+xml',
# we go through them and find the most appropriate one
# based on the given content-type.
# The pattern matching is only caring about the most
# common cases, as stated above, and doesn't support
# for extra parameters.
found = False
if '/' in ct:
ct_media_type, ct_sub_type = ct.split('/')
for mime_type in mime_types:
if '/' in mime_type:
media_type, sub_type = mime_type.split('/')
if ct_media_type == media_type:
if sub_type == '*':
found = True
break
elif '+' in sub_type and '+' in ct_sub_type:
ct_left, ct_right = ct_sub_type.split('+')
left, right = sub_type.split('+')
if left == '*' and ct_right == right:
found = True
break
if not found:
if debug:
cherrypy.log(
'Content-Type %s not in mime_types %r'
% (ct, mime_types),
context='TOOLS.GZIP',
)
return
if debug:
cherrypy.log('Gzipping', context='TOOLS.GZIP')
# Return a generator that compresses the page
response.headers['Content-Encoding'] = 'gzip'
response.body = compress(response.body, compress_level)
if 'Content-Length' in response.headers:
# Delete Content-Length header so finalize() recalcs it.
del response.headers['Content-Length']
return
if debug:
cherrypy.log('No acceptable encoding found.', context='GZIP')
cherrypy.HTTPError(406, 'identity, gzip').set_response()
|
ResponseEncoder
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 970994,
"end": 971382
}
|
class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field(SocialAccount, graphql_name="node")
"""The item at the end of the edge."""
|
SocialAccountEdge
|
python
|
wandb__wandb
|
wandb/sdk/lib/retry.py
|
{
"start": 10705,
"end": 10980
}
|
class ____(abc.ABC):
"""A backoff strategy: decides whether to sleep or give up when an exception is raised."""
@abc.abstractmethod
def next_sleep_or_reraise(self, exc: Exception) -> datetime.timedelta:
raise NotImplementedError # pragma: no cover
|
Backoff
|
python
|
joke2k__faker
|
faker/providers/address/ro_RO/__init__.py
|
{
"start": 71,
"end": 9605
}
|
class ____(AddressProvider):
    """Romanian (ro_RO) address provider.

    Supplies street prefixes, street/address formats, building and
    secondary-address formats, postcodes, a list of Romanian city names,
    and county (judet) abbreviation/name pairs.
    """
    # Common Romanian street-type prefixes (Strada = street, Aleea = alley,
    # Bulevardul = boulevard, Soseaua = highway, Drumul = road).
    street_prefixes = (
        "Strada",
        "Aleea",
        "Intrarea",
        "Bulevardul",
        "Soseaua",
        "Drumul",
    )
    # Street names combine a prefix with person names (see faker templating).
    street_name_formats = (
        "{{street_prefix}} {{last_name}}",
        "{{street_prefix}} {{first_name}} {{last_name}}",
        "{{street_prefix}} {{last_name}}",
    )
    street_address_formats = (
        "{{street_name}}",
        "{{street_name}} {{building_number}}",
        "{{street_name}} {{building_number}} {{secondary_address}}",
    )
    address_formats = ("{{street_address}}\n{{city}}, {{postcode}}",)
    # '%' / '#' placeholders are replaced with random digits by numerify().
    building_number_formats = ("Nr. %#", "Nr. %##")
    # Bl. = block, Sc. = staircase, Ap. = apartment.
    secondary_address_formats = ("Bl. %# Sc. %# Ap. %##",)
    # Romanian postcodes are 6 digits; first digit is 1-9.
    postcode_formats = (
        "1#####",
        "2#####",
        "3#####",
        "4#####",
        "5#####",
        "6#####",
        "7#####",
        "8#####",
        "9#####",
    )
    city_formats = ("{{city_name}}",)
    # City and town names (ASCII transliterations, largest first, roughly).
    cities = (
        "Cluj-Napoca",
        "Timisoara",
        "Iasi",
        "Constanta",
        "Craiova",
        "Brasov",
        "Galati",
        "Ploiesti",
        "Oradea",
        "Braila",
        "Arad",
        "Pitesti",
        "Sibiu",
        "Bacau",
        "Targu Mures",
        "Baia Mare",
        "Buzau",
        "Botosani",
        "Satu Mare",
        "Suceava",
        "Ramnicu Valcea",
        "Drobeta-Turnu Severin",
        "Piatra-Neamt",
        "Targoviste",
        "Targu Jiu",
        "Focsani",
        "Tulcea",
        "Resita",
        "Slatina",
        "Bistrita",
        "Calarasi",
        "Giurgiu",
        "Deva",
        "Hunedoara",
        "Zalau",
        "Barlad",
        "Alba Iulia",
        "Sfantu Gheorghe",
        "Roman",
        "Vaslui",
        "Turda",
        "Medias",
        "Alexandria",
        "Voluntari",
        "Pipera (Voluntari)",
        "Slobozia",
        "Lugoj",
        "Medgidia",
        "Onesti",
        "Miercurea-Ciuc",
        "Petrosani",
        "Tecuci",
        "Mangalia",
        "Odorheiu Secuiesc",
        "Ramnicu Sarat",
        "Sighetu Marmatiei",
        "Campina",
        "Navodari",
        "Campulung",
        "Caracal",
        "Sacele",
        "Fagaras",
        "Dej",
        "Rosiori de Vede",
        "Mioveni",
        "Curtea de Arges",
        "Husi",
        "Reghin",
        "Sighisoara",
        "Pantelimon",
        "Pascani",
        "Oltenita",
        "Turnu Magurele",
        "Caransebes",
        "Falticeni",
        "Radauti",
        "Lupeni",
        "Dorohoi",
        "Vulcan",
        "Campia Turzii",
        "Zarnesti",
        "Borsa",
        "Popesti-Leordeni",
        "Codlea",
        "Carei",
        "Moinesti",
        "Petrila",
        "Sebes",
        "Tarnaveni",
        "Floresti",
        "Gherla",
        "Fetesti-Gara",
        "Buftea",
        "Cugir",
        "Moreni",
        "Gheorgheni",
        "Comanesti",
        "Salonta",
        "Cernavoda",
        "Targu Secuiesc",
        "Bailesti",
        "Campulung Moldovenesc",
        "Aiud",
        "Dragasani",
        "Valea Caselor (Dragasani)",
        "Bals",
        "Bocsa",
        "Motru",
        "Corabia",
        "Bragadiru",
        "Urziceni",
        "Rasnov",
        "Rasnov Romacril",
        "Buhusi",
        "Zimnicea",
        "Marghita",
        "Mizil",
        "Cisnadie",
        "Targu Neamt",
        "Calafat",
        "Vatra Dornei",
        "Adjud",
        "Gaesti",
        "Tandarei",
        "Gura Humorului",
        "Chitila",
        "Viseu de Sus",
        "Otopeni",
        "Ludus",
        "Brad",
        "Dragu-Brad",
        "Valu lui Traian",
        "Cumpana",
        "Sannicolau Mare",
        "Valenii de Munte",
        "Jilava",
        "Dabuleni",
        "Filiasi",
        "Blaj",
        "Ovidiu",
        "Simleu Silvaniei",
        "Matca",
        "Pecica",
        "Rovinari",
        "Videle",
        "Baicoi",
        "Pucioasa",
        "Jimbolia",
        "Baia Sprie",
        "Targu Frumos",
        "Vicovu de Sus",
        "Orsova",
        "Sinaia",
        "Negresti-Oas",
        "Beius",
        "Santana",
        "Pechea",
        "Simeria",
        "Boldesti-Scaeni",
        "Poienile de sub Munte",
        "Valea lui Mihai",
        "Covasna",
        "Targu Ocna",
        "Toplita",
        "Sovata",
        "Otelu Rosu",
        "Oravita",
        "Moisei",
        "Harsova",
        "Murfatlar",
        "Beclean",
        "Poiana Mare",
        "Huedin",
        "Babadag",
        "Marasesti",
        "Topoloveni",
        "Sangeorgiu de Mures",
        "Jibou",
        "Sabaoani",
        "Hateg",
        "Avrig",
        "Darmanesti",
        "Marginea",
        "Moldova Veche",
        "Ineu",
        "Bolintin-Vale",
        "Mihail Kogalniceanu",
        "Macin",
        "Tomesti",
        "Nasaud",
        "Uricani",
        "Rosu",
        "Calan",
        "Borcea",
        "Afumati",
        "Domnesti",
        "Draganesti-Olt",
        "Cristuru Secuiesc",
        "1 Decembrie",
        "Lumina",
        "Fetesti",
        "Mogosoaia",
        "Modelu",
        "Dumbravita",
        "Seini",
        "Alesd",
        "Sangeorz-Bai",
        "Curtici",
        "Darabani",
        "Nadlac",
        "Victoria",
        "Amara",
        "Branesti",
        "Harlau",
        "Lipova",
        "Techirghiol",
        "Agnita",
        "Sacueni",
        "Titu",
        "Siret",
        "Segarcea",
        "Odobesti",
        "Podu Iloaiei",
        "Ocna Mures",
        "Urlati",
        "Strehaia",
        "Tasnad",
        "Cajvana",
        "Tuzla",
        "Sadova",
        "Vlahita",
        "Stei",
        "Diosig",
        "Cobadin",
        "Gilau",
        "Vladimirescu",
        "Dancu",
        "Bumbesti-Jiu",
        "Busteni",
        "Peretu",
        "Cudalbi",
        "Bosanci",
        "Balotesti",
        "Lunca Cetatuii",
        "Dragalina",
        "Fieni",
        "Chisineu-Cris",
        "Balan",
        "Sandominic",
        "Strejnicu",
        "Baciu",
        "Fundulea",
        "Remetea",
        "Fagetel (Remetea)",
        "Ianca",
        "Roseti",
        "Breaza de Sus",
        "Cornetu",
        "Insuratei",
        "Apahida",
        "Berceni",
        "Vicovu de Jos",
        "Savinesti (Poiana Teiului)",
        "Savinesti",
        "Teius",
        "Barbulesti",
        "Plosca",
        "Toflea",
        "Magurele",
        "Feldru",
        "Anina",
        "Negresti",
        "Valea Mare (Negresti)",
        "Peris",
        "Fundeni",
        "Giroc",
        "Baile Borsa",
        "Oituz",
        "Rucar",
        "Curcani",
        "Babeni",
        "Valea Mare (Babeni)",
        "Rodna",
        "Deta",
        "Ruscova",
        "Intorsura Buzaului",
        "Pancota",
        "Glina",
        "Talmaciu",
        "Copsa Mica",
        "Motatei",
        "Gugesti",
        "Schela Cladovei",
        "Sancraiu de Mures",
        "Iernut",
        "Targu Lapus",
        "Maieru",
        "Prejmer",
        "Pogoanele",
        "Dobroesti",
        "Baraolt",
        "Arbore",
        "Homocea",
        "Corund",
        "Tufesti",
        "Giarmata",
        "Baia",
        "Dumbraveni",
        "Eforie Nord",
        "Horodnic de Sus",
        "Greci",
        "Tudora",
        "Straja",
        "Rasinari",
        "Sebis",
        "Raducaneni",
        "Siria",
        "Paunesti",
        "Saveni",
        "Tunari",
    )
    # Counties (judete) as (abbreviation, full name) pairs, with diacritics.
    states: Tuple[Tuple[str, str], ...] = (
        ("AB", "Alba"),
        ("AG", "Argeș"),
        ("AR", "Arad"),
        ("B", "București"),
        ("BC", "Bacău"),
        ("BH", "Bihor"),
        ("BN", "Bistrița-Năsăud"),
        ("BR", "Brăila"),
        ("BT", "Botoșani"),
        ("BV", "Brașov"),
        ("BZ", "Buzău"),
        ("CJ", "Cluj"),
        ("CL", "Călărași"),
        ("CS", "Caraș Severin"),
        ("CT", "Constanța"),
        ("CV", "Covasna"),
        ("DB", "Dâmbovița"),
        ("DJ", "Dolj"),
        ("GJ", "Gorj"),
        ("GL", "Galați"),
        ("GR", "Giurgiu"),
        ("HD", "Hunedoara"),
        ("HR", "Harghita"),
        ("IF", "Ilfov"),
        ("IL", "Ialomița"),
        ("IS", "Iași"),
        ("MH", "Mehedinți"),
        ("MM", "Maramureș"),
        ("MS", "Mureș"),
        ("NT", "Neamț"),
        ("OT", "Olt"),
        ("PH", "Prahova"),
        ("SB", "Sibiu"),
        ("SJ", "Sălaj"),
        ("SM", "Satu Mare"),
        ("SV", "Suceava"),
        ("TL", "Tulcea"),
        ("TM", "Timiș"),
        ("TR", "Teleorman"),
        ("VL", "Vâlcea"),
        ("VN", "Vrancea"),
        ("VS", "Vaslui"),
    )
    def street_prefix(self) -> str:
        """
        :example: 'Strada'
        """
        return self.random_element(self.street_prefixes)
    def secondary_address(self) -> str:
        """
        :example: 'Bl. 123 Sc. 2 Ap. 15'
        """
        return self.numerify(self.random_element(self.secondary_address_formats))
    def city_name(self) -> str:
        # Pick a random city from the ``cities`` tuple above.
        return self.random_element(self.cities)
    def city_with_postcode(self) -> str:
        # "postcode city" string, e.g. "123456 Cluj-Napoca".
        return self.postcode() + " " + self.random_element(self.cities)
    def administrative_unit(self) -> str:
        """
        :example: u'Timiș'
        """
        # Index 1 of the (abbr, name) pair is the full county name.
        return self.random_element(self.states)[1]  # type: ignore
    # Alias kept for API compatibility with other locale providers.
    state = administrative_unit
    def state_abbr(self) -> str:
        """
        :example: u'TM'
        """
        # Index 0 of the (abbr, name) pair is the county abbreviation.
        return self.random_element(self.states)[0]  # type: ignore
|
Provider
|
python
|
google__pytype
|
pytype/typegraph/typegraph_serializer_test.py
|
{
"start": 165,
"end": 1464
}
|
class ____(test_base.BaseTest):
    """Tests for encoding/decoding a cfg.Program via typegraph_serializer."""

    def test_basic(self):
        """Encoding a program preserves node, variable and binding counts."""
        context = test_utils.make_context(self.options)
        # Max depth is arbitrarily chosen from analyze.py.
        location, definitions = context.vm.run_program(
            src="", filename="", maximum_depth=3)
        context.vm.analyze(location, definitions, maximum_depth=3)
        program = context.program
        encoder = typegraph_serializer.TypegraphEncoder()
        encoded = encoder.default(program)
        self.assertEqual(len(program.cfg_nodes), len(encoded["cfg_nodes"]))
        self.assertEqual(len(program.variables), len(encoded["variables"]))
        # cfg.Program doesn't store all bindings directly, like it does for
        # nodes and variables, so just check against the next binding ID.
        self.assertEqual(program.next_binding_id, len(encoded["bindings"]))
        self.assertEqual(program.entrypoint.id, encoded["entrypoint"])

    def test_deserialize(self):
        """A round trip through encode/decode yields a SerializedProgram."""
        context = test_utils.make_context(self.options)
        # Max depth is arbitrarily chosen from analyze.py.
        location, definitions = context.vm.run_program(
            src="", filename="", maximum_depth=3)
        context.vm.analyze(location, definitions, maximum_depth=3)
        serialized = typegraph_serializer.encode_program(context.program)
        decoded = typegraph_serializer.decode_program(serialized)
        self.assertIsInstance(decoded, typegraph_serializer.SerializedProgram)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    test_base.main()
|
TypegraphSerializerTest
|
python
|
pypa__hatch
|
tests/publish/plugin/test_interface.py
|
{
"start": 79,
"end": 214
}
|
class ____(PublisherInterface):  # no cov
    """Do-nothing publisher plugin used as a test double."""

    PLUGIN_NAME = "mock"

    def publish(self, artifacts, options):
        """Accept any artifacts and options without publishing anything."""
|
MockPublisher
|
python
|
oauthlib__oauthlib
|
tests/oauth1/rfc5849/test_signatures.py
|
{
"start": 1958,
"end": 36119
}
|
class ____(TestCase):
"""
Unit tests for the oauthlib/oauth1/rfc5849/signature.py module.
The tests in this class are organised into sections, to test the
functions relating to:
- Signature base string calculation
- HMAC-based signature methods
- RSA-based signature methods
- PLAINTEXT signature method
Each section is separated by a comment beginning with "====".
Those comments have been formatted to remain visible when the code is
collapsed using PyCharm's code folding feature. That is, those section
heading comments do not have any other comment lines around it, so they
don't get collapsed when the contents of the class is collapsed. While
there is a "Sequential comments" option in the code folding configuration,
by default they are folded.
They all use some/all of the example test vector, defined in the first
section below.
"""
# ==== Example test vector =======================================
eg_signature_base_string = (
'POST&http%3A%2F%2Fexample.com%2Frequest&a2%3Dr%2520b%26a3%3D2%2520q'
'%26a3%3Da%26b5%3D%253D%25253D%26c%2540%3D%26c2%3D%26oauth_consumer_'
'key%3D9djdj82h48djs9d2%26oauth_nonce%3D7d8f3e4a%26oauth_signature_m'
'ethod%3DHMAC-SHA1%26oauth_timestamp%3D137131201%26oauth_token%3Dkkk'
'9d7dh3k39sjv7'
)
# The _signature base string_ above is copied from the end of
# RFC 5849 section 3.4.1.1.
#
# It corresponds to the three values below.
#
# The _normalized parameters_ below is copied from the end of
# RFC 5849 section 3.4.1.3.2.
eg_http_method = 'POST'
eg_base_string_uri = 'http://example.com/request'
eg_normalized_parameters = (
'a2=r%20b&a3=2%20q&a3=a&b5=%3D%253D&c%40=&c2=&oauth_consumer_key=9dj'
'dj82h48djs9d2&oauth_nonce=7d8f3e4a&oauth_signature_method=HMAC-SHA1'
'&oauth_timestamp=137131201&oauth_token=kkk9d7dh3k39sjv7'
)
# The above _normalized parameters_ corresponds to the parameters below.
#
# The parameters below is copied from the table at the end of
# RFC 5849 section 3.4.1.3.1.
eg_params = [
('b5', '=%3D'),
('a3', 'a'),
('c@', ''),
('a2', 'r b'),
('oauth_consumer_key', '9djdj82h48djs9d2'),
('oauth_token', 'kkk9d7dh3k39sjv7'),
('oauth_signature_method', 'HMAC-SHA1'),
('oauth_timestamp', '137131201'),
('oauth_nonce', '7d8f3e4a'),
('c2', ''),
('a3', '2 q'),
]
# The above parameters correspond to parameters from the three values below.
#
# These come from RFC 5849 section 3.4.1.3.1.
eg_uri_query = 'b5=%3D%253D&a3=a&c%40=&a2=r%20b'
eg_body = 'c2&a3=2+q'
eg_authorization_header = (
'OAuth realm="Example", oauth_consumer_key="9djdj82h48djs9d2",'
' oauth_token="kkk9d7dh3k39sjv7", oauth_signature_method="HMAC-SHA1",'
' oauth_timestamp="137131201", oauth_nonce="7d8f3e4a",'
' oauth_signature="djosJKDKJSD8743243%2Fjdk33klY%3D"'
)
# ==== Signature base string calculating function tests ==========
def test_signature_base_string(self):
"""
Test the ``signature_base_string`` function.
"""
# Example from RFC 5849
self.assertEqual(
self.eg_signature_base_string,
signature_base_string(
self.eg_http_method,
self.eg_base_string_uri,
self.eg_normalized_parameters))
# Test method is always uppercase in the signature base string
for test_method in ['POST', 'Post', 'pOST', 'poST', 'posT', 'post']:
self.assertEqual(
self.eg_signature_base_string,
signature_base_string(
test_method,
self.eg_base_string_uri,
self.eg_normalized_parameters))
def test_base_string_uri(self):
"""
Test the ``base_string_uri`` function.
"""
# ----------------
# Examples from the OAuth 1.0a specification: RFC 5849.
# First example from RFC 5849 section 3.4.1.2.
#
# GET /r%20v/X?id=123 HTTP/1.1
# Host: EXAMPLE.COM:80
#
# Note: there is a space between "r" and "v"
self.assertEqual(
'http://example.com/r%20v/X',
base_string_uri('http://EXAMPLE.COM:80/r v/X?id=123'))
# Second example from RFC 5849 section 3.4.1.2.
#
# GET /?q=1 HTTP/1.1
# Host: www.example.net:8080
self.assertEqual(
'https://www.example.net:8080/',
base_string_uri('https://www.example.net:8080/?q=1'))
# ----------------
# Scheme: will always be in lowercase
for uri in [
'foobar://www.example.com',
'FOOBAR://www.example.com',
'Foobar://www.example.com',
'FooBar://www.example.com',
'fOObAR://www.example.com',
]:
self.assertEqual('foobar://www.example.com/', base_string_uri(uri))
# ----------------
# Host: will always be in lowercase
for uri in [
'http://www.example.com',
'http://WWW.EXAMPLE.COM',
'http://www.EXAMPLE.com',
'http://wWW.eXAMPLE.cOM',
]:
self.assertEqual('http://www.example.com/', base_string_uri(uri))
# base_string_uri has an optional host parameter that can be used to
# override the URI's netloc (or used as the host if there is no netloc)
# The "netloc" refers to the "hostname[:port]" part of the URI.
self.assertEqual(
'http://actual.example.com/',
base_string_uri('http://IGNORE.example.com', 'ACTUAL.example.com'))
self.assertEqual(
'http://override.example.com/path',
base_string_uri('http:///path', 'OVERRIDE.example.com'))
# ----------------
# Host: valid host allows for IPv4 and IPv6
self.assertEqual(
'https://192.168.0.1/',
base_string_uri('https://192.168.0.1')
)
self.assertEqual(
'https://192.168.0.1:13000/',
base_string_uri('https://192.168.0.1:13000')
)
self.assertEqual(
'https://[123:db8:fd00:1000::5]:13000/',
base_string_uri('https://[123:db8:fd00:1000::5]:13000')
)
self.assertEqual(
'https://[123:db8:fd00:1000::5]/',
base_string_uri('https://[123:db8:fd00:1000::5]')
)
# ----------------
# Port: default ports always excluded; non-default ports always included
self.assertEqual(
"http://www.example.com/",
base_string_uri("http://www.example.com:80/")) # default port
self.assertEqual(
"https://www.example.com/",
base_string_uri("https://www.example.com:443/")) # default port
self.assertEqual(
"https://www.example.com:999/",
base_string_uri("https://www.example.com:999/")) # non-default port
self.assertEqual(
"http://www.example.com:443/",
base_string_uri("HTTP://www.example.com:443/")) # non-default port
self.assertEqual(
"https://www.example.com:80/",
base_string_uri("HTTPS://www.example.com:80/")) # non-default port
self.assertEqual(
"http://www.example.com/",
base_string_uri("http://www.example.com:/")) # colon but no number
# ----------------
# Paths
self.assertEqual(
'http://www.example.com/',
base_string_uri('http://www.example.com')) # no slash
self.assertEqual(
'http://www.example.com/',
base_string_uri('http://www.example.com/')) # with slash
self.assertEqual(
'http://www.example.com:8080/',
base_string_uri('http://www.example.com:8080')) # no slash
self.assertEqual(
'http://www.example.com:8080/',
base_string_uri('http://www.example.com:8080/')) # with slash
self.assertEqual(
'http://www.example.com/foo/bar',
base_string_uri('http://www.example.com/foo/bar')) # no slash
self.assertEqual(
'http://www.example.com/foo/bar/',
base_string_uri('http://www.example.com/foo/bar/')) # with slash
# ----------------
# Query parameters & fragment IDs do not appear in the base string URI
self.assertEqual(
'https://www.example.com/path',
base_string_uri('https://www.example.com/path?foo=bar'))
self.assertEqual(
'https://www.example.com/path',
base_string_uri('https://www.example.com/path#fragment'))
# ----------------
# Percent encoding
#
# RFC 5849 does not specify what characters are percent encoded, but in
# one of its examples it shows spaces being percent encoded.
# So it is assumed that spaces must be encoded, but we don't know what
# other characters are encoded or not.
self.assertEqual(
'https://www.example.com/hello%20world',
base_string_uri('https://www.example.com/hello world'))
self.assertEqual(
'https://www.hello%20world.com/',
base_string_uri('https://www.hello world.com/'))
# ----------------
# Errors detected
# base_string_uri expects a string
self.assertRaises(ValueError, base_string_uri, None)
self.assertRaises(ValueError, base_string_uri, 42)
self.assertRaises(ValueError, base_string_uri, b'http://example.com')
# Missing scheme is an error
self.assertRaises(ValueError, base_string_uri, '')
self.assertRaises(ValueError, base_string_uri, ' ') # single space
self.assertRaises(ValueError, base_string_uri, 'http')
self.assertRaises(ValueError, base_string_uri, 'example.com')
# Missing host is an error
self.assertRaises(ValueError, base_string_uri, 'http:')
self.assertRaises(ValueError, base_string_uri, 'http://')
self.assertRaises(ValueError, base_string_uri, 'http://:8080')
# Port is not a valid TCP/IP port number
self.assertRaises(ValueError, base_string_uri, 'http://eg.com:0')
self.assertRaises(ValueError, base_string_uri, 'http://eg.com:-1')
self.assertRaises(ValueError, base_string_uri, 'http://eg.com:65536')
self.assertRaises(ValueError, base_string_uri, 'http://eg.com:3.14')
self.assertRaises(ValueError, base_string_uri, 'http://eg.com:BAD')
self.assertRaises(ValueError, base_string_uri, 'http://eg.com:NaN')
self.assertRaises(ValueError, base_string_uri, 'http://eg.com: ')
self.assertRaises(ValueError, base_string_uri, 'http://eg.com:42:42')
def test_collect_parameters(self):
"""
Test the ``collect_parameters`` function.
"""
# ----------------
# Examples from the OAuth 1.0a specification: RFC 5849.
params = collect_parameters(
self.eg_uri_query,
self.eg_body,
{'Authorization': self.eg_authorization_header})
# Check params contains the same pairs as control_params, ignoring order
self.assertEqual(sorted(self.eg_params), sorted(params))
# ----------------
# Examples with no parameters
self.assertEqual([], collect_parameters('', '', {}))
self.assertEqual([], collect_parameters(None, None, None))
self.assertEqual([], collect_parameters())
self.assertEqual([], collect_parameters(headers={'foo': 'bar'}))
# ----------------
# Test effect of exclude_oauth_signature"
no_sig = collect_parameters(
headers={'authorization': self.eg_authorization_header})
with_sig = collect_parameters(
headers={'authorization': self.eg_authorization_header},
exclude_oauth_signature=False)
self.assertEqual(sorted(no_sig + [('oauth_signature',
'djosJKDKJSD8743243/jdk33klY=')]),
sorted(with_sig))
# ----------------
# Test effect of "with_realm" as well as header name case insensitivity
no_realm = collect_parameters(
headers={'authorization': self.eg_authorization_header},
with_realm=False)
with_realm = collect_parameters(
headers={'AUTHORIZATION': self.eg_authorization_header},
with_realm=True)
self.assertEqual(sorted(no_realm + [('realm', 'Example')]),
sorted(with_realm))
def test_normalize_parameters(self):
"""
Test the ``normalize_parameters`` function.
"""
# headers = {'Authorization': self.authorization_header}
# parameters = collect_parameters(
# uri_query=self.uri_query, body=self.body, headers=headers)
# normalized = normalize_parameters(parameters)
#
# # Unicode everywhere and always
# self.assertIsInstance(normalized, str)
#
# # Lets see if things are in order
# # check to see that querystring keys come in alphanumeric order:
# querystring_keys = ['a2', 'a3', 'b5', 'oauth_consumer_key',
# 'oauth_nonce', 'oauth_signature_method',
# 'oauth_timestamp', 'oauth_token']
# index = -1 # start at -1 because the 'a2' key starts at index 0
# for key in querystring_keys:
# self.assertGreater(normalized.index(key), index)
# index = normalized.index(key)
# ----------------
# Example from the OAuth 1.0a specification: RFC 5849.
# Params from end of section 3.4.1.3.1. and the expected
# normalized parameters from the end of section 3.4.1.3.2.
self.assertEqual(self.eg_normalized_parameters,
normalize_parameters(self.eg_params))
# ==== HMAC-based signature method tests =========================
hmac_client = MockClient(
client_secret='ECrDNoq1VYzzzzzzzzzyAK7TwZNtPnkqatqZZZZ',
resource_owner_secret='just-a-string asdasd')
# The following expected signatures were calculated by putting the value of
# the eg_signature_base_string in a file ("base-str.txt") and running:
#
# echo -n `cat base-str.txt` | openssl dgst -hmac KEY -sha1 -binary| base64
#
# Where the KEY is the concatenation of the client_secret, an ampersand and
# the resource_owner_secret. But those values need to be encoded properly,
# so the spaces in the resource_owner_secret must be represented as '%20'.
#
# Note: the "echo -n" is needed to remove the last newline character, which
# most text editors will add.
expected_signature_hmac_sha1 = \
'wsdNmjGB7lvis0UJuPAmjvX/PXw='
expected_signature_hmac_sha256 = \
'wdfdHUKXHbOnOGZP8WFAWMSAmWzN3EVBWWgXGlC/Eo4='
expected_signature_hmac_sha512 = (
'u/vlyZFDxOWOZ9UUXwRBJHvq8/T4jCA74ocRmn2ECnjUBTAeJiZIRU8hDTjS88Tz'
'1fGONffMpdZxUkUTW3k1kg=='
)
def test_sign_hmac_sha1_with_client(self):
"""
Test sign and verify with HMAC-SHA1.
"""
self.assertEqual(
self.expected_signature_hmac_sha1,
sign_hmac_sha1_with_client(self.eg_signature_base_string,
self.hmac_client))
self.assertTrue(verify_hmac_sha1(
MockRequest('POST',
'http://example.com/request',
self.eg_params,
self.expected_signature_hmac_sha1),
self.hmac_client.client_secret,
self.hmac_client.resource_owner_secret))
def test_sign_hmac_sha256_with_client(self):
"""
Test sign and verify with HMAC-SHA256.
"""
self.assertEqual(
self.expected_signature_hmac_sha256,
sign_hmac_sha256_with_client(self.eg_signature_base_string,
self.hmac_client))
self.assertTrue(verify_hmac_sha256(
MockRequest('POST',
'http://example.com/request',
self.eg_params,
self.expected_signature_hmac_sha256),
self.hmac_client.client_secret,
self.hmac_client.resource_owner_secret))
def test_sign_hmac_sha512_with_client(self):
"""
Test sign and verify with HMAC-SHA512.
"""
self.assertEqual(
self.expected_signature_hmac_sha512,
sign_hmac_sha512_with_client(self.eg_signature_base_string,
self.hmac_client))
self.assertTrue(verify_hmac_sha512(
MockRequest('POST',
'http://example.com/request',
self.eg_params,
self.expected_signature_hmac_sha512),
self.hmac_client.client_secret,
self.hmac_client.resource_owner_secret))
def test_hmac_false_positives(self):
"""
Test verify_hmac-* functions will correctly detect invalid signatures.
"""
_ros = self.hmac_client.resource_owner_secret
for functions in [
(sign_hmac_sha1_with_client, verify_hmac_sha1),
(sign_hmac_sha256_with_client, verify_hmac_sha256),
(sign_hmac_sha512_with_client, verify_hmac_sha512),
]:
signing_function = functions[0]
verify_function = functions[1]
good_signature = \
signing_function(
self.eg_signature_base_string,
self.hmac_client)
bad_signature_on_different_value = \
signing_function(
'not the signature base string',
self.hmac_client)
bad_signature_produced_by_different_client_secret = \
signing_function(
self.eg_signature_base_string,
MockClient(client_secret='wrong-secret',
resource_owner_secret=_ros))
bad_signature_produced_by_different_resource_owner_secret = \
signing_function(
self.eg_signature_base_string,
MockClient(client_secret=self.hmac_client.client_secret,
resource_owner_secret='wrong-secret'))
bad_signature_produced_with_no_resource_owner_secret = \
signing_function(
self.eg_signature_base_string,
MockClient(client_secret=self.hmac_client.client_secret))
bad_signature_produced_with_no_client_secret = \
signing_function(
self.eg_signature_base_string,
MockClient(resource_owner_secret=_ros))
self.assertTrue(verify_function(
MockRequest('POST',
'http://example.com/request',
self.eg_params,
good_signature),
self.hmac_client.client_secret,
self.hmac_client.resource_owner_secret))
for bad_signature in [
'',
'ZG9uJ3QgdHJ1c3QgbWUK', # random base64 encoded value
'altérer', # value with a non-ASCII character in it
bad_signature_on_different_value,
bad_signature_produced_by_different_client_secret,
bad_signature_produced_by_different_resource_owner_secret,
bad_signature_produced_with_no_resource_owner_secret,
bad_signature_produced_with_no_client_secret,
]:
self.assertFalse(verify_function(
MockRequest('POST',
'http://example.com/request',
self.eg_params,
bad_signature),
self.hmac_client.client_secret,
self.hmac_client.resource_owner_secret))
# ==== RSA-based signature methods tests =========================
rsa_private_client = MockClient(rsa_key='''
-----BEGIN RSA PRIVATE KEY-----
MIICXgIBAAKBgQDk1/bxyS8Q8jiheHeYYp/4rEKJopeQRRKKpZI4s5i+UPwVpupG
AlwXWfzXwSMaKPAoKJNdu7tqKRniqst5uoHXw98gj0x7zamu0Ck1LtQ4c7pFMVah
5IYGhBi2E9ycNS329W27nJPWNCbESTu7snVlG8V8mfvGGg3xNjTMO7IdrwIDAQAB
AoGBAOQ2KuH8S5+OrsL4K+wfjoCi6MfxCUyqVU9GxocdM1m30WyWRFMEz2nKJ8fR
p3vTD4w8yplTOhcoXdQZl0kRoaDzrcYkm2VvJtQRrX7dKFT8dR8D/Tr7dNQLOXfC
DY6xveQczE7qt7Vk7lp4FqmxBsaaEuokt78pOOjywZoInjZhAkEA9wz3zoZNT0/i
rf6qv2qTIeieUB035N3dyw6f1BGSWYaXSuerDCD/J1qZbAPKKhyHZbVawFt3UMhe
542UftBaxQJBAO0iJy1I8GQjGnS7B3yvyH3CcLYGy296+XO/2xKp/d/ty1OIeovx
C60pLNwuFNF3z9d2GVQAdoQ89hUkOtjZLeMCQQD0JO6oPHUeUjYT+T7ImAv7UKVT
Suy30sKjLzqoGw1kR+wv7C5PeDRvscs4wa4CW9s6mjSrMDkDrmCLuJDtmf55AkEA
kmaMg2PNrjUR51F0zOEFycaaqXbGcFwe1/xx9zLmHzMDXd4bsnwt9kk+fe0hQzVS
JzatanQit3+feev1PN3QewJAWv4RZeavEUhKv+kLe95Yd0su7lTLVduVgh4v5yLT
Ga6FHdjGPcfajt+nrpB1n8UQBEH9ZxniokR/IPvdMlxqXA==
-----END RSA PRIVATE KEY-----
''')
rsa_public_client = MockClient(rsa_key='''
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBAOTX9vHJLxDyOKF4d5hin/isQomil5BFEoqlkjizmL5Q/BWm6kYCXBdZ
/NfBIxoo8Cgok127u2opGeKqy3m6gdfD3yCPTHvNqa7QKTUu1DhzukUxVqHkhgaE
GLYT3Jw1Lfb1bbuck9Y0JsRJO7uydWUbxXyZ+8YaDfE2NMw7sh2vAgMBAAE=
-----END RSA PUBLIC KEY-----
''')
# The above private key was generated using:
# $ openssl genrsa -out example.pvt 1024
# $ chmod 600 example.pvt
# Public key was extract from it using:
# $ ssh-keygen -e -m pem -f example.pvt
# PEM encoding requires the key to be concatenated with linebreaks.
# The following expected signatures were calculated by putting the private
# key in a file (test.pvt) and the value of sig_base_str_rsa in another file
# ("base-str.txt") and running:
#
# echo -n `cat base-str.txt` | openssl dgst -sha1 -sign test.pvt| base64
#
# Note: the "echo -n" is needed to remove the last newline character, which
# most text editors will add.
expected_signature_rsa_sha1 = (
'mFY2KOEnlYWsTvUA+5kxuBIcvBYXu+ljw9ttVJQxKduMueGSVPCB1tK1PlqVLK738'
'HK0t19ecBJfb6rMxUwrriw+MlBO+jpojkZIWccw1J4cAb4qu4M81DbpUAq4j/1w/Q'
'yTR4TWCODlEfN7Zfgy8+pf+TjiXfIwRC1jEWbuL1E='
)
expected_signature_rsa_sha256 = (
'jqKl6m0WS69tiVJV8ZQ6aQEfJqISoZkiPBXRv6Al2+iFSaDpfeXjYm+Hbx6m1azR'
'drZ/35PM3cvuid3LwW/siAkzb0xQcGnTyAPH8YcGWzmnKGY7LsB7fkqThchNxvRK'
'/N7s9M1WMnfZZ+1dQbbwtTs1TG1+iexUcV7r3M7Heec='
)
expected_signature_rsa_sha512 = (
'jL1CnjlsNd25qoZVHZ2oJft47IRYTjpF5CvCUjL3LY0NTnbEeVhE4amWXUFBe9GL'
'DWdUh/79ZWNOrCirBFIP26cHLApjYdt4ZG7EVK0/GubS2v8wT1QPRsog8zyiMZkm'
'g4JXdWCGXG8YRvRJTg+QKhXuXwS6TcMNakrgzgFIVhA='
)
def test_sign_rsa_sha1_with_client(self):
"""
Test sign and verify with RSA-SHA1.
"""
self.assertEqual(
self.expected_signature_rsa_sha1,
sign_rsa_sha1_with_client(self.eg_signature_base_string,
self.rsa_private_client))
self.assertTrue(verify_rsa_sha1(
MockRequest('POST',
'http://example.com/request',
self.eg_params,
self.expected_signature_rsa_sha1),
self.rsa_public_client.rsa_key))
def test_sign_rsa_sha256_with_client(self):
"""
Test sign and verify with RSA-SHA256.
"""
self.assertEqual(
self.expected_signature_rsa_sha256,
sign_rsa_sha256_with_client(self.eg_signature_base_string,
self.rsa_private_client))
self.assertTrue(verify_rsa_sha256(
MockRequest('POST',
'http://example.com/request',
self.eg_params,
self.expected_signature_rsa_sha256),
self.rsa_public_client.rsa_key))
def test_sign_rsa_sha512_with_client(self):
"""
Test sign and verify with RSA-SHA512.
"""
self.assertEqual(
self.expected_signature_rsa_sha512,
sign_rsa_sha512_with_client(self.eg_signature_base_string,
self.rsa_private_client))
self.assertTrue(verify_rsa_sha512(
MockRequest('POST',
'http://example.com/request',
self.eg_params,
self.expected_signature_rsa_sha512),
self.rsa_public_client.rsa_key))
def test_rsa_false_positives(self):
"""
Test verify_rsa-* functions will correctly detect invalid signatures.
"""
another_client = MockClient(rsa_key='''
-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQDZcD/1OZNJJ6Y3QZM16Z+O7fkD9kTIQuT2BfpAOUvDfxzYhVC9
TNmSDHCQhr+ClutyolBk5jTE1/FXFUuHoPsTrkI7KQFXPP834D4gnSY9jrAiUJHe
DVF6wXNuS7H4Ueh16YPjUxgLLRh/nn/JSEj98gsw+7DP01OWMfWS99S7eQIDAQAB
AoGBALsQZRXVyK7BG7CiC8HwEcNnXDpaXmZjlpNKJTenk1THQMvONd4GBZAuf5D3
PD9fE4R1u/ByVKecmBaxTV+L0TRQfD8K/nbQe0SKRQIkLI2ymLJKC/eyw5iTKT0E
+BS6wYpVd+mfcqgvpHOYpUmz9X8k/eOa7uslFmvt+sDb5ZcBAkEA+++SRqqUxFEG
s/ZWAKw9p5YgkeVUOYVUwyAeZ97heySrjVzg1nZ6v6kv7iOPi9KOEpaIGPW7x1K/
uQuSt4YEqQJBANzyNqZTTPpv7b/R8ABFy0YMwPVNt3b1GOU1Xxl6iuhH2WcHuueo
UB13JHoZCMZ7hsEqieEz6uteUjdRzRPKclECQFNhVK4iop3emzNQYeJTHwyp+RmQ
JrHq2MTDioyiDUouNsDQbnFMQQ/RtNVB265Q/0hTnbN1ELLFRkK9+87VghECQQC9
hacLFPk6+TffCp3sHfI3rEj4Iin1iFhKhHWGzW7JwJfjoOXaQK44GDLZ6Q918g+t
MmgDHR2tt8KeYTSgfU+BAkBcaVF91EQ7VXhvyABNYjeYP7lU7orOgdWMa/zbLXSU
4vLsK1WOmwPY9zsXpPkilqszqcru4gzlG462cSbEdAW9
-----END RSA PRIVATE KEY-----
''')
for functions in [
(sign_rsa_sha1_with_client, verify_rsa_sha1),
(sign_rsa_sha256_with_client, verify_rsa_sha256),
(sign_rsa_sha512_with_client, verify_rsa_sha512),
]:
signing_function = functions[0]
verify_function = functions[1]
good_signature = \
signing_function(self.eg_signature_base_string,
self.rsa_private_client)
bad_signature_on_different_value = \
signing_function('wrong value signed', self.rsa_private_client)
bad_signature_produced_by_different_private_key = \
signing_function(self.eg_signature_base_string, another_client)
self.assertTrue(verify_function(
MockRequest('POST',
'http://example.com/request',
self.eg_params,
good_signature),
self.rsa_public_client.rsa_key))
for bad_signature in [
'',
'ZG9uJ3QgdHJ1c3QgbWUK', # random base64 encoded value
'altérer', # value with a non-ASCII character in it
bad_signature_on_different_value,
bad_signature_produced_by_different_private_key,
]:
self.assertFalse(verify_function(
MockRequest('POST',
'http://example.com/request',
self.eg_params,
bad_signature),
self.rsa_public_client.rsa_key))
def test_rsa_bad_keys(self):
"""
Testing RSA sign and verify with bad key values produces errors.
This test is useful for coverage tests, since it runs the code branches
that deal with error situations.
"""
# Signing needs a private key
for bad_value in [None, '']:
self.assertRaises(ValueError,
sign_rsa_sha1_with_client,
self.eg_signature_base_string,
MockClient(rsa_key=bad_value))
self.assertRaises(InvalidKeyError,
sign_rsa_sha1_with_client,
self.eg_signature_base_string,
MockClient(rsa_key='foobar'))
self.assertRaises(AttributeError,
sign_rsa_sha1_with_client,
self.eg_signature_base_string,
self.rsa_public_client) # public key doesn't sign
# Verify needs a public key
for bad_value in [None, '', 'foobar', self.rsa_private_client.rsa_key]:
self.assertRaises(TypeError,
verify_rsa_sha1,
MockRequest('POST',
'http://example.com/request',
self.eg_params,
self.expected_signature_rsa_sha1),
MockClient(rsa_key=bad_value))
# For completeness, this text could repeat the above for RSA-SHA256 and
# RSA-SHA512 signing and verification functions.
def test_rsa_jwt_algorithm_cache(self):
# Tests cache of RSAAlgorithm objects is implemented correctly.
# This is difficult to test, since the cache is internal.
#
# Running this test with coverage will show the cache-hit branch of code
# being executed by two signing operations with the same hash algorithm.
self.test_sign_rsa_sha1_with_client() # creates cache entry
self.test_sign_rsa_sha1_with_client() # reuses cache entry
# Some possible bugs will be detected if multiple signing operations
# with different hash algorithms produce the wrong results (e.g. if the
# cache incorrectly returned the previously used algorithm, instead
# of the one that is needed).
self.test_sign_rsa_sha256_with_client()
self.test_sign_rsa_sha256_with_client()
self.test_sign_rsa_sha1_with_client()
self.test_sign_rsa_sha256_with_client()
self.test_sign_rsa_sha512_with_client()
# ==== PLAINTEXT signature method tests ==========================
plaintext_client = hmac_client # for convenience, use the same HMAC secrets
expected_signature_plaintext = (
'ECrDNoq1VYzzzzzzzzzyAK7TwZNtPnkqatqZZZZ'
'&'
'just-a-string%20%20%20%20asdasd')
def test_sign_plaintext_with_client(self):
# With PLAINTEXT, the "signature" is always the same: regardless of the
# contents of the request. It is the concatenation of the encoded
# client_secret, an ampersand, and the encoded resource_owner_secret.
#
# That is why the spaces in the resource owner secret are "%20".
self.assertEqual(self.expected_signature_plaintext,
sign_plaintext_with_client(None, # request is ignored
self.plaintext_client))
self.assertTrue(verify_plaintext(
MockRequest('PUT',
'http://example.com/some-other-path',
[('description', 'request is ignored in PLAINTEXT')],
self.expected_signature_plaintext),
self.plaintext_client.client_secret,
self.plaintext_client.resource_owner_secret))
def test_plaintext_false_positives(self):
"""
Test verify_plaintext function will correctly detect invalid signatures.
"""
_ros = self.plaintext_client.resource_owner_secret
good_signature = \
sign_plaintext_with_client(
self.eg_signature_base_string,
self.plaintext_client)
bad_signature_produced_by_different_client_secret = \
sign_plaintext_with_client(
self.eg_signature_base_string,
MockClient(client_secret='wrong-secret',
resource_owner_secret=_ros))
bad_signature_produced_by_different_resource_owner_secret = \
sign_plaintext_with_client(
self.eg_signature_base_string,
MockClient(client_secret=self.plaintext_client.client_secret,
resource_owner_secret='wrong-secret'))
bad_signature_produced_with_no_resource_owner_secret = \
sign_plaintext_with_client(
self.eg_signature_base_string,
MockClient(client_secret=self.plaintext_client.client_secret))
bad_signature_produced_with_no_client_secret = \
sign_plaintext_with_client(
self.eg_signature_base_string,
MockClient(resource_owner_secret=_ros))
self.assertTrue(verify_plaintext(
MockRequest('POST',
'http://example.com/request',
self.eg_params,
good_signature),
self.plaintext_client.client_secret,
self.plaintext_client.resource_owner_secret))
for bad_signature in [
'',
'ZG9uJ3QgdHJ1c3QgbWUK', # random base64 encoded value
'altérer', # value with a non-ASCII character in it
bad_signature_produced_by_different_client_secret,
bad_signature_produced_by_different_resource_owner_secret,
bad_signature_produced_with_no_resource_owner_secret,
bad_signature_produced_with_no_client_secret,
]:
self.assertFalse(verify_plaintext(
MockRequest('POST',
'http://example.com/request',
self.eg_params,
bad_signature),
self.plaintext_client.client_secret,
self.plaintext_client.resource_owner_secret))
|
SignatureTests
|
python
|
davidhalter__parso
|
parso/python/errors.py
|
{
"start": 18215,
"end": 18295
}
|
class ____(NormalizerConfig):
normalizer_class = ErrorFinder
|
ErrorFinderConfig
|
python
|
getsentry__sentry
|
src/sentry/users/models/authenticator.py
|
{
"start": 6524,
"end": 8850
}
|
class ____(ControlOutboxProducingModel):
# It only makes sense to import/export this data when doing a full global backup/restore, so it
# lives in the `Global` scope, even though it only depends on the `User` model.
__relocation_scope__ = RelocationScope.Global
id = BoundedAutoField(primary_key=True)
user = FlexibleForeignKey("sentry.User", db_index=True)
created_at = models.DateTimeField(_("created at"), default=timezone.now)
last_used_at = models.DateTimeField(_("last used at"), null=True)
type = BoundedPositiveIntegerField(choices=AUTHENTICATOR_CHOICES)
config = AuthenticatorConfig()
objects: ClassVar[AuthenticatorManager] = AuthenticatorManager()
class AlreadyEnrolled(Exception):
pass
class Meta:
app_label = "sentry"
db_table = "auth_authenticator"
verbose_name = _("authenticator")
verbose_name_plural = _("authenticators")
unique_together = (("user", "type"),)
def outboxes_for_update(self, shard_identifier: int | None = None) -> list[ControlOutboxBase]:
regions = find_regions_for_user(self.user_id)
return OutboxCategory.USER_UPDATE.as_control_outboxes(
region_names=regions,
shard_identifier=self.user_id,
object_identifier=self.user_id,
)
@cached_property
def interface(self) -> OtpMixin | AuthenticatorInterface:
return AUTHENTICATOR_INTERFACES_BY_TYPE[self.type](self)
def mark_used(self, save: bool = True) -> None:
self.last_used_at = timezone.now()
if save:
self.save()
def reset_fields(self, save: bool = True) -> None:
self.created_at = timezone.now()
self.last_used_at = None
if save:
self.save()
def __repr__(self) -> str:
return f"<Authenticator user={self.user.email!r} interface={self.interface.interface_id!r}>"
@classmethod
def sanitize_relocation_json(
cls, json: Any, sanitizer: Sanitizer, model_name: NormalizedModelName | None = None
) -> None:
model_name = get_model_name(cls) if model_name is None else model_name
super().sanitize_relocation_json(json, sanitizer, model_name)
sanitizer.set_string(json, SanitizableField(model_name, "config"), lambda _: '""')
|
Authenticator
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/losses.py
|
{
"start": 41087,
"end": 71120
}
|
class ____(LossFunctionWrapper):
"""Computes the Huber loss between `y_true` and `y_pred`.
For each value x in `error = y_true - y_pred`:
```
loss = 0.5 * x^2 if |x| <= d
loss = 0.5 * d^2 + d * (|x| - d) if |x| > d
```
where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss
Standalone usage:
>>> y_true = [[0, 1], [0, 0]]
>>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
>>> # Using 'auto'/'sum_over_batch_size' reduction type.
>>> h = tf.keras.losses.Huber()
>>> h(y_true, y_pred).numpy()
0.155
>>> # Calling with 'sample_weight'.
>>> h(y_true, y_pred, sample_weight=[1, 0]).numpy()
0.09
>>> # Using 'sum' reduction type.
>>> h = tf.keras.losses.Huber(
... reduction=tf.keras.losses.Reduction.SUM)
>>> h(y_true, y_pred).numpy()
0.31
>>> # Using 'none' reduction type.
>>> h = tf.keras.losses.Huber(
... reduction=tf.keras.losses.Reduction.NONE)
>>> h(y_true, y_pred).numpy()
array([0.18, 0.13], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', loss=tf.keras.losses.Huber())
```
"""
def __init__(self,
delta=1.0,
reduction=losses_utils.ReductionV2.AUTO,
name='huber_loss'):
"""Initializes `Huber` instance.
Args:
delta: A float, the point where the Huber loss function changes from a
quadratic to linear.
reduction: Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the instance. Defaults to 'huber_loss'.
"""
super().__init__(huber, name=name, reduction=reduction, delta=delta)
@dispatch.add_dispatch_support
def mean_squared_error(y_true, y_pred):
"""Computes the mean squared error between labels and predictions.
After computing the squared distance between the inputs, the mean value over
the last dimension is returned.
`loss = mean(square(y_true - y_pred), axis=-1)`
Standalone usage:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.mean_squared_error(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> assert np.array_equal(
... loss.numpy(), np.mean(np.square(y_true - y_pred), axis=-1))
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Mean squared error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return backend.mean(math_ops.squared_difference(y_pred, y_true), axis=-1)
def _ragged_tensor_apply_loss(loss_fn, y_true, y_pred, y_pred_extra_dim=False):
"""Apply a loss function on a per batch basis.
Args:
loss_fn: The loss function
y_true: truth values (RaggedTensor)
y_pred: predicted values (RaggedTensor)
y_pred_extra_dim: whether y_pred has an additional dimension compared to
y_true
Returns:
Loss-function result. A dense tensor if the output has a single dimension
(per-batch loss value); a ragged tensor otherwise.
"""
def rt_is_equiv_dense(rt):
"""Returns true if this RaggedTensor has the same row_lengths across
all ragged dimensions and thus can be converted to a dense tensor
without loss of information.
Args:
rt: RaggedTensor.
"""
return math_ops.reduce_all([
math_ops.equal(
math_ops.reduce_variance(math_ops.cast(row_lens, backend.floatx())),
constant_op.constant([0.])) for row_lens in rt.nested_row_lengths()
])
def _convert_to_dense(inputs):
return tuple(
rt.to_tensor() if isinstance(rt, ragged_tensor.RaggedTensor) else rt
for rt in inputs)
def _call_loss(inputs, ragged_output):
""" Adapt the result to ragged or dense tensor according to the expected
output type. This is done so that all the return values of the map
operation have the same type.
"""
r = loss_fn(*inputs)
if ragged_output and not isinstance(r, ragged_tensor.RaggedTensor):
r = ragged_tensor.RaggedTensor.from_tensor(r)
elif not ragged_output and isinstance(r, ragged_tensor.RaggedTensor):
r = r.to_tensor()
return r
def _wrapper(inputs, ragged_output):
_, y_pred = inputs
if isinstance(y_pred, ragged_tensor.RaggedTensor):
return cond.cond(
rt_is_equiv_dense(y_pred),
lambda: _call_loss(_convert_to_dense(inputs), ragged_output),
lambda: _call_loss(inputs, ragged_output))
return loss_fn(*inputs)
if not isinstance(y_true, ragged_tensor.RaggedTensor):
return loss_fn(y_true, y_pred.to_tensor())
lshape = y_pred.shape.as_list()[1:-1]
if len(lshape) > 0:
spec = ragged_tensor.RaggedTensorSpec(shape=lshape, dtype=y_pred.dtype)
else:
spec = tensor_spec.TensorSpec(shape=[], dtype=y_pred.dtype)
nested_splits_list = [rt.nested_row_splits for rt in (y_true, y_pred)]
if y_pred_extra_dim:
# The last dimension of a categorical prediction may be ragged or not.
rdims = [len(slist) for slist in nested_splits_list]
if rdims[0] == rdims[1] - 1:
nested_splits_list[1] = nested_splits_list[1][:-1]
map_fn = functools.partial(_wrapper, ragged_output=len(lshape) > 1)
assertion_list = ragged_util.assert_splits_match(nested_splits_list)
with ops.control_dependencies(assertion_list):
return ragged_map_ops.map_fn(map_fn, elems=(y_true, y_pred), dtype=spec)
@dispatch.dispatch_for_types(mean_squared_error, ragged_tensor.RaggedTensor)
def _ragged_tensor_mse(y_true, y_pred):
"""Implements support for handling RaggedTensors.
Args:
y_true: RaggedTensor truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: RaggedTensor predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Mean squared error values. shape = `[batch_size, d0, .. dN-1]`.
When the number of dimensions of the batch feature vector [d0, .. dN] is
greater than one the return value is a RaggedTensor. Otherwise a Dense
tensor with dimensions [batch_size] is returned.
"""
return _ragged_tensor_apply_loss(mean_squared_error, y_true, y_pred)
@dispatch.add_dispatch_support
def mean_absolute_error(y_true, y_pred):
"""Computes the mean absolute error between labels and predictions.
`loss = mean(abs(y_true - y_pred), axis=-1)`
Standalone usage:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.mean_absolute_error(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> assert np.array_equal(
... loss.numpy(), np.mean(np.abs(y_true - y_pred), axis=-1))
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Mean absolute error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return backend.mean(math_ops.abs(y_pred - y_true), axis=-1)
@dispatch.dispatch_for_types(mean_absolute_error, ragged_tensor.RaggedTensor)
def _ragged_tensor_mae(y_true, y_pred):
"""RaggedTensor adapter for mean_absolute_error."""
return _ragged_tensor_apply_loss(mean_absolute_error, y_true, y_pred)
@dispatch.add_dispatch_support
def mean_absolute_percentage_error(y_true, y_pred):
"""Computes the mean absolute percentage error between `y_true` and `y_pred`.
`loss = 100 * mean(abs((y_true - y_pred) / y_true), axis=-1)`
Standalone usage:
>>> y_true = np.random.random(size=(2, 3))
>>> y_true = np.maximum(y_true, 1e-7) # Prevent division by zero
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.mean_absolute_percentage_error(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> assert np.array_equal(
... loss.numpy(),
... 100. * np.mean(np.abs((y_true - y_pred) / y_true), axis=-1))
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Mean absolute percentage error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
diff = math_ops.abs(
(y_true - y_pred) / backend.maximum(math_ops.abs(y_true),
backend.epsilon()))
return 100. * backend.mean(diff, axis=-1)
@dispatch.dispatch_for_types(mean_absolute_percentage_error,
ragged_tensor.RaggedTensor)
def _ragged_tensor_mape(y_true, y_pred):
"""Support RaggedTensors."""
return _ragged_tensor_apply_loss(mean_absolute_percentage_error, y_true,
y_pred)
@dispatch.add_dispatch_support
def mean_squared_logarithmic_error(y_true, y_pred):
"""Computes the mean squared logarithmic error between `y_true` and `y_pred`.
`loss = mean(square(log(y_true + 1) - log(y_pred + 1)), axis=-1)`
Standalone usage:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.mean_squared_logarithmic_error(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> y_true = np.maximum(y_true, 1e-7)
>>> y_pred = np.maximum(y_pred, 1e-7)
>>> assert np.allclose(
... loss.numpy(),
... np.mean(
... np.square(np.log(y_true + 1.) - np.log(y_pred + 1.)), axis=-1))
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Mean squared logarithmic error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
first_log = math_ops.log(backend.maximum(y_pred, backend.epsilon()) + 1.)
second_log = math_ops.log(backend.maximum(y_true, backend.epsilon()) + 1.)
return backend.mean(
math_ops.squared_difference(first_log, second_log), axis=-1)
@dispatch.dispatch_for_types(mean_squared_logarithmic_error,
ragged_tensor.RaggedTensor)
def _ragged_tensor_msle(y_true, y_pred):
"""Implements support for handling RaggedTensors."""
return _ragged_tensor_apply_loss(mean_squared_logarithmic_error, y_true,
y_pred)
def _maybe_convert_labels(y_true):
"""Converts binary labels into -1/1."""
are_zeros = math_ops.equal(y_true, 0)
are_ones = math_ops.equal(y_true, 1)
is_binary = math_ops.reduce_all(math_ops.logical_or(are_zeros, are_ones))
def _convert_binary_labels():
# Convert the binary labels to -1 or 1.
return 2. * y_true - 1.
updated_y_true = smart_cond.smart_cond(is_binary, _convert_binary_labels,
lambda: y_true)
return updated_y_true
@dispatch.add_dispatch_support
def squared_hinge(y_true, y_pred):
"""Computes the squared hinge loss between `y_true` and `y_pred`.
`loss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)`
Standalone usage:
>>> y_true = np.random.choice([-1, 1], size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.squared_hinge(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> assert np.array_equal(
... loss.numpy(),
... np.mean(np.square(np.maximum(1. - y_true * y_pred, 0.)), axis=-1))
Args:
y_true: The ground truth values. `y_true` values are expected to be -1 or 1.
If binary (0 or 1) labels are provided we will convert them to -1 or 1.
shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Squared hinge loss values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
y_true = _maybe_convert_labels(y_true)
return backend.mean(
math_ops.square(math_ops.maximum(1. - y_true * y_pred, 0.)), axis=-1)
@dispatch.add_dispatch_support
def hinge(y_true, y_pred):
"""Computes the hinge loss between `y_true` and `y_pred`.
`loss = mean(maximum(1 - y_true * y_pred, 0), axis=-1)`
Standalone usage:
>>> y_true = np.random.choice([-1, 1], size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.hinge(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> assert np.array_equal(
... loss.numpy(),
... np.mean(np.maximum(1. - y_true * y_pred, 0.), axis=-1))
Args:
y_true: The ground truth values. `y_true` values are expected to be -1 or 1.
If binary (0 or 1) labels are provided they will be converted to -1 or 1.
shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Hinge loss values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
y_true = _maybe_convert_labels(y_true)
return backend.mean(math_ops.maximum(1. - y_true * y_pred, 0.), axis=-1)
@dispatch.add_dispatch_support
def categorical_hinge(y_true, y_pred):
"""Computes the categorical hinge loss between `y_true` and `y_pred`.
`loss = maximum(neg - pos + 1, 0)`
where `neg=maximum((1-y_true)*y_pred) and pos=sum(y_true*y_pred)`
Standalone usage:
>>> y_true = np.random.randint(0, 3, size=(2,))
>>> y_true = tf.keras.utils.to_categorical(y_true, num_classes=3)
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.categorical_hinge(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> pos = np.sum(y_true * y_pred, axis=-1)
>>> neg = np.amax((1. - y_true) * y_pred, axis=-1)
>>> assert np.array_equal(loss.numpy(), np.maximum(0., neg - pos + 1.))
Args:
y_true: The ground truth values. `y_true` values are expected to be
either `{-1, +1}` or `{0, 1}` (i.e. a one-hot-encoded tensor).
y_pred: The predicted values.
Returns:
Categorical hinge loss values.
"""
y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
pos = math_ops.reduce_sum(y_true * y_pred, axis=-1)
neg = math_ops.reduce_max((1. - y_true) * y_pred, axis=-1)
zero = math_ops.cast(0., y_pred.dtype)
return math_ops.maximum(neg - pos + 1., zero)
@dispatch.add_dispatch_support
def huber(y_true, y_pred, delta=1.0):
"""Computes Huber loss value.
For each value x in `error = y_true - y_pred`:
```
loss = 0.5 * x^2 if |x| <= d
loss = d * |x| - 0.5 * d^2 if |x| > d
```
where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss
Args:
y_true: tensor of true targets.
y_pred: tensor of predicted targets.
delta: A float, the point where the Huber loss function changes from a
quadratic to linear.
Returns:
Tensor with one scalar loss entry per sample.
"""
y_pred = math_ops.cast(y_pred, dtype=backend.floatx())
y_true = math_ops.cast(y_true, dtype=backend.floatx())
delta = math_ops.cast(delta, dtype=backend.floatx())
error = math_ops.subtract(y_pred, y_true)
abs_error = math_ops.abs(error)
half = tensor_conversion.convert_to_tensor_v2_with_dispatch(
0.5, dtype=abs_error.dtype
)
return backend.mean(
array_ops.where_v2(abs_error <= delta, half * math_ops.square(error),
delta * abs_error - half * math_ops.square(delta)),
axis=-1)
@dispatch.add_dispatch_support
def log_cosh(y_true, y_pred):
"""Logarithm of the hyperbolic cosine of the prediction error.
`log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and
to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly
like the mean squared error, but will not be so strongly affected by the
occasional wildly incorrect prediction.
Standalone usage:
>>> y_true = np.random.random(size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.logcosh(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> x = y_pred - y_true
>>> assert np.allclose(
... loss.numpy(),
... np.mean(x + np.log(np.exp(-2. * x) + 1.) - math_ops.log(2.), axis=-1),
... atol=1e-5)
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Logcosh error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
def _logcosh(x):
return x + math_ops.softplus(-2. * x) - math_ops.cast(
math_ops.log(2.), x.dtype)
return backend.mean(_logcosh(y_pred - y_true), axis=-1)
@dispatch.add_dispatch_support
def categorical_crossentropy(y_true,
y_pred,
from_logits=False,
label_smoothing=0,
axis=-1):
"""Computes the categorical crossentropy loss.
Standalone usage:
>>> y_true = [[0, 1, 0], [0, 0, 1]]
>>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
>>> loss = tf.keras.losses.categorical_crossentropy(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> loss.numpy()
array([0.0513, 2.303], dtype=float32)
Args:
y_true: Tensor of one-hot true targets.
y_pred: Tensor of predicted targets.
from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
we assume that `y_pred` encodes a probability distribution.
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For
example, if `0.1`, use `0.1 / num_classes` for non-target labels
and `0.9 + 0.1 / num_classes` for target labels.
axis: Defaults to -1. The dimension along which the entropy is
computed.
Returns:
Categorical crossentropy loss value.
"""
y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
label_smoothing = tensor_conversion.convert_to_tensor_v2_with_dispatch(
label_smoothing, dtype=backend.floatx()
)
def _smooth_labels():
num_classes = math_ops.cast(array_ops.shape(y_true)[-1], y_pred.dtype)
return y_true * (1.0 - label_smoothing) + (label_smoothing / num_classes)
y_true = smart_cond.smart_cond(label_smoothing, _smooth_labels,
lambda: y_true)
return backend.categorical_crossentropy(
y_true, y_pred, from_logits=from_logits, axis=axis)
@dispatch.dispatch_for_types(categorical_crossentropy,
ragged_tensor.RaggedTensor)
def _ragged_tensor_categorical_crossentropy(y_true,
y_pred,
from_logits=False,
label_smoothing=0,
axis=-1):
"""Implements support for handling RaggedTensors.
Args:
y_true: Tensor of one-hot true targets.
y_pred: Tensor of predicted targets.
from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
we assume that `y_pred` encodes a probability distribution.
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For
example, if `0.1`, use `0.1 / num_classes` for non-target labels and `0.9
+ 0.1 / num_classes` for target labels.
axis: The axis along which to compute crossentropy (the features axis).
Defaults to -1.
Returns:
Categorical crossentropy loss value.
Expected shape: (batch, sequence_len, n_classes) with sequence_len
being variable per batch.
Return shape: (batch, sequence_len).
When used by CategoricalCrossentropy() with the default reduction
(SUM_OVER_BATCH_SIZE), the reduction averages the loss over the
number of elements independent of the batch. E.g. if the RaggedTensor
has 2 batches with [2, 1] values respectively the resulting loss is
the sum of the individual loss values divided by 3.
"""
fn = functools.partial(
categorical_crossentropy,
from_logits=from_logits,
label_smoothing=label_smoothing,
axis=axis)
return _ragged_tensor_apply_loss(fn, y_true, y_pred)
@dispatch.add_dispatch_support
def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):
"""Computes the sparse categorical crossentropy loss.
Standalone usage:
>>> y_true = [1, 2]
>>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
>>> loss = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> loss.numpy()
array([0.0513, 2.303], dtype=float32)
Args:
y_true: Ground truth values.
y_pred: The predicted values.
from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
we assume that `y_pred` encodes a probability distribution.
axis: Defaults to -1. The dimension along which the entropy is
computed.
Returns:
Sparse categorical crossentropy loss value.
"""
y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return backend.sparse_categorical_crossentropy(
y_true, y_pred, from_logits=from_logits, axis=axis)
@dispatch.dispatch_for_types(sparse_categorical_crossentropy,
ragged_tensor.RaggedTensor)
def _ragged_tensor_sparse_categorical_crossentropy(y_true,
y_pred,
from_logits=False,
axis=-1):
""" Implements support for handling RaggedTensors.
Expected y_pred shape: (batch, sequence_len, n_classes) with sequence_len
being variable per batch.
Return shape: (batch, sequence_len).
When used by SparseCategoricalCrossentropy() with the default reduction
(SUM_OVER_BATCH_SIZE), the reduction averages the loss over the
number of elements independent of the batch. E.g. if the RaggedTensor
has 2 batches with [2, 1] values respectively, the resulting loss is
the sum of the individual loss values divided by 3.
"""
fn = functools.partial(
sparse_categorical_crossentropy, from_logits=from_logits, axis=axis)
return _ragged_tensor_apply_loss(fn, y_true, y_pred, y_pred_extra_dim=True)
@dispatch.add_dispatch_support
def binary_crossentropy(y_true,
y_pred,
from_logits=False,
label_smoothing=0,
axis=-1):
"""Computes the binary crossentropy loss.
Standalone usage:
>>> y_true = [[0, 1], [0, 0]]
>>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
>>> loss = tf.keras.losses.binary_crossentropy(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> loss.numpy()
array([0.916 , 0.714], dtype=float32)
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
we assume that `y_pred` encodes a probability distribution.
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels by
squeezing them towards 0.5 That is, using `1. - 0.5 * label_smoothing`
for the target class and `0.5 * label_smoothing` for the non-target class.
axis: The axis along which the mean is computed. Defaults to -1.
Returns:
Binary crossentropy loss value. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
label_smoothing = tensor_conversion.convert_to_tensor_v2_with_dispatch(
label_smoothing, dtype=backend.floatx()
)
def _smooth_labels():
return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
y_true = smart_cond.smart_cond(label_smoothing, _smooth_labels,
lambda: y_true)
return backend.mean(
backend.binary_crossentropy(y_true, y_pred, from_logits=from_logits),
axis=axis)
@dispatch.dispatch_for_types(binary_crossentropy, ragged_tensor.RaggedTensor)
def _ragged_tensor_binary_crossentropy(y_true,
y_pred,
from_logits=False,
label_smoothing=0,
axis=-1):
"""Implements support for handling RaggedTensors.
Args:
y_true: Tensor of one-hot true targets.
y_pred: Tensor of predicted targets.
from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
we assume that `y_pred` encodes a probability distribution.
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For
example, if `0.1`, use `0.1 / num_classes` for non-target labels
and `0.9 + 0.1 / num_classes` for target labels.
axis: Axis along which to compute crossentropy.
Returns:
Binary crossentropy loss value.
Expected shape: (batch, sequence_len) with sequence_len being variable
per batch.
Return shape: (batch,); returns the per batch mean of the loss values.
When used by BinaryCrossentropy() with the default reduction
(SUM_OVER_BATCH_SIZE), the reduction averages the per batch losses over
the number of batches.
"""
fn = functools.partial(
binary_crossentropy,
from_logits=from_logits,
label_smoothing=label_smoothing,
axis=axis)
return _ragged_tensor_apply_loss(fn, y_true, y_pred)
@dispatch.add_dispatch_support
def kl_divergence(y_true, y_pred):
"""Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.
`loss = y_true * log(y_true / y_pred)`
See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
Standalone usage:
>>> y_true = np.random.randint(0, 2, size=(2, 3)).astype(np.float64)
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.kullback_leibler_divergence(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> y_true = tf.keras.backend.clip(y_true, 1e-7, 1)
>>> y_pred = tf.keras.backend.clip(y_pred, 1e-7, 1)
>>> assert np.array_equal(
... loss.numpy(), np.sum(y_true * np.log(y_true / y_pred), axis=-1))
Args:
y_true: Tensor of true targets.
y_pred: Tensor of predicted targets.
Returns:
A `Tensor` with loss.
Raises:
TypeError: If `y_true` cannot be cast to the `y_pred.dtype`.
"""
y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
y_true = backend.clip(y_true, backend.epsilon(), 1)
y_pred = backend.clip(y_pred, backend.epsilon(), 1)
return math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1)
@dispatch.add_dispatch_support
def poisson(y_true, y_pred):
"""Computes the Poisson loss between y_true and y_pred.
The Poisson loss is the mean of the elements of the `Tensor`
`y_pred - y_true * log(y_pred)`.
Standalone usage:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.poisson(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> y_pred = y_pred + 1e-7
>>> assert np.allclose(
... loss.numpy(), np.mean(y_pred - y_true * np.log(y_pred), axis=-1),
... atol=1e-5)
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Poisson loss value. shape = `[batch_size, d0, .. dN-1]`.
Raises:
InvalidArgumentError: If `y_true` and `y_pred` have incompatible shapes.
"""
y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return backend.mean(
y_pred - y_true * math_ops.log(y_pred + backend.epsilon()), axis=-1)
@dispatch.add_dispatch_support
def cosine_similarity(y_true, y_pred, axis=-1):
"""Computes the cosine similarity between labels and predictions.
Note that it is a number between -1 and 1. When it is a negative number
between -1 and 0, 0 indicates orthogonality and values closer to -1
indicate greater similarity. The values closer to 1 indicate greater
dissimilarity. This makes it usable as a loss function in a setting
where you try to maximize the proximity between predictions and
targets. If either `y_true` or `y_pred` is a zero vector, cosine
similarity will be 0 regardless of the proximity between predictions
and targets.
`loss = -sum(l2_norm(y_true) * l2_norm(y_pred))`
Standalone usage:
>>> y_true = [[0., 1.], [1., 1.], [1., 1.]]
>>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]]
>>> loss = tf.keras.losses.cosine_similarity(y_true, y_pred, axis=1)
>>> loss.numpy()
array([-0., -0.999, 0.999], dtype=float32)
Args:
y_true: Tensor of true targets.
y_pred: Tensor of predicted targets.
axis: Axis along which to determine similarity.
Returns:
Cosine similarity tensor.
"""
y_true = nn.l2_normalize(y_true, axis=axis)
y_pred = nn.l2_normalize(y_pred, axis=axis)
return -math_ops.reduce_sum(y_true * y_pred, axis=axis)
|
Huber
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pyflakes/F706.py
|
{
"start": 31,
"end": 66
}
|
class ____:
return 2
return 3
|
Foo
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/engine/interfaces.py
|
{
"start": 98633,
"end": 108216
}
|
class ____:
"""A messenger object for a Dialect that corresponds to a single
execution.
"""
engine: Engine
"""engine which the Connection is associated with"""
connection: Connection
"""Connection object which can be freely used by default value
generators to execute SQL. This Connection should reference the
same underlying connection/transactional resources of
root_connection."""
root_connection: Connection
"""Connection object which is the source of this ExecutionContext."""
dialect: Dialect
"""dialect which created this ExecutionContext."""
cursor: DBAPICursor
"""DB-API cursor procured from the connection"""
compiled: Optional[Compiled]
"""if passed to constructor, sqlalchemy.engine.base.Compiled object
being executed"""
statement: str
"""string version of the statement to be executed. Is either
passed to the constructor, or must be created from the
sql.Compiled object by the time pre_exec() has completed."""
invoked_statement: Optional[Executable]
"""The Executable statement object that was given in the first place.
This should be structurally equivalent to compiled.statement, but not
necessarily the same object as in a caching scenario the compiled form
will have been extracted from the cache.
"""
parameters: _AnyMultiExecuteParams
"""bind parameters passed to the execute() or exec_driver_sql() methods.
These are always stored as a list of parameter entries. A single-element
list corresponds to a ``cursor.execute()`` call and a multiple-element
list corresponds to ``cursor.executemany()``, except in the case
of :attr:`.ExecuteStyle.INSERTMANYVALUES` which will use
``cursor.execute()`` one or more times.
"""
no_parameters: bool
"""True if the execution style does not use parameters"""
isinsert: bool
"""True if the statement is an INSERT."""
isupdate: bool
"""True if the statement is an UPDATE."""
execute_style: ExecuteStyle
"""the style of DBAPI cursor method that will be used to execute
a statement.
.. versionadded:: 2.0
"""
executemany: bool
"""True if the context has a list of more than one parameter set.
Historically this attribute links to whether ``cursor.execute()`` or
``cursor.executemany()`` will be used. It also can now mean that
"insertmanyvalues" may be used which indicates one or more
``cursor.execute()`` calls.
"""
prefetch_cols: util.generic_fn_descriptor[Optional[Sequence[Column[Any]]]]
"""a list of Column objects for which a client-side default
was fired off. Applies to inserts and updates."""
postfetch_cols: util.generic_fn_descriptor[Optional[Sequence[Column[Any]]]]
"""a list of Column objects for which a server-side default or
inline SQL expression value was fired off. Applies to inserts
and updates."""
execution_options: _ExecuteOptions
"""Execution options associated with the current statement execution"""
@classmethod
def _init_ddl(
cls,
dialect: Dialect,
connection: Connection,
dbapi_connection: PoolProxiedConnection,
execution_options: _ExecuteOptions,
compiled_ddl: DDLCompiler,
) -> ExecutionContext:
raise NotImplementedError()
@classmethod
def _init_compiled(
cls,
dialect: Dialect,
connection: Connection,
dbapi_connection: PoolProxiedConnection,
execution_options: _ExecuteOptions,
compiled: SQLCompiler,
parameters: _CoreMultiExecuteParams,
invoked_statement: Executable,
extracted_parameters: Optional[Sequence[BindParameter[Any]]],
cache_hit: CacheStats = CacheStats.CACHING_DISABLED,
) -> ExecutionContext:
raise NotImplementedError()
@classmethod
def _init_statement(
cls,
dialect: Dialect,
connection: Connection,
dbapi_connection: PoolProxiedConnection,
execution_options: _ExecuteOptions,
statement: str,
parameters: _DBAPIMultiExecuteParams,
) -> ExecutionContext:
raise NotImplementedError()
@classmethod
def _init_default(
cls,
dialect: Dialect,
connection: Connection,
dbapi_connection: PoolProxiedConnection,
execution_options: _ExecuteOptions,
) -> ExecutionContext:
raise NotImplementedError()
def _exec_default(
self,
column: Optional[Column[Any]],
default: DefaultGenerator,
type_: Optional[TypeEngine[Any]],
) -> Any:
raise NotImplementedError()
def _prepare_set_input_sizes(
self,
) -> Optional[List[Tuple[str, Any, TypeEngine[Any]]]]:
raise NotImplementedError()
def _get_cache_stats(self) -> str:
raise NotImplementedError()
def _setup_result_proxy(self) -> CursorResult[Any]:
raise NotImplementedError()
def fire_sequence(self, seq: Sequence_SchemaItem, type_: Integer) -> int:
"""given a :class:`.Sequence`, invoke it and return the next int
value"""
raise NotImplementedError()
def create_cursor(self) -> DBAPICursor:
"""Return a new cursor generated from this ExecutionContext's
connection.
Some dialects may wish to change the behavior of
connection.cursor(), such as postgresql which may return a PG
"server side" cursor.
"""
raise NotImplementedError()
def pre_exec(self) -> None:
"""Called before an execution of a compiled statement.
If a compiled statement was passed to this ExecutionContext,
the `statement` and `parameters` datamembers must be
initialized after this statement is complete.
"""
raise NotImplementedError()
def get_out_parameter_values(
self, out_param_names: Sequence[str]
) -> Sequence[Any]:
"""Return a sequence of OUT parameter values from a cursor.
For dialects that support OUT parameters, this method will be called
when there is a :class:`.SQLCompiler` object which has the
:attr:`.SQLCompiler.has_out_parameters` flag set. This flag in turn
will be set to True if the statement itself has :class:`.BindParameter`
objects that have the ``.isoutparam`` flag set which are consumed by
the :meth:`.SQLCompiler.visit_bindparam` method. If the dialect
compiler produces :class:`.BindParameter` objects with ``.isoutparam``
set which are not handled by :meth:`.SQLCompiler.visit_bindparam`, it
should set this flag explicitly.
The list of names that were rendered for each bound parameter
is passed to the method. The method should then return a sequence of
values corresponding to the list of parameter objects. Unlike in
previous SQLAlchemy versions, the values can be the **raw values** from
the DBAPI; the execution context will apply the appropriate type
handler based on what's present in self.compiled.binds and update the
values. The processed dictionary will then be made available via the
``.out_parameters`` collection on the result object. Note that
SQLAlchemy 1.4 has multiple kinds of result object as part of the 2.0
transition.
.. versionadded:: 1.4 - added
:meth:`.ExecutionContext.get_out_parameter_values`, which is invoked
automatically by the :class:`.DefaultExecutionContext` when there
are :class:`.BindParameter` objects with the ``.isoutparam`` flag
set. This replaces the practice of setting out parameters within
the now-removed ``get_result_proxy()`` method.
"""
raise NotImplementedError()
def post_exec(self) -> None:
"""Called after the execution of a compiled statement.
If a compiled statement was passed to this ExecutionContext,
the `last_insert_ids`, `last_inserted_params`, etc.
datamembers should be available after this method completes.
"""
raise NotImplementedError()
def handle_dbapi_exception(self, e: BaseException) -> None:
"""Receive a DBAPI exception which occurred upon execute, result
fetch, etc."""
raise NotImplementedError()
def lastrow_has_defaults(self) -> bool:
"""Return True if the last INSERT or UPDATE row contained
inlined or database-side defaults.
"""
raise NotImplementedError()
def get_rowcount(self) -> Optional[int]:
"""Return the DBAPI ``cursor.rowcount`` value, or in some
cases an interpreted value.
See :attr:`_engine.CursorResult.rowcount` for details on this.
"""
raise NotImplementedError()
def fetchall_for_returning(self, cursor: DBAPICursor) -> Sequence[Any]:
"""For a RETURNING result, deliver cursor.fetchall() from the
DBAPI cursor.
This is a dialect-specific hook for dialects that have special
considerations when calling upon the rows delivered for a
"RETURNING" statement. Default implementation is
``cursor.fetchall()``.
This hook is currently used only by the :term:`insertmanyvalues`
feature. Dialects that don't set ``use_insertmanyvalues=True``
don't need to consider this hook.
.. versionadded:: 2.0.10
"""
raise NotImplementedError()
|
ExecutionContext
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeNarrowingTypeIs1.py
|
{
"start": 1218,
"end": 1621
}
|
class ____:
pass
def func6(val: AFinal | BFinal) -> None:
if type(val) is AFinal:
reveal_type(val, expected_text="AFinal")
else:
reveal_type(val, expected_text="BFinal")
def func7(val: Any):
if type(val) is int:
reveal_type(val, expected_text="int")
else:
reveal_type(val, expected_text="Any")
reveal_type(val, expected_text="int | Any")
|
BFinal
|
python
|
huggingface__transformers
|
src/transformers/models/bridgetower/modeling_bridgetower.py
|
{
"start": 29624,
"end": 32754
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BridgeTowerAttention(config, is_causal=config.is_decoder, layer_idx=layer_idx)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
self.crossattention = BridgeTowerAttention(
config,
is_causal=False,
layer_idx=layer_idx,
is_cross_attention=True,
)
self.intermediate = BridgeTowerIntermediate(config)
self.output = BridgeTowerOutput(config)
# copied from transformers.models.bert.modeling_bert.BertLayer.forward
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
outputs = ()
self_attention_output, self_attn_weights = self.attention(
hidden_states,
attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
attention_output = self_attention_output
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
" by setting `config.add_cross_attention=True`"
)
cross_attention_output, cross_attn_weights = self.crossattention(
self_attention_output,
None, # attention_mask
encoder_hidden_states,
encoder_attention_mask,
past_key_values=past_key_values,
**kwargs,
)
attention_output = cross_attention_output
outputs = (cross_attn_weights,)
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
return outputs + (
layer_output,
self_attn_weights,
)
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
# copied from transformers.models.roberta.modeling_roberta.RobertaEncoder with Roberta->BridgeTowerText
|
BridgeTowerTextLayer
|
python
|
mahmoud__boltons
|
boltons/socketutils.py
|
{
"start": 3696,
"end": 24349
}
|
class ____:
"""Mainly provides recv_until and recv_size. recv, send, sendall, and
peek all function as similarly as possible to the built-in socket
API.
This type has been tested against both the built-in socket type as
well as those from gevent and eventlet. It also features support
for sockets with timeouts set to 0 (aka nonblocking), provided the
caller is prepared to handle the EWOULDBLOCK exceptions.
Args:
sock (socket): The connected socket to be wrapped.
timeout (float): The default timeout for sends and recvs, in
seconds. Set to ``None`` for no timeout, and 0 for
nonblocking. Defaults to *sock*'s own timeout if already set,
and 10 seconds otherwise.
maxsize (int): The default maximum number of bytes to be received
into the buffer before it is considered full and raises an
exception. Defaults to 32 kilobytes.
recvsize (int): The number of bytes to recv for every
lower-level :meth:`socket.recv` call. Defaults to *maxsize*.
*timeout* and *maxsize* can both be overridden on individual socket
operations.
All ``recv`` methods return bytestrings (:class:`bytes`) and can
raise :exc:`socket.error`. :exc:`Timeout`,
:exc:`ConnectionClosed`, and :exc:`MessageTooLong` all inherit
from :exc:`socket.error` and exist to provide better error
messages. Received bytes are always buffered, even if an exception
is raised. Use :meth:`BufferedSocket.getrecvbuffer` to retrieve
partial recvs.
BufferedSocket does not replace the built-in socket by any
means. While the overlapping parts of the API are kept parallel to
the built-in :class:`socket.socket`, BufferedSocket does not
inherit from socket, and most socket functionality is only
available on the underlying socket. :meth:`socket.getpeername`,
:meth:`socket.getsockname`, :meth:`socket.fileno`, and others are
only available on the underlying socket that is wrapped. Use the
``BufferedSocket.sock`` attribute to access it. See the examples
for more information on how to use BufferedSockets with built-in
sockets.
The BufferedSocket is threadsafe, but consider the semantics of
your protocol before accessing a single socket from multiple
threads. Similarly, once the BufferedSocket is constructed, avoid
using the underlying socket directly. Only use it for operations
unrelated to messages, e.g., :meth:`socket.getpeername`.
"""
def __init__(self, sock, timeout=_UNSET,
maxsize=DEFAULT_MAXSIZE, recvsize=_UNSET):
self.sock = sock
self.rbuf = b''
self.sbuf = []
self.maxsize = int(maxsize)
if timeout is _UNSET:
if self.sock.gettimeout() is None:
self.timeout = DEFAULT_TIMEOUT
else:
self.timeout = self.sock.gettimeout()
else:
if timeout is None:
self.timeout = timeout
else:
self.timeout = float(timeout)
if recvsize is _UNSET:
self._recvsize = self.maxsize
else:
self._recvsize = int(recvsize)
self._send_lock = RLock()
self._recv_lock = RLock()
def settimeout(self, timeout):
"Set the default *timeout* for future operations, in seconds."
self.timeout = timeout
def gettimeout(self):
return self.timeout
def setblocking(self, blocking):
self.timeout = None if blocking else 0.0
def setmaxsize(self, maxsize):
"""Set the default maximum buffer size *maxsize* for future
operations, in bytes. Does not truncate the current buffer.
"""
self.maxsize = maxsize
def getrecvbuffer(self):
"Returns the receive buffer bytestring (rbuf)."
with self._recv_lock:
return self.rbuf
def getsendbuffer(self):
"Returns a copy of the send buffer list."
with self._send_lock:
return b''.join(self.sbuf)
def recv(self, size, flags=0, timeout=_UNSET):
"""Returns **up to** *size* bytes, using the internal buffer before
performing a single :meth:`socket.recv` operation.
Args:
size (int): The maximum number of bytes to receive.
flags (int): Kept for API compatibility with sockets. Only
the default, ``0``, is valid.
timeout (float): The timeout for this operation. Can be
``0`` for nonblocking and ``None`` for no
timeout. Defaults to the value set in the constructor
of BufferedSocket.
If the operation does not complete in *timeout* seconds, a
:exc:`Timeout` is raised. Much like the built-in
:class:`socket.socket`, if this method returns an empty string,
then the socket is closed and recv buffer is empty. Further
calls to recv will raise :exc:`socket.error`.
"""
with self._recv_lock:
if timeout is _UNSET:
timeout = self.timeout
if flags:
raise ValueError("non-zero flags not supported: %r" % flags)
if len(self.rbuf) >= size:
data, self.rbuf = self.rbuf[:size], self.rbuf[size:]
return data
if self.rbuf:
ret, self.rbuf = self.rbuf, b''
return ret
self.sock.settimeout(timeout)
try:
data = self.sock.recv(self._recvsize)
except socket.timeout:
raise Timeout(timeout) # check the rbuf attr for more
if len(data) > size:
data, self.rbuf = data[:size], data[size:]
return data
def peek(self, size, timeout=_UNSET):
"""Returns *size* bytes from the socket and/or internal buffer. Bytes
are retained in BufferedSocket's internal recv buffer. To only
see bytes in the recv buffer, use :meth:`getrecvbuffer`.
Args:
size (int): The exact number of bytes to peek at
timeout (float): The timeout for this operation. Can be 0 for
nonblocking and None for no timeout. Defaults to the value
set in the constructor of BufferedSocket.
If the appropriate number of bytes cannot be fetched from the
buffer and socket before *timeout* expires, then a
:exc:`Timeout` will be raised. If the connection is closed, a
:exc:`ConnectionClosed` will be raised.
"""
with self._recv_lock:
if len(self.rbuf) >= size:
return self.rbuf[:size]
data = self.recv_size(size, timeout=timeout)
self.rbuf = data + self.rbuf
return data
def recv_close(self, timeout=_UNSET, maxsize=_UNSET):
"""Receive until the connection is closed, up to *maxsize* bytes. If
more than *maxsize* bytes are received, raises :exc:`MessageTooLong`.
"""
# recv_close works by using recv_size to request maxsize data,
# and ignoring ConnectionClose, returning and clearing the
# internal buffer instead. It raises an exception if
# ConnectionClosed isn't raised.
with self._recv_lock:
if maxsize is _UNSET:
maxsize = self.maxsize
if maxsize is None:
maxsize = _RECV_LARGE_MAXSIZE
try:
recvd = self.recv_size(maxsize + 1, timeout)
except ConnectionClosed:
ret, self.rbuf = self.rbuf, b''
else:
# put extra received bytes (now in rbuf) after recvd
self.rbuf = recvd + self.rbuf
size_read = min(maxsize, len(self.rbuf))
raise MessageTooLong(size_read) # check receive buffer
return ret
def recv_until(self, delimiter, timeout=_UNSET, maxsize=_UNSET,
with_delimiter=False):
"""Receive until *delimiter* is found, *maxsize* bytes have been read,
or *timeout* is exceeded.
Args:
delimiter (bytes): One or more bytes to be searched for
in the socket stream.
timeout (float): The timeout for this operation. Can be 0 for
nonblocking and None for no timeout. Defaults to the value
set in the constructor of BufferedSocket.
maxsize (int): The maximum size for the internal buffer.
Defaults to the value set in the constructor.
with_delimiter (bool): Whether or not to include the
delimiter in the output. ``False`` by default, but
``True`` is useful in cases where one is simply
forwarding the messages.
``recv_until`` will raise the following exceptions:
* :exc:`Timeout` if more than *timeout* seconds expire.
* :exc:`ConnectionClosed` if the underlying socket is closed
by the sending end.
* :exc:`MessageTooLong` if the delimiter is not found in the
first *maxsize* bytes.
* :exc:`socket.error` if operating in nonblocking mode
(*timeout* equal to 0), or if some unexpected socket error
occurs, such as operating on a closed socket.
"""
with self._recv_lock:
if maxsize is _UNSET:
maxsize = self.maxsize
if maxsize is None:
maxsize = _RECV_LARGE_MAXSIZE
if timeout is _UNSET:
timeout = self.timeout
len_delimiter = len(delimiter)
sock = self.sock
recvd = bytearray(self.rbuf)
start = time.time()
find_offset_start = 0 # becomes a negative index below
if not timeout: # covers None (no timeout) and 0 (nonblocking)
sock.settimeout(timeout)
try:
while 1:
offset = recvd.find(delimiter, find_offset_start, maxsize)
if offset != -1: # str.find returns -1 when no match found
if with_delimiter: # include delimiter in return
offset += len_delimiter
rbuf_offset = offset
else:
rbuf_offset = offset + len_delimiter
break
elif len(recvd) > maxsize:
raise MessageTooLong(maxsize, delimiter) # see rbuf
if timeout:
cur_timeout = timeout - (time.time() - start)
if cur_timeout <= 0.0:
raise socket.timeout()
sock.settimeout(cur_timeout)
nxt = sock.recv(self._recvsize)
if not nxt:
args = (len(recvd), delimiter)
msg = ('connection closed after reading %s bytes'
' without finding symbol: %r' % args)
raise ConnectionClosed(msg) # check the recv buffer
recvd.extend(nxt)
find_offset_start = -len(nxt) - len_delimiter + 1
except socket.timeout:
self.rbuf = bytes(recvd)
msg = ('read %s bytes without finding delimiter: %r'
% (len(recvd), delimiter))
raise Timeout(timeout, msg) # check the recv buffer
except Exception:
self.rbuf = bytes(recvd)
raise
val, self.rbuf = bytes(recvd[:offset]), bytes(recvd[rbuf_offset:])
return val
def recv_size(self, size, timeout=_UNSET):
"""Read off of the internal buffer, then off the socket, until
*size* bytes have been read.
Args:
size (int): number of bytes to read before returning.
timeout (float): The timeout for this operation. Can be 0 for
nonblocking and None for no timeout. Defaults to the value
set in the constructor of BufferedSocket.
If the appropriate number of bytes cannot be fetched from the
buffer and socket before *timeout* expires, then a
:exc:`Timeout` will be raised. If the connection is closed, a
:exc:`ConnectionClosed` will be raised.
"""
with self._recv_lock:
if timeout is _UNSET:
timeout = self.timeout
chunks = []
total_bytes = 0
try:
start = time.time()
self.sock.settimeout(timeout)
nxt = self.rbuf or self.sock.recv(self._recvsize)
while nxt:
total_bytes += len(nxt)
if total_bytes >= size:
break
chunks.append(nxt)
if timeout:
cur_timeout = timeout - (time.time() - start)
if cur_timeout <= 0.0:
raise socket.timeout()
self.sock.settimeout(cur_timeout)
nxt = self.sock.recv(self._recvsize)
else:
msg = ('connection closed after reading %s of %s requested'
' bytes' % (total_bytes, size))
raise ConnectionClosed(msg) # check recv buffer
except socket.timeout:
self.rbuf = b''.join(chunks)
msg = f'read {total_bytes} of {size} bytes'
raise Timeout(timeout, msg) # check recv buffer
except Exception:
# received data is still buffered in the case of errors
self.rbuf = b''.join(chunks)
raise
extra_bytes = total_bytes - size
if extra_bytes:
last, self.rbuf = nxt[:-extra_bytes], nxt[-extra_bytes:]
else:
last, self.rbuf = nxt, b''
chunks.append(last)
return b''.join(chunks)
def send(self, data, flags=0, timeout=_UNSET):
"""Send the contents of the internal send buffer, as well as *data*,
to the receiving end of the connection. Returns the total
number of bytes sent. If no exception is raised, all of *data* was
sent and the internal send buffer is empty.
Args:
data (bytes): The bytes to send.
flags (int): Kept for API compatibility with sockets. Only
the default 0 is valid.
timeout (float): The timeout for this operation. Can be 0 for
nonblocking and None for no timeout. Defaults to the value
set in the constructor of BufferedSocket.
Will raise :exc:`Timeout` if the send operation fails to
complete before *timeout*. In the event of an exception, use
:meth:`BufferedSocket.getsendbuffer` to see which data was
unsent.
"""
with self._send_lock:
if timeout is _UNSET:
timeout = self.timeout
if flags:
raise ValueError("non-zero flags not supported")
sbuf = self.sbuf
sbuf.append(data)
if len(sbuf) > 1:
sbuf[:] = [b''.join([s for s in sbuf if s])]
self.sock.settimeout(timeout)
start, total_sent = time.time(), 0
try:
while sbuf[0]:
sent = self.sock.send(sbuf[0])
total_sent += sent
sbuf[0] = sbuf[0][sent:]
if timeout:
cur_timeout = timeout - (time.time() - start)
if cur_timeout <= 0.0:
raise socket.timeout()
self.sock.settimeout(cur_timeout)
except socket.timeout:
raise Timeout(timeout, '%s bytes unsent' % len(sbuf[0]))
return total_sent
def sendall(self, data, flags=0, timeout=_UNSET):
"""A passthrough to :meth:`~BufferedSocket.send`, retained for
parallelism to the :class:`socket.socket` API.
"""
return self.send(data, flags, timeout)
def flush(self):
"Send the contents of the internal send buffer."
with self._send_lock:
self.send(b'')
return
def buffer(self, data):
"Buffer *data* bytes for the next send operation."
with self._send_lock:
self.sbuf.append(data)
return
# # #
# # # Passing through some socket basics
# # #
def getsockname(self):
"""Convenience function to return the wrapped socket's own address.
See :meth:`socket.getsockname` for more details.
"""
return self.sock.getsockname()
def getpeername(self):
"""Convenience function to return the remote address to which the
wrapped socket is connected. See :meth:`socket.getpeername`
for more details.
"""
return self.sock.getpeername()
def getsockopt(self, level, optname, buflen=None):
"""Convenience function passing through to the wrapped socket's
:meth:`socket.getsockopt`.
"""
args = (level, optname)
if buflen is not None:
args += (buflen,)
return self.sock.getsockopt(*args)
def setsockopt(self, level, optname, value):
"""Convenience function passing through to the wrapped socket's
:meth:`socket.setsockopt`.
"""
return self.sock.setsockopt(level, optname, value)
@property
def type(self):
"""A passthrough to the wrapped socket's type. Valid usages should
only ever see :data:`socket.SOCK_STREAM`.
"""
return self.sock.type
@property
def family(self):
"""A passthrough to the wrapped socket's family. BufferedSocket
supports all widely-used families, so this read-only attribute
can be one of :data:`socket.AF_INET` for IP,
:data:`socket.AF_INET6` for IPv6, and :data:`socket.AF_UNIX`
for UDS.
"""
return self.sock.family
@property
def proto(self):
"""A passthrough to the wrapped socket's protocol. The ``proto``
attribute is very rarely used, so it's always 0, meaning "the
default" protocol. Pretty much all the practical information
is in :attr:`~BufferedSocket.type` and
:attr:`~BufferedSocket.family`, so you can go back to never
thinking about this.
"""
return self.sock.proto
# # #
# # # Now for some more advanced interpretations of the builtin socket
# # #
def fileno(self):
"""Returns the file descriptor of the wrapped socket. -1 if it has
been closed on this end.
Note that this makes the BufferedSocket selectable, i.e.,
usable for operating system event loops without any external
libraries. Keep in mind that the operating system cannot know
about data in BufferedSocket's internal buffer. Exercise
discipline with calling ``recv*`` functions.
"""
return self.sock.fileno()
def close(self):
"""Closes the wrapped socket, and empties the internal buffers. The
send buffer is not flushed automatically, so if you have been
calling :meth:`~BufferedSocket.buffer`, be sure to call
:meth:`~BufferedSocket.flush` before calling this
method. After calling this method, future socket operations
will raise :exc:`socket.error`.
"""
with self._recv_lock:
with self._send_lock:
self.rbuf = b''
self.rbuf_unconsumed = self.rbuf
self.sbuf[:] = []
self.sock.close()
return
def shutdown(self, how):
"""Convenience method which passes through to the wrapped socket's
:meth:`~socket.shutdown`. Semantics vary by platform, so no
special internal handling is done with the buffers. This
method exists to facilitate the most common usage, wherein a
full ``shutdown`` is followed by a
:meth:`~BufferedSocket.close`. Developers requiring more
support, please open `an issue`_.
.. _an issue: https://github.com/mahmoud/boltons/issues
"""
with self._recv_lock:
with self._send_lock:
self.sock.shutdown(how)
return
# end BufferedSocket
|
BufferedSocket
|
python
|
ethereum__web3.py
|
web3/contract/base_contract.py
|
{
"start": 47561,
"end": 50631
}
|
class ____:
"""
An alternative Contract API.
This call:
> contract.caller({'from': eth.accounts[1], 'gas': 100000, ...}).add(2, 3)
is equivalent to this call in the classic contract:
> contract.functions.add(2, 3).call({'from': eth.accounts[1], 'gas': 100000, ...})
Other options for invoking this class include:
> contract.caller.add(2, 3)
or
> contract.caller().add(2, 3)
or
> contract.caller(transaction={'from': eth.accounts[1], 'gas': 100000, ...}).add(2, 3) # noqa: E501
"""
# mypy types
_functions: Sequence[ABIFunction]
def __init__(
self,
abi: ABI,
w3: Union["Web3", "AsyncWeb3[Any]"],
address: ChecksumAddress,
decode_tuples: bool | None = False,
) -> None:
self.w3 = w3
self.address = address
self.abi = abi
self.decode_tuples = decode_tuples
self._functions = []
def __getattr__(self, function_name: str) -> Any:
function_names = [
get_name_from_abi_element_identifier(fn["name"])
for fn in self._functions
if fn.get("type") == "function"
]
if self.abi is None:
raise NoABIFound(
"There is no ABI found for this contract.",
)
elif not self._functions or len(self._functions) == 0:
raise NoABIFunctionsFound(
"The ABI for this contract contains no function definitions. ",
"Are you sure you provided the correct contract ABI?",
)
elif get_name_from_abi_element_identifier(function_name) not in function_names:
functions_available = ", ".join(function_names)
raise ABIFunctionNotFound(
f"The function '{function_name}' was not found in this contract's ABI.",
" Here is a list of all of the function names found: ",
f"{functions_available}. ",
"Did you mean to call one of those functions?",
)
else:
function_identifier = function_name
if "(" not in function_name:
function_identifier = _get_any_abi_signature_with_name(
function_name, self._functions
)
return super().__getattribute__(function_identifier)
def __hasattr__(self, function_name: str) -> bool:
try:
return function_name in self.__dict__["_functions"]
except ABIFunctionNotFound:
return False
@staticmethod
def call_function(
fn: TContractFn,
*args: Any,
transaction: TxParams | None = None,
block_identifier: BlockIdentifier = None,
ccip_read_enabled: bool | None = None,
**kwargs: Any,
) -> Any:
if transaction is None:
transaction = {}
return fn(*args, **kwargs).call(
transaction=transaction,
block_identifier=block_identifier,
ccip_read_enabled=ccip_read_enabled,
)
|
BaseContractCaller
|
python
|
PrefectHQ__prefect
|
src/prefect/task_worker.py
|
{
"start": 2527,
"end": 20805
}
|
class ____:
"""This class is responsible for serving tasks that may be executed in the background
by a task runner via the traditional engine machinery.
When `start()` is called, the task worker will open a websocket connection to a
server-side queue of scheduled task runs. When a scheduled task run is found, the
scheduled task run is submitted to the engine for execution with a minimal `EngineContext`
so that the task run can be governed by orchestration rules.
Args:
- tasks: A list of tasks to serve. These tasks will be submitted to the engine
when a scheduled task run is found.
- limit: The maximum number of tasks that can be run concurrently. Defaults to 10.
Pass `None` to remove the limit.
"""
def __init__(
self,
*tasks: Task[P, R],
limit: int | None = 10,
):
self.tasks: list["Task[..., Any]"] = []
for t in tasks:
if not TYPE_CHECKING:
if not isinstance(t, Task):
continue
if t.cache_policy in [None, NO_CACHE, NotSet]:
self.tasks.append(
t.with_options(persist_result=True, cache_policy=DEFAULT)
)
else:
self.tasks.append(t.with_options(persist_result=True))
self.task_keys: set[str] = set(t.task_key for t in tasks if isinstance(t, Task)) # pyright: ignore[reportUnnecessaryIsInstance]
self._started_at: Optional[DateTime] = None
self.stopping: bool = False
self._client = get_client()
self._exit_stack = AsyncExitStack()
try:
asyncio.get_running_loop()
except RuntimeError:
raise RuntimeError(
"TaskWorker must be initialized within an async context."
)
self._runs_task_group: Optional[anyio.abc.TaskGroup] = None
self._executor = ThreadPoolExecutor(max_workers=limit if limit else None)
self._limiter = anyio.CapacityLimiter(limit) if limit else None
self.in_flight_task_runs: dict[str, dict[UUID, DateTime]] = {
task_key: {} for task_key in self.task_keys
}
self.finished_task_runs: dict[str, int] = {
task_key: 0 for task_key in self.task_keys
}
@property
def client_id(self) -> str:
return f"{socket.gethostname()}-{os.getpid()}"
@property
def started_at(self) -> Optional[DateTime]:
return self._started_at
@property
def started(self) -> bool:
return self._started_at is not None
@property
def limit(self) -> Optional[int]:
return int(self._limiter.total_tokens) if self._limiter else None
@property
def current_tasks(self) -> Optional[int]:
return (
int(self._limiter.borrowed_tokens)
if self._limiter
else sum(len(runs) for runs in self.in_flight_task_runs.values())
)
@property
def available_tasks(self) -> Optional[int]:
return int(self._limiter.available_tokens) if self._limiter else None
def handle_sigterm(self, signum: int, frame: object) -> None:
"""
Shuts down the task worker when a SIGTERM is received.
"""
logger.info("SIGTERM received, initiating graceful shutdown...")
from_sync.call_in_loop_thread(create_call(self.stop))
sys.exit(0)
@sync_compatible
async def start(self, timeout: Optional[float] = None) -> None:
"""
Starts a task worker, which runs the tasks provided in the constructor.
Args:
timeout: If provided, the task worker will exit after the given number of
seconds. Defaults to None, meaning the task worker will run indefinitely.
"""
_register_signal(signal.SIGTERM, self.handle_sigterm)
start_client_metrics_server()
async with asyncnullcontext() if self.started else self:
logger.info("Starting task worker...")
try:
with timeout_async(timeout):
await self._subscribe_to_task_scheduling()
except InvalidStatus as exc:
if exc.response.status_code == 403:
logger.error(
"403: Could not establish a connection to the `/task_runs/subscriptions/scheduled`"
f" endpoint found at:\n\n {get_current_settings().api.url}"
"\n\nPlease double-check the values of"
" `PREFECT_API_AUTH_STRING` and `PREFECT_SERVER_API_AUTH_STRING` if running a Prefect server "
"or `PREFECT_API_URL` and `PREFECT_API_KEY` environment variables if using Prefect Cloud."
)
else:
raise
@sync_compatible
async def stop(self):
"""Stops the task worker's polling cycle."""
if not self.started:
raise RuntimeError(
"Task worker has not yet started. Please start the task worker by"
" calling .start()"
)
self._started_at = None
self.stopping = True
raise StopTaskWorker
async def _acquire_token(self, task_run_id: UUID) -> bool:
try:
if self._limiter:
await self._limiter.acquire_on_behalf_of(task_run_id)
except RuntimeError:
logger.debug(f"Token already acquired for task run: {task_run_id!r}")
return False
return True
def _release_token(self, task_run_id: UUID) -> bool:
try:
if self._limiter:
self._limiter.release_on_behalf_of(task_run_id)
except RuntimeError:
logger.debug(f"No token to release for task run: {task_run_id!r}")
return False
return True
async def _subscribe_to_task_scheduling(self):
base_url = get_current_settings().api.url
if base_url is None:
raise ValueError(
"`PREFECT_API_URL` must be set to use the task worker. "
"Task workers are not compatible with the ephemeral API."
)
task_keys_repr = " | ".join(
task_key.split(".")[-1].split("-")[0] for task_key in sorted(self.task_keys)
)
logger.info(f"Subscribing to runs of task(s): {task_keys_repr}")
async for task_run in Subscription(
model=TaskRun,
path="/task_runs/subscriptions/scheduled",
keys=self.task_keys,
client_id=self.client_id,
base_url=base_url,
):
logger.info(f"Received task run: {task_run.id} - {task_run.name}")
token_acquired = await self._acquire_token(task_run.id)
if token_acquired:
assert self._runs_task_group is not None, (
"Task group was not initialized"
)
self._runs_task_group.start_soon(
self._safe_submit_scheduled_task_run, task_run
)
async def _safe_submit_scheduled_task_run(self, task_run: TaskRun):
self.in_flight_task_runs[task_run.task_key][task_run.id] = (
prefect.types._datetime.now("UTC")
)
try:
await self._submit_scheduled_task_run(task_run)
except BaseException as exc:
logger.exception(
f"Failed to submit task run {task_run.id!r}",
exc_info=exc,
)
finally:
self.in_flight_task_runs[task_run.task_key].pop(task_run.id, None)
self.finished_task_runs[task_run.task_key] += 1
self._release_token(task_run.id)
async def _submit_scheduled_task_run(self, task_run: TaskRun):
if TYPE_CHECKING:
assert task_run.state is not None
logger.debug(
f"Found task run: {task_run.name!r} in state: {task_run.state.name!r}"
)
task = next((t for t in self.tasks if t.task_key == task_run.task_key), None)
if not task:
if get_current_settings().tasks.scheduling.delete_failed_submissions:
logger.warning(
f"Task {task_run.name!r} not found in task worker registry."
)
await self._client._client.delete(f"/task_runs/{task_run.id}") # type: ignore
return
# The ID of the parameters for this run are stored in the Scheduled state's
# state_details. If there is no parameters_id, then the task was created
# without parameters.
parameters = {}
wait_for = []
run_context = None
if should_try_to_read_parameters(task, task_run):
parameters_id = task_run.state.state_details.task_parameters_id
if parameters_id is None:
logger.warning(
f"Task run {task_run.id!r} has no parameters ID. Skipping parameter retrieval."
)
return
task.persist_result = True
store = await ResultStore(
result_storage=await get_or_create_default_task_scheduling_storage()
).update_for_task(task)
try:
run_data: dict[str, Any] = await read_parameters(store, parameters_id)
parameters = run_data.get("parameters", {})
wait_for = run_data.get("wait_for", [])
run_context = run_data.get("context", None)
except Exception as exc:
logger.exception(
f"Failed to read parameters for task run {task_run.id!r}",
exc_info=exc,
)
if get_current_settings().tasks.scheduling.delete_failed_submissions:
logger.info(
f"Deleting task run {task_run.id!r} because it failed to submit"
)
await self._client._client.delete(f"/task_runs/{task_run.id}")
return
initial_state = task_run.state
new_state = Pending()
new_state.state_details.deferred = True
new_state.state_details.task_run_id = task_run.id
new_state.state_details.flow_run_id = task_run.flow_run_id
state = new_state
task_run.state = state
emit_task_run_state_change_event(
task_run=task_run,
initial_state=initial_state,
validated_state=state,
)
if task_run_url := url_for(task_run):
logger.info(
f"Submitting task run {task_run.name!r} to engine. View in the UI: {task_run_url}"
)
if task.isasync:
await run_task_async(
task=task,
task_run_id=task_run.id,
task_run=task_run,
parameters=parameters,
wait_for=wait_for,
return_type="state",
context=run_context,
)
else:
context = copy_context()
future = self._executor.submit(
context.run,
run_task_sync,
task=task,
task_run_id=task_run.id,
task_run=task_run,
parameters=parameters,
wait_for=wait_for,
return_type="state",
context=run_context,
)
await asyncio.wrap_future(future)
async def execute_task_run(self, task_run: TaskRun) -> None:
"""Execute a task run in the task worker."""
async with self if not self.started else asyncnullcontext():
token_acquired = await self._acquire_token(task_run.id)
if token_acquired:
await self._safe_submit_scheduled_task_run(task_run)
async def __aenter__(self) -> Self:
logger.debug("Starting task worker...")
if self._client._closed: # pyright: ignore[reportPrivateUsage]
self._client = get_client()
self._runs_task_group = anyio.create_task_group()
await self._exit_stack.__aenter__()
await self._exit_stack.enter_async_context(self._client)
await self._exit_stack.enter_async_context(self._runs_task_group)
self._exit_stack.enter_context(self._executor)
self._started_at = prefect.types._datetime.now("UTC")
return self
async def __aexit__(self, *exc_info: Any) -> None:
logger.debug("Stopping task worker...")
self._started_at = None
await self._exit_stack.__aexit__(*exc_info)
def create_status_server(task_worker: TaskWorker) -> FastAPI:
status_app = FastAPI()
@status_app.get("/status")
def status(): # pyright: ignore[reportUnusedFunction]
if TYPE_CHECKING:
assert task_worker.started_at is not None
return {
"client_id": task_worker.client_id,
"started_at": task_worker.started_at.isoformat(),
"stopping": task_worker.stopping,
"limit": task_worker.limit,
"current": task_worker.current_tasks,
"available": task_worker.available_tasks,
"tasks": sorted(task_worker.task_keys),
"finished": task_worker.finished_task_runs,
"in_flight": {
key: {str(run): start.isoformat() for run, start in tasks.items()}
for key, tasks in task_worker.in_flight_task_runs.items()
},
}
return status_app
@sync_compatible
async def serve(
*tasks: Task[P, R],
limit: Optional[int] = 10,
status_server_port: Optional[int] = None,
timeout: Optional[float] = None,
):
"""Serve the provided tasks so that their runs may be submitted to
and executed in the engine. Tasks do not need to be within a flow run context to be
submitted. You must `.submit` the same task object that you pass to `serve`.
Args:
- tasks: A list of tasks to serve. When a scheduled task run is found for a
given task, the task run will be submitted to the engine for execution.
- limit: The maximum number of tasks that can be run concurrently. Defaults to 10.
Pass `None` to remove the limit.
- status_server_port: An optional port on which to start an HTTP server
exposing status information about the task worker. If not provided, no
status server will run.
- timeout: If provided, the task worker will exit after the given number of
seconds. Defaults to None, meaning the task worker will run indefinitely.
Example:
```python
from prefect import task
from prefect.task_worker import serve
@task(log_prints=True)
def say(message: str):
print(message)
@task(log_prints=True)
def yell(message: str):
print(message.upper())
# starts a long-lived process that listens for scheduled runs of these tasks
serve(say, yell)
```
"""
task_worker = TaskWorker(*tasks, limit=limit)
status_server_task = None
if status_server_port is not None:
server = uvicorn.Server(
uvicorn.Config(
app=create_status_server(task_worker),
host="127.0.0.1",
port=status_server_port,
access_log=False,
log_level="warning",
)
)
loop = asyncio.get_event_loop()
status_server_task = loop.create_task(server.serve())
try:
await task_worker.start(timeout=timeout)
except TimeoutError:
if timeout is not None:
logger.info(f"Task worker timed out after {timeout} seconds. Exiting...")
else:
raise
except BaseExceptionGroup as exc: # novermin
exceptions = exc.exceptions
n_exceptions = len(exceptions)
logger.error(
f"Task worker stopped with {n_exceptions} exception{'s' if n_exceptions != 1 else ''}:"
f"\n" + "\n".join(str(e) for e in exceptions)
)
except StopTaskWorker:
logger.info("Task worker stopped.")
except (asyncio.CancelledError, KeyboardInterrupt):
logger.info("Task worker interrupted, stopping...")
finally:
if status_server_task:
status_server_task.cancel()
try:
await status_server_task
except asyncio.CancelledError:
pass
async def store_parameters(
result_store: ResultStore, identifier: UUID, parameters: dict[str, Any]
) -> None:
"""Store parameters for a task run in the result store.
Args:
result_store: The result store to store the parameters in.
identifier: The identifier of the task run.
parameters: The parameters to store.
"""
if result_store.result_storage is None:
raise ValueError(
"Result store is not configured - must have a result storage block to store parameters"
)
record = ResultRecord(
result=parameters,
metadata=ResultRecordMetadata(
serializer=result_store.serializer, storage_key=str(identifier)
),
)
await call_explicitly_async_block_method(
result_store.result_storage,
"write_path",
(f"parameters/{identifier}",),
{"content": record.serialize()},
)
async def read_parameters(
result_store: ResultStore, identifier: UUID
) -> dict[str, Any]:
"""Read parameters for a task run from the result store.
Args:
result_store: The result store to read the parameters from.
identifier: The identifier of the task run.
Returns:
The parameters for the task run.
"""
if result_store.result_storage is None:
raise ValueError(
"Result store is not configured - must have a result storage block to read parameters"
)
record: ResultRecord[Any] = ResultRecord[Any].deserialize(
await call_explicitly_async_block_method(
result_store.result_storage,
"read_path",
(f"parameters/{identifier}",),
{},
)
)
return record.result
|
TaskWorker
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/array_ops/array_ops_test.py
|
{
"start": 12968,
"end": 14453
}
|
class ____(test_util.TensorFlowTestCase):
def testExpandScalar(self):
scalar = "hello"
scalar_expanded = array_ops.expand_dims(scalar, [0])
self.assertEqual(scalar_expanded.get_shape(), (1,))
def testSqueezeScalar(self):
scalar = "hello"
scalar_squeezed = array_ops.squeeze(scalar, ())
self.assertEqual(scalar_squeezed.get_shape(), ())
def testSqueezeMatrix(self):
matrix = [[1, 2, 3]]
matrix_squeezed = array_ops.squeeze(matrix, [0])
self.assertEqual(matrix_squeezed.get_shape(), (3))
with self.assertRaisesRegex(
Exception, "Can not squeeze dim.1., expected a dimension of 1, got 3"):
matrix_squeezed = array_ops.squeeze(matrix, [1])
def testSqueezeScalarDim(self):
matrix = [[1, 2, 3]]
matrix_squeezed = array_ops.squeeze(matrix, 0)
self.assertEqual(matrix_squeezed.get_shape(), (3))
def testExpandDimsWithNonScalarDim(self):
with self.assertRaisesRegex(Exception,
"must be a tensor with a single value"):
array_ops.expand_dims(1, axis=[0, 1])
def testReshapeWithManyDims(self):
with self.assertRaisesRegex(errors.InvalidArgumentError,
"too many dimensions"):
self.evaluate(
array_ops.reshape(
tensor=[[1]],
shape=constant_op.constant([1 for i in range(254)],
dtype=dtypes.int64)))
@test_util.with_eager_op_as_function
|
OperatorShapeTest
|
python
|
pytorch__pytorch
|
test/inductor/test_remote_cache.py
|
{
"start": 872,
"end": 1833
}
|
class ____(TestCase):
def test_normal_logging(
self,
) -> None:
c = RemoteCache(NoopBackend(), RemoteCachePassthroughSerde())
c.put("test", "value")
c.get("test")
def test_failure_no_sample(
self,
) -> None:
c = RemoteCache(FailingBackend(), RemoteCachePassthroughSerde())
with self.assertRaises(AssertionError):
c.put("test", "value")
with self.assertRaises(AssertionError):
c.get("test")
def test_failure_logging(
self,
) -> None:
c = FakeCache()
with self.assertRaises(AssertionError):
c.put("test", "value")
self.assertEqual(c.sample.fail_reason, "testput")
with self.assertRaises(AssertionError):
c.get("test")
self.assertEqual(c.sample.fail_reason, "testget")
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
run_tests()
|
TestRemoteCache
|
python
|
weaviate__weaviate-python-client
|
weaviate/rbac/models.py
|
{
"start": 9907,
"end": 9977
}
|
class ____(_CollectionsPermission):
pass
|
CollectionsPermissionOutput
|
python
|
kamyu104__LeetCode-Solutions
|
Python/process-string-with-special-operations-ii.py
|
{
"start": 51,
"end": 805
}
|
class ____(object):
def processStr(self, s, k):
"""
:type s: str
:type k: int
:rtype: str
"""
l = 0
for x in s:
if x == '*':
l = max(l-1, 0)
elif x == '#':
l <<= 1
elif x == '%':
continue
else:
l += 1
if k >= l:
return '.'
for x in reversed(s):
if x == '*':
l += 1
elif x == '#':
l >>= 1
if k >= l:
k -= l
elif x == '%':
k = (l-1)-k
else:
l -= 1
if l == k:
return x
|
Solution
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-tiktok-marketing/unit_tests/integration/test_reports_hourly.py
|
{
"start": 16238,
"end": 20643
}
|
class ____(TestCase):
stream_name = "advertisers_reports_hourly"
advertiser_id = "872746382648"
cursor = "2024-01-01 10:00:00"
cursor_field = "stat_time_hour"
metrics = [
"spend",
"cpc",
"cpm",
"impressions",
"clicks",
"ctr",
"reach",
"cost_per_1000_reached",
"frequency",
"video_play_actions",
"video_watched_2s",
"video_watched_6s",
"average_video_play",
"average_video_play_per_user",
"video_views_p25",
"video_views_p50",
"video_views_p75",
"video_views_p100",
"profile_visits",
"likes",
"comments",
"shares",
"follows",
"clicks_on_music_disc",
"real_time_app_install",
"real_time_app_install_cost",
"app_install",
]
def catalog(self, sync_mode: SyncMode = SyncMode.full_refresh):
return CatalogBuilder().with_stream(name=self.stream_name, sync_mode=sync_mode).build()
def config(self):
return ConfigBuilder().with_end_date("2024-01-02").build()
def state(self):
return (
StateBuilder()
.with_stream_state(
stream_name=self.stream_name,
state={
"states": [
{"partition": {"advertiser_id": self.advertiser_id, "parent_slice": {}}, "cursor": {self.cursor_field: self.cursor}}
]
},
)
.build()
)
def mock_response(self, http_mocker: HttpMocker):
http_mocker.get(
HttpRequest(
url=f"https://business-api.tiktok.com/open_api/v1.3/report/integrated/get/",
query_params={
"service_type": "AUCTION",
"report_type": "BASIC",
"data_level": "AUCTION_ADVERTISER",
"dimensions": '["advertiser_id", "stat_time_hour"]',
"metrics": str(self.metrics).replace("'", '"'),
"start_date": self.config()["start_date"],
"end_date": self.config()["start_date"],
"page_size": 1000,
"advertiser_id": self.advertiser_id,
},
),
HttpResponse(body=json.dumps(find_template(self.stream_name, __file__)), status_code=200),
)
http_mocker.get(
HttpRequest(
url=f"https://business-api.tiktok.com/open_api/v1.3/report/integrated/get/",
query_params={
"service_type": "AUCTION",
"report_type": "BASIC",
"data_level": "AUCTION_ADVERTISER",
"dimensions": '["advertiser_id", "stat_time_hour"]',
"metrics": str(self.metrics).replace("'", '"'),
"start_date": self.config()["end_date"],
"end_date": self.config()["end_date"],
"page_size": 1000,
"advertiser_id": self.advertiser_id,
},
),
HttpResponse(body=json.dumps(EMPTY_LIST_RESPONSE), status_code=200),
)
@HttpMocker()
def test_basic_read(self, http_mocker: HttpMocker):
mock_advertisers_slices(http_mocker, self.config())
self.mock_response(http_mocker)
output = read(get_source(config=self.config(), state=None), self.config(), self.catalog())
assert len(output.records) == 2
assert output.records[0].record.data.get("advertiser_id") is not None
assert output.records[0].record.data.get("stat_time_hour") is not None
@HttpMocker()
def test_read_with_state(self, http_mocker: HttpMocker):
mock_advertisers_slices(http_mocker, self.config())
self.mock_response(http_mocker)
output = read(
source=get_source(config=self.config(), state=self.state()),
config=self.config(),
catalog=self.catalog(sync_mode=SyncMode.incremental),
state=self.state(),
)
assert len(output.records) == 2
assert output.state_messages[1].state.stream.stream_state.states == [
{"cursor": {"stat_time_hour": self.cursor}, "partition": {"advertiser_id": self.advertiser_id, "parent_slice": {}}}
]
|
TestAdvertisersReportsHourly
|
python
|
great-expectations__great_expectations
|
great_expectations/render/components.py
|
{
"start": 15779,
"end": 16528
}
|
class ____(RenderedComponentContent):
def __init__(
self, content_blocks, styling=None, content_block_type="content_block_container"
) -> None:
super().__init__(content_block_type=content_block_type, styling=styling)
self.content_blocks = content_blocks
@override
def to_json_dict(self) -> dict[str, JSONValues]:
"""Returns a JSON-serializable dict representation of this RenderedContentBlockContainer.
Returns:
A JSON-serializable dict representation of this RenderedContentBlockContainer.
"""
d = super().to_json_dict()
d["content_blocks"] = RenderedContent.rendered_content_list_to_json(self.content_blocks)
return d
|
RenderedContentBlockContainer
|
python
|
ray-project__ray
|
python/ray/util/collective/examples/nccl_allreduce_multigpu_example.py
|
{
"start": 126,
"end": 1201
}
|
class ____:
def __init__(self):
with Device(0):
self.send1 = cp.ones((4,), dtype=cp.float32)
with Device(1):
self.send2 = cp.ones((4,), dtype=cp.float32) * 2
self.recv = cp.zeros((4,), dtype=cp.float32)
def setup(self, world_size, rank):
collective.init_collective_group(world_size, rank, "nccl", "177")
return True
def compute(self):
collective.allreduce_multigpu([self.send1, self.send2], "177")
return [self.send1, self.send2], self.send1.device, self.send2.device
def destroy(self):
collective.destroy_collective_group("177")
if __name__ == "__main__":
ray.init(address="auto")
num_workers = 2
workers = []
init_rets = []
for i in range(num_workers):
w = Worker.remote()
workers.append(w)
init_rets.append(w.setup.remote(num_workers, i))
a = ray.get(init_rets)
results = ray.get([w.compute.remote() for w in workers])
print(results)
ray.get([w.destroy.remote() for w in workers])
ray.shutdown()
|
Worker
|
python
|
facebook__pyre-check
|
client/commands/tests/infer_test.py
|
{
"start": 28905,
"end": 49690
}
|
class ____(testslide.TestCase):
def _assert_stubs(
self,
data: Dict[str, Any],
expected: str,
annotate_attributes: bool = False,
use_future_annotations: bool = False,
quote_annotations: bool = False,
simple_annotations: bool = False,
test_path: str = "/root/test.py",
qualifier: str = "test",
root: str = "/root",
) -> None:
infer_output = infer.RawInferOutput.create_from_json(
{
category: [
{
"location": {
"path": test_path,
"qualifier": qualifier,
"line": 1,
},
**value,
}
for value in values
]
for category, values in data.items()
}
)
module_annotations = infer.create_module_annotations(
infer_output=infer_output,
base_path=Path(root),
options=infer.StubGenerationOptions(
annotate_attributes=annotate_attributes,
use_future_annotations=use_future_annotations,
quote_annotations=quote_annotations,
simple_annotations=simple_annotations,
),
)
if len(module_annotations) != 1:
raise AssertionError("Expected exactly one module!")
module_annotation = module_annotations[0]
actual = module_annotation.to_stubs()
_assert_stubs_equal(actual, expected)
def test_stubs_defines(self) -> None:
self._assert_stubs(
{
"defines": [
{
"return": "int",
"name": "test.Test.ret_int",
"parent": "test.Test",
"parameters": [
{
"name": "self",
"annotation": None,
"value": None,
"index": 0,
}
],
"async": False,
}
]
},
"""\
class Test:
def ret_int(self) -> int: ...
""",
)
self._assert_stubs(
{
"defines": [
{
"return": "int",
"name": "test.returns_int",
"parent": None,
"parameters": [],
"async": True,
}
]
},
"""\
async def returns_int() -> int: ...
""",
)
self._assert_stubs(
{
"defines": [
{
"return": "int",
"name": "test.with_params",
"parent": None,
"parameters": [
{"name": "y", "annotation": None, "value": "7", "index": 0},
{
"name": "x",
"annotation": "int",
"value": "5",
"index": 1,
},
],
"async": False,
}
]
},
"def with_params(y=..., x: int = ...) -> int: ...",
)
self._assert_stubs(
{
"defines": [
{
"return": "str",
"name": "test.returns_string",
"parent": None,
"parameters": [],
"async": False,
}
]
},
"def returns_string() -> str: ...",
)
self._assert_stubs(
{
"defines": [
{
"return": "bool",
"name": "test.returns_bool",
"parent": None,
"parameters": [],
"async": False,
}
]
},
"def returns_bool() -> bool: ...",
)
self._assert_stubs(
{
"defines": [
{
"return": "float",
"name": "test.returns_float",
"parent": None,
"parameters": [],
"async": False,
}
]
},
"def returns_float() -> float: ...",
)
self._assert_stubs(
{
"defines": [
{
"name": "test.missing_param_test",
"parent": None,
"parameters": [
{"name": "x", "annotation": "int", "value": "5", "index": 0}
],
"async": False,
}
]
},
"def missing_param_test(x: int = ...): ...",
)
self._assert_stubs(
{
"defines": [
{
"return": "float",
"name": "test.some_fun.another_fun",
"parent": None,
"parameters": [],
"async": False,
}
]
},
"def another_fun() -> float: ...",
)
self._assert_stubs(
{
"defines": [
{
"return": "int",
"name": "ret_int",
"parent": "test.Test.Test2",
"parameters": [
{
"name": "self",
"annotation": None,
"value": None,
"index": 0,
}
],
"async": False,
}
]
},
"",
)
self._assert_stubs(
{
"defines": [
{
"return": "typing.Union[int, str]",
"name": "ret_union",
"parent": "test.Test.Test2",
"parameters": [
{
"name": "self",
"annotation": None,
"value": None,
"index": 0,
}
],
"async": False,
}
]
},
"",
)
self._assert_stubs(
{
"defines": [
{
"return": "typing.Union[int, str]",
"name": "ret_union",
"parent": "test.Test.Test2",
"parameters": [
{
"name": "self",
"annotation": None,
"value": None,
"index": 0,
}
],
"async": False,
},
{
"return": "typing.Dict[int, str]",
"name": "ret_dict",
"parent": "test.Test",
"parameters": [
{
"name": "self",
"annotation": None,
"value": None,
"index": 0,
}
],
"async": False,
},
],
},
"""\
class Test:
def ret_dict(self) -> typing.Dict[int, str]: ...
""",
)
self._assert_stubs(
{
"defines": [
{
"return": "typing.Union[typing.Dict[str, int], str]",
"name": "b",
"parent": "test.Test",
"parameters": [
{
"name": "self",
"annotation": None,
"value": None,
"index": 0,
}
],
"async": False,
},
{
"return": "typing.Union[typing.Dict[str, int], str]",
"name": "a",
"parent": "test.Test",
"parameters": [
{
"name": "self",
"annotation": None,
"value": None,
"index": 0,
}
],
"async": False,
},
],
},
"""\
class Test:
def b(self) -> typing.Union[typing.Dict[str, int], str]: ...
def a(self) -> typing.Union[typing.Dict[str, int], str]: ...
""",
)
self._assert_stubs(
{
"defines": [
{
"return": "typing.Union[typing.Dict[str, int], str]",
"name": "f",
"parent": "test.TestA",
"parameters": [
{
"name": "self",
"annotation": None,
"value": None,
"index": 0,
}
],
"async": False,
},
{
"return": "typing.Union[typing.Dict[str, int], str]",
"name": "f",
"parent": "test.TestB",
"parameters": [
{
"name": "self",
"annotation": None,
"value": None,
"index": 0,
}
],
"async": False,
},
],
},
"""\
class TestA:
def f(self) -> typing.Union[typing.Dict[str, int], str]: ...
class TestB:
def f(self) -> typing.Union[typing.Dict[str, int], str]: ...
""",
)
self._assert_stubs(
{
"defines": [
{
"return": "test.TestA",
"name": "f",
"parent": "test.TestA",
"parameters": [
{
"name": "self",
"annotation": None,
"value": None,
"index": 0,
},
{
"name": "input",
"annotation": "test.TestA",
"value": None,
"index": 0,
},
],
"async": False,
},
{
"return": "typing.Union[typing.Dict[str, int], str]",
"name": "g",
"parent": "test.TestA",
"parameters": [
{
"name": "self",
"annotation": None,
"value": None,
"index": 0,
},
{
"name": "input",
"annotation": "int",
"value": None,
"index": 0,
},
],
"async": False,
},
],
},
"""\
class TestA:
def f(self, input: "TestA") -> "TestA": ...
def g(self, input: int) -> typing.Union[typing.Dict[str, int], str]: ...
""",
)
def test_stubs_globals(self) -> None:
self._assert_stubs(
{
"globals": [{"annotation": "int", "name": "global", "parent": None}],
},
"""\
global: int = ...
""",
)
def test_stubs_attributes(self) -> None:
self._assert_stubs(
{
"attributes": [
{
"annotation": "int",
"name": "attribute_name",
"parent": "test.test",
}
],
},
"""\
class test:
attribute_name: int = ...
""",
annotate_attributes=True,
)
self._assert_stubs(
{
"attributes": [
{
"annotation": "int",
"name": "attribute_name",
"parent": "test.Test",
}
],
},
"""\
""",
annotate_attributes=False,
)
self._assert_stubs(
{
"attributes": [
{
"annotation": "test.test",
"name": "attribute_name",
"parent": "test.test",
}
],
},
"""\
class test:
attribute_name: "test" = ...
""",
annotate_attributes=True,
)
def test_stubs_attributes__path_matches_qualifier(self) -> None:
self._assert_stubs(
{
"attributes": [
{
"annotation": "int",
"name": "some_attribute",
"parent": "foo.bar.test.Foo",
}
],
},
"""\
class Foo:
some_attribute: int = ...
""",
annotate_attributes=True,
root="/root",
test_path="/root/foo/bar/test.py",
qualifier="foo.bar.test",
)
def test_stubs_attributes__full_path_but_does_not_match_qualifier(self) -> None:
self._assert_stubs(
{
"attributes": [
{
"annotation": "int",
"name": "some_attribute",
"parent": "foo.bar.test.Foo",
}
],
},
"""\
class Foo:
some_attribute: int = ...
""",
annotate_attributes=True,
root="/root",
test_path="/root/extra_module/foo/bar/test.py",
qualifier="foo.bar.test",
)
def test_stubs_attributes__nested_class_is_ignored(self) -> None:
self._assert_stubs(
{
"attributes": [
{
"annotation": "int",
"name": "some_attribute",
"parent": "foo.bar.test.Foo.MyNestedClass",
}
],
},
"",
annotate_attributes=True,
root="/root",
test_path="/root/extra_module/foo/bar/test.py",
qualifier="foo.bar.test",
)
def test_stubs_no_typing_import(self) -> None:
"""
Make sure we don't spuriously import from typing
NOTE: This logic is almost certainly incomplete - if another function
in the same module used typing.Union, we would produce incorrect stubs.
We should determine whether it is truly necessary to import from typing,
because doing it correctly in edge cases is nontrivial.
"""
self._assert_stubs(
{
"defines": [
{
"return": "Union[int, str]",
"name": "test.with_params",
"parent": None,
"parameters": [
{"name": "y", "annotation": None, "value": "7", "index": 0},
{
"name": "x",
"annotation": "typing.List[int]",
"value": "[5]",
"index": 1,
},
],
"async": False,
}
]
},
"""\
def with_params(y=..., x: typing.List[int] = ...) -> Union[int, str]: ...
""",
)
def test_stubs_quote(self) -> None:
"""
Test generating stubs with quoted annotations
"""
self._assert_stubs(
{
"defines": [
{
"return": "Union[int, str]",
"name": "test.with_params",
"parent": None,
"parameters": [
{"name": "y", "annotation": None, "value": "7", "index": 0},
{
"name": "x",
"annotation": "typing.List[int]",
"value": "[5]",
"index": 1,
},
],
"async": False,
}
]
},
"""\
def with_params(y=..., x: "typing.List[int]" = ...) -> "Union[int, str]": ...
""",
quote_annotations=True,
)
def test_stubs_simple(self) -> None:
"""
Test generating stubs while omitting annotations that aren't guaranteed landable
"""
self._assert_stubs(
{
"defines": [
{
"return": "None",
"name": "test.with_params",
"parent": None,
"parameters": [
{
"name": "x",
"annotation": "int",
"value": None,
"index": 0,
},
{
"name": "y",
"annotation": "typing.List[int]",
"value": None,
"index": 1,
},
{
"name": "z",
"annotation": "Union[int, str]",
"value": None,
"index": 2,
},
],
"async": False,
}
]
},
"""\
def with_params(x: int, y, z: Union[int, str]) -> None: ...
""",
simple_annotations=True,
)
|
StubGenerationTest
|
python
|
gevent__gevent
|
src/greentest/3.14/test_smtplib.py
|
{
"start": 37799,
"end": 38628
}
|
class ____(smtpd.SMTPServer):
channel_class = SimSMTPChannel
def __init__(self, *args, **kw):
self._extra_features = []
self._addresses = {}
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data)
def process_message(self, peer, mailfrom, rcpttos, data):
self._addresses['from'] = mailfrom
self._addresses['tos'] = rcpttos
def add_feature(self, feature):
self._extra_features.append(feature)
def handle_error(self):
raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
|
SimSMTPServer
|
python
|
numpy__numpy
|
numpy/_core/tests/test_item_selection.py
|
{
"start": 131,
"end": 3769
}
|
class ____:
def test_simple(self):
a = [[1, 2], [3, 4]]
a_str = [[b'1', b'2'], [b'3', b'4']]
modes = ['raise', 'wrap', 'clip']
indices = [-1, 4]
index_arrays = [np.empty(0, dtype=np.intp),
np.empty((), dtype=np.intp),
np.empty((1, 1), dtype=np.intp)]
real_indices = {'raise': {-1: 1, 4: IndexError},
'wrap': {-1: 1, 4: 0},
'clip': {-1: 0, 4: 1}}
# Currently all types but object, use the same function generation.
# So it should not be necessary to test all. However test also a non
# refcounted struct on top of object, which has a size that hits the
# default (non-specialized) path.
types = int, object, np.dtype([('', 'i2', 3)])
for t in types:
# ta works, even if the array may be odd if buffer interface is used
ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t)
tresult = list(ta.T.copy())
for index_array in index_arrays:
if index_array.size != 0:
tresult[0].shape = (2,) + index_array.shape
tresult[1].shape = (2,) + index_array.shape
for mode in modes:
for index in indices:
real_index = real_indices[mode][index]
if real_index is IndexError and index_array.size != 0:
index_array.put(0, index)
assert_raises(IndexError, ta.take, index_array,
mode=mode, axis=1)
elif index_array.size != 0:
index_array.put(0, index)
res = ta.take(index_array, mode=mode, axis=1)
assert_array_equal(res, tresult[real_index])
else:
res = ta.take(index_array, mode=mode, axis=1)
assert_(res.shape == (2,) + index_array.shape)
def test_refcounting(self):
objects = [object() for i in range(10)]
if HAS_REFCOUNT:
orig_rcs = [sys.getrefcount(o) for o in objects]
for mode in ('raise', 'clip', 'wrap'):
a = np.array(objects)
b = np.array([2, 2, 4, 5, 3, 5])
a.take(b, out=a[:6], mode=mode)
del a
if HAS_REFCOUNT:
assert_(all(sys.getrefcount(o) == rc + 1
for o, rc in zip(objects, orig_rcs)))
# not contiguous, example:
a = np.array(objects * 2)[::2]
a.take(b, out=a[:6], mode=mode)
del a
if HAS_REFCOUNT:
assert_(all(sys.getrefcount(o) == rc + 1
for o, rc in zip(objects, orig_rcs)))
def test_unicode_mode(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.take, 5, mode=k)
def test_empty_partition(self):
# In reference to github issue #6530
a_original = np.array([0, 2, 4, 6, 8, 10])
a = a_original.copy()
# An empty partition should be a successful no-op
a.partition(np.array([], dtype=np.int16))
assert_array_equal(a, a_original)
def test_empty_argpartition(self):
# In reference to github issue #6530
a = np.array([0, 2, 4, 6, 8, 10])
a = a.argpartition(np.array([], dtype=np.int16))
b = np.array([0, 1, 2, 3, 4, 5])
assert_array_equal(a, b)
|
TestTake
|
python
|
pytorch__pytorch
|
torch/_higher_order_ops/utils.py
|
{
"start": 1145,
"end": 39717
}
|
class ____(RuntimeError):
reason: str
def autograd_not_implemented_inner(
operator: OperatorBase, delayed_error: bool, *args: Any, **kwargs: Any
) -> Any:
"""If autograd is enabled and any of the arguments require grad this will either
raise an error or return a DelayedError depending on the value of delayed.
Args:
operator: The Operator to call with the *args and **kwargs with
op_name: The name of the Operator
delayed_error: If True, return a DelayedError instead of raising an error
args: The flattened operands to the Operator
kwargs: The keyword arguments to the Operator
Raises:
RuntimeError: If autograd is enabled and any of the arguments to the Operator
"""
with torch._C._AutoDispatchBelowAutograd():
result = operator(*args, **kwargs)
flat_operands = pytree.arg_tree_leaves(*args)
if torch.is_grad_enabled() and any(
f.requires_grad for f in flat_operands if isinstance(f, torch.Tensor)
):
if delayed_error:
err_fn = torch._C._functions.DelayedError(
f"Autograd not implemented for {str(operator)}",
1,
)
def fake_requires_grad(tensor):
if torch.is_floating_point(tensor) or torch.is_complex(tensor):
tensor = tensor.detach()
tensor.requires_grad = True
return tensor
return pytree.tree_map_only(
torch.Tensor, lambda x: err_fn(fake_requires_grad(x)), result
)
else:
raise RuntimeError(f"Autograd not implemented for {str(operator)}")
return result
def autograd_not_implemented(op: OperatorBase, deferred_error: bool) -> Callable:
def inner(*args, **kwargs):
return autograd_not_implemented_inner(op, deferred_error, *args, **kwargs)
return inner
def _maybe_run_with_interpreter(fn):
maybe_interpreted_fn = fn
if isinstance(fn, torch.fx.GraphModule) and fx_traceback.has_preserved_node_meta():
# Running graph with interpreter is needed for propagating the stack_trace
def graph_with_interpreter(*args):
with fx_traceback.preserve_node_meta():
return torch.fx.Interpreter(fn).run(*args)
maybe_interpreted_fn = graph_with_interpreter
return maybe_interpreted_fn
def _maybe_compile_and_run_fn(fn, *args):
if not torch.compiler.is_dynamo_compiling():
with setup_compilation_env() as backend: # type: ignore[attr-defined]
return torch.compile(fn, backend=backend, fullgraph=True)(*args)
else:
return fn(*args)
def reenter_make_fx(fn):
from torch.fx.experimental.proxy_tensor import _CURRENT_MAKE_FX_TRACER
@functools.wraps(fn)
def wrapped(*args):
assert _CURRENT_MAKE_FX_TRACER is not None, (
"Cannot reenter make_fx when we're not under a make_fx tracing session"
)
gm = _CURRENT_MAKE_FX_TRACER.trace_subgraph(
_maybe_run_with_interpreter(fn), *args
)
return gm
return wrapped
def _maybe_reenter_make_fx(fn):
from torch.fx.experimental.proxy_tensor import _CURRENT_MAKE_FX_TRACER
if _CURRENT_MAKE_FX_TRACER is not None:
return reenter_make_fx(fn)
else:
def _maybe_make_fx_with_fake_mode(fn):
@functools.wraps(fn)
def wrapped(*args):
from torch._guards import detect_fake_mode
fake_mode = detect_fake_mode(args)
if fake_mode is None:
# we creaeta a fake_mode here to make sure we could
# trace the graph with data-dependent calls e.g. .item()
return make_fx(fn, tracing_mode="fake")(*args)
# Tracing with real if all inputs have been fakfied
return make_fx(fn)(*args)
return wrapped
return _maybe_make_fx_with_fake_mode(fn)
def check_meta_consistency(
lhs_list: list[Union[torch.Tensor, torch.SymInt, int]],
rhs_list: list[Union[torch.Tensor, torch.SymInt, int]],
lhs_name: str,
rhs_name: str,
include_contiguity: bool = True,
) -> None:
def diff_meta_pairs(
lhs_list: list[Union[torch.Tensor, torch.SymInt, int]],
rhs_list: list[Union[torch.Tensor, torch.SymInt, int]],
) -> list[str]:
def diff_meta(
lhs: Union[torch.Tensor, torch.SymInt, int],
rhs: Union[torch.Tensor, torch.SymInt, int],
) -> str:
if isinstance(lhs, torch.Tensor) and isinstance(rhs, torch.Tensor):
return ", ".join(
diff_tensor_meta(
_extract_tensor_metadata(
lhs, include_contiguity=include_contiguity
),
_extract_tensor_metadata(
rhs, include_contiguity=include_contiguity
),
check_grad=False,
)
)
else:
def _both_int_types(lhs, rhs):
return isinstance(lhs, (int, torch.SymInt)) and isinstance(
rhs, (int, torch.SymInt)
)
def _both_tensor(lhs, rhs):
return isinstance(lhs, torch.Tensor) and isinstance(
rhs, torch.Tensor
)
if not _both_int_types(lhs, rhs) and not _both_tensor(lhs, rhs):
return f"type: {lhs} vs {rhs}"
return ""
# Manually check the device of lhs and rhs as this field is currently not part of TensorMetadata
def diff_device(
lhs: Union[torch.Tensor, torch.SymInt, int],
rhs: Union[torch.Tensor, torch.SymInt, int],
) -> str:
if isinstance(lhs, torch.Tensor) and isinstance(rhs, torch.Tensor):
if (
rhs.device.type == lhs.device.type
and rhs.device.index == lhs.device.index
):
return ""
else:
return "device"
return ""
if len(lhs_list) != len(rhs_list):
raise torch._dynamo.exc.UncapturedHigherOrderOpError(
f"Expected {lhs_name} and {rhs_name} to have same number of outputs but got lhs:{lhs_list} and rhs:{rhs_list}"
)
all_diffs = []
for i, (lhs, rhs) in enumerate(zip(lhs_list, rhs_list)):
if diff := diff_meta(lhs, rhs):
all_diffs.append(
f"pair[{i}] differ in {diff}, where lhs is {lhs} and rhs is {rhs}"
)
if diff := diff_device(lhs, rhs):
all_diffs.append(
f"pair[{i}] differ in {diff}, where lhs is {lhs} and rhs is {rhs}"
)
return all_diffs
if all_diffs := diff_meta_pairs(lhs_list, rhs_list):
diff_str = "\n".join(all_diffs)
raise torch._dynamo.exc.UncapturedHigherOrderOpError(
f"Expected {lhs_name} and {rhs_name} to have same metadata but found:\n{diff_str}"
)
@contextmanager
def setup_compilation_env():
"""
Context manager that sets up proper environment and backend when invoking torch.compile
inside torch.export region or inside HOP.
"""
from torch._dynamo.backends.debugging import (
make_eager_backend_with_torch_function_modes,
)
from torch.fx.experimental.proxy_tensor import (
_temp_remove_pre_dispatch_torch_function_mode,
)
with (
_set_compilation_env(),
torch._dynamo.utils.disable_cache_limit(),
_temp_remove_pre_dispatch_torch_function_mode() as pre_dispatch_mode,
_temp_remove_metadata_torch_function_mode() as metadata_mode,
):
modes = [
mode for mode in (pre_dispatch_mode, metadata_mode) if mode is not None
]
if modes:
yield make_eager_backend_with_torch_function_modes(modes)
else:
yield "eager"
@contextmanager
def _set_compilation_env():
_old_is_tracing = torch.fx._symbolic_trace._is_fx_tracing_flag
_old_allow_empty_graphs = torch._dynamo.config.allow_empty_graphs
_old_capture_scalar_outputs = torch._dynamo.config.capture_scalar_outputs
# The issue is tracked in https://github.com/pytorch/pytorch/issues/144360: when dynamo finds
# the top-level frame produces no graph, the default behavior is to fallback to eager.
# Then when it encounters an inner function, it will try to trace that function again, which is unnecessary.
# For while_loop, during inspecting the inner call, we trace into the python dispathcer
# logic, which is not tracable as of today. So the proper fix can be either 1. allow dispatch
# logic to be dynamo tracable or 2. fixing https://github.com/pytorch/pytorch/issues/144360.
# but it exposes some bugs in existing tests so we have to have a temporary flag to control
# the behavior, which allows dynamo to store an empty graph for a frame without falling back to eager
try:
# We need to turn off the is_fx_tracing_flag. Remove this flag check from dyanmo
# once we are confident fx tracing works with dynamo.
torch.fx._symbolic_trace._is_fx_tracing_flag = False
# pyrefly: ignore [bad-assignment]
torch._dynamo.config.allow_empty_graphs = True
torch._dynamo.config.capture_scalar_outputs = True
yield
finally:
torch.fx._symbolic_trace._is_fx_tracing_flag = _old_is_tracing
torch._dynamo.config.allow_empty_graphs = _old_allow_empty_graphs
torch._dynamo.config.capture_scalar_outputs = _old_capture_scalar_outputs
# The invariant here is that we always trace the branch with fake tensor
def _maybe_fake_tracing(fn, inputs: list[Any], pre_dispatch):
fake_mode_det = detect_fake_mode(inputs)
fake_mode: AbstractContextManager = nullcontext()
tracing_mode = "fake"
if fake_mode_det is not None:
fake_mode = fake_mode_det
tracing_mode = "real"
# Note: we need to turn off proxy tensor mode to avoid tracing infra
# code that happens in make_fx e.g. we now call as_strided when wrapping tensor
# as fake tensor.
with fake_mode, disable_proxy_modes_tracing():
gm = make_fx(
fn,
tracing_mode=tracing_mode,
pre_dispatch=pre_dispatch,
_error_on_data_dependent_ops=False,
)(*inputs)
if not isinstance(fake_mode, nullcontext) and fake_mode.shape_env is not None: # type: ignore[attr-defined]
insert_deferred_runtime_asserts(
gm,
fake_mode.shape_env, # type: ignore[attr-defined]
"hoo_maybe_fake_tracing",
export=True, # type: ignore[attr-defined]
)
return gm
def potential_input_alias_or_mutation(gm, inputs, pre_dispatch=False):
try:
gm = _maybe_fake_tracing(gm, inputs, pre_dispatch)
except UnsupportedAliasMutationException:
# this can happen when nested cond_op is
# functionalized
return True
except Exception as e:
raise e
example_inputs = [
ph.meta.get("val", None) for ph in gm.graph.find_nodes(op="placeholder")
]
(
inp_inp_alias_map,
inp_out_alias_map,
out_out_alias_map,
inp_mutation,
) = check_input_alias_and_mutation(gm, example_inputs)
return (inp_inp_alias_map, inp_out_alias_map, out_out_alias_map), inp_mutation
def analyze_potential_input_alias_or_mutation(name, aliases, input_mutations):
if any(len(a) > 0 for a in aliases):
# TODO: Investigate here further which node is exactly aliasing
raise RuntimeError(
f"{name} where aliases appear. "
+ f"In particular, these inputs \
{set(el for el_map in aliases if len(el_map.keys()) > 0 for el in el_map)} " # noqa: C401
+ "get aliased. Please ensure that this doesn't happen."
)
if len(input_mutations):
# TODO: Investigate here further which node is exactly mutating the inputs
raise RuntimeError(
f"{name} where the inputs are mutated. "
+ f"In particular, these nodes are mutating the inputs \
{set(el for el in input_mutations)}." # noqa: C401
+ "Please ensure that this doesn't happen."
)
def _has_potential_branch_input_mutation(gm, inputs, pre_dispatch=False):
(
(_, _, _),
inp_mutation,
) = potential_input_alias_or_mutation(gm, inputs, pre_dispatch)
return len(inp_mutation) > 0
def has_potential_input_alias_or_mutation(gm, inputs, pre_dispatch=False):
(
(
inp_inp_alias_map,
inp_out_alias_map,
out_out_alias_map,
),
inp_mutation,
) = potential_input_alias_or_mutation(gm, inputs, pre_dispatch)
return (
any(
(
len(inp_inp_alias_map) > 0,
len(inp_out_alias_map) > 0,
len(out_out_alias_map) > 0,
)
),
len(inp_mutation) > 0,
)
def _collect_fake_inputs(inputs):
from torch._subclasses.fake_tensor import FakeTensor
# Get the example values of the inputs.
inputs_fake: list[Union[FakeTensor, torch.Tensor, int]] = []
for inp in inputs:
if isinstance(inp, (torch.fx.proxy.Proxy, torch.fx.node.Node)):
inp = inp.node if isinstance(inp, torch.fx.proxy.Proxy) else inp
if hasattr(inp, "meta"):
val = inp.meta["example_value"]
if isinstance(val, torch.Tensor):
if torch._C._functorch.is_batchedtensor(
val
) or torch._C._functorch.is_functionaltensor(val):
# This case is for batched or functional tensors
# Unwrap the tensors
while torch._C._functorch.is_batchedtensor(
val
) or torch._C._functorch.is_functionaltensor(val):
val = torch._C._functorch.get_unwrapped(val)
assert isinstance(val, FakeTensor)
inputs_fake.append(val)
else:
# This is the standard case of a TensorVariable
assert isinstance(val, FakeTensor)
inputs_fake.append(val)
else:
# This case is for SymInts and other non-Tensor elements
assert not isinstance(val, torch.Tensor)
inputs_fake.append(val)
else:
# This case is for ints
assert isinstance(inp, int)
inputs_fake.append(inp)
return inputs_fake
def _check_alias_and_mutation(graph_module, inputs_fake, name, pre_dispatch):
aliases, inp_mutation = has_potential_input_alias_or_mutation(
graph_module, inputs_fake, pre_dispatch=pre_dispatch
)
if aliases:
raise RuntimeError(f"{name} might be aliasing the input or the output!") # noqa: F541
if inp_mutation:
raise RuntimeError(f"{name} might be modifying the input!") # noqa: F541
def unique_graph_id(proxy_mode, prefix):
"""Returns a unique name and id for a graph to be added to a proxy_mode tracer"""
# There are probably better ways - I know that create_arg has some self incrementing name
# magic to it, but since we explicitly have to get the name for register_module,
# I was not sure how to do that. This kinda simulates it.
return unique_graph_name_with_root(proxy_mode.tracer.root, prefix)
def unique_graph_name_with_root(
root: torch.fx.GraphModule, prefix: str
) -> tuple[int, str]:
next_name = None
i = 0
# pyrefly: ignore [bad-assignment]
while not next_name:
candidate = f"{prefix}_{i}"
if hasattr(root, candidate):
i += 1
else:
next_name = candidate
return i, next_name
def _from_fun(t):
from torch._functorch.aot_autograd import from_fun
if isinstance(t, torch.Tensor):
if t.dtype != torch.bool:
return torch.empty_strided(
t.size(),
t.stride(),
dtype=t.dtype,
requires_grad=t.requires_grad,
device=t.device,
)
else:
# clone of a functional tensor produces a functional tensor
# but we want to avoid it so we clone a non-functional version
maybe_unfunc_t = t
if isinstance(t, FunctionalTensor):
torch._sync(t)
maybe_unfunc_t = from_fun(t)
elif torch._is_functional_tensor(t):
# need to handle both types of functionalization here:
# these are the tensors that came from the user,
# which could be either FunctionalTensorWrapper or FunctionalTensor
torch._sync(t)
maybe_unfunc_t = torch._from_functional_tensor(t)
return maybe_unfunc_t.clone()
return t
def clone_outputs_aliasing_inputs(args):
input_storage = {
StorageWeakRef(arg._typed_storage())
for arg in args
if isinstance(arg, torch.Tensor)
}
def maybe_clone(t):
if (
isinstance(t, torch.Tensor)
and StorageWeakRef(t._typed_storage()) in input_storage
):
return t.clone()
return t
return maybe_clone
def prepare_fw_with_masks(fn):
def fw_with_masks(*args):
fw_out = fn(*args)
return fw_out, [
bool(isinstance(ret, torch.Tensor) and ret.requires_grad) for ret in fw_out
]
return fw_with_masks
def prepare_fw_with_masks_all_requires_grad(fn):
def fw_with_masks(*args):
fw_out = fn(*args)
# Note [force all outputs to be require grad]
# Instead of using the original fn, we set the output of original
# fn to all require grad. This is consistent with the behavior
# of autograd.Function, where if any one of the inputs requires grad
# all output will be require grad. This also makes the downstream
# require_gradness reasoning much easier.
if pytree.tree_any_only(torch.Tensor, lambda t: t.requires_grad, args):
fw_out = pytree.tree_map_only(
torch.Tensor,
lambda x: x.requires_grad_(True) if x.dtype.is_floating_point else x,
fw_out,
)
def _query_requires_grad(t: torch.Tensor) -> bool:
if torch._is_functional_tensor(t):
t = torch._from_functional_tensor(t)
return t.requires_grad
return fw_out, pytree.tree_map_only(torch.Tensor, _query_requires_grad, fw_out)
return fw_with_masks
# This function replaces None gradients with all-zero gradients.
# `None` gradients are problematic for CUDA graphs. Those gradients are
# replaced with an all-zero tensor for better optimization
def unmask_none_gradients(grads, operands):
allowed_types = (torch.Tensor, int, torch.SymInt)
assert all(isinstance(o, allowed_types) for o in operands), (
f"operands can only be of {allowed_types} but got {[type(o) for o in operands]}"
)
unmasked_grads = []
for g, o in zip(grads, operands):
if g is not None:
unmasked_grads.append(g)
else:
# In case the operand is an int or a torch.SymInt, return None
# This can happen for lifted_arguments. E.g., the shapes of a dynamic tensor are lifted and passed
# as additional arguments
unmasked_grads.append(
torch.zeros_like(o) if isinstance(o, torch.Tensor) else None
)
return unmasked_grads
def _maybe_fake_prop_ignore_unbacked(fn, args):
with ExitStack() as ctx_stack:
if (fake_mode := detect_fake_mode(args)) is not None:
ctx_stack.enter_context(fake_mode)
if fake_mode.shape_env is not None:
ctx_stack.enter_context(
fake_mode.shape_env.ignore_fresh_unbacked_symbols()
)
return fn(*args)
def redirect_to_mode(hop: OperatorBase, mode):
"""Utility for redispatching HOP to underlying mode
Args:
hop: The HOP to redispatch
mode: The mode to redispatch to
Returns:
A decorated function that implements the HOP for the given mode
"""
@hop.py_impl(mode)
def impl(mode, *args, **kwargs):
return mode.__torch_dispatch__(hop, [], args, kwargs)
return impl
# TODO: The parameter use_output_and_grad_bw is required because some operations
# that utilize this function, such as the while_loop, may require (grad, fwd_outputs)
def create_fw_bw_graph(fn, use_output_and_grad_bw, fw_inputs, fw_outputs):
from torch._functorch.aot_autograd import AOTConfig, create_joint
# Note:[HOP create fw_bw graph] We create "clean" environments for make_fx by suspending all dispatch keys
# between Autograd and Python key. Currently, we only suspend functionalization but more can be
# added when required. Will encounter two problems if we don't suspend functionalization:
#
# 1. make_fx fails to capture operations on input: the inputs are wrapped as _to_functional_tensor_wrapper,
# but they will be unwrapped before entering ProxyTorchDispatchMode as part of the dispatching.
# However, it's the outside wrapper that tracer creates proxies for. This casuses tracer fail to
# fetch the proxy for the inputs and fail to capture any operations on them.
#
# 2. make_fx fails to capture output: the outputs after ProxyTorchDispatchMode are further
# wrapped as FunctionalTensorWrapper in Functionalize key after return. However, the tracer
# only associates the inner tensor with proxy in ProxyTorchDispatchMode. Therefore,
# when creating the output node, it fails to associate the wrapped tensor with its proxy.
# Instead, it will create _tensor_constant as output.
dummy_aot_config = AOTConfig(
fw_compiler=None, # type: ignore[arg-type]
bw_compiler=None, # type: ignore[arg-type]
partition_fn=None, # type: ignore[arg-type]
decompositions={},
num_params_buffers=0,
aot_id=0,
keep_inference_input_mutations=False,
)
example_grad = [_from_fun(out) for out in fw_outputs]
num_grads = len(example_grad)
fw_graph = _maybe_reenter_make_fx(fn)(*fw_inputs)
def joint_fn(*joint_operands_grads):
if use_output_and_grad_bw:
grads = joint_operands_grads[0]
inputs = joint_operands_grads[1][-1:]
else:
grads = joint_operands_grads[:num_grads]
inputs = joint_operands_grads[num_grads:]
joint = create_joint(prepare_fw_with_masks(fn), aot_config=dummy_aot_config)
_, grads = joint(
list(inputs),
[grad for grad in grads if grad is not None and grad.requires_grad],
)
# Unmask None gradients to all-zero gradients
unmasked_grads = unmask_none_gradients(grads, inputs)
# In order to keep map functional for backward graph,
# we clone outputs that are aliasing inputs
maybe_clone = clone_outputs_aliasing_inputs(joint_operands_grads)
return pytree.tree_map(maybe_clone, unmasked_grads)
if use_output_and_grad_bw:
example_xs_out = list(fw_inputs) + list(fw_outputs)
joint_graph = _maybe_reenter_make_fx(joint_fn)(
(list(example_grad), list(example_xs_out))
)
else:
example_xs_out = list(fw_inputs)
joint_graph = _maybe_reenter_make_fx(joint_fn)(
*(list(example_grad) + list(example_xs_out))
)
return fw_graph, joint_graph
def _unstack_pytree(xs):
flat_xs, inspec = pytree.tree_flatten(xs)
if not all(isinstance(xs, torch.Tensor) for xs in flat_xs):
raise RuntimeError(f"Leaves of xs must be Tensor {flat_xs}")
if not all(xs.shape[0] == flat_xs[0].shape[0] for xs in flat_xs):
raise RuntimeError(
f"Leaves of xs must have same leading dimension size {[xs.shape for xs in flat_xs]}"
)
a = zip(*flat_xs)
pytrees = [pytree.tree_unflatten(tuple, inspec) for tuple in a]
return pytrees
def _stack_pytree(pytrees):
flat_out = []
out_spec = None
for pt in pytrees:
flat_pt, out_spec = pytree.tree_flatten(pt)
flat_out.append(flat_pt)
assert out_spec is not None
b = zip(*flat_out)
stacked_out = []
for leaves in b:
if all(isinstance(leaf, torch.Tensor) for leaf in leaves):
stacked_out.append(torch.stack(leaves))
elif all(leaf is None for leaf in leaves):
# Backward graph can return None output when forward inputs doesn't require grad.
# When we eagerly execute backward graph, we need to call _stack_pytree on its output,
# therefore we need to deal with None output.
stacked_out.append(None) # type: ignore[arg-type]
else:
raise RuntimeError(f"Cannot stack {leaves}.")
return pytree.tree_unflatten(stacked_out, out_spec)
# We cannot call save_for_backward for symints. This helper function
# can be used to save symints as direct attributes of ctx in autograd.Function.
#
# For example, if args = (x, y, s0, z, s1),
# save_tensors_and_symints_for_backward will partition the args into two lists, and a bookkeeping list pos:
# partitioned_args[0] = (x, y, z)
# partitioned_args[1] = (s0, s1)
# pos = (0, 0, 1, 0, 1)
# pos list keeps track of which partition the args
# is partitioned into in order to recover it in saved_tensors_and_symints.
#
# In saved_tensors_and_symints, we can recover the original args by:
# iterating over the pos list and pop one item from the front of partitioned_args[pos[i]].
# We use t_idx and s_idx to keep track of the next index of the item we are going to pop for the two lists.
def save_tensors_and_symints_for_backward(ctx, args):
assert all(
isinstance(arg, (torch.Tensor, torch.SymInt, int, type(None))) for arg in args
), args
partitioned_args: list[Any] = [[], []]
pos = []
for arg in args:
idx = 0 if isinstance(arg, torch.Tensor) else 1
partitioned_args[idx].append(arg)
pos.append(idx)
assert not hasattr(ctx, "sym_int_args"), "ctx already has sym_int_args attribute."
assert not hasattr(ctx, "pos"), "ctx already has pos attribute."
ctx.save_for_backward(*partitioned_args[0])
ctx.sym_int_args = partitioned_args[1]
ctx.pos = pos
def saved_tensors_and_symints(ctx):
args = []
t_idx = 0
s_idx = 0
saved_tensors = ctx.saved_tensors
for p in ctx.pos:
if p == 0:
args.append(saved_tensors[t_idx])
t_idx += 1
else:
args.append(ctx.sym_int_args[s_idx])
s_idx += 1
assert t_idx + s_idx == len(ctx.pos)
return tuple(args)
def split_into_chunks(iterable: Sequence[Any], chunk_sizes: list[int]) -> list[Any]:
assert sum(chunk_sizes) == len(iterable), (
"the sum of all chunks needs to match the length of the iterable."
)
elements = []
idx = 0
for size in chunk_sizes:
elements.append(iterable[idx : idx + size])
idx += size
return elements
def _clone_aliasing_output(inputs: Sequence[Any], outputs: Sequence[Any]):
# For tensors whose grad is None, create zero tensors as gradients
# This invariant is useful for cudagraph.
# Elimitate input-output, output-output aliasing
seen_input_storages = {
StorageWeakRef(t._typed_storage())
for t in inputs
if isinstance(t, torch.Tensor)
}
seen_output_storages = set()
final_outputs = []
for out in outputs:
if isinstance(out, torch.Tensor):
out_storage = StorageWeakRef(out._typed_storage())
if (
out_storage in seen_input_storages
or out_storage in seen_output_storages
):
out = out.clone()
seen_output_storages.add(StorageWeakRef(out._typed_storage()))
final_outputs.append(out)
return final_outputs
def create_bw_fn(
fn: Callable, args: tuple[Any, ...], return_fw_outputs: bool = False
) -> Callable:
"""
For a fn that accepts flat inputs and returns flat outputs:
fw_out = fn(*args),
this function returns:
grad_args = bw_fn(*args_and_grad_output)
with the following invariants:
1. args + fw_out has an 1-1 correspondence to args_and_grad_output
2. grad_args has an 1-1 corresponsence to args
3. for tensor arg whose requires_grad is False, its corresponding grad in
grad_args will be a zero tensor with the same shape.
"""
from torch._functorch.aot_autograd import AOTConfig, create_joint
# pyrefly: ignore [missing-module-attribute]
from torch._higher_order_ops.utils import prepare_fw_with_masks_all_requires_grad
dummy_aot_config = AOTConfig(
fw_compiler=None, # type: ignore[arg-type]
bw_compiler=None, # type: ignore[arg-type]
partition_fn=None, # type: ignore[arg-type]
decompositions={},
num_params_buffers=0,
aot_id=0,
keep_inference_input_mutations=False,
)
n_primals = len(args)
bw_fn = create_joint(
prepare_fw_with_masks_all_requires_grad(fn), aot_config=dummy_aot_config
)
def flat_fn(*args_and_grad_outs):
primals = args_and_grad_outs[:n_primals]
tangents = args_and_grad_outs[n_primals:]
fw_outs, grad_args = bw_fn(primals, tangents)
assert len(args) == len(grad_args)
# For tensors whose grad is None, create zero tensors as gradients
# This invariant is useful for cudagraph.
grad_args = [
torch.zeros_like(arg)
if isinstance(arg, torch.Tensor) and grad is None
else grad
for grad, arg in zip(grad_args, primals)
]
final_grads = _clone_aliasing_output(args_and_grad_outs, grad_args)
if return_fw_outputs:
return *fw_outs, *final_grads
return final_grads
return flat_fn
def get_dummy_aot_autograd_config():
from torch._functorch.aot_autograd import AOTConfig
return AOTConfig(
fw_compiler=None, # type: ignore[arg-type]
bw_compiler=None, # type: ignore[arg-type]
partition_fn=None, # type: ignore[arg-type]
decompositions={},
num_params_buffers=0,
aot_id=0,
keep_inference_input_mutations=False,
)
# Slices off the first element of a given dimension
def first_slice_copy(t: torch.Tensor, dim: int = 0) -> torch.Tensor:
return torch.select_copy(t, dim, 0)
# Returns a mask whether a list element is a tensor or not
def get_tensor_mask(tensor_list: Iterable[Any]) -> list[bool]:
return [bool(isinstance(v, torch.Tensor)) for v in tensor_list]
def mask_list(
mask: list[bool], inp: list[Any], other: Optional[list[Any]] = None
) -> list[Any]:
# Masks elements on an `inp` list.
# If other is None, then the elements of the `inp` list where the mask is False are removed
# If other is not None, then the elements of the `inp` list where the mask is False are
# replaced with the elements of the `other` list
assert len(mask) == len(inp), (
"The length of the mask needs to be identical to the length of the input"
)
if other is not None:
assert len(inp) == len(other), (
"If an input and an other list is provided, they need to have the same length"
)
return [i if m else o for m, i, o in zip(mask, inp, other)]
else:
return [i for m, i in zip(mask, inp) if m]
def first_slice_copy_with_grad(li: Iterable[Any]) -> list[Any]:
# First_slice_copy does not keep the original requires_grad flag,
# but we need it for materialize_as_graph
# in order to compute the correct gradients
# The reason why first_slice_copy doesn't keep requires_grad flag is
# because it's called in torch.autograd.Function.backward/forward.
slc = [first_slice_copy(x).requires_grad_(x.requires_grad) for x in li]
return slc
# Reports the difference between meta of two tensors in a string
def diff_tensor_meta(
meta1: TensorMetadata, meta2: TensorMetadata, check_grad=True
) -> list[str]:
from torch.fx.experimental.symbolic_shapes import GuardOnDataDependentSymNode
pair_diffs = []
for meta_name in TensorMetadata._fields:
if not check_grad and meta_name == "requires_grad":
continue
val1 = getattr(meta1, meta_name)
val2 = getattr(meta2, meta_name)
try:
if val1 != val2:
pair_diffs.append(f"'{meta_name}: {val1} vs {val2}'")
except GuardOnDataDependentSymNode:
pair_diffs.append(f"'{meta_name}: {val1} vs {val2}'")
continue
return pair_diffs
# Note [lifted arg types in hop]
# For dynamoed hops, we automatically lift the free symbols in tensors as arguments.
# This has implications for the types of lifted args for different dispatch keys:
# 1. functionalization, FakeTensorMode, ProxyTorchDispatchMode, Autograd need to support torch.Symint
# lifted args because it's on the path of torch.compile(dynamic=True).
# 2. functionalization, FakeTensorMode, ProxyTorchDispatchMode, Autograd, CompositeExplicitAutograd need
# to support int arguments. In the eager run case, we re-trace the subgraph in AutogradKey, so inner
# hops may receive int inputs from the shape of outer tensor inputs.
# However, CompositeExplicitAutograd won't receive SymInt inputs because it only accepts real tensor inputs.
def validate_subgraph_args_types(lifted_args: Union[tuple[Any, ...], list[Any]]):
allowed_types = (torch.Tensor, int, torch.SymInt)
assert all(
isinstance(arg, (torch.Tensor, int, torch.SymInt)) for arg in lifted_args
), (
f"{lifted_args} can only be of {allowed_types} but got {tuple(type(arg) for arg in lifted_args)}"
)
# TODO: Return a more detailed information as to which node
# causes a mutation or an alias. This may requires a per operator tensor version checking
def check_input_alias_and_mutation(
gm: torch.fx.GraphModule,
fake_args: list[FakeTensor],
) -> tuple[dict[int, int], dict[int, int], dict[int, int], list[int]]:
(
inp_inp_alias_map,
inp_out_alias_map,
out_out_alias_map,
mutated_inputs,
) = check_input_alias_and_mutation_return_outputs(gm)[:-1]
# pyrefly: ignore [bad-return]
return inp_inp_alias_map, inp_out_alias_map, out_out_alias_map, mutated_inputs
def _tensor_storage(t) -> StorageWeakRef:
return StorageWeakRef(t._typed_storage())
def check_input_alias_and_mutation_return_outputs(
gm: torch.fx.GraphModule,
) -> tuple[
dict[int, int],
dict[int, int],
dict[int, int],
list[int],
Union[tuple[Any, ...], list[Any]],
]:
def _get_example_value(n):
if not isinstance(n, torch.fx.Node):
return n
else:
return n.meta["val"] if "val" in n.meta else n.meta["example_value"]
fake_args = [
_get_example_value(n)
for n in gm.graph.find_nodes(op="placeholder")
if isinstance(n, torch.fx.Node) and "val" in n.meta
]
outputs = [
_get_example_value(n)
for n in pytree.tree_flatten(gm.graph.find_nodes(op="output")[0].args[0])[0]
]
# We need to analyze the original fake_args to detect
# inp-inp alias.
inp_storage_map = {
_tensor_storage(inp): i
for i, inp in enumerate(fake_args)
if isinstance(inp, torch.Tensor)
}
out_storage_map = {
_tensor_storage(out): i
for i, out in enumerate(outputs)
if isinstance(out, torch.Tensor)
}
inp_inp_alias_map = {
i: inp_storage_map[_tensor_storage(inp)]
for i, inp in enumerate(fake_args)
if isinstance(inp, torch.Tensor) and inp_storage_map[_tensor_storage(inp)] != i
}
out_out_alias_map = {
i: out_storage_map[_tensor_storage(out)]
for i, out in enumerate(outputs)
if isinstance(out, torch.Tensor) and out_storage_map[_tensor_storage(out)] != i
}
inp_out_alias_map = {
i: out_storage_map[_tensor_storage(inp)]
for i, inp in enumerate(fake_args)
if isinstance(inp, torch.Tensor) and _tensor_storage(inp) in out_storage_map
}
mutated_inputs = []
for node in gm.graph.nodes:
if node.op == "call_function" and isinstance(
node.target, torch._ops.OpOverload
):
for arg_node, arg_schema in zip(node.args, node.target._schema.arguments):
if arg_schema.is_write:
arg_val = _get_example_value(arg_node)
assert isinstance(arg_val, torch.Tensor)
if _tensor_storage(arg_val) in inp_storage_map:
mutated_inputs.append(inp_storage_map[_tensor_storage(arg_val)])
return (
inp_inp_alias_map,
inp_out_alias_map,
out_out_alias_map,
mutated_inputs,
outputs,
)
registered_hop_fake_fns: dict[torch._ops.OpOverload, Callable] = {}
F = TypeVar("F", bound=Callable)
@overload
def register_fake(hop, fn: None = None) -> Callable[[F], F]: ...
@overload
def register_fake(hop, fn: F) -> F: ...
def register_fake(hop, fn=None):
"""
Register a fake function for a HOP. This is conceptually equivalent of the
register_fake utility for the custom ops. The registered function is called
inside the fake_tensor _dispatch_impl.
"""
assert hop not in registered_hop_fake_fns
def register(func: F) -> F:
from torch._subclasses.fake_tensor import FakeTensorMode
redirect_to_mode(hop, FakeTensorMode)
registered_hop_fake_fns[hop] = func
return func
if fn is None:
return register
return register(fn)
|
UnsupportedAliasMutationException
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/traversals.py
|
{
"start": 14473,
"end": 33091
}
|
class ____(HasTraversalDispatch, util.MemoizedSlots):
__slots__ = "stack", "cache", "anon_map"
def __init__(self):
self.stack: Deque[
Tuple[
Optional[ExternallyTraversible],
Optional[ExternallyTraversible],
]
] = deque()
self.cache = set()
def _memoized_attr_anon_map(self):
return (anon_map(), anon_map())
def compare(
self,
obj1: ExternallyTraversible,
obj2: ExternallyTraversible,
**kw: Any,
) -> bool:
stack = self.stack
cache = self.cache
compare_annotations = kw.get("compare_annotations", False)
stack.append((obj1, obj2))
while stack:
left, right = stack.popleft()
if left is right:
continue
elif left is None or right is None:
# we know they are different so no match
return False
elif (left, right) in cache:
continue
cache.add((left, right))
visit_name = left.__visit_name__
if visit_name != right.__visit_name__:
return False
meth = getattr(self, "compare_%s" % visit_name, None)
if meth:
attributes_compared = meth(left, right, **kw)
if attributes_compared is COMPARE_FAILED:
return False
elif attributes_compared is SKIP_TRAVERSE:
continue
# attributes_compared is returned as a list of attribute
# names that were "handled" by the comparison method above.
# remaining attribute names in the _traverse_internals
# will be compared.
else:
attributes_compared = ()
for (
(left_attrname, left_visit_sym),
(right_attrname, right_visit_sym),
) in zip_longest(
left._traverse_internals,
right._traverse_internals,
fillvalue=(None, None),
):
if not compare_annotations and (
(left_attrname == "_annotations")
or (right_attrname == "_annotations")
):
continue
if (
left_attrname != right_attrname
or left_visit_sym is not right_visit_sym
):
return False
elif left_attrname in attributes_compared:
continue
assert left_visit_sym is not None
assert left_attrname is not None
assert right_attrname is not None
dispatch = self.dispatch(left_visit_sym)
assert dispatch is not None, (
f"{self.__class__} has no dispatch for "
f"'{self._dispatch_lookup[left_visit_sym]}'"
)
left_child = operator.attrgetter(left_attrname)(left)
right_child = operator.attrgetter(right_attrname)(right)
if left_child is None:
if right_child is not None:
return False
else:
continue
elif right_child is None:
return False
comparison = dispatch(
left_attrname, left, left_child, right, right_child, **kw
)
if comparison is COMPARE_FAILED:
return False
return True
def compare_inner(self, obj1, obj2, **kw):
comparator = self.__class__()
return comparator.compare(obj1, obj2, **kw)
def visit_has_cache_key(
self, attrname, left_parent, left, right_parent, right, **kw
):
if left._gen_cache_key(self.anon_map[0], []) != right._gen_cache_key(
self.anon_map[1], []
):
return COMPARE_FAILED
def visit_propagate_attrs(
self, attrname, left_parent, left, right_parent, right, **kw
):
return self.compare_inner(
left.get("plugin_subject", None), right.get("plugin_subject", None)
)
def visit_has_cache_key_list(
self, attrname, left_parent, left, right_parent, right, **kw
):
for l, r in zip_longest(left, right, fillvalue=None):
if l is None:
if r is not None:
return COMPARE_FAILED
else:
continue
elif r is None:
return COMPARE_FAILED
if l._gen_cache_key(self.anon_map[0], []) != r._gen_cache_key(
self.anon_map[1], []
):
return COMPARE_FAILED
def visit_executable_options(
self, attrname, left_parent, left, right_parent, right, **kw
):
for l, r in zip_longest(left, right, fillvalue=None):
if l is None:
if r is not None:
return COMPARE_FAILED
else:
continue
elif r is None:
return COMPARE_FAILED
if (
l._gen_cache_key(self.anon_map[0], [])
if l._is_has_cache_key
else l
) != (
r._gen_cache_key(self.anon_map[1], [])
if r._is_has_cache_key
else r
):
return COMPARE_FAILED
def visit_clauseelement(
self, attrname, left_parent, left, right_parent, right, **kw
):
self.stack.append((left, right))
def visit_fromclause_canonical_column_collection(
self, attrname, left_parent, left, right_parent, right, **kw
):
for lcol, rcol in zip_longest(left, right, fillvalue=None):
self.stack.append((lcol, rcol))
def visit_fromclause_derived_column_collection(
self, attrname, left_parent, left, right_parent, right, **kw
):
pass
def visit_string_clauseelement_dict(
self, attrname, left_parent, left, right_parent, right, **kw
):
for lstr, rstr in zip_longest(
sorted(left), sorted(right), fillvalue=None
):
if lstr != rstr:
return COMPARE_FAILED
self.stack.append((left[lstr], right[rstr]))
def visit_clauseelement_tuples(
self, attrname, left_parent, left, right_parent, right, **kw
):
for ltup, rtup in zip_longest(left, right, fillvalue=None):
if ltup is None or rtup is None:
return COMPARE_FAILED
for l, r in zip_longest(ltup, rtup, fillvalue=None):
self.stack.append((l, r))
def visit_multi_list(
self, attrname, left_parent, left, right_parent, right, **kw
):
for l, r in zip_longest(left, right, fillvalue=None):
if isinstance(l, str):
if not isinstance(r, str) or l != r:
return COMPARE_FAILED
elif isinstance(r, str):
if not isinstance(l, str) or l != r:
return COMPARE_FAILED
else:
self.stack.append((l, r))
def visit_clauseelement_list(
self, attrname, left_parent, left, right_parent, right, **kw
):
for l, r in zip_longest(left, right, fillvalue=None):
self.stack.append((l, r))
def visit_clauseelement_tuple(
self, attrname, left_parent, left, right_parent, right, **kw
):
for l, r in zip_longest(left, right, fillvalue=None):
self.stack.append((l, r))
def _compare_unordered_sequences(self, seq1, seq2, **kw):
if seq1 is None:
return seq2 is None
completed: Set[object] = set()
for clause in seq1:
for other_clause in set(seq2).difference(completed):
if self.compare_inner(clause, other_clause, **kw):
completed.add(other_clause)
break
return len(completed) == len(seq1) == len(seq2)
def visit_clauseelement_unordered_set(
self, attrname, left_parent, left, right_parent, right, **kw
):
return self._compare_unordered_sequences(left, right, **kw)
def visit_fromclause_ordered_set(
self, attrname, left_parent, left, right_parent, right, **kw
):
for l, r in zip_longest(left, right, fillvalue=None):
self.stack.append((l, r))
def visit_string(
self, attrname, left_parent, left, right_parent, right, **kw
):
return left == right
def visit_string_list(
self, attrname, left_parent, left, right_parent, right, **kw
):
return left == right
def visit_string_multi_dict(
self, attrname, left_parent, left, right_parent, right, **kw
):
for lk, rk in zip_longest(
sorted(left.keys()), sorted(right.keys()), fillvalue=(None, None)
):
if lk != rk:
return COMPARE_FAILED
lv, rv = left[lk], right[rk]
lhc = isinstance(left, HasCacheKey)
rhc = isinstance(right, HasCacheKey)
if lhc and rhc:
if lv._gen_cache_key(
self.anon_map[0], []
) != rv._gen_cache_key(self.anon_map[1], []):
return COMPARE_FAILED
elif lhc != rhc:
return COMPARE_FAILED
elif lv != rv:
return COMPARE_FAILED
def visit_multi(
self, attrname, left_parent, left, right_parent, right, **kw
):
lhc = isinstance(left, HasCacheKey)
rhc = isinstance(right, HasCacheKey)
if lhc and rhc:
if left._gen_cache_key(
self.anon_map[0], []
) != right._gen_cache_key(self.anon_map[1], []):
return COMPARE_FAILED
elif lhc != rhc:
return COMPARE_FAILED
else:
return left == right
def visit_anon_name(
self, attrname, left_parent, left, right_parent, right, **kw
):
return _resolve_name_for_compare(
left_parent, left, self.anon_map[0], **kw
) == _resolve_name_for_compare(
right_parent, right, self.anon_map[1], **kw
)
def visit_boolean(
self, attrname, left_parent, left, right_parent, right, **kw
):
return left == right
def visit_operator(
self, attrname, left_parent, left, right_parent, right, **kw
):
return left == right
def visit_type(
self, attrname, left_parent, left, right_parent, right, **kw
):
return left._compare_type_affinity(right)
def visit_plain_dict(
self, attrname, left_parent, left, right_parent, right, **kw
):
return left == right
def visit_dialect_options(
self, attrname, left_parent, left, right_parent, right, **kw
):
return left == right
def visit_annotations_key(
self, attrname, left_parent, left, right_parent, right, **kw
):
if left and right:
return (
left_parent._annotations_cache_key
== right_parent._annotations_cache_key
)
else:
return left == right
def visit_compile_state_funcs(
self, attrname, left_parent, left, right_parent, right, **kw
):
return tuple((fn.__code__, c_key) for fn, c_key in left) == tuple(
(fn.__code__, c_key) for fn, c_key in right
)
def visit_plain_obj(
self, attrname, left_parent, left, right_parent, right, **kw
):
return left == right
def visit_named_ddl_element(
self, attrname, left_parent, left, right_parent, right, **kw
):
if left is None:
if right is not None:
return COMPARE_FAILED
return left.name == right.name
def visit_prefix_sequence(
self, attrname, left_parent, left, right_parent, right, **kw
):
for (l_clause, l_str), (r_clause, r_str) in zip_longest(
left, right, fillvalue=(None, None)
):
if l_str != r_str:
return COMPARE_FAILED
else:
self.stack.append((l_clause, r_clause))
def visit_setup_join_tuple(
self, attrname, left_parent, left, right_parent, right, **kw
):
# TODO: look at attrname for "legacy_join" and use different structure
for (
(l_target, l_onclause, l_from, l_flags),
(r_target, r_onclause, r_from, r_flags),
) in zip_longest(left, right, fillvalue=(None, None, None, None)):
if l_flags != r_flags:
return COMPARE_FAILED
self.stack.append((l_target, r_target))
self.stack.append((l_onclause, r_onclause))
self.stack.append((l_from, r_from))
def visit_memoized_select_entities(
self, attrname, left_parent, left, right_parent, right, **kw
):
return self.visit_clauseelement_tuple(
attrname, left_parent, left, right_parent, right, **kw
)
def visit_table_hint_list(
self, attrname, left_parent, left, right_parent, right, **kw
):
left_keys = sorted(left, key=lambda elem: (elem[0].fullname, elem[1]))
right_keys = sorted(
right, key=lambda elem: (elem[0].fullname, elem[1])
)
for (ltable, ldialect), (rtable, rdialect) in zip_longest(
left_keys, right_keys, fillvalue=(None, None)
):
if ldialect != rdialect:
return COMPARE_FAILED
elif left[(ltable, ldialect)] != right[(rtable, rdialect)]:
return COMPARE_FAILED
else:
self.stack.append((ltable, rtable))
def visit_statement_hint_list(
self, attrname, left_parent, left, right_parent, right, **kw
):
return left == right
def visit_unknown_structure(
self, attrname, left_parent, left, right_parent, right, **kw
):
raise NotImplementedError()
def visit_dml_ordered_values(
self, attrname, left_parent, left, right_parent, right, **kw
):
# sequence of tuple pairs
for (lk, lv), (rk, rv) in zip_longest(
left, right, fillvalue=(None, None)
):
if not self._compare_dml_values_or_ce(lk, rk, **kw):
return COMPARE_FAILED
def _compare_dml_values_or_ce(self, lv, rv, **kw):
lvce = hasattr(lv, "__clause_element__")
rvce = hasattr(rv, "__clause_element__")
if lvce != rvce:
return False
elif lvce and not self.compare_inner(lv, rv, **kw):
return False
elif not lvce and lv != rv:
return False
elif not self.compare_inner(lv, rv, **kw):
return False
return True
def visit_dml_values(
self, attrname, left_parent, left, right_parent, right, **kw
):
if left is None or right is None or len(left) != len(right):
return COMPARE_FAILED
if isinstance(left, collections_abc.Sequence):
for lv, rv in zip(left, right):
if not self._compare_dml_values_or_ce(lv, rv, **kw):
return COMPARE_FAILED
elif isinstance(right, collections_abc.Sequence):
return COMPARE_FAILED
else:
# dictionaries guaranteed to support insert ordering in
# py37 so that we can compare the keys in order. without
# this, we can't compare SQL expression keys because we don't
# know which key is which
for (lk, lv), (rk, rv) in zip(left.items(), right.items()):
if not self._compare_dml_values_or_ce(lk, rk, **kw):
return COMPARE_FAILED
if not self._compare_dml_values_or_ce(lv, rv, **kw):
return COMPARE_FAILED
def visit_dml_multi_values(
self, attrname, left_parent, left, right_parent, right, **kw
):
for lseq, rseq in zip_longest(left, right, fillvalue=None):
if lseq is None or rseq is None:
return COMPARE_FAILED
for ld, rd in zip_longest(lseq, rseq, fillvalue=None):
if (
self.visit_dml_values(
attrname, left_parent, ld, right_parent, rd, **kw
)
is COMPARE_FAILED
):
return COMPARE_FAILED
def visit_params(
self, attrname, left_parent, left, right_parent, right, **kw
):
return left == right
def compare_expression_clauselist(self, left, right, **kw):
if left.operator is right.operator:
if operators.is_associative(left.operator):
if self._compare_unordered_sequences(
left.clauses, right.clauses, **kw
):
return ["operator", "clauses"]
else:
return COMPARE_FAILED
else:
return ["operator"]
else:
return COMPARE_FAILED
def compare_clauselist(self, left, right, **kw):
return self.compare_expression_clauselist(left, right, **kw)
def compare_binary(self, left, right, **kw):
if left.operator == right.operator:
if operators.is_commutative(left.operator):
if (
self.compare_inner(left.left, right.left, **kw)
and self.compare_inner(left.right, right.right, **kw)
) or (
self.compare_inner(left.left, right.right, **kw)
and self.compare_inner(left.right, right.left, **kw)
):
return ["operator", "negate", "left", "right"]
else:
return COMPARE_FAILED
else:
return ["operator", "negate"]
else:
return COMPARE_FAILED
def compare_bindparam(self, left, right, **kw):
compare_keys = kw.pop("compare_keys", True)
compare_values = kw.pop("compare_values", True)
if compare_values:
omit = []
else:
# this means, "skip these, we already compared"
omit = ["callable", "value"]
if not compare_keys:
omit.append("key")
return omit
|
TraversalComparatorStrategy
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/colors/test_util__colors.py
|
{
"start": 1014,
"end": 1432
}
|
class ____(bcu.ColorGroup):
_colors = ("Red", "Green", "Blue")
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
|
_TestGroup
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_service_list.py
|
{
"start": 383,
"end": 6798
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1Service]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1ServiceList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1ServiceList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1ServiceList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1ServiceList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1ServiceList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1ServiceList. # noqa: E501
List of services # noqa: E501
:return: The items of this V1ServiceList. # noqa: E501
:rtype: list[V1Service]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1ServiceList.
List of services # noqa: E501
:param items: The items of this V1ServiceList. # noqa: E501
:type: list[V1Service]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1ServiceList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1ServiceList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1ServiceList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1ServiceList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1ServiceList. # noqa: E501
:return: The metadata of this V1ServiceList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1ServiceList.
:param metadata: The metadata of this V1ServiceList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ServiceList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ServiceList):
return True
return self.to_dict() != other.to_dict()
|
V1ServiceList
|
python
|
apache__airflow
|
airflow-core/src/airflow/api_fastapi/core_api/openapi/exceptions.py
|
{
"start": 855,
"end": 1637
}
|
class ____(BaseModel):
"""HTTPException Model used for error response."""
detail: str | dict
def create_openapi_http_exception_doc(responses_status_code: list[int]) -> dict:
"""
Will create additional response example for errors raised by the endpoint.
There is no easy way to introspect the code and automatically see what HTTPException are actually
raised by the endpoint implementation. This piece of documentation needs to be kept
in sync with the endpoint code manually.
Validation error i.e 422 are natively added to the openapi documentation by FastAPI.
"""
responses_status_code = sorted(responses_status_code)
return {status_code: {"model": HTTPExceptionResponse} for status_code in responses_status_code}
|
HTTPExceptionResponse
|
python
|
readthedocs__readthedocs.org
|
readthedocs/embed/v3/tests/test_basics.py
|
{
"start": 163,
"end": 2479
}
|
class ____:
@pytest.fixture(autouse=True)
def setup_method(self, settings):
settings.PUBLIC_DOMAIN = "readthedocs.io"
settings.RTD_EMBED_API_EXTERNAL_DOMAINS = [r"^docs\.project\.com$"]
self.api_url = reverse("embed_api_v3")
yield
cache.clear()
def test_not_url_query_argument(self, client):
params = {}
response = client.get(self.api_url, params)
assert response.status_code == 400
assert response.json() == {"error": 'Invalid arguments. Please provide "url".'}
def test_not_allowed_domain(self, client):
params = {
"url": "https://docs.notalloweddomain.com#title",
}
response = client.get(self.api_url, params)
assert response.status_code == 400
assert response.json() == {
"error": "External domain not allowed. domain=docs.notalloweddomain.com"
}
def test_malformed_url(self, client):
params = {
"url": "https:///page.html#title",
}
response = client.get(self.api_url, params)
assert response.status_code == 400
assert response.json() == {
"error": f'The URL requested is malformed. url={params["url"]}'
}
def test_rate_limit_domain(self, client):
params = {
"url": "https://docs.project.com#title",
}
cache_key = "embed-api-docs.project.com"
cache.set(cache_key, settings.RTD_EMBED_API_DOMAIN_RATE_LIMIT)
response = client.get(self.api_url, params)
assert response.status_code == 429
assert response.json() == {
"error": "Too many requests for this domain. domain=docs.project.com"
}
def test_infinite_redirect(self, client, requests_mock):
requests_mock.get(
"https://docs.project.com",
status_code=302,
headers={
"Location": "https://docs.project.com",
},
)
params = {
"url": "https://docs.project.com#title",
}
response = client.get(self.api_url, params)
assert response.status_code == 400
assert response.json() == {
"error": f'The URL requested generates too many redirects. url={params["url"]}'
}
|
TestEmbedAPIv3Basics
|
python
|
huggingface__transformers
|
src/transformers/models/sam2_video/modular_sam2_video.py
|
{
"start": 64012,
"end": 65878
}
|
class ____(nn.Module):
def __init__(self, config: Sam2VideoConfig):
super().__init__()
hidden_size = config.memory_encoder_hidden_size
output_channels = config.memory_encoder_output_channels
self.mask_downsampler = Sam2VideoMaskDownSampler(config)
self.feature_projection = nn.Conv2d(hidden_size, hidden_size, kernel_size=1)
self.memory_fuser = Sam2VideoMemoryFuser(config)
self.position_encoding = Sam2VideoPositionEmbeddingSine(num_pos_feats=output_channels // 2, normalize=True)
self.projection = nn.Conv2d(hidden_size, output_channels, kernel_size=1)
def forward(
self,
vision_features: torch.Tensor,
masks: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
## Process masks
masks = self.mask_downsampler(masks)
## Fuse pixel_features and downsampled masks
vision_features = self.feature_projection(vision_features)
vision_features = vision_features + masks
vision_features = self.memory_fuser(vision_features)
vision_features = self.projection(vision_features)
vision_pos_enc = self.position_encoding(vision_features.shape, vision_features.device, vision_features.dtype)
return vision_features, vision_pos_enc
# a large negative value as a placeholder score for missing objects
NO_OBJ_SCORE = -1024.0
def get_1d_sine_pe(pos_inds, dim, temperature=10000):
"""
Get 1D sine positional embedding as in the original Transformer paper.
"""
pe_dim = dim // 2
dim_t = torch.arange(pe_dim, dtype=torch.float32, device=pos_inds.device)
dim_t = temperature ** (2 * (dim_t // 2) / pe_dim)
pos_embed = pos_inds.unsqueeze(-1) / dim_t
pos_embed = torch.cat([pos_embed.sin(), pos_embed.cos()], dim=-1)
return pos_embed
@auto_docstring
|
Sam2VideoMemoryEncoder
|
python
|
run-llama__llama_index
|
llama-index-integrations/vector_stores/llama-index-vector-stores-google/llama_index/vector_stores/google/genai_extension.py
|
{
"start": 3604,
"end": 4732
}
|
class ____:
"""
Global configuration for Google Generative AI API.
Normally, the defaults should work fine. Use this to pass Google Auth credentials
such as using a service account. Refer to for auth credentials documentation:
https://developers.google.com/identity/protocols/oauth2/service-account#creatinganaccount.
Attributes:
api_endpoint: The Google Generative API endpoint address.
user_agent: The user agent to use for logging.
page_size: For paging RPCs, how many entities to return per RPC.
testing: Are the unit tests running?
auth_credentials: For setting credentials such as using service accounts.
"""
api_endpoint: str = _DEFAULT_API_ENDPOINT
user_agent: str = _USER_AGENT
page_size: int = _DEFAULT_PAGE_SIZE
testing: bool = False
auth_credentials: Optional[credentials.Credentials] = None
def set_config(config: Config) -> None:
"""Set global defaults for operations with Google Generative AI API."""
global _config
_config = config
def get_config() -> Config:
return _config
_config = Config()
|
Config
|
python
|
etianen__django-reversion
|
tests/test_app/tests/test_models.py
|
{
"start": 16522,
"end": 17321
}
|
class ____(TestBase):
def setUp(self):
reversion.register(TestModelInlineByNaturalKey, use_natural_foreign_keys=True)
reversion.register(TestModelWithNaturalKey)
def testNaturalKeyInline(self):
with reversion.create_revision():
inline = TestModelWithNaturalKey.objects.create()
obj = TestModelInlineByNaturalKey.objects.create(test_model=inline)
self.assertEqual(json.loads(Version.objects.get_for_object(obj).get().serialized_data), [{
'fields': {'test_model': ['v1']},
'model': 'test_app.testmodelinlinebynaturalkey',
'pk': 1
}])
self.assertEqual(Version.objects.get_for_object(obj).get().field_dict, {
'test_model_id': 1,
'id': 1,
})
|
NaturalKeyTest
|
python
|
ray-project__ray
|
python/ray/_private/runtime_env/uv.py
|
{
"start": 1512,
"end": 7742
}
|
class ____:
def __init__(
self,
target_dir: str,
runtime_env: "RuntimeEnv", # noqa: F821
logger: Optional[logging.Logger] = default_logger,
):
try:
import virtualenv # noqa: F401 ensure virtualenv exists.
except ImportError:
raise RuntimeError(
f"Please install virtualenv "
f"`{sys.executable} -m pip install virtualenv`"
f"to enable uv runtime env."
)
logger.debug("Setting up uv for runtime_env: %s", runtime_env)
self._target_dir = target_dir
# An empty directory is created to execute cmd.
self._exec_cwd = os.path.join(self._target_dir, "exec_cwd")
self._runtime_env = runtime_env
self._logger = logger
self._uv_config = self._runtime_env.uv_config()
self._uv_env = os.environ.copy()
self._uv_env.update(self._runtime_env.env_vars())
async def _install_uv(
self, path: str, cwd: str, pip_env: dict, logger: logging.Logger
):
"""Before package install, make sure the required version `uv` (if specifieds)
is installed.
"""
virtualenv_path = virtualenv_utils.get_virtualenv_path(path)
python = virtualenv_utils.get_virtualenv_python(path)
def _get_uv_exec_to_install() -> str:
"""Get `uv` executable with version to install."""
uv_version = self._uv_config.get("uv_version", None)
if uv_version:
return f"uv{uv_version}"
# Use default version.
return "uv"
uv_install_cmd = [
python,
"-m",
"pip",
"install",
"--disable-pip-version-check",
"--no-cache-dir",
_get_uv_exec_to_install(),
]
logger.info("Installing package uv to %s", virtualenv_path)
await check_output_cmd(uv_install_cmd, logger=logger, cwd=cwd, env=pip_env)
async def _check_uv_existence(
self, path: str, cwd: str, env: dict, logger: logging.Logger
) -> bool:
"""Check and return the existence of `uv` in virtual env."""
python = virtualenv_utils.get_virtualenv_python(path)
check_existence_cmd = [
python,
"-m",
"uv",
"version",
]
try:
# If `uv` doesn't exist, exception will be thrown.
await check_output_cmd(check_existence_cmd, logger=logger, cwd=cwd, env=env)
return True
except Exception:
return False
async def _uv_check(sef, python: str, cwd: str, logger: logging.Logger) -> None:
"""Check virtual env dependency compatibility.
If any incompatibility detected, exception will be thrown.
param:
python: the path for python executable within virtual environment.
"""
cmd = [python, "-m", "uv", "pip", "check"]
await check_output_cmd(
cmd,
logger=logger,
cwd=cwd,
)
async def _install_uv_packages(
self,
path: str,
uv_packages: List[str],
cwd: str,
pip_env: Dict,
logger: logging.Logger,
):
"""Install required python packages via `uv`."""
virtualenv_path = virtualenv_utils.get_virtualenv_path(path)
python = virtualenv_utils.get_virtualenv_python(path)
# TODO(fyrestone): Support -i, --no-deps, --no-cache-dir, ...
requirements_file = dependency_utils.get_requirements_file(path, uv_packages)
# Check existence for `uv` and see if we could skip `uv` installation.
uv_exists = await self._check_uv_existence(path, cwd, pip_env, logger)
# Install uv, which acts as the default package manager.
if (not uv_exists) or (self._uv_config.get("uv_version", None) is not None):
await self._install_uv(path, cwd, pip_env, logger)
# Avoid blocking the event loop.
loop = get_running_loop()
await loop.run_in_executor(
None, dependency_utils.gen_requirements_txt, requirements_file, uv_packages
)
# Install all dependencies.
#
# Difference with pip:
# 1. `--disable-pip-version-check` has no effect for uv.
uv_install_cmd = [
python,
"-m",
"uv",
"pip",
"install",
"-r",
requirements_file,
]
uv_opt_list = self._uv_config.get("uv_pip_install_options", ["--no-cache"])
if uv_opt_list:
uv_install_cmd += uv_opt_list
logger.info("Installing python requirements to %s", virtualenv_path)
await check_output_cmd(uv_install_cmd, logger=logger, cwd=cwd, env=pip_env)
# Check python environment for conflicts.
if self._uv_config.get("uv_check", False):
await self._uv_check(python, cwd, logger)
async def _run(self):
path = self._target_dir
logger = self._logger
uv_packages = self._uv_config["packages"]
# We create an empty directory for exec cmd so that the cmd will
# run more stable. e.g. if cwd has ray, then checking ray will
# look up ray in cwd instead of site packages.
os.makedirs(self._exec_cwd, exist_ok=True)
try:
await virtualenv_utils.create_or_get_virtualenv(
path, self._exec_cwd, logger
)
python = virtualenv_utils.get_virtualenv_python(path)
async with dependency_utils.check_ray(python, self._exec_cwd, logger):
# Install packages with uv.
await self._install_uv_packages(
path,
uv_packages,
self._exec_cwd,
self._uv_env,
logger,
)
except Exception:
logger.info("Delete incomplete virtualenv: %s", path)
shutil.rmtree(path, ignore_errors=True)
logger.exception("Failed to install uv packages.")
raise
def __await__(self):
return self._run().__await__()
|
UvProcessor
|
python
|
PrefectHQ__prefect
|
src/prefect/server/database/orm_models.py
|
{
"start": 4720,
"end": 5425
}
|
class ____(Base):
"""SQLAlchemy mixin of a flow."""
name: Mapped[str]
tags: Mapped[list[str]] = mapped_column(JSON, server_default="[]", default=list)
labels: Mapped[Optional[schemas.core.KeyValueLabels]] = mapped_column(JSON)
flow_runs: Mapped[list["FlowRun"]] = relationship(
back_populates="flow", lazy="raise"
)
deployments: Mapped[list["Deployment"]] = relationship(
back_populates="flow", lazy="raise"
)
__table_args__: Any = (
sa.UniqueConstraint("name"),
sa.Index("ix_flow__created", "created"),
sa.Index("trgm_ix_flow_name", "name", postgresql_using="gin").ddl_if(
dialect="postgresql"
),
)
|
Flow
|
python
|
pytorch__pytorch
|
test/test_serialization.py
|
{
"start": 4256,
"end": 4738
}
|
class ____(torch.Tensor):
@staticmethod
def __new__(cls, elem, **kwargs):
assert elem.dtype is torch.uint8
assert not kwargs.get("requires_grad", False)
kwargs["requires_grad"] = False
return torch.Tensor._make_wrapper_subclass(cls, up_size(elem.shape), dtype=torch.int4, **kwargs)
def __init__(self, elem):
self.elem = elem
@classmethod
def __torch_dispatch__(cls, func, types, args, kwargs=None):
pass
|
Int4Tensor
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/dataplex.py
|
{
"start": 36280,
"end": 39491
}
|
class ____(GoogleCloudBaseOperator):
"""
Deletes a DataScan resource.
:param project_id: Required. The ID of the Google Cloud project that the lake belongs to.
:param region: Required. The ID of the Google Cloud region that the lake belongs to.
:param data_scan_id: Required. Data Quality scan identifier.
:param api_version: The version of the api that will be requested for example 'v1'.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:return: None
"""
template_fields = ("project_id", "data_scan_id", "impersonation_chain")
def __init__(
self,
project_id: str,
region: str,
data_scan_id: str,
api_version: str = "v1",
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.data_scan_id = data_scan_id
self.api_version = api_version
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = DataplexHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Deleting Dataplex Data Quality Scan: %s", self.data_scan_id)
operation = hook.delete_data_scan(
project_id=self.project_id,
region=self.region,
data_scan_id=self.data_scan_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Dataplex Data Quality scan %s deleted successfully!", self.data_scan_id)
|
DataplexDeleteDataQualityScanOperator
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/annotation.py
|
{
"start": 1441,
"end": 2989
}
|
class ____(ExternallyTraversible):
__slots__ = ()
_annotations: util.immutabledict[str, Any] = EMPTY_ANNOTATIONS
proxy_set: util.generic_fn_descriptor[FrozenSet[Any]]
_is_immutable: bool
def _annotate(self, values: _AnnotationDict) -> Self:
raise NotImplementedError()
@overload
def _deannotate(
self,
values: Literal[None] = ...,
clone: bool = ...,
) -> Self: ...
@overload
def _deannotate(
self,
values: Sequence[str] = ...,
clone: bool = ...,
) -> SupportsAnnotations: ...
def _deannotate(
self,
values: Optional[Sequence[str]] = None,
clone: bool = False,
) -> SupportsAnnotations:
raise NotImplementedError()
@util.memoized_property
def _annotations_cache_key(self) -> Tuple[Any, ...]:
anon_map_ = anon_map()
return self._gen_annotations_cache_key(anon_map_)
def _gen_annotations_cache_key(
self, anon_map: anon_map
) -> Tuple[Any, ...]:
return (
"_annotations",
tuple(
(
key,
(
value._gen_cache_key(anon_map, [])
if isinstance(value, HasCacheKey)
else value
),
)
for key, value in sorted(
self._annotations.items(), key=_get_item0
)
),
)
_get_item0 = itemgetter(0)
|
SupportsAnnotations
|
python
|
langchain-ai__langchain
|
libs/langchain_v1/tests/unit_tests/agents/test_response_format.py
|
{
"start": 2930,
"end": 5869
}
|
class ____:
def test_pydantic_model(self) -> None:
"""Test response_format as Pydantic model."""
tool_calls = [
[{"args": {}, "id": "1", "name": "get_weather"}],
[
{
"name": "WeatherBaseModel",
"id": "2",
"args": WEATHER_DATA,
}
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
agent = create_agent(model, [get_weather], response_format=WeatherBaseModel)
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
assert len(response["messages"]) == 5
def test_dataclass(self) -> None:
"""Test response_format as dataclass."""
tool_calls = [
[{"args": {}, "id": "1", "name": "get_weather"}],
[
{
"name": "WeatherDataclass",
"id": "2",
"args": WEATHER_DATA,
}
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
agent = create_agent(model, [get_weather], response_format=WeatherDataclass)
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
assert response["structured_response"] == EXPECTED_WEATHER_DATACLASS
assert len(response["messages"]) == 5
def test_typed_dict(self) -> None:
"""Test response_format as TypedDict."""
tool_calls = [
[{"args": {}, "id": "1", "name": "get_weather"}],
[
{
"name": "WeatherTypedDict",
"id": "2",
"args": WEATHER_DATA,
}
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
agent = create_agent(model, [get_weather], response_format=WeatherTypedDict)
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
assert response["structured_response"] == EXPECTED_WEATHER_DICT
assert len(response["messages"]) == 5
def test_json_schema(self) -> None:
"""Test response_format as JSON schema."""
tool_calls = [
[{"args": {}, "id": "1", "name": "get_weather"}],
[
{
"name": "weather_schema",
"id": "2",
"args": WEATHER_DATA,
}
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
agent = create_agent(model, [get_weather], response_format=weather_json_schema)
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
assert response["structured_response"] == EXPECTED_WEATHER_DICT
assert len(response["messages"]) == 5
|
TestResponseFormatAsModel
|
python
|
TheAlgorithms__Python
|
machine_learning/automatic_differentiation.py
|
{
"start": 3962,
"end": 4715
}
|
class ____:
"""
Class represents operation between single or two Variable objects.
Operation objects contains type of operation, pointers to input Variable
objects and pointer to resulting Variable from the operation.
"""
def __init__(
self,
op_type: OpType,
other_params: dict | None = None,
) -> None:
self.op_type = op_type
self.other_params = {} if other_params is None else other_params
def add_params(self, params: list[Variable]) -> None:
self.params = params
def add_output(self, output: Variable) -> None:
self.output = output
def __eq__(self, value) -> bool:
return self.op_type == value if isinstance(value, OpType) else False
|
Operation
|
python
|
etianen__django-reversion
|
tests/test_app/tests/test_models.py
|
{
"start": 5526,
"end": 5993
}
|
class ____(TestModelMixin, TestBase):
databases = {"default", "postgres"}
def testGetForObjectReferenceModelDb(self):
with reversion.create_revision(using="postgres"):
obj = TestModel.objects.create()
self.assertEqual(Version.objects.get_for_object_reference(TestModel, obj.pk).count(), 0)
self.assertEqual(Version.objects.using("postgres").get_for_object_reference(TestModel, obj.pk).count(), 1)
|
GetForObjectReferenceDbTest
|
python
|
django-import-export__django-import-export
|
tests/core/tests/test_command_utils.py
|
{
"start": 1038,
"end": 2514
}
|
class ____(TestCase):
def test_load_by_format_name(self):
format_class = get_format_class("CSV", None)
self.assertIsInstance(format_class, base_formats.CSV)
def test_load_by_full_format_path(self):
format_class = get_format_class("import_export.formats.base_formats.CSV", None)
self.assertIsInstance(format_class, base_formats.CSV)
def test_invalid_format_name(self):
invalid_format = "EXCEL"
with self.assertRaises(CommandError) as context:
get_format_class(invalid_format, None)
self.assertIn(
"Cannot import 'EXCEL' or 'import_export.formats.base_formats.EXCEL'",
str(context.exception),
)
def test_load_by_file_name_with_known_mime_type(self):
format_class = get_format_class(None, "test.csv")
self.assertIsInstance(format_class, base_formats.CSV)
def test_load_by_file_name_with_unknown_mime_type(self):
with self.assertRaises(CommandError) as context:
get_format_class(None, "test.unknown")
self.assertIn(
"Cannot determine MIME type for 'test.unknown'", str(context.exception)
)
def test_load_by_file_name_with_no_mime_mapping(self):
with self.assertRaises(CommandError) as context:
get_format_class(None, "test.pdf")
self.assertIn(
"Cannot find format for MIME type 'application/pdf'", str(context.exception)
)
|
GetFormatClassTest
|
python
|
getsentry__sentry
|
tests/sentry/event_manager/test_severity.py
|
{
"start": 1079,
"end": 11270
}
|
class ____(TestCase):
@patch(
"sentry.event_manager.severity_connection_pool.urlopen",
return_value=HTTPResponse(body=orjson.dumps({"severity": 0.1231})),
)
def test_error_event_simple(self, mock_urlopen: MagicMock) -> None:
manager = EventManager(
make_event(
exception={
"values": [
{
"type": "NopeError",
"value": "Nopey McNopeface",
"mechanism": {"type": "generic", "handled": True},
}
]
},
platform="python",
)
)
event = manager.save(self.project.id)
severity, reason = _get_severity_score(event)
payload = {
"message": "NopeError: Nopey McNopeface",
"has_stacktrace": 0,
"handled": True,
}
mock_urlopen.assert_called_with(
"POST",
"/v0/issues/severity-score",
body=orjson.dumps(payload),
headers={"content-type": "application/json;charset=utf-8"},
timeout=options.get("issues.severity.seer-timeout", settings.SEER_SEVERITY_TIMEOUT),
)
assert severity == 0.1231
assert reason == "ml"
assert cache.get(SEER_ERROR_COUNT_KEY) == 0
with (
override_options({"seer.api.use-shared-secret": 1.0}),
override_settings(SEER_API_SHARED_SECRET="some-secret"),
):
_get_severity_score(event)
mock_urlopen.assert_called_with(
"POST",
"/v0/issues/severity-score",
body=orjson.dumps(payload),
headers={
"content-type": "application/json;charset=utf-8",
"Authorization": "Rpcsignature rpc0:b14214093c3e7c633e68ac90b01087e710fe2f96c0544b232b9ec9bc6ca971f4",
},
timeout=options.get("issues.severity.seer-timeout", settings.SEER_SEVERITY_TIMEOUT),
)
@patch(
"sentry.event_manager.severity_connection_pool.urlopen",
return_value=HTTPResponse(body=orjson.dumps({"severity": 0.1231})),
)
def test_message_event_simple(
self,
mock_urlopen: MagicMock,
) -> None:
cases: list[dict[str, Any]] = [
{"message": "Dogs are great!"},
{"logentry": {"formatted": "Dogs are great!"}},
{"logentry": {"message": "Dogs are great!"}},
]
for case in cases:
manager = EventManager(make_event(**case))
event = manager.save(self.project.id)
severity, reason = _get_severity_score(event)
payload = {
"message": "Dogs are great!",
"has_stacktrace": 0,
"handled": None,
}
mock_urlopen.assert_called_with(
"POST",
"/v0/issues/severity-score",
body=orjson.dumps(payload),
headers={"content-type": "application/json;charset=utf-8"},
timeout=options.get("issues.severity.seer-timeout", settings.SEER_SEVERITY_TIMEOUT),
)
assert severity == 0.1231
assert reason == "ml"
assert cache.get(SEER_ERROR_COUNT_KEY) == 0
@patch(
"sentry.event_manager.severity_connection_pool.urlopen",
return_value=HTTPResponse(body=orjson.dumps({"severity": 0.1231})),
)
def test_uses_exception(
self,
mock_urlopen: MagicMock,
) -> None:
manager = EventManager(
make_event(
exception={"values": [{"type": "NopeError", "value": "Nopey McNopeface"}]},
platform="python",
)
)
event = manager.save(self.project.id)
event.data["metadata"]["title"] = "Dogs are great!"
_get_severity_score(event)
assert (
orjson.loads(mock_urlopen.call_args.kwargs["body"])["message"]
== "NopeError: Nopey McNopeface"
)
@patch(
"sentry.event_manager.severity_connection_pool.urlopen",
return_value=HTTPResponse(body=orjson.dumps({"severity": 0.1231})),
)
def test_short_circuit_level(
self,
mock_urlopen: MagicMock,
) -> None:
cases: list[tuple[str, float, str]] = [
("fatal", 1.0, "log_level_fatal"),
("info", 0.0, "log_level_info"),
("debug", 0.0, "log_level_info"),
("error", 0.1231, "ml"),
]
for level, expected_severity, expected_reason in cases:
manager = EventManager(
make_event(
exception={"values": [{"type": "NopeError", "value": "Nopey McNopeface"}]},
level=level,
platform="python",
)
)
event = manager.save(self.project.id)
severity, reason = _get_severity_score(event)
assert severity == expected_severity
assert reason == expected_reason
@patch(
"sentry.event_manager.severity_connection_pool.urlopen",
return_value=HTTPResponse(body=orjson.dumps({"severity": 0.1231})),
)
@patch("sentry.event_manager.logger.warning")
def test_unusable_event_title(
self,
mock_logger_warning: MagicMock,
mock_urlopen: MagicMock,
) -> None:
for title in PLACEHOLDER_EVENT_TITLES:
manager = EventManager(make_event(exception={"values": []}, platform="python"))
event = manager.save(self.project.id)
# `title` is a property with no setter, but it pulls from `data`, so it's equivalent
# to set it there
event.data["title"] = title
severity, reason = _get_severity_score(event)
mock_urlopen.assert_not_called()
mock_logger_warning.assert_called_with(
"Unable to get severity score because of unusable `message` value '%s'",
title,
extra={
"event_id": event.event_id,
"op": "event_manager._get_severity_score",
"event_type": "default",
"title": title,
},
)
assert severity == 0.0
assert reason == "bad_title"
@pytest.mark.skip(reason="flaky: #103306")
@patch(
"sentry.event_manager.severity_connection_pool.urlopen",
side_effect=MaxRetryError(
severity_connection_pool, "/issues/severity-score", Exception("It broke")
),
)
@patch("sentry.event_manager.metrics.incr")
def test_max_retry_exception(
self,
mock_metrics_incr: MagicMock,
_mock_urlopen: MagicMock,
) -> None:
manager = EventManager(
make_event(
exception={
"values": [
{
"type": "NopeError",
"value": "Nopey McNopeface",
"mechanism": {"type": "generic", "handled": True},
}
]
},
platform="python",
)
)
event = manager.save(self.project.id)
severity, reason = _get_severity_score(event)
mock_metrics_incr.assert_called_with(
"issues.severity.error", tags={"reason": "max_retries"}
)
assert severity == 1.0
assert reason == "microservice_max_retry"
assert cache.get(SEER_ERROR_COUNT_KEY) == 1
@patch(
"sentry.event_manager.severity_connection_pool.urlopen",
side_effect=ConnectTimeoutError(),
)
@patch("sentry.event_manager.metrics.incr")
def test_timeout_error(
self,
mock_metrics_incr: MagicMock,
_mock_urlopen: MagicMock,
) -> None:
manager = EventManager(
make_event(
exception={
"values": [
{
"type": "NopeError",
"value": "Nopey McNopeface",
"mechanism": {"type": "generic", "handled": True},
}
]
},
platform="python",
)
)
event = manager.save(self.project.id)
severity, reason = _get_severity_score(event)
mock_metrics_incr.assert_called_with("issues.severity.error", tags={"reason": "timeout"})
assert severity == 1.0
assert reason == "microservice_timeout"
assert cache.get(SEER_ERROR_COUNT_KEY) == 1
@patch(
"sentry.event_manager.severity_connection_pool.urlopen",
side_effect=Exception("It broke"),
)
@patch("sentry.event_manager.sentry_sdk.capture_exception")
@patch("sentry.event_manager.metrics.incr")
def test_other_exception(
self,
mock_metrics_incr: MagicMock,
mock_capture_exception: MagicMock,
_mock_urlopen: MagicMock,
) -> None:
manager = EventManager(
make_event(
exception={
"values": [
{
"type": "NopeError",
"value": "Nopey McNopeface",
"mechanism": {"type": "generic", "handled": True},
}
],
},
platform="python",
)
)
event = manager.save(self.project.id)
severity, reason = _get_severity_score(event)
mock_capture_exception.assert_called_once_with()
mock_metrics_incr.assert_called_with("issues.severity.error", tags={"reason": "unknown"})
assert severity == 1.0
assert reason == "microservice_error"
assert cache.get(SEER_ERROR_COUNT_KEY) == 1
@with_feature("projects:first-event-severity-calculation")
@with_feature("organizations:seer-based-priority")
|
TestGetEventSeverity
|
python
|
huggingface__transformers
|
src/transformers/models/cvt/modeling_cvt.py
|
{
"start": 16827,
"end": 17866
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.stages = nn.ModuleList([])
for stage_idx in range(len(config.depth)):
self.stages.append(CvtStage(config, stage_idx))
def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
all_hidden_states = () if output_hidden_states else None
hidden_state = pixel_values
cls_token = None
for _, (stage_module) in enumerate(self.stages):
hidden_state, cls_token = stage_module(hidden_state)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, cls_token, all_hidden_states] if v is not None)
return BaseModelOutputWithCLSToken(
last_hidden_state=hidden_state,
cls_token_value=cls_token,
hidden_states=all_hidden_states,
)
@auto_docstring
|
CvtEncoder
|
python
|
numba__numba
|
numba/tests/test_parfors_passes.py
|
{
"start": 4650,
"end": 7154
}
|
class ____(BaseTest):
sub_pass_class = numba.parfors.parfor.ConvertSetItemPass
def test_setitem_full_slice(self):
def test_impl():
n = 10
a = np.ones(n)
a[:] = 7
return a
sub_pass = self.run_parfor_sub_pass(test_impl, ())
self.assertEqual(len(sub_pass.rewritten), 1)
[record] = sub_pass.rewritten
self.assertEqual(record["reason"], "slice")
self.check_records(sub_pass.rewritten)
self.run_parallel(test_impl)
def test_setitem_slice_stop_bound(self):
def test_impl():
n = 10
a = np.ones(n)
a[:5] = 7
return a
sub_pass = self.run_parfor_sub_pass(test_impl, ())
self.assertEqual(len(sub_pass.rewritten), 1)
[record] = sub_pass.rewritten
self.assertEqual(record["reason"], "slice")
self.check_records(sub_pass.rewritten)
self.run_parallel(test_impl)
def test_setitem_slice_start_bound(self):
def test_impl():
n = 10
a = np.ones(n)
a[4:] = 7
return a
sub_pass = self.run_parfor_sub_pass(test_impl, ())
self.assertEqual(len(sub_pass.rewritten), 1)
[record] = sub_pass.rewritten
self.assertEqual(record["reason"], "slice")
self.check_records(sub_pass.rewritten)
self.run_parallel(test_impl)
def test_setitem_gather_if_scalar(self):
def test_impl():
n = 10
a = np.ones(n)
b = np.ones_like(a, dtype=np.bool_)
a[b] = 7
return a
sub_pass = self.run_parfor_sub_pass(test_impl, ())
self.assertEqual(len(sub_pass.rewritten), 1)
[record] = sub_pass.rewritten
self.assertEqual(record["reason"], "masked_assign_broadcast_scalar")
self.check_records(sub_pass.rewritten)
self.run_parallel(test_impl)
def test_setitem_gather_if_array(self):
def test_impl():
n = 10
a = np.ones(n)
b = np.ones_like(a, dtype=np.bool_)
c = np.ones_like(a)
a[b] = c[b]
return a
sub_pass = self.run_parfor_sub_pass(test_impl, ())
self.assertEqual(len(sub_pass.rewritten), 1)
[record] = sub_pass.rewritten
self.assertEqual(record["reason"], "masked_assign_array")
self.check_records(sub_pass.rewritten)
self.run_parallel(test_impl)
|
TestConvertSetItemPass
|
python
|
numpy__numpy
|
numpy/_core/tests/test_datetime.py
|
{
"start": 593,
"end": 123524
}
|
class ____:
def test_string(self):
msg = "no explicit representation of timezones available for " \
"np.datetime64"
with pytest.warns(UserWarning, match=msg):
np.datetime64('2000-01-01T00+01')
def test_datetime(self):
msg = "no explicit representation of timezones available for " \
"np.datetime64"
with pytest.warns(UserWarning, match=msg):
t0 = np.datetime64('2023-06-09T12:18:40Z', 'ns')
t0 = np.datetime64('2023-06-09T12:18:40', 'ns')
def test_datetime_dtype_creation(self):
for unit in ['Y', 'M', 'W', 'D',
'h', 'm', 's', 'ms', 'us',
'μs', # alias for us
'ns', 'ps', 'fs', 'as']:
dt1 = np.dtype(f'M8[750{unit}]')
assert_(dt1 == np.dtype(f'datetime64[750{unit}]'))
dt2 = np.dtype(f'm8[{unit}]')
assert_(dt2 == np.dtype(f'timedelta64[{unit}]'))
# Generic units shouldn't add [] to the end
assert_equal(str(np.dtype("M8")), "datetime64")
# Should be possible to specify the endianness
assert_equal(np.dtype("=M8"), np.dtype("M8"))
assert_equal(np.dtype("=M8[s]"), np.dtype("M8[s]"))
assert_(np.dtype(">M8") == np.dtype("M8") or
np.dtype("<M8") == np.dtype("M8"))
assert_(np.dtype(">M8[D]") == np.dtype("M8[D]") or
np.dtype("<M8[D]") == np.dtype("M8[D]"))
assert_(np.dtype(">M8") != np.dtype("<M8"))
assert_equal(np.dtype("=m8"), np.dtype("m8"))
assert_equal(np.dtype("=m8[s]"), np.dtype("m8[s]"))
assert_(np.dtype(">m8") == np.dtype("m8") or
np.dtype("<m8") == np.dtype("m8"))
assert_(np.dtype(">m8[D]") == np.dtype("m8[D]") or
np.dtype("<m8[D]") == np.dtype("m8[D]"))
assert_(np.dtype(">m8") != np.dtype("<m8"))
# Check that the parser rejects bad datetime types
assert_raises(TypeError, np.dtype, 'M8[badunit]')
assert_raises(TypeError, np.dtype, 'm8[badunit]')
assert_raises(TypeError, np.dtype, 'M8[YY]')
assert_raises(TypeError, np.dtype, 'm8[YY]')
assert_raises(TypeError, np.dtype, 'm4')
assert_raises(TypeError, np.dtype, 'M7')
assert_raises(TypeError, np.dtype, 'm7')
assert_raises(TypeError, np.dtype, 'M16')
assert_raises(TypeError, np.dtype, 'm16')
assert_raises(TypeError, np.dtype, 'M8[3000000000ps]')
def test_datetime_casting_rules(self):
# Cannot cast safely/same_kind between timedelta and datetime
assert_(not np.can_cast('m8', 'M8', casting='same_kind'))
assert_(not np.can_cast('M8', 'm8', casting='same_kind'))
assert_(not np.can_cast('m8', 'M8', casting='safe'))
assert_(not np.can_cast('M8', 'm8', casting='safe'))
# Can cast safely/same_kind from integer to timedelta
assert_(np.can_cast('i8', 'm8', casting='same_kind'))
assert_(np.can_cast('i8', 'm8', casting='safe'))
assert_(np.can_cast('i4', 'm8', casting='same_kind'))
assert_(np.can_cast('i4', 'm8', casting='safe'))
assert_(np.can_cast('u4', 'm8', casting='same_kind'))
assert_(np.can_cast('u4', 'm8', casting='safe'))
# Cannot cast safely from unsigned integer of the same size, which
# could overflow
assert_(np.can_cast('u8', 'm8', casting='same_kind'))
assert_(not np.can_cast('u8', 'm8', casting='safe'))
# Cannot cast safely/same_kind from float to timedelta
assert_(not np.can_cast('f4', 'm8', casting='same_kind'))
assert_(not np.can_cast('f4', 'm8', casting='safe'))
# Cannot cast safely/same_kind from integer to datetime
assert_(not np.can_cast('i8', 'M8', casting='same_kind'))
assert_(not np.can_cast('i8', 'M8', casting='safe'))
# Cannot cast safely/same_kind from bool to datetime
assert_(not np.can_cast('b1', 'M8', casting='same_kind'))
assert_(not np.can_cast('b1', 'M8', casting='safe'))
# Can cast safely/same_kind from bool to timedelta
assert_(np.can_cast('b1', 'm8', casting='same_kind'))
assert_(np.can_cast('b1', 'm8', casting='safe'))
# Can cast datetime safely from months/years to days
assert_(np.can_cast('M8[M]', 'M8[D]', casting='safe'))
assert_(np.can_cast('M8[Y]', 'M8[D]', casting='safe'))
# Cannot cast timedelta safely from months/years to days
assert_(not np.can_cast('m8[M]', 'm8[D]', casting='safe'))
assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='safe'))
# Can cast datetime same_kind from months/years to days
assert_(np.can_cast('M8[M]', 'M8[D]', casting='same_kind'))
assert_(np.can_cast('M8[Y]', 'M8[D]', casting='same_kind'))
# Can't cast timedelta same_kind from months/years to days
assert_(not np.can_cast('m8[M]', 'm8[D]', casting='same_kind'))
assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='same_kind'))
# Can cast datetime same_kind across the date/time boundary
assert_(np.can_cast('M8[D]', 'M8[h]', casting='same_kind'))
# Can cast timedelta same_kind across the date/time boundary
assert_(np.can_cast('m8[D]', 'm8[h]', casting='same_kind'))
assert_(np.can_cast('m8[h]', 'm8[D]', casting='same_kind'))
# Cannot cast safely if the integer multiplier doesn't divide
assert_(not np.can_cast('M8[7h]', 'M8[3h]', casting='safe'))
assert_(not np.can_cast('M8[3h]', 'M8[6h]', casting='safe'))
# But can cast same_kind
assert_(np.can_cast('M8[7h]', 'M8[3h]', casting='same_kind'))
# Can cast safely if the integer multiplier does divide
assert_(np.can_cast('M8[6h]', 'M8[3h]', casting='safe'))
# We can always cast types with generic units (corresponding to NaT) to
# more specific types
assert_(np.can_cast('m8', 'm8[h]', casting='same_kind'))
assert_(np.can_cast('m8', 'm8[h]', casting='safe'))
assert_(np.can_cast('M8', 'M8[h]', casting='same_kind'))
assert_(np.can_cast('M8', 'M8[h]', casting='safe'))
# but not the other way around
assert_(not np.can_cast('m8[h]', 'm8', casting='same_kind'))
assert_(not np.can_cast('m8[h]', 'm8', casting='safe'))
assert_(not np.can_cast('M8[h]', 'M8', casting='same_kind'))
assert_(not np.can_cast('M8[h]', 'M8', casting='safe'))
def test_datetime_prefix_conversions(self):
# regression tests related to gh-19631;
# test metric prefixes from seconds down to
# attoseconds for bidirectional conversions
smaller_units = ['M8[7000ms]',
'M8[2000us]',
'M8[1000ns]',
'M8[5000ns]',
'M8[2000ps]',
'M8[9000fs]',
'M8[1000as]',
'M8[2000000ps]',
'M8[1000000as]',
'M8[2000000000ps]',
'M8[1000000000as]']
larger_units = ['M8[7s]',
'M8[2ms]',
'M8[us]',
'M8[5us]',
'M8[2ns]',
'M8[9ps]',
'M8[1fs]',
'M8[2us]',
'M8[1ps]',
'M8[2ms]',
'M8[1ns]']
for larger_unit, smaller_unit in zip(larger_units, smaller_units):
assert np.can_cast(larger_unit, smaller_unit, casting='safe')
assert np.can_cast(smaller_unit, larger_unit, casting='safe')
@pytest.mark.parametrize("unit", [
"s", "ms", "us", "ns", "ps", "fs", "as"])
def test_prohibit_negative_datetime(self, unit):
with assert_raises(TypeError):
np.array([1], dtype=f"M8[-1{unit}]")
def test_compare_generic_nat(self):
# regression tests for gh-6452
assert_(np.datetime64('NaT') !=
np.datetime64('2000') + np.timedelta64('NaT'))
assert_(np.datetime64('NaT') != np.datetime64('NaT', 'us'))
assert_(np.datetime64('NaT', 'us') != np.datetime64('NaT'))
@pytest.mark.parametrize("size", [
3, 21, 217, 1000])
def test_datetime_nat_argsort_stability(self, size):
# NaT < NaT should be False internally for
# sort stability
expected = np.arange(size)
arr = np.tile(np.datetime64('NaT'), size)
assert_equal(np.argsort(arr, kind='mergesort'), expected)
@pytest.mark.parametrize("size", [
3, 21, 217, 1000])
def test_timedelta_nat_argsort_stability(self, size):
# NaT < NaT should be False internally for
# sort stability
expected = np.arange(size)
arr = np.tile(np.timedelta64('NaT'), size)
assert_equal(np.argsort(arr, kind='mergesort'), expected)
@pytest.mark.parametrize("arr, expected", [
# the example provided in gh-12629
(['NaT', 1, 2, 3],
[1, 2, 3, 'NaT']),
# multiple NaTs
(['NaT', 9, 'NaT', -707],
[-707, 9, 'NaT', 'NaT']),
# this sort explores another code path for NaT
([1, -2, 3, 'NaT'],
[-2, 1, 3, 'NaT']),
# 2-D array
([[51, -220, 'NaT'],
[-17, 'NaT', -90]],
[[-220, 51, 'NaT'],
[-90, -17, 'NaT']]),
])
@pytest.mark.parametrize("dtype", [
'M8[ns]', 'M8[us]',
'm8[ns]', 'm8[us]'])
def test_datetime_timedelta_sort_nat(self, arr, expected, dtype):
# fix for gh-12629 and gh-15063; NaT sorting to end of array
arr = np.array(arr, dtype=dtype)
expected = np.array(expected, dtype=dtype)
arr.sort()
assert_equal(arr, expected)
def test_datetime_scalar_construction(self):
# Construct with different units
assert_equal(np.datetime64('1950-03-12', 'D'),
np.datetime64('1950-03-12'))
assert_equal(np.datetime64('1950-03-12T13', 's'),
np.datetime64('1950-03-12T13', 'm'))
# Default construction means NaT
assert_equal(np.datetime64(), np.datetime64('NaT'))
# Some basic strings and repr
assert_equal(str(np.datetime64('NaT')), 'NaT')
assert_equal(repr(np.datetime64('NaT')),
"np.datetime64('NaT','generic')")
assert_equal(str(np.datetime64('2011-02')), '2011-02')
assert_equal(repr(np.datetime64('2011-02')),
"np.datetime64('2011-02')")
assert_equal(repr(np.datetime64('NaT').astype(np.dtype("datetime64[ns]"))),
"np.datetime64('NaT','ns')")
# None gets constructed as NaT
assert_equal(np.datetime64(None), np.datetime64('NaT'))
# Default construction of NaT is in generic units
assert_equal(np.datetime64().dtype, np.dtype('M8'))
assert_equal(np.datetime64('NaT').dtype, np.dtype('M8'))
# Construction from integers requires a specified unit
assert_raises(ValueError, np.datetime64, 17)
# When constructing from a scalar or zero-dimensional array,
# it either keeps the units or you can override them.
a = np.datetime64('2000-03-18T16', 'h')
b = np.array('2000-03-18T16', dtype='M8[h]')
assert_equal(a.dtype, np.dtype('M8[h]'))
assert_equal(b.dtype, np.dtype('M8[h]'))
assert_equal(np.datetime64(a), a)
assert_equal(np.datetime64(a).dtype, np.dtype('M8[h]'))
assert_equal(np.datetime64(b), a)
assert_equal(np.datetime64(b).dtype, np.dtype('M8[h]'))
assert_equal(np.datetime64(a, 's'), a)
assert_equal(np.datetime64(a, 's').dtype, np.dtype('M8[s]'))
assert_equal(np.datetime64(b, 's'), a)
assert_equal(np.datetime64(b, 's').dtype, np.dtype('M8[s]'))
# Construction from datetime.date
assert_equal(np.datetime64('1945-03-25'),
np.datetime64(datetime.date(1945, 3, 25)))
assert_equal(np.datetime64('2045-03-25', 'D'),
np.datetime64(datetime.date(2045, 3, 25), 'D'))
# Construction from datetime.datetime
assert_equal(np.datetime64('1980-01-25T14:36:22.5'),
np.datetime64(datetime.datetime(1980, 1, 25,
14, 36, 22, 500000)))
# Construction with time units from a date is okay
assert_equal(np.datetime64('1920-03-13', 'h'),
np.datetime64('1920-03-13T00'))
assert_equal(np.datetime64('1920-03', 'm'),
np.datetime64('1920-03-01T00:00'))
assert_equal(np.datetime64('1920', 's'),
np.datetime64('1920-01-01T00:00:00'))
assert_equal(np.datetime64(datetime.date(2045, 3, 25), 'ms'),
np.datetime64('2045-03-25T00:00:00.000'))
# Construction with date units from a datetime is also okay
assert_equal(np.datetime64('1920-03-13T18', 'D'),
np.datetime64('1920-03-13'))
assert_equal(np.datetime64('1920-03-13T18:33:12', 'M'),
np.datetime64('1920-03'))
assert_equal(np.datetime64('1920-03-13T18:33:12.5', 'Y'),
np.datetime64('1920'))
def test_datetime_scalar_construction_timezone(self):
msg = "no explicit representation of timezones available for " \
"np.datetime64"
# verify that supplying an explicit timezone works, but is deprecated
with pytest.warns(UserWarning, match=msg):
assert_equal(np.datetime64('2000-01-01T00Z'),
np.datetime64('2000-01-01T00'))
with pytest.warns(UserWarning, match=msg):
assert_equal(np.datetime64('2000-01-01T00-08'),
np.datetime64('2000-01-01T08'))
def test_datetime_array_find_type(self):
dt = np.datetime64('1970-01-01', 'M')
arr = np.array([dt])
assert_equal(arr.dtype, np.dtype('M8[M]'))
# at the moment, we don't automatically convert these to datetime64
dt = datetime.date(1970, 1, 1)
arr = np.array([dt])
assert_equal(arr.dtype, np.dtype('O'))
dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
arr = np.array([dt])
assert_equal(arr.dtype, np.dtype('O'))
# find "supertype" for non-dates and dates
b = np.bool(True)
dm = np.datetime64('1970-01-01', 'M')
d = datetime.date(1970, 1, 1)
dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
arr = np.array([b, dm])
assert_equal(arr.dtype, np.dtype('O'))
arr = np.array([b, d])
assert_equal(arr.dtype, np.dtype('O'))
arr = np.array([b, dt])
assert_equal(arr.dtype, np.dtype('O'))
arr = np.array([d, d]).astype('datetime64')
assert_equal(arr.dtype, np.dtype('M8[D]'))
arr = np.array([dt, dt]).astype('datetime64')
assert_equal(arr.dtype, np.dtype('M8[us]'))
@pytest.mark.parametrize("unit", [
# test all date / time units and use
# "generic" to select generic unit
("Y"), ("M"), ("W"), ("D"), ("h"), ("m"),
("s"), ("ms"), ("us"), ("ns"), ("ps"),
("fs"), ("as"), ("generic")])
def test_timedelta_np_int_construction(self, unit):
# regression test for gh-7617
if unit != "generic":
assert_equal(np.timedelta64(np.int64(123), unit),
np.timedelta64(123, unit))
else:
assert_equal(np.timedelta64(np.int64(123)),
np.timedelta64(123))
def test_timedelta_scalar_construction(self):
    """Exercise np.timedelta64 scalar construction: unit equivalences,
    defaults, NaT, str/repr, unit preservation and override, conversion
    from datetime.timedelta, and rejection of casts across nonlinear
    (month/year) unit boundaries."""
    # Construct with different units
    assert_equal(np.timedelta64(7, 'D'),
                 np.timedelta64(1, 'W'))
    assert_equal(np.timedelta64(120, 's'),
                 np.timedelta64(2, 'm'))

    # Default construction means 0
    assert_equal(np.timedelta64(), np.timedelta64(0))

    # None gets constructed as NaT
    assert_equal(np.timedelta64(None), np.timedelta64('NaT'))

    # Some basic strings and repr
    assert_equal(str(np.timedelta64('NaT')), 'NaT')
    assert_equal(repr(np.timedelta64('NaT')),
                 "np.timedelta64('NaT')")
    assert_equal(str(np.timedelta64(3, 's')), '3 seconds')
    assert_equal(repr(np.timedelta64(-3, 's')),
                 "np.timedelta64(-3,'s')")
    assert_equal(repr(np.timedelta64(12)),
                 "np.timedelta64(12)")

    # Construction from an integer produces generic units
    assert_equal(np.timedelta64(12).dtype, np.dtype('m8'))

    # When constructing from a scalar or zero-dimensional array,
    # it either keeps the units or you can override them.
    a = np.timedelta64(2, 'h')
    b = np.array(2, dtype='m8[h]')
    assert_equal(a.dtype, np.dtype('m8[h]'))
    assert_equal(b.dtype, np.dtype('m8[h]'))
    assert_equal(np.timedelta64(a), a)
    assert_equal(np.timedelta64(a).dtype, np.dtype('m8[h]'))
    assert_equal(np.timedelta64(b), a)
    assert_equal(np.timedelta64(b).dtype, np.dtype('m8[h]'))
    # overriding with a finer unit rescales the value (2 h == 7200 s)
    assert_equal(np.timedelta64(a, 's'), a)
    assert_equal(np.timedelta64(a, 's').dtype, np.dtype('m8[s]'))
    assert_equal(np.timedelta64(b, 's'), a)
    assert_equal(np.timedelta64(b, 's').dtype, np.dtype('m8[s]'))

    # Construction from datetime.timedelta
    assert_equal(np.timedelta64(5, 'D'),
                 np.timedelta64(datetime.timedelta(days=5)))
    assert_equal(np.timedelta64(102347621, 's'),
                 np.timedelta64(datetime.timedelta(seconds=102347621)))
    assert_equal(np.timedelta64(-10234760000, 'us'),
                 np.timedelta64(datetime.timedelta(
                     microseconds=-10234760000)))
    assert_equal(np.timedelta64(10234760000, 'us'),
                 np.timedelta64(datetime.timedelta(
                     microseconds=10234760000)))
    assert_equal(np.timedelta64(1023476, 'ms'),
                 np.timedelta64(datetime.timedelta(milliseconds=1023476)))
    assert_equal(np.timedelta64(10, 'm'),
                 np.timedelta64(datetime.timedelta(minutes=10)))
    assert_equal(np.timedelta64(281, 'h'),
                 np.timedelta64(datetime.timedelta(hours=281)))
    assert_equal(np.timedelta64(28, 'W'),
                 np.timedelta64(datetime.timedelta(weeks=28)))

    # Cannot construct across nonlinear time unit boundaries
    # (months and years have variable length, so converting to or from
    # day-or-finer units is ambiguous and must raise TypeError)
    a = np.timedelta64(3, 's')
    assert_raises(TypeError, np.timedelta64, a, 'M')
    assert_raises(TypeError, np.timedelta64, a, 'Y')
    a = np.timedelta64(6, 'M')
    assert_raises(TypeError, np.timedelta64, a, 'D')
    assert_raises(TypeError, np.timedelta64, a, 'h')
    a = np.timedelta64(1, 'Y')
    assert_raises(TypeError, np.timedelta64, a, 'D')
    assert_raises(TypeError, np.timedelta64, a, 'm')
    a = datetime.timedelta(seconds=3)
    assert_raises(TypeError, np.timedelta64, a, 'M')
    assert_raises(TypeError, np.timedelta64, a, 'Y')
    a = datetime.timedelta(weeks=3)
    assert_raises(TypeError, np.timedelta64, a, 'M')
    assert_raises(TypeError, np.timedelta64, a, 'Y')
    a = datetime.timedelta()
    assert_raises(TypeError, np.timedelta64, a, 'M')
    assert_raises(TypeError, np.timedelta64, a, 'Y')
def test_timedelta_object_array_conversion(self):
# Regression test for gh-11096
inputs = [datetime.timedelta(28),
datetime.timedelta(30),
datetime.timedelta(31)]
expected = np.array([28, 30, 31], dtype='timedelta64[D]')
actual = np.array(inputs, dtype='timedelta64[D]')
assert_equal(expected, actual)
def test_timedelta_0_dim_object_array_conversion(self):
# Regression test for gh-11151
test = np.array(datetime.timedelta(seconds=20))
actual = test.astype(np.timedelta64)
# expected value from the array constructor workaround
# described in above issue
expected = np.array(datetime.timedelta(seconds=20),
np.timedelta64)
assert_equal(actual, expected)
def test_timedelta_nat_format(self):
# gh-17552
assert_equal('NaT', f'{np.timedelta64("nat")}')
def test_timedelta_scalar_construction_units(self):
    """Unit detection when constructing np.datetime64 from strings.

    NOTE(review): despite the name, this exercises np.datetime64 (not
    timedelta64) — the unit is inferred from the precision of the
    string: year, month, day, then h/m/s, then one of ms/us/ns/ps/fs/as
    chosen per group of three fractional digits.
    """
    # String construction detecting units
    assert_equal(np.datetime64('2010').dtype,
                 np.dtype('M8[Y]'))
    assert_equal(np.datetime64('2010-03').dtype,
                 np.dtype('M8[M]'))
    assert_equal(np.datetime64('2010-03-12').dtype,
                 np.dtype('M8[D]'))
    assert_equal(np.datetime64('2010-03-12T17').dtype,
                 np.dtype('M8[h]'))
    assert_equal(np.datetime64('2010-03-12T17:15').dtype,
                 np.dtype('M8[m]'))
    assert_equal(np.datetime64('2010-03-12T17:15:08').dtype,
                 np.dtype('M8[s]'))

    # 1-3 fractional digits -> milliseconds
    assert_equal(np.datetime64('2010-03-12T17:15:08.1').dtype,
                 np.dtype('M8[ms]'))
    assert_equal(np.datetime64('2010-03-12T17:15:08.12').dtype,
                 np.dtype('M8[ms]'))
    assert_equal(np.datetime64('2010-03-12T17:15:08.123').dtype,
                 np.dtype('M8[ms]'))
    # 4-6 -> microseconds
    assert_equal(np.datetime64('2010-03-12T17:15:08.1234').dtype,
                 np.dtype('M8[us]'))
    assert_equal(np.datetime64('2010-03-12T17:15:08.12345').dtype,
                 np.dtype('M8[us]'))
    assert_equal(np.datetime64('2010-03-12T17:15:08.123456').dtype,
                 np.dtype('M8[us]'))
    # 7-9 -> nanoseconds
    assert_equal(np.datetime64('1970-01-01T00:00:02.1234567').dtype,
                 np.dtype('M8[ns]'))
    assert_equal(np.datetime64('1970-01-01T00:00:02.12345678').dtype,
                 np.dtype('M8[ns]'))
    assert_equal(np.datetime64('1970-01-01T00:00:02.123456789').dtype,
                 np.dtype('M8[ns]'))
    # 10-12 -> picoseconds
    assert_equal(np.datetime64('1970-01-01T00:00:02.1234567890').dtype,
                 np.dtype('M8[ps]'))
    assert_equal(np.datetime64('1970-01-01T00:00:02.12345678901').dtype,
                 np.dtype('M8[ps]'))
    assert_equal(np.datetime64('1970-01-01T00:00:02.123456789012').dtype,
                 np.dtype('M8[ps]'))
    # 13-15 -> femtoseconds
    assert_equal(np.datetime64(
        '1970-01-01T00:00:02.1234567890123').dtype,
        np.dtype('M8[fs]'))
    assert_equal(np.datetime64(
        '1970-01-01T00:00:02.12345678901234').dtype,
        np.dtype('M8[fs]'))
    assert_equal(np.datetime64(
        '1970-01-01T00:00:02.123456789012345').dtype,
        np.dtype('M8[fs]'))
    # 16-18 -> attoseconds
    assert_equal(np.datetime64(
        '1970-01-01T00:00:02.1234567890123456').dtype,
        np.dtype('M8[as]'))
    assert_equal(np.datetime64(
        '1970-01-01T00:00:02.12345678901234567').dtype,
        np.dtype('M8[as]'))
    assert_equal(np.datetime64(
        '1970-01-01T00:00:02.123456789012345678').dtype,
        np.dtype('M8[as]'))

    # Python date object -> day unit
    assert_equal(np.datetime64(datetime.date(2010, 4, 16)).dtype,
                 np.dtype('M8[D]'))
    # Python datetime object -> microsecond unit
    assert_equal(np.datetime64(
        datetime.datetime(2010, 4, 16, 13, 45, 18)).dtype,
        np.dtype('M8[us]'))
    # 'today' special value -> day unit
    assert_equal(np.datetime64('today').dtype,
                 np.dtype('M8[D]'))
    # 'now' special value -> second unit
    assert_equal(np.datetime64('now').dtype,
                 np.dtype('M8[s]'))
def test_datetime_nat_casting(self):
    """NaT survives casts between datetime units (array and scalar
    forms), and NaN floats of every flavor cast to NaT for both
    datetime64 and timedelta64."""
    a = np.array('NaT', dtype='M8[D]')
    b = np.datetime64('NaT', '[D]')

    # Arrays
    assert_equal(a.astype('M8[s]'), np.array('NaT', dtype='M8[s]'))
    assert_equal(a.astype('M8[ms]'), np.array('NaT', dtype='M8[ms]'))
    assert_equal(a.astype('M8[M]'), np.array('NaT', dtype='M8[M]'))
    assert_equal(a.astype('M8[Y]'), np.array('NaT', dtype='M8[Y]'))
    assert_equal(a.astype('M8[W]'), np.array('NaT', dtype='M8[W]'))

    # Scalars -> Scalars
    assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]'))
    assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]'))
    assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]'))
    assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]'))
    assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]'))

    # Arrays -> Scalars
    assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]'))
    assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]'))
    assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]'))
    assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]'))
    assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]'))

    # NaN -> NaT; cover single/double/long-double floats, the complex
    # variants, and half precision (last element 0 maps to epoch)
    nan = np.array([np.nan] * 8 + [0])
    fnan = nan.astype('f')
    lnan = nan.astype('g')
    cnan = nan.astype('D')
    cfnan = nan.astype('F')
    clnan = nan.astype('G')
    hnan = nan.astype(np.half)

    nat = np.array([np.datetime64('NaT')] * 8 + [np.datetime64(0, 'D')])
    assert_equal(nan.astype('M8[ns]'), nat)
    assert_equal(fnan.astype('M8[ns]'), nat)
    assert_equal(lnan.astype('M8[ns]'), nat)
    assert_equal(cnan.astype('M8[ns]'), nat)
    assert_equal(cfnan.astype('M8[ns]'), nat)
    assert_equal(clnan.astype('M8[ns]'), nat)
    assert_equal(hnan.astype('M8[ns]'), nat)

    nat = np.array([np.timedelta64('NaT')] * 8 + [np.timedelta64(0)])
    assert_equal(nan.astype('timedelta64[ns]'), nat)
    assert_equal(fnan.astype('timedelta64[ns]'), nat)
    assert_equal(lnan.astype('timedelta64[ns]'), nat)
    assert_equal(cnan.astype('timedelta64[ns]'), nat)
    assert_equal(cfnan.astype('timedelta64[ns]'), nat)
    assert_equal(clnan.astype('timedelta64[ns]'), nat)
    assert_equal(hnan.astype('timedelta64[ns]'), nat)
def test_days_creation(self):
assert_equal(np.array('1599', dtype='M8[D]').astype('i8'),
(1600 - 1970) * 365 - (1972 - 1600) / 4 + 3 - 365)
assert_equal(np.array('1600', dtype='M8[D]').astype('i8'),
(1600 - 1970) * 365 - (1972 - 1600) / 4 + 3)
assert_equal(np.array('1601', dtype='M8[D]').astype('i8'),
(1600 - 1970) * 365 - (1972 - 1600) / 4 + 3 + 366)
assert_equal(np.array('1900', dtype='M8[D]').astype('i8'),
(1900 - 1970) * 365 - (1970 - 1900) // 4)
assert_equal(np.array('1901', dtype='M8[D]').astype('i8'),
(1900 - 1970) * 365 - (1970 - 1900) // 4 + 365)
assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3 * 365 - 1)
assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2 * 365 - 1)
assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1 * 365)
assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0 * 365)
assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1 * 365)
assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2 * 365)
assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3 * 365 + 1)
assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4 * 365 + 1)
assert_equal(np.array('2000', dtype='M8[D]').astype('i8'),
(2000 - 1970) * 365 + (2000 - 1972) // 4)
assert_equal(np.array('2001', dtype='M8[D]').astype('i8'),
(2000 - 1970) * 365 + (2000 - 1972) // 4 + 366)
assert_equal(np.array('2400', dtype='M8[D]').astype('i8'),
(2400 - 1970) * 365 + (2400 - 1972) // 4 - 3)
assert_equal(np.array('2401', dtype='M8[D]').astype('i8'),
(2400 - 1970) * 365 + (2400 - 1972) // 4 - 3 + 366)
assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'),
(1600 - 1970) * 365 - (1972 - 1600) // 4 + 3 + 31 + 28)
assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'),
(1600 - 1970) * 365 - (1972 - 1600) // 4 + 3 + 31 + 29)
assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'),
(2000 - 1970) * 365 + (2000 - 1972) // 4 + 31 + 28)
assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'),
(2000 - 1970) * 365 + (2000 - 1972) // 4 + 31 + 29)
assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'),
(2000 - 1970) * 365 + (2000 - 1972) // 4 + 366 + 31 + 28 + 21)
def test_days_to_pydate(self):
assert_equal(np.array('1599', dtype='M8[D]').astype('O'),
datetime.date(1599, 1, 1))
assert_equal(np.array('1600', dtype='M8[D]').astype('O'),
datetime.date(1600, 1, 1))
assert_equal(np.array('1601', dtype='M8[D]').astype('O'),
datetime.date(1601, 1, 1))
assert_equal(np.array('1900', dtype='M8[D]').astype('O'),
datetime.date(1900, 1, 1))
assert_equal(np.array('1901', dtype='M8[D]').astype('O'),
datetime.date(1901, 1, 1))
assert_equal(np.array('2000', dtype='M8[D]').astype('O'),
datetime.date(2000, 1, 1))
assert_equal(np.array('2001', dtype='M8[D]').astype('O'),
datetime.date(2001, 1, 1))
assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'),
datetime.date(1600, 2, 29))
assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'),
datetime.date(1600, 3, 1))
assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'),
datetime.date(2001, 3, 22))
def test_dtype_comparison(self):
assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]')))
assert_(np.dtype('M8[us]') != np.dtype('M8[ms]'))
assert_(np.dtype('M8[2D]') != np.dtype('M8[D]'))
assert_(np.dtype('M8[D]') != np.dtype('M8[2D]'))
def test_pydatetime_creation(self):
a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]')
assert_equal(a[0], a[1])
a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]')
assert_equal(a[0], a[1])
a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]')
assert_equal(a[0], a[1])
# Will fail if the date changes during the exact right moment
a = np.array(['today', datetime.date.today()], dtype='M8[D]')
assert_equal(a[0], a[1])
# datetime.datetime.now() returns local time, not UTC
#a = np.array(['now', datetime.datetime.now()], dtype='M8[s]')
#assert_equal(a[0], a[1])
# we can give a datetime.date time units
assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'),
np.array(np.datetime64('1960-03-12T00:00:00')))
def test_datetime_string_conversion(self):
    """Round-trip conversions between datetime64 arrays and byte-string
    (S) / unicode (U) arrays, via both astype and slice assignment."""
    a = ['2011-03-16', '1920-01-01', '2013-05-19']
    str_a = np.array(a, dtype='S')
    uni_a = np.array(a, dtype='U')
    dt_a = np.array(a, dtype='M')

    # String to datetime
    assert_equal(dt_a, str_a.astype('M'))
    assert_equal(dt_a.dtype, str_a.astype('M').dtype)
    dt_b = np.empty_like(dt_a)
    dt_b[...] = str_a
    assert_equal(dt_a, dt_b)

    # Datetime to string
    assert_equal(str_a, dt_a.astype('S0'))
    str_b = np.empty_like(str_a)
    str_b[...] = dt_a
    assert_equal(str_a, str_b)

    # Unicode to datetime
    assert_equal(dt_a, uni_a.astype('M'))
    assert_equal(dt_a.dtype, uni_a.astype('M').dtype)
    dt_b = np.empty_like(dt_a)
    dt_b[...] = uni_a
    assert_equal(dt_a, dt_b)

    # Datetime to unicode
    assert_equal(uni_a, dt_a.astype('U'))
    uni_b = np.empty_like(uni_a)
    uni_b[...] = dt_a
    assert_equal(uni_a, uni_b)

    # Datetime to long string - gh-9712 (string width much larger than
    # the formatted date must still work)
    assert_equal(str_a, dt_a.astype((np.bytes_, 128)))
    str_b = np.empty(str_a.shape, dtype=(np.bytes_, 128))
    str_b[...] = dt_a
    assert_equal(str_a, str_b)
@pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"])
def test_time_byteswapping(self, time_dtype):
    # Casting to the byteswapped variant of a time dtype must preserve
    # the values (including NaT)...
    times = np.array(["2017", "NaT"], dtype=time_dtype)
    times_swapped = times.astype(times.dtype.newbyteorder())
    assert_array_equal(times, times_swapped)

    # ...and the raw bytes really are swapped: reinterpreting them with
    # a swapped int64 view recovers the native integer representation.
    unswapped = times_swapped.view(np.dtype("int64").newbyteorder())
    assert_array_equal(unswapped, times.view(np.int64))
@pytest.mark.parametrize(["time1", "time2"],
                         [("M8[s]", "M8[D]"), ("m8[s]", "m8[ns]")])
def test_time_byteswapped_cast(self, time1, time2):
    # Casting between two time dtypes must be byteorder-agnostic.
    dtype1 = np.dtype(time1)
    dtype2 = np.dtype(time2)
    times = np.array(["2017", "NaT"], dtype=dtype1)
    expected = times.astype(dtype2)

    # Test that every byte-swapping combination also returns the same
    # results (previous tests check that this comparison works fine).
    res = times.astype(dtype1.newbyteorder()).astype(dtype2)
    assert_array_equal(res, expected)

    res = times.astype(dtype2.newbyteorder())
    assert_array_equal(res, expected)

    res = times.astype(dtype1.newbyteorder()).astype(dtype2.newbyteorder())
    assert_array_equal(res, expected)
@pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"])
@pytest.mark.parametrize("str_dtype", ["U", "S"])
def test_datetime_conversions_byteorders(self, str_dtype, time_dtype):
    """String <-> time casts give identical results for every
    combination of native and swapped source/destination byte order."""
    times = np.array(["2017", "NaT"], dtype=time_dtype)
    # Unfortunately, timedelta does not roundtrip:
    from_strings = np.array(["2017", "NaT"], dtype=str_dtype)
    to_strings = times.astype(str_dtype)  # assume this is correct

    # Check that conversion from times to string works if src is swapped:
    times_swapped = times.astype(times.dtype.newbyteorder())
    res = times_swapped.astype(str_dtype)
    assert_array_equal(res, to_strings)
    # And also if both are swapped:
    res = times_swapped.astype(to_strings.dtype.newbyteorder())
    assert_array_equal(res, to_strings)
    # only destination is swapped:
    res = times.astype(to_strings.dtype.newbyteorder())
    assert_array_equal(res, to_strings)

    # Check that conversion from string to times works if src is swapped:
    from_strings_swapped = from_strings.astype(
        from_strings.dtype.newbyteorder())
    res = from_strings_swapped.astype(time_dtype)
    assert_array_equal(res, times)
    # And if both are swapped:
    res = from_strings_swapped.astype(times.dtype.newbyteorder())
    assert_array_equal(res, times)
    # Only destination is swapped:
    res = from_strings.astype(times.dtype.newbyteorder())
    assert_array_equal(res, times)
def test_datetime_array_str(self):
a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M')
assert_equal(str(a), "['2011-03-16' '1920-01-01' '2013-05-19']")
a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M')
assert_equal(np.array2string(a, separator=', ',
formatter={'datetime': lambda x:
f"'{np.datetime_as_string(x, timezone='UTC')}'"}),
"['2011-03-16T13:55Z', '1920-01-01T03:12Z']")
# Check that one NaT doesn't corrupt subsequent entries
a = np.array(['2010', 'NaT', '2030']).astype('M')
assert_equal(str(a), "['2010' 'NaT' '2030']")
def test_timedelta_array_str(self):
a = np.array([-1, 0, 100], dtype='m')
assert_equal(str(a), "[ -1 0 100]")
a = np.array(['NaT', 'NaT'], dtype='m')
assert_equal(str(a), "['NaT' 'NaT']")
# Check right-alignment with NaTs
a = np.array([-1, 'NaT', 0], dtype='m')
assert_equal(str(a), "[ -1 'NaT' 0]")
a = np.array([-1, 'NaT', 1234567], dtype='m')
assert_equal(str(a), "[ -1 'NaT' 1234567]")
# Test with other byteorder:
a = np.array([-1, 'NaT', 1234567], dtype='>m')
assert_equal(str(a), "[ -1 'NaT' 1234567]")
a = np.array([-1, 'NaT', 1234567], dtype='<m')
assert_equal(str(a), "[ -1 'NaT' 1234567]")
def test_timedelta_array_with_nats(self):
# Regression test for gh-29497.
x = np.array([np.timedelta64('nat'),
np.timedelta64('nat', 's'),
np.timedelta64('nat', 'ms'),
np.timedelta64(123, 'ms')])
for td in x[:3]:
assert np.isnat(td)
def test_timedelta_array_nat_assignment(self):
# Regression test for gh-29497.
x = np.zeros(3, dtype='m8[ms]')
x[1] = np.timedelta64('nat', 's')
assert np.isnat(x[1])
def test_pickle(self):
    """Pickle round-trips for datetime dtypes and scalars across all
    supported protocols, plus loading of legacy pickles produced by
    numpy 1.6."""
    # Check that pickle roundtripping works
    for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
        dt = np.dtype('M8[7D]')
        assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)
        dt = np.dtype('M8[W]')
        assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)
        scalar = np.datetime64('2016-01-01T00:00:00.000000000')
        assert_equal(pickle.loads(pickle.dumps(scalar, protocol=proto)),
                     scalar)
        delta = scalar - np.datetime64('2015-01-01T00:00:00.000000000')
        assert_equal(pickle.loads(pickle.dumps(delta, protocol=proto)),
                     delta)

    # Check that loading pickles from 1.6 works
    # (the legacy __setstate__ format triggers a deprecation warning
    # about the `align` argument)
    with pytest.warns(np.exceptions.VisibleDeprecationWarning,
                      match=r".*align should be passed"):
        pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n"\
              b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n"\
              b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb."
        assert_equal(pickle.loads(pkl), np.dtype('<M8[7D]'))
        pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n"\
              b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'W'\np6\n"\
              b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
        assert_equal(pickle.loads(pkl), np.dtype('<M8[W]'))
        pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n"\
              b"(I4\nS'>'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n"\
              b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
        assert_equal(pickle.loads(pkl), np.dtype('>M8[us]'))
def test_gh_29555(self):
# check that dtype metadata round-trips when none
dt = np.dtype('>M8[us]')
assert dt.metadata is None
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
res = pickle.loads(pickle.dumps(dt, protocol=proto))
assert_equal(res, dt)
assert res.metadata is None
def test_setstate(self):
"Verify that datetime dtype __setstate__ can handle bad arguments"
dt = np.dtype('>M8[us]')
assert_raises(ValueError, dt.__setstate__,
(4, '>', None, None, None, -1, -1, 0, 1))
assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
assert_raises(TypeError, dt.__setstate__,
(4, '>', None, None, None, -1, -1, 0, ({}, 'xxx')))
assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
def test_dtype_promotion(self):
    """np.promote_types on datetime/timedelta dtypes computes the gcd
    of the unit metadata; incompatible or overflowing unit pairs raise."""
    # datetime <op> datetime computes the metadata gcd
    # timedelta <op> timedelta computes the metadata gcd
    for mM in ['m', 'M']:
        assert_equal(
            np.promote_types(np.dtype(mM + '8[2Y]'), np.dtype(mM + '8[2Y]')),
            np.dtype(mM + '8[2Y]'))
        assert_equal(
            np.promote_types(np.dtype(mM + '8[12Y]'), np.dtype(mM + '8[15Y]')),
            np.dtype(mM + '8[3Y]'))
        assert_equal(
            np.promote_types(np.dtype(mM + '8[62M]'), np.dtype(mM + '8[24M]')),
            np.dtype(mM + '8[2M]'))
        assert_equal(
            np.promote_types(np.dtype(mM + '8[1W]'), np.dtype(mM + '8[2D]')),
            np.dtype(mM + '8[1D]'))
        assert_equal(
            np.promote_types(np.dtype(mM + '8[W]'), np.dtype(mM + '8[13s]')),
            np.dtype(mM + '8[s]'))
        assert_equal(
            np.promote_types(np.dtype(mM + '8[13W]'), np.dtype(mM + '8[49s]')),
            np.dtype(mM + '8[7s]'))
    # timedelta <op> timedelta raises when there is no reasonable gcd
    # (years/months have no fixed length in days or weeks)
    assert_raises(TypeError, np.promote_types,
                  np.dtype('m8[Y]'), np.dtype('m8[D]'))
    assert_raises(TypeError, np.promote_types,
                  np.dtype('m8[M]'), np.dtype('m8[W]'))
    # timedelta and float cannot be safely cast with each other
    assert_raises(TypeError, np.promote_types, "float32", "m8")
    assert_raises(TypeError, np.promote_types, "m8", "float32")
    assert_raises(TypeError, np.promote_types, "uint64", "m8")
    assert_raises(TypeError, np.promote_types, "m8", "uint64")
    # timedelta <op> timedelta may overflow with big unit ranges
    # (the gcd in the finer unit does not fit in 64 bits)
    assert_raises(OverflowError, np.promote_types,
                  np.dtype('m8[W]'), np.dtype('m8[fs]'))
    assert_raises(OverflowError, np.promote_types,
                  np.dtype('m8[s]'), np.dtype('m8[as]'))
def test_cast_overflow(self):
# gh-4486
def cast():
numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("<M8[D]")
assert_raises(OverflowError, cast)
def cast2():
numpy.datetime64("2014").astype("<M8[fs]")
assert_raises(OverflowError, cast2)
def test_pyobject_roundtrip(self):
    """Every datetime64 unit (including multiples like 16fs or 20us)
    must roundtrip exactly through an object array, covering negative
    years, NaT, and arbitrary stored integers."""
    # All datetime types should be able to roundtrip through object
    a = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0,
                  -1020040340, -2942398, -1, 0, 1, 234523453, 1199164176],
                 dtype=np.int64)
    # With date units
    for unit in ['M8[D]', 'M8[W]', 'M8[M]', 'M8[Y]']:
        b = a.copy().view(dtype=unit)
        # overwrite the leading slots with boundary dates; the tail
        # keeps the raw int64 values from `a`
        b[0] = '-0001-01-01'
        b[1] = '-0001-12-31'
        b[2] = '0000-01-01'
        b[3] = '0001-01-01'
        b[4] = '1969-12-31'
        b[5] = '1970-01-01'
        b[6] = '9999-12-31'
        b[7] = '10000-01-01'
        b[8] = 'NaT'

        assert_equal(b.astype(object).astype(unit), b,
                     f"Error roundtripping unit {unit}")
    # With time units
    for unit in ['M8[as]', 'M8[16fs]', 'M8[ps]', 'M8[us]',
                 'M8[300as]', 'M8[20us]']:
        b = a.copy().view(dtype=unit)
        b[0] = '-0001-01-01T00'
        b[1] = '-0001-12-31T00'
        b[2] = '0000-01-01T00'
        b[3] = '0001-01-01T00'
        b[4] = '1969-12-31T23:59:59.999999'
        b[5] = '1970-01-01T00'
        b[6] = '9999-12-31T23:59:59.999999'
        b[7] = '10000-01-01T00'
        b[8] = 'NaT'

        assert_equal(b.astype(object).astype(unit), b,
                     f"Error roundtripping unit {unit}")
def test_month_truncation(self):
# Make sure that months are truncating correctly
assert_equal(np.array('1945-03-01', dtype='M8[M]'),
np.array('1945-03-31', dtype='M8[M]'))
assert_equal(np.array('1969-11-01', dtype='M8[M]'),
np.array('1969-11-30T23:59:59.99999', dtype='M').astype('M8[M]'))
assert_equal(np.array('1969-12-01', dtype='M8[M]'),
np.array('1969-12-31T23:59:59.99999', dtype='M').astype('M8[M]'))
assert_equal(np.array('1970-01-01', dtype='M8[M]'),
np.array('1970-01-31T23:59:59.99999', dtype='M').astype('M8[M]'))
assert_equal(np.array('1980-02-01', dtype='M8[M]'),
np.array('1980-02-29T23:59:59.99999', dtype='M').astype('M8[M]'))
def test_different_unit_comparison(self):
    """Datetimes compare equal across units when they denote the same
    instant; comparing datetime against timedelta is a TypeError."""
    # Check some years with date units
    for unit1 in ['Y', 'M', 'D']:
        dt1 = np.dtype(f'M8[{unit1}]')
        for unit2 in ['Y', 'M', 'D']:
            dt2 = np.dtype(f'M8[{unit2}]')
            assert_equal(np.array('1945', dtype=dt1),
                         np.array('1945', dtype=dt2))
            assert_equal(np.array('1970', dtype=dt1),
                         np.array('1970', dtype=dt2))
            assert_equal(np.array('9999', dtype=dt1),
                         np.array('9999', dtype=dt2))
            assert_equal(np.array('10000', dtype=dt1),
                         np.array('10000-01-01', dtype=dt2))
            assert_equal(np.datetime64('1945', unit1),
                         np.datetime64('1945', unit2))
            assert_equal(np.datetime64('1970', unit1),
                         np.datetime64('1970', unit2))
            assert_equal(np.datetime64('9999', unit1),
                         np.datetime64('9999', unit2))
            assert_equal(np.datetime64('10000', unit1),
                         np.datetime64('10000-01-01', unit2))
    # Check some datetimes with time units (including unit multiples
    # such as 6h and 10ms on the left-hand side)
    for unit1 in ['6h', 'h', 'm', 's', '10ms', 'ms', 'us']:
        dt1 = np.dtype(f'M8[{unit1}]')
        for unit2 in ['h', 'm', 's', 'ms', 'us']:
            dt2 = np.dtype(f'M8[{unit2}]')
            assert_equal(np.array('1945-03-12T18', dtype=dt1),
                         np.array('1945-03-12T18', dtype=dt2))
            assert_equal(np.array('1970-03-12T18', dtype=dt1),
                         np.array('1970-03-12T18', dtype=dt2))
            assert_equal(np.array('9999-03-12T18', dtype=dt1),
                         np.array('9999-03-12T18', dtype=dt2))
            assert_equal(np.array('10000-01-01T00', dtype=dt1),
                         np.array('10000-01-01T00', dtype=dt2))
            assert_equal(np.datetime64('1945-03-12T18', unit1),
                         np.datetime64('1945-03-12T18', unit2))
            assert_equal(np.datetime64('1970-03-12T18', unit1),
                         np.datetime64('1970-03-12T18', unit2))
            assert_equal(np.datetime64('9999-03-12T18', unit1),
                         np.datetime64('9999-03-12T18', unit2))
            assert_equal(np.datetime64('10000-01-01T00', unit1),
                         np.datetime64('10000-01-01T00', unit2))
    # Check some days with units that won't overflow
    for unit1 in ['D', '12h', 'h', 'm', 's', '4s', 'ms', 'us']:
        dt1 = np.dtype(f'M8[{unit1}]')
        for unit2 in ['D', 'h', 'm', 's', 'ms', 'us']:
            dt2 = np.dtype(f'M8[{unit2}]')
            assert_(np.equal(np.array('1932-02-17', dtype='M').astype(dt1),
                             np.array('1932-02-17T00:00:00', dtype='M').astype(dt2),
                             casting='unsafe'))
            assert_(np.equal(np.array('10000-04-27', dtype='M').astype(dt1),
                             np.array('10000-04-27T00:00:00', dtype='M').astype(dt2),
                             casting='unsafe'))

    # Shouldn't be able to compare datetime and timedelta
    a = np.array('2012-12-21', dtype='M8[D]')
    b = np.array(3, dtype='m8[D]')
    assert_raises(TypeError, np.less, a, b)
    # not even if "unsafe"
    assert_raises(TypeError, np.less, a, b, casting='unsafe')
def test_datetime_like(self):
a = np.array([3], dtype='m8[4D]')
b = np.array(['2012-12-21'], dtype='M8[D]')
assert_equal(np.ones_like(a).dtype, a.dtype)
assert_equal(np.zeros_like(a).dtype, a.dtype)
assert_equal(np.empty_like(a).dtype, a.dtype)
assert_equal(np.ones_like(b).dtype, b.dtype)
assert_equal(np.zeros_like(b).dtype, b.dtype)
assert_equal(np.empty_like(b).dtype, b.dtype)
def test_datetime_unary(self):
for tda, tdb, tdzero, tdone, tdmone in \
[
# One-dimensional arrays
(np.array([3], dtype='m8[D]'),
np.array([-3], dtype='m8[D]'),
np.array([0], dtype='m8[D]'),
np.array([1], dtype='m8[D]'),
np.array([-1], dtype='m8[D]')),
# NumPy scalars
(np.timedelta64(3, '[D]'),
np.timedelta64(-3, '[D]'),
np.timedelta64(0, '[D]'),
np.timedelta64(1, '[D]'),
np.timedelta64(-1, '[D]'))]:
# negative ufunc
assert_equal(-tdb, tda)
assert_equal((-tdb).dtype, tda.dtype)
assert_equal(np.negative(tdb), tda)
assert_equal(np.negative(tdb).dtype, tda.dtype)
# positive ufunc
assert_equal(np.positive(tda), tda)
assert_equal(np.positive(tda).dtype, tda.dtype)
assert_equal(np.positive(tdb), tdb)
assert_equal(np.positive(tdb).dtype, tdb.dtype)
# absolute ufunc
assert_equal(np.absolute(tdb), tda)
assert_equal(np.absolute(tdb).dtype, tda.dtype)
# sign ufunc
assert_equal(np.sign(tda), tdone)
assert_equal(np.sign(tdb), tdmone)
assert_equal(np.sign(tdzero), tdzero)
assert_equal(np.sign(tda).dtype, tda.dtype)
# The ufuncs always produce native-endian results
assert_
def test_datetime_add(self):
    """Addition rules for datetime64 (M8) and timedelta64 (m8), for
    both one-dimensional arrays and scalars: m8 combines with m8/int/
    bool, M8 combines with m8/int/bool (NaT propagates), mixed units
    promote to the finer unit, and M8 + M8 is a TypeError."""
    for dta, dtb, dtc, dtnat, tda, tdb, tdc in \
            [
             # One-dimensional arrays
             (np.array(['2012-12-21'], dtype='M8[D]'),
              np.array(['2012-12-24'], dtype='M8[D]'),
              np.array(['2012-12-21T11'], dtype='M8[h]'),
              np.array(['NaT'], dtype='M8[D]'),
              np.array([3], dtype='m8[D]'),
              np.array([11], dtype='m8[h]'),
              np.array([3 * 24 + 11], dtype='m8[h]')),
             # NumPy scalars
             (np.datetime64('2012-12-21', '[D]'),
              np.datetime64('2012-12-24', '[D]'),
              np.datetime64('2012-12-21T11', '[h]'),
              np.datetime64('NaT', '[D]'),
              np.timedelta64(3, '[D]'),
              np.timedelta64(11, '[h]'),
              np.timedelta64(3 * 24 + 11, '[h]'))]:
        # m8 + m8
        assert_equal(tda + tdb, tdc)
        assert_equal((tda + tdb).dtype, np.dtype('m8[h]'))
        # m8 + bool
        assert_equal(tdb + True, tdb + 1)
        assert_equal((tdb + True).dtype, np.dtype('m8[h]'))
        # m8 + int
        assert_equal(tdb + 3 * 24, tdc)
        assert_equal((tdb + 3 * 24).dtype, np.dtype('m8[h]'))
        # bool + m8
        assert_equal(False + tdb, tdb)
        assert_equal((False + tdb).dtype, np.dtype('m8[h]'))
        # int + m8
        assert_equal(3 * 24 + tdb, tdc)
        assert_equal((3 * 24 + tdb).dtype, np.dtype('m8[h]'))
        # M8 + bool
        assert_equal(dta + True, dta + 1)
        assert_equal(dtnat + True, dtnat)
        assert_equal((dta + True).dtype, np.dtype('M8[D]'))
        # M8 + int
        assert_equal(dta + 3, dtb)
        assert_equal(dtnat + 3, dtnat)
        assert_equal((dta + 3).dtype, np.dtype('M8[D]'))
        # bool + M8
        assert_equal(False + dta, dta)
        assert_equal(False + dtnat, dtnat)
        assert_equal((False + dta).dtype, np.dtype('M8[D]'))
        # int + M8
        assert_equal(3 + dta, dtb)
        assert_equal(3 + dtnat, dtnat)
        assert_equal((3 + dta).dtype, np.dtype('M8[D]'))
        # M8 + m8
        assert_equal(dta + tda, dtb)
        assert_equal(dtnat + tda, dtnat)
        assert_equal((dta + tda).dtype, np.dtype('M8[D]'))
        # m8 + M8
        assert_equal(tda + dta, dtb)
        assert_equal(tda + dtnat, dtnat)
        assert_equal((tda + dta).dtype, np.dtype('M8[D]'))

        # In M8 + m8, the result goes to higher precision
        assert_equal(np.add(dta, tdb, casting='unsafe'), dtc)
        assert_equal(np.add(dta, tdb, casting='unsafe').dtype,
                     np.dtype('M8[h]'))
        assert_equal(np.add(tdb, dta, casting='unsafe'), dtc)
        assert_equal(np.add(tdb, dta, casting='unsafe').dtype,
                     np.dtype('M8[h]'))

        # M8 + M8
        assert_raises(TypeError, np.add, dta, dtb)
def test_datetime_subtract(self):
    """Subtraction rules for datetime64 (M8) and timedelta64 (m8):
    m8 - m8/int/bool, M8 - m8/int/bool (NaT propagates), M8 - M8
    yields m8 at the finer unit, while m8 - M8 and int/bool - M8
    raise TypeError."""
    for dta, dtb, dtc, dtd, dte, dtnat, tda, tdb, tdc in \
            [
             # One-dimensional arrays
             (np.array(['2012-12-21'], dtype='M8[D]'),
              np.array(['2012-12-24'], dtype='M8[D]'),
              np.array(['1940-12-24'], dtype='M8[D]'),
              np.array(['1940-12-24T00'], dtype='M8[h]'),
              np.array(['1940-12-23T13'], dtype='M8[h]'),
              np.array(['NaT'], dtype='M8[D]'),
              np.array([3], dtype='m8[D]'),
              np.array([11], dtype='m8[h]'),
              np.array([3 * 24 - 11], dtype='m8[h]')),
             # NumPy scalars
             (np.datetime64('2012-12-21', '[D]'),
              np.datetime64('2012-12-24', '[D]'),
              np.datetime64('1940-12-24', '[D]'),
              np.datetime64('1940-12-24T00', '[h]'),
              np.datetime64('1940-12-23T13', '[h]'),
              np.datetime64('NaT', '[D]'),
              np.timedelta64(3, '[D]'),
              np.timedelta64(11, '[h]'),
              np.timedelta64(3 * 24 - 11, '[h]'))]:
        # m8 - m8
        assert_equal(tda - tdb, tdc)
        assert_equal((tda - tdb).dtype, np.dtype('m8[h]'))
        assert_equal(tdb - tda, -tdc)
        assert_equal((tdb - tda).dtype, np.dtype('m8[h]'))
        # m8 - bool
        assert_equal(tdc - True, tdc - 1)
        assert_equal((tdc - True).dtype, np.dtype('m8[h]'))
        # m8 - int
        assert_equal(tdc - 3 * 24, -tdb)
        assert_equal((tdc - 3 * 24).dtype, np.dtype('m8[h]'))
        # int - m8
        assert_equal(False - tdb, -tdb)
        assert_equal((False - tdb).dtype, np.dtype('m8[h]'))
        # int - m8
        assert_equal(3 * 24 - tdb, tdc)
        assert_equal((3 * 24 - tdb).dtype, np.dtype('m8[h]'))
        # M8 - bool
        assert_equal(dtb - True, dtb - 1)
        assert_equal(dtnat - True, dtnat)
        assert_equal((dtb - True).dtype, np.dtype('M8[D]'))
        # M8 - int
        assert_equal(dtb - 3, dta)
        assert_equal(dtnat - 3, dtnat)
        assert_equal((dtb - 3).dtype, np.dtype('M8[D]'))
        # M8 - m8
        assert_equal(dtb - tda, dta)
        assert_equal(dtnat - tda, dtnat)
        assert_equal((dtb - tda).dtype, np.dtype('M8[D]'))

        # In M8 - m8, the result goes to higher precision
        assert_equal(np.subtract(dtc, tdb, casting='unsafe'), dte)
        assert_equal(np.subtract(dtc, tdb, casting='unsafe').dtype,
                     np.dtype('M8[h]'))

        # M8 - M8 with different goes to higher precision
        assert_equal(np.subtract(dtc, dtd, casting='unsafe'),
                     np.timedelta64(0, 'h'))
        assert_equal(np.subtract(dtc, dtd, casting='unsafe').dtype,
                     np.dtype('m8[h]'))
        assert_equal(np.subtract(dtd, dtc, casting='unsafe'),
                     np.timedelta64(0, 'h'))
        assert_equal(np.subtract(dtd, dtc, casting='unsafe').dtype,
                     np.dtype('m8[h]'))

        # m8 - M8
        assert_raises(TypeError, np.subtract, tda, dta)
        # bool - M8
        assert_raises(TypeError, np.subtract, False, dta)
        # int - M8
        assert_raises(TypeError, np.subtract, 3, dta)
    def test_datetime_multiply(self):
        """Multiplication rules for datetime64/timedelta64.

        m8 may be scaled by int or float, in either operand order.
        Every combination involving M8, and m8 * m8, raises TypeError.
        NaT operands and inf/nan float factors propagate NaT.
        """
        for dta, tda, tdb, tdc in \
                    [
                     # One-dimensional arrays
                     (np.array(['2012-12-21'], dtype='M8[D]'),
                      np.array([6], dtype='m8[h]'),
                      np.array([9], dtype='m8[h]'),
                      np.array([12], dtype='m8[h]')),
                     # NumPy scalars
                     (np.datetime64('2012-12-21', '[D]'),
                      np.timedelta64(6, '[h]'),
                      np.timedelta64(9, '[h]'),
                      np.timedelta64(12, '[h]'))]:
            # m8 * int
            assert_equal(tda * 2, tdc)
            assert_equal((tda * 2).dtype, np.dtype('m8[h]'))
            # int * m8
            assert_equal(2 * tda, tdc)
            assert_equal((2 * tda).dtype, np.dtype('m8[h]'))
            # m8 * float
            assert_equal(tda * 1.5, tdb)
            assert_equal((tda * 1.5).dtype, np.dtype('m8[h]'))
            # float * m8
            assert_equal(1.5 * tda, tdb)
            assert_equal((1.5 * tda).dtype, np.dtype('m8[h]'))
            # m8 * m8
            assert_raises(TypeError, np.multiply, tda, tdb)
            # M8 * m8
            assert_raises(TypeError, np.multiply, dta, tda)
            # m8 * M8
            assert_raises(TypeError, np.multiply, tda, dta)
            # M8 * int
            assert_raises(TypeError, np.multiply, dta, 2)
            # int * M8
            assert_raises(TypeError, np.multiply, 2, dta)
            # M8 * float
            assert_raises(TypeError, np.multiply, dta, 1.5)
            # float * M8
            assert_raises(TypeError, np.multiply, 1.5, dta)
        # NaTs: silence the RuntimeWarning that nan * m8 would emit
        with warnings.catch_warnings():
            warnings.filterwarnings(
                'ignore', "invalid value encountered in multiply", RuntimeWarning)
            nat = np.timedelta64('NaT')
            def check(a, b, res):
                # multiplication must be commutative
                assert_equal(a * b, res)
                assert_equal(b * a, res)
            for tp in (int, float):
                check(nat, tp(2), nat)
                check(nat, tp(0), nat)
            for f in (float('inf'), float('nan')):
                check(np.timedelta64(1), f, nat)
                check(np.timedelta64(0), f, nat)
                check(nat, f, nat)
    @pytest.mark.parametrize("op1, op2, exp", [
        # m8 same units round down
        (np.timedelta64(7, 's'),
         np.timedelta64(4, 's'),
         1),
        # m8 same units round down with negative
        (np.timedelta64(7, 's'),
         np.timedelta64(-4, 's'),
         -2),
        # m8 same units negative no round down
        (np.timedelta64(8, 's'),
         np.timedelta64(-4, 's'),
         -2),
        # m8 different units
        (np.timedelta64(1, 'm'),
         np.timedelta64(31, 's'),
         1),
        # m8 generic units
        (np.timedelta64(1890),
         np.timedelta64(31),
         60),
        # Y // M works
        (np.timedelta64(2, 'Y'),
         np.timedelta64('13', 'M'),
         1),
        # handle 1D arrays
        (np.array([1, 2, 3], dtype='m8'),
         np.array([2], dtype='m8'),
         np.array([0, 1, 1], dtype=np.int64)),
    ])
    def test_timedelta_floor_divide(self, op1, op2, exp):
        """m8 // m8 floors toward -inf after converting operands to a
        common unit; the result is a plain integer (or int64 array)."""
        assert_equal(op1 // op2, exp)
    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    @pytest.mark.parametrize("op1, op2", [
        # div by 0
        (np.timedelta64(10, 'us'),
         np.timedelta64(0, 'us')),
        # div with NaT
        (np.timedelta64('NaT'),
         np.timedelta64(50, 'us')),
        # special case for int64 min
        # in integer floor division
        (np.timedelta64(np.iinfo(np.int64).min),
         np.timedelta64(-1)),
    ])
    def test_timedelta_floor_div_warnings(self, op1, op2):
        """Degenerate floor divisions (by 0, with NaT, int64-min overflow)
        emit a RuntimeWarning and yield an int64 zero."""
        with pytest.warns(RuntimeWarning):
            actual = op1 // op2
            assert_equal(actual, 0)
            assert_equal(actual.dtype, np.int64)
    @pytest.mark.parametrize("val1, val2", [
        # the smallest integer that can't be represented
        # exactly in a double should be preserved if we avoid
        # casting to double in floordiv operation
        (9007199254740993, 1),
        # stress the alternate floordiv code path where
        # operand signs don't match and remainder isn't 0
        (9007199254740999, -2),
    ])
    def test_timedelta_floor_div_precision(self, val1, val2):
        """m8 // m8 must stay in integer arithmetic, preserving values
        beyond the 53-bit double mantissa, and match Python's // exactly."""
        op1 = np.timedelta64(val1)
        op2 = np.timedelta64(val2)
        actual = op1 // op2
        # Python reference integer floor
        expected = val1 // val2
        assert_equal(actual, expected)
    @pytest.mark.parametrize("val1, val2", [
        # years and months sometimes can't be unambiguously
        # divided for floor division operation
        (np.timedelta64(7, 'Y'),
         np.timedelta64(3, 's')),
        (np.timedelta64(7, 'M'),
         np.timedelta64(1, 'D')),
    ])
    def test_timedelta_floor_div_error(self, val1, val2):
        """Y/M units mixed with linear units have no common metadata
        divisor, so // must raise TypeError."""
        with assert_raises_regex(TypeError, "common metadata divisor"):
            val1 // val2
    @pytest.mark.parametrize("op1, op2", [
        # reuse the test cases from floordiv
        (np.timedelta64(7, 's'),
         np.timedelta64(4, 's')),
        # m8 same units round down with negative
        (np.timedelta64(7, 's'),
         np.timedelta64(-4, 's')),
        # m8 same units negative no round down
        (np.timedelta64(8, 's'),
         np.timedelta64(-4, 's')),
        # m8 different units
        (np.timedelta64(1, 'm'),
         np.timedelta64(31, 's')),
        # m8 generic units
        (np.timedelta64(1890),
         np.timedelta64(31)),
        # Y // M works
        (np.timedelta64(2, 'Y'),
         np.timedelta64('13', 'M')),
        # handle 1D arrays
        (np.array([1, 2, 3], dtype='m8'),
         np.array([2], dtype='m8')),
    ])
    def test_timedelta_divmod(self, op1, op2):
        """divmod(a, b) on timedelta64 must agree with (a // b, a % b)."""
        expected = (op1 // op2, op1 % op2)
        assert_equal(divmod(op1, op2), expected)
    @pytest.mark.parametrize("op1, op2", [
        # Y and M are incompatible with all units except Y and M
        (np.timedelta64(1, 'Y'), np.timedelta64(1, 's')),
        (np.timedelta64(1, 'D'), np.timedelta64(1, 'M')),
    ])
    def test_timedelta_divmod_typeerror(self, op1, op2):
        """np.divmod with incompatible Y/M vs linear units raises
        TypeError, matching // and % behavior."""
        assert_raises(TypeError, np.divmod, op1, op2)
    @pytest.mark.skipif(IS_WASM, reason="does not work in wasm")
    @pytest.mark.parametrize("op1, op2", [
        # reuse cases from floordiv
        # div by 0
        (np.timedelta64(10, 'us'),
         np.timedelta64(0, 'us')),
        # div with NaT
        (np.timedelta64('NaT'),
         np.timedelta64(50, 'us')),
        # special case for int64 min
        # in integer floor division
        (np.timedelta64(np.iinfo(np.int64).min),
         np.timedelta64(-1)),
    ])
    def test_timedelta_divmod_warnings(self, op1, op2):
        """Degenerate divmod warns (RuntimeWarning) just like // and %
        individually do, and still matches their combined result."""
        with pytest.warns(RuntimeWarning):
            expected = (op1 // op2, op1 % op2)
        with pytest.warns(RuntimeWarning):
            actual = divmod(op1, op2)
        assert_equal(actual, expected)
    def test_datetime_divide(self):
        """Division rules for datetime64/timedelta64.

        m8 may be divided by int, float, or m8 (m8 / m8 yields float64,
        converting operands to a common unit first).  Every combination
        involving M8 raises TypeError.  Division by 0, nan, or NaT yields
        NaT; division by inf yields a zero timedelta.
        """
        for dta, tda, tdb, tdc, tdd in \
                    [
                     # One-dimensional arrays
                     (np.array(['2012-12-21'], dtype='M8[D]'),
                      np.array([6], dtype='m8[h]'),
                      np.array([9], dtype='m8[h]'),
                      np.array([12], dtype='m8[h]'),
                      np.array([6], dtype='m8[m]')),
                     # NumPy scalars
                     (np.datetime64('2012-12-21', '[D]'),
                      np.timedelta64(6, '[h]'),
                      np.timedelta64(9, '[h]'),
                      np.timedelta64(12, '[h]'),
                      np.timedelta64(6, '[m]'))]:
            # m8 / int
            assert_equal(tdc / 2, tda)
            assert_equal((tdc / 2).dtype, np.dtype('m8[h]'))
            # m8 / float
            assert_equal(tda / 0.5, tdc)
            assert_equal((tda / 0.5).dtype, np.dtype('m8[h]'))
            # m8 / m8
            assert_equal(tda / tdb, 6 / 9)
            assert_equal(np.divide(tda, tdb), 6 / 9)
            assert_equal(np.true_divide(tda, tdb), 6 / 9)
            assert_equal(tdb / tda, 9 / 6)
            assert_equal((tda / tdb).dtype, np.dtype('f8'))
            # 6h / 6m == 60 after unit conversion
            assert_equal(tda / tdd, 60)
            assert_equal(tdd / tda, 1 / 60)
            # int / m8
            assert_raises(TypeError, np.divide, 2, tdb)
            # float / m8
            assert_raises(TypeError, np.divide, 0.5, tdb)
            # M8 / m8
            assert_raises(TypeError, np.divide, dta, tda)
            # m8 / M8
            assert_raises(TypeError, np.divide, tda, dta)
            # M8 / int
            assert_raises(TypeError, np.divide, dta, 2)
            # int / M8
            assert_raises(TypeError, np.divide, 2, dta)
            # M8 / float
            assert_raises(TypeError, np.divide, dta, 1.5)
            # float / M8
            assert_raises(TypeError, np.divide, 1.5, dta)
        # NaTs: silence the RuntimeWarning that these divisions emit
        with warnings.catch_warnings():
            warnings.filterwarnings(
                'ignore', r".*encountered in divide", RuntimeWarning)
            nat = np.timedelta64('NaT')
            for tp in (int, float):
                assert_equal(np.timedelta64(1) / tp(0), nat)
                assert_equal(np.timedelta64(0) / tp(0), nat)
                assert_equal(nat / tp(0), nat)
                assert_equal(nat / tp(2), nat)
            # Division by inf
            assert_equal(np.timedelta64(1) / float('inf'), np.timedelta64(0))
            assert_equal(np.timedelta64(0) / float('inf'), np.timedelta64(0))
            assert_equal(nat / float('inf'), nat)
            # Division by nan
            assert_equal(np.timedelta64(1) / float('nan'), nat)
            assert_equal(np.timedelta64(0) / float('nan'), nat)
            assert_equal(nat / float('nan'), nat)
def test_datetime_compare(self):
# Test all the comparison operators
a = np.datetime64('2000-03-12T18:00:00.000000')
b = np.array(['2000-03-12T18:00:00.000000',
'2000-03-12T17:59:59.999999',
'2000-03-12T18:00:00.000001',
'1970-01-11T12:00:00.909090',
'2016-01-11T12:00:00.909090'],
dtype='datetime64[us]')
assert_equal(np.equal(a, b), [1, 0, 0, 0, 0])
assert_equal(np.not_equal(a, b), [0, 1, 1, 1, 1])
assert_equal(np.less(a, b), [0, 0, 1, 0, 1])
assert_equal(np.less_equal(a, b), [1, 0, 1, 0, 1])
assert_equal(np.greater(a, b), [0, 1, 0, 1, 0])
assert_equal(np.greater_equal(a, b), [1, 1, 0, 1, 0])
def test_datetime_compare_nat(self):
dt_nat = np.datetime64('NaT', 'D')
dt_other = np.datetime64('2000-01-01')
td_nat = np.timedelta64('NaT', 'h')
td_other = np.timedelta64(1, 'h')
for op in [np.equal, np.less, np.less_equal,
np.greater, np.greater_equal]:
assert_(not op(dt_nat, dt_nat))
assert_(not op(dt_nat, dt_other))
assert_(not op(dt_other, dt_nat))
assert_(not op(td_nat, td_nat))
assert_(not op(td_nat, td_other))
assert_(not op(td_other, td_nat))
assert_(np.not_equal(dt_nat, dt_nat))
assert_(np.not_equal(dt_nat, dt_other))
assert_(np.not_equal(dt_other, dt_nat))
assert_(np.not_equal(td_nat, td_nat))
assert_(np.not_equal(td_nat, td_other))
assert_(np.not_equal(td_other, td_nat))
    def test_datetime_minmax(self):
        """minimum/maximum/fmin/fmax on M8 and m8.

        The result metadata is the GCD of the operand metadata; NaT wins
        for minimum/maximum but loses for fmin/fmax; mixing M8 with m8
        raises TypeError under casting='same_kind'.
        """
        # The metadata of the result should become the GCD
        # of the operand metadata
        a = np.array('1999-03-12T13', dtype='M8[2m]')
        b = np.array('1999-03-12T12', dtype='M8[s]')
        assert_equal(np.minimum(a, b), b)
        assert_equal(np.minimum(a, b).dtype, np.dtype('M8[s]'))
        assert_equal(np.fmin(a, b), b)
        assert_equal(np.fmin(a, b).dtype, np.dtype('M8[s]'))
        assert_equal(np.maximum(a, b), a)
        assert_equal(np.maximum(a, b).dtype, np.dtype('M8[s]'))
        assert_equal(np.fmax(a, b), a)
        assert_equal(np.fmax(a, b).dtype, np.dtype('M8[s]'))
        # Viewed as integers, the comparison is opposite because
        # of the units chosen
        assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))
        # Interaction with NaT
        a = np.array('1999-03-12T13', dtype='M8[2m]')
        dtnat = np.array('NaT', dtype='M8[h]')
        assert_equal(np.minimum(a, dtnat), dtnat)
        assert_equal(np.minimum(dtnat, a), dtnat)
        assert_equal(np.maximum(a, dtnat), dtnat)
        assert_equal(np.maximum(dtnat, a), dtnat)
        assert_equal(np.fmin(dtnat, a), a)
        assert_equal(np.fmin(a, dtnat), a)
        assert_equal(np.fmax(dtnat, a), a)
        assert_equal(np.fmax(a, dtnat), a)
        # Also do timedelta
        a = np.array(3, dtype='m8[h]')
        b = np.array(3 * 3600 - 3, dtype='m8[s]')
        assert_equal(np.minimum(a, b), b)
        assert_equal(np.minimum(a, b).dtype, np.dtype('m8[s]'))
        assert_equal(np.fmin(a, b), b)
        assert_equal(np.fmin(a, b).dtype, np.dtype('m8[s]'))
        assert_equal(np.maximum(a, b), a)
        assert_equal(np.maximum(a, b).dtype, np.dtype('m8[s]'))
        assert_equal(np.fmax(a, b), a)
        assert_equal(np.fmax(a, b).dtype, np.dtype('m8[s]'))
        # Viewed as integers, the comparison is opposite because
        # of the units chosen
        assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))
        # should raise between datetime and timedelta
        #
        # TODO: Allowing unsafe casting by
        # default in ufuncs strikes again... :(
        a = np.array(3, dtype='m8[h]')
        b = np.array('1999-03-12T12', dtype='M8[s]')
        #assert_raises(TypeError, np.minimum, a, b)
        #assert_raises(TypeError, np.maximum, a, b)
        #assert_raises(TypeError, np.fmin, a, b)
        #assert_raises(TypeError, np.fmax, a, b)
        assert_raises(TypeError, np.minimum, a, b, casting='same_kind')
        assert_raises(TypeError, np.maximum, a, b, casting='same_kind')
        assert_raises(TypeError, np.fmin, a, b, casting='same_kind')
        assert_raises(TypeError, np.fmax, a, b, casting='same_kind')
def test_hours(self):
t = np.ones(3, dtype='M8[s]')
t[0] = 60 * 60 * 24 + 60 * 60 * 10
assert_(t[0].item().hour == 10)
def test_divisor_conversion_year(self):
assert_(np.dtype('M8[Y/4]') == np.dtype('M8[3M]'))
assert_(np.dtype('M8[Y/13]') == np.dtype('M8[4W]'))
assert_(np.dtype('M8[3Y/73]') == np.dtype('M8[15D]'))
def test_divisor_conversion_month(self):
assert_(np.dtype('M8[M/2]') == np.dtype('M8[2W]'))
assert_(np.dtype('M8[M/15]') == np.dtype('M8[2D]'))
assert_(np.dtype('M8[3M/40]') == np.dtype('M8[54h]'))
def test_divisor_conversion_week(self):
assert_(np.dtype('m8[W/7]') == np.dtype('m8[D]'))
assert_(np.dtype('m8[3W/14]') == np.dtype('m8[36h]'))
assert_(np.dtype('m8[5W/140]') == np.dtype('m8[360m]'))
def test_divisor_conversion_day(self):
assert_(np.dtype('M8[D/12]') == np.dtype('M8[2h]'))
assert_(np.dtype('M8[D/120]') == np.dtype('M8[12m]'))
assert_(np.dtype('M8[3D/960]') == np.dtype('M8[270s]'))
def test_divisor_conversion_hour(self):
assert_(np.dtype('m8[h/30]') == np.dtype('m8[2m]'))
assert_(np.dtype('m8[3h/300]') == np.dtype('m8[36s]'))
def test_divisor_conversion_minute(self):
assert_(np.dtype('m8[m/30]') == np.dtype('m8[2s]'))
assert_(np.dtype('m8[3m/300]') == np.dtype('m8[600ms]'))
def test_divisor_conversion_second(self):
assert_(np.dtype('m8[s/100]') == np.dtype('m8[10ms]'))
assert_(np.dtype('m8[3s/10000]') == np.dtype('m8[300us]'))
def test_divisor_conversion_fs(self):
assert_(np.dtype('M8[fs/100]') == np.dtype('M8[10as]'))
assert_raises(ValueError, lambda: np.dtype('M8[3fs/10000]'))
def test_divisor_conversion_as(self):
assert_raises(ValueError, lambda: np.dtype('M8[as/10]'))
    def test_string_parser_variants(self):
        """Accepted datetime-string variants: a space instead of 'T'
        between date and time, explicitly signed years, and — with the
        no-timezone UserWarning — 'Z' suffixes and numeric UTC offsets."""
        msg = "no explicit representation of timezones available for " \
              "np.datetime64"
        # Allow space instead of 'T' between date and time
        assert_equal(np.array(['1980-02-29T01:02:03'], np.dtype('M8[s]')),
                     np.array(['1980-02-29 01:02:03'], np.dtype('M8[s]')))
        # Allow positive years
        assert_equal(np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),
                     np.array(['+1980-02-29 01:02:03'], np.dtype('M8[s]')))
        # Allow negative years
        assert_equal(np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),
                     np.array(['-1980-02-29 01:02:03'], np.dtype('M8[s]')))
        # UTC specifier
        with pytest.warns(UserWarning, match=msg):
            assert_equal(
                np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),
                np.array(['+1980-02-29 01:02:03Z'], np.dtype('M8[s]')))
        with pytest.warns(UserWarning, match=msg):
            assert_equal(
                np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),
                np.array(['-1980-02-29 01:02:03Z'], np.dtype('M8[s]')))
        # Time zone offset
        with pytest.warns(UserWarning, match=msg):
            assert_equal(
                np.array(['1980-02-29T02:02:03'], np.dtype('M8[s]')),
                np.array(['1980-02-29 00:32:03-0130'], np.dtype('M8[s]')))
        with pytest.warns(UserWarning, match=msg):
            assert_equal(
                np.array(['1980-02-28T22:32:03'], np.dtype('M8[s]')),
                np.array(['1980-02-29 00:02:03+01:30'], np.dtype('M8[s]')))
        with pytest.warns(UserWarning, match=msg):
            assert_equal(
                np.array(['1980-02-29T02:32:03.506'], np.dtype('M8[s]')),
                np.array(['1980-02-29 00:32:03.506-02'], np.dtype('M8[s]')))
        with pytest.warns(UserWarning, match=msg):
            assert_equal(np.datetime64('1977-03-02T12:30-0230'),
                         np.datetime64('1977-03-02T15:00'))
    def test_string_parser_error_check(self):
        """Malformed datetime strings must raise ValueError.

        Covers bad separators, out-of-range month/day/hour/minute/second
        fields, trailing garbage, and unreasonable timezone offsets (the
        offset cases also emit the no-timezone UserWarning).
        """
        msg = "no explicit representation of timezones available for " \
              "np.datetime64"
        # Arbitrary bad string
        assert_raises(ValueError, np.array, ['badvalue'], np.dtype('M8[us]'))
        # Character after year must be '-'
        assert_raises(ValueError, np.array, ['1980X'], np.dtype('M8[us]'))
        # Cannot have trailing '-'
        assert_raises(ValueError, np.array, ['1980-'], np.dtype('M8[us]'))
        # Month must be in range [1,12]
        assert_raises(ValueError, np.array, ['1980-00'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-13'], np.dtype('M8[us]'))
        # Month must have two digits
        assert_raises(ValueError, np.array, ['1980-1'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-1-02'], np.dtype('M8[us]'))
        # 'Mor' is not a valid month
        assert_raises(ValueError, np.array, ['1980-Mor'], np.dtype('M8[us]'))
        # Cannot have trailing '-'
        assert_raises(ValueError, np.array, ['1980-01-'], np.dtype('M8[us]'))
        # Day must be in range [1,len(month)]
        assert_raises(ValueError, np.array, ['1980-01-0'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-01-00'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-01-32'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1979-02-29'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-02-30'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-03-32'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-04-31'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-05-32'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-06-31'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-07-32'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-08-32'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-09-31'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-10-32'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-11-31'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-12-32'], np.dtype('M8[us]'))
        # Cannot have trailing characters
        assert_raises(ValueError, np.array, ['1980-02-03%'],
                      np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-02-03 q'],
                      np.dtype('M8[us]'))
        # Hours must be in range [0, 23]
        assert_raises(ValueError, np.array, ['1980-02-03 25'],
                      np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-02-03T25'],
                      np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-02-03 24:01'],
                      np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-02-03T24:01'],
                      np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-02-03 -1'],
                      np.dtype('M8[us]'))
        # No trailing ':'
        assert_raises(ValueError, np.array, ['1980-02-03 01:'],
                      np.dtype('M8[us]'))
        # Minutes must be in range [0, 59]
        assert_raises(ValueError, np.array, ['1980-02-03 01:-1'],
                      np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-02-03 01:60'],
                      np.dtype('M8[us]'))
        # No trailing ':'
        assert_raises(ValueError, np.array, ['1980-02-03 01:60:'],
                      np.dtype('M8[us]'))
        # Seconds must be in range [0, 59]
        assert_raises(ValueError, np.array, ['1980-02-03 01:10:-1'],
                      np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-02-03 01:01:60'],
                      np.dtype('M8[us]'))
        # Timezone offset must within a reasonable range
        with pytest.warns(UserWarning, match=msg):
            assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+0661'],
                          np.dtype('M8[us]'))
        with pytest.warns(UserWarning, match=msg):
            assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+2500'],
                          np.dtype('M8[us]'))
        with pytest.warns(UserWarning, match=msg):
            assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-0070'],
                          np.dtype('M8[us]'))
        with pytest.warns(UserWarning, match=msg):
            assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-3000'],
                          np.dtype('M8[us]'))
        with pytest.warns(UserWarning, match=msg):
            assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-25:00'],
                          np.dtype('M8[us]'))
    def test_creation_overflow(self):
        """One calendar date parsed at s/ms/us/ns resolution must scale
        its integer representation by exactly 1000 per step; values past
        the int64 range raise OverflowError."""
        date = '1980-03-23 20:00:00'
        timesteps = np.array([date], dtype='datetime64[s]')[0].astype(np.int64)
        for unit in ['ms', 'us', 'ns']:
            # each finer unit multiplies the integer representation by 1000
            timesteps *= 1000
            x = np.array([date], dtype=f'datetime64[{unit}]')
            assert_equal(timesteps, x[0].astype(np.int64),
                         err_msg=f'Datetime conversion error for unit {unit}')
        assert_equal(x[0].astype(np.int64), 322689600000000000)
        # gh-13062
        with pytest.raises(OverflowError):
            np.datetime64(2**64, 'D')
        with pytest.raises(OverflowError):
            np.timedelta64(2**64, 'D')
    def test_datetime_as_string(self):
        """np.datetime_as_string default output for every unit, the
        unit= override (including casting='unsafe' for coarser units),
        and unit='auto' trailing-zero trimming."""
        # Check all the units with default string conversion
        date = '1959-10-13'
        datetime = '1959-10-13T12:34:56.789012345678901234'
        assert_equal(np.datetime_as_string(np.datetime64(date, 'Y')),
                     '1959')
        assert_equal(np.datetime_as_string(np.datetime64(date, 'M')),
                     '1959-10')
        assert_equal(np.datetime_as_string(np.datetime64(date, 'D')),
                     '1959-10-13')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'h')),
                     '1959-10-13T12')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'm')),
                     '1959-10-13T12:34')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 's')),
                     '1959-10-13T12:34:56')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ms')),
                     '1959-10-13T12:34:56.789')
        for us in ['us', 'μs', b'us']:  # check non-ascii and bytes too
            assert_equal(np.datetime_as_string(np.datetime64(datetime, us)),
                         '1959-10-13T12:34:56.789012')
        datetime = '1969-12-31T23:34:56.789012345678901234'
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')),
                     '1969-12-31T23:34:56.789012345')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')),
                     '1969-12-31T23:34:56.789012345678')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')),
                     '1969-12-31T23:34:56.789012345678901')
        datetime = '1969-12-31T23:59:57.789012345678901234'
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')),
                     datetime)
        datetime = '1970-01-01T00:34:56.789012345678901234'
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')),
                     '1970-01-01T00:34:56.789012345')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')),
                     '1970-01-01T00:34:56.789012345678')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')),
                     '1970-01-01T00:34:56.789012345678901')
        datetime = '1970-01-01T00:00:05.789012345678901234'
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')),
                     datetime)
        # String conversion with the unit= parameter
        a = np.datetime64('2032-07-18T12:23:34.123456', 'us')
        assert_equal(np.datetime_as_string(a, unit='Y', casting='unsafe'),
                     '2032')
        assert_equal(np.datetime_as_string(a, unit='M', casting='unsafe'),
                     '2032-07')
        assert_equal(np.datetime_as_string(a, unit='W', casting='unsafe'),
                     '2032-07-18')
        assert_equal(np.datetime_as_string(a, unit='D', casting='unsafe'),
                     '2032-07-18')
        # coarsening the unit without casting='unsafe' is an error
        with pytest.raises(ValueError):
            np.datetime_as_string(a, unit='Y', casting='same_value')
        assert_equal(np.datetime_as_string(a, unit='h'), '2032-07-18T12')
        assert_equal(np.datetime_as_string(a, unit='m'),
                     '2032-07-18T12:23')
        assert_equal(np.datetime_as_string(a, unit='s'),
                     '2032-07-18T12:23:34')
        assert_equal(np.datetime_as_string(a, unit='ms'),
                     '2032-07-18T12:23:34.123')
        assert_equal(np.datetime_as_string(a, unit='us'),
                     '2032-07-18T12:23:34.123456')
        assert_equal(np.datetime_as_string(a, unit='ns'),
                     '2032-07-18T12:23:34.123456000')
        assert_equal(np.datetime_as_string(a, unit='ps'),
                     '2032-07-18T12:23:34.123456000000')
        assert_equal(np.datetime_as_string(a, unit='fs'),
                     '2032-07-18T12:23:34.123456000000000')
        assert_equal(np.datetime_as_string(a, unit='as'),
                     '2032-07-18T12:23:34.123456000000000000')
        # unit='auto' parameter
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-07-18T12:23:34.123456', 'us'), unit='auto'),
                '2032-07-18T12:23:34.123456')
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-07-18T12:23:34.12', 'us'), unit='auto'),
                '2032-07-18T12:23:34.120')
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-07-18T12:23:34', 'us'), unit='auto'),
                '2032-07-18T12:23:34')
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-07-18T12:23:00', 'us'), unit='auto'),
                '2032-07-18T12:23')
        # 'auto' doesn't split up hour and minute
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-07-18T12:00:00', 'us'), unit='auto'),
                '2032-07-18T12:00')
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-07-18T00:00:00', 'us'), unit='auto'),
                '2032-07-18')
        # 'auto' doesn't split up the date
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-07-01T00:00:00', 'us'), unit='auto'),
                '2032-07-01')
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-01-01T00:00:00', 'us'), unit='auto'),
                '2032-01-01')
    @pytest.mark.skipif(not _has_tz, reason="The tzdata module is not available.")
    def test_datetime_as_string_timezone(self):
        """datetime_as_string timezone handling: 'naive'/'UTC'/'local'
        keywords and explicit ZoneInfo objects (DST-on March vs DST-off
        February dates); printing a date (unit='D') with a timezone
        requires casting='unsafe'."""
        # timezone='local' vs 'UTC'
        a = np.datetime64('2010-03-15T06:30', 'm')
        assert_equal(np.datetime_as_string(a),
                     '2010-03-15T06:30')
        assert_equal(np.datetime_as_string(a, timezone='naive'),
                     '2010-03-15T06:30')
        assert_equal(np.datetime_as_string(a, timezone='UTC'),
                     '2010-03-15T06:30Z')
        assert_(np.datetime_as_string(a, timezone='local') !=
                '2010-03-15T06:30')
        b = np.datetime64('2010-02-15T06:30', 'm')
        assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Central')),
                     '2010-03-15T01:30-0500')
        assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Eastern')),
                     '2010-03-15T02:30-0400')
        assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Pacific')),
                     '2010-03-14T23:30-0700')
        assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Central')),
                     '2010-02-15T00:30-0600')
        assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Eastern')),
                     '2010-02-15T01:30-0500')
        assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Pacific')),
                     '2010-02-14T22:30-0800')
        # Dates to strings with a timezone attached is disabled by default
        assert_raises(TypeError, np.datetime_as_string, a, unit='D',
                      timezone=ZoneInfo('US/Pacific'))
        # Check that we can print out the date in the specified time zone
        assert_equal(np.datetime_as_string(a, unit='D',
                     timezone=ZoneInfo('US/Pacific'), casting='unsafe'),
                     '2010-03-14')
        assert_equal(np.datetime_as_string(b, unit='D',
                     timezone=ZoneInfo('US/Central'), casting='unsafe'),
                     '2010-02-15')
def test_datetime_arange(self):
# With two datetimes provided as strings
a = np.arange('2010-01-05', '2010-01-10', dtype='M8[D]')
assert_equal(a.dtype, np.dtype('M8[D]'))
assert_equal(a,
np.array(['2010-01-05', '2010-01-06', '2010-01-07',
'2010-01-08', '2010-01-09'], dtype='M8[D]'))
a = np.arange('1950-02-10', '1950-02-06', -1, dtype='M8[D]')
assert_equal(a.dtype, np.dtype('M8[D]'))
assert_equal(a,
np.array(['1950-02-10', '1950-02-09', '1950-02-08',
'1950-02-07'], dtype='M8[D]'))
# Unit should be detected as months here
a = np.arange('1969-05', '1970-05', 2, dtype='M8')
assert_equal(a.dtype, np.dtype('M8[M]'))
assert_equal(a,
np.datetime64('1969-05') + np.arange(12, step=2))
# datetime, integer|timedelta works as well
# produces arange (start, start + stop) in this case
a = np.arange('1969', 18, 3, dtype='M8')
assert_equal(a.dtype, np.dtype('M8[Y]'))
assert_equal(a,
np.datetime64('1969') + np.arange(18, step=3))
a = np.arange('1969-12-19', 22, np.timedelta64(2), dtype='M8')
assert_equal(a.dtype, np.dtype('M8[D]'))
assert_equal(a,
np.datetime64('1969-12-19') + np.arange(22, step=2))
# Step of 0 is disallowed
assert_raises(ValueError, np.arange, np.datetime64('today'),
np.datetime64('today') + 3, 0)
# Promotion across nonlinear unit boundaries is disallowed
assert_raises(TypeError, np.arange, np.datetime64('2011-03-01', 'D'),
np.timedelta64(5, 'M'))
assert_raises(TypeError, np.arange,
np.datetime64('2012-02-03T14', 's'),
np.timedelta64(5, 'Y'))
def test_datetime_arange_no_dtype(self):
d = np.array('2010-01-04', dtype="M8[D]")
assert_equal(np.arange(d, d + 1), d)
assert_raises(ValueError, np.arange, d)
def test_timedelta_arange(self):
a = np.arange(3, 10, dtype='m8')
assert_equal(a.dtype, np.dtype('m8'))
assert_equal(a, np.timedelta64(0) + np.arange(3, 10))
a = np.arange(np.timedelta64(3, 's'), 10, 2, dtype='m8')
assert_equal(a.dtype, np.dtype('m8[s]'))
assert_equal(a, np.timedelta64(0, 's') + np.arange(3, 10, 2))
# Step of 0 is disallowed
assert_raises(ValueError, np.arange, np.timedelta64(0),
np.timedelta64(5), 0)
# Promotion across nonlinear unit boundaries is disallowed
assert_raises(TypeError, np.arange, np.timedelta64(0, 'D'),
np.timedelta64(5, 'M'))
assert_raises(TypeError, np.arange, np.timedelta64(0, 'Y'),
np.timedelta64(5, 'D'))
    @pytest.mark.parametrize("val1, val2, expected", [
        # case from gh-12092
        (np.timedelta64(7, 's'),
         np.timedelta64(3, 's'),
         np.timedelta64(1, 's')),
        # negative value cases
        (np.timedelta64(3, 's'),
         np.timedelta64(-2, 's'),
         np.timedelta64(-1, 's')),
        (np.timedelta64(-3, 's'),
         np.timedelta64(2, 's'),
         np.timedelta64(1, 's')),
        # larger value cases
        (np.timedelta64(17, 's'),
         np.timedelta64(22, 's'),
         np.timedelta64(17, 's')),
        (np.timedelta64(22, 's'),
         np.timedelta64(17, 's'),
         np.timedelta64(5, 's')),
        # different units
        (np.timedelta64(1, 'm'),
         np.timedelta64(57, 's'),
         np.timedelta64(3, 's')),
        (np.timedelta64(1, 'us'),
         np.timedelta64(727, 'ns'),
         np.timedelta64(273, 'ns')),
        # NaT is propagated
        (np.timedelta64('NaT'),
         np.timedelta64(50, 'ns'),
         np.timedelta64('NaT')),
        # Y % M works
        (np.timedelta64(2, 'Y'),
         np.timedelta64(22, 'M'),
         np.timedelta64(2, 'M')),
    ])
    def test_timedelta_modulus(self, val1, val2, expected):
        """m8 % m8 follows Python's sign convention (result has the sign
        of the divisor), converts to a common unit, and propagates NaT."""
        assert_equal(val1 % val2, expected)
    @pytest.mark.parametrize("val1, val2", [
        # years and months sometimes can't be unambiguously
        # divided for modulus operation
        (np.timedelta64(7, 'Y'),
         np.timedelta64(3, 's')),
        (np.timedelta64(7, 'M'),
         np.timedelta64(1, 'D')),
    ])
    def test_timedelta_modulus_error(self, val1, val2):
        """Y/M units mixed with linear units have no common metadata
        divisor, so % must raise TypeError."""
        with assert_raises_regex(TypeError, "common metadata divisor"):
            val1 % val2
    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    def test_timedelta_modulus_div_by_zero(self):
        """Modulo by a zero timedelta warns (RuntimeWarning) and yields NaT."""
        with pytest.warns(RuntimeWarning):
            actual = np.timedelta64(10, 's') % np.timedelta64(0, 's')
            assert_equal(actual, np.timedelta64('NaT'))
    @pytest.mark.parametrize("val1, val2", [
        # cases where one operand is not
        # timedelta64
        (np.timedelta64(7, 'Y'),
         15,),
        (7.5,
         np.timedelta64(1, 'D')),
    ])
    def test_timedelta_modulus_type_resolution(self, val1, val2):
        """Mixing timedelta64 with plain int/float in % is currently
        unsupported and raises a loop-resolution TypeError."""
        # NOTE: some of the operations may be supported
        # in the future
        with assert_raises_regex(TypeError,
                                 "'remainder' cannot use operands with types"):
            val1 % val2
def test_timedelta_arange_no_dtype(self):
d = np.array(5, dtype="m8[D]")
assert_equal(np.arange(d, d + 1), d)
assert_equal(np.arange(d), np.arange(0, d))
def test_datetime_maximum_reduce(self):
a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='M8[D]')
assert_equal(np.maximum.reduce(a).dtype, np.dtype('M8[D]'))
assert_equal(np.maximum.reduce(a),
np.datetime64('2010-01-02'))
a = np.array([1, 4, 0, 7, 2], dtype='m8[s]')
assert_equal(np.maximum.reduce(a).dtype, np.dtype('m8[s]'))
assert_equal(np.maximum.reduce(a),
np.timedelta64(7, 's'))
def test_timedelta_correct_mean(self):
# test mainly because it worked only via a bug in that allowed:
# `timedelta.sum(dtype="f8")` to ignore the dtype request.
a = np.arange(1000, dtype="m8[s]")
assert_array_equal(a.mean(), a.sum() / len(a))
    def test_datetime_no_subtract_reducelike(self):
        """reduce/accumulate/reduceat of np.subtract on datetime64 must
        raise: M8 - M8 yields m8, a dtype different from the inputs."""
        # subtracting two datetime64 works, but we cannot reduce it, since
        # the result of that subtraction will have a different dtype.
        arr = np.array(["2021-12-02", "2019-05-12"], dtype="M8[ms]")
        msg = r"the resolved dtypes are not compatible"
        with pytest.raises(TypeError, match=msg):
            np.subtract.reduce(arr)
        with pytest.raises(TypeError, match=msg):
            np.subtract.accumulate(arr)
        with pytest.raises(TypeError, match=msg):
            np.subtract.reduceat(arr, [0])
def test_datetime_busday_offset(self):
# First Monday in June
assert_equal(
np.busday_offset('2011-06', 0, roll='forward', weekmask='Mon'),
np.datetime64('2011-06-06'))
# Last Monday in June
assert_equal(
np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'),
np.datetime64('2011-06-27'))
assert_equal(
np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'),
np.datetime64('2011-06-27'))
# Default M-F business days, different roll modes
assert_equal(np.busday_offset('2010-08', 0, roll='backward'),
np.datetime64('2010-07-30'))
assert_equal(np.busday_offset('2010-08', 0, roll='preceding'),
np.datetime64('2010-07-30'))
assert_equal(np.busday_offset('2010-08', 0, roll='modifiedpreceding'),
np.datetime64('2010-08-02'))
assert_equal(np.busday_offset('2010-08', 0, roll='modifiedfollowing'),
np.datetime64('2010-08-02'))
assert_equal(np.busday_offset('2010-08', 0, roll='forward'),
np.datetime64('2010-08-02'))
assert_equal(np.busday_offset('2010-08', 0, roll='following'),
np.datetime64('2010-08-02'))
assert_equal(np.busday_offset('2010-10-30', 0, roll='following'),
np.datetime64('2010-11-01'))
assert_equal(
np.busday_offset('2010-10-30', 0, roll='modifiedfollowing'),
np.datetime64('2010-10-29'))
assert_equal(
np.busday_offset('2010-10-30', 0, roll='modifiedpreceding'),
np.datetime64('2010-10-29'))
assert_equal(
np.busday_offset('2010-10-16', 0, roll='modifiedfollowing'),
np.datetime64('2010-10-18'))
assert_equal(
np.busday_offset('2010-10-16', 0, roll='modifiedpreceding'),
np.datetime64('2010-10-15'))
# roll='raise' by default
assert_raises(ValueError, np.busday_offset, '2011-06-04', 0)
# Bigger offset values
assert_equal(np.busday_offset('2006-02-01', 25),
np.datetime64('2006-03-08'))
assert_equal(np.busday_offset('2006-03-08', -25),
np.datetime64('2006-02-01'))
assert_equal(np.busday_offset('2007-02-25', 11, weekmask='SatSun'),
np.datetime64('2007-04-07'))
assert_equal(np.busday_offset('2007-04-07', -11, weekmask='SatSun'),
np.datetime64('2007-02-25'))
# NaT values when roll is not raise
assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='nat'),
np.datetime64('NaT'))
assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='following'),
np.datetime64('NaT'))
assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='preceding'),
np.datetime64('NaT'))
def test_datetime_busdaycalendar(self):
    """Exercise ``np.busdaycalendar`` construction and weekmask validation.

    Covers holiday normalization (NaT/duplicate/weekend removal plus
    sorting) and accepted/rejected weekmask spellings.
    """
    # Check that it removes NaT, duplicates, and weekends
    # and sorts the result.
    bdd = np.busdaycalendar(
        holidays=['NaT', '2011-01-17', '2011-03-06', 'NaT',
                  '2011-12-26', '2011-05-30', '2011-01-17'])
    # '2011-03-06' is a Sunday (dropped by the default weekmask), the
    # NaTs and the duplicated '2011-01-17' are removed, rest is sorted.
    assert_equal(bdd.holidays,
        np.array(['2011-01-17', '2011-05-30', '2011-12-26'], dtype='M8'))
    # Default M-F weekmask
    assert_equal(bdd.weekmask, np.array([1, 1, 1, 1, 1, 0, 0], dtype='?'))
    # Check string weekmask with varying whitespace.
    bdd = np.busdaycalendar(weekmask="Sun TueWed Thu\tFri")
    assert_equal(bdd.weekmask, np.array([0, 1, 1, 1, 1, 0, 1], dtype='?'))
    # Check length 7 0/1 string
    bdd = np.busdaycalendar(weekmask="0011001")
    assert_equal(bdd.weekmask, np.array([0, 0, 1, 1, 0, 0, 1], dtype='?'))
    # Check abbreviated day-name weekmask (fewer than 7 days listed).
    bdd = np.busdaycalendar(weekmask="Mon Tue")
    assert_equal(bdd.weekmask, np.array([1, 1, 0, 0, 0, 0, 0], dtype='?'))
    # All-zeros weekmask should raise
    assert_raises(ValueError, np.busdaycalendar, weekmask=[0, 0, 0, 0, 0, 0, 0])
    # weekday names must be correct case
    assert_raises(ValueError, np.busdaycalendar, weekmask="satsun")
    # Empty weekmask should raise
    assert_raises(ValueError, np.busdaycalendar, weekmask="")
    # Invalid weekday name codes should raise
    assert_raises(ValueError, np.busdaycalendar, weekmask="Mon Tue We")
    assert_raises(ValueError, np.busdaycalendar, weekmask="Max")
    assert_raises(ValueError, np.busdaycalendar, weekmask="Monday Tue")
def test_datetime_busday_holidays_offset(self):
    """``np.busday_offset`` with explicit holiday lists and a busdaycalendar.

    Verifies: a single holiday, duplicate/NaT holidays, holidays outside
    the stepped range, large jumps spanning several holidays, the
    weekmask/holidays vs. ``busdaycal`` exclusivity check, and rolling
    when the start date itself is a holiday.
    """
    # With exactly one holiday
    assert_equal(
        np.busday_offset('2011-11-10', 1, holidays=['2011-11-11']),
        np.datetime64('2011-11-14'))
    assert_equal(
        np.busday_offset('2011-11-04', 5, holidays=['2011-11-11']),
        np.datetime64('2011-11-14'))
    assert_equal(
        np.busday_offset('2011-11-10', 5, holidays=['2011-11-11']),
        np.datetime64('2011-11-18'))
    assert_equal(
        np.busday_offset('2011-11-14', -1, holidays=['2011-11-11']),
        np.datetime64('2011-11-10'))
    assert_equal(
        np.busday_offset('2011-11-18', -5, holidays=['2011-11-11']),
        np.datetime64('2011-11-10'))
    assert_equal(
        np.busday_offset('2011-11-14', -5, holidays=['2011-11-11']),
        np.datetime64('2011-11-04'))
    # With the holiday appearing twice
    assert_equal(
        np.busday_offset('2011-11-10', 1,
            holidays=['2011-11-11', '2011-11-11']),
        np.datetime64('2011-11-14'))
    assert_equal(
        np.busday_offset('2011-11-14', -1,
            holidays=['2011-11-11', '2011-11-11']),
        np.datetime64('2011-11-10'))
    # With a NaT holiday
    assert_equal(
        np.busday_offset('2011-11-10', 1,
            holidays=['2011-11-11', 'NaT']),
        np.datetime64('2011-11-14'))
    assert_equal(
        np.busday_offset('2011-11-14', -1,
            holidays=['NaT', '2011-11-11']),
        np.datetime64('2011-11-10'))
    # With another holiday after
    assert_equal(
        np.busday_offset('2011-11-10', 1,
            holidays=['2011-11-11', '2011-11-24']),
        np.datetime64('2011-11-14'))
    assert_equal(
        np.busday_offset('2011-11-14', -1,
            holidays=['2011-11-11', '2011-11-24']),
        np.datetime64('2011-11-10'))
    # With another holiday before
    assert_equal(
        np.busday_offset('2011-11-10', 1,
            holidays=['2011-10-10', '2011-11-11']),
        np.datetime64('2011-11-14'))
    assert_equal(
        np.busday_offset('2011-11-14', -1,
            holidays=['2011-10-10', '2011-11-11']),
        np.datetime64('2011-11-10'))
    # With another holiday before and after
    assert_equal(
        np.busday_offset('2011-11-10', 1,
            holidays=['2011-10-10', '2011-11-11', '2011-11-24']),
        np.datetime64('2011-11-14'))
    assert_equal(
        np.busday_offset('2011-11-14', -1,
            holidays=['2011-10-10', '2011-11-11', '2011-11-24']),
        np.datetime64('2011-11-10'))
    # A bigger forward jump across more than one week/holiday.
    # Each offset with holidays is checked against the equivalent
    # holiday-free offset plus the number of holidays skipped.
    holidays = ['2011-10-10', '2011-11-11', '2011-11-24',
                '2011-12-25', '2011-05-30', '2011-02-21',
                '2011-12-26', '2012-01-02']
    bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
    assert_equal(
        np.busday_offset('2011-10-03', 4, holidays=holidays),
        np.busday_offset('2011-10-03', 4))
    assert_equal(
        np.busday_offset('2011-10-03', 5, holidays=holidays),
        np.busday_offset('2011-10-03', 5 + 1))
    assert_equal(
        np.busday_offset('2011-10-03', 27, holidays=holidays),
        np.busday_offset('2011-10-03', 27 + 1))
    assert_equal(
        np.busday_offset('2011-10-03', 28, holidays=holidays),
        np.busday_offset('2011-10-03', 28 + 2))
    assert_equal(
        np.busday_offset('2011-10-03', 35, holidays=holidays),
        np.busday_offset('2011-10-03', 35 + 2))
    assert_equal(
        np.busday_offset('2011-10-03', 36, holidays=holidays),
        np.busday_offset('2011-10-03', 36 + 3))
    assert_equal(
        np.busday_offset('2011-10-03', 56, holidays=holidays),
        np.busday_offset('2011-10-03', 56 + 3))
    assert_equal(
        np.busday_offset('2011-10-03', 57, holidays=holidays),
        np.busday_offset('2011-10-03', 57 + 4))
    assert_equal(
        np.busday_offset('2011-10-03', 60, holidays=holidays),
        np.busday_offset('2011-10-03', 60 + 4))
    assert_equal(
        np.busday_offset('2011-10-03', 61, holidays=holidays),
        np.busday_offset('2011-10-03', 61 + 5))
    # Supplying a prebuilt calendar must agree with the raw holiday list.
    assert_equal(
        np.busday_offset('2011-10-03', 61, busdaycal=bdd),
        np.busday_offset('2011-10-03', 61 + 5))
    # A bigger backward jump across more than one week/holiday
    assert_equal(
        np.busday_offset('2012-01-03', -1, holidays=holidays),
        np.busday_offset('2012-01-03', -1 - 1))
    assert_equal(
        np.busday_offset('2012-01-03', -4, holidays=holidays),
        np.busday_offset('2012-01-03', -4 - 1))
    assert_equal(
        np.busday_offset('2012-01-03', -5, holidays=holidays),
        np.busday_offset('2012-01-03', -5 - 2))
    assert_equal(
        np.busday_offset('2012-01-03', -25, holidays=holidays),
        np.busday_offset('2012-01-03', -25 - 2))
    assert_equal(
        np.busday_offset('2012-01-03', -26, holidays=holidays),
        np.busday_offset('2012-01-03', -26 - 3))
    assert_equal(
        np.busday_offset('2012-01-03', -33, holidays=holidays),
        np.busday_offset('2012-01-03', -33 - 3))
    assert_equal(
        np.busday_offset('2012-01-03', -34, holidays=holidays),
        np.busday_offset('2012-01-03', -34 - 4))
    assert_equal(
        np.busday_offset('2012-01-03', -56, holidays=holidays),
        np.busday_offset('2012-01-03', -56 - 4))
    assert_equal(
        np.busday_offset('2012-01-03', -57, holidays=holidays),
        np.busday_offset('2012-01-03', -57 - 5))
    assert_equal(
        np.busday_offset('2012-01-03', -57, busdaycal=bdd),
        np.busday_offset('2012-01-03', -57 - 5))
    # Can't supply both a weekmask/holidays and busdaycal
    assert_raises(ValueError, np.busday_offset, '2012-01-03', -15,
                  weekmask='1111100', busdaycal=bdd)
    assert_raises(ValueError, np.busday_offset, '2012-01-03', -15,
                  holidays=holidays, busdaycal=bdd)
    # Roll with the holidays
    assert_equal(
        np.busday_offset('2011-12-25', 0,
            roll='forward', holidays=holidays),
        np.datetime64('2011-12-27'))
    assert_equal(
        np.busday_offset('2011-12-26', 0,
            roll='forward', holidays=holidays),
        np.datetime64('2011-12-27'))
    assert_equal(
        np.busday_offset('2011-12-26', 0,
            roll='backward', holidays=holidays),
        np.datetime64('2011-12-23'))
    # 'modified*' rolls reverse direction when they would leave the month.
    assert_equal(
        np.busday_offset('2012-02-27', 0,
            roll='modifiedfollowing',
            holidays=['2012-02-27', '2012-02-26', '2012-02-28',
                      '2012-03-01', '2012-02-29']),
        np.datetime64('2012-02-24'))
    assert_equal(
        np.busday_offset('2012-03-06', 0,
            roll='modifiedpreceding',
            holidays=['2012-03-02', '2012-03-03', '2012-03-01',
                      '2012-03-05', '2012-03-07', '2012-03-06']),
        np.datetime64('2012-03-08'))
def test_datetime_busday_holidays_count(self):
    """``np.busday_count`` against ``busday_offset`` and simple weekmasks."""
    holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',
                '2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',
                '2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30',
                '2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10']
    bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
    # Validate against busday_offset broadcast against
    # a range of offsets
    dates = np.busday_offset('2011-01-01', np.arange(366),
                             roll='forward', busdaycal=bdd)
    assert_equal(np.busday_count('2011-01-01', dates, busdaycal=bdd),
                 np.arange(366))
    # Returns negative value when reversed
    # -1 since the '2011-01-01' is not a busday
    assert_equal(np.busday_count(dates, '2011-01-01', busdaycal=bdd),
                 -np.arange(366) - 1)
    # 2011-12-31 is a saturday
    dates = np.busday_offset('2011-12-31', -np.arange(366),
                             roll='forward', busdaycal=bdd)
    # only the first generated date is in the future of 2011-12-31
    expected = np.arange(366)
    expected[0] = -1
    assert_equal(np.busday_count(dates, '2011-12-31', busdaycal=bdd),
                 expected)
    # Returns negative value when reversed
    expected = -np.arange(366) + 1
    expected[0] = 0
    assert_equal(np.busday_count('2011-12-31', dates, busdaycal=bdd),
                 expected)
    # Can't supply both a weekmask/holidays and busdaycal
    # NOTE(review): these call np.busday_offset, likely a copy-paste from
    # the offset test — np.busday_count was presumably intended. Both
    # raise ValueError for the conflicting arguments, so the assertion
    # still holds; confirm intent upstream.
    assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03',
                  weekmask='1111100', busdaycal=bdd)
    assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03',
                  holidays=holidays, busdaycal=bdd)
    # Number of Mondays in March 2011
    assert_equal(np.busday_count('2011-03', '2011-04', weekmask='Mon'), 4)
    # Returns negative value when reversed
    assert_equal(np.busday_count('2011-04', '2011-03', weekmask='Mon'), -4)
    # Half-open interval semantics around a weekend boundary.
    sunday = np.datetime64('2023-03-05')
    monday = sunday + 1
    friday = sunday + 5
    saturday = sunday + 6
    assert_equal(np.busday_count(sunday, monday), 0)
    assert_equal(np.busday_count(monday, sunday), -1)
    assert_equal(np.busday_count(friday, saturday), 1)
    assert_equal(np.busday_count(saturday, friday), 0)
def test_datetime_is_busday(self):
holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',
'2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',
'2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30',
'2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10',
'NaT']
bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
# Weekend/weekday tests
assert_equal(np.is_busday('2011-01-01'), False)
assert_equal(np.is_busday('2011-01-02'), False)
assert_equal(np.is_busday('2011-01-03'), True)
# All the holidays are not business days
assert_equal(np.is_busday(holidays, busdaycal=bdd),
np.zeros(len(holidays), dtype='?'))
def test_datetime_y2038(self):
    """Parsing around the 32-bit time_t rollover (2**31 seconds)."""
    msg = "no explicit representation of timezones available for " \
          "np.datetime64"
    # Test parsing on either side of the Y2038 boundary
    a = np.datetime64('2038-01-19T03:14:07')
    assert_equal(a.view(np.int64), 2**31 - 1)
    a = np.datetime64('2038-01-19T03:14:08')
    assert_equal(a.view(np.int64), 2**31)
    # Test parsing on either side of the Y2038 boundary with
    # a manually specified timezone offset (emits a deprecation-style
    # UserWarning about timezone handling).
    with pytest.warns(UserWarning, match=msg):
        a = np.datetime64('2038-01-19T04:14:07+0100')
        assert_equal(a.view(np.int64), 2**31 - 1)
    with pytest.warns(UserWarning, match=msg):
        a = np.datetime64('2038-01-19T04:14:08+0100')
        assert_equal(a.view(np.int64), 2**31)
    # Test parsing a date after Y2038
    a = np.datetime64('2038-01-20T13:21:14')
    assert_equal(str(a), '2038-01-20T13:21:14')
def test_isnat(self):
assert_(np.isnat(np.datetime64('NaT', 'ms')))
assert_(np.isnat(np.datetime64('NaT', 'ns')))
assert_(not np.isnat(np.datetime64('2038-01-19T03:14:07')))
assert_(np.isnat(np.timedelta64('NaT', "ms")))
assert_(not np.isnat(np.timedelta64(34, "ms")))
res = np.array([False, False, True])
for unit in ['Y', 'M', 'W', 'D',
'h', 'm', 's', 'ms', 'us',
'ns', 'ps', 'fs', 'as']:
arr = np.array([123, -321, "NaT"], dtype=f'<datetime64[{unit}]')
assert_equal(np.isnat(arr), res)
arr = np.array([123, -321, "NaT"], dtype=f'>datetime64[{unit}]')
assert_equal(np.isnat(arr), res)
arr = np.array([123, -321, "NaT"], dtype=f'<timedelta64[{unit}]')
assert_equal(np.isnat(arr), res)
arr = np.array([123, -321, "NaT"], dtype=f'>timedelta64[{unit}]')
assert_equal(np.isnat(arr), res)
def test_isnat_error(self):
# Test that only datetime dtype arrays are accepted
for t in np.typecodes["All"]:
if t in np.typecodes["Datetime"]:
continue
assert_raises(TypeError, np.isnat, np.zeros(10, t))
def test_isfinite_scalar(self):
assert_(not np.isfinite(np.datetime64('NaT', 'ms')))
assert_(not np.isfinite(np.datetime64('NaT', 'ns')))
assert_(np.isfinite(np.datetime64('2038-01-19T03:14:07')))
assert_(not np.isfinite(np.timedelta64('NaT', "ms")))
assert_(np.isfinite(np.timedelta64(34, "ms")))
@pytest.mark.parametrize('unit', ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',
                                  'us', 'ns', 'ps', 'fs', 'as'])
@pytest.mark.parametrize('dstr', ['<datetime64[%s]', '>datetime64[%s]',
                                  '<timedelta64[%s]', '>timedelta64[%s]'])
def test_isfinite_isinf_isnan_units(self, unit, dstr):
    '''check isfinite, isinf, isnan for all units of <M, >M, <m, >m dtypes
    '''
    # Array layout: two real values then one NaT.
    arr_val = [123, -321, "NaT"]
    arr = np.array(arr_val, dtype=(dstr % unit))
    pos = np.array([True, True, False])
    neg = np.array([False, False, True])
    false = np.array([False, False, False])
    # NaT is not finite, never infinite, and is the NaN-analogue.
    assert_equal(np.isfinite(arr), pos)
    assert_equal(np.isinf(arr), false)
    assert_equal(np.isnan(arr), neg)
def test_assert_equal(self):
assert_raises(AssertionError, assert_equal,
np.datetime64('nat'), np.timedelta64('nat'))
def test_corecursive_input(self):
# construct a co-recursive list
a, b = [], []
a.append(b)
b.append(a)
obj_arr = np.array([None])
obj_arr[0] = a
# At some point this caused a stack overflow (gh-11154). Now raises
# ValueError since the nested list cannot be converted to a datetime.
assert_raises(ValueError, obj_arr.astype, 'M8')
assert_raises(ValueError, obj_arr.astype, 'm8')
@pytest.mark.parametrize("shape", [(), (1,)])
def test_discovery_from_object_array(self, shape):
    """Unit discovery when casting object/str/bytes arrays to 'M8'."""
    arr = np.array("2020-10-10", dtype=object).reshape(shape)
    res = np.array("2020-10-10", dtype="M8").reshape(shape)
    # A bare date string discovers day resolution.
    assert res.dtype == np.dtype("M8[D]")
    assert_equal(arr.astype("M8"), res)
    arr[...] = np.bytes_("2020-10-10")  # try a numpy string type
    assert_equal(arr.astype("M8"), res)
    # Byte-string arrays discover the unit the same way.
    arr = arr.astype("S")
    assert_equal(arr.astype("S").astype("M8"), res)
@pytest.mark.parametrize("time_unit", [
    "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as",
    # compound units
    "10D", "2M",
])
def test_limit_symmetry(self, time_unit):
    """
    Dates should have symmetric limits around the unix epoch at +/-np.int64
    """
    epoch = np.datetime64(0, time_unit)
    latest = np.datetime64(np.iinfo(np.int64).max, time_unit)
    earliest = np.datetime64(-np.iinfo(np.int64).max, time_unit)

    # above should not have overflowed
    assert earliest < epoch < latest
@pytest.mark.parametrize("time_unit", [
    "Y", "M",
    pytest.param("W", marks=pytest.mark.xfail(reason="gh-13197")),
    "D", "h", "m",
    "s", "ms", "us", "ns", "ps", "fs", "as",
    pytest.param("10D", marks=pytest.mark.xfail(reason="similar to gh-13197")),
])
@pytest.mark.parametrize("sign", [-1, 1])
def test_limit_str_roundtrip(self, time_unit, sign):
    """
    Limits should roundtrip when converted to strings.

    This tests the conversion to and from npy_datetimestruct.
    """
    # TODO: add absolute (gold standard) time span limit strings
    limit = np.datetime64(np.iinfo(np.int64).max * sign, time_unit)

    # Convert to string and back. Explicit unit needed since the day and
    # week reprs are not distinguishable.
    limit_via_str = np.datetime64(str(limit), time_unit)
    assert limit_via_str == limit
def test_datetime_hash_nat(self):
    """Distinct NaT instances are unequal and hash differently.

    NOTE(review): relies on NaT hashing being identity-based in this
    numpy version — confirm against the hashing implementation.
    """
    # np.datetime64() with no arguments constructs a NaT scalar.
    nat1 = np.datetime64()
    nat2 = np.datetime64()
    assert nat1 is not nat2
    # NaT never compares equal, even to itself.
    assert nat1 != nat2
    assert hash(nat1) != hash(nat2)
@pytest.mark.parametrize('unit', ('Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us'))
def test_datetime_hash_weeks(self, unit):
    """Equal datetimes hash equally across units; neighbors don't collide."""
    dt = np.datetime64(2348, 'W')  # 2015-01-01
    dt2 = np.datetime64(dt, unit)
    _assert_equal_hash(dt, dt2)

    # One tick later in the finer unit must produce a different hash.
    dt3 = np.datetime64(int(dt2.astype(int)) + 1, unit)
    assert hash(dt) != hash(dt3)  # doesn't collide
@pytest.mark.parametrize('unit', ('h', 'm', 's', 'ms', 'us'))
def test_datetime_hash_weeks_vs_pydatetime(self, unit):
    """np.datetime64 hashes agree with the equivalent datetime.datetime."""
    dt = np.datetime64(2348, 'W')  # 2015-01-01
    dt2 = np.datetime64(dt, unit)
    pydt = dt2.astype(datetime.datetime)
    assert isinstance(pydt, datetime.datetime)
    _assert_equal_hash(pydt, dt2)
@pytest.mark.parametrize('unit', ('Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us'))
def test_datetime_hash_big_negative(self, unit):
    """Hash equality across units holds for dates far before the epoch."""
    dt = np.datetime64(-102894, 'W')  # -002-01-01
    dt2 = np.datetime64(dt, unit)
    _assert_equal_hash(dt, dt2)
# can only go down to "fs" before integer overflow
@pytest.mark.parametrize('unit', ('m', 's', 'ms', 'us', 'ns', 'ps', 'fs'))
def test_datetime_hash_minutes(self, unit):
    """Hash equality across sub-minute units for a minute-based datetime."""
    dt = np.datetime64(3, 'm')
    dt2 = np.datetime64(dt, unit)
    _assert_equal_hash(dt, dt2)
@pytest.mark.parametrize('unit', ('ns', 'ps', 'fs', 'as'))
def test_datetime_hash_ns(self, unit):
    """Hash equality across sub-nanosecond units; neighbors don't collide."""
    dt = np.datetime64(3, 'ns')
    dt2 = np.datetime64(dt, unit)
    _assert_equal_hash(dt, dt2)

    dt3 = np.datetime64(int(dt2.astype(int)) + 1, unit)
    assert hash(dt) != hash(dt3)  # doesn't collide
@pytest.mark.parametrize('wk', range(500000, 500010))  # 11552-09-04
@pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us'))
def test_datetime_hash_big_positive(self, wk, unit):
    """Hash equality across units holds for dates far after the epoch."""
    dt = np.datetime64(wk, 'W')
    dt2 = np.datetime64(dt, unit)
    _assert_equal_hash(dt, dt2)
def test_timedelta_hash_generic(self):
    """A generic-unit timedelta (no unit attached) is unhashable.

    NOTE(review): asserts ValueError specifically — confirm this matches
    the hashing implementation in the numpy version under test.
    """
    assert_raises(ValueError, hash, np.timedelta64(123))  # generic
@pytest.mark.parametrize('unit', ('Y', 'M'))
def test_timedelta_hash_year_month(self, unit):
    """Hash equality between year- and month-unit timedeltas."""
    td = np.timedelta64(45, 'Y')
    td2 = np.timedelta64(td, unit)
    _assert_equal_hash(td, td2)
@pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us'))
def test_timedelta_hash_weeks(self, unit):
    """Hash equality across units for a week-based timedelta."""
    td = np.timedelta64(10, 'W')
    td2 = np.timedelta64(td, unit)
    _assert_equal_hash(td, td2)

    # One tick later in the finer unit must produce a different hash.
    td3 = np.timedelta64(int(td2.astype(int)) + 1, unit)
    assert hash(td) != hash(td3)  # doesn't collide
@pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us'))
def test_timedelta_hash_weeks_vs_pydelta(self, unit):
    """np.timedelta64 hashes agree with the equivalent datetime.timedelta."""
    td = np.timedelta64(10, 'W')
    td2 = np.timedelta64(td, unit)
    pytd = td2.astype(datetime.timedelta)
    assert isinstance(pytd, datetime.timedelta)
    _assert_equal_hash(pytd, td2)
@pytest.mark.parametrize('unit', ('ms', 'us', 'ns', 'ps', 'fs', 'as'))
def test_timedelta_hash_ms(self, unit):
    """Hash equality across sub-millisecond units; neighbors don't collide."""
    td = np.timedelta64(3, 'ms')
    td2 = np.timedelta64(td, unit)
    _assert_equal_hash(td, td2)

    td3 = np.timedelta64(int(td2.astype(int)) + 1, unit)
    assert hash(td) != hash(td3)  # doesn't collide
@pytest.mark.parametrize('wk', range(500000, 500010))
@pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us'))
def test_timedelta_hash_big_positive(self, wk, unit):
    """Hash equality across units for very large timedeltas."""
    td = np.timedelta64(wk, 'W')
    td2 = np.timedelta64(td, unit)
    _assert_equal_hash(td, td2)
@pytest.mark.parametrize(
    "inputs, divisor, expected",
    [
        # timedelta objects / integer -> scaled timedelta objects
        (
            np.array(
                [datetime.timedelta(seconds=20), datetime.timedelta(days=2)],
                dtype="object",
            ),
            np.int64(2),
            np.array(
                [datetime.timedelta(seconds=10), datetime.timedelta(days=1)],
                dtype="object",
            ),
        ),
        # timedelta objects / np.timedelta64 -> dimensionless ratios
        (
            np.array(
                [datetime.timedelta(seconds=20), datetime.timedelta(days=2)],
                dtype="object",
            ),
            np.timedelta64(2, "s"),
            np.array(
                [10.0, 24.0 * 60.0 * 60.0],
                dtype="object",
            ),
        ),
        # scalar timedelta / array of timedelta objects -> ratios
        (
            datetime.timedelta(seconds=2),
            np.array(
                [datetime.timedelta(seconds=20), datetime.timedelta(days=2)],
                dtype="object",
            ),
            np.array(
                [1.0 / 10.0, 1.0 / (24.0 * 60.0 * 60.0)],
                dtype="object",
            ),
        ),
    ],
)
def test_true_divide_object_by_timedelta(
    self,
    inputs: np.ndarray | type[np.generic],
    divisor: np.ndarray | type[np.generic],
    expected: np.ndarray,
):
    """True division involving object arrays of timedeltas (gh-30025)."""
    # gh-30025
    results = inputs / divisor
    assert_array_equal(results, expected)
|
TestDateTime
|
python
|
networkx__networkx
|
networkx/classes/reportviews.py
|
{
"start": 44310,
"end": 44809
}
|
class ____(OutMultiEdgeView):
    """An EdgeView class for the edges of a MultiGraph."""

    __slots__ = ()

    # Companion view class returned when edge data is requested.
    dataview = MultiEdgeDataView

    def __len__(self):
        # Edges are enumerated rather than counted, so this is O(E).
        return sum(1 for e in self)

    def __iter__(self):
        # Yield each undirected multi-edge exactly once as (u, v, key).
        # ``seen`` records nodes whose adjacency has already been fully
        # emitted so the reverse (v, u) direction is skipped later.
        seen = {}
        for n, nbrs in self._nodes_nbrs():
            for nbr, kd in nbrs.items():
                if nbr not in seen:
                    # One (n, nbr, k) triple per parallel edge key.
                    for k, dd in kd.items():
                        yield (n, nbr, k)
            seen[n] = 1
        del seen
|
MultiEdgeView
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_expr.py
|
{
"start": 75416,
"end": 78437
}
|
class ____(Elemwise):
    """Reset the index of a Series or DataFrame"""

    _parameters = ["frame", "drop", "name"]
    _defaults = {"drop": False, "name": no_default}
    _keyword_only = ["drop", "name"]
    operation = M.reset_index
    # Filters may be pushed through this node during optimization.
    _filter_passthrough = True
    _preserves_partitioning_information = True

    @functools.cached_property
    def _kwargs(self) -> dict:
        # Keyword arguments forwarded to pandas' reset_index; ``name``
        # is only forwarded when the caller supplied one.
        kwargs = {"drop": self.drop}
        if self.operand("name") is not no_default:
            kwargs.update({"name": self.operand("name")})
        return kwargs

    def _divisions(self):
        # The reset index is a fresh RangeIndex, so divisions are unknown.
        return (None,) * (self.frame.npartitions + 1)

    def _simplify_up(self, parent, dependents):
        """Push Filter/Projection parents below this reset_index when safe."""
        if isinstance(parent, Filter) and self._filter_passthrough_available(
            parent, dependents
        ):
            # Columns referenced by all non-Filter consumers of this node.
            parents = [
                p().columns
                for p in dependents[self._name]
                if p() is not None and not isinstance(p(), Filter)
            ]
            predicate = None
            if not set(flatten(parents, list)).issubset(set(self.frame.columns)):
                # one of the filters is the Index
                name = self.operand("name") or self.frame._meta.index.name
                if name is no_default and self.frame._meta.index.name is None:
                    # reset_index materializes an unnamed index as "index"
                    name = "index"
                elif self.frame._meta.index.name is not None:
                    name = self.frame._meta.index.name
                # replace the projection of the former index with the actual index
                subs = Projection(self, name)
                predicate = parent.predicate.substitute(subs, Index(self.frame))
            elif self.frame.ndim == 1 and not self.operand("drop"):
                name = self.frame._meta.name
                # Avoid Projection since we are already a Series
                subs = Projection(self, name)
                predicate = parent.predicate.substitute(subs, self.frame)
            return self._filter_simplification(parent, predicate)
        if isinstance(parent, Projection):
            if self.frame.ndim == 1 and not self.drop:
                if isinstance(parent.operand("columns"), list):
                    # Don't bother, dimensionality changes are tricky here and
                    # potential improvement is tiny
                    return
                col = parent.operand("columns")
                if col in (self.name, "index", self.frame._meta.index.name):
                    # Projection targets the (former) index; cannot drop it.
                    return
                if all(
                    isinstance(d(), Projection) and d().operand("columns") == col
                    for d in dependents[self._name]
                ):
                    # Every consumer projects the same non-index column, so
                    # the former index is dead — switch to drop=True.
                    return type(self)(self.frame, True, self.name)
                return
            result = plain_column_projection(self, parent, dependents)
            if result is not None and set(result.columns) != set(result.frame.columns):
                # The projection removed the former index column; reflect
                # that by dropping it during reset_index as well.
                result = result.substitute_parameters({"drop": True})
            return result
|
ResetIndex
|
python
|
apache__airflow
|
providers/google/tests/unit/google/common/test_deprecated.py
|
{
"start": 1110,
"end": 10201
}
|
class ____:
    """Unit tests for AirflowDeprecationAdapter validation and messaging."""

    @mock.patch(f"{ADAPTER_CLASS_PATH}._validate_fields")
    @mock.patch(f"{ADAPTER_CLASS_PATH}._validate_removal_release")
    @mock.patch(f"{ADAPTER_CLASS_PATH}._validate_date")
    def test_init(self, mock_validate_date, mock_validate_removal_release, mock_validate_fields):
        """__init__ runs all three validators and stores their results."""
        mock_date = mock_validate_date.return_value
        mock_release = mock_validate_removal_release.return_value
        given_date = "August 22, 2024"
        given_release = None

        adapter = AirflowDeprecationAdapter(planned_removal_date=given_date)

        mock_validate_date.assert_called_once_with(given_date)
        mock_validate_removal_release.assert_called_once_with(given_release)
        mock_validate_fields.assert_called_once_with()
        assert adapter.planned_removal_date == mock_date
        assert adapter.planned_removal_release == mock_release

    @mock.patch(f"{ADAPTER_PATH}.datetime")
    def test_validate_date(self, mock_datetime):
        """_validate_date parses 'Month DD, YYYY' via strptime."""
        value = "August 22, 2024"
        expected_date = date(2024, 8, 22)
        mock_datetime.strptime.return_value.date.return_value = expected_date

        actual_date = AirflowDeprecationAdapter._validate_date(value)

        assert actual_date == expected_date
        mock_datetime.strptime.assert_called_once_with(value, "%B %d, %Y")

    @mock.patch(f"{ADAPTER_PATH}.datetime")
    def test_validate_date_none(self, mock_datetime):
        """_validate_date passes None through without parsing."""
        value = None

        actual_date = AirflowDeprecationAdapter._validate_date(value)

        assert actual_date is None
        assert not mock_datetime.strptime.called

    @pytest.mark.parametrize(
        "invalid_date",
        [
            "August 55, 2024",
            "NotAugust 22, 2024",
            "2024-08-22",
            "Not a date at all",
        ],
    )
    def test_validate_date_error(self, invalid_date):
        """Malformed dates raise ValueError with a format hint."""
        expected_error_message = (
            f"Invalid date '{invalid_date}'. "
            f"The expected format is 'Month DD, YYYY', for example 'August 22, 2024'."
        )
        with pytest.raises(ValueError, match=expected_error_message):
            AirflowDeprecationAdapter(planned_removal_date=invalid_date)

    @pytest.mark.parametrize(
        "release_string",
        [
            "apache-airflow==1.2.3",
            "apache-airflow-providers-test==1.2.3",
        ],
    )
    def test_validate_removal_release(self, release_string):
        """Well-formed release strings are returned unchanged."""
        assert AirflowDeprecationAdapter._validate_removal_release(release_string) == release_string

    def test_validate_removal_release_none(self):
        # None means "no planned removal release" and passes through.
        assert AirflowDeprecationAdapter._validate_removal_release(None) is None

    @pytest.mark.parametrize(
        "release_string",
        [
            "invalid-release-string",
            "apache-airflow==2",
            "apache-airflow-providers-test==2",
        ],
    )
    def test_validate_removal_version_error(self, release_string):
        """Release strings must match apache-airflow(-providers-<name>)==X.Y.Z."""
        msg = (
            f"\\`{release_string}\\` must follow the format "
            f"\\'apache-airflow\\(-providers-<name>\\)==<X\\.Y\\.Z>\\'."
        )
        with pytest.raises(ValueError, match=msg):
            AirflowDeprecationAdapter._validate_removal_release(release_string)

    @pytest.mark.parametrize(
        "kwargs",
        [
            {"planned_removal_date": "August 22, 2024"},
            {"planned_removal_release": "apache-airflow-providers-test==1.2.3"},
        ],
    )
    def test_validate_fields(self, kwargs):
        """Either removal field alone is accepted."""
        adapter = AirflowDeprecationAdapter(**kwargs)
        assert adapter is not None

    def test_validate_fields_error_both_removal_date_and_release(self):
        """Supplying both removal fields at once is rejected."""
        error_message = (
            "Only one of two parameters must be set: `planned_removal_date` or 'planned_removal_release'. "
            "You specified both."
        )
        with pytest.raises(ValueError, match=error_message):
            AirflowDeprecationAdapter(
                planned_removal_date="August 22, 2024",
                planned_removal_release="apache-airflow==1.2.3",
            )

    @pytest.mark.parametrize(
        ("entity", "expected_type"),
        [
            (AirflowDeprecationAdapter, "class"),
            (AirflowDeprecationAdapter.get_deprecated_msg, "function (or method)"),
        ],
    )
    def test_entity_type(self, entity, expected_type):
        """entity_type distinguishes classes from functions/methods."""
        assert AirflowDeprecationAdapter.entity_type(entity) == expected_type

    @pytest.mark.parametrize(
        ("module_path", "qualified_name", "_str", "expected_path"),
        [
            ("test-module", "test-qualified-name", "test-str", "test-module.test-qualified-name"),
            ("test-module", "", "test-str", "test-module"),
            ("", "test-qualified-name", "test-str", "test-str"),
            ("", "", "test-str", "test-str"),
        ],
    )
    def test_entity_path(self, module_path, qualified_name, _str, expected_path):
        """entity_path prefers module.qualname, falling back to str(entity)."""
        mock_entity = mock.MagicMock(
            __module__=module_path,
            __qualname__=qualified_name,
            __str__=mock.MagicMock(return_value=_str),
        )
        assert AirflowDeprecationAdapter.entity_path(mock_entity) == expected_path

    @pytest.mark.parametrize(
        ("planned_removal_date", "planned_removal_release", "expected_message"),
        [
            ("August 22, 2024", None, "after August 22, 2024"),
            (None, "apache-airflow==1.2.3", "since version apache-airflow==1.2.3"),
            (
                None,
                "apache-airflow-providers-test==1.2.3",
                "since version apache-airflow-providers-test==1.2.3",
            ),
            (None, None, "in the future"),
        ],
    )
    def test_sunset_message(self, planned_removal_date, planned_removal_release, expected_message):
        """sunset_message phrasing depends on which removal field is set."""
        adapter = AirflowDeprecationAdapter(
            planned_removal_date=planned_removal_date,
            planned_removal_release=planned_removal_release,
        )
        assert adapter.sunset_message() == expected_message

    @pytest.mark.parametrize(
        ("use_instead", "expected_message"),
        [
            (None, "There is no replacement."),
            ("replacement", "Please use `replacement` instead."),
            ("r1, r2", "Please use `r1`, `r2` instead."),
        ],
    )
    def test_replacement_message(self, use_instead, expected_message):
        """replacement_message formats zero, one, or many replacements."""
        adapter = AirflowDeprecationAdapter(use_instead=use_instead)
        assert adapter.replacement_message() == expected_message

    # NOTE(review): missing the ``test_`` prefix, so pytest will not
    # collect this despite its parametrize/patch decorators — confirm
    # whether it should be renamed to ``test_get_deprecated_msg``.
    @pytest.mark.parametrize(
        ("reason", "instructions"),
        [
            ("Test reason", "Test instructions"),
            ("Test reason", None),
            (None, "Test instructions"),
            (None, None),
        ],
    )
    @mock.patch(f"{ADAPTER_CLASS_PATH}.entity_type")
    @mock.patch(f"{ADAPTER_CLASS_PATH}.entity_path")
    @mock.patch(f"{ADAPTER_CLASS_PATH}.sunset_message")
    @mock.patch(f"{ADAPTER_CLASS_PATH}.replacement_message")
    def get_deprecated_msg(
        self,
        mock_replacement_message,
        mock_sunset_message,
        mock_entity_path,
        mock_entity_type,
        reason,
        instructions,
    ):
        """get_deprecated_msg assembles type, path, sunset, and replacement."""
        replacement = mock_replacement_message.return_value
        sunset = mock_sunset_message.return_value
        entity_path = mock_entity_path.return_value
        entity_type = mock_entity_type.return_value
        expected_message = (
            f"The {entity_type} `{entity_path}` is deprecated and will be removed {sunset}. {replacement}"
        )
        if reason:
            expected_message += f" The reason is: {reason}"
        if instructions:
            expected_message += f" Instructions: {instructions}"

        mock_wrapped = mock.MagicMock()
        adapter = AirflowDeprecationAdapter(reason=mock_wrapped, instructions=instructions)

        assert adapter.get_deprecated_msg(mock.MagicMock(), mock.MagicMock()) == expected_message
        mock_entity_type.assert_called_once_with(entity=mock_wrapped)
        mock_entity_path.assert_called_once_with(entity=mock_wrapped)
        mock_sunset_message.assert_called_once_with()
        mock_replacement_message.assert_called_once_with()
@mock.patch(f"{ADAPTER_PATH}.standard_deprecated")
def test_deprecated(mock_standard_deprecated):
    """The ``deprecated`` wrapper forwards every arg to the vendored one."""
    mock_planned_removal_date = mock.MagicMock()
    mock_planned_removal_release = mock.MagicMock()
    mock_use_instead = mock.MagicMock()
    mock_reason = mock.MagicMock()
    mock_instructions = mock.MagicMock()
    mock_adapter_cls = mock.MagicMock()
    kwargs = {
        "planned_removal_date": mock_planned_removal_date,
        "planned_removal_release": mock_planned_removal_release,
        "use_instead": mock_use_instead,
        "reason": mock_reason,
        "instructions": mock_instructions,
        "adapter_cls": mock_adapter_cls,
    }
    # Unknown extras must pass through untouched as well.
    extra_kwargs = {
        "test1": "test1",
        "test2": "test2",
    }
    extra_args = ["test3", "test4"]

    deprecated(*extra_args, **{**kwargs, **extra_kwargs})

    mock_standard_deprecated.assert_called_once_with(*extra_args, **{**kwargs, **extra_kwargs})
|
TestAirflowDeprecationAdapter
|
python
|
facebookresearch__faiss
|
benchs/bench_all_ivf/datasets_oss.py
|
{
"start": 678,
"end": 3655
}
|
class ____(faiss_datasets.Dataset):
    """Dataset whose database vectors are the IVF centroids of an index.

    Queries come from the wrapped dataset ``ds``; the database (and the
    train set) is the flat centroid table read from ``indexfile``.
    """

    def __init__(self, ds, indexfile):
        self.d = ds.d
        self.metric = ds.metric
        self.nq = ds.nq
        self.xq = ds.get_queries()

        # get the xb set: extract the quantizer's stored centroids.
        src_index = faiss.read_index(indexfile)
        src_quant = faiss.downcast_index(src_index.quantizer)
        centroids = faiss.vector_to_array(src_quant.xb)
        self.xb = centroids.reshape(-1, self.d)
        self.nb = self.nt = len(self.xb)

    def get_queries(self):
        return self.xq

    def get_database(self):
        return self.xb

    def get_train(self, maxtrain=None):
        # Centroid sets are small enough to train on in full, so
        # ``maxtrain`` is accepted for interface compatibility but ignored.
        return self.xb

    def get_groundtruth(self, k=100):
        # Brute-force ground truth over the centroid table.
        return faiss.knn(
            self.xq, self.xb, k,
            faiss.METRIC_L2 if self.metric == 'L2' else faiss.METRIC_INNER_PRODUCT
        )[1]
def load_dataset(dataset='deep1M', compute_gt=False, download=False):
    """Resolve a dataset name to a Dataset object.

    Recognized names: 'sift1M', 'bigann*', 'deep_centroids_<n>',
    'deep<size>', 'music-100', 'glove'. ``compute_gt`` is accepted for
    interface compatibility but not used here.
    """
    print("load data", dataset)

    if dataset == 'sift1M':
        return faiss_datasets.DatasetSIFT1M()

    elif dataset.startswith('bigann'):
        # e.g. 'bigann10M' -> 10 (millions); 'bigann1B' -> 1000.
        dbsize = 1000 if dataset == "bigann1B" else int(dataset[6:-1])
        return faiss_datasets.DatasetBigANN(nb_M=dbsize)

    elif dataset.startswith("deep_centroids_"):
        # Database = the <ncent> precomputed IVF centroids of deep1M.
        ncent = int(dataset[len("deep_centroids_"):])
        centdir = "/checkpoint/matthijs/bench_all_ivf/precomputed_clusters"
        return DatasetCentroids(
            faiss_datasets.DatasetDeep1B(nb=1000000),
            f"{centdir}/clustering.dbdeep1M.IVF{ncent}.faissindex"
        )

    elif dataset.startswith("deep"):
        # Parse the size suffix: 'k' thousands, 'M' millions, '1B' billion.
        szsuf = dataset[4:]
        if szsuf[-1] == 'M':
            dbsize = 10 ** 6 * int(szsuf[:-1])
        elif szsuf == '1B':
            dbsize = 10 ** 9
        elif szsuf[-1] == 'k':
            dbsize = 1000 * int(szsuf[:-1])
        else:
            assert False, "did not recognize suffix " + szsuf
        return faiss_datasets.DatasetDeep1B(nb=dbsize)

    elif dataset == "music-100":
        return faiss_datasets.DatasetMusic100()

    elif dataset == "glove":
        return faiss_datasets.DatasetGlove(download=download)

    else:
        assert False
#################################################################
# Evaluation
#################################################################
def evaluate_DI(D, I, gt):
    """Print recall@rank (1, 10, 100, ...) for search results ``I``.

    ``D`` (the distance matrix) is accepted for symmetry with the search
    API but is not used in the recall computation. ``gt`` supplies the
    true nearest neighbor in its first column.
    """
    nq = float(gt.shape[0])
    k = I.shape[1]
    rank = 1
    while rank <= k:
        hits = (I[:, :rank] == gt[:, :1]).sum()
        print("R@%d: %.4f" % (rank, hits / nq), end=' ')
        rank *= 10
def evaluate(xq, gt, index, k=100, endl=True):
    """Search ``index`` with queries ``xq``; print timing and recall@rank.

    Returns the (distances, ids) pair produced by the search. ``gt``
    supplies the true nearest neighbor in its first column; ``endl``
    controls whether a trailing newline is printed.
    """
    start = time.time()
    D, I = index.search(xq, k)
    elapsed = time.time() - start

    nq = xq.shape[0]
    print("\t %8.4f ms per query, " % (elapsed * 1000.0 / nq), end=' ')

    rank = 1
    while rank <= k:
        recall = (I[:, :rank] == gt[:, :1]).sum() / float(nq)
        print("R@%d: %.4f" % (rank, recall), end=' ')
        rank *= 10
    if endl:
        print()
    return D, I
|
DatasetCentroids
|
python
|
django-guardian__django-guardian
|
guardian/testapp/tests/test_managers.py
|
{
"start": 167,
"end": 1364
}
|
class ____(TestCase):
    """Deprecation shims: manager ``assign`` must delegate to ``assign_perm``."""

    def test_user_manager_assign(self):
        manager = UserObjectPermissionManager()
        manager.assign_perm = mock.Mock()
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            manager.assign("perm", "user", "object")
        # The call is forwarded unchanged and a DeprecationWarning is emitted.
        manager.assign_perm.assert_called_once_with("perm", "user", "object")
        self.assertTrue(issubclass(w[0].category, DeprecationWarning))
        self.assertIn(
            "UserObjectPermissionManager method 'assign' is being renamed to 'assign_perm'.", str(w[0].message)
        )

    def test_group_manager_assign(self):
        manager = GroupObjectPermissionManager()
        manager.assign_perm = mock.Mock()
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            manager.assign("perm", "group", "object")
        manager.assign_perm.assert_called_once_with("perm", "group", "object")
        self.assertTrue(issubclass(w[0].category, DeprecationWarning))
        # NOTE(review): the expected message names UserObjectPermissionManager
        # even though this exercises the group manager — confirm against the
        # deprecation shim whether the warning text is shared or this is stale.
        self.assertIn(
            "UserObjectPermissionManager method 'assign' is being renamed to 'assign_perm'.", str(w[0].message)
        )
TestManagers
|
python
|
pytorch__pytorch
|
test/inductor/test_cutlass_backend.py
|
{
"start": 4647,
"end": 85723
}
|
class ____(TestCase):
    def setUp(self):
        # Environments without CUDA/triton or on ROCm cannot run this backend.
        if not HAS_CUDA_AND_TRITON:
            self.skipTest("CUDA and triton are not available")
        if torch.version.hip:
            self.skipTest("CUTLASS backend is not supported on HIP")
        # The new inductor cache refresh mechanism
        # introduced with https://github.com/pytorch/pytorch/pull/122661
        # interacts badly with persistent subprocesses during
        # autotuning. So we need to disable automatic cache refresh
        # before calling setUp() on the parent class.
        old_disable_fresh_cache_envvar = os.environ.get(
            "INDUCTOR_TEST_DISABLE_FRESH_CACHE", ""
        )
        try:
            os.environ["INDUCTOR_TEST_DISABLE_FRESH_CACHE"] = "1"
            super().setUp()
        finally:
            # Restore the previous value even if the parent setUp fails.
            os.environ["INDUCTOR_TEST_DISABLE_FRESH_CACHE"] = (
                old_disable_fresh_cache_envvar
            )
        # Fixed seed so autotuning inputs are reproducible between runs.
        torch.random.manual_seed(1234)
    def tearDown(self):
        super().tearDown()
        # Drop inductor caches so compiled state does not leak between tests.
        clear_caches()
    def run_evt_test(self, model, op, shape, num_fusions=1):
        """Compile ``model`` and assert that exactly ``num_fusions`` CUTLASS
        epilogue fusions occurred and that results match eager execution.

        ``shape`` is (M, N); ``op`` is only used to generate the extra args.
        """
        M, N = shape
        a = torch.ones(M, N).cuda().half()
        b = torch.ones(N, N).cuda().half().t()
        extra_args = gen_args(op, (M, N))
        model = model.cuda()
        result = torch.compile(model)(a, b, extra_args)
        ref_result = model(a, b, extra_args)
        # Counter is incremented by inductor for each fused CUTLASS epilogue.
        self.assertEqual(
            torch._dynamo.utils.counters["inductor"]["cuda_epilogue_fusion_counter"],
            num_fusions,
        )
        torch.testing.assert_close(result, ref_result)
def test_check_paths(self):
cutlass_mock_imports_path = os.path.join(
os.path.dirname(torch.__file__),
"_inductor/codegen/cuda/cutlass_lib_extensions/cutlass_mock_imports",
)
cutlass_mock_cuda_path = os.path.join(cutlass_mock_imports_path, "cuda")
cutlass_mock_pydot_path = os.path.join(cutlass_mock_imports_path, "pydot")
cutlass_mock_scipy_path = os.path.join(cutlass_mock_imports_path, "scipy")
self.assertTrue(os.path.exists(cutlass_mock_imports_path))
self.assertTrue(os.path.exists(cutlass_mock_cuda_path))
self.assertTrue(os.path.exists(cutlass_mock_pydot_path))
self.assertTrue(os.path.exists(cutlass_mock_scipy_path))
    @unittest.skipIf(not SM90OrLater, "need sm_90")
    @mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
    def test_max_autotune_cutlass_threshold(self):
        """
        Make sure Cutlass GEMM threshold works as intended.

        The GEMM here is far smaller than ``cutlass_backend_min_gemm_size``,
        so CUTLASS must contribute no autotune choices at all.
        """
        def mm(a, b):
            return a @ b
        a = torch.randn(100, 10).cuda().half()
        b = torch.randn(100, 10).cuda().half().t()
        with config.patch(
            {
                "max_autotune": True,
                "max_autotune_gemm_backends": "CUTLASS",
                "compile_threads": 4,
                # threshold deliberately larger than this problem size
                "cuda.cutlass_backend_min_gemm_size": 100000,
                "cuda.cutlass_max_profiling_configs": 2,
            }
        ):
            with mock.patch(
                "torch._inductor.kernel.mm.autotune_select_algorithm",
                wraps=select_no_algorithm,
            ) as sa:
                # With no choices available, compilation must fail with
                # NoValidChoicesError (wrapped in InductorError).
                with self.assertRaisesRegex(InductorError, r".*NoValidChoicesError.*"):
                    _ = torch.compile(mm, dynamic=False)(a, b)
                args, _ = sa.call_args
                _, choices, _, __ = args
                self.assertEqual(choices, [])
    @mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
    def test_import_cutlass(self):
        # try_import_cutlass must succeed and make the cutlass python
        # packages importable as a side effect.
        from torch._inductor.codegen.cuda.cutlass_utils import try_import_cutlass
        self.assertTrue(try_import_cutlass())
        import cutlass_cppgen  # type: ignore[import-not-found] # noqa: F401
        import cutlass_library  # noqa: F401
    def test_cutlass_key(self):
        # The codecache key used to invalidate compiled CUTLASS artifacts
        # must be computable once cutlass imports are available.
        from torch._inductor.codegen.cuda.cutlass_utils import try_import_cutlass
        self.assertTrue(try_import_cutlass())
        from torch._inductor.codecache import cutlass_key
        self.assertIsNotNone(cutlass_key())
    @unittest.skipIf(not SM90OrLater, "need sm_90")
    @mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
    def test_cutlass_backend_subproc_mm(self):
        """
        Test autotune_in_subproc works for mm.

        NOTE: Shape like M, N, K = 100, 100, 10 would get filtered out due to
        alignment mismatch.
        """
        M, N, K = 4096, 2048, 25728
        a = torch.randn(M, K).cuda().half()
        # column-major B via transpose, as CUTLASS prefers
        b = torch.randn(N, K).cuda().half().t()
        with config.patch(
            {
                "max_autotune": True,
                "autotune_in_subproc": True,
                "max_autotune_gemm_backends": "CUTLASS",
                "compile_threads": 4,
                "cuda.cutlass_max_profiling_configs": 4,
            }
        ):
            Y_compiled = torch.compile(torch.mm)(a, b)
            Y = torch.mm(a, b)
            torch.testing.assert_close(Y_compiled, Y)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
@parametrize("dtype", (torch.float16, torch.bfloat16))
def test_cutlass_backend_subproc_addmm(self, dtype):
"""
Test autotune_in_subproc works for addmm.
"""
M, N, K = 4096, 2048, 25728
dtype = torch.float16
a = torch.randn(M, K, dtype=dtype).cuda()
b = torch.randn(N, K, dtype=dtype).cuda().t()
x_shapes = [
(M, N),
(M, 1),
(1, N),
(N,),
]
alpha = 2.0
beta = 0.4
with config.patch(
{
"max_autotune": True,
"autotune_in_subproc": True,
"max_autotune_gemm_backends": "CUTLASS",
"compile_threads": 4,
"cuda.cutlass_max_profiling_configs": 4,
}
):
for x_shape in x_shapes:
torch._dynamo.reset()
clear_caches()
x = torch.randn(x_shape).cuda().to(dtype)
Y_compiled = torch.compile(torch.addmm)(x, a, b, alpha=alpha, beta=beta)
Y = torch.addmm(x, a, b, alpha=alpha, beta=beta)
torch.testing.assert_close(Y_compiled, Y)
    @unittest.skipIf(not SM90OrLater, "need sm_90")
    @mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
    def test_cutlass_backend_subproc_bmm(self):
        """
        Test autotune_in_subproc works for bmm.
        """
        B, M, N, K = 10, 4096, 2048, 25728
        a = torch.randn(B, M, K).cuda().half()
        # batch of column-major matrices via permute
        b = torch.randn(B, N, K).cuda().half().permute(0, 2, 1)
        with config.patch(
            {
                "max_autotune": True,
                "autotune_in_subproc": True,
                "max_autotune_gemm_backends": "CUTLASS",
                "compile_threads": 4,
                "cuda.cutlass_max_profiling_configs": 4,
            }
        ):
            Y_compiled = torch.compile(torch.bmm)(a, b)
            Y = torch.bmm(a, b)
            torch.testing.assert_close(Y_compiled, Y)
    @unittest.skipIf(not SM90OrLater, "need sm_90")
    @parametrize("dynamic", (False, True))
    @mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
    def test_diff_matmul_share_same_kernel(self, dynamic):
        """Two matmuls sharing the lhs should compile to the same CUTLASS
        kernel, referenced twice in the generated code."""
        max_autotune_gemm_backends = "CUTLASS"
        class MyModel(torch.nn.Module):
            def __init__(self):
                super().__init__()
            def forward(self, a, b, c):
                ab = a @ b
                ac = a @ c
                return ab, ac
        model = MyModel()
        a = torch.randn(128, 16).cuda().half()
        b = torch.randn(128, 16).cuda().half().t()
        c = torch.randn(512, 16).cuda().half().t()
        with config.patch(
            {
                "max_autotune": True,
                "autotune_in_subproc": True,
                "max_autotune_gemm_backends": max_autotune_gemm_backends,
                "cuda.cutlass_max_profiling_configs": 1,
            }
        ):
            from torch._inductor.utils import run_and_get_code
            compiled = torch.compile(model, dynamic=dynamic)
            expected = model(a, b, c)
            actual, codes = run_and_get_code(compiled, a, b, c)
            torch.testing.assert_close(actual, expected)
            # find the generated kernel name, then require exactly two uses
            pattern = r"cutlass_[\w]+\.cutlass_[\w]+"
            match = re.search(pattern, codes[0])
            self.assertTrue(match is not None)
            cutlass_kernel = match.group()
            FileCheck().check_count(
                cutlass_kernel,
                2,
            ).run(codes[0])
    @unittest.skipIf(not SM90OrLater, "need sm_90")
    @mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
    def test_number_mm_precompiles(self):
        """With multiple swizzle options (hence >1 choices) but a single GEMM,
        only one precompilation should be recorded."""
        torch._dynamo.utils.counters.clear()
        max_autotune_gemm_backends = "CUTLASS"
        class MyModel(torch.nn.Module):
            def __init__(self):
                super().__init__()
            def forward(self, a, b, c):
                ab = a @ b
                return ab
        model = MyModel()
        a = torch.randn(128, 16).cuda().half()
        b = torch.randn(128, 16).cuda().half().t()
        c = torch.randn(512, 16).cuda().half().t()
        with config.patch(
            {
                "max_autotune": True,
                "autotune_in_subproc": True,
                "max_autotune_gemm_backends": max_autotune_gemm_backends,
                "cuda.cutlass_max_profiling_configs": 1,
                "cuda.cutlass_max_profiling_swizzle_options": [
                    1,
                    2,
                    4,
                ],  # guarantees > 1 choices
                # caching disabled so the precompile counter is meaningful
                "fx_graph_cache": False,
                "fx_graph_remote_cache": False,
                "autotune_local_cache": False,
            }
        ):
            from torch._inductor.utils import run_and_get_code
            compiled = torch.compile(model, dynamic=True)
            expected = model(a, b, c)
            actual, codes = run_and_get_code(compiled, a, b, c)
            torch.testing.assert_close(actual, expected)
            self.assertTrue(re.search(r"cutlass_.*.cutlass_.*", codes[0]))
            # Verifies expected number of precompilations
            self.assertEqual(
                torch._dynamo.utils.counters["inductor"][
                    "select_algorithm_num_precompiles"
                ],
                1,
            )
    # NOTE: right now tuned_mm doesn't support cutlass 2x, which is used by A100
    @unittest.skipIf(not SM90OrLater, "need sm_90")
    @parametrize("dynamic", (False, True))
    @parametrize("use_aoti", (False, True))
    @parametrize("dtype", (torch.float16, torch.bfloat16))
    @mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
    def test_max_autotune_cutlass_backend_regular_mm(
        self,
        dynamic: bool,
        max_autotune_gemm_backends: str = "CUTLASS",
        use_aoti: bool = False,
        dtype: torch.dtype = torch.float16,
    ):
        """
        Main test for mm.
        """
        # M, N, K
        shapes = [
            (128, 128, 16),
            (1024, 1024, 256),
        ]
        # M, N, K
        # second shape only exercised in dynamic mode (would recompile otherwise)
        shapes = shapes if dynamic else shapes[0:1]
        class MyModel(torch.nn.Module):
            def forward(self, a, b):
                return a @ b
        model = MyModel().cuda()
        inputs = [
            (torch.randn(M, K).cuda().to(dtype), torch.randn(K, N).cuda().to(dtype))
            for (M, N, K) in shapes
        ]
        dynamic_shapes = (
            {
                "a": {0: Dim.DYNAMIC, 1: Dim.DYNAMIC},
                "b": {0: Dim.DYNAMIC, 1: Dim.DYNAMIC},
            }
            if dynamic
            else None
        )
        with (
            config.patch(
                {
                    "max_autotune": True,
                    "max_autotune_gemm_backends": max_autotune_gemm_backends,
                    "cuda.cutlass_max_profiling_configs": 2,
                }
            ),
            # dynamic mode must not recompile between the two shapes
            dynamo_config.patch({"error_on_recompile": dynamic}),
        ):
            expected = [model(*input) for input in inputs]
            if use_aoti:
                actual = AOTIRunnerUtil.run_multiple(
                    model, inputs, dynamic_shapes=dynamic_shapes
                )
            else:
                # NOTE(review): compiles with dynamic=True even when the
                # parametrized ``dynamic`` is False (the addmm/bmm variants
                # pass dynamic=dynamic) — confirm this is intentional.
                compiled_model = torch.compile(model, dynamic=True)
                actual = [compiled_model(*input) for input in inputs]
            torch.testing.assert_close(actual, expected)
    @unittest.skipIf(not SM90OrLater, "need sm_90")
    @parametrize("dynamic", (False, True))
    @parametrize("use_aoti", (False, True))
    @parametrize("dtype", (torch.float8_e4m3fn,))
    @mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
    def test_max_autotune_cutlass_backend_fp8_scaled_mm(
        self,
        dynamic: bool,
        max_autotune_gemm_backends: str = "CUTLASS",
        use_aoti: bool = False,
        dtype: torch.dtype = torch.float16,
    ):
        """
        Main test for mm.

        ``dtype`` here is the fp8 quantization dtype (parametrized with
        float8_e4m3fn); output is bf16 as required by row-wise scaling.
        """
        # M, N, K
        shapes = [
            (128, 128, 16),
            (1024, 1024, 256),
        ]
        # M, N, K
        shapes = shapes if dynamic else shapes[0:1]
        inputs = []
        for shape in shapes:
            M, N, K = shape
            output_dtype = torch.bfloat16
            device = "cuda"
            x = torch.randn(M, K, dtype=output_dtype, device=device)
            w = torch.randn(N, K, dtype=output_dtype, device=device)
            # quantize weight (prior to inference)
            w_fp8, w_inverse_scale = _quantize_rowwise(w, dtype)
            w_t_fp8 = w_fp8.t()
            w_inverse_scale = w_inverse_scale.t()  # scale_b should be (1, N)
            # quantize input x
            x_fp8, x_inverse_scale = _quantize_rowwise(x, dtype)
            inputs.append((x_fp8, x_inverse_scale, w_t_fp8, w_inverse_scale))
        class MyModel(torch.nn.Module):
            def forward(self, x_fp8, x_inverse_scale, w_t_fp8, w_inverse_scale):
                y = torch._scaled_mm(
                    x_fp8,
                    w_t_fp8,
                    x_inverse_scale,
                    w_inverse_scale,
                    None,
                    out_dtype=torch.bfloat16,
                    use_fast_accum=False,
                )
                return y
        dynamic_shapes = (
            {
                "x_fp8": {0: Dim.DYNAMIC, 1: Dim.DYNAMIC},
                "x_inverse_scale": {0: Dim.DYNAMIC, 1: 1},
                "w_t_fp8": {0: Dim.DYNAMIC, 1: Dim.DYNAMIC},
                "w_inverse_scale": {0: 1, 1: Dim.DYNAMIC},
            }
            if dynamic
            else None
        )
        model = MyModel().cuda()
        with (
            config.patch(
                {
                    "max_autotune": True,
                    "max_autotune_gemm_backends": max_autotune_gemm_backends,
                    "cuda.cutlass_max_profiling_configs": 2,
                    "benchmark_epilogue_fusion": False,  # EVT doesn't support benchmark fusion yet
                    "cuda.cutlass_tma_only": True,
                }
            ),
            dynamo_config.patch({"error_on_recompile": dynamic}),
        ):
            expected = [model(*input) for input in inputs]
            if use_aoti:
                actual = AOTIRunnerUtil.run_multiple(
                    model, inputs, dynamic_shapes=dynamic_shapes
                )
            else:
                compiled_model = torch.compile(model, dynamic=True)
                actual = [compiled_model(*input) for input in inputs]
            # loose tolerances: fp8 quantization error dominates
            torch.testing.assert_close(actual, expected, rtol=1e-2, atol=0.05)
    @unittest.skipIf(not SM90OrLater, "need sm_90")
    @parametrize("dynamic", (False, True))
    @parametrize("use_aoti", (False, True))
    @parametrize("dtype", (torch.float16, torch.bfloat16))
    @mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
    def test_max_autotune_cutlass_backend_addmm(
        self,
        dynamic: bool,
        max_autotune_gemm_backends: str = "CUTLASS",
        use_aoti: bool = False,
        dtype: torch.dtype = torch.float16,
    ):
        """
        Main test for addmm.

        Covers every broadcastable bias shape: (M, N), (M, 1), (1, N), (N,).
        """
        class MyModel(torch.nn.Module):
            def forward(self, x, a, b):
                return torch.addmm(x, a, b)
        model = MyModel().cuda()
        # M, N, K
        shapes = [
            (128, 128, 16),
            (512, 512, 128),
        ]
        shapes = shapes[0:1] if not dynamic else shapes
        # bias-shape builders, parameterized over (M, N)
        x_shapes = [
            lambda M, N: (M, N),
            lambda M, N: (M, 1),
            lambda M, N: (1, N),
            lambda M, N: (N,),
        ]
        for x_shape in x_shapes:
            # fresh autotune state per bias shape
            torch._dynamo.reset()
            clear_caches()
            inputs = [
                (
                    torch.randn(x_shape(M, N)).cuda().to(dtype),
                    torch.randn(M, K).cuda().to(dtype),
                    torch.randn(N, K).cuda().to(dtype).t(),
                )
                for (M, N, K) in shapes
            ]
            dynamic_shapes = (
                {
                    # broadcast dims (size 1) must stay static in the spec
                    "x": {
                        i: v
                        for i, v in enumerate(x_shape(Dim.DYNAMIC, Dim.DYNAMIC))
                        if v != 1
                    },
                    "a": {0: Dim.DYNAMIC, 1: Dim.DYNAMIC},
                    "b": {0: Dim.DYNAMIC, 1: Dim.DYNAMIC},
                }
                if dynamic
                else None
            )
            with (
                config.patch(
                    {
                        "max_autotune": True,
                        "max_autotune_gemm_backends": max_autotune_gemm_backends,
                        "cuda.cutlass_max_profiling_configs": 2,
                    }
                ),
                dynamo_config.patch({"error_on_recompile": dynamic}),
            ):
                expected = [model(*input) for input in inputs]
                if use_aoti:
                    actual = AOTIRunnerUtil.run_multiple(
                        model, inputs, dynamic_shapes=dynamic_shapes
                    )
                else:
                    compiled_model = torch.compile(model, dynamic=dynamic)
                    actual = [compiled_model(*input) for input in inputs]
                torch.testing.assert_close(actual, expected)
    @unittest.skipIf(not SM90OrLater, "need sm_90")
    @parametrize("dynamic", (False, True))
    @parametrize("use_aoti", (False, True))
    @parametrize("dtype", (torch.float16, torch.bfloat16))
    @parametrize("use_expand", (False, True))
    @mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
    def test_max_autotune_cutlass_backend_bmm(
        self,
        dynamic: bool,
        use_aoti: bool = False,
        max_autotune_gemm_backends: str = "CUTLASS",
        dtype: torch.dtype = torch.float16,
        use_expand: bool = False,
    ):
        """
        Main test for bmm.

        ``use_expand`` builds A as a zero-stride broadcast view to exercise
        non-contiguous batch inputs.
        """
        class MyModel(torch.nn.Module):
            def forward(self, a, b):
                return torch.bmm(a, b)
        model = MyModel().cuda()
        # B, M, N, K
        shapes = [
            (10, 4096, 2048, 25728),
            (20, 2048, 1024, 12864),
        ]
        shapes = shapes[0:1] if not dynamic else shapes
        inputs = []
        for B, M, N, K in shapes:
            if use_expand:
                # Create A using unsqueeze and expand
                A = torch.randn(M, K).cuda().to(dtype).unsqueeze(0).expand(B, -1, -1)
            else:
                # Original method
                A = torch.randn(B, M, K).cuda().to(dtype)
            B_tensor = torch.randn(B, N, K).cuda().to(dtype).permute(0, 2, 1)
            inputs.append((A, B_tensor))
        dynamic_shapes = (
            {
                "a": {0: Dim.DYNAMIC, 1: Dim.DYNAMIC, 2: Dim.DYNAMIC},
                "b": {0: Dim.DYNAMIC, 1: Dim.DYNAMIC, 2: Dim.DYNAMIC},
            }
            if dynamic
            else None
        )
        with config.patch(
            {
                "max_autotune": True,
                "max_autotune_gemm_backends": max_autotune_gemm_backends,
                "cuda.cutlass_max_profiling_configs": 2,
            }
        ):
            expected = [model(*input) for input in inputs]
            if use_aoti:
                actual = AOTIRunnerUtil.run_multiple(
                    model, inputs, dynamic_shapes=dynamic_shapes
                )
            else:
                compiled_model = torch.compile(model, dynamic=dynamic)
                actual = [compiled_model(*input) for input in inputs]
            torch.testing.assert_close(actual, expected)
    @unittest.skipIf(not SM90OrLater, "need sm_90")
    @mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
    def test_max_autotune_cutlass_backend_regular_mm_streamk(
        self, dynamic: bool = False, max_autotune_gemm_backends: str = "CUTLASS"
    ):
        """
        Make sure autotuning mm in sub processes work without crashes.
        """
        compiled_model = torch.compile(torch.mm, dynamic=dynamic)
        with config.patch(
            {
                "max_autotune": True,
                "autotune_in_subproc": True,
                "max_autotune_gemm_backends": max_autotune_gemm_backends,
                "cuda.cutlass_max_profiling_configs": 2,
                "cuda.cutlass_op_allowlist_regex": "stream_k",  # only stream-k GEMM Kernels
            }
        ):
            # shapes range from small to large to exercise stream-k splits
            for M, K, N in (
                (128, 16, 128),
                (1024, 256, 1024),
                (
                    16384,
                    1024,
                    16384,
                ),
                (
                    16384,
                    1408,
                    16384,
                ),
            ):
                a = torch.randn(M, K).cuda().half()
                b = torch.randn(N, K).cuda().half().t()
                Y_compiled = compiled_model(a, b)
                Y = torch.mm(a, b)
                # we need relaxed numerical limits due to the sheer size of the
                # matmuls involved. Many small addition differences add up.
                torch.testing.assert_close(Y_compiled, Y, atol=0.01, rtol=0.01)
    @unittest.skipIf(not SM90OrLater, "need sm_90")
    def test_streamk_with_dynamic(
        self,
    ):
        """
        Test streamk with dynamic=True. Streamk should be filtered out.

        Problem is streamk can have a different workspace depending on the
        shape. Without a correct workspace, the kernel will fail at runtime.
        """
        a = torch.randn(128, 16).cuda().half()
        b = torch.randn(128, 16).cuda().half().t()
        with config.patch(
            {
                "max_autotune": True,
                "max_autotune_gemm_backends": "CUTLASS",
                "cuda.cutlass_op_allowlist_regex": "stream_k",  # only stream-k GEMM Kernels
            }
        ):
            # with only stream-k allowed and stream-k filtered for dynamic
            # shapes, no valid choice remains -> NoValidChoicesError
            with self.assertRaisesRegex(InductorError, r".*NoValidChoicesError.*"):
                _ = torch.compile(torch.mm, dynamic=True)(a, b)
    @unittest.skipIf(not SM90OrLater, "need sm_90")
    def test_streamk_with_static(
        self,
    ):
        """
        Test streamk with dynamic=False. Streamk should work.
        """
        shapes = [
            (18432, 3072, 6144),
            (9216, 3072, 6144),
            (4608, 3072, 6144),
        ]
        compiled_model = torch.compile(torch.mm, dynamic=False)
        for shape in shapes:
            M, N, K = shape
            a = torch.randn(M, K).cuda().half()
            b = torch.randn(N, K).cuda().half().t()
            with config.patch(
                {
                    "max_autotune": True,
                    "max_autotune_gemm_backends": "CUTLASS",
                    "cuda.cutlass_max_profiling_configs": 1,
                    "cuda.cutlass_op_allowlist_regex": "stream_k",  # only stream-k GEMM Kernels
                }
            ):
                # compile succeeding is the assertion; result is discarded
                _ = compiled_model(a, b)
    def _test_max_autotune_cutlass_backend_epilogue_fusion(
        self,
        dynamic: bool = False,
        max_autotune_gemm_backends: str = "CUTLASS",
        fp16=True,
        expected_fuse_count=0,
        mm: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
        batch_size: Optional[int] = None,
    ):
        """Shared driver: compile ``mm``, assert the epilogue-fusion counter
        equals ``expected_fuse_count`` and results match eager (loose tol)."""
        # Note: The ops that are available
        # also depend on the alignment of the shapes
        # so if these shapes don't all align to at least 8 elements
        # it can happen that no Cutlass 3.x op is available
        # that allows fusions
        if batch_size is None:
            a = torch.randn(256, 32).cuda()
            b = torch.randn(256, 32).cuda().t()
        else:
            a = torch.randn(batch_size, 256, 32).cuda()
            b = torch.randn(batch_size, 256, 32).cuda().permute(0, 2, 1)
        if fp16:
            a = a.half()
            b = b.half()
        with config.patch(
            {
                "max_autotune": True,
                "autotune_in_subproc": True,
                "max_autotune_gemm_backends": max_autotune_gemm_backends,
                "cuda.cutlass_max_profiling_configs": 4,
                "cuda.version": "12.2",  # required to enable the Kernels we need
            }
        ):
            counters["inductor"]["cuda_epilogue_fusion_counter"] = 0
            assert mm is not None
            Y_compiled = torch.compile(mm, dynamic=dynamic)(a, b)
            Y = mm(a, b)
            actual_count = counters["inductor"]["cuda_epilogue_fusion_counter"]
            assert actual_count == expected_fuse_count, (
                f"Expected fuse count of {expected_fuse_count} but got {actual_count}"
            )
            torch.testing.assert_close(Y_compiled, Y, atol=1e-2, rtol=1e-2)
@unittest.skipIf(not SM90OrLater, "need sm_90")
def test_max_autotune_cutlass_backend_simple_fusion_fp16_fp32acc(self):
def mm(a, b):
return (a @ b) * 3.0
self._test_max_autotune_cutlass_backend_epilogue_fusion(
fp16=True, expected_fuse_count=0, mm=mm
)
@unittest.skipIf(not SM90OrLater, "need sm_90")
def test_max_autotune_cutlass_backend_chained_fusion_fp16_fp32acc(self):
def mm(a, b):
return (a @ b) * 3.3 - 1.234
self._test_max_autotune_cutlass_backend_epilogue_fusion(
fp16=True, expected_fuse_count=0, mm=mm
)
@unittest.skipIf(not SM90OrLater, "need sm_90")
def test_max_autotune_cutlass_backend_relu_fusion_fp16_fp32acc(self):
def mm(a, b):
return torch.nn.functional.relu((a @ b) * 3.3 - 1.234)
# The pointwise ops seem to be pre-fused into a single Pointwise
self._test_max_autotune_cutlass_backend_epilogue_fusion(
fp16=True, expected_fuse_count=0, mm=mm
)
@unittest.skipIf(not SM90OrLater, "need sm_90")
def test_max_autotune_cutlass_backend_relu6_fusion_fp16_fp32acc(self):
def mm(a, b):
return torch.clamp(torch.nn.functional.relu(a @ b), max=6.0)
# The pointwise ops seem to be pre-fused into a single Pointwise
self._test_max_autotune_cutlass_backend_epilogue_fusion(
fp16=True, expected_fuse_count=0, mm=mm
)
@unittest.skipIf(not SM90OrLater, "need sm_90")
def test_max_autotune_cutlass_backend_no_fusion_dtype_mismatch(self):
def mm(a, b):
# this should not be fused, since the output dtype is different from the matmul dtype
return (a @ b).to(torch.float32) * 0.00001
self._test_max_autotune_cutlass_backend_epilogue_fusion(
fp16=True, expected_fuse_count=0, mm=mm
)
@unittest.skipIf(not SM90OrLater, "need sm_90")
def test_max_autotune_cutlass_backend_shape_dependent_normalization_fusion(self):
def mm(a, b):
return (a @ b) / b.size(1)
self._test_max_autotune_cutlass_backend_epilogue_fusion(
fp16=True, expected_fuse_count=0, mm=mm
)
    # TODO: Enable dynamic test cases when dynamic support is added.
    @unittest.skipIf(not SM90OrLater, "need sm_90")
    @parametrize("dynamic", (False,))
    @mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
    def test_max_autotune_cutlass_backend_int_mm(
        self, dynamic: bool, max_autotune_gemm_backends: str = "CUTLASS"
    ):
        """
        Make sure autotuning mm in sub processes work without crashes.
        """
        def mm(a, b):
            return torch._int_mm(a, b)
        # CUTLASS only supports row-major/column-major combination of
        # layouts for this operation, thus the transpose of tensor b
        # (on the other side, Triton at the moment doesn't support
        # this combination, so it's excluded from the test). Also,
        # for CUTLASS alignment requirements, number of columns in
        # both tensors has to be divisible by 16.
        a = torch.randint(0, 5, (100, 16), dtype=torch.int8).cuda()
        b = torch.randint(0, 5, (32, 16), dtype=torch.int8).cuda().T
        with config.patch(
            {
                "max_autotune": True,
                "autotune_in_subproc": True,
                "max_autotune_gemm_backends": max_autotune_gemm_backends,
                "cuda.cutlass_max_profiling_configs": 2,
            }
        ):
            Y_compiled = torch.compile(mm, dynamic=dynamic)(a, b)
            Y = mm(a, b)
            torch.testing.assert_close(Y_compiled, Y)
    @mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
    @unittest.skipIf(not SM90OrLater, "need sm_90")
    def test_force_cutlass_backend_aoti_dynamic(self):
        """AOTI export of a plain matmul with a dynamic_shapes spec must
        produce correct results under the CUTLASS backend."""
        class MyModel(torch.nn.Module):
            def forward(self, x, w):
                return x @ w
        with config.patch(
            {
                "max_autotune": True,
                "autotune_in_subproc": False,
                "max_autotune_gemm_backends": "CUTLASS",
                "cuda.cutlass_max_profiling_configs": 2,
            }
        ):
            model = MyModel()
            M, N, K = 16, 32, 64
            # NOTE(review): the spec maps dims to plain ints (M, K, N) rather
            # than Dim objects — confirm this exercises the intended dynamic
            # path of AOTIRunnerUtil.
            dynamic_shapes = {
                "x": {0: M, 1: K},
                "w": {0: K, 1: N},
            }
            x = torch.randn(M, K).cuda().half()
            w = torch.randn(N, K).cuda().half().t()
            actual = AOTIRunnerUtil.run(
                model,
                (x, w),
                dynamic_shapes=dynamic_shapes,
            )
            expected = model(x, w)
            torch.testing.assert_close(expected, actual)
    @mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
    @unittest.skipIf(not SM90OrLater, "need sm_90")
    def test_force_cutlass_backend_aoti_cexpr_codegen(self):
        """AOTI + CUTLASS with a dynamic first dim whose strides/sizes require
        C-expression codegen (reshape / as_strided on a dynamic shape)."""
        class MyModel(torch.nn.Module):
            def forward(self, x, w):
                x0, x1 = x.shape
                # reshape + slice forces symbolic-size arithmetic in codegen
                x = x.reshape(x0 // 2, x1, 2)[:, :, 0]
                x = x.contiguous()
                x = x.as_strided(x.size(), x.stride())
                return x @ w
        with config.patch(
            {
                "max_autotune": True,
                "autotune_in_subproc": False,
                "max_autotune_gemm_backends": "CUTLASS",
                "cuda.cutlass_max_profiling_configs": 2,
            }
        ):
            model = MyModel()
            M, N, K = 128, 64, 64
            dynamic_shapes = {
                "x": {0: Dim.DYNAMIC},
                "w": None,
            }
            x = torch.randn(M, K).cuda().half()
            w = torch.randn(N, K).cuda().half().t()
            actual = AOTIRunnerUtil.run(
                model,
                (x, w),
                dynamic_shapes=dynamic_shapes,
            )
            expected = model(x, w)
            torch.testing.assert_close(expected, actual)
    @mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
    @unittest.skipIf(not SM90OrLater, "need sm_90")
    def test_aoti_workspace_ptr(self):
        """Pin a stream-k kernel (which needs a workspace allocation) via the
        allowlist and check AOTI wires the workspace pointer correctly."""
        class MyModel(torch.nn.Module):
            def forward(self, x, w):
                return x @ w
        with config.patch(
            {
                "max_autotune": True,
                "autotune_in_subproc": False,
                "max_autotune_gemm_backends": "CUTLASS",
                # pin one specific stream-k kernel so a workspace is required
                "cuda.cutlass_op_allowlist_regex": "128x256x64.*stream_k_warpspecialized_cooperative_epi_nosmem",
                "cuda.cutlass_max_profiling_configs": 1,
            }
        ):
            model = MyModel()
            M, N, K = 200, 5216, 10_432
            x = torch.randn(M, K).cuda().half()
            w = torch.randn(N, K).cuda().half().t()
            actual = AOTIRunnerUtil.run(
                model,
                (x, w),
            )
            expected = model(x, w)
            # large-K accumulation differences justify the loose tolerances
            torch.testing.assert_close(expected, actual, atol=0.01, rtol=0.01)
    # TODO: Enable dynamic test cases when dynamic support is added.
    @unittest.skipIf(not SM80OrLater or SM90OrLater, "need sm_8x exactly")
    @parametrize("dynamic", (False,))
    @mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
    def test_max_autotune_cutlass_backend_sparse_semi_structured_mm(
        self, dynamic: bool
    ):
        """
        Make sure autotuning mm in sub processes work without crashes.

        Builds a 2:4-sparse lhs, compiles the sparse mm, and then inspects the
        local autotune cache to confirm CUTLASS kernels actually competed.
        """
        SparseSemiStructuredTensor._FORCE_CUTLASS = True
        def mm(a, b):
            return torch.mm(a, b)
        m, n, k = 32, 8, 64
        # 2:4 sparsity pattern: every group of 4 keeps the last 2 elements
        mask = torch.tensor([0, 0, 1, 1]).tile(m, k // 4).cuda().half()
        a = torch.rand(m, k).cuda().half() * mask
        a_sparse = to_sparse_semi_structured(a)
        b = torch.rand(k, n).cuda().half()
        with config.patch(
            {
                "max_autotune": True,
                "autotune_in_subproc": True,
                "max_autotune_gemm_backends": "CUTLASS",
                "cuda.cutlass_max_profiling_configs": 2,
                "autotune_local_cache": True,
            }
        ):
            Y_compiled = torch.compile(mm, dynamic=dynamic)(a_sparse, b)
            Y = mm(a, b)
            torch.testing.assert_close(Y_compiled, Y)
        cache = torch._inductor.codecache.LocalCache().lookup(
            "sparse_semi_structured_mm"
        )
        assert cache is not None
        # cache key encodes (device, dtype, size/stride info) of the three
        # operands: compressed values, metadata, and dense rhs
        high = cache[
            f"[('cuda', 'torch.float16', {m}, {k // 2}, {k // 2}, 1, 0), "
            f"('cuda', 'torch.int16', {m}, {k // 16}, {k // 16}, 1, 0), "
            f"('cuda', 'torch.float16', {k}, {n}, {n}, 1, 0)]"
        ]["high"]
        cutlass_kernels_count = 0
        for kernel, duration in high.items():
            # inf duration means the kernel failed to run; don't count those
            if kernel.startswith("cutlass_gemm") and not math.isinf(duration):
                cutlass_kernels_count += 1
        assert cutlass_kernels_count > 0
    @unittest.skipIf(not SM90OrLater, "need sm_90")
    @mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
    def test_cutlass_backend_op_denylist(
        self,
    ):
        """The denylist regex must filter matching kernels out of the choice
        list while still leaving other CUTLASS choices available."""
        # NOTE(review): alpha/beta are deliberately(?) swapped in the wrapper
        # call below — harmless for this test, which only inspects choices.
        def my_addmm(x, a, b, alpha, beta):
            return torch.addmm(x, a, b, alpha=beta, beta=alpha)
        x = torch.randn((128, 128)).cuda().half()
        a = torch.randn(128, 128).cuda().half()
        b = torch.randn(128, 128).cuda().half().t()
        with fresh_cache():
            with config.patch(
                {
                    "max_autotune": True,
                    "max_autotune_gemm_backends": "CUTLASS",
                    "cuda.cutlass_max_profiling_configs": 2,
                    "cuda.cutlass_op_allowlist_regex": "",
                    "cuda.cutlass_op_denylist_regex": "pingpong",
                }
            ):
                with mock.patch(
                    "torch._inductor.kernel.mm.autotune_select_algorithm",
                    wraps=select_no_algorithm,
                ) as sa:
                    # select_no_algorithm forces NoValidChoicesError, but the
                    # intercepted call still exposes the candidate choices.
                    with self.assertRaisesRegex(
                        InductorError, r".*NoValidChoicesError.*"
                    ):
                        torch.compile(my_addmm, dynamic=False)(x, a, b, 1.0, 2.0)
                    args, _ = sa.call_args
                    op_name, choices, _, __ = args
                    assert op_name == "addmm"
                    cuda_template_count = 0
                    for choice in choices:
                        if isinstance(choice, CUDATemplateCaller):
                            choice_info = choice.info_dict()
                            op_conf_name = choice_info.get("op_conf_name", "")
                            assert isinstance(op_conf_name, str)
                            assert "pingpong" not in op_conf_name, (
                                "All pingpong Kernels should have been filtered"
                            )
                            cuda_template_count += 1
                    assert cuda_template_count > 0, "No CUDATemplateCaller choices"
    @unittest.skipIf(not SM90OrLater, "need sm_90")
    @mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
    def test_cutlass_backend_op_allowlist(
        self,
    ):
        """The allowlist regex must restrict the choice list to matching
        kernels only (mirror image of the denylist test above)."""
        def addmm(x, a, b, alpha, beta):
            return torch.addmm(x, a, b, alpha=alpha, beta=beta)
        x = torch.randn((128, 128)).cuda().half()
        a = torch.randn(128, 128).cuda().half()
        b = torch.randn(128, 128).cuda().half().t()
        with fresh_cache():
            with config.patch(
                {
                    "max_autotune": True,
                    "max_autotune_gemm_backends": "CUTLASS",
                    "cuda.cutlass_max_profiling_configs": 2,
                    "cuda.cutlass_op_allowlist_regex": "pingpong",
                    "cuda.cutlass_op_denylist_regex": None,
                }
            ):
                with mock.patch(
                    "torch._inductor.kernel.mm.autotune_select_algorithm",
                    wraps=select_no_algorithm,
                ) as sa:
                    # choices are inspected via the intercepted call; the
                    # compile itself is expected to raise
                    with self.assertRaisesRegex(
                        InductorError, r".*NoValidChoicesError.*"
                    ):
                        torch.compile(addmm, dynamic=False)(x, a, b, 1.0, 1.0)
                    args, _ = sa.call_args
                    op_name, choices, _, __ = args
                    assert op_name == "addmm"
                    cuda_template_count = 0
                    for choice in choices:
                        if isinstance(choice, CUDATemplateCaller):
                            choice_info = choice.info_dict()
                            op_conf_name = choice_info.get("op_conf_name", "")
                            assert isinstance(op_conf_name, str)
                            assert "pingpong" in op_conf_name, (
                                "Only pingpong Kernels should have been allowed"
                            )
                            cuda_template_count += 1
                    assert cuda_template_count > 0, "No CUDATemplateCaller choices"
    @unittest.skipIf(not SM90OrLater, "need sm_90")
    @mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
    def test_cutlass_backend_fp8_scaled_mm_fast_accum_filtering(
        self,
    ):
        """For fp8 scaled mm, the ``use_fast_accum`` flag must select only
        fastaccum kernels (True) or filter them all out (False)."""
        float8_dtype = torch.float8_e4m3fn
        # Only bf16 output type is supported for row-wise scaling, not fp32
        output_dtype: torch.dtype = torch.bfloat16
        device = "cuda"
        M, K, N = 128, 128, 128  # Matmul Y = X [M, K] x W [N, K]
        x = torch.randn(M, K, dtype=output_dtype, device=device)
        w = torch.randn(N, K, dtype=output_dtype, device=device)
        bias = None
        # quantize weight (prior to inference)
        w_fp8, w_inverse_scale = _quantize_rowwise(w, float8_dtype)
        w_t_fp8 = w_fp8.t()
        w_inverse_scale = w_inverse_scale.t()  # scale_b should be (1, N)
        # quantize input x
        x_fp8, x_inverse_scale = _quantize_rowwise(x, float8_dtype)
        def linear(
            x_fp8, x_inverse_scale, w_t_fp8, w_inverse_scale, bias, use_fast_accum
        ):
            y = torch._scaled_mm(
                x_fp8,
                w_t_fp8,
                x_inverse_scale,
                w_inverse_scale,
                bias,
                out_dtype=output_dtype,
                use_fast_accum=use_fast_accum,
            )
            return y
        linear_compiled = torch.compile(linear, backend="inductor")
        def run_test(use_fast_accum):
            # fresh cache so each flag value autotunes independently
            with fresh_cache():
                with config.patch(
                    {
                        "max_autotune": True,
                        "max_autotune_gemm_backends": "CUTLASS",
                        "cuda.cutlass_max_profiling_configs": 2,
                    }
                ):
                    with mock.patch(
                        "torch._inductor.kernel.mm.autotune_select_algorithm",
                        wraps=select_no_algorithm,
                    ) as sa:
                        # compile raises by design; we only inspect choices
                        with self.assertRaisesRegex(
                            InductorError, r".*NoValidChoicesError.*"
                        ):
                            linear_compiled(
                                x_fp8,
                                x_inverse_scale,
                                w_t_fp8,
                                w_inverse_scale,
                                bias,
                                use_fast_accum,
                            )
                        args, _ = sa.call_args
                        _, choices, _, _ = args
                        cuda_template_count = 0
                        for choice in choices:
                            if isinstance(choice, CUDATemplateCaller):
                                choice_info = choice.info_dict()
                                op_conf_name = choice_info.get("op_conf_name", "")
                                assert isinstance(op_conf_name, str)
                                if use_fast_accum:
                                    assert "fastaccum" in op_conf_name, (
                                        "Only fastaccum Kernels should have been allowed"
                                    )
                                else:
                                    assert "fastaccum" not in op_conf_name, (
                                        "fastaccum Kernels should have been filtered"
                                    )
                                cuda_template_count += 1
                        assert cuda_template_count > 0, "No CUDATemplateCaller choices"
        run_test(True)
        run_test(False)
    @unittest.skipIf(not SM90OrLater, "need sm_90")
    @mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
    def test_cutlass_backend_shape_coverage_mm(
        self,
    ):
        """
        Checks if cutlass backend produces some ops for a variety of shapes.
        This test doesn't compile and check the correctness of the ops.

        NOTE: K has to be even.
        """
        # odd / non-power-of-two M and K values on purpose
        inputs = [
            (torch.randn(128, 500).cuda().half(), torch.randn(500, 576).cuda().half()),
            (
                torch.randn(500, 128).cuda().half(),
                torch.randn(128, 576).cuda().half(),
            ),
            (torch.randn(128, 250).cuda().half(), torch.randn(250, 576).cuda().half()),
            (
                torch.randn(250, 128).cuda().half(),
                torch.randn(128, 576).cuda().half(),
            ),
            (
                torch.randn(125, 128).cuda().half(),
                torch.randn(128, 576).cuda().half(),
            ),
        ]
        with (
            fresh_cache(),
            config.patch(
                {
                    "max_autotune": True,
                    "max_autotune_gemm_backends": "CUTLASS",
                    "cuda.cutlass_max_profiling_configs": 2,
                }
            ),
            mock.patch(
                "torch._inductor.kernel.mm.autotune_select_algorithm",
                wraps=select_no_algorithm,
            ) as sa,
        ):
            for input in inputs:
                A, B = input
                M, K = A.shape
                _, N = B.shape
                # compile raises by design; choices are read off the mock
                with self.assertRaisesRegex(InductorError, r".*NoValidChoicesError.*"):
                    torch.compile(torch.mm, dynamic=False)(*input)
                self.assertTrue(
                    sa.called,
                    f"autotune_select_algorithm was not called with shape M={M}, N={N}, K={K}",
                )
                args, _ = sa.call_args
                op_name, choices, _, __ = args
                assert op_name == "mm"
                cuda_template_count = 0
                for choice in choices:
                    if isinstance(choice, CUDATemplateCaller):
                        choice_info = choice.info_dict()
                        op_conf_name = choice_info.get("op_conf_name", "")
                        assert isinstance(op_conf_name, str)
                        cuda_template_count += 1
                self.assertGreater(
                    cuda_template_count,
                    0,
                    "No CUDATemplateCaller choices found for matmul with shape "
                    f"M={M}, N={N}, K={K}",
                )
@unittest.skipIf(not SM80OrLater, "need sm_80")
@mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
def test_get_max_alignment(self):
l4 = FixedLayout(
torch.device("cpu"), torch.half, size=[1, 2, 4], stride=[0, 4, 1]
)
m4 = get_max_alignment(l4)
self.assertEqual(
m4, 4, "Wrong max alignment. Should have been 4. (simple, contiguous case)"
)
l4_2 = FixedLayout(
torch.device("cpu"), torch.half, size=[1, 4, 2], stride=[0, 1, 4]
)
m4_2 = get_max_alignment(l4_2)
self.assertEqual(
m4_2,
4,
"Wrong max alignment. Should have been 4. Did not deal with strides correctly",
)
l1 = FixedLayout(
torch.device("cpu"), torch.half, size=[2, 4, 2], stride=[23, 1, 4]
)
m1 = get_max_alignment(l1)
self.assertEqual(
m1,
1,
"Wrong max alignment. Should have been 1. Did not take stride into account correctly",
)
l2 = FixedLayout(
torch.device("cpu"), torch.half, size=[1, 2, 4], stride=[0, 4, 1], offset=6
)
m2 = get_max_alignment(l2)
self.assertEqual(
m2, 2, "Wrong max alignment. Should have been 2. (due to choice of offset)"
)
l8 = FixedLayout(
torch.device("cpu"),
torch.half,
size=[2, 2, 8],
stride=[32, 8, 1],
offset=24,
)
m8 = get_max_alignment(l8)
self.assertEqual(m8, 8, "Wrong max alignment. Should have been 8.")
l4 = FixedLayout(
torch.device("cpu"),
torch.float32,
size=[2, 2, 8],
stride=[32, 8, 1],
offset=24,
)
m4 = get_max_alignment(l4)
self.assertEqual(
m4, 4, "Wrong max alignment. Should have been 4 (due to float32 dtype )."
)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
def test_standalone_runner(self):
max_autotune_gemm_backends = "CUTLASS"
a = torch.randn(128, 16).cuda().half()
b = torch.randn(128, 16).cuda().half().t()
with config.patch(
{
"max_autotune": True,
"max_autotune_gemm_backends": max_autotune_gemm_backends,
"cuda.cutlass_max_profiling_configs": 2,
"cuda.generate_test_runner": True, # put standalone runner in the generated code
}
):
from tempfile import NamedTemporaryFile
from torch._inductor.codegen.cuda.cutlass_utils import (
cuda_standalone_runner_compile_command,
CUDACompileSourceCapturingContext,
)
# Run compilation, check results just in case, and save
# CUTLASS-based generated code.
with CUDACompileSourceCapturingContext() as ctx:
compiled = torch.compile(torch.mm, dynamic=False)
expected = torch.mm(a, b)
actual = compiled(a, b)
torch.testing.assert_close(actual, expected)
sources = ctx.sources
assert len(sources) >= 1
# Get names for temporary source and executable files.
cu_file = NamedTemporaryFile("w", suffix=".cu", delete=False)
cu_file.close()
exe_file = NamedTemporaryFile("w", suffix="", delete=False)
exe_file.close()
# Save the generated code into the .cu file.
with open(cu_file.name, "w") as file:
file.write(sources[0])
# Get command to compile .cu file, and run the
# compilation.
command = cuda_standalone_runner_compile_command(
Path(cu_file.name), Path(exe_file.name)
)
if IS_FBCODE:
# hack to bypass the following error:
# error while loading shared libraries: IX}: invalid mode for dlopen(): Invalid argument
platform_path = sysconfig.get_config_var("LIBDIR")
cuda_path = os.path.realpath(os.path.join(platform_path, "libcuda.so"))
command = command.replace("-lcuda ", f"-L{cuda_path} ")
repro_message = (
f"Reproduce with: {command}\n"
f"exe_file.name: {exe_file.name}\n"
f"cu_file.name: {cu_file.name}\n"
)
retcode = os.system(command)
self.assertEqual(retcode, 0, repro_message)
# Run the executable generated.
if not IS_FBCODE or not IN_RE_WORKER:
retcode = os.system(exe_file.name)
self.assertEqual(retcode, 0, repro_message)
# Remove temporary files.
os.remove(cu_file.name)
os.remove(exe_file.name)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
def test_cutlass_backend_integration(self):
"""
Test if cutlass backend can be autotune with other backends
"""
def mm(a, b):
return a @ b
a = torch.randn(128, 16).cuda().half()
b = torch.randn(128, 16).cuda().half().t()
with config.patch(
{
"max_autotune": True,
"max_autotune_gemm_backends": "ATEN,TRITON,CUTLASS",
"cuda.cutlass_max_profiling_configs": 2,
# needed for log searching
"fx_graph_cache": False,
"fx_graph_remote_cache": False,
}
):
with (
log_settings("+inductor"),
self.assertLogs(
logger="torch._inductor.codegen.cuda", level=logging.DEBUG
) as test_log,
):
Y_compiled = torch.compile(mm, dynamic=False)(a, b)
Y = mm(a, b)
torch.testing.assert_close(Y_compiled, Y)
output = "\n".join(record.getMessage() for record in test_log.records)
match = re.search(
r"Got cutlass configs: total number of ops: (\d+)", output
)
assert match, "Expect to find the cutlass configs log"
num_ops = int(match.group(1))
self.assertTrue(num_ops > 0, "The number of ops should be greater than 0")
@unittest.skipIf(not SM90OrLater, "need sm_90")
def test_maybe_append_choice_caching(self):
"""
Test if maybe_append_choice's caching leads to correct results and
shorter maybe_append_choice time.
"""
NUM_ITERATIONS = 10
class TestModule(torch.nn.Module):
def forward(self, A, B):
for _ in range(NUM_ITERATIONS):
A = A @ B / 32
return A
model = TestModule().cuda()
A = torch.randn(1024, 1024, dtype=torch.bfloat16, device="cuda")
B = torch.randn(1024, 1024, dtype=torch.bfloat16, device="cuda").t()
expected = model(A, B)
# Track render calls
from torch._inductor.codegen.cuda.gemm_template import CUTLASSGemmTemplate
original_render = CUTLASSGemmTemplate.render
render_call_count = 0
def counting_render(self, *args, **kwargs):
nonlocal render_call_count
render_call_count += 1
return original_render(self, *args, **kwargs)
with mock.patch.object(CUTLASSGemmTemplate, "render", counting_render):
with config.patch(
{
"max_autotune": True,
"max_autotune_gemm_backends": "CUTLASS",
"fx_graph_cache": False,
"fx_graph_remote_cache": False,
"cuda.enable_caching_codegen": True,
"cuda.cutlass_max_profiling_configs": 2,
}
):
compiled_model = torch.compile(model, fullgraph=True)
actual = compiled_model(A, B)
torch.testing.assert_close(actual, expected)
# Check render call count: render is called uniquely for each codegen
# and for each finalized codegen.
self.assertEqual(render_call_count, NUM_ITERATIONS + 2)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
def test_multiple_mm(self):
"""
Test multiple matrix multiplications with different shapes in a single nn.Module.
"""
class MultipleMMModel(torch.nn.Module):
def forward(self, a, b, c, d):
# First mm with shape (128, 64) @ (64, 32) -> (128, 32)
mm1 = a @ b
# Second mm with shape (256, 128) @ (128, 64) -> (256, 64)
mm2 = c @ d
return mm1, mm2
model = MultipleMMModel().cuda()
# Create tensors with different shapes
a = torch.randn(128, 64).cuda().half()
b = torch.randn(32, 64).cuda().half().t()
c = torch.randn(256, 128).cuda().half()
d = torch.randn(64, 128).cuda().half().t()
# Track render calls
from torch._inductor.codegen.cuda.gemm_template import CUTLASSGemmTemplate
original_render = CUTLASSGemmTemplate.render
render_call_count = 0
def counting_render(self, *args, **kwargs):
nonlocal render_call_count
render_call_count += 1
return original_render(self, *args, **kwargs)
with mock.patch.object(CUTLASSGemmTemplate, "render", counting_render):
with config.patch(
{
"max_autotune": True,
"max_autotune_gemm_backends": "CUTLASS",
"cuda.cutlass_max_profiling_configs": 2,
"fx_graph_cache": False,
"fx_graph_remote_cache": False,
"cuda.enable_caching_codegen": True,
}
):
# Get expected results
expected = model(a, b, c, d)
# Compile and run
compiled_model = torch.compile(model)
actual = compiled_model(a, b, c, d)
# Verify results
torch.testing.assert_close(actual, expected)
num_matmuls = 2
self.assertEqual(render_call_count, num_matmuls + num_matmuls * 2)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
def test_multiple_mm_with_dynamic_shape(self):
"""
Test multiple matrix multiplications where one has dynamic shapes.
"""
class MultipleMMDynamicModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.c = torch.randn(64, 256).cuda().half()
self.d = torch.randn(128, 256).cuda().half().t()
def forward(self, a, b):
# dynamic shape matmul
mm1 = a @ b
# static shape matmul
mm2 = self.c @ self.d
return mm1, mm2
model = MultipleMMDynamicModel().cuda()
# Create tensors with different shapes
a = torch.randn(128, 64).cuda().half()
b = torch.randn(32, 64).cuda().half().t()
# Track render calls
from torch._inductor.codegen.cuda.gemm_template import CUTLASSGemmTemplate
original_render = CUTLASSGemmTemplate.render
render_call_count = 0
def counting_render(self, *args, **kwargs):
nonlocal render_call_count
render_call_count += 1
return original_render(self, *args, **kwargs)
with mock.patch.object(CUTLASSGemmTemplate, "render", counting_render):
with config.patch(
{
"max_autotune": True,
"max_autotune_gemm_backends": "CUTLASS",
"cuda.cutlass_max_profiling_configs": 2,
"fx_graph_cache": False,
"fx_graph_remote_cache": False,
"cuda.enable_caching_codegen": True,
}
):
# Get expected results
expected = model(a, b)
# Compile and run
compiled_model = torch.compile(model, dynamic=True)
actual = compiled_model(a, b)
# Verify results
torch.testing.assert_close(actual, expected)
num_matmuls = 2
self.assertEqual(render_call_count, num_matmuls + num_matmuls * 2)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
def test_cutlass_backend_matmul_same_tensor(self):
max_autotune_gemm_backends = "CUTLASS"
M = 128
A = torch.randn(M, M).cuda().half()
with config.patch(
{
"max_autotune": True,
"max_autotune_gemm_backends": max_autotune_gemm_backends,
"cuda.cutlass_max_profiling_configs": 2,
}
):
compiled = torch.compile(torch.mm)
torch.testing.assert_close(A @ A.t(), compiled(A, A.t()))
@unittest.skipIf(not SM90OrLater, "need sm_90")
@mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
def test_cutlass_backend_matmul_nonzero_offset(self):
max_autotune_gemm_backends = "CUTLASS"
M = 129
A = torch.randn(M, M - 1).cuda().half()
with config.patch(
{
"max_autotune": True,
"max_autotune_gemm_backends": max_autotune_gemm_backends,
"cuda.cutlass_max_profiling_configs": 2,
}
):
compiled = torch.compile(torch.mm)
torch.testing.assert_close(
A[1:, :] @ A[1:, :].t(), compiled(A[1:, :], A[1:, :].t())
)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
def test_flexible_layout(self):
class TestModel(torch.nn.Module):
def forward(self, B):
A = torch.zeros_like(B)
return A @ B.t()
M = 1024
B = torch.randn(M, M).cuda().half()
model = TestModel().cuda()
with config.patch(
{
"max_autotune": True,
"max_autotune_gemm_backends": "CUTLASS",
"cuda.cutlass_max_profiling_configs": 1,
}
):
_ = torch.compile(model)(B)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
@use_evt_config
def test_evt_flexible_layout(self):
class TestModel(torch.nn.Module):
def forward(self, B):
A = torch.zeros_like(B)
return (A @ B.t()).relu()
M = 1024
B = torch.randn(M, M).cuda().half()
model = TestModel().cuda().half()
with config.patch(
{
"max_autotune": True,
"max_autotune_gemm_backends": "CUTLASS",
"cuda.cutlass_max_profiling_configs": 1,
}
):
_ = torch.compile(model)(B)
self.assertEqual(
torch._dynamo.utils.counters["inductor"]["cuda_epilogue_fusion_counter"], 1
)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
def test_filtered_ops_cache(self):
class TestModel(torch.nn.Module):
def forward(self, B):
A = torch.zeros_like(B)
for _ in range(100):
A = A @ B.t()
return A
M = 1024
B = torch.randn(M, M).cuda().half()
model = TestModel().cuda()
start_time = time.time()
with config.patch(
{
"max_autotune": True,
"max_autotune_gemm_backends": "CUTLASS",
"cuda.cutlass_max_profiling_configs": 1,
}
):
_ = torch.compile(model)(B)
self.assertTrue(time.time() - start_time < 60)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
@parametrize("use_aoti", (False, True))
def test_compilation_time(self, use_aoti):
M = 1024
A = torch.randn(M, M).cuda().half()
B = torch.randn(M, M).cuda().half().t()
class MyModel(torch.nn.Module):
def forward(self, a, b):
return a @ b
model = MyModel().cuda()
expected = model(A, B)
start_time = time.time()
with config.patch(
{
"max_autotune": True,
"max_autotune_gemm_backends": "CUTLASS",
"cuda.cutlass_max_profiling_configs": 1,
}
):
if use_aoti:
actual = AOTIRunnerUtil.run(
model,
(A, B),
)
else:
actual = torch.compile(model, fullgraph=True)(A, B)
torch.testing.assert_close(actual, expected)
self.assertTrue(time.time() - start_time < 50)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@use_evt_config
@evt_all_ops
@evt_all_shapes
def test_evt_fusions_basic(self, op, shape):
class TestModel(torch.nn.Module):
def forward(self, a, b, extra_args):
res = (a @ b).relu() # add extra activation to not hit addmm path
return op(res, *extra_args)
self.run_evt_test(TestModel(), op, shape)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@use_evt_config
@evt_bin_ops
def test_evt_broadcasting(self, op):
class TestModel(torch.nn.Module):
def forward(self, a, b, extra_args):
acc = a @ b
return acc, op(acc.relu(), *extra_args)
M = 1024
N = 512
a = torch.ones(M, N).cuda().half()
b = torch.ones(N, N).cuda().half().t()
extra_args = gen_args(op, (M, N))
model = TestModel().cuda()
result = torch.compile(model)(a, b, extra_args)
ref_result = model(a, b, extra_args)
self.assertEqual(
torch._dynamo.utils.counters["inductor"]["cuda_epilogue_fusion_counter"], 1
)
torch.testing.assert_close(result, ref_result)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@use_evt_config
@evt_un_ops
def test_evt_activations(self, op):
class TestModel(torch.nn.Module):
def forward(self, a, b, extra_args):
acc = a @ b
return acc, op(acc, *extra_args)
M = 1024
N = 512
a = torch.ones(M, N).cuda().half()
b = torch.ones(N, N).cuda().half().t()
extra_args = gen_args(op, (M, N))
model = TestModel().cuda()
result = torch.compile(model)(a, b, extra_args)
ref_result = model(a, b, extra_args)
self.assertEqual(
torch._dynamo.utils.counters["inductor"]["cuda_epilogue_fusion_counter"], 1
)
torch.testing.assert_close(result, ref_result)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@use_evt_config
@evt_all_ops
def test_evt_mixed_dtypes(self, op):
M = 1024
N = 256
fp32_tensor = torch.ones(M, N).cuda().float()
class TestModel(torch.nn.Module):
def forward(self, a, b, extra_args):
acc = a @ b
out0 = op(acc.relu(), *extra_args)
out1 = torch.add(out0, fp32_tensor)
return out1
model = TestModel().cuda()
a = torch.ones(M, N).cuda().half()
b = torch.ones(N, N).cuda().half().t()
extra_args = gen_args(op, (M, N), dtype=torch.float16)
# baseline is cutlass kernel + triton
# matches expected casting behavior
with config.patch({"cuda.cutlass_epilogue_fusion_enabled": False}):
ref_result = torch.compile(model)(a, b, extra_args)
self.assertEqual(
torch._dynamo.utils.counters["inductor"]["cuda_epilogue_fusion_counter"], 0
)
torch._dynamo.reset()
result = torch.compile(model)(a, b, extra_args)
self.assertEqual(
torch._dynamo.utils.counters["inductor"]["cuda_epilogue_fusion_counter"],
1,
)
torch.testing.assert_close(result, ref_result)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@use_evt_config
@evt_all_ops
def test_evt_multi_op(self, op):
class TestModel(torch.nn.Module):
def forward(self, a, b, extra_args):
acc = a @ b
return torch.add(op(acc.relu(), *extra_args).relu(), acc)
self.run_evt_test(TestModel(), op, (1024, 512))
@unittest.skipIf(not SM90OrLater, "need sm_90")
@use_evt_config
@evt_all_ops
def test_evt_reuse_matmul_input(self, op):
class TestModel(torch.nn.Module):
def forward(self, a, b, extra_args):
acc = a @ b
return torch.add(op(acc.relu(), *extra_args).relu(), a)
self.run_evt_test(TestModel(), op, (1024, 1024)) # shape needs to be square
@unittest.skipIf(not SM90OrLater, "need sm_90")
@use_evt_config
@evt_all_ops
@parametrize(
"dynamic", (False, True)
) # To not drastically increase test time we only test dynamic on this test
def test_evt_multi_output(self, op, dynamic):
class TestModel(torch.nn.Module):
def forward(self, a, b, extra_args):
acc = a @ b
z0 = acc.relu()
z = op(z0, *extra_args)
y = z + z0
return z, y
M = 1024
N = 512
shapes = [(512, 512)] if not dynamic else [(1024, 64), (128, 256)]
for i, shape in enumerate(shapes):
M, N = shape
a = torch.ones(M, N).cuda().half()
b = torch.ones(N, N).cuda().half().t()
extra_args = gen_args(op, (M, N))
model = TestModel().cuda()
result = torch.compile(model)(a, b, extra_args)
ref_result = model(a, b, extra_args)
self.assertEqual(
torch._dynamo.utils.counters["inductor"][
"cuda_epilogue_fusion_counter"
],
2 * (i + 1),
)
torch.testing.assert_close(result, ref_result)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@use_evt_config
def test_evt_return_accumulator(self):
op = torch.add
class TestModel(torch.nn.Module):
def forward(self, a, b, extra_args):
acc = a @ b
return acc, op(acc.relu(), *extra_args)
M = 1024
N = 512
a = torch.ones(M, N).cuda().half()
b = torch.ones(N, N).cuda().half().t()
extra_args = gen_args(op, (M, N))
model = TestModel().cuda()
result = torch.compile(model)(a, b, extra_args)
ref_result = model(a, b, extra_args)
self.assertEqual(
torch._dynamo.utils.counters["inductor"]["cuda_epilogue_fusion_counter"], 1
)
torch.testing.assert_close(result, ref_result)
@mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
@parametrize("arch", ("90", "100"))
@parametrize("cuda_version", ("12.4", "12.8"))
def test_gemm_operation_serialization(self, arch: str, cuda_version: str):
"""
Testing serialization for GEMM operations generated by CUTLASS.
This should cover GroupedGemmOperation as well.
"""
full_ops = _gen_ops_cached(arch, cuda_version)
ops = pytree.tree_flatten(full_ops)[0]
# sanity check
self.assertGreater(len(ops), 1000, "Too few ops generated")
# test if configuration name is unique
op_config_names = [op.configuration_name() for op in ops]
self.assertEqual(len(op_config_names), len(set(op_config_names)))
serializer = get_cutlass_operation_serializer()
self.assertIsNotNone(serializer)
serialized_ops = [serializer.serialize(op) for op in ops]
deserialized_ops = [
serializer.deserialize(serialized_op) for serialized_op in serialized_ops
]
for op, deserialized_op in zip(ops, deserialized_ops, strict=False):
self.assertTrue(_check_if_instances_equal(op, deserialized_op))
@unittest.skipIf(not PLATFORM_SUPPORTS_FP8, "FP8 is only supported on H100+")
@unittest.skipIf(not SM90OrLater, "need sm_90")
@fp8_config
@parametrize("float8_dtype", (torch.float8_e4m3fn,))
@parametrize(
"shape",
(
(
512,
128,
64,
),
),
)
@parametrize("has_bias", (False, True))
@parametrize("use_fast_accum", (False, True))
@parametrize("input_dtype", (torch.bfloat16, torch.float16))
def test_fp8_rowwise_scaling(
self,
float8_dtype: torch.dtype,
shape: tuple[int, int, int],
has_bias: bool,
use_fast_accum: bool,
input_dtype: torch.dtype,
):
# Only bf16 output type is supported for row-wise scaling, not fp32
output_dtype: torch.dtype = torch.bfloat16
device = "cuda"
M, K, N = shape # Matmul Y = X [M, K] x W [N, K]
x = torch.randn(M, K, dtype=input_dtype, device=device)
w = torch.randn(N, K, dtype=input_dtype, device=device)
bias = None
if has_bias:
bias = torch.randn(N, device=device, dtype=input_dtype).to(torch.bfloat16)
# quantize weight (prior to inference)
w_fp8, w_inverse_scale = _quantize_rowwise(w, float8_dtype)
w_t_fp8 = w_fp8.t()
w_inverse_scale = w_inverse_scale.t() # scale_b should be (1, N)
# quantize input x
x_fp8, x_inverse_scale = _quantize_rowwise(x, float8_dtype)
def linear(x_fp8, x_inverse_scale, w_t_fp8, w_inverse_scale, bias):
y = torch._scaled_mm(
x_fp8,
w_t_fp8,
x_inverse_scale,
w_inverse_scale,
bias,
out_dtype=output_dtype,
use_fast_accum=use_fast_accum,
)
return y
y_eager = linear(
x_fp8,
x_inverse_scale,
w_t_fp8,
w_inverse_scale,
bias,
)
linear_compiled = torch.compile(linear, backend="inductor")
y_compiled = linear_compiled(
x_fp8,
x_inverse_scale,
w_t_fp8,
w_inverse_scale,
bias,
)
self.assertEqual(y_eager.dtype, output_dtype)
self.assertEqual(y_compiled.dtype, output_dtype)
torch.testing.assert_close(y_eager, y_compiled, rtol=1e-2, atol=0.05)
@unittest.skipIf(not PLATFORM_SUPPORTS_FP8, "FP8 is only supported on H100+")
@unittest.skipIf(not SM90OrLater, "need sm_90")
@fp8_config
@parametrize("float8_dtype", (torch.float8_e4m3fn,))
@parametrize(
"shape",
(
(
512,
1024,
),
),
)
@parametrize("use_fast_accum", (True,))
@parametrize("use_aoti", (False, True))
@parametrize("dynamic", (False, True))
def test_fp8_rowwise_scaling_multiple_linear(
self,
float8_dtype: torch.dtype,
shape: tuple[int, int],
use_fast_accum: bool,
use_aoti: bool = False,
dynamic: bool = False,
):
"""
This test is meant to simulate a more realistic scenario.
"""
if dynamic and use_aoti:
self.skipTest("Accuracy issues when both AOTI and dynamic are enabled")
# Only bf16 output type is supported for row-wise scaling, not fp32
output_dtype: torch.dtype = torch.bfloat16
device = "cuda"
M, N = shape # Matmul Y = X [M, K] x W [N, K]
x = torch.randn(M, N, dtype=output_dtype, device=device)
w1 = torch.randn(N, N, dtype=output_dtype, device=device)
w2 = torch.randn(N, N, dtype=output_dtype, device=device)
class TestModule(torch.nn.Module):
def __init__(self, w1, w2, float8_dtype):
super().__init__()
w1_fp8, self.w1_inverse_scale = _quantize_rowwise(w1, float8_dtype)
w2_fp8, self.w2_inverse_scale = _quantize_rowwise(w2, float8_dtype)
self.w1_t_fp8 = w1_fp8.t()
self.w2_t_fp8 = w2_fp8.t()
self.float8_dtype = float8_dtype
def forward(self, x):
x_fp8, x_inverse_scale = _quantize_rowwise(x, self.float8_dtype)
y1 = torch._scaled_mm(
x_fp8,
self.w1_t_fp8,
x_inverse_scale.view(-1, 1),
self.w1_inverse_scale.view(1, -1),
out_dtype=output_dtype,
use_fast_accum=use_fast_accum,
)
y1_fp8, y1_inverse_scale = _quantize_rowwise(y1, self.float8_dtype)
y2 = torch._scaled_mm(
y1_fp8,
self.w2_t_fp8,
y1_inverse_scale.view(-1, 1),
self.w2_inverse_scale.view(1, -1),
out_dtype=output_dtype,
use_fast_accum=use_fast_accum,
)
return y2
model = TestModule(w1, w2, float8_dtype).cuda()
dynamic_shapes = (
{
"x": {0: Dim.DYNAMIC, 1: Dim.DYNAMIC},
}
if dynamic
else None
)
expected = model(x)
if use_aoti:
actual = AOTIRunnerUtil.run(
model,
(x,),
dynamic_shapes=dynamic_shapes,
)
else:
compiled_model = torch.compile(model, fullgraph=True, dynamic=dynamic)
actual = compiled_model(x)
torch.testing.assert_close(expected, actual, rtol=1e-2, atol=0.05)
@unittest.skipIf(not PLATFORM_SUPPORTS_FP8, "FP8 is only supported on H100+")
@unittest.skipIf(not SM90OrLater, "need sm_90")
@fp8_config
@parametrize("float8_dtype", (torch.float8_e4m3fn,))
@parametrize(
"shape",
(
(
512,
128,
64,
),
),
)
@parametrize("has_bias", (False, True))
@parametrize("use_fast_accum", (False,))
@parametrize("input_dtype", (torch.bfloat16, torch.float16))
def test_fp8_tensorwise_scaling(
self,
float8_dtype: torch.dtype,
shape: tuple[int, int, int],
has_bias: bool,
use_fast_accum: bool,
input_dtype: torch.dtype,
):
device = "cuda"
M, K, N = shape # Matmul Y = X [M, K] x W [N, K]
output_dtype = input_dtype
# input and output dtypes of _scaled_mm do not need to be the same, but
# typically in a model they are
x = torch.randn(M, K, dtype=input_dtype, device=device)
w = torch.randn(N, K, dtype=input_dtype, device=device)
bias = None
if has_bias:
bias = torch.randn(N, device=device, dtype=input_dtype)
# quantize weight (prior to inference)
w_fp8, w_inverse_scale = _quantize_tensorwise(w, float8_dtype)
w_t_fp8 = w_fp8.t()
# quantize input x
x_fp8, x_inverse_scale = _quantize_tensorwise(x, float8_dtype)
def linear(x_fp8, x_inverse_scale, w_t_fp8, w_inverse_scale, bias):
y = torch._scaled_mm(
x_fp8,
w_t_fp8,
x_inverse_scale,
w_inverse_scale,
bias,
out_dtype=output_dtype,
use_fast_accum=use_fast_accum,
)
return y
y_eager = linear(
x_fp8,
x_inverse_scale,
w_t_fp8,
w_inverse_scale,
bias,
)
linear_compiled = torch.compile(linear, backend="inductor", mode="max-autotune")
y_compiled = linear_compiled(
x_fp8,
x_inverse_scale,
w_t_fp8,
w_inverse_scale,
bias,
)
self.assertEqual(y_eager.dtype, output_dtype)
self.assertEqual(y_compiled.dtype, output_dtype)
# depending on the kernel config (BLOCK_M size, etc) selected during Inductor
# autotuning for the compiled case, the results can be different because of
# the way blocks of results are accumulated (float addition not associative), so
# setting a small absolute tolerance in these tests
torch.testing.assert_close(y_eager, y_compiled, rtol=1e-2, atol=0.05)
@unittest.skipIf(not SM90OrLater, "need sm_90")
def test_config_number_post_filtering(self) -> None:
"""
Test if cutlass backend produces the same number of configs after filtering
regardless of layout and dtype.
"""
layouts = ["rr", "rc", "cr", "cc"]
dtypes = [torch.float16, torch.bfloat16]
config_counts = {}
for layout in layouts:
for dtype in dtypes:
a = torch.randn(128, 128, dtype=dtype).cuda()
b = torch.randn(128, 128, dtype=dtype).cuda()
if layout[0] == "c":
a = a.t()
if layout[1] == "c":
b = b.t()
with config.patch(
{
"max_autotune": True,
"max_autotune_gemm_backends": "CUTLASS",
# needed for log searching
"force_disable_caches": True,
"cuda.cutlass_max_profiling_swizzle_options": [2],
}
):
with mock.patch(
"torch._inductor.kernel.mm.autotune_select_algorithm",
wraps=select_no_algorithm,
) as sa:
with self.assertRaisesRegex(
BackendCompilerFailed, r".*NoValidChoicesError.*"
):
_ = torch.compile(torch.mm, dynamic=False)(a, b)
args, _ = sa.call_args
_, choices, _, __ = args
config_counts[(layout, dtype)] = len(choices)
# Check that all config counts are equal
all_counts = list(config_counts.values())
self.assertTrue(
len(set(all_counts)) == 1,
f"Config counts should be equal across all layout/dtype combinations. "
f"Got counts: {config_counts}",
)
if __name__ == "__main__":
from torch._inductor.utils import is_big_gpu
# Set env to make it work in CI.
if HAS_CUDA_AND_TRITON and HAS_CPU and is_big_gpu():
run_tests()
|
TestCutlassBackend
|
python
|
sphinx-doc__sphinx
|
sphinx/builders/linkcheck.py
|
{
"start": 27327,
"end": 31154
}
|
class ____(NamedTuple):
delay: float
next_check: float
def rewrite_github_anchor(app: Sphinx, uri: str) -> str | None:
"""Rewrite anchor name of the hyperlink to github.com
The hyperlink anchors in github.com are dynamically generated. This rewrites
them before checking and makes them comparable.
"""
parsed = urlparse(uri)
if parsed.hostname == 'github.com' and parsed.fragment:
prefixed = parsed.fragment.startswith('user-content-')
if not prefixed:
fragment = f'user-content-{parsed.fragment}'
return urlunparse(parsed._replace(fragment=fragment))
return None
def compile_linkcheck_allowed_redirects(app: Sphinx, config: Config) -> None:
"""Compile patterns to the regexp objects."""
if config.linkcheck_allowed_redirects is _SENTINEL_LAR:
return
if not isinstance(config.linkcheck_allowed_redirects, dict):
msg = __(
f'Invalid value `{config.linkcheck_allowed_redirects!r}` in '
'linkcheck_allowed_redirects. Expected a dictionary.'
)
raise ConfigError(msg)
allowed_redirects = {}
for url, pattern in config.linkcheck_allowed_redirects.items():
try:
allowed_redirects[re.compile(url)] = re.compile(pattern)
except re.error as exc:
logger.warning(
__('Failed to compile regex in linkcheck_allowed_redirects: %r %s'),
exc.pattern,
exc.msg,
)
config.linkcheck_allowed_redirects = allowed_redirects
def setup(app: Sphinx) -> ExtensionMetadata:
app.add_builder(CheckExternalLinksBuilder)
app.add_post_transform(HyperlinkCollector)
app.add_config_value('linkcheck_ignore', [], '', types=frozenset({list, tuple}))
app.add_config_value(
'linkcheck_exclude_documents', [], '', types=frozenset({list, tuple})
)
app.add_config_value(
'linkcheck_allowed_redirects', _SENTINEL_LAR, '', types=frozenset({dict})
)
app.add_config_value('linkcheck_auth', [], '', types=frozenset({list, tuple}))
app.add_config_value('linkcheck_request_headers', {}, '', types=frozenset({dict}))
app.add_config_value('linkcheck_retries', 1, '', types=frozenset({int}))
app.add_config_value('linkcheck_timeout', 30, '', types=frozenset({float, int}))
app.add_config_value('linkcheck_workers', 5, '', types=frozenset({int}))
app.add_config_value('linkcheck_anchors', True, '', types=frozenset({bool}))
# Anchors starting with ! are ignored since they are
# commonly used for dynamic pages
app.add_config_value(
'linkcheck_anchors_ignore', ['^!'], '', types=frozenset({list, tuple})
)
app.add_config_value(
'linkcheck_anchors_ignore_for_url', (), '', types=frozenset({list, tuple})
)
app.add_config_value(
'linkcheck_rate_limit_timeout', 300.0, '', types=frozenset({float, int})
)
app.add_config_value(
'linkcheck_allow_unauthorized', False, '', types=frozenset({bool})
)
app.add_config_value(
'linkcheck_report_timeouts_as_broken', False, '', types=frozenset({bool})
)
app.add_config_value(
'linkcheck_case_insensitive_urls',
(),
'',
types=frozenset({frozenset, list, set, tuple}),
)
app.add_event('linkcheck-process-uri')
# priority 900 to happen after ``check_confval_types()``
app.connect('config-inited', compile_linkcheck_allowed_redirects, priority=900)
# FIXME: Disable URL rewrite handler for github.com temporarily.
# See: https://github.com/sphinx-doc/sphinx/issues/9435
# app.connect('linkcheck-process-uri', rewrite_github_anchor)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
|
RateLimit
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 183043,
"end": 183661
}
|
class ____(sgqlc.types.Input):
"""Autogenerated input type of CreateEnvironment"""
__schema__ = github_schema
__field_names__ = ("repository_id", "name", "client_mutation_id")
repository_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="repositoryId")
"""The node ID of the repository."""
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
"""The name of the environment."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
|
CreateEnvironmentInput
|
python
|
PyCQA__pylint
|
doc/data/messages/a/arguments-differ/bad.py
|
{
"start": 93,
"end": 251
}
|
class ____(Drink):
def mix(self, fluid_one, fluid_two, alcoholic_fluid): # [arguments-differ]
return fluid_one + fluid_two + alcoholic_fluid
|
Cocktail
|
python
|
scikit-learn__scikit-learn
|
sklearn/utils/_param_validation.py
|
{
"start": 22963,
"end": 23231
}
|
class ____(_Constraint):
"""Constraint representing iterables that are not strings."""
def is_satisfied_by(self, val):
return isinstance(val, Iterable) and not isinstance(val, str)
def __str__(self):
return "an iterable"
|
_IterablesNotString
|
python
|
doocs__leetcode
|
solution/1000-1099/1073.Adding Two Negabinary Numbers/Solution.py
|
{
"start": 0,
"end": 618
}
|
class ____:
def addNegabinary(self, arr1: List[int], arr2: List[int]) -> List[int]:
i, j = len(arr1) - 1, len(arr2) - 1
c = 0
ans = []
while i >= 0 or j >= 0 or c:
a = 0 if i < 0 else arr1[i]
b = 0 if j < 0 else arr2[j]
x = a + b + c
c = 0
if x >= 2:
x -= 2
c -= 1
elif x == -1:
x = 1
c += 1
ans.append(x)
i, j = i - 1, j - 1
while len(ans) > 1 and ans[-1] == 0:
ans.pop()
return ans[::-1]
|
Solution
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-non-negative-product-in-a-matrix.py
|
{
"start": 58,
"end": 1161
}
|
class ____(object):
def maxProductPath(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
MOD = 10**9+7
max_dp = [[0]*len(grid[0]) for _ in xrange(2)]
min_dp = [[0]*len(grid[0]) for _ in xrange(2)]
for i in xrange(len(grid)):
for j in xrange(len(grid[i])):
if i == 0 and j == 0:
max_dp[i%2][j] = min_dp[i%2][j] = grid[i][j]
continue
curr_max = max(max_dp[(i-1)%2][j] if i > 0 else max_dp[i%2][j-1],
max_dp[i%2][j-1] if j > 0 else max_dp[(i-1)%2][j])
curr_min = min(min_dp[(i-1)%2][j] if i > 0 else min_dp[i%2][j-1],
min_dp[i%2][j-1] if j > 0 else min_dp[(i-1)%2][j])
if grid[i][j] < 0:
curr_max, curr_min = curr_min, curr_max
max_dp[i%2][j] = curr_max*grid[i][j]
min_dp[i%2][j] = curr_min*grid[i][j]
return max_dp[(len(grid)-1)%2][-1]%MOD if max_dp[(len(grid)-1)%2][-1] >= 0 else -1
|
Solution
|
python
|
pypa__warehouse
|
warehouse/oidc/forms/google.py
|
{
"start": 973,
"end": 1030
}
|
class ____(GooglePublisherBase):
pass
|
GooglePublisherForm
|
python
|
scipy__scipy
|
scipy/spatial/tests/test_kdtree.py
|
{
"start": 16107,
"end": 16264
}
|
class ____(_Test_two_random_trees):
def setup_method(self):
super().setup_method()
self.p = np.inf
@KDTreeTest
|
_Test_two_random_trees_linf
|
python
|
openai__openai-python
|
src/openai/types/vector_stores/vector_store_file_deleted.py
|
{
"start": 199,
"end": 321
}
|
class ____(BaseModel):
id: str
deleted: bool
object: Literal["vector_store.file.deleted"]
|
VectorStoreFileDeleted
|
python
|
patrick-kidger__equinox
|
equinox/nn/_conv.py
|
{
"start": 11539,
"end": 12504
}
|
class ____(Conv):
"""As [`equinox.nn.Conv`][] with `num_spatial_dims=3`."""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int | Sequence[int],
stride: int | Sequence[int] = (1, 1, 1),
padding: str | int | Sequence[int] | Sequence[tuple[int, int]] = (0, 0, 0),
dilation: int | Sequence[int] = (1, 1, 1),
groups: int = 1,
use_bias: bool = True,
padding_mode: str = "ZEROS",
dtype=None,
*,
key: PRNGKeyArray,
):
super().__init__(
num_spatial_dims=3,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
use_bias=use_bias,
padding_mode=padding_mode,
dtype=dtype,
key=key,
)
|
Conv3d
|
python
|
django__django
|
tests/nested_foreign_keys/models.py
|
{
"start": 105,
"end": 236
}
|
class ____(models.Model):
title = models.CharField(max_length=200)
director = models.ForeignKey(Person, models.CASCADE)
|
Movie
|
python
|
pyparsing__pyparsing
|
examples/tiny/tiny_engine.py
|
{
"start": 917,
"end": 1923
}
|
class ____:
"""A single stack frame holding local variables and their types.
Variables in TINY are stored per-frame; lookups search the current
frame first, then the global frame.
"""
def __init__(self) -> None:
# maintain mapping of name -> var definitions
self._vars: dict[str, list] = {}
def __contains__(self, name: str) -> bool: # allow `name in frame`
return name in self._vars
def declare(self, name: str, dtype: str, value: object) -> None:
if name in self._vars:
raise NameError(f"Variable already declared in frame: {name!r}")
self._vars[name] = [dtype, value]
def set(self, name: str, value: object) -> None:
if name not in self._vars:
raise NameError(f"Variable not declared: {name!r}")
self._vars[name][1] = value
def get(self, name: str) -> object:
return self._vars[name][1]
def get_type(self, name: str) -> str:
return self._vars[name][0]
|
TinyFrame
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-shopify/source_shopify/shopify_graphql/bulk/query.py
|
{
"start": 113334,
"end": 124366
}
|
class ____(ShopifyBulkQuery):
"""
Output example to BULK query `order agreement` from `orders` with `filter query` by `updated_at` sorted `ASC`:
{
orders(query: "updated_at:>='2020-06-13T00:00:00+00:00' AND updated_at:<'2024-06-14T00:00:00+00:00'", sortKey:UPDATED_AT) {
edges {
node {
__typename
id
createdAt
updatedAt
agreements {
edges {
node {
__typename
id
happenedAt
reason
sales {
edges {
node {
__typename
quantity
id
actionType
lineType
totalAmount {
shopMoney {
amount
currencyCode
}
}
totalDiscountAmountAfterTaxes {
shopMoney {
amount
currencyCode
}
}
totalDiscountAmountBeforeTaxes {
shopMoney {
amount
currencyCode
}
}
totalTaxAmount {
shopMoney {
amount
currencyCode
}
}
... on ProductSale {
id
lineItem {
id
}
}
}
}
}
}
}
}
}
}
}
}
"""
query_name = "orders"
sort_key = "UPDATED_AT"
shop_money_fields: List[Field] = [
"amount",
Field(name="currencyCode", alias="currency_code"),
]
sales_fields: List[Field] = [
Field(
name="edges",
fields=[
Field(
name="node",
fields=[
"__typename",
"quantity",
"id",
Field(name="actionType", alias="action_type"),
Field(name="lineType", alias="line_type"),
Field(
name="totalAmount",
alias="total_amount",
fields=[Field(name="shopMoney", alias="shop_money", fields=shop_money_fields)],
),
Field(
name="totalDiscountAmountAfterTaxes",
alias="total_discount_amount_after_taxes",
fields=[Field(name="shopMoney", alias="shop_money", fields=shop_money_fields)],
),
Field(
name="totalDiscountAmountBeforeTaxes",
alias="total_discount_amount_before_taxes",
fields=[Field(name="shopMoney", alias="shop_money", fields=shop_money_fields)],
),
Field(
name="totalTaxAmount",
alias="total_tax_amount",
fields=[Field(name="shopMoney", alias="shop_money", fields=shop_money_fields)],
),
InlineFragment(
type="ProductSale",
fields=[
"id",
Field(name="lineItem", alias="line_item", fields=["id"]),
],
),
],
)
],
)
]
agreements_fields: List[Field] = [
Field(
name="edges",
fields=[
Field(
name="node",
fields=[
"__typename",
"id",
Field(name="happenedAt", alias="happened_at"),
"reason",
Field(name="sales", fields=sales_fields),
],
)
],
)
]
query_nodes: List[Field] = [
"__typename",
"id",
"createdAt",
"updatedAt",
Field(name="agreements", fields=agreements_fields),
]
sale_record_components = [
# Each `OrderAgreement` could have one of the following sale component associated with it.
"AdditionalFeeSale",
"AdjustmentSale",
"DutySale",
"FeeSale",
"GiftCardSale",
"ProductSale",
"ShippingLineSale",
"TipSale",
"UnknownSale",
]
agreement_record_components = [
# Each `Order` could have one of the following agreement component associated with it.
"OrderAgreement",
"OrderEditAgreement",
"RefundAgreement",
"ReturnAgreement",
]
record_composition = {
"new_record": "Order",
"record_components": agreement_record_components + sale_record_components,
}
def _resolve_and_save_id(self, entity: MutableMapping[str, Any]) -> Mapping[str, Any]:
# save the id before it's resolved
entity["admin_graphql_api_id"] = entity.get("id")
entity["id"] = self.tools.resolve_str_id(entity.get("id"))
# remove leftovers
entity.pop("__parentId", None)
return entity
def process_agreement(self, agreement: MutableMapping[str, Any]) -> MutableMapping[str, Any]:
agreement = self._resolve_and_save_id(agreement)
# convert dates from ISO-8601 to RFC-3339
agreement["happened_at"] = self.tools.from_iso8601_to_rfc3339(agreement, "happened_at")
return agreement
def cast_sale_amount_to_float(self, sale: MutableMapping[str, Any], entities: Iterable[str]) -> float:
# cast the `amount` from str to float
for entity in entities:
if sale.get(entity):
sale[entity]["shop_money"]["amount"] = float(sale.get(entity, {}).get("shop_money", {}).get("amount"))
return sale
def process_sale(self, sale: MutableMapping[str, Any]) -> MutableMapping[str, Any]:
sale = self._resolve_and_save_id(sale)
sale_line_item = sale.get("line_item")
# `sale_line_item` could be None
if sale_line_item:
sale["line_item"]["admin_graphql_api_id"] = sale_line_item.get("id")
sale["line_item"]["id"] = self.tools.resolve_str_id(sale_line_item.get("id"))
# cast the `amout` for the number of entities provided from str to float
sale = self.cast_sale_amount_to_float(
sale,
["total_amount", "total_discount_amount_after_taxes", "total_discount_amount_before_taxes", "total_tax_amount"],
)
return sale
def collect_sub_components(
self, collected_record_components: MutableMapping[str, Any], entities: List[str]
) -> Optional[Iterable[MutableMapping[str, Any]]]:
collected_entities: List[MutableMapping[str, Any]] = []
for component in entities:
_component: Optional[Iterable[Mapping[str, Any]]] = collected_record_components.get(component, [])
if _component:
for element in _component:
collected_entities.append(element)
return collected_entities
def join_agreements_with_sales(
self, collected_agreements: List[MutableMapping[str, Any]], collected_sales: List[Mapping[str, Any]]
) -> Iterable[Mapping[str, Any]]:
# process each `agreement` collected by checking the related `sales` presence
for agreement in collected_agreements:
agreement_id = agreement.get("id")
agreement["sales"] = []
# find the target `sale` record for each `agreement` collected
for sale in collected_sales:
sale_parent_id = sale.get("__parentId")
if sale_parent_id == agreement_id:
agreement["sales"].append(self.process_sale(sale))
# process the final agreement element
self.process_agreement(agreement)
return collected_agreements
def record_process_components(self, record: MutableMapping[str, Any]) -> Optional[Iterable[MutableMapping[str, Any]]]:
"""
Defines how to process collected components.
"""
# get the joined record components collected for the record
record_components = record.get("record_components", {})
if record_components:
collected_agreements = self.collect_sub_components(record_components, self.agreement_record_components)
collected_sales = self.collect_sub_components(record_components, self.sale_record_components)
if collected_agreements:
agreements_with_sales = self.join_agreements_with_sales(collected_agreements, collected_sales)
# remove leftovers
record.pop("record_components", None)
# populate the record with the collected and joined `agreement + sales` data, if present
record["agreements"] = agreements_with_sales if agreements_with_sales else {}
yield record
|
OrderAgreement
|
python
|
numpy__numpy
|
numpy/_build_utils/tempita/_looper.py
|
{
"start": 987,
"end": 1338
}
|
class ____:
def __init__(self, seq):
self.seq = list(seq)
self.pos = 0
def __iter__(self):
return self
def __next__(self):
if self.pos >= len(self.seq):
raise StopIteration
result = loop_pos(self.seq, self.pos), self.seq[self.pos]
self.pos += 1
return result
|
looper_iter
|
python
|
sympy__sympy
|
sympy/physics/quantum/qft.py
|
{
"start": 1252,
"end": 2817
}
|
class ____(OneQubitGate):
"""This is the R_k gate of the QTF."""
gate_name = 'Rk'
gate_name_latex = 'R'
def __new__(cls, *args):
if len(args) != 2:
raise QuantumError(
'Rk gates only take two arguments, got: %r' % args
)
# For small k, Rk gates simplify to other gates, using these
# substitutions give us familiar results for the QFT for small numbers
# of qubits.
target = args[0]
k = args[1]
if k == 1:
return ZGate(target)
elif k == 2:
return PhaseGate(target)
elif k == 3:
return TGate(target)
args = cls._eval_args(args)
inst = Expr.__new__(cls, *args)
inst.hilbert_space = cls._eval_hilbert_space(args)
return inst
@classmethod
def _eval_args(cls, args):
# Fall back to this, because Gate._eval_args assumes that args is
# all targets and can't contain duplicates.
return QExpr._eval_args(args)
@property
def k(self):
return self.label[1]
@property
def targets(self):
return self.label[:1]
@property
def gate_name_plot(self):
return r'$%s_%s$' % (self.gate_name_latex, str(self.k))
def get_target_matrix(self, format='sympy'):
if format == 'sympy':
return Matrix([[1, 0], [0, exp(sign(self.k)*Integer(2)*pi*I/(Integer(2)**abs(self.k)))]])
raise NotImplementedError(
'Invalid format for the R_k gate: %r' % format)
Rk = RkGate
|
RkGate
|
python
|
PrefectHQ__prefect
|
src/prefect/futures.py
|
{
"start": 16628,
"end": 19736
}
|
class ____(list[PrefectFuture[R]], Iterator[PrefectFuture[R]]):
"""
A list of Prefect futures.
This class provides methods to wait for all futures
in the list to complete and to retrieve the results of all task runs.
"""
def wait(self, timeout: float | None = None) -> None:
"""
Wait for all futures in the list to complete.
Args:
timeout: The maximum number of seconds to wait for all futures to
complete. This method will not raise if the timeout is reached.
"""
wait(self, timeout=timeout)
def result(
self: Self,
timeout: float | None = None,
raise_on_failure: bool = True,
) -> list[R]:
"""
Get the results of all task runs associated with the futures in the list.
Args:
timeout: The maximum number of seconds to wait for all futures to
complete.
raise_on_failure: If `True`, an exception will be raised if any task run fails.
Returns:
A list of results of the task runs.
Raises:
TimeoutError: If the timeout is reached before all futures complete.
"""
try:
with timeout_context(timeout):
return [
future.result(raise_on_failure=raise_on_failure) for future in self
]
except TimeoutError as exc:
# timeout came from inside the task
if "Scope timed out after {timeout} second(s)." not in str(exc):
raise
raise TimeoutError(
f"Timed out waiting for all futures to complete within {timeout} seconds"
) from exc
def as_completed(
futures: list[PrefectFuture[R]], timeout: float | None = None
) -> Generator[PrefectFuture[R], None]:
unique_futures: set[PrefectFuture[R]] = set(futures)
total_futures = len(unique_futures)
pending = unique_futures
try:
with timeout_context(timeout):
done = {f for f in unique_futures if f._final_state} # type: ignore[privateUsage]
pending = unique_futures - done
yield from done
finished_event = threading.Event()
finished_lock = threading.Lock()
finished_futures: list[PrefectFuture[R]] = []
def add_to_done(future: PrefectFuture[R]):
with finished_lock:
finished_futures.append(future)
finished_event.set()
for future in pending:
future.add_done_callback(add_to_done)
while pending:
finished_event.wait()
with finished_lock:
done = finished_futures
finished_futures = []
finished_event.clear()
for future in done:
pending.remove(future)
yield future
except TimeoutError:
raise TimeoutError(
"%d (of %d) futures unfinished" % (len(pending), total_futures)
)
|
PrefectFutureList
|
python
|
paramiko__paramiko
|
paramiko/auth_strategy.py
|
{
"start": 1340,
"end": 2804
}
|
class ____(AuthSource):
"""
Password authentication.
:param callable password_getter:
A lazy callable that should return a `str` password value at
authentication time, such as a `functools.partial` wrapping
`getpass.getpass`, an API call to a secrets store, or similar.
If you already know the password at instantiation time, you should
simply use something like ``lambda: "my literal"`` (for a literal, but
also, shame on you!) or ``lambda: variable_name`` (for something stored
in a variable).
"""
def __init__(self, username, password_getter):
super().__init__(username=username)
self.password_getter = password_getter
def __repr__(self):
# Password auth is marginally more 'username-caring' than pkeys, so may
# as well log that info here.
return super()._repr(user=self.username)
def authenticate(self, transport):
# Lazily get the password, in case it's prompting a user
# TODO: be nice to log source _of_ the password?
password = self.password_getter()
return transport.auth_password(self.username, password)
# TODO 4.0: twiddle this, or PKey, or both, so they're more obviously distinct.
# TODO 4.0: the obvious is to make this more wordy (PrivateKeyAuth), the
# minimalist approach might be to rename PKey to just Key (esp given all the
# subclasses are WhateverKey and not WhateverPKey)
|
Password
|
python
|
chroma-core__chroma
|
chromadb/utils/embedding_functions/fastembed_sparse_embedding_function.py
|
{
"start": 459,
"end": 7228
}
|
class ____(SparseEmbeddingFunction[Documents]):
def __init__(
self,
model_name: str,
task: Optional[TaskType] = "document",
cache_dir: Optional[str] = None,
threads: Optional[int] = None,
cuda: Optional[bool] = None,
device_ids: Optional[list[int]] = None,
lazy_load: Optional[bool] = None,
query_config: Optional[FastembedSparseEmbeddingFunctionQueryConfig] = None,
**kwargs: Any,
):
"""Initialize SparseEncoderEmbeddingFunction.
Args:
model_name (str, optional): Identifier of the Fastembed model
List of commonly used models: Qdrant/bm25, prithivida/Splade_PP_en_v1, Qdrant/minicoil-v1
task (str, optional): Task to perform, can be "document" or "query"
cache_dir (str, optional): The path to the cache directory.
threads (int, optional): The number of threads to use for the model.
cuda (bool, optional): Whether to use CUDA.
device_ids (list[int], optional): The device IDs to use for the model.
lazy_load (bool, optional): Whether to lazy load the model.
query_config (dict, optional): Configuration for the query, can be "task"
**kwargs: Additional arguments to pass to the model.
"""
try:
from fastembed import SparseTextEmbedding
except ImportError:
raise ValueError(
"The fastembed python package is not installed. Please install it with `pip install fastembed`"
)
self.task = task
self.query_config = query_config
self.model_name = model_name
self.cache_dir = cache_dir
self.threads = threads
self.cuda = cuda
self.device_ids = device_ids
self.lazy_load = lazy_load
for key, value in kwargs.items():
if not isinstance(value, (str, int, float, bool, list, dict, tuple)):
raise ValueError(f"Keyword argument {key} is not a primitive type")
self.kwargs = kwargs
self._model = SparseTextEmbedding(
model_name, cache_dir, threads, cuda, device_ids, lazy_load, **kwargs
)
def __call__(self, input: Documents) -> SparseVectors:
"""Generate embeddings for the given documents.
Args:
input: Documents to generate embeddings for.
Returns:
Embeddings for the documents.
"""
try:
from fastembed import SparseTextEmbedding
except ImportError:
raise ValueError(
"The fastembed python package is not installed. Please install it with `pip install fastembed`"
)
model = cast(SparseTextEmbedding, self._model)
if self.task == "document":
embeddings = model.embed(
list(input),
)
elif self.task == "query":
embeddings = model.query_embed(
list(input),
)
else:
raise ValueError(f"Invalid task: {self.task}")
sparse_vectors: SparseVectors = []
for vec in embeddings:
sparse_vectors.append(
normalize_sparse_vector(
indices=vec.indices.tolist(), values=vec.values.tolist()
)
)
return sparse_vectors
def embed_query(self, input: Documents) -> SparseVectors:
try:
from fastembed import SparseTextEmbedding
except ImportError:
raise ValueError(
"The fastembed python package is not installed. Please install it with `pip install fastembed`"
)
model = cast(SparseTextEmbedding, self._model)
if self.query_config is not None:
task = self.query_config.get("task")
if task == "document":
embeddings = model.embed(
list(input),
)
elif task == "query":
embeddings = model.query_embed(
list(input),
)
else:
raise ValueError(f"Invalid task: {task}")
sparse_vectors: SparseVectors = []
for vec in embeddings:
sparse_vectors.append(
normalize_sparse_vector(
indices=vec.indices.tolist(), values=vec.values.tolist()
)
)
return sparse_vectors
else:
return self.__call__(input)
@staticmethod
def name() -> str:
return "fastembed_sparse"
@staticmethod
def build_from_config(
config: Dict[str, Any]
) -> "SparseEmbeddingFunction[Documents]":
model_name = config.get("model_name")
task = config.get("task")
query_config = config.get("query_config")
cache_dir = config.get("cache_dir")
threads = config.get("threads")
cuda = config.get("cuda")
device_ids = config.get("device_ids")
lazy_load = config.get("lazy_load")
kwargs = config.get("kwargs", {})
if model_name is None:
assert False, "This code should not be reached"
return FastembedSparseEmbeddingFunction(
model_name=model_name,
task=task,
query_config=query_config,
cache_dir=cache_dir,
threads=threads,
cuda=cuda,
device_ids=device_ids,
lazy_load=lazy_load,
**kwargs,
)
def get_config(self) -> Dict[str, Any]:
return {
"model_name": self.model_name,
"task": self.task,
"query_config": self.query_config,
"cache_dir": self.cache_dir,
"threads": self.threads,
"cuda": self.cuda,
"device_ids": self.device_ids,
"lazy_load": self.lazy_load,
"kwargs": self.kwargs,
}
def validate_config_update(
self, old_config: Dict[str, Any], new_config: Dict[str, Any]
) -> None:
# model_name is also used as the identifier for model path if stored locally.
# Users should be able to change the path if needed, so we should not validate that.
# e.g. moving file path from /v1/my-model.bin to /v2/my-model.bin
return
@staticmethod
def validate_config(config: Dict[str, Any]) -> None:
"""
Validate the configuration using the JSON schema.
Args:
config: Configuration to validate
Raises:
ValidationError: If the configuration does not match the schema
"""
validate_config_schema(config, "fastembed_sparse")
|
FastembedSparseEmbeddingFunction
|
python
|
python-openxml__python-docx
|
tests/test_package.py
|
{
"start": 1769,
"end": 5517
}
|
class ____:
"""Unit-test suite for `docx.package.Package`."""
def it_can_get_a_matching_image_part(
self,
Image_: Mock,
image_: Mock,
_get_by_sha1_: Mock,
image_part_: Mock,
):
Image_.from_file.return_value = image_
image_.sha1 = "f005ba11"
_get_by_sha1_.return_value = image_part_
image_parts = ImageParts()
image_part = image_parts.get_or_add_image_part("image.jpg")
Image_.from_file.assert_called_once_with("image.jpg")
_get_by_sha1_.assert_called_once_with(image_parts, "f005ba11")
assert image_part is image_part_
def but_it_adds_a_new_image_part_when_match_fails(
self,
Image_: Mock,
image_: Mock,
_get_by_sha1_: Mock,
_add_image_part_: Mock,
image_part_: Mock,
):
Image_.from_file.return_value = image_
image_.sha1 = "fa1afe1"
_get_by_sha1_.return_value = None
_add_image_part_.return_value = image_part_
image_parts = ImageParts()
image_part = image_parts.get_or_add_image_part("image.png")
Image_.from_file.assert_called_once_with("image.png")
_get_by_sha1_.assert_called_once_with(image_parts, "fa1afe1")
_add_image_part_.assert_called_once_with(image_parts, image_)
assert image_part is image_part_
@pytest.mark.parametrize(
("existing_partname_numbers", "expected_partname_number"),
[
((2, 3), 1),
((1, 3), 2),
((1, 2), 3),
],
)
def it_knows_the_next_available_image_partname(
self,
request: FixtureRequest,
existing_partname_numbers: tuple[int, int],
expected_partname_number: int,
):
image_parts = ImageParts()
for n in existing_partname_numbers:
image_parts.append(
instance_mock(request, ImagePart, partname=PackURI(f"/word/media/image{n}.png"))
)
next_partname = image_parts._next_image_partname("png")
assert next_partname == PackURI("/word/media/image%d.png" % expected_partname_number)
def it_can_add_a_new_image_part(
self,
_next_image_partname_: Mock,
image_: Mock,
ImagePart_: Mock,
image_part_: Mock,
):
partname = PackURI("/word/media/image7.png")
_next_image_partname_.return_value = partname
ImagePart_.from_image.return_value = image_part_
image_parts = ImageParts()
image_part = image_parts._add_image_part(image_)
ImagePart_.from_image.assert_called_once_with(image_, partname)
assert image_part in image_parts
assert image_part is image_part_
# fixtures -------------------------------------------------------
@pytest.fixture
def _add_image_part_(self, request: FixtureRequest):
return method_mock(request, ImageParts, "_add_image_part")
@pytest.fixture
def _get_by_sha1_(self, request: FixtureRequest):
return method_mock(request, ImageParts, "_get_by_sha1")
@pytest.fixture
def Image_(self, request: FixtureRequest):
return class_mock(request, "docx.package.Image")
@pytest.fixture
def image_(self, request: FixtureRequest):
return instance_mock(request, Image)
@pytest.fixture
def ImagePart_(self, request: FixtureRequest):
return class_mock(request, "docx.package.ImagePart")
@pytest.fixture
def image_part_(self, request: FixtureRequest):
return instance_mock(request, ImagePart)
@pytest.fixture
def _next_image_partname_(self, request: FixtureRequest):
return method_mock(request, ImageParts, "_next_image_partname")
|
DescribeImageParts
|
python
|
google__jax
|
docs/autodidax.py
|
{
"start": 25806,
"end": 26259
}
|
class ____(Tracer):
def __init__(self, trace, val, batch_dim: BatchAxis):
self._trace = trace
self.val = val
self.batch_dim = batch_dim
@property
def aval(self):
if self.batch_dim is not_mapped:
return get_aval(self.val)
else:
return mapped_aval(self.batch_dim, get_aval(self.val))
def full_lower(self):
if self.batch_dim is not_mapped:
return full_lower(self.val)
else:
return self
|
BatchTracer
|
python
|
PrefectHQ__prefect
|
src/prefect/utilities/schema_tools/hydration.py
|
{
"start": 4131,
"end": 9436
}
|
class ____(Placeholder):
def __init__(self, template: str) -> None:
self.template = template
def __eq__(self, other: Any) -> bool:
return isinstance(other, type(self)) and self.template == other.template
def handler(kind: PrefectKind) -> Callable[[Handler], Handler]:
def decorator(func: Handler) -> Handler:
_handlers[kind] = func
return func
return decorator
def call_handler(kind: PrefectKind, obj: dict[str, Any], ctx: HydrationContext) -> Any:
if kind not in _handlers:
return obj.get("value", None)
res = _handlers[kind](obj, ctx)
if ctx.raise_on_error and isinstance(res, HydrationError):
raise res
return res
@handler("none")
def null_handler(obj: dict[str, Any], ctx: HydrationContext):
if "value" in obj:
# null handler is a pass through, so we want to continue to hydrate
return _hydrate(obj["value"], ctx)
else:
return ValueNotFound()
@handler("json")
def json_handler(obj: dict[str, Any], ctx: HydrationContext):
if "value" in obj:
if isinstance(obj["value"], dict):
dehydrated_json = _hydrate(obj["value"], ctx)
else:
dehydrated_json = obj["value"]
# If the result is a Placeholder, we should return it as is
if isinstance(dehydrated_json, Placeholder):
return dehydrated_json
try:
return json.loads(dehydrated_json)
except (json.decoder.JSONDecodeError, TypeError) as e:
return InvalidJSON(detail=str(e))
else:
# If `value` is not in the object, we need special handling to help
# the UI. For now if an object looks like {"__prefect_kind": "json"}
# We will remove it from the parent object. e.x.
# {"a": {"__prefect_kind": "json"}} -> {}
# or
# [{"__prefect_kind": "json"}] -> []
return RemoveValue()
@handler("jinja")
def jinja_handler(obj: dict[str, Any], ctx: HydrationContext) -> Any:
from prefect.server.utilities.user_templates import (
TemplateSecurityError,
render_user_template_sync,
validate_user_template,
)
if "template" in obj:
if isinstance(obj["template"], dict):
dehydrated_jinja = _hydrate(obj["template"], ctx)
else:
dehydrated_jinja = obj["template"]
# If the result is a Placeholder, we should return it as is
if isinstance(dehydrated_jinja, Placeholder):
return dehydrated_jinja
try:
validate_user_template(dehydrated_jinja)
except (jinja2.exceptions.TemplateSyntaxError, TemplateSecurityError) as exc:
return InvalidJinja(detail=str(exc))
if ctx.render_jinja:
return render_user_template_sync(dehydrated_jinja, ctx.jinja_context)
else:
return ValidJinja(template=dehydrated_jinja)
else:
return TemplateNotFound()
@handler("workspace_variable")
def workspace_variable_handler(obj: dict[str, Any], ctx: HydrationContext) -> Any:
if "variable_name" in obj:
if isinstance(obj["variable_name"], dict):
dehydrated_variable = _hydrate(obj["variable_name"], ctx)
else:
dehydrated_variable = obj["variable_name"]
# If the result is a Placeholder, we should return it as is
if isinstance(dehydrated_variable, Placeholder):
return dehydrated_variable
if not ctx.render_workspace_variables:
return WorkspaceVariable(variable_name=dehydrated_variable)
if dehydrated_variable in ctx.workspace_variables:
return ctx.workspace_variables[dehydrated_variable]
else:
return WorkspaceVariableNotFound(detail=dehydrated_variable)
else:
# Special handling if `variable_name` is not in the object.
# If an object looks like {"__prefect_kind": "workspace_variable"}
# we will remove it from the parent object. e.x.
# {"a": {"__prefect_kind": "workspace_variable"}} -> {}
# or
# [{"__prefect_kind": "workspace_variable"}] -> []
# or
# {"__prefect_kind": "workspace_variable"} -> {}
return RemoveValue()
def hydrate(
obj: dict[str, Any], ctx: Optional[HydrationContext] = None
) -> dict[str, Any]:
res: dict[str, Any] = _hydrate(obj, ctx)
if _remove_value(res):
res = {}
return res
def _hydrate(obj: Any, ctx: Optional[HydrationContext] = None) -> Any:
if ctx is None:
ctx = HydrationContext()
if isinstance(obj, dict) and "__prefect_kind" in obj:
obj_dict: dict[str, Any] = obj
prefect_kind = obj_dict["__prefect_kind"]
return call_handler(prefect_kind, obj_dict, ctx)
else:
if isinstance(obj, dict):
return {
key: hydrated_value
for key, value in cast(dict[str, Any], obj).items()
if not _remove_value(hydrated_value := _hydrate(value, ctx))
}
elif isinstance(obj, list):
return [
hydrated_element
for element in cast(list[Any], obj)
if not _remove_value(hydrated_element := _hydrate(element, ctx))
]
else:
return obj
|
ValidJinja
|
python
|
walkccc__LeetCode
|
solutions/2997. Minimum Number of Operations to Make Array XOR Equal to K/2997.py
|
{
"start": 0,
"end": 138
}
|
class ____:
def minOperations(self, nums: list[int], k: int) -> int:
return functools.reduce(operator.xor, nums, k).bit_count()
|
Solution
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.