language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | tensorflow__tensorflow | tensorflow/python/keras/layers/pooling.py | {
"start": 27848,
"end": 30487
} | class ____(Pooling3D):
"""Max pooling operation for 3D data (spatial or spatio-temporal).
Downsamples the input along its spatial dimensions (depth, height, and width)
by taking the maximum value over an input window
(of size defined by `pool_size`) for each channel of the input.
The window is shifted by `strides` along each dimension.
Args:
pool_size: Tuple of 3 integers,
factors by which to downscale (dim1, dim2, dim3).
`(2, 2, 2)` will halve the size of the 3D input in each dimension.
strides: tuple of 3 integers, or None. Strides values.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
Example:
```python
depth = 30
height = 30
width = 30
input_channels = 3
inputs = tf.keras.Input(shape=(depth, height, width, input_channels))
layer = tf.keras.layers.MaxPooling3D(pool_size=3)
outputs = layer(inputs) # Shape: (batch_size, 10, 10, 10, 3)
```
"""
def __init__(self,
pool_size=(2, 2, 2),
strides=None,
padding='valid',
data_format=None,
**kwargs):
super(MaxPooling3D, self).__init__(
nn.max_pool3d,
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, **kwargs)
| MaxPooling3D |
python | streamlit__streamlit | lib/streamlit/runtime/stats.py | {
"start": 3143,
"end": 3839
} | class ____:
def __init__(self) -> None:
self._cache_stats_providers: list[CacheStatsProvider] = []
def register_provider(self, provider: CacheStatsProvider) -> None:
"""Register a CacheStatsProvider with the manager.
This function is not thread-safe. Call it immediately after
creation.
"""
self._cache_stats_providers.append(provider)
def get_stats(self) -> list[CacheStat]:
"""Return a list containing all stats from each registered provider."""
all_stats: list[CacheStat] = []
for provider in self._cache_stats_providers:
all_stats.extend(provider.get_stats())
return all_stats
| StatsManager |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/freshness.py | {
"start": 744,
"end": 1018
} | class ____(graphene.ObjectType):
class Meta:
name = "CronFreshnessPolicy"
deadlineCron = graphene.NonNull(graphene.String)
lowerBoundDeltaSeconds = graphene.NonNull(graphene.Int)
timezone = graphene.NonNull(graphene.String)
| GrapheneCronFreshnessPolicy |
python | google__jax | jax/experimental/mosaic/gpu/constraints.py | {
"start": 1343,
"end": 1444
} | class ____(abc.ABC):
"""A constant is a known layout."""
@dataclasses.dataclass(frozen=True)
| Constant |
python | scipy__scipy | scipy/signal/_ltisys.py | {
"start": 50817,
"end": 52996
} | class ____(StateSpace, lti):
r"""
Continuous-time Linear Time Invariant system in state-space form.
Represents the system as the continuous-time, first order differential
equation :math:`\dot{x} = A x + B u`.
Continuous-time `StateSpace` systems inherit additional functionality
from the `lti` class.
Parameters
----------
*system: arguments
The `StateSpace` class can be instantiated with 1 or 3 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 4: array_like: (A, B, C, D)
See Also
--------
TransferFunction, ZerosPolesGain, lti
ss2zpk, ss2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`StateSpace` system representation (such as `zeros` or `poles`) is very
inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
Examples
--------
>>> import numpy as np
>>> from scipy import signal
>>> a = np.array([[0, 1], [0, 0]])
>>> b = np.array([[0], [1]])
>>> c = np.array([[1, 0]])
>>> d = np.array([[0]])
>>> sys = signal.StateSpace(a, b, c, d)
>>> print(sys)
StateSpaceContinuous(
array([[0, 1],
[0, 0]]),
array([[0],
[1]]),
array([[1, 0]]),
array([[0]]),
dt: None
)
"""
def to_discrete(self, dt, method='zoh', alpha=None):
"""
Returns the discretized `StateSpace` system.
Parameters: See `cont2discrete` for details.
Returns
-------
sys: instance of `dlti` and `StateSpace`
"""
return StateSpace(*cont2discrete((self.A, self.B, self.C, self.D),
dt,
method=method,
alpha=alpha)[:-1],
dt=dt)
| StateSpaceContinuous |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 368025,
"end": 379841
} | class ____(StatNode):
# try ... finally statement
#
# body StatNode
# finally_clause StatNode
# finally_except_clause deep-copy of finally_clause for exception case
# in_generator inside of generator => must store away current exception also in return case
#
# Each of the continue, break, return and error gotos runs
# into its own deep-copy of the finally block code.
# In addition, if we're doing an error, we save the
# exception on entry to the finally block and restore
# it on exit.
child_attrs = ["body", "finally_clause", "finally_except_clause"]
preserve_exception = 1
# handle exception case, in addition to return/break/continue
handle_error_case = True
func_return_type = None
finally_except_clause = None
is_try_finally_in_nogil = False
in_generator = False
@staticmethod
def create_analysed(pos, env, body, finally_clause):
node = TryFinallyStatNode(pos, body=body, finally_clause=finally_clause)
return node
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
self.finally_except_clause = copy.deepcopy(self.finally_clause)
self.finally_except_clause.analyse_declarations(env)
self.finally_clause.analyse_declarations(env)
def analyse_expressions(self, env):
self.body = self.body.analyse_expressions(env)
self.finally_clause = self.finally_clause.analyse_expressions(env)
self.finally_except_clause = self.finally_except_clause.analyse_expressions(env)
if env.return_type and not env.return_type.is_void:
self.func_return_type = env.return_type
return self
nogil_check = Node.gil_error
gil_message = "Try-finally statement"
def generate_execution_code(self, code):
code.mark_pos(self.pos) # before changing the error label, in case of tracing errors
code.putln("/*try:*/ {")
old_error_label = code.error_label
old_labels = code.all_new_labels()
new_labels = code.get_all_labels()
new_error_label = code.error_label
if not self.handle_error_case:
code.error_label = old_error_label
catch_label = code.new_label()
self.body.generate_execution_code(code)
code.putln("}")
temps_to_clean_up = code.funcstate.all_free_managed_temps()
code.mark_pos(self.finally_clause.pos)
code.putln("/*finally:*/ {")
# Reset labels only after writing out a potential line trace call for correct nogil error handling.
code.set_all_labels(old_labels)
def fresh_finally_clause(_next=[self.finally_clause]):
# generate the original subtree once and always keep a fresh copy
node = _next[0]
node_copy = copy.deepcopy(node)
if node is self.finally_clause:
_next[0] = node_copy
else:
node = node_copy
return node
preserve_error = self.preserve_exception and code.label_used(new_error_label)
needs_success_cleanup = not self.finally_clause.is_terminator
if not self.body.is_terminator:
code.putln('/*normal exit:*/{')
fresh_finally_clause().generate_execution_code(code)
if not self.finally_clause.is_terminator:
code.put_goto(catch_label)
code.putln('}')
if preserve_error:
code.put_label(new_error_label)
code.putln('/*exception exit:*/{')
if not self.in_generator:
code.putln("__Pyx_PyThreadState_declare")
if self.is_try_finally_in_nogil:
code.declare_gilstate()
if needs_success_cleanup:
exc_lineno_cnames = tuple([
code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
for _ in range(2)])
exc_filename_cname = code.funcstate.allocate_temp(
PyrexTypes.CPtrType(PyrexTypes.c_const_type(PyrexTypes.c_char_type)),
manage_ref=False)
else:
exc_lineno_cnames = exc_filename_cname = None
exc_vars = tuple([
code.funcstate.allocate_temp(py_object_type, manage_ref=False)
for _ in range(6)])
if code.is_tracing():
code.put_trace_exception_propagating()
code.put_trace_exception_handled(self.pos)
self.put_error_catcher(
code, temps_to_clean_up, exc_vars, exc_lineno_cnames, exc_filename_cname)
finally_old_labels = code.all_new_labels()
code.putln('{')
old_exc_vars = code.funcstate.exc_vars
code.funcstate.exc_vars = exc_vars[:3]
self.finally_except_clause.generate_execution_code(code)
code.funcstate.exc_vars = old_exc_vars
code.putln('}')
if needs_success_cleanup:
self.put_error_uncatcher(code, exc_vars, exc_lineno_cnames, exc_filename_cname)
if exc_lineno_cnames:
for cname in exc_lineno_cnames:
code.funcstate.release_temp(cname)
if exc_filename_cname:
code.funcstate.release_temp(exc_filename_cname)
if code.is_tracing():
code.put_trace_exception(self.pos, reraise=True)
code.put_goto(old_error_label)
for _ in code.label_interceptor(code.get_all_labels(), finally_old_labels):
self.put_error_cleaner(code, exc_vars)
for cname in exc_vars:
code.funcstate.release_temp(cname)
code.putln('}')
code.set_all_labels(old_labels)
return_label = code.return_label
exc_vars = ()
# TODO: use code.label_interceptor()?
for i, (new_label, old_label) in enumerate(zip(new_labels, old_labels)):
if not code.label_used(new_label):
continue
if new_label == new_error_label and preserve_error:
continue # handled above
code.putln('%s: {' % new_label)
ret_temp = None
if old_label == return_label:
if not self.finally_clause.is_terminator:
# store away return value for later reuse
if (self.func_return_type and
not self.is_try_finally_in_nogil and
not isinstance(self.finally_clause, GILExitNode)):
ret_temp = code.funcstate.allocate_temp(
self.func_return_type, manage_ref=False)
code.putln("%s = %s;" % (ret_temp, Naming.retval_cname))
if self.func_return_type.is_pyobject:
code.putln("%s = 0;" % Naming.retval_cname)
fresh_finally_clause().generate_execution_code(code)
if old_label == return_label:
if ret_temp:
code.putln("%s = %s;" % (Naming.retval_cname, ret_temp))
if self.func_return_type.is_pyobject:
code.putln("%s = 0;" % ret_temp)
code.funcstate.release_temp(ret_temp)
if not self.finally_clause.is_terminator:
code.put_goto(old_label)
code.putln('}')
# End finally
code.put_label(catch_label)
code.putln(
"}")
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
self.finally_clause.generate_function_definitions(env, code)
if self.finally_except_clause:
self.finally_except_clause.generate_function_definitions(env, code)
def put_error_catcher(self, code, temps_to_clean_up, exc_vars,
exc_lineno_cnames=None, exc_filename_cname=None):
code.globalstate.use_utility_code(restore_exception_utility_code)
code.globalstate.use_utility_code(get_exception_utility_code)
code.globalstate.use_utility_code(swap_exception_utility_code)
if self.is_try_finally_in_nogil:
code.put_ensure_gil(declare_gilstate=False)
code.putln("__Pyx_PyThreadState_assign")
code.putln(' '.join(["%s = 0;" % var for var in exc_vars]))
for temp_name, type in temps_to_clean_up:
code.put_xdecref_clear(temp_name, type)
# not using preprocessor here to avoid warnings about
# unused utility functions and/or temps
code.putln(" __Pyx_ExceptionSwap(&%s, &%s, &%s);" % exc_vars[3:])
code.putln("if ("
# if __Pyx_GetException() fails,
# store the newly raised exception instead
" unlikely(__Pyx_GetException(&%s, &%s, &%s) < 0)) "
"__Pyx_ErrFetch(&%s, &%s, &%s);" % (exc_vars[:3] * 2))
for var in exc_vars:
code.put_xgotref(var, py_object_type)
if exc_lineno_cnames:
code.putln("%s = %s; %s = %s; %s = %s;" % (
exc_lineno_cnames[0], Naming.lineno_cname,
exc_lineno_cnames[1], Naming.clineno_cname,
exc_filename_cname, Naming.filename_cname))
if self.is_try_finally_in_nogil:
code.put_release_ensured_gil()
def put_error_uncatcher(self, code, exc_vars, exc_lineno_cnames=None, exc_filename_cname=None):
code.globalstate.use_utility_code(restore_exception_utility_code)
code.globalstate.use_utility_code(reset_exception_utility_code)
if self.is_try_finally_in_nogil:
code.put_ensure_gil(declare_gilstate=False)
# although the thread state is already assigned, that can't be trusted after releasing the GIL
code.putln("__Pyx_PyThreadState_assign")
# not using preprocessor here to avoid warnings about
# unused utility functions and/or temps
for var in exc_vars[3:]:
code.put_xgiveref(var, py_object_type)
code.putln("__Pyx_ExceptionReset(%s, %s, %s);" % exc_vars[3:])
for var in exc_vars[:3]:
code.put_xgiveref(var, py_object_type)
code.putln("__Pyx_ErrRestore(%s, %s, %s);" % exc_vars[:3])
if self.is_try_finally_in_nogil:
code.put_release_ensured_gil()
code.putln(' '.join(["%s = 0;" % var for var in exc_vars]))
if exc_lineno_cnames:
code.putln("%s = %s; %s = %s; %s = %s;" % (
Naming.lineno_cname, exc_lineno_cnames[0],
Naming.clineno_cname, exc_lineno_cnames[1],
Naming.filename_cname, exc_filename_cname))
def put_error_cleaner(self, code, exc_vars):
code.globalstate.use_utility_code(reset_exception_utility_code)
if self.is_try_finally_in_nogil:
code.put_ensure_gil(declare_gilstate=False)
# although the thread state is already assigned, that can't be trusted after releasing the GIL
code.putln("__Pyx_PyThreadState_assign")
# not using preprocessor here to avoid warnings about
# unused utility functions and/or temps
for var in exc_vars[3:]:
code.put_xgiveref(var, py_object_type)
code.putln("__Pyx_ExceptionReset(%s, %s, %s);" % exc_vars[3:])
for var in exc_vars[:3]:
code.put_xdecref_clear(var, py_object_type)
if self.is_try_finally_in_nogil:
code.put_release_ensured_gil()
code.putln(' '.join(["%s = 0;"]*3) % exc_vars[3:])
def annotate(self, code):
self.body.annotate(code)
self.finally_clause.annotate(code)
| TryFinallyStatNode |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocolExplicit3.py | {
"start": 750,
"end": 928
} | class ____(Protocol1, Protocol3):
cm1 = 3
cm10 = 3
def __init__(self):
self.im1 = 3
self.im10 = 10
self.cm11 = 3
Concrete4()
@final
| Concrete4 |
python | pyca__cryptography | tests/x509/test_x509_ext.py | {
"start": 144771,
"end": 152394
} | class ____:
def test_distribution_point_full_name_not_general_names(self):
with pytest.raises(TypeError):
x509.DistributionPoint(
["notgn"], # type:ignore[list-item]
None,
None,
None,
)
def test_distribution_point_relative_name_not_name(self):
with pytest.raises(TypeError):
x509.DistributionPoint(
None,
"notname", # type:ignore[arg-type]
None,
None,
)
def test_distribution_point_full_and_relative_not_none(self):
with pytest.raises(ValueError):
x509.DistributionPoint(
[x509.UniformResourceIdentifier("http://crypt.og/crl")],
x509.RelativeDistinguishedName(
[x509.NameAttribute(NameOID.TITLE, "Test")]
),
None,
None,
)
def test_no_full_name_relative_name_or_crl_issuer(self):
with pytest.raises(ValueError):
x509.DistributionPoint(None, None, None, None)
def test_crl_issuer_not_general_names(self):
with pytest.raises(TypeError):
x509.DistributionPoint(
None,
None,
None,
["notgn"], # type:ignore[list-item]
)
def test_reason_not_reasonflags(self):
with pytest.raises(TypeError):
x509.DistributionPoint(
[x509.UniformResourceIdentifier("http://crypt.og/crl")],
None,
frozenset(["notreasonflags"]), # type:ignore[list-item]
None,
)
def test_reason_not_frozenset(self):
with pytest.raises(TypeError):
x509.DistributionPoint(
[x509.UniformResourceIdentifier("http://crypt.og/crl")],
None,
[x509.ReasonFlags.ca_compromise], # type:ignore[arg-type]
None,
)
def test_disallowed_reasons(self):
with pytest.raises(ValueError):
x509.DistributionPoint(
[x509.UniformResourceIdentifier("http://crypt.og/crl")],
None,
frozenset([x509.ReasonFlags.unspecified]),
None,
)
with pytest.raises(ValueError):
x509.DistributionPoint(
[x509.UniformResourceIdentifier("http://crypt.og/crl")],
None,
frozenset([x509.ReasonFlags.remove_from_crl]),
None,
)
def test_reason_only(self):
with pytest.raises(ValueError):
x509.DistributionPoint(
None, None, frozenset([x509.ReasonFlags.aa_compromise]), None
)
def test_eq(self):
dp = x509.DistributionPoint(
[x509.UniformResourceIdentifier("http://crypt.og/crl")],
None,
frozenset([x509.ReasonFlags.superseded]),
[
x509.DirectoryName(
x509.Name(
[
x509.NameAttribute(
NameOID.COMMON_NAME, "Important CA"
)
]
)
)
],
)
dp2 = x509.DistributionPoint(
[x509.UniformResourceIdentifier("http://crypt.og/crl")],
None,
frozenset([x509.ReasonFlags.superseded]),
[
x509.DirectoryName(
x509.Name(
[
x509.NameAttribute(
NameOID.COMMON_NAME, "Important CA"
)
]
)
)
],
)
assert dp == dp2
def test_ne(self):
dp = x509.DistributionPoint(
[x509.UniformResourceIdentifier("http://crypt.og/crl")],
None,
frozenset([x509.ReasonFlags.superseded]),
[
x509.DirectoryName(
x509.Name(
[
x509.NameAttribute(
NameOID.COMMON_NAME, "Important CA"
)
]
)
)
],
)
dp2 = x509.DistributionPoint(
[x509.UniformResourceIdentifier("http://crypt.og/crl")],
None,
None,
None,
)
assert dp != dp2
assert dp != object()
def test_iter_input(self):
name = [x509.UniformResourceIdentifier("http://crypt.og/crl")]
issuer = [
x509.DirectoryName(
x509.Name(
[x509.NameAttribute(NameOID.COMMON_NAME, "Important CA")]
)
)
]
dp = x509.DistributionPoint(
iter(name),
None,
frozenset([x509.ReasonFlags.ca_compromise]),
iter(issuer),
)
assert dp.full_name is not None
assert list(dp.full_name) == name
assert dp.crl_issuer is not None
assert list(dp.crl_issuer) == issuer
def test_repr(self):
dp = x509.DistributionPoint(
None,
x509.RelativeDistinguishedName(
[x509.NameAttribute(NameOID.COMMON_NAME, "myCN")]
),
frozenset([x509.ReasonFlags.ca_compromise]),
[
x509.DirectoryName(
x509.Name(
[
x509.NameAttribute(
NameOID.COMMON_NAME, "Important CA"
)
]
)
)
],
)
assert repr(dp) == (
"<DistributionPoint(full_name=None, relative_name=<RelativeDis"
"tinguishedName(CN=myCN)>, reasons=frozenset({<ReasonFlags.ca_"
"compromise: 'cACompromise'>}), crl_issuer=[<DirectoryName(val"
"ue=<Name(CN=Important CA)>)>])>"
)
def test_hash(self):
dp = x509.DistributionPoint(
[x509.UniformResourceIdentifier("http://crypt.og/crl")],
None,
frozenset([x509.ReasonFlags.superseded]),
[
x509.DirectoryName(
x509.Name(
[
x509.NameAttribute(
NameOID.COMMON_NAME, "Important CA"
)
]
)
)
],
)
dp2 = x509.DistributionPoint(
[x509.UniformResourceIdentifier("http://crypt.og/crl")],
None,
frozenset([x509.ReasonFlags.superseded]),
[
x509.DirectoryName(
x509.Name(
[
x509.NameAttribute(
NameOID.COMMON_NAME, "Important CA"
)
]
)
)
],
)
dp3 = x509.DistributionPoint(
None,
x509.RelativeDistinguishedName(
[x509.NameAttribute(NameOID.COMMON_NAME, "myCN")]
),
None,
None,
)
assert hash(dp) == hash(dp2)
assert hash(dp) != hash(dp3)
| TestDistributionPoint |
python | pytorch__pytorch | torch/distributed/tensor/placement_types.py | {
"start": 17130,
"end": 26270
} | class ____(torch._C._distributed.StridedShard, Shard):
"""
_StridedShard is only introduced to support 2D FSDP2 + TP sharding where the tensor
is sharded on the TP mesh dimension first, then sharded on the FSDP mesh dimension.
We call this right-to-left sharding which is the opposite of the default
left-to-right sharding. See the example below:
tensor shape: [8, 8]
mesh: [[0, 1], [2, 3]], names=("dp", "tp")
placements: [Shard(0), Shard(0)]
The default sharding behavior shards the tensor on "dp" mesh dimension first then
"tp" dimension. The sharding result will be:
Rank | Mesh Coordinate | Shard Index
------------------------------------------------
0 | (0, 0) | 0 (row 0-1)
1 | (0, 1) | 1 (row 2-3)
2 | (1, 0) | 2 (row 4-5)
3 | (1, 1) | 3 (row 6-7)
While the FSDP2 + TP sharding behavior does the opposite: it shards the tensor on
"tp" mesh dim first then "dp" dim. This right-to-left sharding will produce the
result:
Rank | Mesh Coordinate | Shard Index
------------------------------------------------
0 | (0, 0) | 0 (row 0-1)
1 | (0, 1) | 2 (row 4-5)
2 | (1, 0) | 1 (row 2-3)
3 | (1, 1) | 3 (row 6-7)
The consequence is, any attempt to redistribute this DTensor to a full replica will
produce a wrong result because the shard-to-replicate redistribution always happens
right-to-left, regardless it's left-to-right sharding or right-to-left. To address
this, we use _StridedShard placement to make this right-to-left sharding compatible
with our left-to-right convention on both tensor distribution and redistribution.
Now with _StridedShard, the right-to-left sharding above can be represented as:
tensor shape: [8, 8]
mesh: [[0, 1], [2, 3]], names=("dp", "tp")
placements: [_StridedShard(0, split_factor=2), Shard(0)]
And a left-to-right processing of `placements` will produce the same result, which is
different from using the `Shard` placement:
Rank | Mesh Coordinate | Shard Index
------------------------------------------------
0 | (0, 0) | 0 (row 0-1)
1 | (0, 1) | 2 (row 4-5)
2 | (1, 0) | 1 (row 2-3)
3 | (1, 1) | 3 (row 6-7)
The argument `split_factor` is the number of existing shards over the tensor sharding
dimension before processing the _StridedShard placement, as if the sharding happened
right-to-left. In the example above, the tensor should first be sharded on the "tp"
dimension into 2 shards before being sharded on the "dp" dimension. Therefore, the
`split_factor` of the _StridedShard placement on "dp" dim is 2.
TODO: we should remove _StridedShard placement once we can unify it with Shard
"""
def __hash__(self) -> int:
return hash((self.dim, self.split_factor))
def __repr__(self) -> str:
"""
machine readable representation of the _StridedShard placement
"""
return f"_StridedShard(dim={self.dim}, sf={self.split_factor})"
def __str__(self) -> str:
"""human readable representation of the _StridedShard placement"""
return f"_S({self.dim}, {self.split_factor})"
@classmethod
def _make_shard_tensor(
cls,
dim: int,
tensor: torch.Tensor,
mesh: DeviceMesh,
mesh_dim: int,
src_data_rank: int | None = 0,
split_factor: int = 1,
) -> torch.Tensor:
strided_shard_placement = cls(dim=dim, split_factor=split_factor)
return strided_shard_placement._shard_tensor(
tensor, mesh, mesh_dim, src_data_rank
)
def _split_tensor(
self,
tensor: torch.Tensor,
num_chunks: int,
*,
with_padding: bool = True,
contiguous: bool = True,
) -> tuple[list[torch.Tensor], list[int]]:
assert self.dim <= tensor.ndim, (
f"Sharding dim {self.dim} greater than tensor ndim {tensor.ndim}"
)
# Essentially _StridedShard express the right-to-left sharding in the
# reversed order. Here we perform first_split as the virtual "right" sharding,
# and then second_split as the virtual "left" sharding, and finally assemble
# results in the transposed left-first order.
first_split, _ = super()._split_tensor(
tensor, self.split_factor, with_padding=False, contiguous=False
)
second_split = [
super(_StridedShard, self)._split_tensor(
s, num_chunks=num_chunks, with_padding=False, contiguous=False
)[0]
for s in first_split
]
shard_list: list[torch.Tensor] = []
for i in range(num_chunks):
shard = torch.cat(
[second_split[j][i] for j in range(self.split_factor)],
dim=self.dim,
)
if contiguous:
shard = shard.contiguous()
shard_list.append(shard)
# The amount of padding is determined by the local chunk with the largest size.
pad_sizes: list[int] = []
max_chunk_size = max([shard.size(self.dim) for shard in shard_list])
if with_padding:
pad_sizes = [max_chunk_size - shard.size(self.dim) for shard in shard_list]
return shard_list, pad_sizes
def _to_replicate_tensor(
self,
local_tensor: torch.Tensor,
mesh: DeviceMesh,
mesh_dim: int,
current_logical_shape: list[int],
) -> torch.Tensor:
"""
replay the replicate-to-shard process to understand how to stitch shards back
"""
num_chunks = mesh.size(mesh_dim=mesh_dim)
logical_dim_size = current_logical_shape[self.dim]
# indices_tensor is 1D torch.arange(logical_dim_size) unsqueezed
# so that we can reuse self._split_tensor which splits on self.dim
shape = [1] * self.dim + [logical_dim_size]
indices_tensor = torch.arange(
logical_dim_size, device=local_tensor.device
).view(shape)
sharded_indices, _ = self._split_tensor(
indices_tensor,
num_chunks,
with_padding=False,
contiguous=False,
)
# squeeze back to 1D indices tensor
sharded_indices = [shard.view(-1) for shard in sharded_indices]
max_chunk_size = max([len(shard) for shard in sharded_indices])
local_pad_size = max_chunk_size - local_tensor.size(self.dim)
local_tensor_padded = pad_tensor(local_tensor, self.dim, local_pad_size)
if not local_tensor_padded.is_contiguous():
local_tensor_padded = local_tensor_padded.contiguous()
replicate_tensor_permuted_padded = funcol.all_gather_tensor(
local_tensor_padded,
gather_dim=self.dim,
group=(mesh, mesh_dim),
)
if isinstance(replicate_tensor_permuted_padded, funcol.AsyncCollectiveTensor):
replicate_tensor_permuted_padded = replicate_tensor_permuted_padded.wait()
if replicate_tensor_permuted_padded.shape[self.dim] > logical_dim_size:
replicate_tensor_permuted = unpad_tensor(
replicate_tensor_permuted_padded,
self.dim,
replicate_tensor_permuted_padded.shape[self.dim] - logical_dim_size,
)
else:
replicate_tensor_permuted = replicate_tensor_permuted_padded
permutation = torch.cat(sharded_indices)
inv_permutation = torch.argsort(permutation)
replicate_tensor = torch.index_select(
replicate_tensor_permuted, self.dim, inv_permutation
)
return replicate_tensor.contiguous()
@staticmethod
@maybe_run_for_local_tensor
def _local_shard_size(sharded_indices: list[torch.Tensor], rank: int) -> int:
return len(sharded_indices[rank])
def _local_shard_size_and_offset(
self,
curr_local_size: int,
num_chunks: int,
rank: int,
) -> tuple[int, int | None]:
# indices_tensor is 1D torch.arange(logical_dim_size) unsqueezed
# so that we can reuse self._split_tensor which splits on self.dim
shape = [1] * self.dim + [curr_local_size]
indices_tensor = torch.arange(
curr_local_size,
).view(shape)
sharded_indices, _ = self._split_tensor(
indices_tensor,
num_chunks,
with_padding=False,
contiguous=False,
)
# squeeze back to 1D indices tensor
sharded_indices = [shard.view(-1) for shard in sharded_indices]
local_shard_size = _StridedShard._local_shard_size(sharded_indices, rank)
# offsets from _StridedShard is never used
return local_shard_size, None
| _StridedShard |
python | pytorch__pytorch | test/torch_np/numpy_tests/linalg/test_linalg.py | {
"start": 12538,
"end": 13679
} | class ____(LinalgTestCase):
@slow
def test_generalized_herm_cases(self):
self.check_cases(require={"generalized", "hermitian"}, exclude={"size-0"})
@slow
def test_generalized_empty_herm_cases(self):
self.check_cases(
require={"generalized", "hermitian", "size-0"}, exclude={"none"}
)
def dot_generalized(a, b):
a = asarray(a)
if a.ndim >= 3:
if a.ndim == b.ndim:
# matrix x matrix
new_shape = a.shape[:-1] + b.shape[-1:]
elif a.ndim == b.ndim + 1:
# matrix x vector
new_shape = a.shape[:-1]
else:
raise ValueError("Not implemented...")
r = np.empty(new_shape, dtype=np.common_type(a, b))
for c in itertools.product(*map(range, a.shape[:-2])):
r[c] = dot(a[c], b[c])
return r
else:
return dot(a, b)
def identity_like_generalized(a):
a = asarray(a)
if a.ndim >= 3:
r = np.empty(a.shape, dtype=a.dtype)
r[...] = identity(a.shape[-2])
return r
else:
return identity(a.shape[0])
| HermitianGeneralizedTestCase |
python | getsentry__sentry | src/sentry/core/endpoints/scim/utils.py | {
"start": 5033,
"end": 7425
} | class ____(OrganizationEndpoint):
owner = ApiOwner.ENTERPRISE
content_negotiation_class = SCIMClientNegotiation
cursor_name = "startIndex"
def add_cursor_headers(self, request: Request, response, cursor_result):
pass
def list_api_format(self, results, total_results, start_index):
return {
"schemas": [SCIM_API_LIST],
"totalResults": total_results, # TODO: audit perf of queryset.count()
"startIndex": start_index,
"itemsPerPage": len(results), # what's max?
"Resources": results,
}
def get_query_parameters(self, request: Request):
serializer = SCIMQueryParamSerializer(data=request.GET)
if not serializer.is_valid():
if "filter" in serializer.errors:
# invalid filter needs to return a specific formatted
# error response
raise ParseError(detail=SCIM_400_INVALID_FILTER)
raise ParseError(serializer.errors)
return serializer.validated_data
def parse_filter_conditions(raw_filters):
"""
this function parses a scim filter, see:
https://datatracker.ietf.org/doc/html/rfc7644#section-3.4.2.2
right now the only subset of that filtering we support is the simple "eq"
operator. the input would look like so:
userName eq "test.user@okta.local"
We may want to support further SCIM grammar for other IDPs and may use
a package to replace this functionality.
"""
# TODO: support "and" operator
# TODO: support email querying/filtering
# TODO: graceful error handling when unsupported operators are used
if raw_filters is None:
return None
try:
conditions = raw_filters.split(",")
except ValueError:
raise SCIMFilterError
# we don't support multiple filters right now.
if len(conditions) > 1:
raise SCIMFilterError
condition = conditions[0]
try:
[key, value] = condition.split(" eq ")
except ValueError:
raise SCIMFilterError
if not key or not value:
raise SCIMFilterError
key = key.strip()
value = value.strip()
# remove encasing quotes around the value
value = value[1:-1]
if key not in ACCEPTED_FILTERED_KEYS:
raise SCIMFilterError
if key == "value":
value = int(value)
return value
| SCIMEndpoint |
python | doocs__leetcode | solution/0100-0199/0143.Reorder List/Solution.py | {
"start": 151,
"end": 691
} | class ____:
def reorderList(self, head: Optional[ListNode]) -> None:
fast = slow = head
while fast.next and fast.next.next:
slow = slow.next
fast = fast.next.next
cur = slow.next
slow.next = None
pre = None
while cur:
t = cur.next
cur.next = pre
pre, cur = cur, t
cur = head
while pre:
t = pre.next
pre.next = cur.next
cur.next = pre
cur, pre = pre.next, t
| Solution |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/openblas_with_lapack/package.py | {
"start": 217,
"end": 568
} | class ____(Package):
"""Dummy version of OpenBLAS that also provides LAPACK, for testing."""
homepage = "http://www.openblas.net"
url = "http://github.com/xianyi/OpenBLAS/archive/v0.2.15.tar.gz"
version("0.2.15", md5="b1190f3d3471685f17cfd1ec1d252ac9")
provides("lapack", "blas")
depends_on("c", type="build")
| OpenblasWithLapack |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/attributes.py | {
"start": 1897,
"end": 2149
} | class ____:
token: str = ""
def test_attribute_union_sink(t: Union[Sink, Untainted]):
t.token = _test_source()
if isinstance(t, Sink):
t.token = _test_source()
elif isinstance(t, Untainted):
t.token = _test_source()
| Sink |
python | pydantic__pydantic | tests/mypy/modules/plugin_success.py | {
"start": 2420,
"end": 2560
} | class ____(BaseModel):
x: int
y: ClassVar[int] = 1
ClassVarModel(x=1)
@dataclass(config={'validate_assignment': True})
| ClassVarModel |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingTuple1.py | {
"start": 1094,
"end": 1643
} | class ____(Enum):
A = 0
B = 1
MsgE = tuple[Literal[MyEnum.A], str]
MsgF = tuple[Literal[MyEnum.B], float]
MsgEOrF = MsgE | MsgF
def func5(m: MsgEOrF):
if m[0] is MyEnum.A:
reveal_type(m, expected_text="tuple[Literal[MyEnum.A], str]")
else:
reveal_type(m, expected_text="tuple[Literal[MyEnum.B], float]")
def func6(m: MsgEOrF):
if m[0] is not MyEnum.A:
reveal_type(m, expected_text="tuple[Literal[MyEnum.B], float]")
else:
reveal_type(m, expected_text="tuple[Literal[MyEnum.A], str]")
| MyEnum |
python | ray-project__ray | python/ray/train/tests/test_iter_torch_batches_gpu.py | {
"start": 2615,
"end": 3012
} | class ____(ArrowBatchCollateFn):
"""Collate function that returns id and value as a dictionary of chunked tensors."""
def __call__(self, batch: pa.Table) -> Dict[str, List[torch.Tensor]]:
assert isinstance(batch, pa.Table)
modified_batch = _chunk_table_in_half(batch)
return arrow_batch_to_tensors(modified_batch, combine_chunks=False)
| ChunkedDictArrowBatchCollateFn |
python | getsentry__sentry | tests/sentry/snuba/test_transactions.py | {
"start": 108200,
"end": 120391
} | class ____(SnubaTestCase, TestCase):
def setUp(self) -> None:
super().setUp()
self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
self.now = before_now()
event_data = load_data("transaction")
# Half of duration so we don't get weird rounding differences when comparing the results
event_data["breakdowns"]["span_ops"]["ops.http"]["value"] = 1500
event_data["start_timestamp"] = (self.day_ago + timedelta(minutes=30)).isoformat()
event_data["timestamp"] = (self.day_ago + timedelta(minutes=30, seconds=3)).isoformat()
self.store_event(data=event_data, project_id=self.project.id)
self.snuba_params = SnubaParams(
projects=[self.project],
start=self.day_ago,
end=self.now,
)
self.query = "event.type:transaction"
def test_simple(self) -> None:
results = transactions.query(
selected_columns=[
"spans.http",
"transaction.duration",
],
equations=["spans.http / transaction.duration"],
query=self.query,
snuba_params=self.snuba_params,
referrer="discover",
)
assert len(results["data"]) == 1
result = results["data"][0]
assert result["equation[0]"] == result["spans.http"] / result["transaction.duration"]
def test_multiple_equations(self) -> None:
results = transactions.query(
selected_columns=[
"spans.http",
"transaction.duration",
],
equations=[
"spans.http / transaction.duration",
"transaction.duration / spans.http",
"1500 + transaction.duration",
],
query=self.query,
snuba_params=self.snuba_params,
referrer="discover",
)
assert len(results["data"]) == 1
result = results["data"][0]
assert result["equation[0]"] == result["spans.http"] / result["transaction.duration"]
assert result["equation[1]"] == result["transaction.duration"] / result["spans.http"]
assert result["equation[2]"] == 1500 + result["transaction.duration"]
def test_invalid_field(self) -> None:
with pytest.raises(ArithmeticValidationError):
transactions.query(
selected_columns=[
"spans.http",
"transaction.status",
],
# while transaction_status is a uint8, there's no reason we should allow arith on it
equations=["spans.http / transaction.status"],
query=self.query,
snuba_params=self.snuba_params,
referrer="discover",
)
def test_invalid_function(self) -> None:
with pytest.raises(ArithmeticValidationError):
transactions.query(
selected_columns=[
"p50(transaction.duration)",
"last_seen()",
],
equations=["p50(transaction.duration) / last_seen()"],
query=self.query,
snuba_params=self.snuba_params,
referrer="discover",
)
def test_unselected_field(self) -> None:
with pytest.raises(InvalidSearchQuery):
transactions.query(
selected_columns=[
"spans.http",
],
equations=["spans.http / transaction.duration"],
query=self.query,
snuba_params=self.snuba_params,
referrer="discover",
)
def test_unselected_function(self) -> None:
with pytest.raises(InvalidSearchQuery):
transactions.query(
selected_columns=[
"p50(transaction.duration)",
],
equations=["p50(transaction.duration) / p100(transaction.duration)"],
query=self.query,
snuba_params=self.snuba_params,
referrer="discover",
)
def test_orderby_equation(self) -> None:
for i in range(1, 3):
event_data = load_data("transaction")
# Half of duration so we don't get weird rounding differences when comparing the results
event_data["breakdowns"]["span_ops"]["ops.http"]["value"] = 300 * i
event_data["start_timestamp"] = (self.day_ago + timedelta(minutes=30)).isoformat()
event_data["timestamp"] = (self.day_ago + timedelta(minutes=30, seconds=3)).isoformat()
self.store_event(data=event_data, project_id=self.project.id)
results = transactions.query(
selected_columns=[
"spans.http",
"transaction.duration",
],
equations=[
"spans.http / transaction.duration",
"transaction.duration / spans.http",
"1500 + transaction.duration",
],
orderby=["equation[0]"],
query=self.query,
snuba_params=self.snuba_params,
referrer="discover",
)
assert len(results["data"]) == 3
assert [result["equation[0]"] for result in results["data"]] == [0.1, 0.2, 0.5]
results = transactions.query(
selected_columns=[
"spans.http",
"transaction.duration",
],
equations=[
"spans.http / transaction.duration",
"transaction.duration / spans.http",
"1500 + transaction.duration",
],
orderby=["equation[1]"],
query=self.query,
snuba_params=self.snuba_params,
referrer="discover",
)
assert len(results["data"]) == 3
assert [result["equation[1]"] for result in results["data"]] == [2, 5, 10]
results = transactions.query(
selected_columns=[
"spans.http",
"transaction.duration",
],
equations=[
"spans.http / transaction.duration",
"transaction.duration / spans.http",
"1500 + transaction.duration",
],
orderby=["-equation[0]"],
query=self.query,
snuba_params=self.snuba_params,
referrer="discover",
)
assert len(results["data"]) == 3
assert [result["equation[0]"] for result in results["data"]] == [0.5, 0.2, 0.1]
def test_orderby_nonexistent_equation(self) -> None:
with pytest.raises(InvalidSearchQuery):
transactions.query(
selected_columns=[
"spans.http",
"transaction.duration",
],
orderby=["equation[1]"],
query=self.query,
snuba_params=self.snuba_params,
referrer="discover",
)
def test_equation_without_field_or_function(self) -> None:
with pytest.raises(InvalidSearchQuery):
transactions.query(
selected_columns=[
"spans.http",
"transaction.duration",
],
equations=[
"5 + 5",
],
query=self.query,
snuba_params=self.snuba_params,
referrer="discover",
)
def test_aggregate_equation(self) -> None:
results = transactions.query(
selected_columns=[
"p50(transaction.duration)",
],
equations=["p50(transaction.duration) / 2"],
query=self.query,
snuba_params=self.snuba_params,
referrer="discover",
)
assert len(results["data"]) == 1
result = results["data"][0]
assert result["equation[0]"] == result["p50_transaction_duration"] / 2
def test_multiple_aggregate_equation(self) -> None:
results = transactions.query(
selected_columns=[
"p50(transaction.duration)",
"count()",
],
equations=["p50(transaction.duration) + 2", "p50(transaction.duration) / count()"],
query=self.query,
snuba_params=self.snuba_params,
referrer="discover",
)
assert len(results["data"]) == 1
result = results["data"][0]
assert result["equation[0]"] == result["p50_transaction_duration"] + 2
assert result["equation[1]"] == result["p50_transaction_duration"] / result["count"]
def test_multiple_operators(self) -> None:
results = transactions.query(
selected_columns=[
"p50(transaction.duration)",
"p100(transaction.duration)",
"count()",
],
equations=[
"p50(transaction.duration) / p100(transaction.duration) * 100",
"100 + count() * 5 - 3 / 5",
"count() + count() / count() * count() - count()",
],
query=self.query,
snuba_params=self.snuba_params,
referrer="discover",
)
assert len(results["data"]) == 1
result = results["data"][0]
assert (
result["equation[0]"]
== result["p50_transaction_duration"] / result["p100_transaction_duration"] * 100
)
assert result["equation[1]"] == 100 + result["count"] * 5 - 3 / 5
assert (
result["equation[2]"]
== result["count"]
+ result["count"] / result["count"] * result["count"]
- result["count"]
)
def test_nan_equation_results(self) -> None:
for i in range(1, 3):
event_data = load_data("transaction")
# Half of duration so we don't get weird rounding differences when comparing the results
event_data["breakdowns"]["span_ops"]["ops.http"]["value"] = 0
event_data["start_timestamp"] = (self.day_ago + timedelta(minutes=30)).isoformat()
event_data["timestamp"] = (self.day_ago + timedelta(minutes=30, seconds=3)).isoformat()
self.store_event(data=event_data, project_id=self.project.id)
results = transactions.query(
selected_columns=[
"spans.http",
"transaction.duration",
],
equations=[
"transaction.duration / spans.http", # inf
"spans.http / spans.http", # nan
],
orderby=["equation[0]"],
query=self.query,
snuba_params=self.snuba_params,
referrer="discover",
)
assert len(results["data"]) == 3
assert [result["equation[0]"] for result in results["data"]] == [2, None, None]
results = transactions.query(
selected_columns=[
"spans.http",
"transaction.duration",
],
equations=[
"transaction.duration / spans.http", # inf
"spans.http / spans.http", # nan
],
orderby=["equation[1]"],
query=self.query,
snuba_params=self.snuba_params,
referrer="discover",
)
assert len(results["data"]) == 3
assert [result["equation[1]"] for result in results["data"]] == [1, None, None]
results = transactions.query(
selected_columns=[
"spans.http",
"transaction.duration",
],
equations=[
"transaction.duration / spans.http", # inf
"spans.http / spans.http", # nan
],
orderby=["-equation[0]"],
query=self.query,
snuba_params=self.snuba_params,
referrer="discover",
)
assert len(results["data"]) == 3
assert [result["equation[0]"] for result in results["data"]] == [2, None, None]
| TransactionsArithmeticTest |
python | ray-project__ray | python/ray/data/tests/test_issue_detection.py | {
"start": 730,
"end": 6509
} | class ____:
def test_hanging_detector_configuration(self, restore_data_context):
"""Test hanging detector configuration and initialization."""
# Test default configuration from DataContext
ctx = DataContext.get_current()
default_config = ctx.issue_detectors_config.hanging_detector_config
assert default_config.op_task_stats_min_count == DEFAULT_OP_TASK_STATS_MIN_COUNT
assert (
default_config.op_task_stats_std_factor == DEFAULT_OP_TASK_STATS_STD_FACTOR
)
# Test custom configuration
min_count = 5
std_factor = 3.0
custom_config = HangingExecutionIssueDetectorConfig(
op_task_stats_min_count=min_count,
op_task_stats_std_factor=std_factor,
)
ctx.issue_detectors_config.hanging_detector_config = custom_config
detector = HangingExecutionIssueDetector(
dataset_id="id", operators=[], config=custom_config
)
assert detector._op_task_stats_min_count == min_count
assert detector._op_task_stats_std_factor_threshold == std_factor
@patch(
"ray.data._internal.execution.interfaces.op_runtime_metrics.TaskDurationStats"
)
def test_basic_hanging_detection(
self, mock_stats_cls, ray_start_2_cpus, restore_data_context
):
# Set up logging capture
log_capture = io.StringIO()
handler = logging.StreamHandler(log_capture)
logger = logging.getLogger("ray.data._internal.issue_detection")
logger.addHandler(handler)
# Set up mock stats to return values that will trigger adaptive threshold
mocked_mean = 2.0 # Increase from 0.5 to 2.0 seconds
mocked_stddev = 0.2 # Increase from 0.05 to 0.2 seconds
mock_stats = mock_stats_cls.return_value
mock_stats.count.return_value = 20 # Enough samples
mock_stats.mean.return_value = mocked_mean
mock_stats.stddev.return_value = mocked_stddev
# Set a short issue detection interval for testing
ctx = DataContext.get_current()
detector_cfg = ctx.issue_detectors_config.hanging_detector_config
detector_cfg.detection_time_interval_s = 0.00
# test no hanging doesn't log hanging warning
def f1(x):
return x
_ = ray.data.range(1).map(f1).materialize()
log_output = log_capture.getvalue()
warn_msg = (
r"A task of operator .+ with task index .+ has been running for [\d\.]+s"
)
assert re.search(warn_msg, log_output) is None, log_output
# # test hanging does log hanging warning
def f2(x):
time.sleep(5.0) # Increase from 1.1 to 5.0 seconds to exceed new threshold
return x
_ = ray.data.range(1).map(f2).materialize()
log_output = log_capture.getvalue()
assert re.search(warn_msg, log_output) is not None, log_output
def test_hanging_detector_detects_issues(
self, caplog, propagate_logs, restore_data_context
):
"""Test hanging detector adaptive thresholds with real Ray Data pipelines and extreme configurations."""
ctx = DataContext.get_current()
# Configure hanging detector with extreme std_factor values
ctx.issue_detectors_config.hanging_detector_config = (
HangingExecutionIssueDetectorConfig(
op_task_stats_min_count=1,
op_task_stats_std_factor=1,
detection_time_interval_s=0,
)
)
# Create a pipeline with many small blocks to ensure concurrent tasks
def sleep_task(x):
if x["id"] == 2:
# Issue detection is based on the mean + stdev. One of the tasks must take
# awhile, so doing it just for one of the rows.
time.sleep(1)
return x
with caplog.at_level(logging.WARNING):
ray.data.range(3, override_num_blocks=3).map(
sleep_task, concurrency=1
).materialize()
# Check if hanging detection occurred
hanging_detected = (
"has been running for" in caplog.text
and "longer than the average task duration" in caplog.text
)
assert hanging_detected, caplog.text
@pytest.mark.parametrize(
"configured_memory, actual_memory, should_return_issue",
[
# User has appropriately configured memory, so no issue.
(4 * 1024**3, 4 * 1024**3, False),
# User hasn't configured memory correctly and memory use is high, so issue.
(None, 4 * 1024**3, True),
(1, 4 * 1024**3, True),
# User hasn't configured memory correctly but memory use is low, so no issue.
(None, 4 * 1024**3 - 1, False),
],
)
def test_high_memory_detection(
configured_memory, actual_memory, should_return_issue, restore_data_context
):
ctx = DataContext.get_current()
input_data_buffer = InputDataBuffer(ctx, input_data=[])
map_operator = MapOperator.create(
map_transformer=MagicMock(),
input_op=input_data_buffer,
data_context=ctx,
ray_remote_args={"memory": configured_memory},
)
map_operator._metrics = MagicMock(average_max_uss_per_task=actual_memory)
topology = {input_data_buffer: MagicMock(), map_operator: MagicMock()}
operators = list(topology.keys())
detector = HighMemoryIssueDetector(
dataset_id="id",
operators=operators,
config=ctx.issue_detectors_config.high_memory_detector_config,
)
issues = detector.detect()
assert should_return_issue == bool(issues)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| TestHangingExecutionIssueDetector |
python | bokeh__bokeh | tests/unit/bokeh/test_resources.py | {
"start": 3410,
"end": 18899
} | class ____:
def test_basic(self) -> None:
r = resources.Resources()
assert r.mode == "cdn"
def test_clone(self) -> None:
r = resources.Resources(mode="server-dev")
assert r.mode == "server"
assert r.dev is True
assert r.components == ["bokeh", "bokeh-gl", "bokeh-widgets", "bokeh-tables", "bokeh-mathjax"]
c = r.clone(components=["bokeh", "bokeh-gl"])
assert c.mode == "server"
assert c.dev is True
assert c.components == ["bokeh", "bokeh-gl"]
def test_str(self) -> None:
r0 = resources.Resources(mode="cdn")
assert str(r0) == "Resources(mode='cdn')"
r1 = resources.Resources(mode="inline")
assert str(r1) == "Resources(mode='inline')"
r2 = resources.Resources(mode="server-dev")
assert str(r2) == "Resources(mode='server', dev=True)"
r3 = resources.Resources(mode="server-dev", components=["bokeh", "bokeh-gl"])
assert str(r3) == "Resources(mode='server', dev=True, components=['bokeh', 'bokeh-gl'])"
def test_build(self) -> None:
r0 = resources.Resources(mode="cdn")
settings.resources = "inline"
try:
r = resources.Resources.build(r0)
assert r is r0
finally:
del settings.resources
r1 = "cdn"
settings.resources = "inline"
try:
r = resources.Resources.build(r1)
assert r.mode == "cdn"
finally:
del settings.resources
r2 = None
settings.resources = "inline"
try:
r = resources.Resources.build(r2)
assert r.mode == "inline"
finally:
del settings.resources
def test_log_level(self) -> None:
r = resources.Resources()
for level in LOG_LEVELS:
r.log_level = level
assert r.log_level == level
if not r.dev:
assert r.js_raw[-1] == f'Bokeh.set_log_level("{level}");'
with pytest.raises(ValueError):
setattr(r, "log_level", "foo")
def test_module_attrs(self) -> None:
assert resources.CDN.mode == "cdn"
assert resources.INLINE.mode == "inline"
def test_inline(self) -> None:
r = resources.Resources(mode="inline")
assert r.mode == "inline"
assert r.dev is False
assert len(r.js_raw) == 6
assert r.js_raw[-1] == DEFAULT_LOG_JS_RAW
assert len(r.css_raw) == 0
assert r.messages == []
def test__get_cdn_urls_full(self) -> None:
result = _get_cdn_urls(version="2.4.2")
url = result.urls(["bokeh"], "js")[0]
assert "bokeh/" in url
assert "2.4.2" in url
assert "dev" not in url
assert "rc" not in url
@pytest.mark.parametrize('v', ("2.3.4.dev2", "3.0.1rc2"))
def test__get_cdn_urls_dev(self, v) -> None:
result = _get_cdn_urls(version=v)
url = result.urls(["bokeh"], "js")[0]
assert "bokeh/dev" in url
assert v in url
def test_cdn(self, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr(resources, "__version__", "1.0")
r = resources.Resources(mode="cdn", version="1.0")
assert r.mode == "cdn"
assert r.dev is False
assert r.js_raw == [DEFAULT_LOG_JS_RAW]
assert r.css_raw == []
assert r.messages == []
resources.__version__ = "1.0+1.abc"
r = resources.Resources(mode="cdn", version="1.0")
assert r.messages == [
RuntimeMessage(
text="Requesting CDN BokehJS version '1.0' from local development version '1.0+1.abc'. This configuration is unsupported and may not work!",
type="warn",
),
]
def test_server_default(self) -> None:
r = resources.Resources(mode="server")
assert r.mode == "server"
assert r.dev is False
assert r.js_raw == [DEFAULT_LOG_JS_RAW]
assert r.css_raw == []
assert r.messages == []
assert r.js_files == [
"http://localhost:5006/static/js/bokeh.min.js",
"http://localhost:5006/static/js/bokeh-gl.min.js",
"http://localhost:5006/static/js/bokeh-widgets.min.js",
"http://localhost:5006/static/js/bokeh-tables.min.js",
"http://localhost:5006/static/js/bokeh-mathjax.min.js",
]
def test_server_root_url(self) -> None:
r = resources.Resources(mode="server", root_url="http://foo/")
assert r.js_raw == [DEFAULT_LOG_JS_RAW]
assert r.css_raw == []
assert r.messages == []
assert r.js_files == [
"http://foo/static/js/bokeh.min.js",
"http://foo/static/js/bokeh-gl.min.js",
"http://foo/static/js/bokeh-widgets.min.js",
"http://foo/static/js/bokeh-tables.min.js",
"http://foo/static/js/bokeh-mathjax.min.js",
]
def test_server_root_url_empty(self) -> None:
r = resources.Resources(mode="server", root_url="")
assert r.js_raw == [DEFAULT_LOG_JS_RAW]
assert r.css_raw == []
assert r.messages == []
assert r.js_files == [
"static/js/bokeh.min.js",
"static/js/bokeh-gl.min.js",
"static/js/bokeh-widgets.min.js",
"static/js/bokeh-tables.min.js",
"static/js/bokeh-mathjax.min.js",
]
def test_server_with_versioner(self) -> None:
def versioner(path: str) -> str:
return path + "?v=VERSIONED"
r = resources.Resources(mode="server", root_url="http://foo/", path_versioner=versioner)
assert r.js_files == [
"http://foo/static/js/bokeh.min.js?v=VERSIONED",
"http://foo/static/js/bokeh-gl.min.js?v=VERSIONED",
"http://foo/static/js/bokeh-widgets.min.js?v=VERSIONED",
"http://foo/static/js/bokeh-tables.min.js?v=VERSIONED",
"http://foo/static/js/bokeh-mathjax.min.js?v=VERSIONED",
]
def test_server_dev(self) -> None:
r = resources.Resources(mode="server-dev")
assert r.mode == "server"
assert r.dev is True
assert len(r.js_raw) == 2
assert r.css_raw == []
assert r.messages == []
r = resources.Resources(mode="server-dev", root_url="http://foo/")
assert r.js_raw == [DEFAULT_LOG_JS_RAW, "Bokeh.settings.dev = true"]
assert r.css_raw == []
assert r.messages == []
@pytest.mark.skipif(sys.platform == "win32", reason="tests/package on different drives")
def test_relative(self) -> None:
r = resources.Resources(mode="relative")
assert r.mode == "relative"
assert r.dev is False
assert r.js_raw == [DEFAULT_LOG_JS_RAW]
assert r.css_raw == []
assert r.messages == []
@pytest.mark.skipif(sys.platform == "win32", reason="tests/package on different drives")
def test_relative_dev(self) -> None:
r = resources.Resources(mode="relative-dev")
assert r.mode == "relative"
assert r.dev is True
assert r.js_raw == [DEFAULT_LOG_JS_RAW, "Bokeh.settings.dev = true"]
assert r.css_raw == []
assert r.messages == []
def test_absolute(self) -> None:
r = resources.Resources(mode="absolute")
assert r.mode == "absolute"
assert r.dev is False
assert r.js_raw == [DEFAULT_LOG_JS_RAW]
assert r.css_raw == []
assert r.messages == []
def test_absolute_dev(self) -> None:
r = resources.Resources(mode="absolute-dev")
assert r.mode == "absolute"
assert r.dev is True
assert r.js_raw == [DEFAULT_LOG_JS_RAW, "Bokeh.settings.dev = true"]
assert r.css_raw == []
assert r.messages == []
def test_argument_checks(self) -> None:
with pytest.raises(ValueError):
resources.Resources("foo")
for mode in ("inline", "cdn", "server", "server-dev", "absolute", "absolute-dev"):
with pytest.raises(ValueError):
resources.Resources(mode, root_dir="foo")
for mode in ("inline", "server", "server-dev", "relative", "relative-dev", "absolute", "absolute-dev"):
with pytest.raises(ValueError):
resources.Resources(mode, version="foo")
for mode in ("inline", "cdn", "relative", "relative-dev", "absolute", "absolute-dev"):
with pytest.raises(ValueError):
resources.Resources(mode, root_url="foo")
@pytest.mark.parametrize('env', ["BOKEH_CDN_VERSION", "BOKEH_ROOTDIR"])
def test_builtin_importable_with_env(self, monkeypatch: pytest.MonkeyPatch, env) -> None:
cmd = [sys.executable, "-c", "import bokeh.resources"]
monkeypatch.setenv(env, "foo")
try:
subprocess.check_call(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
pytest.fail(f"resources import failed with {env} set")
def test_render_js_cdn_release(self, monkeypatch: pytest.MonkeyPatch) -> None:
bs4 = pytest.importorskip("bs4")
monkeypatch.setattr(buv, "__version__", "2.0.0")
monkeypatch.setattr(resources, "__version__", "2.0.0")
r = resources.CDN.clone()
# Skip bokeh-mathjax for older versions
r.components.remove("bokeh-mathjax")
out = r.render_js()
html = bs4.BeautifulSoup(out, "html.parser")
scripts = html.find_all(name='script')
for script in scripts:
if "src" not in script.attrs:
continue
assert "crossorigin" not in script.attrs
assert "integrity" not in script.attrs
@pytest.mark.parametrize('v', ["1.8.0.rc1", "1.8.0.dev6"])
def test_render_js_cdn_dev_release(self, v: str, monkeypatch: pytest.MonkeyPatch) -> None:
bs4 = pytest.importorskip("bs4")
monkeypatch.setattr(buv, "__version__", v)
monkeypatch.setattr(resources, "__version__", v)
out = resources.CDN.render_js()
html = bs4.BeautifulSoup(out, "html.parser")
scripts = html.find_all(name='script')
for script in scripts:
assert "crossorigin" not in script.attrs
assert "integrity" not in script.attrs
def test_render_js_cdn_dev_local(self, monkeypatch: pytest.MonkeyPatch) -> None:
bs4 = pytest.importorskip("bs4")
monkeypatch.setattr(buv, "__version__", "2.0.0+foo")
monkeypatch.setattr(resources, "__version__", "2.0.0+foo")
r = resources.CDN.clone()
# Skip bokeh-mathjax for older versions
r.components.remove("bokeh-mathjax")
out = r.render_js()
html = bs4.BeautifulSoup(out, "html.parser")
scripts = html.find_all(name='script')
for script in scripts:
if "src" not in script.attrs:
continue
assert "crossorigin" not in script.attrs
assert "integrity" not in script.attrs
@pytest.mark.parametrize('v', ["2.0.0", "2.0.0+foo", "1.8.0.rc1", "1.8.0.dev6"])
def test_render_js_inline(self, v, monkeypatch: pytest.MonkeyPatch) -> None:
bs4 = pytest.importorskip("bs4")
monkeypatch.setattr(buv, "__version__", v)
monkeypatch.setattr(resources, "__version__", v)
out = resources.INLINE.render_js()
html = bs4.BeautifulSoup(out, "html.parser")
scripts = html.find_all(name='script')
for script in scripts:
assert "crossorigin" not in script.attrs
assert "integrity" not in script.attrs
## Test external resources
def test_external_js_and_css_resource_embedding() -> None:
""" This test method has to be at the end of the test modules because
subclassing a Model causes the CustomModel to be added as a Model and
messes up the Resources state for the other tests.
"""
# External resources can be defined as a string or list of strings
class CustomModel1(Model):
__javascript__ = "external_js_1"
__css__ = "external_css_1"
class CustomModel2(Model):
__javascript__ = ["external_js_2", "external_js_3"]
__css__ = ["external_css_2", "external_css_3"]
class CustomModel3(Model):
__javascript__ = ["external_js_1", "external_js_3"]
__css__ = ["external_css_1", "external_css_2"]
r = resources.Resources()
assert "external_js_1" in r.js_files
assert "external_css_1" in r.css_files
assert "external_js_2" in r.js_files
assert "external_js_3" in r.js_files
assert "external_css_2" in r.css_files
assert "external_css_3" in r.css_files
# Deduplication should keep the first instance of every file
assert r.css_files.count("external_css_1") == 1
assert r.css_files.count("external_css_2") == 1
assert r.js_files.count("external_js_3") == 1
assert r.js_files.count("external_js_1") == 1
def test_external_js_and_css_resource_ordering() -> None:
class ZClass(Model):
__javascript__ = "z_class"
class AClass(Model):
__javascript__ = "a_class"
r = resources.Resources()
# a_class is before z_class because they're sorted alphabetically
assert r.js_files.index("a_class") < r.js_files.index("z_class")
# The files should be in the order defined by the lists in CustomModel2 and CustomModel3
assert r.css_files.index("external_css_3") > r.css_files.index("external_css_2")
assert r.js_files.index("external_js_3") > r.js_files.index("external_js_2")
@pytest.mark.parametrize("mode", ["cdn", "inline"])
def test_Resources_with_BOKEH_MINIFIED(mode: resources.ResourcesMode) -> None:
with envset(BOKEH_MINIFIED="yes"):
r = resources.Resources(mode=mode)
assert r.minified is True
with envset(BOKEH_MINIFIED="no"):
r = resources.Resources(mode=mode)
assert r.minified is False
with envset(BOKEH_DEV="yes"):
r = resources.Resources(mode=mode, minified=True)
assert r.minified is True
with envset(BOKEH_DEV="yes"):
r = resources.Resources(mode=mode, minified=False)
assert r.minified is False
with envset(BOKEH_DEV="no"):
r = resources.Resources(mode=mode, minified=True)
assert r.minified is True
with envset(BOKEH_DEV="no"):
r = resources.Resources(mode=mode, minified=False)
assert r.minified is False
with envset(BOKEH_MINIFIED="yes", BOKEH_DEV="yes"):
r = resources.Resources(mode=mode)
assert r.minified is False
with envset(BOKEH_MINIFIED="yes", BOKEH_DEV="no"):
r = resources.Resources(mode=mode)
assert r.minified is True
with envset(BOKEH_MINIFIED="no", BOKEH_DEV="yes"):
r = resources.Resources(mode=mode)
assert r.minified is False
with envset(BOKEH_MINIFIED="no", BOKEH_DEV="no"):
r = resources.Resources(mode=mode)
assert r.minified is False
# -----------------------------------------------------------------------------
# Dev API
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Private API
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Code
# -----------------------------------------------------------------------------
| TestResources |
python | oauthlib__oauthlib | oauthlib/openid/connect/core/exceptions.py | {
"start": 2966,
"end": 3199
} | class ____(OpenIDClientError):
"""
The OP does not support use of the request_uri parameter.
"""
error = 'request_uri_not_supported'
description = 'The request_uri parameter is not supported.'
| RequestURINotSupported |
python | ansible__ansible | test/lib/ansible_test/_internal/host_profiles.py | {
"start": 15338,
"end": 18249
} | class ____[TRemoteConfig: RemoteConfig](SshTargetHostProfile[TRemoteConfig], metaclass=abc.ABCMeta):
"""Base class for remote instance profiles."""
@property
def name(self) -> str:
"""The name of the host profile."""
return self.config.name
@property
def core_ci_state(self) -> t.Optional[dict[str, str]]:
"""The saved Ansible Core CI state."""
return self.state.get('core_ci')
@core_ci_state.setter
def core_ci_state(self, value: dict[str, str]) -> None:
"""The saved Ansible Core CI state."""
self.state['core_ci'] = value
def pre_provision(self) -> None:
"""Pre-provision the host before delegation."""
self.core_ci = self.create_core_ci(load=True)
self.core_ci.start()
self.core_ci_state = self.core_ci.save()
def deprovision(self) -> None:
"""Deprovision the host after delegation has completed."""
super().deprovision()
if self.args.remote_terminate == TerminateMode.ALWAYS or (self.args.remote_terminate == TerminateMode.SUCCESS and self.args.success):
self.delete_instance()
@property
def core_ci(self) -> t.Optional[AnsibleCoreCI]:
"""Return the cached AnsibleCoreCI instance, if any, otherwise None."""
return self.cache.get('core_ci')
@core_ci.setter
def core_ci(self, value: AnsibleCoreCI) -> None:
"""Cache the given AnsibleCoreCI instance."""
self.cache['core_ci'] = value
def get_instance(self) -> t.Optional[AnsibleCoreCI]:
"""Return the current AnsibleCoreCI instance, loading it if not already loaded."""
if not self.core_ci and self.core_ci_state:
self.core_ci = self.create_core_ci(load=False)
self.core_ci.load(self.core_ci_state)
return self.core_ci
def delete_instance(self) -> None:
"""Delete the AnsibleCoreCI VM instance."""
core_ci = self.get_instance()
if not core_ci:
return # instance has not been provisioned
core_ci.stop()
def wait_for_instance(self) -> AnsibleCoreCI:
"""Wait for an AnsibleCoreCI VM instance to become ready."""
core_ci = self.get_instance()
core_ci.wait()
return core_ci
def create_core_ci(self, load: bool) -> AnsibleCoreCI:
"""Create and return an AnsibleCoreCI instance."""
if not self.config.arch:
raise InternalError(f'No arch specified for config: {self.config}')
return AnsibleCoreCI(
args=self.args,
resource=VmResource(
platform=self.config.platform,
version=self.config.version,
architecture=self.config.arch,
provider=self.config.provider,
tag='controller' if self.controller else 'target',
),
load=load,
)
| RemoteProfile |
python | sanic-org__sanic | sanic/mixins/base.py | {
"start": 86,
"end": 132
} | class ____(Protocol):
name: str
| NameProtocol |
python | automl__auto-sklearn | autosklearn/pipeline/components/data_preprocessing/imputation/categorical_imputation.py | {
"start": 464,
"end": 3653
} | class ____(AutoSklearnPreprocessingAlgorithm):
    """
    Substitute missing values by constant:
    When strategy == “constant”, fill_value is used to replace all
    occurrences of missing_values.
    If left to the default, fill_value will be 0 when imputing
    numerical data and “missing_value” for strings or object data types.
    """

    def __init__(
        self, random_state: Optional[Union[int, np.random.RandomState]] = None
    ) -> None:
        # Accepted for pipeline-API compatibility; constant imputation is deterministic.
        self.random_state = random_state

    def fit(
        self, X: PIPELINE_DATA_DTYPE, y: Optional[PIPELINE_DATA_TYPE] = None
    ) -> "CategoricalImputation":
        import sklearn.impute

        if hasattr(X, "columns"):
            # DataFrame: use the last column's dtype kind as representative.
            kind = X[X.columns[-1]].dtype.kind
        else:
            # Series, sparse and numpy have dtype
            # Only DataFrame does not
            kind = X.dtype.kind

        # NOTE: annotated Optional[int], but the non-sparse branch below can
        # yield a float when X is of float kind.
        fill_value: Optional[int] = None
        number_kinds = ("i", "u", "f")
        if kind in number_kinds:
            if isinstance(X, spmatrix):
                # TODO negative labels
                #
                # Previously this was the behaviour and went
                # unnoticed. Imputing negative labels results in
                # the cateogircal shift step failing as the ordinal
                # encoder can't fix negative labels.
                # This is here to document the behaviour explicitly
                fill_value = 0
            else:
                # Use a value strictly below the observed minimum so the fill
                # constant cannot collide with an existing category code.
                fill_value = min(np.unique(X)) - 1

        self.preprocessor = sklearn.impute.SimpleImputer(
            strategy="constant", copy=False, fill_value=fill_value
        )
        self.preprocessor.fit(X)
        return self

    def transform(self, X: PIPELINE_DATA_DTYPE) -> PIPELINE_DATA_DTYPE:
        # fit() must have been called first; otherwise the component is unusable.
        if self.preprocessor is None:
            raise NotImplementedError()
        X = self.preprocessor.transform(X)
        return X

    @staticmethod
    def get_properties(
        dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
    ) -> Dict[str, Optional[Union[str, int, bool, Tuple]]]:
        # Static capability declaration consumed by the auto-sklearn pipeline builder.
        return {
            "shortname": "CategoricalImputation",
            "name": "Categorical Imputation",
            "handles_missing_values": True,
            "handles_nominal_values": True,
            "handles_numerical_features": True,
            "prefers_data_scaled": False,
            "prefers_data_normalized": False,
            "handles_regression": True,
            "handles_classification": True,
            "handles_multiclass": True,
            "handles_multilabel": True,
            "handles_multioutput": True,
            "is_deterministic": True,
            # TODO find out of this is right!
            "handles_sparse": True,
            "handles_dense": True,
            "input": (DENSE, SPARSE, UNSIGNED_DATA),
            "output": (INPUT,),
            "preferred_dtype": None,
        }

    @staticmethod
    def get_hyperparameter_search_space(
        feat_type: Optional[FEAT_TYPE_TYPE] = None,
        dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
    ) -> ConfigurationSpace:
        # This component exposes no tunable hyperparameters.
        return ConfigurationSpace()
| CategoricalImputation |
python | fastai__fastai | fastai/text/core.py | {
"start": 14579,
"end": 17603
} | class ____():#TODO: pass the special tokens symbol to sp
    "SentencePiece tokenizer for `lang`"
    def __init__(self, lang='en', special_toks=None, sp_model=None, vocab_sz=None, max_vocab_sz=30000,
                 model_type='unigram', char_coverage=None, cache_dir='tmp'):
        try: from sentencepiece import SentencePieceTrainer,SentencePieceProcessor
        except ImportError:
            raise Exception('sentencepiece module is missing: run `pip install sentencepiece!=0.1.90,!=0.1.91`')
        self.sp_model,self.cache_dir = sp_model,Path(cache_dir)
        self.vocab_sz,self.max_vocab_sz,self.model_type = vocab_sz,max_vocab_sz,model_type
        # European languages default to near-total character coverage; other scripts get a lower default.
        self.char_coverage = ifnone(char_coverage, 0.99999 if lang in eu_langs else 0.9998)
        self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
        if sp_model is None: self.tok = None
        else:
            # A pre-trained model was supplied: load it immediately.
            self.tok = SentencePieceProcessor()
            self.tok.Load(str(sp_model))
        os.makedirs(self.cache_dir, exist_ok=True)

    def _get_vocab_sz(self, raw_text_path):
        # Heuristic vocab size: a quarter of the distinct whitespace-separated
        # tokens, capped at `max_vocab_sz`, rounded up to a multiple of 8,
        # with a floor of 29.
        cnt = Counter()
        with open(raw_text_path, 'r') as f:
            for line in f.readlines():
                cnt.update(line.split())
                if len(cnt)//4 > self.max_vocab_sz: return self.max_vocab_sz
        res = len(cnt)//4
        while res%8 != 0: res+=1
        return max(res,29)

    def train(self, raw_text_path):
        "Train a sentencepiece tokenizer on `texts` and save it in `path/tmp_dir`"
        from sentencepiece import SentencePieceTrainer
        vocab_sz = self._get_vocab_sz(raw_text_path) if self.vocab_sz is None else self.vocab_sz
        # '\u2581' is the SentencePiece word-boundary glyph; prefixing keeps
        # special tokens aligned with how SentencePiece marks word starts.
        spec_tokens = ['\u2581'+s for s in self.special_toks]
        SentencePieceTrainer.Train(" ".join([
            f"--input={raw_text_path} --vocab_size={vocab_sz} --model_prefix={self.cache_dir/'spm'}",
            f"--character_coverage={self.char_coverage} --model_type={self.model_type}",
            f"--unk_id={len(spec_tokens)} --pad_id=-1 --bos_id=-1 --eos_id=-1 --minloglevel=2",
            f"--user_defined_symbols={','.join(spec_tokens)} --hard_vocab_limit=false"]))
        # The raw text dump is no longer needed once the model is trained.
        raw_text_path.unlink()
        return self.cache_dir/'spm.model'

    def setup(self, items, rules=None):
        from sentencepiece import SentencePieceProcessor
        if rules is None: rules = []
        # Nothing to do if a model was already trained or loaded.
        if self.tok is not None: return {'sp_model': self.sp_model}
        # Dump the rule-processed texts to disk so SentencePiece can train on them.
        raw_text_path = self.cache_dir/'texts.out'
        with open(raw_text_path, 'w') as f:
            for t in progress_bar(maps(*rules, items), total=len(items), leave=False):
                f.write(f'{t}\n')
        sp_model = self.train(raw_text_path)
        self.tok = SentencePieceProcessor()
        self.tok.Load(str(sp_model))
        return {'sp_model': sp_model}

    def __call__(self, items):
        # Lazily train on first use if no model is available yet.
        if self.tok is None: self.setup(items)
        for t in items: yield self.tok.EncodeAsPieces(t)

# %% ../../nbs/30_text.core.ipynb 77
SubwordTokenizer = SentencePieceTokenizer
| SentencePieceTokenizer |
python | psf__black | src/blib2to3/pytree.py | {
"start": 22503,
"end": 30322
} | class ____(BasePattern):
    """
    A wildcard pattern can match zero or more nodes.

    This has all the flexibility needed to implement patterns like:

    .* .+ .? .{m,n}
    (a b c | d e | f)
    (...)* (...)+ (...)? (...){m,n}

    except it always uses non-greedy matching.
    """

    min: int
    max: int

    def __init__(
        self,
        content: str | None = None,
        min: int = 0,
        max: int = HUGE,
        name: str | None = None,
    ) -> None:
        """
        Initializer.

        Args:
            content: optional sequence of subsequences of patterns;
                if absent, matches one node;
                if present, each subsequence is an alternative [*]
            min: optional minimum number of times to match, default 0
            max: optional maximum number of times to match, default HUGE
            name: optional name assigned to this match

        [*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is
            equivalent to (a b c | d e | f g h); if content is None,
            this is equivalent to '.' in regular expression terms.
            The min and max parameters work as follows:
                min=0, max=maxint: .*
                min=1, max=maxint: .+
                min=0, max=1: .?
                min=1, max=1: .
            If content is not None, replace the dot with the parenthesized
            list of alternatives, e.g. (a b c | d e | f g h)*
        """
        assert 0 <= min <= max <= HUGE, (min, max)
        if content is not None:
            f = lambda s: tuple(s)
            wrapped_content = tuple(map(f, content))  # Protect against alterations
            # Check sanity of alternatives
            assert len(wrapped_content), repr(
                wrapped_content
            )  # Can't have zero alternatives
            for alt in wrapped_content:
                assert len(alt), repr(alt)  # Can have empty alternatives
            self.content = wrapped_content
        self.min = min
        self.max = max
        self.name = name

    def optimize(self) -> Any:
        """Optimize certain stacked wildcard patterns."""
        subpattern = None
        # A single alternative with a single pattern may be collapsible.
        if (
            self.content is not None
            and len(self.content) == 1
            and len(self.content[0]) == 1
        ):
            subpattern = self.content[0][0]
        if self.min == 1 and self.max == 1:
            # Exactly-once wildcard: degrade to a plain node pattern.
            if self.content is None:
                return NodePattern(name=self.name)
            if subpattern is not None and self.name == subpattern.name:
                return subpattern.optimize()
        # Collapse nested optional wildcards with the same name into one.
        if (
            self.min <= 1
            and isinstance(subpattern, WildcardPattern)
            and subpattern.min <= 1
            and self.name == subpattern.name
        ):
            return WildcardPattern(
                subpattern.content,
                self.min * subpattern.min,
                self.max * subpattern.max,
                subpattern.name,
            )
        return self

    def match(self, node, results=None) -> bool:
        """Does this pattern exactly match a node?"""
        return self.match_seq([node], results)

    def match_seq(self, nodes, results=None) -> bool:
        """Does this pattern exactly match a sequence of nodes?"""
        for c, r in self.generate_matches(nodes):
            # Only a match consuming the full sequence counts as exact.
            if c == len(nodes):
                if results is not None:
                    results.update(r)
                    if self.name:
                        results[self.name] = list(nodes)
                return True
        return False

    def generate_matches(self, nodes) -> Iterator[tuple[int, _Results]]:
        """
        Generator yielding matches for a sequence of nodes.

        Args:
            nodes: sequence of nodes

        Yields:
            (count, results) tuples where:
            count: the match comprises nodes[:count];
            results: dict containing named submatches.
        """
        if self.content is None:
            # Shortcut for special case (see __init__.__doc__)
            for count in range(self.min, 1 + min(len(nodes), self.max)):
                r = {}
                if self.name:
                    r[self.name] = nodes[:count]
                yield count, r
        elif self.name == "bare_name":
            yield self._bare_name_matches(nodes)
        else:
            # The reason for this is that hitting the recursion limit usually
            # results in some ugly messages about how RuntimeErrors are being
            # ignored. We only have to do this on CPython, though, because other
            # implementations don't have this nasty bug in the first place.
            if hasattr(sys, "getrefcount"):
                # Temporarily silence stderr while the recursive matcher runs.
                save_stderr = sys.stderr
                sys.stderr = StringIO()
            try:
                for count, r in self._recursive_matches(nodes, 0):
                    if self.name:
                        r[self.name] = nodes[:count]
                    yield count, r
            except RuntimeError:
                # We fall back to the iterative pattern matching scheme if the recursive
                # scheme hits the recursion limit.
                for count, r in self._iterative_matches(nodes):
                    if self.name:
                        r[self.name] = nodes[:count]
                    yield count, r
            finally:
                if hasattr(sys, "getrefcount"):
                    sys.stderr = save_stderr

    def _iterative_matches(self, nodes) -> Iterator[tuple[int, _Results]]:
        """Helper to iteratively yield the matches."""
        nodelen = len(nodes)
        if 0 >= self.min:
            # Zero repetitions is itself a valid (empty) match.
            yield 0, {}

        results = []
        # generate matches that use just one alt from self.content
        for alt in self.content:
            for c, r in generate_matches(alt, nodes):
                yield c, r
                results.append((c, r))

        # for each match, iterate down the nodes
        while results:
            new_results = []
            for c0, r0 in results:
                # stop if the entire set of nodes has been matched
                if c0 < nodelen and c0 <= self.max:
                    for alt in self.content:
                        for c1, r1 in generate_matches(alt, nodes[c0:]):
                            if c1 > 0:
                                r = {}
                                r.update(r0)
                                r.update(r1)
                                yield c0 + c1, r
                                new_results.append((c0 + c1, r))
            results = new_results

    def _bare_name_matches(self, nodes) -> tuple[int, _Results]:
        """Special optimized matcher for bare_name."""
        count = 0
        r = {}  # type: _Results
        done = False
        max = len(nodes)
        # Greedily consume nodes while any leaf alternative matches.
        while not done and count < max:
            done = True
            for leaf in self.content:
                if leaf[0].match(nodes[count], r):
                    count += 1
                    done = False
                    break
        assert self.name is not None
        r[self.name] = nodes[:count]
        return count, r

    def _recursive_matches(self, nodes, count) -> Iterator[tuple[int, _Results]]:
        """Helper to recursively yield the matches."""
        assert self.content is not None
        if count >= self.min:
            # Non-greedy: yield the shorter (empty-extension) match first.
            yield 0, {}
        if count < self.max:
            for alt in self.content:
                for c0, r0 in generate_matches(alt, nodes):
                    for c1, r1 in self._recursive_matches(nodes[c0:], count + 1):
                        r = {}
                        r.update(r0)
                        r.update(r1)
                        yield c0 + c1, r
| WildcardPattern |
python | getsentry__sentry | tests/snuba/sessions/test_sessions.py | {
"start": 4135,
"end": 6367
} | class ____(TestCase, BaseMetricsTestCase):
backend = MetricsReleaseHealthBackend()
def test_check_has_health_data(self) -> None:
self.store_session(
self.build_session(
project_id=self.project.id,
org_id=self.project.organization_id,
status="exited",
release=release_v1_0_0,
),
)
data = self.backend.check_has_health_data(
[(self.project.id, release_v1_0_0), (self.project.id, "dummy-release")]
)
assert data == {(self.project.id, release_v1_0_0)}
def test_check_has_health_data_without_releases_should_include_sessions_lte_90_days(
self,
) -> None:
"""
Test that ensures that `check_has_health_data` returns a set of projects that has health
data within the last 90d if only a list of project ids is provided and any project with
session data earlier than 90 days should be included
"""
project1 = self.project
project2 = self.create_project(
name="Bar2",
slug="bar2",
teams=[self.team],
fire_project_created=True,
organization=self.organization,
)
self.store_session(
self.build_session(
project_id=project1.id,
org_id=project1.organization_id,
status="exited",
)
)
self.store_session(
self.build_session(
project_id=project2.id,
org_id=project2.organization_id,
status="exited",
)
)
data = self.backend.check_has_health_data([project1.id, project2.id])
assert data == {project1.id, project2.id}
def test_check_has_health_data_does_not_crash_when_sending_projects_list_as_set(self) -> None:
self.store_session(
self.build_session(
project_id=self.project.id,
org_id=self.project.organization_id,
status="exited",
),
)
data = self.backend.check_has_health_data({self.project.id})
assert data == {self.project.id}
| CheckHasHealthDataTestCase |
python | coleifer__peewee | tests/fields.py | {
"start": 42337,
"end": 43369
} | class ____(ModelTestCase):
database = get_in_memory_db()
requires = [UpperModel]
def test_sql_function_db_value(self):
# Verify that the db function is applied as part of an INSERT.
um = UpperModel.create(name='huey')
um_db = UpperModel.get(UpperModel.id == um.id)
self.assertEqual(um_db.name, 'HUEY')
# Verify that the db function is applied as part of an UPDATE.
um_db.name = 'zaizee'
um_db.save()
# Ensure that the name was updated correctly.
um_db2 = UpperModel.get(UpperModel.id == um.id)
self.assertEqual(um_db2.name, 'ZAIZEE')
# Verify that the db function is applied in a WHERE expression.
um_db3 = UpperModel.get(UpperModel.name == 'zaiZee')
self.assertEqual(um_db3.id, um.id)
# If we nest the field in a function, the conversion is not applied.
expr = fn.SUBSTR(UpperModel.name, 1, 1) == 'z'
self.assertRaises(UpperModel.DoesNotExist, UpperModel.get, expr)
| TestSQLFunctionDBValue |
python | zarr-developers__zarr-python | tests/test_dtype_registry.py | {
"start": 746,
"end": 8199
} | class ____:
    """Tests for registration, override, and lookup behaviour of the Zarr data type registry."""

    @staticmethod
    def test_register(data_type_registry_fixture: DataTypeRegistry) -> None:
        """
        Test that registering a dtype in a data type registry works.
        """
        data_type_registry_fixture.register(Bool._zarr_v3_name, Bool)
        assert data_type_registry_fixture.get(Bool._zarr_v3_name) == Bool
        assert isinstance(data_type_registry_fixture.match_dtype(np.dtype("bool")), Bool)

    @staticmethod
    def test_override(data_type_registry_fixture: DataTypeRegistry) -> None:
        """
        Test that registering a new dtype with the same name works (overriding the previous one).
        """
        data_type_registry_fixture.register(Bool._zarr_v3_name, Bool)

        # A subclass registered under the same name should shadow the original.
        class NewBool(Bool):
            def default_scalar(self) -> np.bool_:
                return np.True_

        data_type_registry_fixture.register(NewBool._zarr_v3_name, NewBool)
        assert isinstance(data_type_registry_fixture.match_dtype(np.dtype("bool")), NewBool)

    @staticmethod
    @pytest.mark.parametrize(
        ("wrapper_cls", "dtype_str"), [(Bool, "bool"), (FixedLengthUTF32, "|U4")]
    )
    def test_match_dtype(
        data_type_registry_fixture: DataTypeRegistry,
        wrapper_cls: type[ZDType[TBaseDType, TBaseScalar]],
        dtype_str: str,
    ) -> None:
        """
        Test that match_dtype resolves a numpy dtype into an instance of the correspond wrapper for that dtype.
        """
        data_type_registry_fixture.register(wrapper_cls._zarr_v3_name, wrapper_cls)
        assert isinstance(data_type_registry_fixture.match_dtype(np.dtype(dtype_str)), wrapper_cls)

    @staticmethod
    def test_unregistered_dtype(data_type_registry_fixture: DataTypeRegistry) -> None:
        """
        Test that match_dtype raises an error if the dtype is not registered.
        """
        outside_dtype_name = "int8"
        outside_dtype = np.dtype(outside_dtype_name)
        msg = f"No Zarr data type found that matches dtype '{outside_dtype!r}'"
        with pytest.raises(ValueError, match=re.escape(msg)):
            data_type_registry_fixture.match_dtype(outside_dtype)

        with pytest.raises(KeyError):
            data_type_registry_fixture.get(outside_dtype_name)

    @staticmethod
    @pytest.mark.filterwarnings("ignore::zarr.core.dtype.common.UnstableSpecificationWarning")
    @pytest.mark.parametrize("zdtype", zdtype_examples)
    def test_registered_dtypes_match_dtype(zdtype: ZDType[TBaseDType, TBaseScalar]) -> None:
        """
        Test that the registered dtypes can be retrieved from the registry.
        """
        skip_object_dtype(zdtype)
        assert data_type_registry.match_dtype(zdtype.to_native_dtype()) == zdtype

    @staticmethod
    @pytest.mark.filterwarnings("ignore::zarr.core.dtype.common.UnstableSpecificationWarning")
    @pytest.mark.parametrize("zdtype", zdtype_examples)
    def test_registered_dtypes_match_json(
        zdtype: ZDType[TBaseDType, TBaseScalar], zarr_format: ZarrFormat
    ) -> None:
        # Round trip: serializing to JSON and matching it back yields the same zdtype.
        assert (
            data_type_registry.match_json(
                zdtype.to_json(zarr_format=zarr_format), zarr_format=zarr_format
            )
            == zdtype
        )

    @staticmethod
    @pytest.mark.filterwarnings("ignore::zarr.core.dtype.common.UnstableSpecificationWarning")
    @pytest.mark.parametrize("zdtype", zdtype_examples)
    def test_match_dtype_unique(
        zdtype: ZDType[Any, Any],
        data_type_registry_fixture: DataTypeRegistry,
        zarr_format: ZarrFormat,
    ) -> None:
        """
        Test that the match_dtype method uniquely specifies a registered data type. We create a local registry
        that excludes the data type class being tested, and ensure that an instance of the wrapped data type
        fails to match anything in the registry
        """
        skip_object_dtype(zdtype)
        # Register every data type class EXCEPT the one under test.
        for _cls in get_args(AnyDType):
            if _cls is not type(zdtype):
                data_type_registry_fixture.register(_cls._zarr_v3_name, _cls)

        dtype_instance = zdtype.to_native_dtype()

        msg = f"No Zarr data type found that matches dtype '{dtype_instance!r}'"
        with pytest.raises(ValueError, match=re.escape(msg)):
            data_type_registry_fixture.match_dtype(dtype_instance)

        instance_dict = zdtype.to_json(zarr_format=zarr_format)
        msg = f"No Zarr data type found that matches {instance_dict!r}"
        with pytest.raises(ValueError, match=re.escape(msg)):
            data_type_registry_fixture.match_json(instance_dict, zarr_format=zarr_format)
@pytest.mark.usefixtures("set_path")
def test_entrypoint_dtype(zarr_format: ZarrFormat) -> None:
    """A data type registered via an entrypoint resolves from its JSON representation."""
    from package_with_entrypoint import TestDataType

    data_type_registry._lazy_load()
    expected = TestDataType()
    json_repr = expected.to_json(zarr_format=zarr_format)
    assert get_data_type_from_json(json_repr, zarr_format=zarr_format) == expected
    # Clean up so the entrypoint dtype does not leak into other tests.
    data_type_registry.unregister(TestDataType._zarr_v3_name)
@pytest.mark.filterwarnings("ignore::zarr.core.dtype.common.UnstableSpecificationWarning")
@pytest.mark.parametrize("data_type", zdtype_examples, ids=str)
@pytest.mark.parametrize("json_style", [(2, "internal"), (2, "metadata"), (3, None)], ids=str)
@pytest.mark.parametrize(
    "dtype_parser_func", [parse_dtype, parse_data_type], ids=["parse_dtype", "parse_data_type"]
)
def test_parse_data_type(
    data_type: ZDType[Any, Any],
    json_style: tuple[ZarrFormat, None | Literal["internal", "metadata"]],
    dtype_parser_func: Any,
) -> None:
    """
    Check that `dtype_parser_func` turns a JSON data type specification back into
    the `ZDType` instance it was produced from.

    For Zarr V2 two JSON styles exist: "internal"
    (``{"name": <identifier>, "object_codec_id": <id>}``) and "metadata"
    (the bare identifier). Both parser functions are exercised because they are
    expected to behave identically.
    """
    zarr_format, style = json_style

    # Serialize once; for the Zarr V2 "metadata" style, strip the wrapper.
    dtype_spec: Any = data_type.to_json(zarr_format=zarr_format)
    if zarr_format == 2:
        if style == "metadata":
            dtype_spec = unpack_dtype_json(dtype_spec)
        elif style != "internal":
            raise ValueError(f"Invalid zarr v2 json style: {style}")

    if dtype_spec == "|O":
        # The object data type on its own is ambiguous and should fail to resolve.
        msg = "Zarr data type resolution from object failed."
        with pytest.raises(ValueError, match=msg):
            dtype_parser_func(dtype_spec, zarr_format=zarr_format)
    else:
        assert dtype_parser_func(dtype_spec, zarr_format=zarr_format) == data_type
| TestRegistry |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_datastore.py | {
"start": 5578,
"end": 6001
} | class ____:
@mock.patch(HOOK_PATH)
def test_execute(self, mock_hook):
op = CloudDatastoreDeleteOperationOperator(task_id="test_task", gcp_conn_id=CONN_ID, name=TRANSACTION)
op.execute({})
mock_hook.assert_called_once_with(gcp_conn_id=CONN_ID, impersonation_chain=None)
mock_hook.return_value.delete_operation.assert_called_once_with(name=TRANSACTION)
| TestCloudDatastoreDeleteOperation |
python | scipy__scipy | scipy/stats/_fit.py | {
"start": 1608,
"end": 59867
} | class ____:
    r"""Result of fitting a discrete or continuous distribution to data

    Attributes
    ----------
    params : namedtuple
        A namedtuple containing the maximum likelihood estimates of the
        shape parameters, location, and (if applicable) scale of the
        distribution.
    success : bool or None
        Whether the optimizer considered the optimization to terminate
        successfully or not.
    message : str or None
        Any status message provided by the optimizer.

    """

    def __init__(self, dist, data, discrete, res):
        self._dist = dist
        self._data = data
        self.discrete = discrete
        # Use the PMF for discrete distributions, the PDF otherwise.
        self.pxf = getattr(dist, "pmf", None) or getattr(dist, "pdf", None)

        # Build a named tuple type matching the distribution's parameterization:
        # shapes + loc (+ scale for continuous distributions only).
        shape_names = [] if dist.shapes is None else dist.shapes.split(", ")
        if not discrete:
            FitParams = namedtuple('FitParams', shape_names + ['loc', 'scale'])
        else:
            FitParams = namedtuple('FitParams', shape_names + ['loc'])
        self.params = FitParams(*res.x)

        # Optimizer can report success even when nllf is infinite
        if res.success and not np.isfinite(self.nllf()):
            res.success = False
            res.message = ("Optimization converged to parameter values that "
                           "are inconsistent with the data.")
        self.success = getattr(res, "success", None)
        self.message = getattr(res, "message", None)

    def __repr__(self):
        # Right-align the attribute names for a tidy multi-line summary;
        # omit attributes that are None.
        keys = ["params", "success", "message"]
        m = max(map(len, keys)) + 1
        return '\n'.join([key.rjust(m) + ': ' + repr(getattr(self, key))
                          for key in keys if getattr(self, key) is not None])

    def nllf(self, params=None, data=None):
        """Negative log-likelihood function

        Evaluates the negative of the log-likelihood function of the provided
        data at the provided parameters.

        Parameters
        ----------
        params : tuple, optional
            The shape parameters, location, and (if applicable) scale of the
            distribution as a single tuple. Default is the maximum likelihood
            estimates (``self.params``).
        data : array_like, optional
            The data for which the log-likelihood function is to be evaluated.
            Default is the data to which the distribution was fit.

        Returns
        -------
        nllf : float
            The negative of the log-likelihood function.

        """
        params = params if params is not None else self.params
        data = data if data is not None else self._data
        return self._dist.nnlf(theta=params, x=data)

    def plot(self, ax=None, *, plot_type="hist"):
        """Visually compare the data against the fitted distribution.

        Available only if `matplotlib` is installed.

        Parameters
        ----------
        ax : `matplotlib.axes.Axes`
            Axes object to draw the plot onto, otherwise uses the current Axes.
        plot_type : {"hist", "qq", "pp", "cdf"}
            Type of plot to draw. Options include:

            - "hist": Superposes the PDF/PMF of the fitted distribution
              over a normalized histogram of the data.
            - "qq": Scatter plot of theoretical quantiles against the
              empirical quantiles. Specifically, the x-coordinates are the
              values of the fitted distribution PPF evaluated at the
              percentiles ``(np.arange(1, n) - 0.5)/n``, where ``n`` is the
              number of data points, and the y-coordinates are the sorted
              data points.
            - "pp": Scatter plot of theoretical percentiles against the
              observed percentiles. Specifically, the x-coordinates are the
              percentiles ``(np.arange(1, n) - 0.5)/n``, where ``n`` is
              the number of data points, and the y-coordinates are the values
              of the fitted distribution CDF evaluated at the sorted
              data points.
            - "cdf": Superposes the CDF of the fitted distribution over the
              empirical CDF. Specifically, the x-coordinates of the empirical
              CDF are the sorted data points, and the y-coordinates are the
              percentiles ``(np.arange(1, n) - 0.5)/n``, where ``n`` is
              the number of data points.

        Returns
        -------
        ax : `matplotlib.axes.Axes`
            The matplotlib Axes object on which the plot was drawn.

        Examples
        --------
        >>> import numpy as np
        >>> from scipy import stats
        >>> import matplotlib.pyplot as plt  # matplotlib must be installed
        >>> rng = np.random.default_rng()
        >>> data = stats.nbinom(5, 0.5).rvs(size=1000, random_state=rng)
        >>> bounds = [(0, 30), (0, 1)]
        >>> res = stats.fit(stats.nbinom, data, bounds)
        >>> ax = res.plot()  # save matplotlib Axes object

        The `matplotlib.axes.Axes` object can be used to customize the plot.
        See `matplotlib.axes.Axes` documentation for details.

        >>> ax.set_xlabel('number of trials')  # customize axis label
        >>> ax.get_children()[0].set_linewidth(5)  # customize line widths
        >>> ax.legend()
        >>> plt.show()
        """
        try:
            import matplotlib  # noqa: F401
        except ModuleNotFoundError as exc:
            message = "matplotlib must be installed to use method `plot`."
            raise ModuleNotFoundError(message) from exc
        # Dispatch table from plot-type keyword to the private plotting helper.
        plots = {'histogram': self._hist_plot, 'qq': self._qq_plot,
                 'pp': self._pp_plot, 'cdf': self._cdf_plot,
                 'hist': self._hist_plot}
        if plot_type.lower() not in plots:
            message = f"`plot_type` must be one of {set(plots.keys())}"
            raise ValueError(message)
        plot = plots[plot_type.lower()]
        if ax is None:
            import matplotlib.pyplot as plt
            ax = plt.gca()
        fit_params = np.atleast_1d(self.params)
        return plot(ax=ax, fit_params=fit_params)

    def _hist_plot(self, ax, fit_params):
        # Overlay the fitted PMF/PDF on a normalized histogram of the data.
        from matplotlib.ticker import MaxNLocator

        # Fall back to the data range when the support is unbounded.
        support = self._dist.support(*fit_params)
        lb = support[0] if np.isfinite(support[0]) else min(self._data)
        ub = support[1] if np.isfinite(support[1]) else max(self._data)
        pxf = "PMF" if self.discrete else "PDF"

        if self.discrete:
            x = np.arange(lb, ub + 2)
            y = self.pxf(x, *fit_params)
            ax.vlines(x[:-1], 0, y[:-1], label='Fitted Distribution PMF',
                      color='C0')
            options = dict(density=True, bins=x, align='left', color='C1')
            ax.xaxis.set_major_locator(MaxNLocator(integer=True))
            ax.set_xlabel('k')
            ax.set_ylabel('PMF')
        else:
            x = np.linspace(lb, ub, 200)
            y = self.pxf(x, *fit_params)
            ax.plot(x, y, '--', label='Fitted Distribution PDF', color='C0')
            options = dict(density=True, bins=50, align='mid', color='C1')
            ax.set_xlabel('x')
            ax.set_ylabel('PDF')

        # With few continuous data points a histogram is uninformative;
        # show the raw points instead.
        if len(self._data) > 50 or self.discrete:
            ax.hist(self._data, label="Histogram of Data", **options)
        else:
            ax.plot(self._data, np.zeros_like(self._data), "*",
                    label='Data', color='C1')

        ax.set_title(rf"Fitted $\tt {self._dist.name}$ {pxf} and Histogram")
        ax.legend(*ax.get_legend_handles_labels())
        return ax

    def _qp_plot(self, ax, fit_params, qq):
        # Shared implementation of the Q-Q (qq=True) and P-P (qq=False) plots.
        data = np.sort(self._data)
        ps = self._plotting_positions(len(self._data))

        if qq:
            qp = "Quantiles"
            plot_type = 'Q-Q'
            x = self._dist.ppf(ps, *fit_params)
            y = data
        else:
            qp = "Percentiles"
            plot_type = 'P-P'
            x = ps
            y = self._dist.cdf(data, *fit_params)

        ax.plot(x, y, '.', label=f'Fitted Distribution {plot_type}',
                color='C0', zorder=1)
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        lim = [min(xlim[0], ylim[0]), max(xlim[1], ylim[1])]
        if not qq:
            # Percentiles are bounded to [0, 1].
            lim = max(lim[0], 0), min(lim[1], 1)

        if self.discrete and qq:
            q_min, q_max = int(lim[0]), int(lim[1]+1)
            q_ideal = np.arange(q_min, q_max)
            # q_ideal = np.unique(self._dist.ppf(ps, *fit_params))
            ax.plot(q_ideal, q_ideal, 'o', label='Reference', color='k',
                    alpha=0.25, markerfacecolor='none', clip_on=True)
        elif self.discrete and not qq:
            # The intent of this is to match the plot that would be produced
            # if x were continuous on [0, 1] and y were cdf(ppf(x)).
            # It can be approximated by letting x = np.linspace(0, 1, 1000),
            # but this might not look great when zooming in. The vertical
            # portions are included to indicate where the transition occurs
            # where the data completely obscures the horizontal portions.
            p_min, p_max = lim
            a, b = self._dist.support(*fit_params)
            p_min = max(p_min, 0 if np.isfinite(a) else 1e-3)
            p_max = min(p_max, 1 if np.isfinite(b) else 1-1e-3)
            q_min, q_max = self._dist.ppf([p_min, p_max], *fit_params)
            qs = np.arange(q_min-1, q_max+1)
            ps = self._dist.cdf(qs, *fit_params)
            ax.step(ps, ps, '-', label='Reference', color='k', alpha=0.25,
                    clip_on=True)
        else:
            ax.plot(lim, lim, '-', label='Reference', color='k', alpha=0.25,
                    clip_on=True)

        ax.set_xlim(lim)
        ax.set_ylim(lim)
        ax.set_xlabel(rf"Fitted $\tt {self._dist.name}$ Theoretical {qp}")
        ax.set_ylabel(f"Data {qp}")
        ax.set_title(rf"Fitted $\tt {self._dist.name}$ {plot_type} Plot")
        ax.legend(*ax.get_legend_handles_labels())
        ax.set_aspect('equal')
        return ax

    def _qq_plot(self, **kwargs):
        # Q-Q plot: theoretical quantiles vs. sorted data.
        return self._qp_plot(qq=True, **kwargs)

    def _pp_plot(self, **kwargs):
        # P-P plot: plotting positions vs. fitted CDF of sorted data.
        return self._qp_plot(qq=False, **kwargs)

    def _plotting_positions(self, n, a=.5):
        # See https://en.wikipedia.org/wiki/Q%E2%80%93Q_plot#Plotting_positions
        k = np.arange(1, n+1)
        return (k-a) / (n + 1 - 2*a)

    def _cdf_plot(self, ax, fit_params):
        # Overlay the fitted CDF on the empirical CDF of the data.
        data = np.sort(self._data)
        ecdf = self._plotting_positions(len(self._data))
        # Use a step/dashed line for few unique points, dots otherwise.
        ls = '--' if len(np.unique(data)) < 30 else '.'
        xlabel = 'k' if self.discrete else 'x'
        ax.step(data, ecdf, ls, label='Empirical CDF', color='C1', zorder=0)
        xlim = ax.get_xlim()
        q = np.linspace(*xlim, 300)
        tcdf = self._dist.cdf(q, *fit_params)
        ax.plot(q, tcdf, label='Fitted Distribution CDF', color='C0', zorder=1)
        ax.set_xlim(xlim)
        ax.set_ylim(0, 1)
        ax.set_xlabel(xlabel)
        ax.set_ylabel("CDF")
        ax.set_title(rf"Fitted $\tt {self._dist.name}$ and Empirical CDF")
        # Reverse legend order so the fitted curve is listed first.
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles[::-1], labels[::-1])
        return ax
@xp_capabilities(out_of_scope=True)
def fit(dist, data, bounds=None, *, guess=None, method='mle',
        optimizer=optimize.differential_evolution):
    r"""Fit a discrete or continuous distribution to data
    Given a distribution, data, and bounds on the parameters of the
    distribution, return maximum likelihood estimates of the parameters.
    Parameters
    ----------
    dist : `scipy.stats.rv_continuous` or `scipy.stats.rv_discrete`
        The object representing the distribution to be fit to the data.
    data : 1D array_like
        The data to which the distribution is to be fit. If the data contain
        any of ``np.nan``, ``np.inf``, or -``np.inf``, the fit method will
        raise a ``ValueError``.
    bounds : dict or sequence of tuples, optional
        If a dictionary, each key is the name of a parameter of the
        distribution, and the corresponding value is a tuple containing the
        lower and upper bound on that parameter. If the distribution is
        defined only for a finite range of values of that parameter, no entry
        for that parameter is required; e.g., some distributions have
        parameters which must be on the interval [0, 1]. Bounds for parameters
        location (``loc``) and scale (``scale``) are optional; by default,
        they are fixed to 0 and 1, respectively.
        If a sequence, element *i* is a tuple containing the lower and upper
        bound on the *i*\ th parameter of the distribution. In this case,
        bounds for *all* distribution shape parameters must be provided.
        Optionally, bounds for location and scale may follow the
        distribution shape parameters.
        If a shape is to be held fixed (e.g. if it is known), the
        lower and upper bounds may be equal. If a user-provided lower or upper
        bound is beyond a bound of the domain for which the distribution is
        defined, the bound of the distribution's domain will replace the
        user-provided value. Similarly, parameters which must be integral
        will be constrained to integral values within the user-provided bounds.
    guess : dict or array_like, optional
        If a dictionary, each key is the name of a parameter of the
        distribution, and the corresponding value is a guess for the value
        of the parameter.
        If a sequence, element *i* is a guess for the *i*\ th parameter of the
        distribution. In this case, guesses for *all* distribution shape
        parameters must be provided.
        If `guess` is not provided, guesses for the decision variables will
        not be passed to the optimizer. If `guess` is provided, guesses for
        any missing parameters will be set at the mean of the lower and
        upper bounds. Guesses for parameters which must be integral will be
        rounded to integral values, and guesses that lie outside the
        intersection of the user-provided bounds and the domain of the
        distribution will be clipped.
    method : {'mle', 'mse'}
        With ``method="mle"`` (default), the fit is computed by minimizing
        the negative log-likelihood function. A large, finite penalty
        (rather than infinite negative log-likelihood) is applied for
        observations beyond the support of the distribution.
        With ``method="mse"``, the fit is computed by minimizing
        the negative log-product spacing function. The same penalty is applied
        for observations beyond the support. We follow the approach of [1]_,
        which is generalized for samples with repeated observations.
    optimizer : callable, optional
        `optimizer` is a callable that accepts the following positional
        argument.
        fun : callable
            The objective function to be optimized. `fun` accepts one argument
            ``x``, candidate shape parameters of the distribution, and returns
            the objective function value given ``x``, `dist`, and the provided
            `data`.
            The job of `optimizer` is to find values of the decision variables
            that minimize `fun`.
        `optimizer` must also accept the following keyword argument.
        bounds : sequence of tuples
            The bounds on values of the decision variables; each element will
            be a tuple containing the lower and upper bound on a decision
            variable.
        If `guess` is provided, `optimizer` must also accept the following
        keyword argument.
        x0 : array_like
            The guesses for each decision variable.
        If the distribution has any shape parameters that must be integral or
        if the distribution is discrete and the location parameter is not
        fixed, `optimizer` must also accept the following keyword argument.
        integrality : array_like of bools
            For each decision variable, True if the decision variable
            must be constrained to integer values and False if the decision
            variable is continuous.
        `optimizer` must return an object, such as an instance of
        `scipy.optimize.OptimizeResult`, which holds the optimal values of
        the decision variables in an attribute ``x``. If attributes
        ``fun``, ``status``, or ``message`` are provided, they will be
        included in the result object returned by `fit`.
    Returns
    -------
    result : `~scipy.stats._result_classes.FitResult`
        An object with the following fields.
        params : namedtuple
            A namedtuple containing the maximum likelihood estimates of the
            shape parameters, location, and (if applicable) scale of the
            distribution.
        success : bool or None
            Whether the optimizer considered the optimization to terminate
            successfully or not.
        message : str or None
            Any status message provided by the optimizer.
        The object has the following method:
        nllf(params=None, data=None)
            By default, the negative log-likelihood function at the fitted
            `params` for the given `data`. Accepts a tuple containing
            alternative shapes, location, and scale of the distribution and
            an array of alternative data.
        plot(ax=None)
            Superposes the PDF/PMF of the fitted distribution over a normalized
            histogram of the data.
    See Also
    --------
    rv_continuous, rv_discrete
    Notes
    -----
    Optimization is more likely to converge to the maximum likelihood estimate
    when the user provides tight bounds containing the maximum likelihood
    estimate. For example, when fitting a binomial distribution to data, the
    number of experiments underlying each sample may be known, in which case
    the corresponding shape parameter ``n`` can be fixed.
    References
    ----------
    .. [1] Shao, Yongzhao, and Marjorie G. Hahn. "Maximum product of spacings
           method: a unified formulation with illustration of strong
           consistency." Illinois Journal of Mathematics 43.3 (1999): 489-499.
    Examples
    --------
    Suppose we wish to fit a distribution to the following data.
    >>> import numpy as np
    >>> from scipy import stats
    >>> rng = np.random.default_rng()
    >>> dist = stats.nbinom
    >>> shapes = (5, 0.5)
    >>> data = dist.rvs(*shapes, size=1000, random_state=rng)
    Suppose we do not know how the data were generated, but we suspect that
    it follows a negative binomial distribution with parameters *n* and *p*\.
    (See `scipy.stats.nbinom`.) We believe that the parameter *n* was fewer
    than 30, and we know that the parameter *p* must lie on the interval
    [0, 1]. We record this information in a variable `bounds` and pass
    this information to `fit`.
    >>> bounds = [(0, 30), (0, 1)]
    >>> res = stats.fit(dist, data, bounds)
    `fit` searches within the user-specified `bounds` for the
    values that best match the data (in the sense of maximum likelihood
    estimation). In this case, it found shape values similar to those
    from which the data were actually generated.
    >>> res.params
    FitParams(n=5.0, p=0.5028157644634368, loc=0.0)  # may vary
    We can visualize the results by superposing the probability mass function
    of the distribution (with the shapes fit to the data) over a normalized
    histogram of the data.
    >>> import matplotlib.pyplot as plt  # matplotlib must be installed to plot
    >>> res.plot()
    >>> plt.show()
    Note that the estimate for *n* was exactly integral; this is because
    the domain of the `nbinom` PMF includes only integral *n*, and the `nbinom`
    object "knows" that. `nbinom` also knows that the shape *p* must be a
    value between 0 and 1. In such a case - when the domain of the distribution
    with respect to a parameter is finite - we are not required to specify
    bounds for the parameter.
    >>> bounds = {'n': (0, 30)}  # omit parameter p using a `dict`
    >>> res2 = stats.fit(dist, data, bounds)
    >>> res2.params
    FitParams(n=5.0, p=0.5016492009232932, loc=0.0)  # may vary
    If we wish to force the distribution to be fit with *n* fixed at 6, we can
    set both the lower and upper bounds on *n* to 6. Note, however, that the
    value of the objective function being optimized is typically worse (higher)
    in this case.
    >>> bounds = {'n': (6, 6)}  # fix parameter `n`
    >>> res3 = stats.fit(dist, data, bounds)
    >>> res3.params
    FitParams(n=6.0, p=0.5486556076755706, loc=0.0)  # may vary
    >>> res3.nllf() > res.nllf()
    True  # may vary
    Note that the numerical results of the previous examples are typical, but
    they may vary because the default optimizer used by `fit`,
    `scipy.optimize.differential_evolution`, is stochastic. However, we can
    customize the settings used by the optimizer to ensure reproducibility -
    or even use a different optimizer entirely - using the `optimizer`
    parameter.
    >>> from scipy.optimize import differential_evolution
    >>> rng = np.random.default_rng(767585560716548)
    >>> def optimizer(fun, bounds, *, integrality):
    ...     return differential_evolution(fun, bounds, strategy='best2bin',
    ...                                   rng=rng, integrality=integrality)
    >>> bounds = [(0, 30), (0, 1)]
    >>> res4 = stats.fit(dist, data, bounds, optimizer=optimizer)
    >>> res4.params
    FitParams(n=5.0, p=0.5015183149259951, loc=0.0)
    """
    # --- Input Validation / Standardization --- #
    user_bounds = bounds
    user_guess = guess
    # distribution input validation and information collection
    if hasattr(dist, "pdf"):  # can't use isinstance for types
        # continuous: loc and scale default to fixed values 0 and 1
        default_bounds = {'loc': (0, 0), 'scale': (1, 1)}
        discrete = False
    elif hasattr(dist, "pmf"):
        # discrete: only loc exists; it defaults to fixed value 0
        default_bounds = {'loc': (0, 0)}
        discrete = True
    else:
        message = ("`dist` must be an instance of `rv_continuous` "
                   "or `rv_discrete.`")
        raise ValueError(message)
    try:
        param_info = dist._param_info()
    except AttributeError as e:
        message = (f"Distribution `{dist.name}` is not yet supported by "
                   "`scipy.stats.fit` because shape information has "
                   "not been defined.")
        raise ValueError(message) from e
    # data input validation
    data = np.asarray(data)
    if data.ndim != 1:
        message = "`data` must be exactly one-dimensional."
        raise ValueError(message)
    if not (np.issubdtype(data.dtype, np.number)
            and np.all(np.isfinite(data))):
        message = "All elements of `data` must be finite numbers."
        raise ValueError(message)
    # bounds input validation and information collection
    n_params = len(param_info)
    n_shapes = n_params - (1 if discrete else 2)
    param_list = [param.name for param in param_info]
    param_names = ", ".join(param_list)
    shape_names = ", ".join(param_list[:n_shapes])
    if user_bounds is None:
        user_bounds = {}
    if isinstance(user_bounds, dict):
        # dict form: merge user entries over defaults; fall back to the
        # distribution's own domain for any parameter left unspecified
        default_bounds.update(user_bounds)
        user_bounds = default_bounds
        user_bounds_array = np.empty((n_params, 2))
        for i in range(n_params):
            param_name = param_info[i].name
            user_bound = user_bounds.pop(param_name, None)
            if user_bound is None:
                user_bound = param_info[i].domain
            user_bounds_array[i] = user_bound
        if user_bounds:
            # any keys left after popping recognized names are unknown
            message = ("Bounds provided for the following unrecognized "
                       f"parameters will be ignored: {set(user_bounds)}")
            warnings.warn(message, RuntimeWarning, stacklevel=2)
    else:
        # sequence form: must cover all shapes; loc/scale optional at the end
        try:
            user_bounds = np.asarray(user_bounds, dtype=float)
            if user_bounds.size == 0:
                user_bounds = np.empty((0, 2))
        except ValueError as e:
            message = ("Each element of a `bounds` sequence must be a tuple "
                       "containing two elements: the lower and upper bound of "
                       "a distribution parameter.")
            raise ValueError(message) from e
        if (user_bounds.ndim != 2 or user_bounds.shape[1] != 2):
            message = ("Each element of `bounds` must be a tuple specifying "
                       "the lower and upper bounds of a shape parameter")
            raise ValueError(message)
        if user_bounds.shape[0] < n_shapes:
            message = (f"A `bounds` sequence must contain at least {n_shapes} "
                       "elements: tuples specifying the lower and upper "
                       f"bounds of all shape parameters {shape_names}.")
            raise ValueError(message)
        if user_bounds.shape[0] > n_params:
            message = ("A `bounds` sequence may not contain more than "
                       f"{n_params} elements: tuples specifying the lower and "
                       "upper bounds of distribution parameters "
                       f"{param_names}.")
            raise ValueError(message)
        user_bounds_array = np.empty((n_params, 2))
        user_bounds_array[n_shapes:] = list(default_bounds.values())
        user_bounds_array[:len(user_bounds)] = user_bounds
        user_bounds = user_bounds_array
    # intersect user bounds with the distribution's domain and account for
    # integrality constraints on each parameter
    validated_bounds = []
    for i in range(n_params):
        name = param_info[i].name
        user_bound = user_bounds_array[i]
        param_domain = param_info[i].domain
        integral = param_info[i].integrality
        combined = _combine_bounds(name, user_bound, param_domain, integral)
        validated_bounds.append(combined)
    bounds = np.asarray(validated_bounds)
    integrality = [param.integrality for param in param_info]
    # guess input validation
    if user_guess is None:
        guess_array = None
    elif isinstance(user_guess, dict):
        # dict form: missing parameters default to the midpoint of bounds
        default_guess = {param.name: np.mean(bound)
                         for param, bound in zip(param_info, bounds)}
        unrecognized = set(user_guess) - set(default_guess)
        if unrecognized:
            message = ("Guesses provided for the following unrecognized "
                       f"parameters will be ignored: {unrecognized}")
            warnings.warn(message, RuntimeWarning, stacklevel=2)
        default_guess.update(user_guess)
        message = ("Each element of `guess` must be a scalar "
                   "guess for a distribution parameter.")
        try:
            guess_array = np.asarray([default_guess[param.name]
                                      for param in param_info], dtype=float)
        except ValueError as e:
            raise ValueError(message) from e
    else:
        # sequence form: same shape rules as `bounds`; tail parameters
        # default to the midpoint of their bounds
        message = ("Each element of `guess` must be a scalar "
                   "guess for a distribution parameter.")
        try:
            user_guess = np.asarray(user_guess, dtype=float)
        except ValueError as e:
            raise ValueError(message) from e
        if user_guess.ndim != 1:
            raise ValueError(message)
        if user_guess.shape[0] < n_shapes:
            message = (f"A `guess` sequence must contain at least {n_shapes} "
                       "elements: scalar guesses for the distribution shape "
                       f"parameters {shape_names}.")
            raise ValueError(message)
        if user_guess.shape[0] > n_params:
            message = ("A `guess` sequence may not contain more than "
                       f"{n_params} elements: scalar guesses for the "
                       f"distribution parameters {param_names}.")
            raise ValueError(message)
        guess_array = np.mean(bounds, axis=1)
        guess_array[:len(user_guess)] = user_guess
    if guess_array is not None:
        # round guesses for integral parameters and clip all guesses into
        # bounds, warning the user about each adjustment made
        guess_rounded = guess_array.copy()
        guess_rounded[integrality] = np.round(guess_rounded[integrality])
        rounded = np.where(guess_rounded != guess_array)[0]
        for i in rounded:
            message = (f"Guess for parameter `{param_info[i].name}` "
                       f"rounded from {guess_array[i]} to {guess_rounded[i]}.")
            warnings.warn(message, RuntimeWarning, stacklevel=2)
        guess_clipped = np.clip(guess_rounded, bounds[:, 0], bounds[:, 1])
        clipped = np.where(guess_clipped != guess_rounded)[0]
        for i in clipped:
            message = (f"Guess for parameter `{param_info[i].name}` "
                       f"clipped from {guess_rounded[i]} to "
                       f"{guess_clipped[i]}.")
            warnings.warn(message, RuntimeWarning, stacklevel=2)
        guess = guess_clipped
    else:
        guess = None
    # --- Fitting --- #
    # default arguments bind `data` at definition time so the closures do not
    # depend on the enclosing scope's later rebinding of `data`
    def nllf(free_params, data=data):  # bind data NOW
        with np.errstate(invalid='ignore', divide='ignore'):
            return dist._penalized_nnlf(free_params, data)
    def nlpsf(free_params, data=data):  # bind data NOW
        with np.errstate(invalid='ignore', divide='ignore'):
            return dist._penalized_nlpsf(free_params, data)
    # 'mle': penalized negative log-likelihood;
    # 'mse': penalized negative log-product-spacing
    methods = {'mle': nllf, 'mse': nlpsf}
    objective = methods[method.lower()]
    with np.errstate(invalid='ignore', divide='ignore'):
        # only pass optimizer keywords that are actually needed, since the
        # user-supplied optimizer may not accept all of them
        kwds = {}
        if bounds is not None:
            kwds['bounds'] = bounds
        if np.any(integrality):
            kwds['integrality'] = integrality
        if guess is not None:
            kwds['x0'] = guess
        res = optimizer(objective, **kwds)
    return FitResult(dist, data, discrete, res)
# Container for the outputs of `goodness_of_fit`: the fit of `dist` to the
# observed data, the observed statistic, the Monte Carlo p-value, and the
# Monte Carlo null distribution of the statistic.
GoodnessOfFitResult = namedtuple('GoodnessOfFitResult',
                                 ('fit_result', 'statistic', 'pvalue',
                                  'null_distribution'))
@xp_capabilities(out_of_scope=True)
@_transition_to_rng('random_state')
def goodness_of_fit(dist, data, *, known_params=None, fit_params=None,
guessed_params=None, statistic='ad', n_mc_samples=9999,
rng=None):
r"""
Perform a goodness of fit test comparing data to a distribution family.
Given a distribution family and data, perform a test of the null hypothesis
that the data were drawn from a distribution in that family. Any known
parameters of the distribution may be specified. Remaining parameters of
the distribution will be fit to the data, and the p-value of the test
is computed accordingly. Several statistics for comparing the distribution
to data are available.
Parameters
----------
dist : `scipy.stats.rv_continuous`
The object representing the distribution family under the null
hypothesis.
data : 1D array_like
Finite, uncensored data to be tested.
known_params : dict, optional
A dictionary containing name-value pairs of known distribution
parameters. Monte Carlo samples are randomly drawn from the
null-hypothesized distribution with these values of the parameters.
Before the statistic is evaluated for the observed `data` and each
Monte Carlo sample, only remaining unknown parameters of the
null-hypothesized distribution family are fit to the samples; the
known parameters are held fixed. If all parameters of the distribution
family are known, then the step of fitting the distribution family to
each sample is omitted.
fit_params : dict, optional
A dictionary containing name-value pairs of distribution parameters
that have already been fit to the data, e.g. using `scipy.stats.fit`
or the ``fit`` method of `dist`. Monte Carlo samples are drawn from the
null-hypothesized distribution with these specified values of the
parameter. However, these and all other unknown parameters of the
null-hypothesized distribution family are always fit to the sample,
whether that is the observed `data` or a Monte Carlo sample, before
the statistic is evaluated.
guessed_params : dict, optional
A dictionary containing name-value pairs of distribution parameters
which have been guessed. These parameters are always considered as
free parameters and are fit both to the provided `data` as well as
to the Monte Carlo samples drawn from the null-hypothesized
distribution. The purpose of these `guessed_params` is to be used as
initial values for the numerical fitting procedure.
statistic : {"ad", "ks", "cvm", "filliben"} or callable, optional
The statistic used to compare data to a distribution after fitting
unknown parameters of the distribution family to the data. The
Anderson-Darling ("ad") [1]_, Kolmogorov-Smirnov ("ks") [1]_,
Cramer-von Mises ("cvm") [1]_, and Filliben ("filliben") [7]_
statistics are available. Alternatively, a callable with signature
``(dist, data, axis)`` may be supplied to compute the statistic. Here
``dist`` is a frozen distribution object (potentially with array
parameters), ``data`` is an array of Monte Carlo samples (of
compatible shape), and ``axis`` is the axis of ``data`` along which
the statistic must be computed.
n_mc_samples : int, default: 9999
The number of Monte Carlo samples drawn from the null hypothesized
distribution to form the null distribution of the statistic. The
sample size of each is the same as the given `data`.
rng : `numpy.random.Generator`, optional
Pseudorandom number generator state. When `rng` is None, a new
`numpy.random.Generator` is created using entropy from the
operating system. Types other than `numpy.random.Generator` are
passed to `numpy.random.default_rng` to instantiate a ``Generator``.
Returns
-------
res : GoodnessOfFitResult
An object with the following attributes.
fit_result : `~scipy.stats._result_classes.FitResult`
An object representing the fit of the provided `dist` to `data`.
This object includes the values of distribution family parameters
that fully define the null-hypothesized distribution, that is,
the distribution from which Monte Carlo samples are drawn.
statistic : float
The value of the statistic comparing provided `data` to the
null-hypothesized distribution.
pvalue : float
The proportion of elements in the null distribution with
statistic values at least as extreme as the statistic value of the
provided `data`.
null_distribution : ndarray
The value of the statistic for each Monte Carlo sample
drawn from the null-hypothesized distribution.
Notes
-----
This is a generalized Monte Carlo goodness-of-fit procedure, special cases
of which correspond with various Anderson-Darling tests, Lilliefors' test,
etc. The test is described in [2]_, [3]_, and [4]_ as a parametric
bootstrap test. This is a Monte Carlo test in which parameters that
specify the distribution from which samples are drawn have been estimated
from the data. We describe the test using "Monte Carlo" rather than
"parametric bootstrap" throughout to avoid confusion with the more familiar
nonparametric bootstrap, and describe how the test is performed below.
*Traditional goodness of fit tests*
Traditionally, critical values corresponding with a fixed set of
significance levels are pre-calculated using Monte Carlo methods. Users
perform the test by calculating the value of the test statistic only for
their observed `data` and comparing this value to tabulated critical
values. This practice is not very flexible, as tables are not available for
all distributions and combinations of known and unknown parameter values.
Also, results can be inaccurate when critical values are interpolated from
limited tabulated data to correspond with the user's sample size and
fitted parameter values. To overcome these shortcomings, this function
allows the user to perform the Monte Carlo trials adapted to their
particular data.
*Algorithmic overview*
In brief, this routine executes the following steps:
1. Fit unknown parameters to the given `data`, thereby forming the
"null-hypothesized" distribution, and compute the statistic of
this pair of data and distribution.
2. Draw random samples from this null-hypothesized distribution.
3. Fit the unknown parameters to each random sample.
4. Calculate the statistic between each sample and the distribution that
has been fit to the sample.
5. Compare the value of the statistic corresponding with `data` from (1)
against the values of the statistic corresponding with the random
samples from (4). The p-value is the proportion of samples with a
statistic value greater than or equal to the statistic of the observed
data.
In more detail, the steps are as follows.
First, any unknown parameters of the distribution family specified by
`dist` are fit to the provided `data` using maximum likelihood estimation.
(One exception is the normal distribution with unknown location and scale:
we use the bias-corrected standard deviation ``np.std(data, ddof=1)`` for
the scale as recommended in [1]_.)
These values of the parameters specify a particular member of the
distribution family referred to as the "null-hypothesized distribution",
that is, the distribution from which the data were sampled under the null
hypothesis. The `statistic`, which compares data to a distribution, is
computed between `data` and the null-hypothesized distribution.
Next, many (specifically `n_mc_samples`) new samples, each containing the
same number of observations as `data`, are drawn from the
null-hypothesized distribution. All unknown parameters of the distribution
family `dist` are fit to *each resample*, and the `statistic` is computed
between each sample and its corresponding fitted distribution. These
values of the statistic form the Monte Carlo null distribution (not to be
confused with the "null-hypothesized distribution" above).
The p-value of the test is the proportion of statistic values in the Monte
Carlo null distribution that are at least as extreme as the statistic value
of the provided `data`. More precisely, the p-value is given by
.. math::
p = \frac{b + 1}
{m + 1}
where :math:`b` is the number of statistic values in the Monte Carlo null
distribution that are greater than or equal to the statistic value
calculated for `data`, and :math:`m` is the number of elements in the
Monte Carlo null distribution (`n_mc_samples`). The addition of :math:`1`
to the numerator and denominator can be thought of as including the
value of the statistic corresponding with `data` in the null distribution,
but a more formal explanation is given in [5]_.
*Limitations*
The test can be very slow for some distribution families because unknown
parameters of the distribution family must be fit to each of the Monte
    Carlo samples, and for most distributions in SciPy, distribution fitting
    is performed via numerical optimization.
*Anti-Pattern*
For this reason, it may be tempting
to treat parameters of the distribution pre-fit to `data` (by the user)
as though they were `known_params`, as specification of all parameters of
the distribution precludes the need to fit the distribution to each Monte
    Carlo sample. (This is essentially how the original Kolmogorov-Smirnov
    test is performed.) Although such a test can provide evidence against the
null hypothesis, the test is conservative in the sense that small p-values
will tend to (greatly) *overestimate* the probability of making a type I
error (that is, rejecting the null hypothesis although it is true), and the
power of the test is low (that is, it is less likely to reject the null
hypothesis even when the null hypothesis is false).
This is because the Monte Carlo samples are less likely to agree with the
null-hypothesized distribution as well as `data`. This tends to increase
the values of the statistic recorded in the null distribution, so that a
larger number of them exceed the value of statistic for `data`, thereby
inflating the p-value.
References
----------
.. [1] M. A. Stephens (1974). "EDF Statistics for Goodness of Fit and
Some Comparisons." Journal of the American Statistical Association,
Vol. 69, pp. 730-737.
.. [2] W. Stute, W. G. Manteiga, and M. P. Quindimil (1993).
"Bootstrap based goodness-of-fit-tests." Metrika 40.1: 243-256.
.. [3] C. Genest, & B Rémillard. (2008). "Validity of the parametric
bootstrap for goodness-of-fit testing in semiparametric models."
Annales de l'IHP Probabilités et statistiques. Vol. 44. No. 6.
.. [4] I. Kojadinovic and J. Yan (2012). "Goodness-of-fit testing based on
a weighted bootstrap: A fast large-sample alternative to the
parametric bootstrap." Canadian Journal of Statistics 40.3: 480-500.
.. [5] B. Phipson and G. K. Smyth (2010). "Permutation P-values Should
Never Be Zero: Calculating Exact P-values When Permutations Are
Randomly Drawn." Statistical Applications in Genetics and Molecular
Biology 9.1.
.. [6] H. W. Lilliefors (1967). "On the Kolmogorov-Smirnov test for
normality with mean and variance unknown." Journal of the American
statistical Association 62.318: 399-402.
.. [7] Filliben, James J. "The probability plot correlation coefficient
test for normality." Technometrics 17.1 (1975): 111-117.
Examples
--------
A well-known test of the null hypothesis that data were drawn from a
given distribution is the Kolmogorov-Smirnov (KS) test, available in SciPy
as `scipy.stats.ks_1samp`. Suppose we wish to test whether the following
data:
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> x = stats.uniform.rvs(size=75, random_state=rng)
were sampled from a normal distribution. To perform a KS test, the
empirical distribution function of the observed data will be compared
against the (theoretical) cumulative distribution function of a normal
distribution. Of course, to do this, the normal distribution under the null
hypothesis must be fully specified. This is commonly done by first fitting
the ``loc`` and ``scale`` parameters of the distribution to the observed
data, then performing the test.
>>> loc, scale = np.mean(x), np.std(x, ddof=1)
>>> cdf = stats.norm(loc, scale).cdf
>>> stats.ks_1samp(x, cdf)
KstestResult(statistic=0.1119257570456813,
pvalue=0.2827756409939257,
statistic_location=0.7751845155861765,
statistic_sign=-1)
An advantage of the KS-test is that the p-value - the probability of
obtaining a value of the test statistic under the null hypothesis as
extreme as the value obtained from the observed data - can be calculated
exactly and efficiently. `goodness_of_fit` can only approximate these
results.
>>> known_params = {'loc': loc, 'scale': scale}
>>> res = stats.goodness_of_fit(stats.norm, x, known_params=known_params,
... statistic='ks', rng=rng)
>>> res.statistic, res.pvalue
(0.1119257570456813, 0.2788)
The statistic matches exactly, but the p-value is estimated by forming
a "Monte Carlo null distribution", that is, by explicitly drawing random
samples from `scipy.stats.norm` with the provided parameters and
    calculating the statistic for each. The fraction of these statistic values
at least as extreme as ``res.statistic`` approximates the exact p-value
calculated by `scipy.stats.ks_1samp`.
However, in many cases, we would prefer to test only that the data were
sampled from one of *any* member of the normal distribution family, not
specifically from the normal distribution with the location and scale
fitted to the observed sample. In this case, Lilliefors [6]_ argued that
the KS test is far too conservative (that is, the p-value overstates
the actual probability of rejecting a true null hypothesis) and thus lacks
power - the ability to reject the null hypothesis when the null hypothesis
is actually false.
Indeed, our p-value above is approximately 0.28, which is far too large
to reject the null hypothesis at any common significance level.
Consider why this might be. Note that in the KS test above, the statistic
always compares data against the CDF of a normal distribution fitted to the
*observed data*. This tends to reduce the value of the statistic for the
observed data, but it is "unfair" when computing the statistic for other
samples, such as those we randomly draw to form the Monte Carlo null
distribution. It is easy to correct for this: whenever we compute the KS
statistic of a sample, we use the CDF of a normal distribution fitted
to *that sample*. The null distribution in this case has not been
    calculated exactly and is typically approximated using Monte Carlo methods
as described above. This is where `goodness_of_fit` excels.
>>> res = stats.goodness_of_fit(stats.norm, x, statistic='ks',
... rng=rng)
>>> res.statistic, res.pvalue
(0.1119257570456813, 0.0196)
Indeed, this p-value is much smaller, and small enough to (correctly)
reject the null hypothesis at common significance levels, including 5% and
2.5%.
However, the KS statistic is not very sensitive to all deviations from
normality. The original advantage of the KS statistic was the ability
to compute the null distribution theoretically, but a more sensitive
statistic - resulting in a higher test power - can be used now that we can
approximate the null distribution
computationally. The Anderson-Darling statistic [1]_ tends to be more
sensitive, and critical values of the this statistic have been tabulated
for various significance levels and sample sizes using Monte Carlo methods.
>>> res = stats.anderson(x, 'norm')
>>> print(res.statistic)
1.2139573337497467
>>> print(res.critical_values)
[0.555 0.625 0.744 0.864 1.024]
>>> print(res.significance_level)
[15. 10. 5. 2.5 1. ]
Here, the observed value of the statistic exceeds the critical value
corresponding with a 1% significance level. This tells us that the p-value
of the observed data is less than 1%, but what is it? We could interpolate
from these (already-interpolated) values, but `goodness_of_fit` can
estimate it directly.
>>> res = stats.goodness_of_fit(stats.norm, x, statistic='ad',
... rng=rng)
>>> res.statistic, res.pvalue
(1.2139573337497467, 0.0034)
A further advantage is that use of `goodness_of_fit` is not limited to
a particular set of distributions or conditions on which parameters
are known versus which must be estimated from data. Instead,
`goodness_of_fit` can estimate p-values relatively quickly for any
distribution with a sufficiently fast and reliable ``fit`` method. For
instance, here we perform a goodness of fit test using the Cramer-von Mises
statistic against the Rayleigh distribution with known location and unknown
scale.
>>> rng = np.random.default_rng()
>>> x = stats.chi(df=2.2, loc=0, scale=2).rvs(size=1000, random_state=rng)
>>> res = stats.goodness_of_fit(stats.rayleigh, x, statistic='cvm',
... known_params={'loc': 0}, rng=rng)
This executes fairly quickly, but to check the reliability of the ``fit``
method, we should inspect the fit result.
>>> res.fit_result # location is as specified, and scale is reasonable
params: FitParams(loc=0.0, scale=2.1026719844231243)
success: True
message: 'The fit was performed successfully.'
>>> import matplotlib.pyplot as plt # matplotlib must be installed to plot
>>> res.fit_result.plot()
>>> plt.show()
If the distribution is not fit to the observed data as well as possible,
the test may not control the type I error rate, that is, the chance of
rejecting the null hypothesis even when it is true.
We should also look for extreme outliers in the null distribution that
may be caused by unreliable fitting. These do not necessarily invalidate
the result, but they tend to reduce the test's power.
>>> _, ax = plt.subplots()
>>> ax.hist(np.log10(res.null_distribution))
>>> ax.set_xlabel("log10 of CVM statistic under the null hypothesis")
>>> ax.set_ylabel("Frequency")
>>> ax.set_title("Histogram of the Monte Carlo null distribution")
>>> plt.show()
This plot seems reassuring.
If ``fit`` method is working reliably, and if the distribution of the test
statistic is not particularly sensitive to the values of the fitted
parameters, then the p-value provided by `goodness_of_fit` is expected to
be a good approximation.
>>> res.statistic, res.pvalue
(0.2231991510248692, 0.0525)
"""
args = _gof_iv(dist, data, known_params, fit_params, guessed_params,
statistic, n_mc_samples, rng)
(dist, data, fixed_nhd_params, fixed_rfd_params, guessed_nhd_params,
guessed_rfd_params, statistic, n_mc_samples_int, rng) = args
# Fit null hypothesis distribution to data
nhd_fit_fun = _get_fit_fun(dist, data, guessed_nhd_params,
fixed_nhd_params)
nhd_vals = nhd_fit_fun(data)
nhd_dist = dist(*nhd_vals)
def rvs(size):
return nhd_dist.rvs(size=size, random_state=rng)
# Define statistic
fit_fun = _get_fit_fun(dist, data, guessed_rfd_params, fixed_rfd_params)
if callable(statistic):
compare_fun = statistic
else:
compare_fun = _compare_dict[statistic]
alternative = getattr(compare_fun, 'alternative', 'greater')
def statistic_fun(data, axis):
# Make things simple by always working along the last axis.
data = np.moveaxis(data, axis, -1)
rfd_vals = fit_fun(data)
rfd_dist = dist(*rfd_vals)
return compare_fun(rfd_dist, data, axis=-1)
res = stats.monte_carlo_test(data, rvs, statistic_fun, vectorized=True,
n_resamples=n_mc_samples, axis=-1,
alternative=alternative)
opt_res = optimize.OptimizeResult()
opt_res.success = True
opt_res.message = "The fit was performed successfully."
opt_res.x = nhd_vals
# Only continuous distributions for now, hence discrete=False
# There's no fundamental limitation; it's just that we're not using
# stats.fit, discrete distributions don't have `fit` method, and
# we haven't written any vectorized fit functions for a discrete
# distribution yet.
return GoodnessOfFitResult(FitResult(dist, data, False, opt_res),
res.statistic, res.pvalue,
res.null_distribution)
def _get_fit_fun(dist, data, guessed_params, fixed_params):
shape_names = [] if dist.shapes is None else dist.shapes.split(", ")
param_names = shape_names + ['loc', 'scale']
fparam_names = ['f'+name for name in param_names]
all_fixed = not set(fparam_names).difference(fixed_params)
guessed_shapes = [guessed_params.pop(x, None)
for x in shape_names if x in guessed_params]
if all_fixed:
def fit_fun(data):
return [fixed_params[name] for name in fparam_names]
# Define statistic, including fitting distribution to data
elif dist in _fit_funs:
def fit_fun(data):
params = _fit_funs[dist](data, **fixed_params)
params = np.asarray(np.broadcast_arrays(*params))
if params.ndim > 1:
params = params[..., np.newaxis]
return params
else:
def fit_fun_1d(data):
return dist.fit(data, *guessed_shapes, **guessed_params,
**fixed_params)
def fit_fun(data):
params = np.apply_along_axis(fit_fun_1d, axis=-1, arr=data)
if params.ndim > 1:
params = params.T[..., np.newaxis]
return params
return fit_fun
# Vectorized fitting functions. These are to accept ND `data` in which each
# row (slice along last axis) is a sample to fit and scalar fixed parameters.
# They return a tuple of shape parameter arrays, each of shape data.shape[:-1].
def _fit_norm(data, floc=None, fscale=None):
loc = floc
scale = fscale
if loc is None and scale is None:
loc = np.mean(data, axis=-1)
scale = np.std(data, ddof=1, axis=-1)
elif loc is None:
loc = np.mean(data, axis=-1)
elif scale is None:
scale = np.sqrt(((data - loc)**2).mean(axis=-1))
return loc, scale
_fit_funs = {stats.norm: _fit_norm} # type: ignore[attr-defined]
# Vectorized goodness of fit statistic functions. These accept a frozen
# distribution object and `data` in which each row (slice along last axis) is
# a sample.
def _anderson_darling(dist, data, axis):
x = np.sort(data, axis=-1)
n = data.shape[-1]
i = np.arange(1, n+1)
Si = (2*i - 1)/n * (dist.logcdf(x) + dist.logsf(x[..., ::-1]))
S = np.sum(Si, axis=-1)
return -n - S
def _compute_dplus(cdfvals): # adapted from _stats_py before gh-17062
n = cdfvals.shape[-1]
return (np.arange(1.0, n + 1) / n - cdfvals).max(axis=-1)
def _compute_dminus(cdfvals):
n = cdfvals.shape[-1]
return (cdfvals - np.arange(0.0, n)/n).max(axis=-1)
def _kolmogorov_smirnov(dist, data, axis=-1):
x = np.sort(data, axis=axis)
cdfvals = dist.cdf(x)
cdfvals = np.moveaxis(cdfvals, axis, -1)
Dplus = _compute_dplus(cdfvals) # always works along last axis
Dminus = _compute_dminus(cdfvals)
return np.maximum(Dplus, Dminus)
def _corr(X, M):
# Correlation coefficient r, simplified and vectorized as we need it.
# See [7] Equation (2). Lemma 1/2 are only for distributions symmetric
# about 0.
Xm = X.mean(axis=-1, keepdims=True)
Mm = M.mean(axis=-1, keepdims=True)
num = np.sum((X - Xm) * (M - Mm), axis=-1)
den = np.sqrt(np.sum((X - Xm)**2, axis=-1) * np.sum((M - Mm)**2, axis=-1))
return num/den
def _filliben(dist, data, axis):
# [7] Section 8 # 1
X = np.sort(data, axis=-1)
# [7] Section 8 # 2
n = data.shape[-1]
k = np.arange(1, n+1)
# Filliben used an approximation for the uniform distribution order
# statistic medians.
# m = (k - .3175)/(n + 0.365)
# m[-1] = 0.5**(1/n)
# m[0] = 1 - m[-1]
# We can just as easily use the (theoretically) exact values. See e.g.
# https://en.wikipedia.org/wiki/Order_statistic
# "Order statistics sampled from a uniform distribution"
m = stats.beta(k, n + 1 - k).median()
# [7] Section 8 # 3
M = dist.ppf(m)
# [7] Section 8 # 4
return _corr(X, M)
_filliben.alternative = 'less' # type: ignore[attr-defined]
def _cramer_von_mises(dist, data, axis):
x = np.sort(data, axis=-1)
n = data.shape[-1]
cdfvals = dist.cdf(x)
u = (2*np.arange(1, n+1) - 1)/(2*n)
w = 1 / (12*n) + np.sum((u - cdfvals)**2, axis=-1)
return w
_compare_dict = {"ad": _anderson_darling, "ks": _kolmogorov_smirnov,
"cvm": _cramer_von_mises, "filliben": _filliben}
def _gof_iv(dist, data, known_params, fit_params, guessed_params, statistic,
n_mc_samples, rng):
if not isinstance(dist, stats.rv_continuous):
message = ("`dist` must be a (non-frozen) instance of "
"`stats.rv_continuous`.")
raise TypeError(message)
data = np.asarray(data, dtype=float)
if not data.ndim == 1:
message = "`data` must be a one-dimensional array of numbers."
raise ValueError(message)
# Leave validation of these key/value pairs to the `fit` method,
# but collect these into dictionaries that will be used
known_params = known_params or dict()
fit_params = fit_params or dict()
guessed_params = guessed_params or dict()
known_params_f = {("f"+key): val for key, val in known_params.items()}
fit_params_f = {("f"+key): val for key, val in fit_params.items()}
# These are the values of parameters of the null distribution family
# with which resamples are drawn
fixed_nhd_params = known_params_f.copy()
fixed_nhd_params.update(fit_params_f)
# These are fixed when fitting the distribution family to resamples
fixed_rfd_params = known_params_f.copy()
# These are used as guesses when fitting the distribution family to
# the original data
guessed_nhd_params = guessed_params.copy()
# These are used as guesses when fitting the distribution family to
# resamples
guessed_rfd_params = fit_params.copy()
guessed_rfd_params.update(guessed_params)
if not callable(statistic):
statistic = statistic.lower()
statistics = {'ad', 'ks', 'cvm', 'filliben'}
if statistic not in statistics:
message = f"`statistic` must be one of {statistics}."
raise ValueError(message)
n_mc_samples_int = int(n_mc_samples)
if n_mc_samples_int != n_mc_samples:
message = "`n_mc_samples` must be an integer."
raise TypeError(message)
rng = check_random_state(rng)
return (dist, data, fixed_nhd_params, fixed_rfd_params, guessed_nhd_params,
guessed_rfd_params, statistic, n_mc_samples_int, rng)
| FitResult |
python | realpython__materials | typer-cli-python/source_code_step_5/rptodo/rptodo.py | {
"start": 284,
"end": 1238
} | class ____:
def __init__(self, db_path: Path) -> None:
self._db_handler = DatabaseHandler(db_path)
def add(self, description: List[str], priority: int = 2) -> CurrentTodo:
"""Add a new to-do to the database."""
description_text = " ".join(description)
if not description_text.endswith("."):
description_text += "."
todo = {
"Description": description_text,
"Priority": priority,
"Done": False,
}
read = self._db_handler.read_todos()
if read.error == DB_READ_ERROR:
return CurrentTodo(todo, read.error)
read.todo_list.append(todo)
write = self._db_handler.write_todos(read.todo_list)
return CurrentTodo(todo, write.error)
def get_todo_list(self) -> List[Dict[str, Any]]:
"""Return the current to-do list."""
read = self._db_handler.read_todos()
return read.todo_list
| Todoer |
python | pytest-dev__pytest | testing/test_collection.py | {
"start": 27576,
"end": 32263
} | class ____:
def test_check_collect_hashes(self, pytester: Pytester) -> None:
p = pytester.makepyfile(
"""
def test_1():
pass
def test_2():
pass
"""
)
shutil.copy(p, p.parent / (p.stem + "2" + ".py"))
items, _reprec = pytester.inline_genitems(p.parent)
assert len(items) == 4
for numi, i in enumerate(items):
for numj, j in enumerate(items):
if numj != numi:
assert hash(i) != hash(j)
assert i != j
def test_example_items1(self, pytester: Pytester) -> None:
p = pytester.makepyfile(
"""
import pytest
def testone():
pass
class TestX(object):
def testmethod_one(self):
pass
class TestY(TestX):
@pytest.mark.parametrize("arg0", [".["])
def testmethod_two(self, arg0):
pass
"""
)
items, _reprec = pytester.inline_genitems(p)
assert len(items) == 4
assert items[0].name == "testone"
assert items[1].name == "testmethod_one"
assert items[2].name == "testmethod_one"
assert items[3].name == "testmethod_two[.[]"
# let's also test getmodpath here
assert items[0].getmodpath() == "testone" # type: ignore[attr-defined]
assert items[1].getmodpath() == "TestX.testmethod_one" # type: ignore[attr-defined]
assert items[2].getmodpath() == "TestY.testmethod_one" # type: ignore[attr-defined]
# PR #6202: Fix incorrect result of getmodpath method. (Resolves issue #6189)
assert items[3].getmodpath() == "TestY.testmethod_two[.[]" # type: ignore[attr-defined]
s = items[0].getmodpath(stopatmodule=False) # type: ignore[attr-defined]
assert s.endswith("test_example_items1.testone")
print(s)
def test_classmethod_is_discovered(self, pytester: Pytester) -> None:
"""Test that classmethods are discovered"""
p = pytester.makepyfile(
"""
class TestCase:
@classmethod
def test_classmethod(cls) -> None:
pass
"""
)
items, _reprec = pytester.inline_genitems(p)
ids = [x.getmodpath() for x in items] # type: ignore[attr-defined]
assert ids == ["TestCase.test_classmethod"]
def test_class_and_functions_discovery_using_glob(self, pytester: Pytester) -> None:
"""Test that Python_classes and Python_functions config options work
as prefixes and glob-like patterns (#600)."""
pytester.makeini(
"""
[pytest]
python_classes = *Suite Test
python_functions = *_test test
"""
)
p = pytester.makepyfile(
"""
class MyTestSuite(object):
def x_test(self):
pass
class TestCase(object):
def test_y(self):
pass
"""
)
items, _reprec = pytester.inline_genitems(p)
ids = [x.getmodpath() for x in items] # type: ignore[attr-defined]
assert ids == ["MyTestSuite.x_test", "TestCase.test_y"]
def test_matchnodes_two_collections_same_file(pytester: Pytester) -> None:
pytester.makeconftest(
"""
import pytest
def pytest_configure(config):
config.pluginmanager.register(Plugin2())
class Plugin2(object):
def pytest_collect_file(self, file_path, parent):
if file_path.suffix == ".abc":
return MyFile2.from_parent(path=file_path, parent=parent)
def pytest_collect_file(file_path, parent):
if file_path.suffix == ".abc":
return MyFile1.from_parent(path=file_path, parent=parent)
class MyFile1(pytest.File):
def collect(self):
yield Item1.from_parent(name="item1", parent=self)
class MyFile2(pytest.File):
def collect(self):
yield Item2.from_parent(name="item2", parent=self)
class Item1(pytest.Item):
def runtest(self):
pass
class Item2(pytest.Item):
def runtest(self):
pass
"""
)
p = pytester.makefile(".abc", "")
result = pytester.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
res = pytester.runpytest(f"{p.name}::item2")
res.stdout.fnmatch_lines(["*1 passed*"])
| Test_genitems |
python | mlflow__mlflow | mlflow/store/artifact/databricks_artifact_repo_resources.py | {
"start": 6728,
"end": 9005
} | class ____(_Resource):
def get_credentials(
self,
cred_type: _CredentialType,
paths: list[str] | None = None,
page_token: str | None = None,
) -> tuple[list[ArtifactCredentialInfo], str | None]:
api = GetCredentialsForRead if cred_type == _CredentialType.READ else GetCredentialsForWrite
json_body = api(run_id=self.id, path=paths, page_token=page_token)
response = self.call_endpoint(
DatabricksMlflowArtifactsService, api, message_to_json(json_body)
)
credential_infos = [
ArtifactCredentialInfo(
signed_uri=c.signed_uri,
type=c.type,
headers=[HttpHeader(name=h.name, value=h.value) for h in c.headers],
)
for c in response.credential_infos
]
return credential_infos, response.next_page_token
def get_artifact_root(self) -> str:
json_body = message_to_json(GetRun(run_id=self.id))
run_response = self.call_endpoint(MlflowService, GetRun, json_body)
return run_response.run.info.artifact_uri
def _list_artifacts(
self,
path: str | None = None,
page_token: str | None = None,
) -> ListArtifactsPage:
path = posixpath.join(self.relative_path, path) if path else self.relative_path
json_body = message_to_json(
ListArtifacts(run_id=self.id, path=path, page_token=page_token),
)
response = self.call_endpoint(MlflowService, ListArtifacts, json_body)
files = response.files
# If `path` is a file, ListArtifacts returns a single list element with the
# same name as `path`. The list_artifacts API expects us to return an empty list in this
# case, so we do so here.
if len(files) == 1 and files[0].path == path and not files[0].is_dir:
return ListArtifactsPage.empty()
return ListArtifactsPage(
files=[
FileInfo(
posixpath.relpath(f.path, self.relative_path),
f.is_dir,
None if f.is_dir else f.file_size,
)
for f in files
],
next_page_token=response.next_page_token,
)
| _Run |
python | kamyu104__LeetCode-Solutions | Python/strong-password-checker.py | {
"start": 29,
"end": 1619
} | class ____(object):
def strongPasswordChecker(self, s):
"""
:type s: str
:rtype: int
"""
missing_type_cnt = 3
if any('a' <= c <= 'z' for c in s):
missing_type_cnt -= 1
if any('A' <= c <= 'Z' for c in s):
missing_type_cnt -= 1
if any(c.isdigit() for c in s):
missing_type_cnt -= 1
total_change_cnt = 0
one_change_cnt, two_change_cnt, three_change_cnt = 0, 0, 0
i = 2
while i < len(s):
if s[i] == s[i-1] == s[i-2]:
length = 2
while i < len(s) and s[i] == s[i-1]:
length += 1
i += 1
total_change_cnt += length / 3
if length % 3 == 0:
one_change_cnt += 1
elif length % 3 == 1:
two_change_cnt += 1
else:
three_change_cnt += 1
else:
i += 1
if len(s) < 6:
return max(missing_type_cnt, 6 - len(s))
elif len(s) <= 20:
return max(missing_type_cnt, total_change_cnt)
else:
delete_cnt = len(s) - 20
total_change_cnt -= min(delete_cnt, one_change_cnt * 1) / 1
total_change_cnt -= min(max(delete_cnt - one_change_cnt, 0), two_change_cnt * 2) / 2
total_change_cnt -= min(max(delete_cnt - one_change_cnt - 2 * two_change_cnt, 0), three_change_cnt * 3) / 3
return delete_cnt + max(missing_type_cnt, total_change_cnt)
| Solution |
python | ray-project__ray | python/ray/data/_internal/datasource/tfrecords_datasource.py | {
"start": 15845,
"end": 16653
} | class ____(AggregateFn):
def __init__(self, columns: List[str]):
self._columns = columns
super().__init__(
init=self._init,
merge=self._merge,
accumulate_row=self._accumulate_row,
finalize=lambda a: a,
name="max_list_size",
)
def _init(self, k: str):
return {col: 0 for col in self._columns}
def _merge(self, acc1: Dict[str, int], acc2: Dict[str, int]):
merged = {}
for col in self._columns:
merged[col] = max(acc1[col], acc2[col])
return merged
def _accumulate_row(self, acc: Dict[str, int], row: "pd.Series"):
for k in row:
value = row[k]
if value:
acc[k] = max(len(value), acc[k])
return acc
| _MaxListSize |
python | joke2k__faker | tests/providers/test_ssn.py | {
"start": 46877,
"end": 47656
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("uk_Ua")
Faker.seed(0)
self.provider = uk_Provider
def test_ssn_len(self):
assert len(self.fake.ssn()) == 10
def test_start_ssn(self):
assert self.fake.ssn("21-06-1994")[:5] == "34505"
def test_ssn_gender(self):
m = self.fake.ssn(gender="M")
w = self.fake.ssn(gender="F")
assert int(m[8]) % 2 != 0, "Must be odd for men"
assert int(w[8]) % 2 == 0, "Must be even for women"
def test_incorrect_birthday(self):
with pytest.raises(ValueError):
self.fake.ssn(birthday="1994-06-01")
def test_incorrect_gender(self):
with pytest.raises(ValueError):
self.fake.ssn(gender="f")
| TestUkUA |
python | getsentry__sentry | src/sentry/testutils/cases.py | {
"start": 88776,
"end": 90063
} | class ____(AcceptanceTestCase, SnubaTestCase):
def setUp(self):
self.now = datetime.now(UTC)
super().setUp()
self.drop_replays()
patcher = mock.patch("django.utils.timezone.now", return_value=self.now)
patcher.start()
self.addCleanup(patcher.stop)
def drop_replays(self):
assert requests.post(settings.SENTRY_SNUBA + "/tests/replays/drop").status_code == 200
def store_replays(self, replays):
assert (
len(replays) >= 2
), "You need to store at least 2 replay events for the replay to be considered valid"
response = requests.post(
settings.SENTRY_SNUBA + "/tests/entities/replays/insert", json=replays
)
assert response.status_code == 200
def store_replay_segments(
self,
replay_id: str,
project_id: int,
segment_id: int,
segment,
) -> None:
f = File.objects.create(name="rr:{segment_id}", type="replay.recording")
f.putfile(BytesIO(compress(dumps_htmlsafe(segment).encode())))
ReplayRecordingSegment.objects.create(
replay_id=replay_id,
project_id=project_id,
segment_id=segment_id,
file_id=f.id,
)
| ReplaysAcceptanceTestCase |
python | tensorflow__tensorflow | tensorflow/python/autograph/tests/datasets_test.py | {
"start": 3219,
"end": 5931
} | class ____(reference_test_base.TestCase):
def setUp(self):
super(ReferenceTest, self).setUp()
self.ds = tf.data.Dataset.range(7)
def test_dataset_no_vars_loop(self):
self.assertFunctionMatchesEager(dataset_no_vars_loop, self.ds)
def test_iterator_no_vars_loop(self):
self.assertFunctionMatchesEager(iterator_no_vars_loop, self.ds)
def test_dataset_single_var_loop(self):
self.assertFunctionMatchesEager(dataset_single_var_loop, self.ds)
def test_iterator_single_var_loop(self):
self.assertFunctionMatchesEager(iterator_single_var_loop, self.ds)
def test_dataset_two_vars_loop(self):
self.assertFunctionMatchesEager(dataset_two_vars_loop, self.ds)
def test_iterator_two_vars_loop(self):
self.assertFunctionMatchesEager(iterator_two_vars_loop, self.ds)
def test_dataset_loop_with_break(self):
self.assertFunctionMatchesEager(dataset_loop_with_break, self.ds)
def test_iterator_loop_with_break(self):
self.assertFunctionMatchesEager(iterator_loop_with_break, self.ds)
def test_dataset_loop_with_return_raises(self):
# This is for the same reason why returns in loops aren't allowed.
# TODO(mdan): This might be resolved by unrolling the loop once.
with self.assertRaisesRegex(
NotImplementedError,
'a return statement cannot be placed inside this TensorFlow loop'):
tf.function(dataset_loop_with_return)(self.ds)
def test_iterator_loop_with_return_raises(self):
# This is for the same reason why returns in loops aren't allowed.
# TODO(mdan): This might be resolved by unrolling the loop once.
with self.assertRaisesRegex(
NotImplementedError,
'a return statement cannot be placed inside this TensorFlow loop'):
tf.function(iterator_loop_with_return)(self.ds)
def test_iterator_next(self):
self.assertFunctionMatchesEager(iterator_next, self.ds)
def test_iterator_next_multiple_calls(self):
self.assertFunctionMatchesEager(iterator_next_multiple_calls, self.ds)
def test_iterator_next_in_loop(self):
self.assertFunctionMatchesEager(iterator_next_in_loop, self.ds, 7)
def test_iterator_next_stopping(self):
# Graph ops raise OutOfRangeError, but eager ops raise StopIteration
with self.assertRaises(tf.errors.OutOfRangeError):
tf.function(iterator_next_stopping)(self.ds, tf.constant(True))
def test_iterator_next_with_catching_stop_iteration(self):
# Graph ops raise OutOfRangeError, but eager ops raise StopIteration
with self.assertRaises(tf.errors.OutOfRangeError):
tf.function(iterator_next_with_catching_stop_iteration)(
self.ds, tf.constant(True))
if __name__ == '__main__':
tf.test.main()
| ReferenceTest |
python | pypa__setuptools | setuptools/tests/test_wheel.py | {
"start": 4596,
"end": 18721
} | class ____:
def __init__(self, id, **kwargs) -> None:
self._id = id
self._fields = kwargs
def __repr__(self) -> str:
return f'{self._id}(**{self._fields!r})'
# Using Any to avoid possible type union issues later in test
# making a TypedDict is not worth in a test and anonymous/inline TypedDict are experimental
# https://github.com/python/mypy/issues/9884
WHEEL_INSTALL_TESTS: tuple[dict[str, Any], ...] = (
dict(
id='basic',
file_defs={'foo': {'__init__.py': ''}},
setup_kwargs=dict(
packages=['foo'],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': {
'EGG-INFO': ['PKG-INFO', 'RECORD', 'WHEEL', 'top_level.txt'],
'foo': ['__init__.py'],
}
}),
),
dict(
id='utf-8',
setup_kwargs=dict(
description='Description accentuée',
),
),
dict(
id='data',
file_defs={
'data.txt': DALS(
"""
Some data...
"""
),
},
setup_kwargs=dict(
data_files=[('data_dir', ['data.txt'])],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': {
'EGG-INFO': ['PKG-INFO', 'RECORD', 'WHEEL', 'top_level.txt'],
'data_dir': ['data.txt'],
}
}),
),
dict(
id='extension',
file_defs={
'extension.c': DALS(
"""
#include "Python.h"
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"extension",
NULL,
0,
NULL,
NULL,
NULL,
NULL,
NULL
};
#define INITERROR return NULL
PyMODINIT_FUNC PyInit_extension(void)
#else
#define INITERROR return
void initextension(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
PyObject *module = PyModule_Create(&moduledef);
#else
PyObject *module = Py_InitModule("extension", NULL);
#endif
if (module == NULL)
INITERROR;
#if PY_MAJOR_VERSION >= 3
return module;
#endif
}
"""
),
},
setup_kwargs=dict(
ext_modules=[
Record(
'setuptools.Extension', name='extension', sources=['extension.c']
)
],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}-{platform}.egg': [
'extension{shlib_ext}',
{
'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'top_level.txt',
]
},
]
}),
),
dict(
id='header',
file_defs={
'header.h': DALS(
"""
"""
),
},
setup_kwargs=dict(
headers=['header.h'],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': [
'header.h',
{
'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'top_level.txt',
]
},
]
}),
),
dict(
id='script',
file_defs={
'script.py': DALS(
"""
#/usr/bin/python
print('hello world!')
"""
),
'script.sh': DALS(
"""
#/bin/sh
echo 'hello world!'
"""
),
},
setup_kwargs=dict(
scripts=['script.py', 'script.sh'],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': {
'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'top_level.txt',
{'scripts': ['script.py', 'script.sh']},
]
}
}),
),
dict(
id='requires1',
install_requires='foobar==2.0',
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': {
'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'requires.txt',
'top_level.txt',
]
}
}),
requires_txt=DALS(
"""
foobar==2.0
"""
),
),
dict(
id='requires2',
install_requires=f"""
bar
foo<=2.0; {sys.platform!r} in sys_platform
""",
requires_txt=DALS(
"""
bar
foo<=2.0
"""
),
),
dict(
id='requires3',
install_requires=f"""
bar; {sys.platform!r} != sys_platform
""",
),
dict(
id='requires4',
install_requires="""
foo
""",
extras_require={
'extra': 'foobar>3',
},
requires_txt=DALS(
"""
foo
[extra]
foobar>3
"""
),
),
dict(
id='requires5',
extras_require={
'extra': f'foobar; {sys.platform!r} != sys_platform',
},
requires_txt='\n'
+ DALS(
"""
[extra]
"""
),
),
dict(
id='requires_ensure_order',
install_requires="""
foo
bar
baz
qux
""",
extras_require={
'extra': """
foobar>3
barbaz>4
bazqux>5
quxzap>6
""",
},
requires_txt=DALS(
"""
foo
bar
baz
qux
[extra]
foobar>3
barbaz>4
bazqux>5
quxzap>6
"""
),
),
dict(
id='namespace_package',
file_defs={
'foo': {
'bar': {'__init__.py': ''},
},
},
setup_kwargs=dict(
namespace_packages=['foo'],
packages=['foo.bar'],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': [
'foo-1.0-py{py_version}-nspkg.pth',
{
'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'namespace_packages.txt',
'top_level.txt',
]
},
{
'foo': [
'__init__.py',
{'bar': ['__init__.py']},
]
},
]
}),
),
dict(
id='empty_namespace_package',
file_defs={
'foobar': {
'__init__.py': (
"__import__('pkg_resources').declare_namespace(__name__)"
)
},
},
setup_kwargs=dict(
namespace_packages=['foobar'],
packages=['foobar'],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': [
'foo-1.0-py{py_version}-nspkg.pth',
{
'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'namespace_packages.txt',
'top_level.txt',
]
},
{
'foobar': [
'__init__.py',
]
},
]
}),
),
dict(
id='data_in_package',
file_defs={
'foo': {
'__init__.py': '',
'data_dir': {
'data.txt': DALS(
"""
Some data...
"""
),
},
}
},
setup_kwargs=dict(
packages=['foo'],
data_files=[('foo/data_dir', ['foo/data_dir/data.txt'])],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': {
'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'top_level.txt',
],
'foo': [
'__init__.py',
{
'data_dir': [
'data.txt',
]
},
],
}
}),
),
)
@pytest.mark.parametrize(
'params',
WHEEL_INSTALL_TESTS,
ids=[params['id'] for params in WHEEL_INSTALL_TESTS],
)
def test_wheel_install(params):
project_name = params.get('name', 'foo')
version = params.get('version', '1.0')
install_requires = params.get('install_requires', [])
extras_require = params.get('extras_require', {})
requires_txt = params.get('requires_txt', None)
install_tree = params.get('install_tree')
file_defs = params.get('file_defs', {})
setup_kwargs = params.get('setup_kwargs', {})
with (
build_wheel(
name=project_name,
version=version,
install_requires=install_requires,
extras_require=extras_require,
extra_file_defs=file_defs,
**setup_kwargs,
) as filename,
tempdir() as install_dir,
):
_check_wheel_install(
filename, install_dir, install_tree, project_name, version, requires_txt
)
def test_wheel_no_dist_dir():
project_name = 'nodistinfo'
version = '1.0'
wheel_name = f'{project_name}-{version}-py2.py3-none-any.whl'
with tempdir() as source_dir:
wheel_path = os.path.join(source_dir, wheel_name)
# create an empty zip file
zipfile.ZipFile(wheel_path, 'w').close()
with tempdir() as install_dir:
with pytest.raises(ValueError):
_check_wheel_install(
wheel_path, install_dir, None, project_name, version, None
)
def test_wheel_is_compatible(monkeypatch):
def sys_tags():
return {
(t.interpreter, t.abi, t.platform)
for t in parse_tag('cp36-cp36m-manylinux1_x86_64')
}
monkeypatch.setattr('setuptools.wheel._get_supported_tags', sys_tags)
assert Wheel('onnxruntime-0.1.2-cp36-cp36m-manylinux1_x86_64.whl').is_compatible()
def test_wheel_mode():
@contextlib.contextmanager
def build_wheel(extra_file_defs=None, **kwargs):
file_defs = {
'setup.py': (
DALS(
"""
# -*- coding: utf-8 -*-
from setuptools import setup
import setuptools
setup(**%r)
"""
)
% kwargs
).encode('utf-8'),
}
if extra_file_defs:
file_defs.update(extra_file_defs)
with tempdir() as source_dir:
path.build(file_defs, source_dir)
runsh = pathlib.Path(source_dir) / "script.sh"
os.chmod(runsh, 0o777)
subprocess.check_call(
(sys.executable, 'setup.py', '-q', 'bdist_wheel'), cwd=source_dir
)
yield glob.glob(os.path.join(source_dir, 'dist', '*.whl'))[0]
params = dict(
id='script',
file_defs={
'script.py': DALS(
"""
#/usr/bin/python
print('hello world!')
"""
),
'script.sh': DALS(
"""
#/bin/sh
echo 'hello world!'
"""
),
},
setup_kwargs=dict(
scripts=['script.py', 'script.sh'],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': {
'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'top_level.txt',
{'scripts': ['script.py', 'script.sh']},
]
}
}),
)
project_name = params.get('name', 'foo')
version = params.get('version', '1.0')
install_tree = params.get('install_tree')
file_defs = params.get('file_defs', {})
setup_kwargs = params.get('setup_kwargs', {})
with (
build_wheel(
name=project_name,
version=version,
install_requires=[],
extras_require={},
extra_file_defs=file_defs,
**setup_kwargs,
) as filename,
tempdir() as install_dir,
):
_check_wheel_install(
filename, install_dir, install_tree, project_name, version, None
)
w = Wheel(filename)
base = pathlib.Path(install_dir) / w.egg_name()
script_sh = base / "EGG-INFO" / "scripts" / "script.sh"
assert script_sh.exists()
if sys.platform != 'win32':
# Editable file mode has no effect on Windows
assert oct(stat.S_IMODE(script_sh.stat().st_mode)) == "0o777"
| Record |
python | getsentry__sentry | tests/acceptance/test_create_project.py | {
"start": 374,
"end": 2885
} | class ____(AcceptanceTestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user("foo@example.com")
self.org = self.create_organization(name="Rowdy Tiger", owner=self.user)
self.login_as(self.user)
self.path = f"/organizations/{self.org.slug}/projects/new/"
def load_project_creation_page(self) -> None:
self.browser.get(self.path)
self.browser.wait_until('[aria-label="Create Project"]')
def test_no_teams(self) -> None:
self.load_project_creation_page()
self.browser.click(None, "//*[text()='Select a Team']")
self.browser.click('[data-test-id="create-team-option"]')
self.browser.wait_until("[role='dialog']")
input = self.browser.element('input[name="slug"]')
input.send_keys("new-team")
self.browser.element("[role='dialog'] form").submit()
self.browser.wait_until(xpath='//div[text()="#new-team"]')
def test_select_correct_platform(self) -> None:
self.create_team(organization=self.org, name="team three")
self.load_project_creation_page()
self.browser.click("[data-test-id='platform-javascript-react']")
self.browser.click('[data-test-id="create-project"]')
self.browser.wait_until(xpath="//h2[text()='Configure React SDK']")
def test_project_deletion_on_going_back(self) -> None:
self.create_team(organization=self.org, name="team three", members=[self.user])
self.load_project_creation_page()
self.browser.click("[data-test-id='platform-php-laravel']")
self.browser.click('[data-test-id="create-project"]')
self.browser.wait_until(xpath="//h2[text()='Configure Laravel SDK']")
project1 = Project.objects.get(organization=self.org, slug="php-laravel")
self.browser.click('[aria-label="Back to Platform Selection"]')
self.browser.click("[data-test-id='platform-javascript-nextjs']")
self.browser.click('[data-test-id="create-project"]')
self.browser.wait_until(xpath="//h2[text()='Configure Next.js SDK']")
project2 = Project.objects.get(organization=self.org, slug="javascript-nextjs")
self.browser.back()
self.browser.get("/organizations/%s/projects/" % self.org.slug)
self.browser.wait_until(xpath='//h1[text()="Remain Calm"]')
assert_existing_projects_status(
self.org, active_project_ids=[], deleted_project_ids=[project1.id, project2.id]
)
| CreateProjectTest |
python | qdrant__qdrant-client | qdrant_client/embed/schema_parser.py | {
"start": 746,
"end": 11846
} | class ____:
"""Model schema parser. Parses json schemas to retrieve paths to objects requiring inference.
The parser is stateful, it accumulates the results of parsing in its internal structures.
Attributes:
_defs: definitions extracted from json schemas
_recursive_refs: set of recursive refs found in the processed schemas, e.g.:
{"Filter", "Prefetch"}
_excluded_recursive_refs: predefined time-consuming recursive refs which don't have inference objects, e.g.:
{"Filter"}
_included_recursive_refs: set of recursive refs which have inference objects, e.g.:
{"Prefetch"}
_cache: cache of string paths for models containing objects for inference, e.g.:
{"Prefetch": ['prefetch.query', 'prefetch.query.context.negative', ...]}
path_cache: cache of FieldPath objects for models containing objects for inference, e.g.:
{
"Prefetch": [
FieldPath(
current="prefetch",
tail=[
FieldPath(
current="query",
tail=[
FieldPath(
current="recommend",
tail=[
FieldPath(current="negative", tail=None),
FieldPath(current="positive", tail=None),
],
),
...,
],
),
],
)
]
}
name_recursive_ref_mapping: mapping of model field names to ref names, e.g.:
{"prefetch": "Prefetch"}
"""
CACHE_PATH = "_inspection_cache.py"
INFERENCE_OBJECT_NAMES = {"Document", "Image", "InferenceObject"}
def __init__(self) -> None:
# self._defs does not include the whole schema, but only the part with the structures used in $defs
self._defs: dict[str, Union[dict[str, Any], list[dict[str, Any]]]] = deepcopy(DEFS) # type: ignore[arg-type]
self._cache: dict[str, list[str]] = deepcopy(CACHE_STR_PATH)
self._recursive_refs: set[str] = set(RECURSIVE_REFS)
self._excluded_recursive_refs: set[str] = set(EXCLUDED_RECURSIVE_REFS)
self._included_recursive_refs: set[str] = set(INCLUDED_RECURSIVE_REFS)
self.name_recursive_ref_mapping: dict[str, str] = {
k: v for k, v in NAME_RECURSIVE_REF_MAPPING.items()
}
self.path_cache: dict[str, list[FieldPath]] = {
model: convert_paths(paths) for model, paths in self._cache.items()
}
self._processed_recursive_defs: dict[str, Any] = {}
def _replace_refs(
self,
schema: Union[dict[str, Any], list[dict[str, Any]]],
parent: Optional[str] = None,
seen_refs: Optional[set] = None,
) -> Union[dict[str, Any], list[dict[str, Any]]]:
"""Replace refs in schema with their definitions
Args:
schema: schema to parse
parent: previous level key
seen_refs: set of seen refs to spot recursive paths
Returns:
schema with replaced refs
"""
parent = parent if parent else None
seen_refs = seen_refs if seen_refs else set()
if isinstance(schema, dict):
if "$ref" in schema:
ref_path = schema["$ref"]
def_key = ref_path.split("/")[-1]
if def_key in self._processed_recursive_defs:
return self._processed_recursive_defs[def_key]
if def_key == parent or def_key in seen_refs:
self._recursive_refs.add(def_key)
self._processed_recursive_defs[def_key] = schema
return schema
seen_refs.add(def_key)
return self._replace_refs(
self._defs[def_key], parent=def_key, seen_refs=copy(seen_refs)
)
schemes = {}
if "properties" in schema:
for k, v in schema.items():
if k == "properties":
schemes[k] = self._replace_refs(
schema=v, parent=parent, seen_refs=copy(seen_refs)
)
else:
schemes[k] = v
else:
for k, v in schema.items():
parent_key = k if isinstance(v, dict) and "properties" in v else parent
schemes[k] = self._replace_refs(
schema=v, parent=parent_key, seen_refs=copy(seen_refs)
)
return schemes
elif isinstance(schema, list):
return [
self._replace_refs(schema=item, parent=parent, seen_refs=copy(seen_refs)) # type: ignore
for item in schema
]
else:
return schema
def _find_document_paths(
self,
schema: Union[dict[str, Any], list[dict[str, Any]]],
current_path: str = "",
after_properties: bool = False,
seen_refs: Optional[set] = None,
) -> list[str]:
"""Read a schema and find paths to objects requiring inference
Populates model fields names to ref names mapping
Args:
schema: schema to parse
current_path: current path in the schema
after_properties: flag indicating if the current path is after "properties" key
seen_refs: set of seen refs to spot recursive paths
Returns:
List of string dot separated paths to objects requiring inference
"""
document_paths: list[str] = []
seen_recursive_refs = seen_refs if seen_refs is not None else set()
parts = current_path.split(".")
if len(parts) != len(set(parts)): # check for recursive paths
return document_paths
if not isinstance(schema, dict):
return document_paths
if "title" in schema and schema["title"] in self.INFERENCE_OBJECT_NAMES:
document_paths.append(current_path)
return document_paths
for key, value in schema.items():
if key == "$defs":
continue
if key == "$ref":
model_name = value.split("/")[-1]
value = self._defs[model_name]
if model_name in seen_recursive_refs:
continue
if (
model_name in self._excluded_recursive_refs
): # on the first run it might be empty
continue
if (
model_name in self._recursive_refs
): # included and excluded refs might not be filled up yet, we're looking in all recursive refs
# we would need to clean up name recursive ref mapping later and delete excluded refs from there
seen_recursive_refs.add(model_name)
self.name_recursive_ref_mapping[current_path.split(".")[-1]] = model_name
if after_properties: # field name seen in pydantic models comes after "properties" key
if current_path:
new_path = f"{current_path}.{key}"
else:
new_path = key
else:
new_path = current_path
if isinstance(value, dict):
document_paths.extend(
self._find_document_paths(
value, new_path, key == "properties", seen_refs=seen_recursive_refs
)
)
elif isinstance(value, list):
for item in value:
if isinstance(item, dict):
document_paths.extend(
self._find_document_paths(
item,
new_path,
key == "properties",
seen_refs=seen_recursive_refs,
)
)
return sorted(set(document_paths))
def parse_model(self, model: Type[BaseModel]) -> None:
"""Parse model schema to retrieve paths to objects requiring inference.
Checks model json schema, extracts definitions and finds paths to objects requiring inference.
No parsing happens if model has already been processed.
Args:
model: model to parse
Returns:
None
"""
model_name = model.__name__
if model_name in self._cache:
return None
schema = model_json_schema(model)
for k, v in schema.get("$defs", {}).items():
if k not in self._defs:
self._defs[k] = v
if "$defs" in schema:
raw_refs = (
{"$ref": schema["$ref"]}
if "$ref" in schema
else {"properties": schema["properties"]}
)
refs = self._replace_refs(raw_refs)
self._cache[model_name] = self._find_document_paths(refs)
else:
self._cache[model_name] = []
for ref in self._recursive_refs:
if ref in self._excluded_recursive_refs or ref in self._included_recursive_refs:
continue
if self._find_document_paths(self._defs[ref]):
self._included_recursive_refs.add(ref)
else:
self._excluded_recursive_refs.add(ref)
self.name_recursive_ref_mapping = {
k: v
for k, v in self.name_recursive_ref_mapping.items()
if v not in self._excluded_recursive_refs
}
# convert str paths to FieldPath objects which group path parts and reduce the time of the traversal
self.path_cache = {model: convert_paths(paths) for model, paths in self._cache.items()}
def _persist(self, output_path: Union[Path, str] = CACHE_PATH) -> None:
"""Persist the parser state to a file
Args:
output_path: path to the file to save the parser state
Returns:
None
"""
with open(output_path, "w") as f:
f.write(f"CACHE_STR_PATH = {self._cache}\n")
f.write(f"DEFS = {self._defs}\n")
# `sorted is required` to use `diff` in comparisons
f.write(f"RECURSIVE_REFS = {sorted(self._recursive_refs)}\n")
f.write(f"INCLUDED_RECURSIVE_REFS = {sorted(self._included_recursive_refs)}\n")
f.write(f"EXCLUDED_RECURSIVE_REFS = {sorted(self._excluded_recursive_refs)}\n")
f.write(f"NAME_RECURSIVE_REF_MAPPING = {self.name_recursive_ref_mapping}\n")
| ModelSchemaParser |
python | facebook__pyre-check | tools/generate_taint_models/tests/test_functions.py | {
"start": 717,
"end": 926
} | class ____(TestClass):
def __init__(self, x: int) -> None:
...
all_functions = [
testA,
testB,
testC,
testD,
testE,
TestClass.methodA,
TestClass.methodB,
]
| TestChildClassB |
python | openai__openai-python | src/openai/types/eval_custom_data_source_config.py | {
"start": 267,
"end": 589
} | class ____(BaseModel):
schema_: Dict[str, object] = FieldInfo(alias="schema")
"""
The json schema for the run data source items. Learn how to build JSON schemas
[here](https://json-schema.org/).
"""
type: Literal["custom"]
"""The type of data source. Always `custom`."""
| EvalCustomDataSourceConfig |
python | google__python-fire | fire/main_test.py | {
"start": 703,
"end": 1398
} | class ____(testutils.BaseTestCase):
"""Tests to verify the behavior of __main__ (python -m fire)."""
def testNameSetting(self):
# Confirm one of the usage lines has the gettempdir member.
with self.assertOutputMatches('gettempdir'):
__main__.main(['__main__.py', 'tempfile'])
def testArgPassing(self):
expected = os.path.join('part1', 'part2', 'part3')
with self.assertOutputMatches('%s\n' % expected):
__main__.main(
['__main__.py', 'os.path', 'join', 'part1', 'part2', 'part3'])
with self.assertOutputMatches('%s\n' % expected):
__main__.main(
['__main__.py', 'os', 'path', '-', 'join', 'part1', 'part2', 'part3'])
| MainModuleTest |
python | walkccc__LeetCode | solutions/661. Image Smoother/661.py | {
"start": 0,
"end": 462
} | class ____:
def imageSmoother(self, M: list[list[int]]) -> list[list[int]]:
m = len(M)
n = len(M[0])
ans = [[0 for j in range(n)] for i in range(m)]
for i in range(m):
for j in range(n):
ones = 0
count = 0
for y in range(max(0, i - 1), min(m, i + 2)):
for x in range(max(0, j - 1), min(n, j + 2)):
ones += M[y][x]
count += 1
ans[i][j] = ones // count
return ans
| Solution |
python | tensorflow__tensorflow | tensorflow/python/data/ops/take_op.py | {
"start": 1058,
"end": 1673
} | class ____(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` containing the first `count` elements from its input."""
def __init__(self, input_dataset, count, name=None):
"""See `Dataset.take()` for details."""
self._input_dataset = input_dataset
self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name="count")
self._name = name
variant_tensor = gen_dataset_ops.take_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
count=self._count,
**self._common_args)
super().__init__(input_dataset, variant_tensor)
| _TakeDataset |
python | python-pillow__Pillow | Tests/test_font_leaks.py | {
"start": 824,
"end": 1284
} | class ____(TestTTypeFontLeak):
# fails at iteration 37 in main
iterations = 100
mem_limit = 1024 # k
def test_leak(self) -> None:
if features.check_module("freetype2"):
ImageFont.core = _util.DeferredError(ImportError("Disabled for testing"))
try:
default_font = ImageFont.load_default()
finally:
ImageFont.core = original_core
self._test_font(default_font)
| TestDefaultFontLeak |
python | huggingface__transformers | src/transformers/models/qwen3_vl_moe/modeling_qwen3_vl_moe.py | {
"start": 17669,
"end": 18781
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.top_k = config.num_experts_per_tok
self.num_experts = config.num_experts
self.norm_topk_prob = config.norm_topk_prob
self.hidden_dim = config.hidden_size
self.weight = nn.Parameter(torch.zeros(self.num_experts, self.hidden_dim))
def forward(self, hidden_states):
hidden_states = hidden_states.reshape(-1, self.hidden_dim)
router_logits = F.linear(hidden_states, self.weight) # (seq_len, num_experts)
router_logits = torch.nn.functional.softmax(router_logits, dtype=torch.float, dim=-1)
router_top_value, router_indices = torch.topk(router_logits, self.top_k, dim=-1) # (seq_len, top_k)
if self.norm_topk_prob:
router_top_value /= router_top_value.sum(dim=-1, keepdim=True)
router_top_value = router_top_value.to(router_logits.dtype)
router_scores = torch.zeros_like(router_logits).scatter_(1, router_indices, router_top_value)
return router_scores, router_indices
@auto_docstring
| Qwen3VLMoeTextTopKRouter |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 433406,
"end": 433598
} | class ____(Data):
"""Generator schema wrapper."""
_schema = {"$ref": "#/definitions/Generator"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| Generator |
python | tensorflow__tensorflow | tensorflow/lite/python/metrics/metrics_nonportable_test.py | {
"start": 14582,
"end": 23692
} | class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
"""Testing conversion error metric."""
def setUp(self):
super(ConverterErrorMetricTest, self).setUp()
# Mock metrics instance except errors so other test cases are not affected.
mock_attempt = mock.create_autospec(monitoring.Counter, instance=True)
self._counter_conversion_attempt = metrics._counter_conversion_attempt
metrics._counter_conversion_attempt = mock_attempt
mock_success = mock.create_autospec(monitoring.Counter, instance=True)
self._counter_conversion_success = metrics._counter_conversion_success
metrics._counter_conversion_success = mock_success
mock_params = mock.create_autospec(monitoring.StringGauge, instance=True)
self._gauge_conversion_params = metrics._gauge_conversion_params
metrics._gauge_conversion_params = mock_params
def tearDown(self):
super(ConverterErrorMetricTest, self).tearDown()
# # Restore metrics instances.
metrics._counter_conversion_attempt = self._counter_conversion_attempt
metrics._counter_conversion_success = self._counter_conversion_success
metrics._gauge_conversion_params = self._gauge_conversion_params
def convert_and_check_location_info(self,
converter,
expected_type,
expected_sources=None):
# The custom attribute of ConverterError can't be accessed with
# assertRaises so use try-catch block instead.
try:
tflite_model = converter.convert()
self.assertIsNone(tflite_model)
except ConverterError as converter_error:
# pylint: disable=g-assert-in-except
self.assertLen(converter_error.errors, 1)
location = converter_error.errors[0].location
self.assertEqual(location.type, expected_type)
if expected_sources:
debug_string = str(location)
for source in expected_sources:
self.assertIn(source, debug_string)
# pylint: enable=g-assert-in-except
def test_failure_at_PrepareCompositeFunctionsPass(self):
if context.is_tfrt_enabled():
self.skipTest('This test crashed with TFRT.')
class NgramsLayer(tf.keras.layers.Layer):
def call(self, input_tensor, **kwargs):
return mock_ngrams(input_tensor, width=2, axis=-1, string_separator=' ')
# Registers a fake WhitespaceTokenizeWithOffsets so the TFText fusing logic
# is enable in MLIR side.
custom_opdefs_str = (
'name: \'WhitespaceTokenizeWithOffsets\' input_arg: {name: \'Input1\' '
'type: DT_FLOAT} input_arg: {name: \'Input2\' type: DT_FLOAT} '
'output_arg: {name: \'Output\' type: DT_FLOAT}')
register_custom_opdefs([custom_opdefs_str])
model = tf.keras.models.Sequential([NgramsLayer()])
model.predict(tf.constant(['test']))
converter = lite.TFLiteConverterV2.from_keras_model(model)
converter.allow_custom_ops = True
self.convert_and_check_location_info(
converter, converter_error_data_pb2.ConverterErrorData.UNKNOWNLOC)
exported_error = metrics._gauge_conversion_errors.get_cell(
'CONVERT_TF_TO_TFLITE_MODEL',
'PrepareCompositeFunctionsPass',
'tf.Const',
'UNKNOWN',
).value()
self.assertEqual(exported_error,
"\'width\' attribute is not set or not an integer")
def test_need_flex_ops(self):
def create_graph_with_custom_add(opname='CustomAdd'):
custom_opdefs_str = (
'name: \'' + opname +
'\' input_arg: {name: \'Input1\' type: DT_FLOAT} '
'input_arg: {name: \'Input2\' type: DT_FLOAT} output_arg: {name: '
'\'Output\' type: DT_FLOAT}')
# Create a graph that has one add op.
new_graph = graph_pb2.GraphDef()
with ops.Graph().as_default():
with session.Session() as sess:
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='input')
out_tensor = in_tensor + in_tensor
inputs = {'x': in_tensor}
outputs = {'z': out_tensor}
new_graph.CopyFrom(sess.graph_def)
# Rename Add op name to opname.
for node in new_graph.node:
if node.op.startswith('Add'):
node.op = opname
del node.attr['T']
# Register custom op defs to import modified graph def.
register_custom_opdefs([custom_opdefs_str])
return (new_graph, inputs, outputs)
new_graph, inputs, outputs = create_graph_with_custom_add()
# Import to load the custom opdef.
saved_model_dir = os.path.join(self.get_temp_dir(), 'model')
with ops.Graph().as_default():
with session.Session() as sess:
import_graph_def(new_graph, name='')
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
self.convert_and_check_location_info(
converter,
converter_error_data_pb2.ConverterErrorData.NAMELOC,
expected_sources='add')
exported_error = metrics._gauge_conversion_errors.get_cell(
'CONVERT_TF_TO_TFLITE_MODEL', 'CONVERT_SAVED_MODEL', 'tf.CustomAdd',
'ERROR_NEEDS_CUSTOM_OPS').value()
self.assertIn(
"'tf.CustomAdd' op is neither a custom op nor a flex op\n",
exported_error,
)
self.assertIn('Error code: ERROR_NEEDS_CUSTOM_OPS', exported_error)
def test_unsupported_control_flow_v1(self):
filename = resource_loader.get_path_to_datafile(
'../testdata/control_flow_v1_saved_model')
converter = lite.TFLiteConverterV2.from_saved_model(filename)
self.convert_and_check_location_info(
converter, converter_error_data_pb2.ConverterErrorData.UNKNOWNLOC)
exported_error = metrics._gauge_conversion_errors.get_cell(
'CONVERT_TF_TO_TFLITE_MODEL', 'CONVERT_SAVED_MODEL', '',
'ERROR_UNSUPPORTED_CONTROL_FLOW_V1').value()
self.assertEqual(
exported_error,
'Merge only has 4 inputs, while only merge nodes with two inputs '
'supported.\n\tFailed to functionalize Control Flow V1 ops. Consider '
'using Control Flow V2 ops instead. See https://www.tensorflow.org/'
'api_docs/python/tf/compat/v1/enable_control_flow_v2.')
def test_location_from_concrete_functions(self):
@tf.function(input_signature=[
tf.TensorSpec(shape=[None, None, 2, 3, 3], dtype=tf.complex64),
tf.TensorSpec(shape=[None, None, 1, 3, 3], dtype=tf.complex64),
])
def model(a, b):
return tf.add(a, b, name='add')
converter = lite.TFLiteConverterV2.from_concrete_functions(
[model.get_concrete_function()], model)
self.convert_and_check_location_info(
converter,
converter_error_data_pb2.ConverterErrorData.CALLSITELOC,
expected_sources=[
'tensorflow/lite/python/metrics/metrics_nonportable_test.py',
])
def test_location_from_saved_model(self):
with tempfile.TemporaryDirectory() as tmp_dir:
class Adder(tf.Module):
@tf.function(input_signature=[
tf.TensorSpec(shape=[None, None, 2, 3, 3], dtype=tf.complex64),
tf.TensorSpec(shape=[None, None, 1, 3, 3], dtype=tf.complex64),
])
def serving_default(self, a, b):
return tf.add(a, b, name='add')
tf.saved_model.save(
Adder(),
tmp_dir,
options=tf.saved_model.SaveOptions(save_debug_info=True))
converter = lite.TFLiteConverterV2.from_saved_model(tmp_dir)
self.convert_and_check_location_info(
converter,
converter_error_data_pb2.ConverterErrorData.CALLSITELOC,
expected_sources=[
'tensorflow/lite/python/metrics/metrics_nonportable_test.py',
])
@parameterized.named_parameters(
('_WithoutLoweringToSavedModel', False, None),
('_WithLoweringToSavedModel', True,
'tensorflow/lite/python/metrics/metrics_nonportable_test.py'))
def test_location_from_keras_model(self, lower_to_saved_model,
expected_source):
input_tensor1 = tf.keras.layers.Input(
shape=[None, None, 2, 3, 3], dtype=tf.complex64)
input_tensor2 = tf.keras.layers.Input(
shape=[None, None, 2, 3, 3], dtype=tf.complex64)
output = tf.keras.layers.Add()([input_tensor1, input_tensor2])
model = tf.keras.Model(
inputs=[input_tensor1, input_tensor2], outputs=output)
model.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
converter = lite.TFLiteConverterV2.from_keras_model(model)
converter.experimental_lower_to_saved_model = lower_to_saved_model
# The location does not contain callsite to the current file.
self.convert_and_check_location_info(
converter,
converter_error_data_pb2.ConverterErrorData.CALLSITELOC,
expected_sources=[expected_source] if expected_source else None)
if __name__ == '__main__':
test.main()
| ConverterErrorMetricTest |
python | great-expectations__great_expectations | great_expectations/validator/validator.py | {
"start": 4597,
"end": 64939
} | class ____:
"""Validator is the key object used to create Expectations, validate Expectations, and get Metrics for Expectations.
Validators are used by Checkpoints to validate Expectations.
Args:
execution_engine: The Execution Engine to be used to perform validation.
interactive_evaluation: If True, the Validator will perform evaluation when Expectations are added.
expectation_suite: The Expectation Suite to validate.
expectation_suite_name: The name of the Expectation Suite to validate.
data_context: The Data Context associated with this Validator.
batches: The Batches for which to validate.
""" # noqa: E501 # FIXME CoP
DEFAULT_RUNTIME_CONFIGURATION = {
"catch_exceptions": False,
"result_format": "BASIC",
}
RUNTIME_KEYS = DEFAULT_RUNTIME_CONFIGURATION.keys()
# noinspection PyUnusedLocal
def __init__( # noqa: PLR0913 # FIXME CoP
self,
execution_engine: ExecutionEngine,
interactive_evaluation: bool = True,
expectation_suite: ExpectationSuite | None = None,
expectation_suite_name: Optional[str] = None,
data_context: Optional[AbstractDataContext] = None,
batches: List[Batch] | Sequence[Batch | FluentBatch] = tuple(),
**kwargs,
) -> None:
self._data_context: Optional[AbstractDataContext] = data_context
self._metrics_calculator: MetricsCalculator = MetricsCalculator(
execution_engine=execution_engine,
show_progress_bars=self._determine_progress_bars(),
)
execution_engine.batch_manager.reset_batch_cache()
self._execution_engine: ExecutionEngine = execution_engine
if batches:
self.load_batch_list(batch_list=batches)
self._expose_dataframe_methods: bool = False
self.interactive_evaluation: bool = interactive_evaluation
self._initialize_expectations(
expectation_suite=expectation_suite,
expectation_suite_name=expectation_suite_name,
)
self._default_expectation_args: Dict[str, Union[bool, str]] = copy.deepcopy(
Validator.DEFAULT_RUNTIME_CONFIGURATION # type: ignore[arg-type] # FIXME CoP
)
# This special state variable tracks whether a validation run is going on, which will disable # noqa: E501 # FIXME CoP
# saving expectation config objects
self._active_validation: bool = False
@property
def _include_rendered_content(self) -> bool:
return project_manager.is_using_cloud()
@property
def execution_engine(self) -> ExecutionEngine:
"""Returns the execution engine being used by the validator at the given time"""
return self._execution_engine
@property
def metrics_calculator(self) -> MetricsCalculator:
"""Returns the "MetricsCalculator" object being used by the Validator to handle metrics computations.""" # noqa: E501 # FIXME CoP
return self._metrics_calculator
@property
def data_context(self) -> Optional[AbstractDataContext]:
"""Reference to DataContext object handle."""
return self._data_context
@property
def expose_dataframe_methods(self) -> bool:
"""The "expose_dataframe_methods" getter property."""
return self._expose_dataframe_methods
@expose_dataframe_methods.setter
def expose_dataframe_methods(self, value: bool) -> None:
"""The "expose_dataframe_methods" setter property."""
self._expose_dataframe_methods = value
@property
def loaded_batch_ids(self) -> List[str]:
"""Getter for IDs of loaded Batch objects (convenience property)"""
return self._execution_engine.batch_manager.loaded_batch_ids
@property
def active_batch_data(self) -> Optional[BatchDataUnion]:
"""Getter for BatchData object from the currently-active Batch object (convenience property).""" # noqa: E501 # FIXME CoP
return self._execution_engine.batch_manager.active_batch_data
@property
def batch_cache(self) -> Dict[str, AnyBatch]:
"""Getter for dictionary of Batch objects (convenience property)"""
return self._execution_engine.batch_manager.batch_cache
@property
def batches(self) -> Dict[str, AnyBatch]:
"""Getter for dictionary of Batch objects (alias convenience property, to be deprecated)"""
return self.batch_cache
@property
def active_batch_id(self) -> Optional[str]:
"""Getter for batch_id of active Batch (convenience property)"""
return self._execution_engine.batch_manager.active_batch_id
@property
def active_batch(self) -> Optional[AnyBatch]:
"""Getter for active Batch (convenience property)"""
return self._execution_engine.batch_manager.active_batch
@property
def active_batch_spec(self) -> Optional[BatchSpec]:
"""Getter for batch_spec of active Batch (convenience property)"""
return self._execution_engine.batch_manager.active_batch_spec
@property
def active_batch_markers(self) -> Optional[BatchMarkers]:
"""Getter for batch_markers of active Batch (convenience property)"""
return self._execution_engine.batch_manager.active_batch_markers
@property
def active_batch_definition(self) -> Optional[LegacyBatchDefinition]:
"""Getter for batch_definition of active Batch (convenience property)"""
return self._execution_engine.batch_manager.active_batch_definition
@property
def expectation_suite(self) -> ExpectationSuite:
return self._expectation_suite
@expectation_suite.setter
def expectation_suite(self, value: ExpectationSuite) -> None:
self._initialize_expectations(
expectation_suite=value,
expectation_suite_name=value.name,
)
@property
def expectation_suite_name(self) -> str:
"""Gets the current expectation_suite name of this data_asset as stored in the expectations configuration.""" # noqa: E501 # FIXME CoP
return self._expectation_suite.name
@expectation_suite_name.setter
def expectation_suite_name(self, name: str) -> None:
"""Sets the expectation_suite name of this data_asset as stored in the expectations configuration.""" # noqa: E501 # FIXME CoP
self._expectation_suite.name = name
def load_batch_list(self, batch_list: Sequence[Batch | FluentBatch]) -> None:
self._execution_engine.batch_manager.load_batch_list(batch_list=batch_list)
def get_metric(
self,
metric: MetricConfiguration,
) -> Any:
"""Convenience method, return the value of the requested metric.
Args:
metric: MetricConfiguration
Returns:
The value of the requested metric.
"""
return self._metrics_calculator.get_metric(metric=metric)
def get_metrics(
self,
metrics: Dict[str, MetricConfiguration],
) -> Dict[str, Any]:
"""
Convenience method that resolves requested metrics (specified as dictionary, keyed by MetricConfiguration ID).
Args:
metrics: Dictionary of desired metrics to be resolved; metric_name is key and MetricConfiguration is value.
Returns:
Return Dictionary with requested metrics resolved, with metric_name as key and computed metric as value.
""" # noqa: E501 # FIXME CoP
return self._metrics_calculator.get_metrics(metrics=metrics)
def compute_metrics(
self,
metric_configurations: List[MetricConfiguration],
runtime_configuration: Optional[dict] = None,
min_graph_edges_pbar_enable: int = 0,
# Set to low number (e.g., 3) to suppress progress bar for small graphs.
) -> tuple[_MetricsDict, _AbortedMetricsInfoDict]:
"""
Convenience method that computes requested metrics (specified as elements of "MetricConfiguration" list).
Args:
metric_configurations: List of desired MetricConfiguration objects to be resolved.
runtime_configuration: Additional run-time settings (see "Validator.DEFAULT_RUNTIME_CONFIGURATION").
min_graph_edges_pbar_enable: Minumum number of graph edges to warrant showing progress bars.
Returns:
Tuple of two elements, the first is a dictionary with requested metrics resolved,
with unique metric ID as key and computed metric as value. The second is a dictionary of the
aborted metrics information, with metric ID as key if any metrics were aborted.
""" # noqa: E501 # FIXME CoP
return self._metrics_calculator.compute_metrics(
metric_configurations=metric_configurations,
runtime_configuration=runtime_configuration,
min_graph_edges_pbar_enable=min_graph_edges_pbar_enable,
)
def columns(self, domain_kwargs: Optional[Dict[str, Any]] = None) -> List[str]:
"""Convenience method to obtain Batch columns.
Arguments:
domain_kwargs: Optional dictionary of domain kwargs (e.g., containing "batch_id").
Returns:
The list of Batch columns.
"""
return self._metrics_calculator.columns(domain_kwargs=domain_kwargs)
def head(
self,
n_rows: int = 5,
domain_kwargs: Optional[Dict[str, Any]] = None,
fetch_all: bool = False,
) -> pd.DataFrame:
"""Convenience method to return the first several rows or records from a Batch of data.
Args:
n_rows: The number of rows to return.
domain_kwargs: If provided, the domain for which to return records.
fetch_all: If True, ignore n_rows and return the entire batch.
Returns:
A Pandas DataFrame containing the records' data.
"""
return self._metrics_calculator.head(
n_rows=n_rows, domain_kwargs=domain_kwargs, fetch_all=fetch_all
)
@override
def __dir__(self) -> List[str]:
"""
This custom magic method is used to enable expectation tab completion on Validator objects.
It also allows users to call Pandas.DataFrame methods on Validator objects
"""
validator_attrs = set(super().__dir__())
class_expectation_impls = set(list_registered_expectation_implementations())
# execution_engine_expectation_impls = (
# {
# attr_name
# for attr_name in self.execution_engine.__dir__()
# if attr_name.startswith("expect_")
# }
# if self.execution_engine
# else set()
# )
combined_dir = (
validator_attrs | class_expectation_impls
# | execution_engine_expectation_impls
)
if self._expose_dataframe_methods:
combined_dir | set(dir(pd.DataFrame))
return list(combined_dir)
def _determine_progress_bars(self) -> bool:
enable: bool = True
if self._data_context:
progress_bars = self._data_context.progress_bars
# If progress_bars are not present, assume we want them enabled
if progress_bars is not None:
if "globally" in progress_bars:
enable = bool(progress_bars["globally"])
if "metric_calculations" in progress_bars:
enable = bool(progress_bars["metric_calculations"])
return enable
def __getattr__(self, name):
if self.active_batch is None:
raise TypeError("active_batch cannot be None") # noqa: TRY003 # FIXME CoP
name = name.lower()
if (
name.startswith("expect_") or name == "unexpected_rows_expectation"
) and get_expectation_impl(name):
return self.validate_expectation(name)
elif (
self._expose_dataframe_methods
and isinstance(self.active_batch.data, PandasBatchData)
and hasattr(pd.DataFrame, name)
):
return getattr(self.active_batch.data.dataframe, name)
else:
raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'") # noqa: TRY003 # FIXME CoP
def validate_expectation(self, name: str) -> Callable:  # noqa: C901, PLR0915 # FIXME CoP
    """
    Given the name of an Expectation, obtains the Class-first Expectation implementation and utilizes the
    expectation's validate method to obtain a validation result. Also adds in the runtime configuration

    Args:
        name (str): The name of the Expectation being validated

    Returns:
        A callable closure (``inst_expectation``) that, when invoked with the
        expectation's args/kwargs, produces the ExpectationValidationResult.
    """  # noqa: E501 # FIXME CoP
    expectation_impl = get_expectation_impl(name)

    def inst_expectation(*args: dict, **kwargs):  # noqa: C901, PLR0912 # FIXME CoP
        # this is used so that exceptions are caught appropriately when they occur in expectation config  # noqa: E501 # FIXME CoP
        # TODO: JPC - THIS LOGIC DOES NOT RESPECT DEFAULTS SET BY USERS IN THE VALIDATOR VS IN THE EXPECTATION  # noqa: E501 # FIXME CoP
        # DEVREL has action to develop a new plan in coordination with MarioPod
        # Normalize kwargs to JSON-serializable values; `meta` is handled separately.
        expectation_kwargs = recursively_convert_to_json_serializable(kwargs)
        meta: Optional[dict] = expectation_kwargs.pop("meta", None)
        # Start from validator-level defaults restricted to runtime keys, then
        # let call-site kwargs override them.
        basic_default_expectation_args: dict = {
            k: v
            for k, v in self.default_expectation_args.items()
            if k in Validator.RUNTIME_KEYS
        }
        basic_runtime_configuration: dict = copy.deepcopy(basic_default_expectation_args)
        basic_runtime_configuration.update(
            {k: v for k, v in kwargs.items() if k in Validator.RUNTIME_KEYS}
        )
        allowed_config_keys: Tuple[str, ...] = expectation_impl.get_allowed_config_keys()
        args_keys: Tuple[str, ...] = expectation_impl.args_keys or tuple()
        arg_name: str
        idx: int
        arg: dict
        # Map positional args onto the expectation's declared argument names.
        for idx, arg in enumerate(args):
            try:
                arg_name = args_keys[idx]
                if arg_name in allowed_config_keys:
                    expectation_kwargs[arg_name] = arg
                if arg_name == "meta":
                    logger.warning(
                        "Setting meta via args could be ambiguous; please use a kwarg instead."
                    )
                    meta = arg
            except IndexError:
                # More positional args supplied than the expectation declares.
                raise InvalidExpectationConfigurationError(  # noqa: TRY003 # FIXME CoP
                    f"Invalid positional argument: {arg}"
                )
        configuration: ExpectationConfiguration | None = None
        try:
            expectation = expectation_impl(**expectation_kwargs, meta=meta)
            configuration = expectation.configuration
            if self.interactive_evaluation:
                configuration.process_suite_parameters(
                    self._expectation_suite.suite_parameters,
                    True,
                    self._data_context,
                )
            """Given an implementation and a configuration for any Expectation, returns its validation result"""  # noqa: E501 # FIXME CoP
            if not self.interactive_evaluation and not self._active_validation:
                # Neither interactive nor part of an active run: return a shell
                # result carrying only the configuration, without evaluating.
                validation_result = ExpectationValidationResult(
                    expectation_config=copy.deepcopy(configuration)
                )
            else:
                validation_result = expectation.validate_(
                    validator=self,
                    suite_parameters=self._expectation_suite.suite_parameters,
                    data_context=self._data_context,
                    runtime_configuration=basic_runtime_configuration,
                )
            # If validate has set active_validation to true, then we do not save the config to avoid  # noqa: E501 # FIXME CoP
            # saving updating expectation configs to the same suite during validation runs
            if self._active_validation is True:
                stored_config = configuration.get_raw_configuration()
            else:
                # Append the expectation to the config.
                stored_config = self._expectation_suite._add_expectation(
                    expectation_configuration=configuration.get_raw_configuration(),
                )
            # If there was no interactive evaluation, success will not have been computed.
            if validation_result.success is not None:
                # Add a "success" object to the config
                stored_config.success_on_last_run = validation_result.success
        except Exception as err:
            if basic_runtime_configuration.get("catch_exceptions"):
                # Convert the failure into an unsuccessful validation result
                # instead of propagating, preserving the traceback for review.
                exception_traceback = traceback.format_exc()
                exception_message = f"{type(err).__name__}: {err!s}"
                exception_info = ExceptionInfo(
                    exception_traceback=exception_traceback,
                    exception_message=exception_message,
                )
                if not configuration:
                    # Failure happened before a configuration could be built;
                    # synthesize one from the raw inputs for the result.
                    configuration = ExpectationConfiguration(
                        type=name, kwargs=expectation_kwargs, meta=meta
                    )
                validation_result = ExpectationValidationResult(
                    success=False,
                    exception_info=exception_info,
                    expectation_config=configuration,
                )
            else:
                raise err  # noqa: TRY201 # FIXME CoP
        if self._include_rendered_content:
            validation_result.render()
        return validation_result

    # Make the closure introspect like the named expectation it wraps.
    inst_expectation.__name__ = name
    inst_expectation.__doc__ = expectation_impl.__doc__
    return inst_expectation
def list_available_expectation_types(self) -> List[str]:
    """Return every "expect_*" attribute name this validator exposes.

    Relies on ``dir(self)``, which (via ``__dir__``) includes both registered
    expectation implementations and regular attributes.
    """
    return [attr for attr in dir(self) if attr.startswith("expect_")]
def graph_validate(
    self,
    configurations: List[ExpectationConfiguration],
    runtime_configuration: Optional[dict] = None,
) -> List[ExpectationValidationResult]:
    """Obtains validation dependencies for each metric using the implementation of their
    associated expectation, then proceeds to add these dependencies to the validation graph,
    supply readily available metric implementations to fulfill current metric requirements,
    and validate these metrics.

    Args:
        configurations(List[ExpectationConfiguration]): A list of needed Expectation
            Configurations that will be used to supply domain and values for metrics.
        runtime_configuration (dict): A dictionary of runtime keyword arguments, controlling
            semantics, such as the result_format.

    Returns:
        A list of Validations, validating that all necessary metrics are available.
    """
    if runtime_configuration is None:
        runtime_configuration = {}
    # Exceptions are caught by default; an explicit falsy value disables that.
    if runtime_configuration.get("catch_exceptions", True):
        catch_exceptions = True
    else:
        catch_exceptions = False
    expectation_validation_graphs: List[ExpectationValidationGraph]
    evrs: List[ExpectationValidationResult]
    processed_configurations: List[ExpectationConfiguration] = []
    # Phase 1: build a metric-dependency sub-graph per expectation; configs that
    # fail here are already converted into failed EVRs (when catching).
    (
        expectation_validation_graphs,
        evrs,
        processed_configurations,
    ) = self._generate_metric_dependency_subgraphs_for_each_expectation_configuration(
        expectation_configurations=configurations,
        processed_configurations=processed_configurations,
        catch_exceptions=catch_exceptions,
        runtime_configuration=runtime_configuration,
    )
    # Phase 2: merge all sub-graphs into one suite-level graph.
    graph: ValidationGraph = self._generate_suite_level_graph_from_expectation_level_sub_graphs(
        expectation_validation_graphs=expectation_validation_graphs
    )
    resolved_metrics: _MetricsDict
    # Phase 3: resolve all metrics at once; metric-level failures are traced
    # back to the expectations that depend on them.
    try:
        (
            resolved_metrics,
            evrs,
            processed_configurations,
        ) = self._resolve_suite_level_graph_and_process_metric_evaluation_errors(
            graph=graph,
            runtime_configuration=runtime_configuration,
            expectation_validation_graphs=expectation_validation_graphs,
            evrs=evrs,
            processed_configurations=processed_configurations,
            show_progress_bars=self._determine_progress_bars(),
        )
    except Exception as err:
        # If a general Exception occurs during the execution of "ValidationGraph.resolve()", then  # noqa: E501 # FIXME CoP
        # all expectations in the suite are impacted, because it is impossible to attribute the failure to a metric.  # noqa: E501 # FIXME CoP
        if catch_exceptions:
            exception_traceback: str = traceback.format_exc()
            evrs = self._catch_exceptions_in_failing_expectation_validations(
                exception_traceback=exception_traceback,
                exception=err,
                failing_expectation_configurations=processed_configurations,
                evrs=evrs,
            )
            return evrs
        else:
            raise err  # noqa: TRY201 # FIXME CoP
    configuration: ExpectationConfiguration
    result: ExpectationValidationResult
    # Phase 4: validate each surviving expectation against the resolved metrics.
    for configuration in processed_configurations:
        try:
            # Deep-copy so one expectation cannot mutate the shared runtime config.
            runtime_configuration_default = copy.deepcopy(runtime_configuration)
            expectation = configuration.to_domain_obj()
            result = expectation.metrics_validate(
                metrics=resolved_metrics,
                execution_engine=self._execution_engine,
                runtime_configuration=runtime_configuration_default,
            )
            evrs.append(result)
        except Exception as err:
            if catch_exceptions:
                # Only this one expectation is impacted; record and continue.
                exception_traceback = traceback.format_exc()
                evrs = self._catch_exceptions_in_failing_expectation_validations(
                    exception_traceback=exception_traceback,
                    exception=err,
                    failing_expectation_configurations=[configuration],
                    evrs=evrs,
                )
            else:
                raise err  # noqa: TRY201 # FIXME CoP
    return evrs
def _generate_metric_dependency_subgraphs_for_each_expectation_configuration(
    self,
    expectation_configurations: List[ExpectationConfiguration],
    processed_configurations: List[ExpectationConfiguration],
    catch_exceptions: bool,
    runtime_configuration: Optional[dict] = None,
) -> Tuple[
    List[ExpectationValidationGraph],
    List[ExpectationValidationResult],
    List[ExpectationConfiguration],
]:
    """Build a metric-dependency sub-graph for each expectation configuration.

    Args:
        expectation_configurations: Configurations to build sub-graphs for.
        processed_configurations: Accumulator; configurations whose sub-graph
            was built successfully are appended to it (mutated in place).
        catch_exceptions: When True, graph-construction failures become failed
            ExpectationValidationResults instead of propagating.
        runtime_configuration: Runtime keyword arguments (e.g. result_format).

    Returns:
        Tuple of (expectation-level sub-graphs, failed validation results,
        successfully processed configurations).

    Raises:
        InvalidExpectationConfigurationError: If a configuration lacks a type.
    """
    # While evaluating expectation configurations, create sub-graph for every metric dependency and incorporate  # noqa: E501 # FIXME CoP
    # these sub-graphs under corresponding expectation-level sub-graph (state of ExpectationValidationGraph object).  # noqa: E501 # FIXME CoP
    expectation_validation_graphs: List[ExpectationValidationGraph] = []
    evrs: List[ExpectationValidationResult] = []
    configuration: ExpectationConfiguration
    evaluated_config: ExpectationConfiguration
    metric_configuration: MetricConfiguration
    graph: ValidationGraph
    for configuration in expectation_configurations:
        # Validating. An explicit check (rather than `assert`) so the guard
        # survives `python -O`, which strips assert statements.
        if configuration.type is None:
            raise InvalidExpectationConfigurationError(  # noqa: TRY003 # FIXME CoP
                "Given configuration should include expectation type"
            )
        # Work on a copy so the caller's configuration is never mutated.
        evaluated_config = copy.deepcopy(configuration)
        if self.active_batch_id:
            evaluated_config.kwargs.update({"batch_id": self.active_batch_id})
        expectation = evaluated_config.to_domain_obj()
        validation_dependencies: ValidationDependencies = (
            expectation.get_validation_dependencies(
                execution_engine=self._execution_engine,
                runtime_configuration=runtime_configuration,
            )
        )
        try:
            expectation_validation_graph: ExpectationValidationGraph = ExpectationValidationGraph(  # noqa: E501 # FIXME CoP
                configuration=evaluated_config,
                graph=self._metrics_calculator.build_metric_dependency_graph(
                    metric_configurations=validation_dependencies.get_metric_configurations(),
                    runtime_configuration=runtime_configuration,
                ),
            )
            expectation_validation_graphs.append(expectation_validation_graph)
            processed_configurations.append(evaluated_config)
        except Exception as err:
            if catch_exceptions:
                # Record the failure as an unsuccessful result and keep going
                # with the remaining configurations.
                exception_traceback: str = traceback.format_exc()
                exception_message: str = str(err)
                exception_info = ExceptionInfo(
                    exception_traceback=exception_traceback,
                    exception_message=exception_message,
                )
                result = ExpectationValidationResult(
                    success=False,
                    exception_info=exception_info,
                    expectation_config=evaluated_config,
                )
                evrs.append(result)
            else:
                raise err  # noqa: TRY201 # FIXME CoP
    return expectation_validation_graphs, evrs, processed_configurations
def _generate_suite_level_graph_from_expectation_level_sub_graphs(
    self,
    expectation_validation_graphs: List[ExpectationValidationGraph],
) -> ValidationGraph:
    """Merge the edges of every expectation-level sub-graph into one
    suite-level ValidationGraph bound to this validator's execution engine.
    """
    # Collect edges from all expectation-level sub-graphs and incorporate them under common suite-level graph.  # noqa: E501 # FIXME CoP
    merged_edges: List[MetricEdge] = []
    for sub_graph in expectation_validation_graphs:
        merged_edges.extend(sub_graph.graph.edges)
    return ValidationGraph(execution_engine=self._execution_engine, edges=merged_edges)
def _resolve_suite_level_graph_and_process_metric_evaluation_errors(  # noqa: PLR0913 # FIXME CoP
    self,
    graph: ValidationGraph,
    runtime_configuration: dict,
    expectation_validation_graphs: List[ExpectationValidationGraph],
    evrs: List[ExpectationValidationResult],
    processed_configurations: List[ExpectationConfiguration],
    show_progress_bars: bool,
) -> Tuple[
    _MetricsDict,
    List[ExpectationValidationResult],
    List[ExpectationConfiguration],
]:
    """Resolve the suite-level metric graph and reject expectations whose
    metrics failed to resolve.

    Returns a tuple of (resolved metrics, validation results -- extended with a
    failed result per rejected expectation, remaining cleared configurations).
    """
    # Resolve overall suite-level graph and process any MetricResolutionError type exceptions that might occur.  # noqa: E501 # FIXME CoP
    resolved_metrics: _MetricsDict
    aborted_metrics_info: _AbortedMetricsInfoDict
    (
        resolved_metrics,
        aborted_metrics_info,
    ) = self._metrics_calculator.resolve_validation_graph(
        graph=graph,
        runtime_configuration=runtime_configuration,
        min_graph_edges_pbar_enable=0,
    )
    # Trace MetricResolutionError occurrences to expectations relying on corresponding malfunctioning metrics.  # noqa: E501 # FIXME CoP
    rejected_configurations: List[ExpectationConfiguration] = []
    for expectation_validation_graph in expectation_validation_graphs:
        metric_exception_info: Dict[str, Union[MetricConfiguration, ExceptionInfo, int]] = (
            expectation_validation_graph.get_exception_info(metric_info=aborted_metrics_info)
        )
        # Report all MetricResolutionError occurrences impacting expectation and append it to rejected list.  # noqa: E501 # FIXME CoP
        if len(metric_exception_info) > 0:
            configuration = expectation_validation_graph.configuration
            result = ExpectationValidationResult(
                success=False,
                exception_info=metric_exception_info,
                expectation_config=configuration,
            )
            evrs.append(result)
            # Guard against double-rejection when several metrics of the same
            # expectation failed.
            if configuration not in rejected_configurations:
                rejected_configurations.append(configuration)
    # Exclude all rejected expectations from list of expectations cleared for validation.
    for configuration in rejected_configurations:
        processed_configurations.remove(configuration)
    return resolved_metrics, evrs, processed_configurations
@staticmethod
def _catch_exceptions_in_failing_expectation_validations(
    exception_traceback: str,
    exception: Exception,
    failing_expectation_configurations: List[ExpectationConfiguration],
    evrs: List[ExpectationValidationResult],
) -> List[ExpectationValidationResult]:
    """
    Catch exceptions in failing Expectation validations and convert to unsuccessful ExpectationValidationResult

    Args:
        exception_traceback: Traceback related to raised Exception
        exception: Exception raised
        failing_expectation_configurations: ExpectationConfigurations that failed
        evrs: List of ExpectationValidationResult objects to append failures to (mutated in place)

    Returns:
        List of ExpectationValidationResult objects with unsuccessful ExpectationValidationResult objects appended
    """  # noqa: E501 # FIXME CoP
    # One ExceptionInfo instance is shared across all failing configurations,
    # since they all failed for the same underlying reason.
    shared_exception_info = ExceptionInfo(
        exception_traceback=exception_traceback,
        exception_message=str(exception),
    )
    evrs.extend(
        ExpectationValidationResult(
            success=False,
            exception_info=shared_exception_info,
            expectation_config=failing_config,
        )
        for failing_config in failing_expectation_configurations
    )
    return evrs
def remove_expectation(
self,
expectation_configuration: ExpectationConfiguration,
match_type: str = "domain",
remove_multiple_matches: bool = False,
id: Optional[str] = None,
) -> List[ExpectationConfiguration]:
"""Remove an ExpectationConfiguration from the ExpectationSuite associated with the Validator.
Args:
expectation_configuration: A potentially incomplete (partial) Expectation Configuration to match against.
match_type: This determines what kwargs to use when matching. Options are:
- 'domain' to match based on the data evaluated by that expectation
- 'success' to match based on all configuration parameters that influence whether an expectation succeeds on a given batch of data
- 'runtime' to match based on all configuration parameters.
remove_multiple_matches: If True, will remove multiple matching expectations.
id: Great Expectations Cloud id for an Expectation.
Returns:
The list of deleted ExpectationConfigurations.
Raises:
TypeError: Must provide either expectation_configuration or id.
ValueError: No match or multiple matches found (and remove_multiple_matches=False).
""" # noqa: E501 # FIXME CoP
return self._expectation_suite.remove_expectation(
expectation_configuration=expectation_configuration,
match_type=match_type,
remove_multiple_matches=remove_multiple_matches,
id=id,
)
def discard_failing_expectations(self) -> None:
"""Removes any expectations from the validator where the validation has failed"""
res = self.validate(only_return_failures=True).results # type: ignore[union-attr] # ExpectationValidationResult has no `.results` attr
if any(res):
for item in res:
config = item.expectation_config
if not config:
raise ValueError( # noqa: TRY003 # FIXME CoP
"ExpectationValidationResult does not have an expectation_config"
)
self.remove_expectation(
expectation_configuration=config,
match_type="runtime",
)
warnings.warn(f"Removed {len(res)} expectations that were 'False'")
def get_default_expectation_arguments(self) -> dict:
"""Fetch default expectation arguments for this data_asset
Returns:
A dictionary containing all the current default expectation arguments for a data_asset
Ex::
{
"catch_exceptions" : False,
"result_format" : 'BASIC'
}
See also:
set_default_expectation_arguments
"""
return self.default_expectation_args
@property
def cloud_mode(self) -> bool:
    """
    Wrapper around cloud_mode property of associated Data Context

    True iff the attached Data Context is a CloudDataContext instance
    (False as well when no Data Context is attached).
    """
    # Imported locally, presumably to avoid a circular import at module load
    # time -- TODO confirm before hoisting to the top of the file.
    from great_expectations.data_context.data_context.cloud_data_context import (
        CloudDataContext,
    )

    return isinstance(self._data_context, CloudDataContext)
@property
def ge_cloud_mode(self) -> bool:
    """Deprecated alias for ``cloud_mode``."""
    # <GE_RENAME> Deprecated 0.15.37
    return self.cloud_mode
@property
def default_expectation_args(self) -> dict:
    """A getter for default Expectation arguments

    Note: returns the underlying dict itself (not a copy), so callers can
    observe later changes made via ``set_default_expectation_argument``.
    """
    return self._default_expectation_args
def set_default_expectation_argument(self, argument: str, value) -> None:
"""
Set a default expectation argument for this data_asset
Args:
argument (string): The argument to be replaced
value : The New argument to use for replacement
Returns:
None
See also:
get_default_expectation_arguments
"""
self._default_expectation_args[argument] = value
def get_expectation_suite(  # noqa: C901, PLR0912, PLR0913 # FIXME CoP
    self,
    discard_failed_expectations: bool = True,
    discard_result_format_kwargs: bool = True,
    discard_include_config_kwargs: bool = True,
    discard_catch_exceptions_kwargs: bool = True,
    suppress_warnings: bool = False,
    suppress_logging: bool = False,
) -> ExpectationSuite:
    """Get a copy of the Expectation Suite from the Validator object.

    Args:
        discard_failed_expectations: Omit Expectations which failed on their last run.
        discard_result_format_kwargs: Omit `result_format` from each Expectation.
        discard_include_config_kwargs: Omit `include_config` from each Expectation.
        discard_catch_exceptions_kwargs: Omit `catch_exceptions` from each Expectation.
        suppress_warnings: Do not log warnings.
        suppress_logging: Do not log anything.

    Returns:
        ExpectationSuite object (a deep copy; the Validator's own suite is untouched).
    """
    expectation_suite = copy.deepcopy(self.expectation_suite)
    expectations = expectation_suite.expectation_configurations
    # Tally of what gets dropped, keyed by category, for log/warning messages.
    discards: defaultdict[str, int] = defaultdict(int)
    if discard_failed_expectations:
        new_expectations = []
        for expectation in expectations:
            # Note: This is conservative logic.
            # Instead of retaining expectations IFF success==True, it discard expectations IFF success==False.  # noqa: E501 # FIXME CoP
            # In cases where expectation.success is missing or None, expectations are *retained*.  # noqa: E501 # FIXME CoP
            # Such a case could occur if expectations were loaded from a config file and never run.  # noqa: E501 # FIXME CoP
            if expectation.success_on_last_run is False:
                discards["failed_expectations"] += 1
            else:
                new_expectations.append(expectation)
        expectations = new_expectations
    message = f"\t{len(expectations)} expectation(s) included in expectation_suite."
    if discards["failed_expectations"] > 0 and not suppress_warnings:
        message += (
            f" Omitting {discards['failed_expectations']} expectation(s) that failed when last run; set "  # noqa: E501 # FIXME CoP
            "discard_failed_expectations=False to include them."
        )
    for expectation in expectations:
        # FIXME: Factor this out into a new function. The logic is duplicated in remove_expectation,  # noqa: E501 # FIXME CoP
        # which calls _copy_and_clean_up_expectation
        # Reset run-state so the exported suite carries no stale success flags.
        expectation.success_on_last_run = None
        if discard_result_format_kwargs:
            if "result_format" in expectation.kwargs:
                del expectation.kwargs["result_format"]
                discards["result_format"] += 1
        if discard_include_config_kwargs:
            if "include_config" in expectation.kwargs:
                del expectation.kwargs["include_config"]
                discards["include_config"] += 1
        if discard_catch_exceptions_kwargs:
            if "catch_exceptions" in expectation.kwargs:
                del expectation.kwargs["catch_exceptions"]
                discards["catch_exceptions"] += 1
    settings_message = ""
    if discards["result_format"] > 0 and not suppress_warnings:
        settings_message += " result_format"
    if discards["include_config"] > 0 and not suppress_warnings:
        settings_message += " include_config"
    if discards["catch_exceptions"] > 0 and not suppress_warnings:
        settings_message += " catch_exceptions"
    # Each appended setting name starts with a space, so length > 1 implies at
    # least one setting was actually added above.
    if len(settings_message) > 1:  # Only add this if we added one of the settings above.
        settings_message += " settings filtered."
    # Rebuild the suite's expectation list from the filtered set.
    expectation_suite.expectations = []
    expectation_suite.add_expectation_configurations(expectation_configurations=expectations)
    if not suppress_logging:
        logger.info(message + settings_message)
    return expectation_suite
def save_expectation_suite(  # noqa: PLR0913 # FIXME CoP
    self,
    filepath: Optional[str] = None,
    discard_failed_expectations: bool = True,
    discard_result_format_kwargs: bool = True,
    discard_include_config_kwargs: bool = True,
    discard_catch_exceptions_kwargs: bool = True,
    suppress_warnings: bool = False,
) -> None:
    """Write the Expectation Suite (e.g. from interactive evaluation) to the Expectation Store associated with the Validator's Data Context.

    If `filepath` is provided, the Data Context configuration will be ignored and the configuration will be written, as JSON, to the specified file.

    Args:
        filepath: The location and name to write the JSON config file to. This parameter overrides the Data Context configuration.
        discard_failed_expectations: If True, excludes expectations that do not return `success = True`. If False, all expectations are saved.
        discard_result_format_kwargs: If True, the `result_format` attribute for each expectation is not included in the saved configuration.
        discard_include_config_kwargs: If True, the `include_config` attribute for each expectation is not included in the saved configuration.
        discard_catch_exceptions_kwargs: If True, the `catch_exceptions` attribute for each expectation is not included in the saved configuration.
        suppress_warnings: If True, all warnings raised by Great Expectations, as a result of dropped expectations, are suppressed.

    Raises:
        ValueError: Must configure a Data Context when instantiating the Validator or pass in `filepath`.
    """  # noqa: E501 # FIXME CoP
    # Export a filtered copy of the suite per the discard flags.
    expectation_suite: ExpectationSuite = self.get_expectation_suite(
        discard_failed_expectations,
        discard_result_format_kwargs,
        discard_include_config_kwargs,
        discard_catch_exceptions_kwargs,
        suppress_warnings,
    )
    if filepath is None and self._data_context is not None:
        # No explicit file target: persist through the Data Context's store.
        self._data_context.suites.add(expectation_suite)
        if self.cloud_mode:
            # In cloud mode, re-fetch the stored suite (it may have been
            # enriched server-side, e.g. with ids) and re-initialize from it.
            updated_suite = self._data_context.suites.get(expectation_suite.name)
            self._initialize_expectations(expectation_suite=updated_suite)
    elif filepath is not None:
        # Explicit file target overrides any Data Context configuration.
        with open(filepath, "w") as outfile:
            json.dump(
                expectationSuiteSchema.dump(expectation_suite),
                outfile,
                indent=2,
                sort_keys=True,
            )
    else:
        raise ValueError("Unable to save config: filepath or data_context must be available.")  # noqa: TRY003 # FIXME CoP
@deprecated_argument(
    argument_name="run_id",
    message="Only the str version of this argument is deprecated. run_id should be a RunIdentifier or dict. Support will be removed in 0.16.0.",  # noqa: E501 # FIXME CoP
    version="0.13.0",
)
def validate(  # noqa: C901, PLR0912, PLR0913 # FIXME CoP
    self,
    expectation_suite: str | ExpectationSuite | None = None,
    run_id: str | RunIdentifier | Dict[str, str] | None = None,
    data_context: Optional[Any] = None,  # Cannot type DataContext due to circular import
    suite_parameters: Optional[dict] = None,
    catch_exceptions: bool = True,
    result_format: Optional[str] = None,
    only_return_failures: bool = False,
    run_name: Optional[str] = None,
    run_time: Optional[str] = None,
    checkpoint_name: Optional[str] = None,
) -> Union[ExpectationValidationResult, ExpectationSuiteValidationResult]:
    # noinspection SpellCheckingInspection
    """Run all expectations and return the outcome of the run.

    Args:
        expectation_suite: If None, uses the Expectation Suite configuration generated during the current Validator session. If an `ExpectationSuite` object, uses it as the configuration. If a string, assumes it is a path to a JSON file, and loads it as the Expectation Suite configuration.
        run_id: Used to identify this validation result as part of a collection of validations.
        run_name: Used to identify this validation result as part of a collection of validations. Only used if a `run_id` is not passed. See DataContext for more information.
        run_time: Used to identify this validation result as part of a collection of validations. Only used if a `run_id` is not passed. See DataContext for more information.
        data_context: A datacontext object to use as part of validation for binding suite parameters and registering validation results. Overrides the Data Context configured when the Validator is instantiated.
        suite_parameters: If None, uses the suite parameters from the Expectation Suite provided or as part of the Data Asset. If a dict, uses the suite parameters in the dictionary.
        catch_exceptions: If True, exceptions raised by tests will not end validation and will be described in the returned report.
        result_format: If None, uses the default value ('BASIC' or as specified). If string, the returned expectation output follows the specified format ('BOOLEAN_ONLY','BASIC', etc.).
        only_return_failures: If True, expectation results are only returned when `success = False`.
        checkpoint_name: Name of the Checkpoint which invoked this Validator.validate() call against an Expectation Suite. It will be added to `meta` field of the returned ExpectationSuiteValidationResult.

    Returns:
        Object containing the results.

    Raises:
        Exception: Depending on the Data Context configuration and arguments, there are numerous possible exceptions that may be raised.
        GreatExpectationsError: If `expectation_suite` is a string it must point to an existing and readable file.
        ValidationError: If `expectation_suite` is a string, the file it points to must be valid JSON.
    """  # noqa: E501 # FIXME CoP
    # Capture the currently configured Data Context *before* entering the try
    # block, so the `finally` clause can always restore it.  Previously the
    # restoration only ran on the success path, which leaked a temporarily
    # overridden context whenever validation raised.
    validation_data_context = self._data_context
    # noinspection PyUnusedLocal
    try:
        validation_time = datetime.datetime.now(datetime.timezone.utc).strftime(
            "%Y%m%dT%H%M%S.%fZ"
        )
        assert not (run_id and run_name) and not (run_id and run_time), (
            "Please provide either a run_id or run_name and/or run_time."
        )
        # Normalize run_id: dicts become RunIdentifier kwargs; anything that is
        # not already a RunIdentifier is rebuilt from run_name/run_time.
        if isinstance(run_id, dict):
            run_id = RunIdentifier(**run_id)
        elif not isinstance(run_id, RunIdentifier):
            run_id = RunIdentifier(run_name=run_name, run_time=run_time)
        self._active_validation = True
        # If a different validation data context was provided, override
        if data_context is None and self._data_context is not None:
            data_context = self._data_context
        elif data_context is not None:
            # temporarily set self._data_context so it is used inside the expectation decorator
            self._data_context = data_context
        if expectation_suite is None:
            expectation_suite = self.get_expectation_suite(
                discard_failed_expectations=False,
                discard_result_format_kwargs=False,
                discard_include_config_kwargs=False,
                discard_catch_exceptions_kwargs=False,
            )
        elif isinstance(expectation_suite, str):
            # A string is treated as a path to a JSON suite file.
            try:
                with open(expectation_suite) as infile:
                    expectation_suite = expectationSuiteSchema.loads(infile.read())
            except ValidationError:
                raise
            except OSError:
                raise GreatExpectationsError(  # noqa: TRY003 # FIXME CoP
                    f"Unable to load expectation suite: IO error while reading {expectation_suite}"  # noqa: E501 # FIXME CoP
                )
        if not isinstance(expectation_suite, ExpectationSuite):
            logger.error(
                "Unable to validate using the provided value for expectation suite; does it need to be "  # noqa: E501 # FIXME CoP
                "loaded from a dictionary?"
            )
            return ExpectationValidationResult(success=False)
        # Suite parameter priority is
        # 1. from provided parameters
        # 2. from expectation configuration
        # 3. from data context
        # So, we load them in reverse order
        runtime_suite_parameters: dict = {}
        if expectation_suite.suite_parameters:
            runtime_suite_parameters.update(expectation_suite.suite_parameters)
        if suite_parameters is not None:
            runtime_suite_parameters.update(suite_parameters)
        # Convert suite parameters to be json-serializable
        runtime_suite_parameters = recursively_convert_to_json_serializable(
            runtime_suite_parameters
        )
        # Warn if our version is different from the version in the configuration
        # TODO: Deprecate "great_expectations.__version__"
        expectations_to_evaluate = self.process_expectations_for_validation(
            expectation_suite.expectation_configurations,
            runtime_suite_parameters,
        )
        runtime_configuration = self._get_runtime_configuration(
            catch_exceptions=catch_exceptions, result_format=result_format
        )
        results = self.graph_validate(
            configurations=expectations_to_evaluate,
            runtime_configuration=runtime_configuration,
        )
        if self._include_rendered_content:
            for validation_result in results:
                validation_result.render()
        # Statistics are computed over the full result set, before any
        # only_return_failures filtering.
        statistics = calc_validation_statistics(results)
        if only_return_failures:
            abbrev_results = []
            for exp in results:
                if not exp.success:
                    abbrev_results.append(exp)
            results = abbrev_results
        expectation_suite_name = expectation_suite.name
        result = ExpectationSuiteValidationResult(
            results=results,
            success=statistics.success,
            suite_name=expectation_suite_name,
            statistics={
                "evaluated_expectations": statistics.evaluated_expectations,
                "successful_expectations": statistics.successful_expectations,
                "unsuccessful_expectations": statistics.unsuccessful_expectations,
                "success_percent": statistics.success_percent,
            },
            suite_parameters=runtime_suite_parameters,
            meta={
                "great_expectations_version": ge_version,
                "expectation_suite_name": expectation_suite_name,
                "run_id": run_id,
                "batch_spec": convert_to_json_serializable(self.active_batch_spec),
                "batch_markers": self.active_batch_markers,
                "active_batch_definition": self.active_batch_definition,
                "validation_time": validation_time,
                "checkpoint_name": checkpoint_name,
            },
            batch_id=self.active_batch_id,
        )
    finally:
        self._active_validation = False
        # Restore the original Data Context unconditionally (success or error),
        # so a temporary override never outlives this call.
        self._data_context = validation_data_context

    return result
def process_expectations_for_validation(
self,
expectation_configurations: list[ExpectationConfiguration],
suite_parameters: Optional[dict[str, Any]] = None,
) -> list[ExpectationConfiguration]:
"""Substitute suite parameters into the provided expectations and sort by column."""
NO_COLUMN = "_nocolumn" # just used to group expectations that don't specify a column
columns: dict[str, list[ExpectationConfiguration]] = {}
for expectation in expectation_configurations:
expectation.process_suite_parameters(
suite_parameters=suite_parameters,
interactive_evaluation=self.interactive_evaluation,
data_context=self._data_context,
)
if "column" in expectation.kwargs and isinstance(expectation.kwargs["column"], str):
column = expectation.kwargs["column"]
else:
column = NO_COLUMN
if column not in columns:
columns[column] = []
columns[column].append(expectation)
expectations_to_evaluate = []
for col in columns.values():
expectations_to_evaluate.extend(col)
return expectations_to_evaluate
def get_suite_parameter(self, parameter_name, default_value=None):
"""
Get an suite parameter value that has been stored in meta.
Args:
parameter_name (string): The name of the parameter to store.
default_value (any): The default value to be returned if the parameter is not found.
Returns:
The current value of the suite parameter.
"""
if parameter_name in self._expectation_suite.suite_parameters:
return self._expectation_suite.suite_parameters[parameter_name]
else:
return default_value
def set_suite_parameter(self, parameter_name, parameter_value) -> None:
    """
    Provide a value to be stored in the data_asset suite_parameters object and used to evaluate
    parameterized expectations.

    Args:
        parameter_name (string): The name of the kwarg to be replaced at evaluation time
        parameter_value (any): The value to be used
    """
    # Values are normalized to JSON-serializable form before storage so the
    # suite remains serializable.
    serializable_value = convert_to_json_serializable(parameter_value)
    self._expectation_suite.suite_parameters.update({parameter_name: serializable_value})
def test_expectation_function(self, function: Callable, *args, **kwargs) -> Callable:
    """Test a generic expectation function

    Args:
        function (func): The function to be tested. (Must be a valid expectation function.)
        *args : Positional arguments to be passed the function
        **kwargs : Keyword arguments to be passed the function

    Returns:
        A JSON-serializable expectation result object.

    Notes:
        This function is a thin layer to allow quick testing of new expectation functions, without having to \
        define custom classes, etc. To use developed expectations from the command-line tool, you will still need \
        to define custom classes, etc.

        Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for
        more information.
    """  # noqa: E501 # FIXME CoP
    # noinspection SpellCheckingInspection
    # Drop the implicit first parameter (self/data_asset) from the arg list.
    argument_names = inspect.getfullargspec(function).args[1:]
    decorated = self.expectation(argument_names)(function)
    return decorated(self, *args, **kwargs)
@staticmethod
def _parse_validation_graph(
validation_graph: ValidationGraph,
metrics: _MetricsDict,
) -> Tuple[Set[MetricConfiguration], Set[MetricConfiguration]]:
"""Given validation graph, returns the ready and needed metrics necessary for validation using a traversal of
validation graph (a graph structure of metric ids) edges""" # noqa: E501 # FIXME CoP
unmet_dependency_ids = set()
unmet_dependency = set()
maybe_ready_ids = set()
maybe_ready = set()
for edge in validation_graph.edges:
if edge.left.id not in metrics:
if edge.right is None or edge.right.id in metrics:
if edge.left.id not in maybe_ready_ids:
maybe_ready_ids.add(edge.left.id)
maybe_ready.add(edge.left)
else: # noqa: PLR5501 # FIXME CoP
if edge.left.id not in unmet_dependency_ids:
unmet_dependency_ids.add(edge.left.id)
unmet_dependency.add(edge.left)
return maybe_ready - unmet_dependency, unmet_dependency
def _initialize_expectations(
self,
expectation_suite: Optional[ExpectationSuite] = None,
expectation_suite_name: Optional[str] = None,
) -> None:
"""Instantiates `_expectation_suite` as empty by default or with a specified expectation `config`.
In addition, this always sets the `default_expectation_args` to:
`include_config`: False,
`catch_exceptions`: False,
`output_format`: 'BASIC'
By default, initializes data_asset_type to the name of the implementing class, but subclasses
that have interoperable semantics (e.g. Dataset) may override that parameter to clarify their
interoperability.
Args:
expectation_suite (json): \
A json-serializable expectation config. \
If None, creates default `_expectation_suite` with an empty list of expectations and \
key value `data_asset_name` as `data_asset_name`.
expectation_suite_name (string): \
The name to assign to the `expectation_suite.name`
Returns:
None
""" # noqa: E501 # FIXME CoP
# Checking type of expectation_suite.
# Check for expectation_suite_name is already done by ExpectationSuiteIdentifier
if expectation_suite and not isinstance(expectation_suite, ExpectationSuite):
raise TypeError( # noqa: TRY003 # FIXME CoP
f"expectation_suite must be of type ExpectationSuite, not {type(expectation_suite)}"
)
if expectation_suite is not None:
if isinstance(expectation_suite, dict):
expectation_suite_dict: dict = expectationSuiteSchema.load(expectation_suite)
expectation_suite = ExpectationSuite(**expectation_suite_dict)
else:
expectation_suite = copy.deepcopy(expectation_suite)
self._expectation_suite: ExpectationSuite = expectation_suite
if expectation_suite_name is not None:
if self._expectation_suite.name != expectation_suite_name:
logger.warning(
f"Overriding existing expectation_suite_name {self._expectation_suite.name} with new name {expectation_suite_name}" # noqa: E501 # FIXME CoP
)
self._expectation_suite.name = expectation_suite_name
else:
if expectation_suite_name is None:
expectation_suite_name = "default"
self._expectation_suite = ExpectationSuite(name=expectation_suite_name)
def _get_runtime_configuration(
self,
catch_exceptions: Optional[bool] = None,
result_format: Optional[Union[dict, str]] = None,
) -> dict:
runtime_configuration = copy.deepcopy(self.default_expectation_args)
if catch_exceptions is not None:
runtime_configuration.update({"catch_exceptions": catch_exceptions})
if (
self.default_expectation_args["result_format"]
== Validator.DEFAULT_RUNTIME_CONFIGURATION["result_format"]
):
if result_format is None:
runtime_configuration.pop("result_format")
else:
runtime_configuration.update({"result_format": result_format})
else: # noqa: PLR5501 # FIXME CoP
if result_format is not None:
runtime_configuration.update({"result_format": result_format})
return runtime_configuration
def convert_to_checkpoint_validations_list(
self,
) -> list[CheckpointValidationDefinition]:
"""
Generates a list of validations to be used in the construction of a Checkpoint.
Returns:
A list of CheckpointValidationDefinitions (one for each batch in the Validator).
"""
validations = []
for batch in self.batch_cache.values():
validation = CheckpointValidationDefinition(
expectation_suite_name=self.expectation_suite_name,
expectation_suite_id=self.expectation_suite.id,
batch_request=batch.batch_request,
)
validations.append(validation)
return validations
| Validator |
python | django__django | tests/modeladmin/models.py | {
"start": 983,
"end": 1708
} | class ____(models.Model):
name = models.CharField(max_length=100)
slug = models.SlugField()
users = models.ManyToManyField(User)
state = models.CharField(
max_length=2, choices=(("CO", "Colorado"), ("WA", "Washington"))
)
is_active = models.BooleanField(default=False)
pub_date = models.DateTimeField()
band = models.ForeignKey(Band, models.CASCADE)
best_friend = models.OneToOneField(User, models.CASCADE, related_name="best_friend")
# This field is intentionally 2 characters long (#16080).
no = models.IntegerField(verbose_name="Number", blank=True, null=True)
def decade_published_in(self):
return self.pub_date.strftime("%Y")[:3] + "0's"
| ValidationTestModel |
python | getsentry__sentry | tests/sentry/utils/sdk_crashes/test_sdk_crash_detection_cocoa.py | {
"start": 28572,
"end": 28874
} | class ____(
TestCase, CococaSDKFilenameTestMixin, CococaSDKFramesTestMixin, CococaSDKFunctionTestMixin
):
def create_event(self, data, project_id, assert_no_errors=True):
return self.store_event(data=data, project_id=project_id, assert_no_errors=assert_no_errors)
| SDKCrashDetectionCocoaTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_cond_format10.py | {
"start": 315,
"end": 1285
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("cond_format10.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with conditional formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format1 = workbook.add_format({"bold": 1, "italic": 1})
worksheet.write("A1", "Hello", format1)
worksheet.write("B3", 10)
worksheet.write("B4", 20)
worksheet.write("B5", 30)
worksheet.write("B6", 40)
worksheet.conditional_format(
"B3:B6",
{
"type": "cell",
"format": format1,
"criteria": "greater than",
"value": 20,
},
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | Pylons__pyramid | src/pyramid/exceptions.py | {
"start": 2950,
"end": 3513
} | class ____(ConfigurationError):
"""Raised when a configuration conflict is detected during action
processing"""
def __init__(self, conflicts):
self._conflicts = conflicts
def __str__(self):
r = ["Conflicting configuration actions"]
for discriminator, infos in self._conflicts.items():
r.append(f" For: {discriminator}")
for info in infos:
for line in str(info).rstrip().split('\n'):
r.append(" " + line)
return '\n'.join(r)
| ConfigurationConflictError |
python | numba__numba | numba/core/ssa.py | {
"start": 8714,
"end": 8860
} | class ____:
def __init__(self):
raise NotImplementedError("Not intended for instantiation")
target = ir.UNDEFINED
| UndefinedVariable |
python | pandas-dev__pandas | pandas/tests/tseries/offsets/test_custom_business_hour.py | {
"start": 999,
"end": 12217
} | class ____:
def test_constructor_errors(self):
msg = "time data must be specified only with hour and minute"
with pytest.raises(ValueError, match=msg):
CustomBusinessHour(start=dt_time(11, 0, 5))
msg = "time data must match '%H:%M' format"
with pytest.raises(ValueError, match=msg):
CustomBusinessHour(start="AAA")
msg = "time data must match '%H:%M' format"
with pytest.raises(ValueError, match=msg):
CustomBusinessHour(start="14:00:05")
def test_different_normalize_equals(self, _offset):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = _offset()
offset2 = _offset(normalize=True)
assert offset != offset2
def test_repr(self, offset1, offset2):
assert repr(offset1) == "<CustomBusinessHour: cbh=09:00-17:00>"
assert repr(offset2) == "<CustomBusinessHour: cbh=09:00-17:00>"
def test_with_offset(self, dt):
expected = Timestamp("2014-07-01 13:00")
assert dt + CustomBusinessHour() * 3 == expected
assert dt + CustomBusinessHour(n=3) == expected
def test_eq(self, offset1, offset2):
for offset in [offset1, offset2]:
assert offset == offset
assert CustomBusinessHour() != CustomBusinessHour(-1)
assert CustomBusinessHour(start="09:00") == CustomBusinessHour()
assert CustomBusinessHour(start="09:00") != CustomBusinessHour(start="09:01")
assert CustomBusinessHour(start="09:00", end="17:00") != CustomBusinessHour(
start="17:00", end="09:01"
)
assert CustomBusinessHour(weekmask="Tue Wed Thu Fri") != CustomBusinessHour(
weekmask="Mon Tue Wed Thu Fri"
)
assert CustomBusinessHour(holidays=["2014-06-27"]) != CustomBusinessHour(
holidays=["2014-06-28"]
)
def test_hash(self, offset1, offset2):
assert hash(offset1) == hash(offset1)
assert hash(offset2) == hash(offset2)
def test_add_dateime(self, dt, offset1, offset2):
assert offset1 + dt == datetime(2014, 7, 1, 11)
assert offset2 + dt == datetime(2014, 7, 1, 11)
def testRollback1(self, dt, offset1, offset2):
assert offset1.rollback(dt) == dt
assert offset2.rollback(dt) == dt
d = datetime(2014, 7, 1, 0)
# 2014/07/01 is Tuesday, 06/30 is Monday(holiday)
assert offset1.rollback(d) == datetime(2014, 6, 27, 17)
# 2014/6/30 and 2014/6/27 are holidays
assert offset2.rollback(d) == datetime(2014, 6, 26, 17)
def testRollback2(self, _offset):
assert _offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) == datetime(
2014, 7, 4, 17, 0
)
def testRollforward1(self, dt, offset1, offset2):
assert offset1.rollforward(dt) == dt
assert offset2.rollforward(dt) == dt
d = datetime(2014, 7, 1, 0)
assert offset1.rollforward(d) == datetime(2014, 7, 1, 9)
assert offset2.rollforward(d) == datetime(2014, 7, 1, 9)
def testRollforward2(self, _offset):
assert _offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) == datetime(
2014, 7, 7, 9
)
def test_roll_date_object(self):
offset = BusinessHour()
dt = datetime(2014, 7, 6, 15, 0)
result = offset.rollback(dt)
assert result == datetime(2014, 7, 4, 17)
result = offset.rollforward(dt)
assert result == datetime(2014, 7, 7, 9)
normalize_cases = [
(
CustomBusinessHour(normalize=True, holidays=holidays),
{
datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 3),
datetime(2014, 7, 1, 16): datetime(2014, 7, 3),
datetime(2014, 7, 1, 23): datetime(2014, 7, 3),
datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
datetime(2014, 7, 6, 10): datetime(2014, 7, 7),
},
),
(
CustomBusinessHour(-1, normalize=True, holidays=holidays),
{
datetime(2014, 7, 1, 8): datetime(2014, 6, 26),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
datetime(2014, 7, 1, 10): datetime(2014, 6, 26),
datetime(2014, 7, 1, 0): datetime(2014, 6, 26),
datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
datetime(2014, 7, 6, 10): datetime(2014, 7, 4),
},
),
(
CustomBusinessHour(
1, normalize=True, start="17:00", end="04:00", holidays=holidays
),
{
datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
datetime(2014, 7, 2, 3): datetime(2014, 7, 3),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
datetime(2014, 7, 7, 17): datetime(2014, 7, 7),
},
),
]
@pytest.mark.parametrize("norm_cases", normalize_cases)
def test_normalize(self, norm_cases):
offset, cases = norm_cases
for dt, expected in cases.items():
assert offset._apply(dt) == expected
@pytest.mark.parametrize(
"dt, expected",
[
[datetime(2014, 7, 1, 9), False],
[datetime(2014, 7, 1, 10), True],
[datetime(2014, 7, 1, 15), True],
[datetime(2014, 7, 1, 15, 1), False],
[datetime(2014, 7, 5, 12), False],
[datetime(2014, 7, 6, 12), False],
],
)
def test_is_on_offset(self, dt, expected):
offset = CustomBusinessHour(start="10:00", end="15:00", holidays=holidays)
assert offset.is_on_offset(dt) == expected
apply_cases = [
(
CustomBusinessHour(holidays=holidays),
{
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 9),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 3, 9, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 10),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
# saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30),
},
),
(
CustomBusinessHour(4, holidays=holidays),
{
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 13): datetime(2014, 7, 3, 9),
datetime(2014, 7, 1, 15): datetime(2014, 7, 3, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30),
},
),
]
@pytest.mark.parametrize("apply_case", apply_cases)
def test_apply(self, apply_case):
offset, cases = apply_case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
nano_cases = [
(
CustomBusinessHour(holidays=holidays),
{
Timestamp("2014-07-01 15:00") + Nano(5): Timestamp("2014-07-01 16:00")
+ Nano(5),
Timestamp("2014-07-01 16:00") + Nano(5): Timestamp("2014-07-03 09:00")
+ Nano(5),
Timestamp("2014-07-01 16:00") - Nano(5): Timestamp("2014-07-01 17:00")
- Nano(5),
},
),
(
CustomBusinessHour(-1, holidays=holidays),
{
Timestamp("2014-07-01 15:00") + Nano(5): Timestamp("2014-07-01 14:00")
+ Nano(5),
Timestamp("2014-07-01 10:00") + Nano(5): Timestamp("2014-07-01 09:00")
+ Nano(5),
Timestamp("2014-07-01 10:00") - Nano(5): Timestamp("2014-06-26 17:00")
- Nano(5),
},
),
]
@pytest.mark.parametrize("nano_case", nano_cases)
def test_apply_nanoseconds(self, nano_case):
offset, cases = nano_case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_us_federal_holiday_with_datetime(self):
# GH 16867
bhour_us = CustomBusinessHour(calendar=USFederalHolidayCalendar())
t0 = datetime(2014, 1, 17, 15)
result = t0 + bhour_us * 8
expected = Timestamp("2014-01-21 15:00:00")
assert result == expected
@pytest.mark.parametrize(
"weekmask, expected_time, mult",
[
["Mon Tue Wed Thu Fri Sat", "2018-11-10 09:00:00", 10],
["Tue Wed Thu Fri Sat", "2018-11-13 08:00:00", 18],
],
)
def test_custom_businesshour_weekmask_and_holidays(weekmask, expected_time, mult):
# GH 23542
holidays = ["2018-11-09"]
bh = CustomBusinessHour(
start="08:00", end="17:00", weekmask=weekmask, holidays=holidays
)
result = Timestamp("2018-11-08 08:00") + mult * bh
expected = Timestamp(expected_time)
assert result == expected
| TestCustomBusinessHour |
python | ray-project__ray | python/ray/llm/_internal/serve/core/configs/openai_api_models.py | {
"start": 2522,
"end": 2673
} | class ____(vLLMCompletionStreamResponse):
model_config = ConfigDict(arbitrary_types_allowed=True)
# TODO (Kourosh): Upstream
| CompletionStreamResponse |
python | pypa__warehouse | warehouse/oidc/models/gitlab.py | {
"start": 12155,
"end": 15525
} | class ____(GitLabPublisherMixin, OIDCPublisher):
__tablename__ = "gitlab_oidc_publishers"
__mapper_args__ = {"polymorphic_identity": "gitlab_oidc_publishers"}
__table_args__ = (
UniqueConstraint(
"namespace",
"project",
"workflow_filepath",
"environment",
name="_gitlab_oidc_publisher_uc",
),
)
id: Mapped[UUID] = mapped_column(
PG_UUID(as_uuid=True), ForeignKey(OIDCPublisher.id), primary_key=True
)
def verify_url(self, url: str) -> bool:
"""
Verify a given URL against this GitLab's publisher information
In addition to the generic Trusted Publisher verification logic in
the parent class, the GitLab Trusted Publisher ignores the suffix `.git`
in repo URLs, since `gitlab.com/org/repo.git` always redirects to
`gitlab.com/org/repo`. This does not apply to subpaths like
`gitlab.com/org/repo.git/issues`, which do not redirect to the correct URL.
GitLab uses case-insensitive owner/repo slugs - so we perform a case-insensitive
comparison.
In addition to the generic Trusted Publisher verification logic in
the parent class, the GitLab Trusted Publisher allows URLs hosted
on `gitlab.io` for the configured repository, i.e:
`https://${OWNER}.gitlab.io/${SUBGROUP}/${PROJECT}`.
This method does not support the verification when the Unique Domain setting is
used.
The rules implemented in this method are derived from
https://docs.gitlab.com/ee/user/project/pages/getting_started_part_one.html#project-website-examples
https://docs.gitlab.com/ee/user/project/pages/getting_started_part_one.html#user-and-group-website-examples
The table stems from GitLab documentation and is replicated here for clarity.
| Namespace | GitLab Page URL |
| ---------------------------- | ---------------------------------------- |
| username/username.example.io | https://username.gitlab.io |
| acmecorp/acmecorp.example.io | https://acmecorp.gitlab.io |
| username/my-website | https://username.gitlab.io/my-website |
| group/webshop | https://group.gitlab.io/webshop |
| group/subgroup/project | https://group.gitlab.io/subgroup/project |
"""
lowercase_base_url = self.publisher_base_url.lower()
if url.lower().startswith(lowercase_base_url):
url = lowercase_base_url + url[len(lowercase_base_url) :]
url_for_generic_check = url.removesuffix("/").removesuffix(".git")
if verify_url_from_reference(
reference_url=lowercase_base_url, url=url_for_generic_check
):
return True
try:
owner, subgroup = self.namespace.split("/", maxsplit=1)
subgroup += "/"
except ValueError:
owner, subgroup = self.namespace, ""
if self.project == f"{owner}.gitlab.io" and not subgroup:
docs_url = f"https://{owner}.gitlab.io"
else:
docs_url = f"https://{owner}.gitlab.io/{subgroup}{self.project}"
return verify_url_from_reference(reference_url=docs_url, url=url)
| GitLabPublisher |
python | django-extensions__django-extensions | tests/testapp/models.py | {
"start": 2300,
"end": 2432
} | class ____(ActivatorModel):
title = models.CharField(max_length=255)
class Meta:
app_label = "django_extensions"
| Post |
python | ray-project__ray | python/ray/serve/_private/logging_utils.py | {
"start": 3895,
"end": 7641
} | class ____(TextFormatter):
"""Serve Logging Formatter
The formatter will generate the log format on the fly based on the field of record.
Optimized to pre-compute format strings and formatters for better performance.
"""
COMPONENT_LOG_FMT = f"%({SERVE_LOG_LEVEL_NAME})s %({SERVE_LOG_TIME})s {{{SERVE_LOG_COMPONENT}}} {{{SERVE_LOG_COMPONENT_ID}}} " # noqa:E501
def __init__(
self,
component_name: str,
component_id: str,
fmt: Optional[str] = None,
datefmt: Optional[str] = None,
style: str = "%",
validate: bool = True,
):
super().__init__(fmt, datefmt, style, validate)
self.component_log_fmt = ServeFormatter.COMPONENT_LOG_FMT.format(
component_name=component_name, component_id=component_id
)
# Pre-compute format strings and formatters for performance
self._precompute_formatters()
def set_additional_log_standard_attrs(self, *args, **kwargs):
super().set_additional_log_standard_attrs(*args, **kwargs)
self._precompute_formatters()
def _precompute_formatters(self):
self.base_formatter = self._create_formatter([])
self.request_formatter = self._create_formatter(
[SERVE_LOG_RECORD_FORMAT[SERVE_LOG_REQUEST_ID]]
)
def _create_formatter(self, initial_attrs: list) -> logging.Formatter:
attrs = initial_attrs.copy()
attrs.extend([f"%({k})s" for k in self.additional_log_standard_attrs])
attrs.append(SERVE_LOG_RECORD_FORMAT[SERVE_LOG_MESSAGE])
format_string = self.component_log_fmt + " ".join(attrs)
return logging.Formatter(format_string)
def format(self, record: logging.LogRecord) -> str:
"""Format the log record into the format string.
Args:
record: The log record to be formatted.
Returns:
The formatted log record in string format.
"""
# Use pre-computed formatters for better performance
if SERVE_LOG_REQUEST_ID in record.__dict__:
return self.request_formatter.format(record)
else:
return self.base_formatter.format(record)
def access_log_msg(*, method: str, route: str, status: str, latency_ms: float):
"""Returns a formatted message for an HTTP or ServeHandle access log."""
return f"{method} {route} {status} {latency_ms:.1f}ms"
def log_to_stderr_filter(record: logging.LogRecord) -> bool:
"""Filters log records based on a parameter in the `extra` dictionary."""
if not hasattr(record, "log_to_stderr") or record.log_to_stderr is None:
return True
return record.log_to_stderr
def log_access_log_filter(record: logging.LogRecord) -> bool:
"""Filters ray serve access log based on 'serve_access_log' key in `extra` dict."""
if not hasattr(record, "serve_access_log") or record.serve_access_log is None:
return True
return not record.serve_access_log
def get_component_logger_file_path() -> Optional[str]:
"""Returns the relative file path for the Serve logger, if it exists.
If a logger was configured through configure_component_logger() for the Serve
component that's calling this function, this returns the location of the log file
relative to the ray logs directory.
"""
logger = logging.getLogger(SERVE_LOGGER_NAME)
for handler in logger.handlers:
if isinstance(handler, logging.handlers.MemoryHandler):
absolute_path = handler.target.baseFilename
ray_logs_dir = ray._private.worker._global_node.get_logs_dir_path()
if absolute_path.startswith(ray_logs_dir):
return absolute_path[len(ray_logs_dir) :]
| ServeFormatter |
python | pytorch__pytorch | test/dynamo/test_aot_autograd.py | {
"start": 1318,
"end": 62835
} | class ____(torch._inductor.test_case.TestCase):
def test_LSTM(self):
# https://github.com/pytorch/torchdynamo/issues/1147
class Repro(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.self_mod_model_lstm_lstm = torch.nn.LSTM(
64, 64, num_layers=2, bidirectional=True
)
def forward(self, permute: torch.Tensor):
self_mod_model_lstm_lstm = self.self_mod_model_lstm_lstm(permute)
return (self_mod_model_lstm_lstm,)
mod = Repro()
aot_mod = torch.compile(mod, backend="aot_eager")
args = [((92, 4, 64), (1, 5888, 92), torch.float32, "cpu", False)]
args = [
rand_strided(sh, st, dt, dev).requires_grad_(rg)
for (sh, st, dt, dev, rg) in args
]
eager_result = mod(*args)
aot_result = aot_mod(*args)
self.assertTrue(torch._dynamo.testing.same(eager_result, aot_result))
def test_mutation(self):
# https://github.com/pytorch/torchdynamo/issues/1301
def fn(param, y):
prev_grad = torch.is_grad_enabled()
try:
torch.set_grad_enabled(False)
param.add_(y)
finally:
torch.set_grad_enabled(prev_grad)
return y
y = torch.randn(4)
x = torch.nn.Parameter(torch.randn(4))
aot_fn = torch.compile(fn, backend="aot_eager")
# This should not error: we mutated an autograd leaf under no_grad mode.
aot_fn(x, y)
def test_mutation1(self):
def fn(_stack0: torch.Tensor, diagonal_chunked_attention_scores: torch.Tensor):
getitem = diagonal_chunked_attention_scores[
(
slice(None, None, None),
slice(None, None, None),
slice(None, 256, None),
slice(None, 257, None),
)
]
_stack0[
(
slice(None, None, None),
slice(None, -1, None),
slice(None, None, None),
slice(256, None, None),
)
] = getitem
view = _stack0.view(1, 12, 1024, 513)
return (view,)
x = torch.randn(torch.Size([12, 4, 256, 513]))
y = torch.randn(torch.Size([12, 3, 512, 513]))
aot_fn = torch.compile(fn, backend="aot_eager")
aot_fn(x, y)
def test_negative_testing_mutation(self):
def fn(_stack0: torch.Tensor, diagonal_chunked_attention_scores: torch.Tensor):
getitem = diagonal_chunked_attention_scores[
(
slice(None, None, None),
slice(None, None, None),
slice(None, 256, None),
slice(None, 257, None),
)
]
_stack0 = torch.sin(_stack0)
_stack0[
(
slice(None, None, None),
slice(None, -1, None),
slice(None, None, None),
slice(256, None, None),
)
] = getitem
view = _stack0.view(1, 12, 1024, 513)
return (view,)
x = torch.randn(torch.Size([12, 4, 256, 513]))
y = torch.randn(torch.Size([12, 3, 512, 513]))
aot_fn = torch.compile(fn, backend="aot_eager")
aot_fn(x, y)
def test_negative_testing(self):
def fn(x, y):
return torch.sin(x).add_(y)
y = torch.randn(4)
x = torch.randn(4)
aot_fn = torch.compile(fn, backend="aot_eager")
aot_fn(x, y)
def test_call_fn_with_non_const_inputs_aot_safe(self):
class ModuleSpecialFwd(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(
in_channels=3, out_channels=20, kernel_size=(5, 5)
)
def _conv_forward(self, x):
return self.conv._conv_forward(x, self.conv.weight, self.conv.bias)
def forward(self, x):
return self._conv_forward(x)
# Init mod
mod = ModuleSpecialFwd()
rx = torch.randn([3, 10, 10])
# Run it for real
real = mod(rx)
# Run it in export
graph, _ = torch._dynamo.export(mod)(rx)
# Run exported graph with AOT
self.assertTrue(torch._dynamo.testing.same(real, graph(rx)))
aot_fn = torch.compile(graph, backend="aot_eager")
aot_fn(rx)
def test_call_fn_with_non_const_inputs_aot_unsafe(self):
class ModuleSpecialFwd(torch.nn.Module):
def _some_bad_fwd(self, param, y):
prev_grad = torch.is_grad_enabled()
try:
torch.set_grad_enabled(False)
param.add_(y)
finally:
torch.set_grad_enabled(prev_grad)
return y
def forward(self, x, y):
return self._some_bad_fwd(x, y)
# Init mod
mod = ModuleSpecialFwd()
x = torch.nn.Parameter(torch.randn(4))
y = torch.randn([4])
# Run it for real
real = mod(x, y)
# Run it in export
graph, _ = torch._dynamo.export(mod)(x, y)
# Assert equal
self.assertTrue(torch._dynamo.testing.same(real, graph(x, y)))
# Run exported graph with AOT
aot_fn = torch.compile(graph, backend="aot_eager")
# This should not error: we mutated an autograd leaf under no_grad mode.
aot_fn(x, y)
def test_call_fn_with_non_const_inputs_aot_unsafe_control_flow(self):
class ModuleSpecialFwd(torch.nn.Module):
def _some_bad_fwd(self, param, y):
if y[0][0] < 3:
return y + param
return param * y
def forward(self, x, y):
a = x * y
a = self._some_bad_fwd(a, a)
b = x + y
return a * b
# Init mod
mod = ModuleSpecialFwd()
x = torch.nn.Parameter(torch.randn([2, 2]))
y = torch.randn([2, 2])
# Run it for real
real = mod(x, y)
# Run it through optimize, with our capturing fn
gms = []
counter = CompileCounter()
def capturing_fn(gm, inputs):
nonlocal gms
gms.append(gm)
return counter(gm, inputs)
optimized_mod = torch.compile(mod, backend=capturing_fn)
# Assert equal
self.assertTrue(torch._dynamo.testing.same(real, optimized_mod(x, y)))
# Uncomment to reproduce commented out graphs below.
# for gm in gms:
# print("GM CODE", gm.code)
self.assertEqual(counter.frame_count, 4)
self.assertEqual(counter.op_count, 7)
# Graph 1
# def forward(self, x : torch.nn.parameter.Parameter, y : torch.Tensor):
# mul = x * y; x = y = None
# return (mul,)
# BREAK
# Graph 2
# def forward(self, y : torch.Tensor):
# getitem = y[0]; y = None
# getitem_1 = getitem[0]; getitem = None
# lt = getitem_1 < 3; getitem_1 = None
# return (lt,)
# BREAK
# Graph 3
# def forward(self, param : torch.Tensor, y : torch.Tensor):
# add = y + param; y = param = None
# return (add,)
# BREAK
# Graph 4
# def forward(self, _stack0 : torch.Tensor, x : torch.nn.parameter.Parameter, y : torch.Tensor):
# add = x + y; x = y = None
# mul = _stack0 * add; _stack0 = add = None
# return (mul,)
# Run fn with AOT
torch._dynamo.reset()
aot_fn = torch.compile(optimized_mod, backend="aot_eager")
aot_fn(x, y)
# Note: Dynamo recompilation guarding invalid grad
#
# This test is a spiritual equivalent to test_invalid_requires_grad_fake in test_autodispatch.py
# The point of this test is to invoke aot_autograd in a way that would normally trigger an assertion
# (This is what test_invalid_requires_grad_fake) does. However, the point of this test is to prove
# that we do not hit this assertion, as dynamo recompiles correctly and protects this condition.
#
# Subnote: The reason for us having test_invalid_requires_grad_fake utilizing fake tensors
# is because dynamo sends fake tensors down to aot_autograd.
@patch("torch._functorch.config.debug_assert", True)
def test_requires_grad_fake_via_dynamo_recompiles(self):
class F(torch.nn.Module):
def forward(self, x, y):
return (x + y,)
x = torch.randn(3, 3, requires_grad=True)
y = torch.randn(3, 3, requires_grad=True)
z = torch.randn(3, 3, requires_grad=False)
cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")
failure_reason = None
def guard_fail_fn(failure):
nonlocal failure_reason
failure_reason = failure[0]
fxy = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F())
compare_equal_outs_and_grads(self, F(), fxy, (x, y))
compare_equal_outs_and_grads(self, F(), fxy, (x, z))
self.assertIn(
"""tensor 'y' requires_grad mismatch. expected requires_grad=1""",
failure_reason,
)
# Reset failure reason
failure_reason = None
self.assertEqual(cc.frame_count, 2)
torch._dynamo.reset() # for new backend
cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")
fxz = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F())
compare_equal_outs_and_grads(self, F(), fxz, (x, z))
compare_equal_outs_and_grads(self, F(), fxz, (x, z))
self.assertEqual(cc.frame_count, 1)
self.assertTrue(failure_reason is None)
def test_double_backward_errors(self):
# Remove this test after we get double backward to actually work
for grad_output in (torch.tensor(1.0, requires_grad=True), None):
x = torch.tensor(1.0, requires_grad=True)
err = "torch.compile with aot_autograd does not currently support double backward"
# The following cases should be equivalent:
# (1) double backward entirely inside compiled function
def f1(x):
y = x.sin().exp()
(gx,) = torch.autograd.grad(
y, x, create_graph=True, grad_outputs=grad_output
)
torch.autograd.grad(gx, x)
return gx
compiled_f1 = torch.compile(backend="aot_eager")(f1)
f1(x)
with self.assertRaisesRegex(RuntimeError, err):
compiled_f1(x)
# (2) the second half of double backward outside compiled function
def f2(x):
y = x.sin().exp()
(gx,) = torch.autograd.grad(
y, x, create_graph=True, grad_outputs=grad_output
)
return gx
compiled_f2 = torch.compile(backend="aot_eager")(f2)
gx = compiled_f2(x)
with self.assertRaisesRegex(RuntimeError, err):
torch.autograd.grad(gx, x)
# (3) double backward entirely outside compiled function
def f3(x):
y = x.sin().exp()
return y
compiled_f3 = torch.compile(backend="aot_eager")(f3)
y = compiled_f3(x)
(gx,) = torch.autograd.grad(
y, x, create_graph=True, grad_outputs=grad_output
)
with self.assertRaisesRegex(RuntimeError, err):
torch.autograd.grad(gx, x)
# create_graph=False
def f4(x):
y = x.sin().exp()
return y
compiled_f4 = torch.compile(backend="aot_eager")(f4)
x = torch.tensor(1.0, requires_grad=True)
y = compiled_f4(x)
(gx,) = torch.autograd.grad(y, x, create_graph=False, grad_outputs=grad_output)
@patch("torch._functorch.config.debug_assert", True)
def test_arg_dupe_via_dynamo_recompiles(self):
    """Duplicate-argument guard: calling a compiled module that mutates its
    inputs with two distinct tensors, then with the same tensor twice (or
    vice versa), must trigger a recompile guarded by ``x is y``."""

    class F(torch.nn.Module):
        def forward(self, x, y):
            # In-place truncation mutates the inputs.
            x = x.trunc_()
            y = y.trunc_()
            return (x + y,)

    x = torch.randn(3, 3, requires_grad=True)
    x1, x2, x3, x4 = x.clone(), x.clone(), x.clone(), x.clone()
    y = torch.randn(3, 3, requires_grad=True)
    y1, y2, y4 = y.clone(), y.clone(), y.clone()

    cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")

    failure_reason = None

    def guard_fail_fn(failure):
        nonlocal failure_reason
        failure_reason = failure[0]

    fxy = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F())
    # Note: to prevent a recompilation between the two calls,
    # we need to clone x and y on each use.
    # fxy mutates the input's metadata, so otherwise dynamo will end up recompiling.
    fxy(x1, y1)
    fxy(x2, y2)
    # Distinct args both times: no guard failure, single compiled frame.
    self.assertTrue(failure_reason is None)

    # Reset failure reason
    failure_reason = None

    self.assertEqual(cc.frame_count, 1)

    torch._dynamo.reset()  # for new backend
    cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")

    fxx = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F())
    # First call dupes the argument; second call passes distinct tensors,
    # which fails the dupe guard and forces a second frame.
    fxx(x3, x3)
    fxx(x4, y4)
    self.assertEqual(cc.frame_count, 2)
    self.assertIn("""x is y""", failure_reason)
@patch("torch._functorch.config.debug_assert", True)
def test_arg_dupe_via_dynamo_recompiles_many_args_param_non_tensor_arg(self):
    """Arg-dupe recompile detection when the module has a Parameter and
    also takes non-tensor (scalar) arguments."""

    class F(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.mean = torch.nn.Parameter(torch.randn(3, 3))

        def forward(self, a, b, e, f):
            a.trunc_()
            b.trunc_()
            return (a + b + self.mean) * e * f

    a = torch.randn(3, 3, requires_grad=True)
    b = torch.randn(3, 3, requires_grad=True)
    a1, a2 = a.clone(), a.clone()
    _, b2 = b.clone(), b.clone()

    failure_reason = None

    def guard_fail_fn(failure):
        nonlocal failure_reason
        failure_reason = failure[0]

    self.assertTrue(failure_reason is None)
    cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")

    f = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F())
    # Duped tensor args first, then distinct ones -> "a is b" guard fails.
    f(a1, a1, 2, 2)
    f(a2, b2, 2, 2)
    self.assertEqual(cc.frame_count, 2)
    self.assertIn(
        """a is b""",
        failure_reason,
    )

    torch._dynamo.reset()
    cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")

    c = torch.randn(3, 3, requires_grad=True)
    d = torch.randn(3, 3, requires_grad=True)
    c3, c4 = c.clone(), c.clone()
    _, d4 = d.clone(), d.clone()

    # Same module, fresh counter: the dupe/non-dupe transition recompiles again.
    f = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F())
    f(c3, c3, 3, 3)
    f(c4, d4, 3, 3)
    self.assertEqual(cc.frame_count, 2)
    self.assertIn("""a is b""", failure_reason)
@patch("torch._functorch.config.debug_assert", True)
def test_arg_dupe_via_dynamo_recompiles_many_with_global(self):
    """Arg-dupe recompile detection when forward also reads a captured
    variable ``z`` that aliases input ``a``."""
    z = None

    class F(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.mean = torch.nn.Parameter(torch.randn(3, 3))

        def forward(self, a, b, e, f):
            a.trunc_()
            b.trunc_()
            # z is resolved from the enclosing test scope; it is bound to
            # the original tensor `a` below.
            return (a + b + z + self.mean) * e * f

    a = torch.randn(3, 3, requires_grad=True)
    b = torch.randn(3, 3, requires_grad=True)
    z = a
    a1, a2 = a.clone(), a.clone()
    _, b2 = b.clone(), b.clone()

    failure_reason = None

    def guard_fail_fn(failure):
        nonlocal failure_reason
        failure_reason = failure[0]

    self.assertTrue(failure_reason is None)
    cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")

    f = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F())
    # Duped args first, then distinct args -> guard "a is b" fails, recompile.
    f(a1, a1, 2, 2)
    f(a2, b2, 2, 2)
    self.assertEqual(cc.frame_count, 2)
    self.assertIn(
        """a is b""",
        failure_reason,
    )
@patch("torch._functorch.config.debug_assert", True)
def test_arg_dupe_via_dynamo_recompiles_many_args_param_non_tensor_arg_list(self):
    """Arg-dupe recompile detection when non-tensor args are lists and the
    tensor args come last in the signature."""

    class F(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.mean = torch.nn.Parameter(torch.randn(3, 3))

        def forward(self, e, f, a, b):
            a.trunc_()
            b.trunc_()
            return (a + b + self.mean) * e[0] * f[0]

    a = torch.randn(3, 3, requires_grad=True)
    b = torch.randn(3, 3, requires_grad=True)
    a1, a2 = a.clone(), a.clone()
    _, b2 = b.clone(), b.clone()

    failure_reason = None

    def guard_fail_fn(failure):
        nonlocal failure_reason
        failure_reason = failure[0]

    self.assertTrue(failure_reason is None)
    cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")

    f = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F())
    # Duped tensor args first, then distinct ones -> "a is b" guard fails.
    f([3, 2, 1], [4, 5, 6], a1, a1)
    f([3, 2, 1], [4, 5, 6], a2, b2)
    self.assertEqual(cc.frame_count, 2)
    self.assertIn(
        """a is b""",
        failure_reason,
    )

    torch._dynamo.reset()
    cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")

    c = torch.randn(3, 3, requires_grad=True)
    d = torch.randn(3, 3, requires_grad=True)
    c3, c4 = c.clone(), c.clone()
    _, d4 = d.clone(), d.clone()

    f = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F())
    f([3, 2, 1], [4, 5, 6], c3, c3)
    f([3, 2, 1], [4, 5, 6], c4, d4)
    self.assertEqual(cc.frame_count, 2)
@patch("torch._functorch.config.debug_assert", True)
def test_arg_dupe_via_dynamo_recompiles_many_args_param(self):
    """Arg-dupe recompile detection with only tensor args plus a Parameter."""

    class F(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.mean = torch.nn.Parameter(torch.randn(3, 3))

        def forward(self, a, b):
            a.trunc_()
            b.trunc_()
            return a + b + self.mean

    a = torch.randn(3, 3, requires_grad=True)
    b = torch.randn(3, 3, requires_grad=True)
    a1, a2 = a.clone(), a.clone()
    _, b2 = b.clone(), b.clone()

    failure_reason = None

    def guard_fail_fn(failure):
        nonlocal failure_reason
        failure_reason = failure[0]

    self.assertTrue(failure_reason is None)
    cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")

    f = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F())
    # Duped args first, then distinct args -> guard "a is b" fails, recompile.
    f(a1, a1)
    f(a2, b2)
    self.assertEqual(cc.frame_count, 2)
    self.assertIn(
        """a is b""",
        failure_reason,
    )

    torch._dynamo.reset()
    cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")

    c = torch.randn(3, 3, requires_grad=True)
    d = torch.randn(3, 3, requires_grad=True)
    c3, c4 = c.clone(), c.clone()
    _, d4 = d.clone(), d.clone()

    f = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F())
    f(c3, c3)
    f(c4, d4)
    self.assertEqual(cc.frame_count, 2)
    self.assertIn("""a is b""", failure_reason)
@patch("torch._functorch.config.debug_assert", True)
def test_arg_dupe_via_dynamo_recompiles_many_args(self):
    """Arg-dupe recompile detection with four mutated tensor args; checks
    the guard message names the first differing pair ("a is b" vs "c is d")."""

    class F(torch.nn.Module):
        def forward(self, a, b, c, d):
            a.trunc_()
            b.trunc_()
            c.trunc_()
            d.trunc_()
            return (a + b + c + d,)

    a = torch.randn(3, 3, requires_grad=True)
    b = torch.randn(3, 3, requires_grad=True)
    # a3/a4/b3/b4 are consumed by the second compilation round below.
    a1, a2, a3, a4 = a.clone(), a.clone(), a.clone(), a.clone()
    _, b2, b3, b4 = b.clone(), b.clone(), b.clone(), b.clone()

    failure_reason = None

    def guard_fail_fn(failure):
        nonlocal failure_reason
        failure_reason = failure[0]

    self.assertTrue(failure_reason is None)
    cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")

    f = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F())
    # All four args duped, then a/b distinct -> first failing guard is "a is b".
    f(a1, a1, a1, a1)
    f(a2, b2, b2, b2)
    self.assertEqual(cc.frame_count, 2)
    self.assertIn(
        """a is b""",
        failure_reason,
    )

    torch._dynamo.reset()
    cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")

    c = torch.randn(3, 3, requires_grad=True)
    d = torch.randn(3, 3, requires_grad=True)
    c3, c4 = c.clone(), c.clone()
    _, d4 = d.clone(), d.clone()

    f = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F())
    # Only c/d duped this time -> failing guard is "c is d".
    f(a3, b3, c3, c3)
    f(a4, b4, c4, d4)
    self.assertEqual(cc.frame_count, 2)
    self.assertIn("""c is d""", failure_reason)
def test_alias_inputs(self):
    """View aliasing: b is a view of a (squeeze of a slice), so the final
    write ``a[0] = 2`` must be visible through the returned b, both in
    eager and under aot_eager."""

    def fn():
        a = torch.tensor([1])
        a = a[0:1]
        b = a.squeeze()
        a[0] = 0
        # Data-dependent condition on the mutated value; presumably forces
        # a graph break under dynamo -- TODO confirm.
        if a[0] < 1e5:
            pass
        a[0] = 2
        return b

    ref_output = fn()
    aot_fn = torch.compile(fn, backend="aot_eager")
    actual_output = aot_fn()
    self.assertEqual(ref_output, actual_output)
def test_grad_inputs_alias_inputs(self):
    """autograd.Function whose backward returns a *saved input* as one of
    the grads (so a grad aliases a graph input) must compile and run
    backward without error."""

    class Test(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, y):
            ctx.save_for_backward(x)
            return y

        @staticmethod
        def backward(ctx, grad):
            (x,) = ctx.saved_tensors
            # grad w.r.t. x is the saved input itself; grad w.r.t. y is the
            # incoming tangent.
            return x, grad

    def fn(x, y):
        return Test.apply(x, y)

    x = torch.ones(1, requires_grad=True)
    y = torch.ones(1, requires_grad=True)
    compiled_fn = torch.compile(fn, backend="aot_eager")
    out = compiled_fn(x, y)
    out.sum().backward()
def test_joint_custom_pass(self):
    """The ``joint_custom_pass`` config hook must be invoked (with the
    joint GraphModule and (primals, tangents) inputs) only when the input
    requires grad."""
    is_called = False

    def joint_custom_pass(joint_gm: torch.fx.GraphModule, joint_inputs):
        nonlocal is_called
        is_called = True
        self.assertTrue(isinstance(joint_gm, torch.fx.GraphModule))
        self.assertTrue(isinstance(joint_inputs, tuple))
        # first input is list of primals
        self.assertTrue(isinstance(joint_inputs[0], list))
        # second input is list of tangents
        self.assertTrue(isinstance(joint_inputs[1], list))
        return joint_gm

    class M(torch.nn.Module):
        def forward(self, x):
            return x.sin()

    x = torch.randn(10, requires_grad=False)
    compiled_fn = torch.compile(M(), backend="aot_eager")

    with torch._functorch.config.patch("joint_custom_pass", joint_custom_pass):
        _ = compiled_fn(x)
    # x doesn't require grad, shouldn't trigger joint graph compiler
    self.assertFalse(is_called)

    y = torch.randn(10, requires_grad=True)
    with torch._functorch.config.patch("joint_custom_pass", joint_custom_pass):
        out = compiled_fn(y)
    # y requires grad, should trigger joint graph compiler
    self.assertTrue(is_called)
    out.sum().backward()
@expectedFailureDynamic  # https://github.com/pytorch/pytorch/issues/103539
@torch._dynamo.config.patch(automatic_dynamic_shapes=False)
@patch("torch._functorch.config.debug_assert", True)
def test_multiple_aot_autograd_calls_dupe_args(self):
    """Split a graph into two submodules, AOT-compile each independently,
    and call the whole thing with an op that may return duplicated
    outputs -- the dupe-arg handling must hold across separate
    aot_module_simplified invocations."""

    # this is just dealing with the fact that
    # aot_module_simplified expects submods to always return tuples/lists
    class WrapperModule(torch.nn.Module):
        def __init__(self, mod):
            super().__init__()
            self.mod = mod

        def forward(self, *args):
            out = self.mod(*args)
            if isinstance(out, (list, tuple)):
                return out
            return (out,)

    def compile_submod(input_mod, args):
        from functorch.compile import nop
        from torch._functorch.aot_autograd import aot_module_simplified

        class WrapperModule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.original = input_mod
                self.submod = aot_module_simplified(input_mod, args, nop)

            def forward(self, *args):
                return self.submod(*args)

        return WrapperModule()

    def test_compile(fx_g, example_inps):
        # Partition: nodes whose target mentions "mul" go to submod_1,
        # everything else to submod_0.
        split_gm = torch.fx.passes.split_module.split_module(
            fx_g, None, lambda node: 1 if "mul" in str(node) else 0
        )
        submod_1_inps = split_gm.submod_0(*example_inps)
        split_gm.submod_0 = compile_submod(
            WrapperModule(split_gm.submod_0), example_inps
        )
        split_gm.submod_1 = compile_submod(
            WrapperModule(split_gm.submod_1), submod_1_inps
        )
        return split_gm

    @torch.compile(backend=test_compile)
    def f(a):
        b, c = torch.ops.custom.maybe_dupe_op(a)
        return (b.mul_(c),)

    # Two different input sizes exercise both compile paths.
    f(torch.ones(4))
    f(torch.ones(6))
def test_nn_parameter_construction(self):
    """Constructing an nn.Parameter inside a compiled function must match
    eager output (regression test)."""
    # https://github.com/pytorch/pytorch/issues/99569
    def fn(x):
        y = x.sin()
        z = torch.nn.Parameter(torch.ones(1))
        return y + z

    x = torch.rand((4, 4))
    opt_fn = torch.compile(fn, backend="aot_eager")
    self.assertTrue(torch._dynamo.testing.same(fn(x), opt_fn(x)))
def test_aot_sequence_nr(self):
    """Export the joint fwd/bwd graph of a small conv/bn/relu/linear model
    and verify every node's ``seq_nr`` metadata (normalized to start at 0)
    plus the backward nodes' copied-over forward source-fn names against a
    golden table."""

    class Model(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv1 = torch.nn.Conv2d(
                in_channels=16,
                out_channels=16,
                kernel_size=(1, 1),
                stride=1,
                padding="same",
                bias=True,
            )
            self.bn1 = torch.nn.BatchNorm2d(num_features=16)
            self.relu1 = torch.nn.ReLU()
            self.fc1 = torch.nn.Linear(in_features=1638400, out_features=1)
            self.loss_fn = torch.nn.L1Loss()

        def forward(self, x, target):
            y = x
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.relu1(x)
            x = x + y
            x = torch.flatten(x)
            x = self.fc1(x)
            output = self.loss_fn(x, target)
            return (output,)

    mod = Model()
    mod.train()
    x = torch.rand(100, 16, 32, 32, requires_grad=True)
    target = torch.rand(1)

    # Use dynamo export to get the fx graph module
    g_mod, _ = torch._dynamo.export(mod, x, target)

    def _prepare_model_args():
        # Flatten params+buffers and build a functional call wrapper so the
        # exported graph can be traced with explicit parameter inputs.
        named_parameters = dict(g_mod.named_parameters(remove_duplicate=False))
        named_buffers = dict(g_mod.named_buffers(remove_duplicate=False))
        params_and_buffers = {
            **dict(named_parameters),
            **dict(named_buffers),
        }
        params_and_buffers_flat, params_spec = pytree.tree_flatten(
            params_and_buffers
        )
        params_len = len(params_and_buffers_flat)
        functional_call = create_functional_call(g_mod, params_spec, params_len)
        return params_and_buffers_flat, functional_call

    full_args, fn_to_trace = _prepare_model_args()
    param_and_buf_len = len(full_args)
    full_args.extend([x, target])

    # aot_export requires a graph mod input of fwd graph
    # returns the full fwd/bwd graph in graph mod format
    with torch.enable_grad(), fx_traceback.preserve_node_meta():
        fx_g, _, _, _ = _aot_export_function(
            fn_to_trace,
            full_args,
            decompositions=None,
            num_params_buffers=param_and_buf_len,
            no_tangents=True,
        )

    # Walk all the nodes in fx graph.
    # Write the resulting ops to a table
    min_seq_nr = -1
    seq_table = "SeqNr|OrigAten|SrcFn|FwdSrcFn\n"
    for node in fx_g.graph.nodes:
        if "call_" in node.op and "getitem" not in str(node.target):
            seq_nr = node.meta.get("seq_nr", -1)
            if seq_nr < 0:
                continue
            if min_seq_nr < 0:
                min_seq_nr = seq_nr
            source_fn_stack = node.meta.get("source_fn_stack", [])
            orig_aten = node.meta.get("original_aten", "")
            mod_name = ""
            if len(source_fn_stack) > 0:
                mod_name = source_fn_stack[-1][0]
            # Make all seq_nr relative so it starts at 0
            seq_nr = seq_nr - min_seq_nr
            # For backward nodes, also test that metadata from the corresponding
            # forward node is copied over.
            fwd_source_fn_stack = node.meta.get("fwd_source_fn_stack", [])
            fwd_mod_name = ""
            if len(fwd_source_fn_stack):
                fwd_mod_name = fwd_source_fn_stack[-1][0]
            seq_table = (
                seq_table + f"{seq_nr}|{orig_aten}|{mod_name}|{fwd_mod_name}\n"
            )

    self.maxDiff = None
    self.assertExpectedInline(
        seq_table,
        dedent(
            """\
SeqNr|OrigAten|SrcFn|FwdSrcFn
0|aten.convolution.default|conv2d|
0|aten.add.Tensor|add_|
1|aten._native_batch_norm_legit_functional.default|batch_norm|
2|aten.relu.default|relu|
2|aten.detach.default|relu|
3|aten.add.Tensor|add|
4|aten.view.default|flatten|
5|aten.view.default|linear|
6|aten.t.default|linear|
7|aten.addmm.default|linear|
8|aten.view.default|linear|
9|aten.sub.Tensor|l1_loss|
10|aten.abs.default|l1_loss|
11|aten.mean.default|l1_loss|
11|aten.ones_like.default||l1_loss
11|aten.expand.default||l1_loss
11|aten.div.Scalar||l1_loss
10|aten.sgn.default||l1_loss
10|aten.mul.Tensor||l1_loss
8|aten.view.default||linear
7|aten.t.default||linear
7|aten.mm.default||linear
7|aten.t.default||linear
7|aten.mm.default||linear
7|aten.t.default||linear
7|aten.sum.dim_IntList||linear
7|aten.view.default||linear
6|aten.t.default||linear
5|aten.view.default||linear
4|aten.view.default||flatten
2|aten.detach.default||relu
2|aten.threshold_backward.default||relu
1|aten.native_batch_norm_backward.default||batch_norm
0|aten.convolution_backward.default||conv2d
11|aten.add.Tensor||
"""
        ),
    )
def test_split_with_sizes_aot_autograd_cleans_up_traceback_meta(self):
    """After a backward pass with stacktrace-preservation hooks installed,
    the hooks' bookkeeping keys must be removed from the global
    ``fx_traceback.current_meta``."""
    from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks

    def fn(result, split_sizes):
        rs = torch.ops.aten.split_with_sizes(result, split_sizes.tolist())
        return rs

    example_inputs = (
        torch.randn(32, requires_grad=True),
        torch.tensor((7, 16, 9)),
    )
    outs = fn(*example_inputs)
    setup_stacktrace_preservation_hooks([out.grad_fn for out in outs])
    with fx_traceback.preserve_node_meta():
        (outs[0].sum() + outs[1].sum() + outs[2].sum()).backward()

    # Hook bookkeeping must not leak into the global meta dict.
    self.assertNotIn("grad_fn_seq_nr", fx_traceback.current_meta)
    self.assertNotIn("in_grad_fn", fx_traceback.current_meta)
# https://github.com/pytorch/pytorch/issues/110121
def test_aot_export_joint_simple_repro(self):
    """Regression: aot_export_joint_simple used as a custom backend (with
    non-fake inputs allowed) must produce the same output as eager."""

    class Mod(torch.nn.Module):
        def __init__(self, *args, **kwargs) -> None:
            super().__init__(*args, **kwargs)
            self.linear = torch.nn.Linear(5, 7)

        def forward(self, x):
            return self.linear(x)

    def mini_backend(gm, sample_inputs):
        from torch._functorch.aot_autograd import aot_export_joint_simple

        fake_mode = torch._dynamo.utils.detect_fake_mode(sample_inputs)

        # The real sample inputs are not fake tensors, so temporarily allow
        # them inside the detected fake mode.
        with patch.object(fake_mode, "allow_non_fake_inputs", True), fake_mode:
            return aot_export_joint_simple(gm, sample_inputs, trace_joint=False)

    sample_inputs = [torch.rand((3, 4, 5))]
    model = Mod()
    m_compiled = torch.compile(model, backend=mini_backend)

    out_ref = model(*sample_inputs)
    out_test = m_compiled(*sample_inputs)
    self.assertEqual(out_ref, out_test)
# set donated_buffer=False due to create_graph=True
@torch._functorch.config.patch("donated_buffer", False)
def test_eager_sequence_nr(self):
    """Profile a compiled grad call (create_graph=True) and check the
    profiler's autograd sequence numbers: exactly 13 distinct seq_nrs must
    be attributed to ``*Backward0``/``*Backward1`` nodes.

    Bug fix: the original final assertion was
    ``self.assertTrue(len(bwd_set), 13)``, which passes ``13`` as the
    *msg* argument of ``assertTrue`` and therefore only checked that the
    set was non-empty. Replaced with ``assertEqual`` so the count is
    actually compared.
    """

    class Model(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv1 = torch.nn.Conv2d(
                in_channels=16,
                out_channels=16,
                kernel_size=(1, 1),
                stride=1,
                padding="same",
                bias=True,
            )
            self.bn1 = torch.nn.BatchNorm2d(num_features=16)
            self.relu1 = torch.nn.ReLU()
            self.fc1 = torch.nn.Linear(in_features=1638400, out_features=1)
            self.loss_fn = torch.nn.L1Loss()

        def forward(self, x, target):
            y = x
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.relu1(x)
            x = x + y
            x = torch.flatten(x)
            x = self.fc1(x)
            output = self.loss_fn(x, target)
            return (output,)

    def grad_with_create_graph(mod, x, target):
        y = mod(x, target)
        # Set create_graph=True to ensure that the sequence_nr
        # for backward ops continues to count down.
        (gx,) = torch.autograd.grad(
            y[0], x, create_graph=True, grad_outputs=grad_output
        )
        return gx

    x = torch.rand(100, 16, 32, 32, requires_grad=True)
    target = torch.rand(1)
    mod = Model()
    args = [mod, x, target]
    grad_output = torch.tensor(1.0, requires_grad=True)
    compiled_f1 = torch.compile(backend="aot_eager")(grad_with_create_graph)
    model_instance = compiled_f1
    with profile(
        activities=[torch.profiler.ProfilerActivity.CPU],
        record_shapes=True,
    ) as kineto_prof:
        model_instance(*args)
    bwd_set = set()
    prof_str = "SeqNr|Thread|FwdThread|Name\n"
    for event in kineto_prof.events():
        if event.sequence_nr >= 0:
            prof_str = (
                prof_str + f"{event.sequence_nr}|{event.thread}"
                f"|{event.fwd_thread}|{event.name}|\n"
            )
            if re.search(r"Backward[01]", event.name):
                bwd_set.add(event.sequence_nr)
    # NOTE(review): was assertTrue(len(bwd_set), 13) -- a no-op check for
    # any non-empty set. assertEqual enforces the intended count of 13.
    self.assertEqual(len(bwd_set), 13)
def test_aot_grad_mode_mutation(self):
    """A compiled fn that disables grad mode mid-body must (a) leave grad
    mode disabled on return, like eager, and (b) still build grad_fn for
    the tensor computed while grad was enabled."""
    for compiler in ["aot_eager", "inductor"]:

        def f(x):
            y = x * x
            # Grad-mode mutation inside the compiled region.
            torch.set_grad_enabled(False)
            return y.clone(), y

        f_compiled = torch.compile(f, backend=compiler, fullgraph=True)

        torch.set_grad_enabled(True)
        x = torch.ones(3, requires_grad=True) * 3
        y_ref = f(x)
        self.assertEqual(torch.is_grad_enabled(), False)
        torch.set_grad_enabled(True)
        y = f_compiled(x)
        self.assertEqual(torch.is_grad_enabled(), False)
        torch.set_grad_enabled(True)

        self.assertEqual(y_ref, y)
        # The clone happened after grad was disabled -> no grad_fn;
        # y itself was computed with grad enabled -> has grad_fn.
        self.assertIsNone(y_ref[0].grad_fn)
        self.assertIsNone(y[0].grad_fn)
        self.assertIsNotNone(y_ref[1].grad_fn)
        self.assertIsNotNone(y[1].grad_fn)

        # Check that the grad computed for the inputs, given the input, is the same
        # The tangent to `y[0]`, which has grad_required=False, is irrelevant
        self.assertEqual(
            sum(y_ref[1].grad_fn(torch.tensor([-1.0, 2.0, 0.0]))),
            sum(
                x
                for x in y[1].grad_fn.apply(None, torch.tensor([-1.0, 2.0, 0.0]))
                if x is not None
            ),
        )
def test_aot_autograd_raises_invalid_leaf_set(self):
    """Calling ``set_()`` on a leaf that requires grad must raise under
    torch.compile, matching eager autograd's restriction."""

    @torch.compile
    def f(x):
        x.set_(torch.ones(2))

    # We still want to make sure that this raises
    x = torch.ones(2, requires_grad=True)
    with self.assertRaisesRegex(
        RuntimeError, "is being used in an in-place operation"
    ):
        f(x)
def test_aot_autograd_expand_mutation_functionalizes(self):
    """In-place mutation of one element of an expanded (stride-0) view must
    functionalize correctly: compiled output and the mutated base tensor
    must both match eager."""

    def fn(x):
        y = x.expand(3, *x.shape)
        # Writing through the expanded view mutates the shared base element.
        y[0, 0].add_(5)
        return y

    opt_fn = torch.compile(fn, backend="aot_eager")
    x = torch.arange(6)
    x_opt = x.detach().clone()
    self.assertEqual(fn(x), opt_fn(x_opt))
    self.assertEqual(x, x_opt)
def test_aot_autograd_expand_mutation_backwards(self):
    """Expand-view mutation must also produce correct gradients: compare
    forward output, mutated base, and z.grad against eager."""

    def fn(x, z):
        y = x.expand(3, *x.shape)
        # Mutation through the stride-0 expanded view.
        y[1, 1].mul_(5)
        ret = y * z
        return ret

    opt_fn = torch.compile(fn, backend="aot_eager")
    x = torch.arange(6, dtype=torch.float)
    z = x.detach().clone()
    x_opt = x.detach().clone()
    z_opt = x.detach().clone()

    z.requires_grad = True
    z_opt.requires_grad = True

    res = fn(x, z)
    opt_res = opt_fn(x_opt, z_opt)

    self.assertEqual(res, opt_res)

    res.sum().backward()
    opt_res.sum().backward()

    self.assertEqual(x, x_opt)
    self.assertEqual(z.grad, z_opt.grad)
def test_data_ptr_access_copy(self):
    """copy.copy of a fake tensor must succeed even when unsafe data_ptr
    access on fake tensors is disallowed."""
    import torch._functorch.config as functorch_config

    # With unsafe data_ptr access disabled, copy.copy would fail loudly if
    # it tried to touch the (fake) storage pointer.
    with functorch_config.patch(
        fake_tensor_allow_unsafe_data_ptr_access=False
    ), FakeTensorMode():
        original = torch.randn(3)
        duplicate = copy.copy(original)
    self.assertEqual(duplicate.shape, original.shape)
def test_data_ptr_access_fails_in_forward(self):
    """A custom op that calls ``x.data_ptr()`` in forward must raise
    "Cannot access data pointer" under fake/symbolic tracing and under
    torch.compile, whether x is a graph input or an intermediate."""
    with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
        torch.library.define(
            "mylib::foo_data_ptr_forward", "(Tensor x) -> Tensor", lib=lib
        )

        @torch.library.impl(
            "mylib::foo_data_ptr_forward", "CompositeImplicitAutograd", lib=lib
        )
        def _(x):
            # Deliberately touches the data pointer during tracing.
            x.data_ptr()
            return x.clone()

        x = torch.randn(3)

        def data_ptr_graph_input(x):
            r0 = torch.ops.mylib.foo_data_ptr_forward(x)
            return r0

        def data_ptr_graph_intermediate(x):
            y = x.clone()
            r0 = torch.ops.mylib.foo_data_ptr_forward(y)
            return r0

        tests = [data_ptr_graph_input, data_ptr_graph_intermediate]

        def ctx():
            return self.assertRaisesRegex(
                RuntimeError, "Cannot access data pointer"
            )

        for f in tests:
            with ctx():
                make_fx(f, tracing_mode="fake")(x)
            with ctx():
                make_fx(f, tracing_mode="symbolic")(x)
            with ctx():
                torch.compile(f, backend="eager", fullgraph=True)(x)
def test_data_ptr_access_fails_in_backward(self):
    """A custom op whose *backward* calls ``grad.data_ptr()`` must raise
    "Cannot access data pointer" when compiled with aot_eager (the backward
    is traced ahead of time with fake tensors)."""
    with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
        torch.library.define(
            "mylib::foo_data_ptr_backward", "(Tensor x) -> Tensor", lib=lib
        )

        backward_called = False

        class Foo(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x):
                return x.clone()

            @staticmethod
            def backward(ctx, grad):
                nonlocal backward_called
                backward_called = True
                # Deliberately touches the tangent's data pointer.
                grad.data_ptr()
                return grad.clone()

        @torch.library.impl(
            "mylib::foo_data_ptr_backward", "CompositeImplicitAutograd", lib=lib
        )
        def _(x):
            return Foo.apply(x)

        def f(x):
            return torch.ops.mylib.foo_data_ptr_backward(x)

        x = torch.randn(3, requires_grad=True)

        with self.assertRaisesRegex(RuntimeError, "Cannot access data pointer"):
            torch.compile(f, backend="aot_eager", fullgraph=True)(x)

        # The failure must come from the traced backward actually running.
        self.assertTrue(backward_called)
# We don't know how to catch multiple mutations to the same memory location
@unittest.expectedFailure
def test_aot_autograd_expand_mutation_error(self):
    """Expected failure: mutating a slice of an expanded view writes the
    same base elements multiple times; both eager and compiled paths
    should raise, but this is not yet detected."""

    def fn(x):
        y = x.expand(3, *x.shape)
        # Each base element is aliased 3 times along dim 0, so this add_
        # hits the same memory location repeatedly.
        y[0:3, 0].add_(5)
        return y

    opt_fn = torch.compile(fn, backend="aot_eager")
    x = torch.arange(6)
    x_opt = x.detach().clone()
    with self.assertRaises(Exception):
        fn(x)
    with self.assertRaises(Exception):
        opt_fn(x_opt)
@torch._functorch.config.patch(donated_buffer=True)
def test_donated_buffer1(self):
    """relu saves an activation-derived mask (`le`) for backward that is
    not visible to the user -> it should be marked as a donated buffer."""
    logger_name = "torch._functorch._aot_autograd.graph_compile"

    @torch.compile()
    def relu(x):
        return torch.nn.functional.relu(x)

    with self.assertLogs(logger_name, level="INFO") as captured:
        relu(torch.rand([3, 3], requires_grad=True)).sum().backward()

    if is_dynamic_shape_test(self._testMethodName):
        # an extra symint exists
        expected_msg = "bw_donated_idxs=[1]"
    else:
        expected_msg = "bw_donated_idxs=[0]"

    # le is a donated buffer from relu
    FileCheck().check(expected_msg).run("\n".join(captured.output))
@torch._functorch.config.patch("donated_buffer", True)
def test_donated_buffer2(self):
    """A saved tensor that is an *input* to the compiled region (the
    activation passed into g) must NOT be donated."""
    logger_name = "torch._functorch._aot_autograd.graph_compile"

    # we will reuse the graph for g across f1 and f2
    @torch.compile()
    def g(activation, param2):
        return torch.matmul(activation, param2)

    def f(inp, param1, param2):
        activation = inp + param1
        return g(activation, param2)

    inp = torch.ones(4, 4)
    param1 = torch.ones(4, 4, requires_grad=True)
    param2 = torch.ones(4, 4, requires_grad=True)

    with self.assertLogs(logger_name, level="INFO") as captured:
        f(inp, param1, param2).sum().backward()

    FileCheck().check("bw_donated_idxs=[]").run("\n".join(captured.output))
@torch._functorch.config.patch("donated_buffer", True)
def test_donated_buffer3(self):
    """Like test_donated_buffer2, but the saved tensor is also the output
    of exp outside the compiled region -- still not donatable."""
    logger_name = "torch._functorch._aot_autograd.graph_compile"

    # we will reuse the graph for g across f1 and f2
    @torch.compile()
    def g(activation, param2):
        return torch.matmul(activation, param2)

    def f(inp, param1, param2):
        # exp saves it output (the activation) for bw
        activation = torch.exp(inp + param1)
        return g(activation, param2)

    inp = torch.ones(4, 4)
    param1 = torch.ones(4, 4, requires_grad=True)
    param2 = torch.ones(4, 4, requires_grad=True)

    with self.assertLogs(logger_name, level="INFO") as captured:
        f(inp, param1, param2).sum().backward()

    FileCheck().check("bw_donated_idxs=[]").run("\n".join(captured.output))
@torch._functorch.config.patch("donated_buffer", True)
def test_donated_buffer4(self):
    """Inside a compiled module, the relu mask saved for backward is an
    intermediate invisible to the user -> donated."""
    logger_name = "torch._functorch._aot_autograd.graph_compile"

    class Mod(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.param = torch.nn.Parameter(torch.zeros([2, 2]))

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return torch.nn.functional.relu(x) + self.param

    mod = Mod()
    mod = torch.compile(mod)

    inp = torch.ones([2, 2], requires_grad=True)

    with self.assertLogs(logger_name, level="INFO") as captured:
        mod(inp).sum().backward()

    # Forward graph:
    #   %primals_1 : [num_users=1] = placeholder[target=primals_1]
    #   %primals_2 : [num_users=1] = placeholder[target=primals_2]
    #   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%primals_2,), kwargs = {})
    #   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu, %primals_1), kwargs = {})
    #   %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
    #   return [add, le]
    #
    # `le` is a donated buffer
    FileCheck().check("bw_donated_idxs=[0]").run("\n".join(captured.output))
@torch._functorch.config.patch("donated_buffer", True)
def test_donated_buffer5(self):
    """When backward saves both a graph input (primals_1) and an
    intermediate (le), only the intermediate is donated."""
    logger_name = "torch._functorch._aot_autograd.graph_compile"

    @torch.compile()
    def f(x, z):
        y = x.view(2, 3)
        z = torch.nn.functional.relu(z)
        return torch.mm(y, x) + z

    inp = [
        torch.rand([3, 2], requires_grad=True),
        torch.rand([2, 2], requires_grad=True),
    ]

    with self.assertLogs(logger_name, level="INFO") as captured:
        f(*inp).sum().backward()

    # Forward graph:
    #   %primals_1 : [num_users=3] = placeholder[target=primals_1]
    #   %primals_2 : [num_users=1] = placeholder[target=primals_2]
    #   %view : [num_users=1] = call_function[target=torch.ops.aten.view.default](args = (%primals_1, [2, 3]), kwargs = {})
    #   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%primals_2,), kwargs = {})
    #   %mm : [num_users=1] = call_function[target=torch.ops.aten.mm.default](args = (%view, %primals_1), kwargs = {})
    #   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm, %relu), kwargs = {})
    #   %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
    #   return [add, primals_1, le]
    #
    # `le` is a donated buffer but primals_1 is not.
    FileCheck().check("bw_donated_idxs=[1]").run("\n".join(captured.output))
@torch._functorch.config.patch("donated_buffer", True)
@torch._dynamo.config.patch("graph_break_on_nn_param_ctor", False)
def test_donated_buffer6(self):
    """A saved tensor that escapes as a user-visible output (the Parameter
    p is returned) must not be donated."""
    if is_dynamic_shape_test(self._testMethodName):
        # parameters should not be dynamic shape
        # torch._dynamo.exc.Unsupported: Parameter not python_constant:
        # SymNodeVariable() is not a constant
        return

    logger_name = "torch._functorch._aot_autograd.graph_compile"

    def fn(x):
        p = torch.nn.Parameter(x + 123)
        return p, p.sin()

    opt = torch.compile(fn, fullgraph=True)
    x = torch.randn(16)

    with self.assertLogs(logger_name, level="INFO") as captured:
        p, r = opt(x)
        r.sum().backward()

    FileCheck().check("bw_donated_idxs=[]").run("\n".join(captured.output))
@torch._functorch.config.patch("donated_buffer", True)
def test_donated_buffer_with_retain_or_create_graph1(self):
    """Repeated plain backward() calls (no retain_graph/create_graph) must
    work with donated buffers enabled."""
    # Gives non-empty bw_donated_idxs
    class Mod(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.param = torch.nn.Parameter(torch.zeros([3, 3]))

        def forward(self, x):
            return torch.nn.functional.relu(x) + self.param

    inp = torch.randn(3, 3, requires_grad=True)

    mod = torch.compile(Mod())
    for _ in range(5):
        # Each iteration runs a fresh forward, so plain backward is fine.
        mod(inp).sum().backward()
@torch._functorch.config.patch("donated_buffer", True)
def test_donated_buffer_with_retain_or_create_graph2(self):
    """retain_graph=True backward on a graph compiled *without* prior
    create_graph usage must succeed with donated buffers enabled."""
    # Gives non-empty bw_donated_idxs
    class Mod(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.param = torch.nn.Parameter(torch.zeros([3, 3]))

        def forward(self, x):
            return torch.nn.functional.relu(x) + self.param

    inp = torch.randn(3, 3, requires_grad=True)

    mod = torch.compile(Mod())
    out = mod(inp).sum()
    # Re-running backward over the same graph requires retain_graph=True.
    for _ in range(5):
        out.backward(retain_graph=True)
    out.backward()
@torch._functorch.config.patch("donated_buffer", True)
def test_donated_buffer_with_retain_or_create_graph3(self):
    """An initial backward(create_graph=True) followed by retain_graph
    backwards must succeed with donated buffers enabled."""
    # Gives non-empty bw_donated_idxs
    class Mod(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.param = torch.nn.Parameter(torch.zeros([3, 3]))

        def forward(self, x):
            return torch.nn.functional.relu(x) + self.param

    inp = torch.randn(3, 3, requires_grad=True)

    mod = torch.compile(Mod())
    # create_graph=True on the first backward; presumably this disables
    # donation for the compiled backward -- TODO confirm against graph4.
    mod(inp).sum().backward(create_graph=True)

    out = mod(inp).sum()
    for _ in range(5):
        out.backward(retain_graph=True)
    out.backward()
def test_autograd_function_tangent_mutation(self):
    """An autograd.Function whose backward mutates one incoming tangent
    (``grad1.copy_(grad2)``) must give the same outputs and input grads
    under torch.compile(aot_eager) as in eager.

    Consistency fix: the reference leaf was built with
    ``x.clone().detach()``; the rest of this file uses the recommended
    ``x.detach().clone()`` order, which detaches first so the clone is not
    recorded on the autograd graph. Values and behavior are identical.
    """

    class Foo(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            return x.clone(), x.clone()

        @staticmethod
        def backward(ctx, grad1, grad2):
            # Mutates the first tangent in place and returns it as the
            # grad for x.
            return grad1.copy_(grad2)

    def f(x):
        return Foo.apply(x)

    x = torch.randn(4, requires_grad=True)
    # detach().clone() (not clone().detach()) for consistency with the
    # other tests in this file.
    x_ref = x.detach().clone().requires_grad_()

    out_ref = f(x_ref)
    out = torch.compile(f, backend="aot_eager", fullgraph=True)(x)
    self.assertEqual(out_ref, out)
    self.assertEqual(x_ref, x)

    (out[0] + out[1]).sum().backward()
    (out_ref[0] + out_ref[1]).sum().backward()
    self.assertEqual(x_ref.grad, x.grad)
@torch._functorch.config.patch("donated_buffer", True)
def test_donated_buffer_with_retain_or_create_graph4(self):
    """retain_graph=True backward on a compiled backward that has donated
    buffers must raise the dedicated error message."""
    # Gives non-empty bw_donated_idxs
    class Mod(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.param = torch.nn.Parameter(torch.zeros([3, 3]))

        def forward(self, x):
            return torch.nn.functional.relu(x) + self.param

    inp = torch.randn(3, 3, requires_grad=True)

    mod = torch.compile(Mod())
    mod(inp).sum().backward()

    out = mod(inp).sum()
    with self.assertRaisesRegex(
        RuntimeError,
        r"This backward function was compiled with non-empty donated "
        r"buffers which requires create_graph=False and retain_graph=False. "
        r"Please keep backward\(create_graph=False, retain_graph=False\) "
        r"across all backward\(\) function calls, or set "
        r"torch._functorch.config.donated_buffer=False to disable "
        r"donated buffer.",
    ):
        out.backward(retain_graph=True)
def _get_guard_failure_on_overlapping_view_inputs(self, f, argsfn1, argsfn2):
    """Compile and run ``f`` twice -- with args from ``argsfn1``, then from
    ``argsfn2`` -- expecting exactly one guard failure on the second set.

    Returns the single guard-failure string for the caller to assert on.
    ``argsfn1``/``argsfn2`` each map a base tensor to a tuple of views.
    """
    # Compile and run f twice, using the arguments generated by argsfn1 and argsfn2.
    #
    # This function expects that the second argument set will trigger a recompilation,
    # which shall be returned in the end.
    guard_failure = []

    def guard_fail_fn(failure):
        nonlocal guard_failure
        guard_failure.append(failure[0])

    input = torch.ones(20)
    opt_input = input.clone().detach()

    opt_f = torch._dynamo.optimize(
        "aot_eager", dynamic=True, guard_fail_fn=guard_fail_fn
    )(f)

    out0 = f(*argsfn1(input))
    opt_out0 = opt_f(*argsfn1(opt_input))
    self.assertEqual(out0, opt_out0)

    out1 = f(*argsfn2(input))
    opt_out1 = opt_f(*argsfn2(opt_input))
    self.assertEqual(out1, opt_out1)

    # Check that we only have one instance of guard failure, and that it is due to
    # the overlapping state not matching.
    self.assertEqual(len(guard_failure), 1)
    return guard_failure[0]
def test_inputs_overlapping_with_mutation_recompile(self):
    """The storage-overlap guard must fail (and recompile) when a function
    compiled with overlapping mutated views is re-run with
    non-overlapping views."""
    # Check that the overlap guard actually fails when we run the second time with
    # args that have no storage overlap.

    def f(*args):
        for a in args:
            a.add_(1)
        return args[0]

    def overlapping_args(x):
        # x[7:13] and x[9:] share storage elements 9..12.
        return x[:5], x[7:13], x[9:]

    def non_overlapping_args(x):
        return x[:5], x[7:13], x[13:15]

    guard_failure = self._get_guard_failure_on_overlapping_view_inputs(
        f, overlapping_args, non_overlapping_args
    )
    self.assertExpectedInline(
        guard_failure,
        """0/0: check_overlapping(overlapping=[args[1], args[2]], non_overlapping=[args[0]])""",
    )
def test_different_inputs_overlapping_set_with_mutation(self):
    """The overlap guard must also fail when the *set* of overlapping
    arguments grows between runs (a,b overlapping -> a,b,c overlapping)."""
    # Check that the overlap guard actually fails when we run the second time with
    # arguments whose overlapping set is a superset of the set of arguments used in
    # the first time.

    def f(a, b, c, d):
        a.mul_(2)
        return a + b + c + d

    def a_b_overlapping_args(x):
        return x[:5], x[4:9], x[10:15], x[15:]

    def a_b_c_overlapping_args(x):
        # x[8:13] now also overlaps with b (x[4:9]).
        return x[:5], x[4:9], x[8:13], x[15:]

    guard_failure = self._get_guard_failure_on_overlapping_view_inputs(
        f, a_b_overlapping_args, a_b_c_overlapping_args
    )
    self.assertExpectedInline(
        guard_failure,
        """0/0: check_overlapping(overlapping=[a, b], non_overlapping=[c, d])""",
    )
def _test_no_storage_overlap_guards(self, f, argsfn):
    """Compile ``f`` (aot_eager, dynamic) on the args produced by
    ``argsfn`` and assert that no StorageOverlap guard was recorded and
    only one frame was compiled."""
    # Compile f with aot_eager backend, and run it with the argument set returned by
    # argsfn function. Meanwhile, keep track of the aotautograd_gurads, so as to make
    # sure no StorageOverlap guard was added.

    class Compiler:
        def __init__(self):
            self.counter = CompileCounterWithBackend("aot_eager")

        def __call__(self, *args, **kwargs):
            # Instead of checking here, we need to check afterwards, since the
            # StorageOverlap guard is only added later.
            self.guards = TracingContext.get().guards_context.aotautograd_guards
            return self.counter(*args, **kwargs)

    compiler = Compiler()

    input = torch.arange(20)
    opt_input = input.clone().detach()

    out = f(*argsfn(input))
    opt_out = torch.compile(f, backend=compiler, dynamic=True)(*argsfn(opt_input))
    self.assertEqual(out, opt_out)
    self.assertEqual(compiler.counter.frame_count, 1)

    # Check none of the AOTAutograd guards are StorageOverlap guards.
    for g in compiler.guards:
        self.assertNotIsInstance(g, StorageOverlap)
def test_no_storage_overlap_guards_no_mutation(self):
def f(a, b):
return a + b
def overlapping_args(input):
return input[:10], input[5:15]
self._test_no_storage_overlap_guards(f, overlapping_args)
def test_no_storage_overlap_guards_no_aliasing(self):
def f(a, b):
a.add_(1)
b.add_(1)
return a
def non_overlapping_args(input):
return input[:10], torch.arange(20)[5:15]
self._test_no_storage_overlap_guards(f, non_overlapping_args)
def test_inputs_overlapping_with_mutation_stress(self):
# Stress test for StorageOverlap guard.
#
# Create 100 non-overlapping tensor views, and an extra one that overlaps with
# the first 50 of them. Then, make sure that none of the produced ShapeEnv
# guards came from the overlapping computation.
def f(*args):
for a in args:
a.add_(1)
return args[0]
def overlapping_args(input):
return (
# 100 non-overlapping tensors of size 10.
*input.split(10),
# A tensor that overlaps with half of the tensors above.
input[4:44],
)
class Compiler:
def __init__(self):
self.counter = CompileCounterWithBackend("aot_eager")
def __call__(self, *args, **kwargs):
self.compile_context = CompileContext.get()
return self.counter(*args, **kwargs)
compiler = Compiler()
opt_f = torch.compile(f, backend=compiler, dynamic=True)
input = torch.arange(1_000)
opt_input = input.clone().detach()
out0 = f(*overlapping_args(input))
opt_out0 = opt_f(*overlapping_args(opt_input))
self.assertEqual(out0, opt_out0)
# Check that none of the produced ShapeEnv guards came from compute_overlapping_inputs
# function.
overlapping_computation_fn = "compute_overlapping_inputs"
shape_env_guards = compiler.compile_context.shape_env_guards
for g in shape_env_guards:
self.assertNotIn(overlapping_computation_fn, g)
# Check that we have no more than 500 ShapeEnv guards.
#
# Note: this is an arbitrary number. So, we might have to change it in the future.
# However, at the time this change was introduced, it went down from 15154 to 403.
self.assertLess(len(shape_env_guards), 1000)
# See # https://github.com/pytorch/pytorch/issues/164814
def test_aot_autograd_stride_reconstruction_on_zero_dim_dynamic_shaped_tensor(
self,
) -> None:
def repro(sentinel: torch.Tensor, skip_squeeze: bool = False) -> torch.Tensor:
x = torch.unique(torch.ones(1))
x = torch.reshape(x, [1])
if not skip_squeeze:
x = torch.squeeze(x) # 0-d tensor
return x * sentinel
# Grad required to trigger the issue (need to replay stride)
sentinel = torch.tensor(1.0, requires_grad=True)
eager_sq = repro(sentinel)
comp_aot_sq = torch.compile(repro, backend="aot_eager", fullgraph=True)(
sentinel
)
comp_ind_sq = torch.compile(repro, backend="inductor", fullgraph=True)(sentinel)
self.assertEqual(eager_sq, comp_aot_sq)
self.assertEqual(eager_sq, comp_ind_sq)
self.assertEqual(eager_sq.stride(), comp_ind_sq.stride())
# Now check semantics preserved when skipping squeeze
eager_no_sq = repro(sentinel, skip_squeeze=True)
comp_aot_no_sq = torch.compile(repro, backend="aot_eager", fullgraph=True)(
sentinel, skip_squeeze=True
)
comp_ind_no_sq = torch.compile(repro, backend="inductor", fullgraph=True)(
sentinel, skip_squeeze=True
)
self.assertEqual(eager_no_sq, comp_aot_no_sq)
self.assertEqual(eager_no_sq, comp_ind_no_sq)
self.assertEqual(eager_no_sq.stride(), comp_ind_no_sq.stride())
@torch._dynamo.config.patch(capture_scalar_outputs=True)
@torch._dynamo.config.patch(capture_dynamic_output_shape_ops=True)
def test_unbacked_activation_specialized_in_inductor(self):
"""Test compilation with unbacked operations like nonzero."""
torch._dynamo.reset()
def fuzzed_program(arg_0, sentinel):
var_node_1 = arg_0
var_node_5 = torch.full((1, 2), -66, dtype=torch.int32)
var_node_6 = torch.full((1, 2), 77, dtype=torch.int64)
var_node_4 = torch.ops.aten.add(var_node_5, var_node_6)
var_node_7 = torch.full((1, 2), -64, dtype=torch.int32)
var_node_3 = torch.ops.aten.mul(var_node_4, var_node_7)
var_node_9 = torch.full((3, 4), False, dtype=torch.bool)
var_node_8 = torch.nonzero(var_node_9)
var_node_2 = torch.ops.aten.add(var_node_3, var_node_8)
var_node_0 = torch.ops.aten.div(var_node_1, var_node_2)
result = var_node_0 * sentinel
if result.is_complex():
result = result.real
return result
sentinel = torch.tensor(1.0, requires_grad=True)
arg_0 = torch.randint(0, 3, (1, 2), dtype=torch.int64)
args = (arg_0,) + (sentinel,)
result_original = fuzzed_program(*args)
compiled_program = torch.compile(fuzzed_program, fullgraph=True, dynamic=True)
result_compiled = compiled_program(*args)
self.assertTrue(torch.allclose(result_original, result_compiled))
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| AotAutogradFallbackTests |
python | pytorch__pytorch | test/cpp_extensions/open_registration_extension/torch_openreg/tests/test_streams.py | {
"start": 140,
"end": 2509
} | class ____(TestCase):
@skipIfTorchDynamo()
def test_stream_create(self):
stream = torch.Stream(device="openreg")
self.assertEqual(stream.device_index, torch.openreg.current_device())
stream = torch.Stream(device="openreg:1")
self.assertEqual(stream.device.type, "openreg")
self.assertEqual(stream.device_index, 1)
stream = torch.Stream(1)
self.assertEqual(stream.device.type, "openreg")
self.assertEqual(stream.device_index, 1)
stream1 = torch.Stream(
stream_id=stream.stream_id,
device_type=stream.device_type,
device_index=stream.device_index,
)
self.assertEqual(stream, stream1)
@skipIfTorchDynamo()
def test_stream_context(self):
with torch.Stream(device="openreg:1") as stream:
self.assertEqual(torch.accelerator.current_stream(), stream)
@skipIfTorchDynamo()
def test_stream_switch(self):
stream1 = torch.Stream(device="openreg:0")
torch.accelerator.set_stream(stream1)
current_stream = torch.accelerator.current_stream()
self.assertEqual(current_stream, stream1)
stream2 = torch.Stream(device="openreg:1")
torch.accelerator.set_stream(stream2)
current_stream = torch.accelerator.current_stream()
self.assertEqual(current_stream, stream2)
@skipIfTorchDynamo()
def test_stream_synchronize(self):
stream = torch.Stream(device="openreg:1")
self.assertEqual(True, stream.query())
event = torch.Event()
event.record(stream)
stream.synchronize()
self.assertEqual(True, stream.query())
@skipIfTorchDynamo()
def test_stream_repr(self):
stream = torch.Stream(device="openreg:1")
self.assertTrue(
"torch.Stream device_type=openreg, device_index=1" in repr(stream)
)
@skipIfTorchDynamo()
def test_stream_wait_stream(self):
stream_1 = torch.Stream(device="openreg:0")
stream_2 = torch.Stream(device="openreg:1")
stream_2.wait_stream(stream_1)
@skipIfTorchDynamo()
def test_stream_wait_event(self):
s1 = torch.Stream(device="openreg")
s2 = torch.Stream(device="openreg")
e = s1.record_event()
s2.wait_event(e)
if __name__ == "__main__":
run_tests()
| TestStream |
python | py-pdf__pypdf | pypdf/errors.py | {
"start": 839,
"end": 959
} | class ____(PdfReadError):
"""Raised when there is an issue reading the stream of data in a PDF file."""
| PdfStreamError |
python | dabeaz-course__practical-python | Solutions/6_12/portfolio.py | {
"start": 16,
"end": 722
} | class ____:
def __init__(self, holdings):
self._holdings = holdings
def __iter__(self):
return self._holdings.__iter__()
def __len__(self):
return len(self._holdings)
def __getitem__(self, index):
return self._holdings[index]
def __contains__(self, name):
return any([s.name == name for s in self._holdings])
@property
def total_cost(self):
return sum([s.shares * s.price for s in self._holdings])
def tabulate_shares(self):
from collections import Counter
total_shares = Counter()
for s in self._holdings:
total_shares[s.name] += s.shares
return total_shares
| Portfolio |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-bing-ads/unit_tests/integrations/test_hourly_reports.py | {
"start": 63217,
"end": 70502
} | class ____(HourlyReportsTestWithStateChangesAfterMigration):
stream_name = "user_location_performance_report_hourly"
report_file = "user_location_performance_report_hourly"
records_number = 24
state_file = "hourly_reports_state"
incremental_report_file = "user_location_performance_report_hourly_incremental"
report_file_with_records_further_start_date = "user_location_performance_report_hourly_with_records_further_config_start_date"
state_file_legacy = "hourly_reports_state_legacy"
state_file_after_migration = "hourly_reports_state_after_migration"
state_file_after_migration_with_cursor_further_config_start_date = (
"hourly_reports_state_after_migration_with_cursor_further_config_start_date"
)
incremental_report_file_with_records_further_cursor = "user_location_performance_report_hourly_incremental_with_records_further_cursor"
def mock_report_apis(self):
self.mock_user_query_api(response_template="user_query")
self.mock_accounts_search_api(
response_template="accounts_search_for_report",
body=b'{"PageInfo": {"Index": 0, "Size": 1000}, "Predicates": [{"Field": "UserId", "Operator": "Equals", "Value": "123456789"}], "ReturnAdditionalFields": "TaxCertificate,AccountMode"}',
)
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "UserLocationPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "UserLocationPerformanceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "CampaignName", "CampaignId", "AdGroupName", "AdGroupId", "Country", "State", "MetroArea", "CurrencyCode", "AdDistribution", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "AveragePosition", "ProximityTargetLocation", "Radius", "Language", "City", "QueryIntentCountry", "QueryIntentState", "QueryIntentCity", "QueryIntentDMA", "BidMatchType", "DeliveredMatchType", "Network", "TopVsOther", "DeviceType", "DeviceOS", "Assists", "Conversions", "ConversionRate", "Revenue", "ReturnOnAdSpend", "CostPerConversion", "CostPerAssist", "RevenuePerConversion", "RevenuePerAssist", "County", "PostalCode", "QueryIntentCounty", "QueryIntentPostalCode", "LocationId", "QueryIntentLocationId", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllReturnOnAdSpend", "AllRevenuePerConversion", "ViewThroughConversions", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "AverageCpm", "ConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "Neighborhood", "QueryIntentNeighborhood", "ViewThroughRevenue", "CampaignType", "AssetGroupId", "AssetGroupName"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2024}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
# for second read
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "UserLocationPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "UserLocationPerformanceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "CampaignName", "CampaignId", "AdGroupName", "AdGroupId", "Country", "State", "MetroArea", "CurrencyCode", "AdDistribution", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "AveragePosition", "ProximityTargetLocation", "Radius", "Language", "City", "QueryIntentCountry", "QueryIntentState", "QueryIntentCity", "QueryIntentDMA", "BidMatchType", "DeliveredMatchType", "Network", "TopVsOther", "DeviceType", "DeviceOS", "Assists", "Conversions", "ConversionRate", "Revenue", "ReturnOnAdSpend", "CostPerConversion", "CostPerAssist", "RevenuePerConversion", "RevenuePerAssist", "County", "PostalCode", "QueryIntentCounty", "QueryIntentPostalCode", "LocationId", "QueryIntentLocationId", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllReturnOnAdSpend", "AllRevenuePerConversion", "ViewThroughConversions", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "AverageCpm", "ConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "Neighborhood", "QueryIntentNeighborhood", "ViewThroughRevenue", "CampaignType", "AssetGroupId", "AssetGroupName"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 6, "Month": 5, "Year": 2024}, "CustomDateRangeEnd": {"Day": 8, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
# for no config start date test
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "UserLocationPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "UserLocationPerformanceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "CampaignName", "CampaignId", "AdGroupName", "AdGroupId", "Country", "State", "MetroArea", "CurrencyCode", "AdDistribution", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "AveragePosition", "ProximityTargetLocation", "Radius", "Language", "City", "QueryIntentCountry", "QueryIntentState", "QueryIntentCity", "QueryIntentDMA", "BidMatchType", "DeliveredMatchType", "Network", "TopVsOther", "DeviceType", "DeviceOS", "Assists", "Conversions", "ConversionRate", "Revenue", "ReturnOnAdSpend", "CostPerConversion", "CostPerAssist", "RevenuePerConversion", "RevenuePerAssist", "County", "PostalCode", "QueryIntentCounty", "QueryIntentPostalCode", "LocationId", "QueryIntentLocationId", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllReturnOnAdSpend", "AllRevenuePerConversion", "ViewThroughConversions", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "AverageCpm", "ConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "Neighborhood", "QueryIntentNeighborhood", "ViewThroughRevenue", "CampaignType", "AssetGroupId", "AssetGroupName"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2023}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
self.mock_generate_report_api(
endpoint="Poll", response_template="generate_report_poll", body=b'{"ReportRequestId": "thisisthereport_requestid"}'
)
| TestUserLocationPerformanceReportHourlyStream |
python | modin-project__modin | asv_bench/benchmarks/scalability/scalability_benchmarks.py | {
"start": 1495,
"end": 1986
} | class ____:
param_names = ["shape", "cpus"]
params = [
get_benchmark_shapes("TimeFromPandas"),
[4, 16, 32],
]
def setup(self, shape, cpus):
self.data = pandas.DataFrame(gen_data("int", *shape, RAND_LOW, RAND_HIGH))
from modin.config import NPartitions
NPartitions.get = lambda: cpus
# trigger ray init
pd.DataFrame([])
def time_from_pandas(self, shape, cpus):
execute(from_pandas(self.data))
| TimeFromPandas |
python | pytorch__pytorch | torch/_dynamo/source.py | {
"start": 8270,
"end": 8653
} | class ____(Source):
global_name: str
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen.append_output(codegen.create_load_global(self.global_name, add=True))
def guard_source(self) -> GuardSource:
return GuardSource.GLOBAL
def name(self) -> str:
return f"G[{repr(self.global_name)}]"
@dataclasses.dataclass(frozen=True)
| GlobalSource |
python | plotly__plotly.py | plotly/graph_objs/histogram/_insidetextfont.py | {
"start": 233,
"end": 9946
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "histogram"
_path_str = "histogram.insidetextfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Insidetextfont object
Sets the font used for `text` lying inside the bar.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram.Insidetextfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Insidetextfont
"""
super().__init__("insidetextfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.histogram.Insidetextfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram.Insidetextfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Insidetextfont |
python | django-debug-toolbar__django-debug-toolbar | debug_toolbar/panels/request.py | {
"start": 243,
"end": 1991
} | class ____(Panel):
"""
A panel to display request variables (POST/GET, session, cookies).
"""
template = "debug_toolbar/panels/request.html"
title = _("Request")
@property
def nav_subtitle(self):
"""
Show abbreviated name of view function as subtitle
"""
view_func = self.get_stats().get("view_func", "")
return view_func.rsplit(".", 1)[-1]
def generate_stats(self, request, response):
self.record_stats(
{
"get": sanitize_and_sort_request_vars(request.GET),
"post": sanitize_and_sort_request_vars(request.POST),
"cookies": sanitize_and_sort_request_vars(request.COOKIES),
}
)
view_info = {
"view_func": _("<no view>"),
"view_args": "None",
"view_kwargs": "None",
"view_urlname": "None",
}
try:
match = resolve(request.path_info)
func, args, kwargs = match
view_info["view_func"] = get_name_from_obj(func)
view_info["view_args"] = args
view_info["view_kwargs"] = kwargs
if getattr(match, "url_name", False):
url_name = match.url_name
if match.namespaces:
url_name = ":".join([*match.namespaces, url_name])
else:
url_name = _("<unavailable>")
view_info["view_urlname"] = url_name
except Http404:
pass
self.record_stats(view_info)
if hasattr(request, "session"):
session_data = dict(request.session)
self.record_stats({"session": sanitize_and_sort_request_vars(session_data)})
| RequestPanel |
python | huggingface__transformers | src/transformers/models/cpmant/modeling_cpmant.py | {
"start": 28514,
"end": 32344
} | class ____(CpmAntPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "cpmant.input_embedding.weight"}
def __init__(self, config: CpmAntConfig):
super().__init__(config)
self.cpmant = CpmAntModel(config)
# lm_head.weight is tied to cpmant.input_embedding.weight
self.lm_head = nn.Linear(
config.hidden_size, config.vocab_size + config.prompt_types * config.prompt_length, bias=False
)
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
return_dict: Optional[bool] = None,
attention_mask: Optional[torch.Tensor] = None, # dummy parameter for text-generation pipeline
cache_position: Optional[torch.Tensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> Union[tuple, CausalLMOutputWithPast]:
r"""
input_ids (`torch.Tensor` of shape `(batch_size, seq_len)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`CPMAntTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss.
Example:
Text Generation with CpmAntForCausalLM.
```python
>>> from transformers import CPMAntTokenizer, CpmAntForCausalLM
>>> texts = "今天天气不错,"
>>> model = CpmAntForCausalLM.from_pretrained("openbmb/cpm-ant-10b")
>>> tokenizer = CPMAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
>>> input_ids = tokenizer(texts, return_tensors="pt")
>>> outputs = model.generate(**input_ids)
>>> output_texts = tokenizer.batch_decode(outputs)
>>> print(output_texts)
['今天天气不错,阳光明媚,我和妈妈一起去超市买东西。\n在超市里,我看到了一个很好玩的玩具,它的名字叫“机器人”。它有一个圆圆的脑袋,两只圆圆的眼睛,还有一个圆圆的']
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
model_output = self.cpmant(
input_ids,
output_attentions,
output_hidden_states,
past_key_values,
use_cache,
return_dict,
cache_position,
)
hidden_states = model_output.last_hidden_state if return_dict else model_output[0]
# Only compute necessary logits
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss_func = CrossEntropyLoss()
loss = loss_func(logits.view(-1, logits.size(-1)), labels.view(-1))
if not return_dict:
output = (logits,) + model_output[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=model_output.past_key_values,
hidden_states=model_output.hidden_states,
attentions=model_output.attentions,
)
def get_input_embeddings(self):
return self.cpmant.input_embedding
def set_input_embeddings(self, embeddings):
self.cpmant.input_embedding = embeddings
__all__ = ["CpmAntForCausalLM", "CpmAntModel", "CpmAntPreTrainedModel"]
| CpmAntForCausalLM |
python | tensorflow__tensorflow | tensorflow/lite/python/convert_test.py | {
"start": 9210,
"end": 18463
} | class ____(test_util.TensorFlowTestCase):
"""Test the hint to stub functionality."""
def _getGraphOpTypes(self, graphdef, output_nodes):
"""Returns used op types in `graphdef` reachable from `output_nodes`.
This is used to check that after the stub transformation the expected
nodes are there.
NOTE: this is not a exact test that the graph is the correct output, but
it balances compact expressibility of test with sanity checking.
Args:
graphdef: TensorFlow proto graphdef.
output_nodes: A list of output node names that we need to reach.
Returns:
A set of node types reachable from `output_nodes`.
"""
name_to_input_name, name_to_node, _ = _extract_graph_summary(graphdef)
# Find all nodes that are needed by the outputs
used_node_names = _bfs_for_reachable_nodes(output_nodes, name_to_input_name)
return set([name_to_node[node_name].op for node_name in used_node_names])
def _countIdentities(self, nodes):
"""Count the number of "Identity" op types in the list of proto nodes.
Args:
nodes: NodeDefs of the graph.
Returns:
The number of nodes with op type "Identity" found.
"""
return len([x for x in nodes if x.op == "Identity"])
def testSwishLiteHint(self):
"""Makes a custom op swish and makes sure it gets converted as a unit."""
with ops.Graph().as_default():
image = array_ops.constant([1.0, 2.0, 3.0, 4.0])
swish_scale = array_ops.constant(1.0)
def _swish(input_tensor, scale):
custom = op_hint.OpHint("cool_activation")
input_tensor, scale = custom.add_inputs(input_tensor, scale)
output = math_ops.sigmoid(input_tensor) * input_tensor * scale
(output,) = custom.add_outputs(output)
return output
output = array_ops.identity(
_swish(image, swish_scale), name="ModelOutput"
)
with self.cached_session() as sess:
# check if identities have been put into the graph (2 input, 1 output,
# and 1 final output).
self.assertEqual(self._countIdentities(sess.graph_def.node), 4)
stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
graph_def=sess.graph_def
)
self.assertEqual(
self._getGraphOpTypes(
stubbed_graphdef,
output_nodes=[op_hint._tensor_name_base(output.name)],
),
set(["cool_activation", "Const", "Identity"]),
)
def testScaleAndBiasAndIdentity(self):
"""This tests a scaled add which has 3 inputs and 2 outputs."""
with ops.Graph().as_default():
a = array_ops.constant(1.0)
x = array_ops.constant([2.0, 3.0])
b = array_ops.constant([4.0, 5.0])
def _scaled_and_bias_and_identity(a, x, b):
custom = op_hint.OpHint("scale_and_bias_and_identity")
a, x, b = custom.add_inputs(a, x, b)
return custom.add_outputs(a * x + b, x)
output = array_ops.identity(
_scaled_and_bias_and_identity(a, x, b), name="ModelOutput"
)
with self.cached_session() as sess:
# make sure one identity for each input (3) and output (2) => 3 + 2 = 5
# +1 for the final output
self.assertEqual(self._countIdentities(sess.graph_def.node), 6)
stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
graph_def=sess.graph_def
)
self.assertEqual(
self._getGraphOpTypes(
stubbed_graphdef,
output_nodes=[op_hint._tensor_name_base(output.name)],
),
set(["scale_and_bias_and_identity", "Const", "Identity", "Pack"]),
)
def testTwoFunctions(self):
"""Tests if two functions are converted correctly."""
with ops.Graph().as_default():
a = array_ops.constant([1.0])
b = array_ops.constant([1.0])
def _double_values(x):
custom = op_hint.OpHint("add_test")
(x,) = custom.add_inputs(x)
output = math_ops.multiply(x, x)
(output,) = custom.add_outputs(output)
return output
output = array_ops.identity(
math_ops.add(_double_values(a), _double_values(b)), name="ModelOutput"
)
with self.cached_session() as sess:
# make sure one identity for each input (2) and output (2) => 2 + 2
# +1 for the final output
self.assertEqual(self._countIdentities(sess.graph_def.node), 5)
stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
graph_def=sess.graph_def
)
self.assertEqual(
self._getGraphOpTypes(
stubbed_graphdef,
output_nodes=[op_hint._tensor_name_base(output.name)],
),
set(["add_test", "Const", "Identity", "AddV2"]),
)
def _get_input_index(self, x):
return x.op.node_def.attr[op_hint.OpHint.FUNCTION_INPUT_INDEX_ATTR].i
def _get_output_index(self, x):
return x.op.node_def.attr[op_hint.OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i
def _get_sort_index(self, x):
return x.op.node_def.attr[op_hint.OpHint.FUNCTION_SORT_INDEX_ATTR].i
def testTags(self):
"""Test if multiple args with the same tag are grouped."""
with ops.Graph().as_default():
a = array_ops.constant([1.0])
b = array_ops.constant([2.0])
c = array_ops.constant([3.0])
d = array_ops.constant([4.0])
custom = op_hint.OpHint("test_tag")
a = custom.add_input(
a, tag="mytag", aggregate=op_hint.OpHint.AGGREGATE_STACK
)
(b,) = custom.add_inputs(b)
c = custom.add_input(
c, tag="mytag", aggregate=op_hint.OpHint.AGGREGATE_STACK
)
d = custom.add_input(
d, tag="mytag2", aggregate=op_hint.OpHint.AGGREGATE_STACK
)
res = math_ops.add(math_ops.mul(a, b), math_ops.mul(c, b))
custom.add_outputs([res])
with self.cached_session():
self.assertEqual(self._get_input_index(a), 0)
self.assertEqual(self._get_sort_index(a), 0)
self.assertEqual(self._get_input_index(b), 1)
self.assertEqual(self._get_sort_index(b), 0)
self.assertEqual(self._get_input_index(c), 0)
self.assertEqual(self._get_sort_index(c), 1)
def testOverrideIndex(self):
with ops.Graph().as_default():
a = array_ops.constant([1.0])
b = array_ops.constant([2.0])
c = array_ops.constant([3.0])
custom = op_hint.OpHint("test_override")
b = custom.add_input(b) # should auto assign 0
a = custom.add_input(a, index_override=1)
c = custom.add_input(c) # should auto assign 2
with self.cached_session():
self.assertEqual(self._get_input_index(a), 1)
self.assertEqual(self._get_input_index(b), 0)
self.assertEqual(self._get_input_index(c), 2)
def testAggregate(self):
with ops.Graph().as_default():
a = array_ops.constant([3.0, 4.0])
b = array_ops.constant([5.0, 6.0])
hint = op_hint.OpHint("agg")
a0, a1 = array_ops_stack.unstack(a)
b0, b1 = array_ops_stack.unstack(b)
a0 = hint.add_input(a0, tag="c", aggregate=op_hint.OpHint.AGGREGATE_STACK)
b0 = hint.add_input(b0, tag="n", aggregate=op_hint.OpHint.AGGREGATE_STACK)
a1 = hint.add_input(a1, tag="c", aggregate=op_hint.OpHint.AGGREGATE_STACK)
b1 = hint.add_input(b1, tag="n", aggregate=op_hint.OpHint.AGGREGATE_STACK)
c0 = math_ops.add(a0, b0, name="addleft")
c1 = math_ops.add(a1, b1, name="addright")
c0 = hint.add_output(
c0, tag="out", aggregate=op_hint.OpHint.AGGREGATE_STACK
)
c1 = hint.add_output(
c1, tag="out", aggregate=op_hint.OpHint.AGGREGATE_STACK
)
curr = array_ops_stack.stack([c0, c1])
output = array_ops.identity(curr, name="FINAL_OUTPUT")
with self.cached_session() as sess:
stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
graph_def=sess.graph_def
)
self.assertEqual(
self._getGraphOpTypes(
stubbed_graphdef,
output_nodes=[op_hint._tensor_name_base(output.name)],
),
set(["agg", "Const", "Identity"]),
)
def testFindHintedOutputNodes(self):
"""Test if all hinted output nodes are correctly found."""
with ops.Graph().as_default():
def _build_ophinted_op(name, input1, input2):
custom_op = op_hint.OpHint(name)
input1 = custom_op.add_input(input1)
input2 = custom_op.add_input(input2)
output = math_ops.mul(input1, input2)
return custom_op.add_output(output)
output_1 = _build_ophinted_op(
"custom_op_1", array_ops.constant([1.0]), array_ops.constant([2.0])
)
output_2 = _build_ophinted_op(
"custom_op_2", array_ops.constant([3.0]), array_ops.constant([4.0])
)
with self.cached_session() as sess:
hinted_outputs_nodes = op_hint.find_all_hinted_output_nodes(sess)
expected_hinted_output_nodes = [
_node_name(output_1.name),
_node_name(output_2.name),
]
self.assertEqual(
len(hinted_outputs_nodes), len(expected_hinted_output_nodes)
)
if __name__ == "__main__":
test.main()
| ConvertTestOpHint |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF009.py | {
"start": 3248,
"end": 3339
} | class ____:
@dataclass(frozen=True)
class D:
foo: int = 1
d: D = D() # OK | C |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 825238,
"end": 826020
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for OrganizationInvitation."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("OrganizationInvitationEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("OrganizationInvitation"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| OrganizationInvitationConnection |
python | pytorch__pytorch | torch/_dynamo/variables/functions.py | {
"start": 109570,
"end": 110139
} | class ____(VariableTracker):
def call_function(
self,
tx: "InstructionTranslator",
args: Sequence[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
tensor = kwargs["tensor"] if "tensor" in kwargs else args[0]
block_shape = kwargs["block_shape"] if "block_shape" in kwargs else args[1]
return TMADescriptorStableVariable(
tensor=tensor, # type: ignore[arg-type]
block_shape=block_shape, # type: ignore[arg-type]
)
| CreateTMADescriptorStableVariable |
python | ray-project__ray | rllib/examples/algorithms/classes/appo_w_shared_data_actor.py | {
"start": 200,
"end": 839
} | class ____:
"""Simple example of an actor that's accessible from all other actors of an algo.
Exposes remote APIs `put` and `get` to other actors for storing and retrieving
arbitrary data.
"""
def __init__(self):
self.storage = {}
def get(self, key, delete: bool = False):
value = self.storage.get(key)
if delete and key in self.storage:
del self.storage[key]
return value
def put(self, key, value):
self.storage[key] = value
def get_state(self):
return self.storage
def set_state(self, state):
self.storage = state
| SharedDataActor |
python | celery__celery | t/unit/events/test_snapshot.py | {
"start": 332,
"end": 2262
} | class ____:
def setup_method(self):
self.state = self.app.events.State()
def test_constructor(self):
x = Polaroid(self.state, app=self.app)
assert x.app is self.app
assert x.state is self.state
assert x.freq
assert x.cleanup_freq
assert x.logger
assert not x.maxrate
def test_install_timers(self):
x = Polaroid(self.state, app=self.app)
x.timer = timer
x.__exit__()
x.__enter__()
assert x.capture in MockTimer.installed
assert x.cleanup in MockTimer.installed
x._tref.cancel.assert_not_called()
x._ctref.cancel.assert_not_called()
x.__exit__()
x._tref.cancel.assert_called()
x._ctref.cancel.assert_called()
x._tref.assert_called()
x._ctref.assert_not_called()
def test_cleanup(self):
x = Polaroid(self.state, app=self.app)
cleanup_signal_sent = [False]
def handler(**kwargs):
cleanup_signal_sent[0] = True
x.cleanup_signal.connect(handler)
x.cleanup()
assert cleanup_signal_sent[0]
def test_shutter__capture(self):
x = Polaroid(self.state, app=self.app)
shutter_signal_sent = [False]
def handler(**kwargs):
shutter_signal_sent[0] = True
x.shutter_signal.connect(handler)
x.shutter()
assert shutter_signal_sent[0]
shutter_signal_sent[0] = False
x.capture()
assert shutter_signal_sent[0]
def test_shutter_maxrate(self):
x = Polaroid(self.state, app=self.app, maxrate='1/h')
shutter_signal_sent = [0]
def handler(**kwargs):
shutter_signal_sent[0] += 1
x.shutter_signal.connect(handler)
for i in range(30):
x.shutter()
x.shutter()
x.shutter()
assert shutter_signal_sent[0] == 1
| test_Polaroid |
python | getsentry__sentry | src/sentry/relocation/models/relocation.py | {
"start": 13201,
"end": 14006
} | class ____(DefaultFieldsModelExisting):
"""
Represents a single Google CloudBuild validation run invocation, and tracks it over its
lifetime.
"""
__relocation_scope__ = RelocationScope.Excluded
relocation = FlexibleForeignKey("sentry.Relocation")
relocation_validation = FlexibleForeignKey("sentry.RelocationValidation")
# Possible values are in the `ValidationStatus` enum.
status = status = models.SmallIntegerField(
choices=ValidationStatus.get_choices(), default=ValidationStatus.IN_PROGRESS.value
)
# Unique build ID generated by CloudBuild for this import attempt.
build_id = UUIDField(db_index=True, unique=True)
class Meta:
app_label = "sentry"
db_table = "sentry_relocationvalidationattempt"
| RelocationValidationAttempt |
python | jazzband__django-oauth-toolkit | tests/test_implicit.py | {
"start": 10264,
"end": 18380
} | class ____(BaseTest):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.application.algorithm = Application.RS256_ALGORITHM
cls.application.save()
def test_id_token_post_auth_allow(self):
"""
Test authorization code is given for an allowed request with response_type: id_token
"""
self.client.login(username="test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"nonce": "random_nonce_string",
"scope": "openid",
"redirect_uri": "http://example.org",
"response_type": "id_token",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("http://example.org#", response["Location"])
self.assertNotIn("access_token=", response["Location"])
self.assertIn("id_token=", response["Location"])
self.assertIn("state=random_state_string", response["Location"])
uri_query = urlparse(response["Location"]).fragment
uri_query_params = dict(parse_qs(uri_query, keep_blank_values=True, strict_parsing=True))
id_token = uri_query_params["id_token"][0]
jwt_token = jwt.JWT(key=self.key, jwt=id_token)
claims = json.loads(jwt_token.claims)
self.assertIn("nonce", claims)
self.assertNotIn("at_hash", claims)
def test_id_token_skip_authorization_completely(self):
"""
If application.skip_authorization = True, should skip the authorization page.
"""
self.client.login(username="test_user", password="123456")
self.application.skip_authorization = True
self.application.save()
query_data = {
"client_id": self.application.client_id,
"response_type": "id_token",
"state": "random_state_string",
"nonce": "random_nonce_string",
"scope": "openid",
"redirect_uri": "http://example.org",
}
response = self.client.get(reverse("oauth2_provider:authorize"), data=query_data)
self.assertEqual(response.status_code, 302)
self.assertIn("http://example.org#", response["Location"])
self.assertNotIn("access_token=", response["Location"])
self.assertIn("id_token=", response["Location"])
self.assertIn("state=random_state_string", response["Location"])
uri_query = urlparse(response["Location"]).fragment
uri_query_params = dict(parse_qs(uri_query, keep_blank_values=True, strict_parsing=True))
id_token = uri_query_params["id_token"][0]
jwt_token = jwt.JWT(key=self.key, jwt=id_token)
claims = json.loads(jwt_token.claims)
self.assertIn("nonce", claims)
self.assertNotIn("at_hash", claims)
def test_id_token_skip_authorization_completely_missing_nonce(self):
"""
If application.skip_authorization = True, should skip the authorization page.
"""
self.client.login(username="test_user", password="123456")
self.application.skip_authorization = True
self.application.save()
query_data = {
"client_id": self.application.client_id,
"response_type": "id_token",
"state": "random_state_string",
"scope": "openid",
"redirect_uri": "http://example.org",
}
response = self.client.get(reverse("oauth2_provider:authorize"), data=query_data)
self.assertEqual(response.status_code, 302)
self.assertIn("error=invalid_request", response["Location"])
self.assertIn("error_description=Request+is+missing+mandatory+nonce+parameter", response["Location"])
def test_id_token_post_auth_deny(self):
"""
Test error when resource owner deny access
"""
self.client.login(username="test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"nonce": "random_nonce_string",
"scope": "openid",
"redirect_uri": "http://example.org",
"response_type": "id_token",
"allow": False,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("error=access_denied", response["Location"])
def test_access_token_and_id_token_post_auth_allow(self):
"""
Test authorization code is given for an allowed request with response_type: token
"""
self.client.login(username="test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"nonce": "random_nonce_string",
"scope": "openid",
"redirect_uri": "http://example.org",
"response_type": "id_token token",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("http://example.org#", response["Location"])
self.assertIn("access_token=", response["Location"])
self.assertIn("id_token=", response["Location"])
self.assertIn("state=random_state_string", response["Location"])
uri_query = urlparse(response["Location"]).fragment
uri_query_params = dict(parse_qs(uri_query, keep_blank_values=True, strict_parsing=True))
id_token = uri_query_params["id_token"][0]
jwt_token = jwt.JWT(key=self.key, jwt=id_token)
claims = json.loads(jwt_token.claims)
self.assertIn("nonce", claims)
self.assertIn("at_hash", claims)
def test_access_token_and_id_token_skip_authorization_completely(self):
"""
If application.skip_authorization = True, should skip the authorization page.
"""
self.client.login(username="test_user", password="123456")
self.application.skip_authorization = True
self.application.save()
query_data = {
"client_id": self.application.client_id,
"response_type": "id_token token",
"state": "random_state_string",
"nonce": "random_nonce_string",
"scope": "openid",
"redirect_uri": "http://example.org",
}
response = self.client.get(reverse("oauth2_provider:authorize"), data=query_data)
self.assertEqual(response.status_code, 302)
self.assertIn("http://example.org#", response["Location"])
self.assertIn("access_token=", response["Location"])
self.assertIn("id_token=", response["Location"])
self.assertIn("state=random_state_string", response["Location"])
uri_query = urlparse(response["Location"]).fragment
uri_query_params = dict(parse_qs(uri_query, keep_blank_values=True, strict_parsing=True))
id_token = uri_query_params["id_token"][0]
jwt_token = jwt.JWT(key=self.key, jwt=id_token)
claims = json.loads(jwt_token.claims)
self.assertIn("nonce", claims)
self.assertIn("at_hash", claims)
def test_access_token_and_id_token_post_auth_deny(self):
"""
Test error when resource owner deny access
"""
self.client.login(username="test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "openid",
"redirect_uri": "http://example.org",
"response_type": "id_token token",
"allow": False,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("error=access_denied", response["Location"])
| TestOpenIDConnectImplicitFlow |
python | django-extensions__django-extensions | tests/testapp/models.py | {
"start": 8081,
"end": 8398
} | class ____(models.Model):
title = models.CharField(max_length=42)
slug = AutoSlugField(populate_from="title", overwrite_on_add=False)
class Meta:
app_label = "django_extensions"
def get_readable_title(instance):
return "The title is {}".format(instance.title)
| SluggedTestNoOverwriteOnAddModel |
python | realpython__materials | python-313/docstrings.py | {
"start": 44,
"end": 273
} | class ____:
"""Model a person with a name, location, and Python version."""
name: str
place: str
version: str
print(Person.__doc__)
print(len(dataclasses.replace.__doc__))
print(dataclasses.replace.__doc__)
| Person |
python | rq__rq | tests/test_callbacks.py | {
"start": 4402,
"end": 7702
} | class ____(RQTestCase):
def test_success_callback(self):
"""Test success callback is executed only when job is successful"""
queue = Queue(is_async=False, connection=self.connection)
job = queue.enqueue(say_hello, on_success=save_result)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
self.assertEqual(self.connection.get('success_callback:%s' % job.id).decode(), job.result)
job = queue.enqueue(div_by_zero, on_success=save_result)
self.assertEqual(job.get_status(), JobStatus.FAILED)
self.assertFalse(self.connection.exists('success_callback:%s' % job.id))
# test string callbacks
job = queue.enqueue(say_hello, on_success=Callback('tests.fixtures.save_result'))
self.assertEqual(job.get_status(), JobStatus.FINISHED)
self.assertEqual(self.connection.get('success_callback:%s' % job.id).decode(), job.result)
job = queue.enqueue(div_by_zero, on_success=Callback('tests.fixtures.save_result'))
self.assertEqual(job.get_status(), JobStatus.FAILED)
self.assertFalse(self.connection.exists('success_callback:%s' % job.id))
def test_failure_callback(self):
"""queue.enqueue* methods with on_failure is persisted correctly"""
queue = Queue(is_async=False, connection=self.connection)
job = queue.enqueue(div_by_zero, on_failure=save_exception)
self.assertEqual(job.get_status(), JobStatus.FAILED)
self.assertIn('div_by_zero', self.connection.get('failure_callback:%s' % job.id).decode())
job = queue.enqueue(div_by_zero, on_success=save_result)
self.assertEqual(job.get_status(), JobStatus.FAILED)
self.assertFalse(self.connection.exists('failure_callback:%s' % job.id))
# test string callbacks
job = queue.enqueue(div_by_zero, on_failure=Callback('tests.fixtures.save_exception'))
self.assertEqual(job.get_status(), JobStatus.FAILED)
self.assertIn('div_by_zero', self.connection.get('failure_callback:%s' % job.id).decode())
job = queue.enqueue(div_by_zero, on_success=Callback('tests.fixtures.save_result'))
self.assertEqual(job.get_status(), JobStatus.FAILED)
self.assertFalse(self.connection.exists('failure_callback:%s' % job.id))
def test_stopped_callback(self):
"""queue.enqueue* methods with on_stopped is persisted correctly"""
connection = self.connection
queue = Queue('foo', connection=connection, serializer=JSONSerializer)
worker = SimpleWorker('foo', connection=connection, serializer=JSONSerializer)
job = queue.enqueue(long_process, on_stopped=save_result_if_not_stopped)
job.execute_stopped_callback(
worker.death_penalty_class
) # Calling execute_stopped_callback directly for coverage
self.assertTrue(self.connection.exists('stopped_callback:%s' % job.id))
# test string callbacks
job = queue.enqueue(long_process, on_stopped=Callback('tests.fixtures.save_result_if_not_stopped'))
job.execute_stopped_callback(
worker.death_penalty_class
) # Calling execute_stopped_callback directly for coverage
self.assertTrue(self.connection.exists('stopped_callback:%s' % job.id))
| SyncJobCallback |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 219698,
"end": 220158
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of DeleteRef"""
__schema__ = github_schema
__field_names__ = ("ref_id", "client_mutation_id")
ref_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="refId")
"""The Node ID of the Ref to be deleted."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| DeleteRefInput |
python | davidhalter__jedi | test/refactor/extract_function.py | {
"start": 7579,
"end": 7993
} | class ____:
# ha
def g(self): pass
# haha
def ab(self, b):
#foo
local1 = 3
local2 = 4
x= self.g() or self.f(b) ^ glob1 & b is local1
return x
def f(self, b, c):
#? 11 text {'new_name': 'ab', 'until_line': 12, 'until_column': 28}
x = self.ab(b)
# bar
# -------------------------------------------------- in-method-range-2
glob1 = 1
| X |
python | pytorch__pytorch | test/dynamo/test_backward_higher_order_ops.py | {
"start": 5880,
"end": 9296
} | class ____(torch.nn.Module):
def forward(self, L_inputs_ : list, s69: "Sym(s21)", L_sizes_0_: "f32[0, s21]"):
l_inputs_ = L_inputs_
l_sizes_0_ = L_sizes_0_
getitem: "f32[s21]" = l_inputs_[0]
getitem_1: "f32[s21]" = l_inputs_[1]
getitem_2: "f32[s21]" = l_inputs_[2]; l_inputs_ = None
size: "Sym(s21)" = l_sizes_0_.size(1); l_sizes_0_ = None
validate_outputs = torch__dynamo_compiled_autograd_ops_validate_outputs([getitem], [((None, None, device(type='cpu'), 6, 0, None), [size], False, 6)]); getitem = size = None
getitem_9: "f32[s21]" = validate_outputs[0]; validate_outputs = None
call_aot_bwd_prologue = torch__dynamo_compiled_autograd_call_aot_bwd_prologue((), [], getitem_9); getitem_9 = None
aot3_tangents_1: "f32[s21]" = call_aot_bwd_prologue[0]; call_aot_bwd_prologue = None
accumulate_grad = torch__dynamo_compiled_autograd_ops_AccumulateGrad([aot3_tangents_1], getitem_1, None, False); getitem_1 = None
getitem_11: "f32[s21]" = accumulate_grad[0]; accumulate_grad = None
result: "f32[s21]" = aot3_tangents_1 * aot3_tangents_1; aot3_tangents_1 = None
accumulate_grad_1 = torch__dynamo_compiled_autograd_ops_AccumulateGrad([result], getitem_2, None, False); result = getitem_2 = None
getitem_12: "f32[s21]" = accumulate_grad_1[0]; accumulate_grad_1 = None
return (getitem_11, getitem_12)
""",
)
graph = None
@mock.patch(
"torch._functorch.aot_autograd.AOT_COUNTER", new_callable=itertools.count
)
def test_invoke_in_pt2_compiled_autograd_side_effect(self, _):
def _side_effect_stateful_fn2(x, obj):
obj.counter = obj.counter + 1
return _multiply(x)
def _side_effectful_invoke2(grad, fn):
return trace_wrapped(grad, fn=fn)
graph = None
def compiler_fn(gm):
def inner_compiler(gm_, example_inputs_):
nonlocal graph
self.assertEqual(graph, None)
graph = gm_
return inductor.compile(gm_, example_inputs_)
return torch.compile(
gm, backend=inner_compiler, fullgraph=True, dynamic=True
)
for backend in ["inductor"]:
torch._dynamo.reset()
x = torch.tensor([0.5, 0.5], requires_grad=True)
y = torch.tensor([0.5, 0.5], requires_grad=True)
class MyObj:
def __init__(self) -> None:
self.counter = 0
obj = MyObj()
inner_fn = functools.partial(_side_effect_stateful_fn2, obj=obj)
hook_fn = functools.partial(_side_effectful_invoke2, fn=inner_fn)
x.register_hook(hook_fn)
def fn(x, y):
return x + y
fn = torch.compile(fn, backend=backend, fullgraph=True)
out = fn(x, y)
grad_out = torch.tensor([2.0, 2.0])
with compiled_autograd._enable(compiler_fn):
out.backward(grad_out)
actual = normalize_gm(graph.print_readable(False))
self.assertEqual(obj.counter, 1)
self.assertEqual(x.grad, grad_out + grad_out)
if backend in ["aot_eager", "inductor"]:
self.assertExpectedInline(
actual,
"""\
| GraphModule |
python | getsentry__responses | responses/tests/test_responses.py | {
"start": 73005,
"end": 74199
} | class ____:
"""Test to validate that multiple decorators could be applied.
Ensures that we can call one function that is wrapped with
``responses.activate`` decorator from within another wrapped function.
Validates that mock patch is not leaked to other tests.
For more detail refer to https://github.com/getsentry/responses/issues/481
"""
@responses.activate
def test_wrapped(self):
responses.add(responses.GET, "http://example.com/1", body="Hello 1")
assert b"Hello 1" == requests.get("http://example.com/1").content
@responses.activate
def test_call_another_wrapped_function(self):
self.test_wrapped()
def test_mock_not_leaked(self, httpserver):
"""
Validate that ``responses.activate`` does not leak to unpatched test.
Parameters
----------
httpserver : ContentServer
Mock real HTTP server
"""
httpserver.expect_request("/").respond_with_data(
"OK", content_type="text/plain", status=969
)
url = httpserver.url_for("/")
response = requests.get(url)
assert response.status_code == 969
| TestMultipleWrappers |
python | bokeh__bokeh | src/bokeh/models/glyphs.py | {
"start": 36842,
"end": 37870
} | class ____(LRTBGlyph):
''' Render axis-aligned quads.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
__example__ = "examples/reference/models/Quad.py"
_args = ('left', 'right', 'top', 'bottom')
left = NumberSpec(default=field("left"), help="""
The x-coordinates of the left edges.
""")
right = NumberSpec(default=field("right"), help="""
The x-coordinates of the right edges.
""")
bottom = NumberSpec(default=field("bottom"), help="""
The y-coordinates of the bottom edges.
""")
top = NumberSpec(default=field("top"), help="""
The y-coordinates of the top edges.
""")
line_props = Include(LineProps, help="""
The {prop} values for the quads.
""")
fill_props = Include(FillProps, help="""
The {prop} values for the quads.
""")
hatch_props = Include(HatchProps, help="""
The {prop} values for the quads.
""")
| Quad |
python | realpython__materials | python-313/replace.py | {
"start": 595,
"end": 1638
} | class ____:
def __init__(self, name, **items):
print(f"Initializing {name} with {items}")
self.name = name
self.items = items
def __replace__(self, **kwargs):
""".__replace__() is called by copy.replace()"""
if "name" in kwargs:
raise ValueError("'name' can't be updated")
print(f"Replacing {kwargs} in {self.name}")
init_kwargs = {"name": self.name} | self.items | kwargs
# Create a new object with updated arguments
cls = type(self)
return cls(**init_kwargs)
def __repr__(self):
items = [f"{key}={value!r}" for key, value in self.items.items()]
return f"{type(self).__name__}(name='{self.name}', {', '.join(items)})"
capitals = NamedContainer(
"capitals", norway="oslo", sweden="Stockholm", denmark="Copenhagen"
)
print(f"{capitals = }")
capitals = copy.replace(capitals, norway="Oslo")
print(f"{capitals = }")
# copy.replace(capitals, name="Scandinavia") # Raises an error, name can't be replaced
| NamedContainer |
python | walkccc__LeetCode | solutions/286. Walls and Gates/286.py | {
"start": 0,
"end": 632
} | class ____:
def wallsAndGates(self, rooms: list[list[int]]) -> None:
DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
INF = 2**31 - 1
m = len(rooms)
n = len(rooms[0])
q = collections.deque((i, j)
for i in range(m)
for j in range(n)
if rooms[i][j] == 0)
while q:
i, j = q.popleft()
for dx, dy in DIRS:
x = i + dx
y = j + dy
if x < 0 or x == m or y < 0 or y == n:
continue
if rooms[x][y] != INF:
continue
rooms[x][y] = rooms[i][j] + 1
q.append((x, y))
| Solution |
python | doocs__leetcode | solution/3200-3299/3223.Minimum Length of String After Operations/Solution.py | {
"start": 0,
"end": 145
} | class ____:
def minimumLength(self, s: str) -> int:
cnt = Counter(s)
return sum(1 if x & 1 else 2 for x in cnt.values())
| Solution |
python | docker__docker-py | tests/integration/api_image_test.py | {
"start": 11528,
"end": 12227
} | class ____(BaseAPIIntegrationTest):
@requires_api_version('1.23')
def test_get_image_load_image(self):
with tempfile.TemporaryFile() as f:
stream = self.client.get_image(TEST_IMG)
for chunk in stream:
f.write(chunk)
f.seek(0)
result = self.client.load_image(f.read())
success = False
result_line = f'Loaded image: {TEST_IMG}\n'
for data in result:
print(data)
if 'stream' in data:
if data['stream'] == result_line:
success = True
break
assert success is True
@requires_api_version('1.30')
| SaveLoadImagesTest |
python | walkccc__LeetCode | solutions/640. Solve the Equation/640.py | {
"start": 0,
"end": 1041
} | class ____:
def solveEquation(self, equation: str) -> str:
def calculate(s: str) -> tuple:
coefficient = 0
constant = 0
num = 0
sign = 1
for i, c in enumerate(s):
if c.isdigit():
num = num * 10 + int(c)
elif c in '+-':
constant += sign * num
sign = 1 if c == '+' else -1
num = 0
else:
if i > 0 and num == 0 and s[i - 1] == '0':
continue
coefficient += sign if num == 0 else sign * num
num = 0
return coefficient, constant + sign * num
lhsEquation, rhsEquation = equation.split('=')
lhsCoefficient, lhsConstant = calculate(lhsEquation)
rhsCoefficient, rhsConstant = calculate(rhsEquation)
coefficient = lhsCoefficient - rhsCoefficient
constant = rhsConstant - lhsConstant
if coefficient == 0 and constant == 0:
return "Infinite solutions"
if coefficient == 0 and constant != 0:
return "No solution"
return "x=" + str(constant // coefficient)
| Solution |
python | plotly__plotly.py | plotly/graph_objs/histogram/_error_y.py | {
"start": 233,
"end": 14397
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "histogram"
_path_str = "histogram.error_y"
_valid_props = {
"array",
"arrayminus",
"arrayminussrc",
"arraysrc",
"color",
"symmetric",
"thickness",
"traceref",
"tracerefminus",
"type",
"value",
"valueminus",
"visible",
"width",
}
@property
def array(self):
"""
Sets the data corresponding the length of each error bar.
Values are plotted relative to the underlying data.
The 'array' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["array"]
@array.setter
def array(self, val):
self["array"] = val
@property
def arrayminus(self):
"""
Sets the data corresponding the length of each error bar in the
bottom (left) direction for vertical (horizontal) bars Values
are plotted relative to the underlying data.
The 'arrayminus' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["arrayminus"]
@arrayminus.setter
def arrayminus(self, val):
self["arrayminus"] = val
@property
def arrayminussrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`arrayminus`.
The 'arrayminussrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arrayminussrc"]
@arrayminussrc.setter
def arrayminussrc(self, val):
self["arrayminussrc"] = val
@property
def arraysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `array`.
The 'arraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arraysrc"]
@arraysrc.setter
def arraysrc(self, val):
self["arraysrc"] = val
@property
def color(self):
"""
Sets the stroke color of the error bars.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def symmetric(self):
"""
Determines whether or not the error bars have the same length
in both direction (top/bottom for vertical bars, left/right for
horizontal bars.
The 'symmetric' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["symmetric"]
@symmetric.setter
def symmetric(self, val):
self["symmetric"] = val
@property
def thickness(self):
"""
Sets the thickness (in px) of the error bars.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
@property
def traceref(self):
    """
    The 'traceref' property is an integer and may be given as an int
    (or a float that will be cast to an int) in the interval
    [0, 9223372036854775807].

    Returns
    -------
    int
    """
    # Reads go through the base-class item protocol.
    return self["traceref"]

@traceref.setter
def traceref(self, new_val):
    self["traceref"] = new_val
@property
def tracerefminus(self):
    """
    The 'tracerefminus' property is an integer and may be given as
    an int (or a float that will be cast to an int) in the interval
    [0, 9223372036854775807].

    Returns
    -------
    int
    """
    # Reads go through the base-class item protocol.
    return self["tracerefminus"]

@tracerefminus.setter
def tracerefminus(self, new_val):
    self["tracerefminus"] = new_val
@property
def type(self):
    """
    Rule used to generate the error bars. With "constant", the bar
    lengths are a constant value set in `value`. With "percent",
    the bar lengths are a percentage of the underlying data, set in
    `value`. With "sqrt", the bar lengths correspond to the square
    of the underlying data. With "data", the bar lengths come from
    the data set in `array`.

    The 'type' property is an enumeration that may be given as one
    of the following values:
        ['percent', 'constant', 'sqrt', 'data']

    Returns
    -------
    Any
    """
    # NOTE: the property name intentionally shadows the builtin
    # `type` — it is part of the public plotly schema.
    return self["type"]

@type.setter
def type(self, new_val):
    self["type"] = new_val
@property
def value(self):
    """
    Value of either the percentage (if `type` is set to "percent")
    or the constant (if `type` is set to "constant") corresponding
    to the lengths of the error bars.

    The 'value' property is a number and may be given as an int or
    float in the interval [0, inf].

    Returns
    -------
    int|float
    """
    # Reads go through the base-class item protocol.
    return self["value"]

@value.setter
def value(self, new_val):
    self["value"] = new_val
@property
def valueminus(self):
    """
    Value of either the percentage (if `type` is set to "percent")
    or the constant (if `type` is set to "constant") corresponding
    to the lengths of the error bars in the bottom (left) direction
    for vertical (horizontal) bars.

    The 'valueminus' property is a number and may be given as an
    int or float in the interval [0, inf].

    Returns
    -------
    int|float
    """
    # Reads go through the base-class item protocol.
    return self["valueminus"]

@valueminus.setter
def valueminus(self, new_val):
    self["valueminus"] = new_val
@property
def visible(self):
    """
    Whether this set of error bars is visible.

    The 'visible' property must be given as a bool
    (either True or False).

    Returns
    -------
    bool
    """
    # Reads go through the base-class item protocol.
    return self["visible"]

@visible.setter
def visible(self, new_val):
    self["visible"] = new_val
@property
def width(self):
    """
    Width (in px) of the cross-bar at both ends of the error bars.

    The 'width' property is a number and may be given as an int or
    float in the interval [0, inf].

    Returns
    -------
    int|float
    """
    # Reads go through the base-class item protocol.
    return self["width"]

@width.setter
def width(self, new_val):
    self["width"] = new_val
@property
def _prop_descriptions(self):
    # Human-readable description of every property of this object.
    # NOTE(review): this string is consumed by the plotly base-class
    # machinery (e.g. for validation error messages and docstring
    # generation), so its exact content is significant — it is kept
    # byte-for-byte identical here.
    return """\
array
Sets the data corresponding the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on Chart Studio Cloud for
`arrayminus`.
arraysrc
Sets the source reference on Chart Studio Cloud for
`array`.
color
Sets the stroke color of the error bars.
symmetric
Determines whether or not the error bars have the same
length in both direction (top/bottom for vertical bars,
left/right for horizontal bars.
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value.
Set this constant in `value`. If "percent", the bar
lengths correspond to a percentage of underlying data.
Set this percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with data set
`array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
"""
def __init__(
    self,
    arg=None,
    array=None,
    arrayminus=None,
    arrayminussrc=None,
    arraysrc=None,
    color=None,
    symmetric=None,
    thickness=None,
    traceref=None,
    tracerefminus=None,
    type=None,
    value=None,
    valueminus=None,
    visible=None,
    width=None,
    **kwargs,
):
    """
    Construct a new ErrorY object.

    Parameters
    ----------
    arg
        dict of properties compatible with this constructor, or an
        instance of :class:`plotly.graph_objs.histogram.ErrorY`
    array
        Lengths of the error bars, plotted relative to the
        underlying data.
    arrayminus
        Lengths of the error bars in the bottom (left) direction
        for vertical (horizontal) bars, plotted relative to the
        underlying data.
    arrayminussrc
        Source reference on Chart Studio Cloud for `arrayminus`.
    arraysrc
        Source reference on Chart Studio Cloud for `array`.
    color
        Stroke color of the error bars.
    symmetric
        Whether the error bars have the same length in both
        directions (top/bottom for vertical bars, left/right for
        horizontal bars).
    thickness
        Thickness (in px) of the error bars.
    traceref
    tracerefminus
    type
        Rule used to generate the error bars: one of "constant",
        "percent", "sqrt" or "data" (see the property docstring
        for details).
    value
        Percentage (if `type` is "percent") or constant (if `type`
        is "constant") corresponding to the error-bar lengths.
    valueminus
        Same as `value`, but for the bottom (left) direction of
        vertical (horizontal) bars.
    visible
        Whether this set of error bars is visible.
    width
        Width (in px) of the cross-bar at both ends of the error
        bars.

    Returns
    -------
    ErrorY
    """
    super().__init__("error_y")

    # Internal fast path: when constructed as a child of an existing
    # object, only the parent link is recorded.
    if "_parent" in kwargs:
        self._parent = kwargs["_parent"]
        return

    # Normalize `arg` into a plain dict we can safely consume.
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        arg = _copy.copy(arg)
    else:
        raise ValueError("""\
The first argument to the plotly.graph_objs.histogram.ErrorY
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram.ErrorY`""")

    # Validation flags must be set before any property assignment.
    self._skip_invalid = kwargs.pop("skip_invalid", False)
    self._validate = kwargs.pop("_validate", True)

    # Assign every constructor argument through the shared property
    # machinery so explicit keyword values take precedence over the
    # corresponding entries in `arg`.
    for prop_name, prop_value in (
        ("array", array),
        ("arrayminus", arrayminus),
        ("arrayminussrc", arrayminussrc),
        ("arraysrc", arraysrc),
        ("color", color),
        ("symmetric", symmetric),
        ("thickness", thickness),
        ("traceref", traceref),
        ("tracerefminus", tracerefminus),
        ("type", type),
        ("value", value),
        ("valueminus", valueminus),
        ("visible", visible),
        ("width", width),
    ):
        self._set_property(prop_name, arg, prop_value)

    # Apply any remaining properties from `arg` plus extra kwargs,
    # then restore strict validation.
    self._process_kwargs(**dict(arg, **kwargs))
    self._skip_invalid = False
| ErrorY |
python | pydantic__pydantic | pydantic/warnings.py | {
"start": 512,
"end": 1951
} | class ____(DeprecationWarning):
"""A Pydantic specific deprecation warning.
This warning is raised when using deprecated functionality in Pydantic. It provides information on when the
deprecation was introduced and the expected version in which the corresponding functionality will be removed.
Attributes:
message: Description of the warning.
since: Pydantic version in what the deprecation was introduced.
expected_removal: Pydantic version in what the corresponding functionality expected to be removed.
"""
message: str
since: tuple[int, int]
expected_removal: tuple[int, int]
def __init__(
self, message: str, *args: object, since: tuple[int, int], expected_removal: tuple[int, int] | None = None
) -> None:
super().__init__(message, *args)
self.message = message.rstrip('.')
self.since = since
self.expected_removal = expected_removal if expected_removal is not None else (since[0] + 1, 0)
def __str__(self) -> str:
message = (
f'{self.message}. Deprecated in Pydantic V{self.since[0]}.{self.since[1]}'
f' to be removed in V{self.expected_removal[0]}.{self.expected_removal[1]}.'
)
if self.since == (2, 0):
message += f' See Pydantic V2 Migration Guide at https://errors.pydantic.dev/{version_short()}/migration/'
return message
| PydanticDeprecationWarning |
python | great-expectations__great_expectations | great_expectations/exceptions/resource_freshness.py | {
"start": 941,
"end": 1024
} | class ____(GreatExpectationsAggregateError):
pass
| ResourceFreshnessAggregateError |
python | run-llama__llama_index | llama-index-core/tests/program/test_streaming_utils.py | {
"start": 382,
"end": 486
} | class ____(BaseModel):
"""Test joke model."""
setup: str
punchline: Optional[str] = None
| Joke |
python | altair-viz__altair | tools/schemapi/utils.py | {
"start": 32248,
"end": 33083
} | class ____(Generic[T]):
"""
Simple group-by like utility.
Intended for consuming an iterator in full, splitting into true/false cases.
Parameters
----------
iterable
Elements to divide into two groups.
predicate
Function to classify each element.
Attributes
----------
truthy: deque[T]
Elements which pass ``predicate``.
falsy: deque[T]
Elements which fail ``predicate``.
"""
def __init__(
self, iterable: Iterable[T], /, predicate: Callable[[T], bool]
) -> None:
truthy, falsy = deque[T](), deque[T]()
for el in iterable:
if predicate(el):
truthy.append(el)
else:
falsy.append(el)
self.truthy: deque[T] = truthy
self.falsy: deque[T] = falsy
| Grouped |
python | tensorflow__tensorflow | ci/official/utilities/extract_resultstore_links.py | {
"start": 1422,
"end": 11184
} | class ____:
tests_failed = 'tests_failed'
build_failed = 'build_failed'
passed = 'passed'
def parse_args() -> argparse.Namespace:
"""Parses the commandline args."""
parser = argparse.ArgumentParser(
description='Extracts ResultStore links from a build log.\n'
'These can be then printed out, and/or output into a '
'JUnit-based XML file inside a specified directory.')
parser.add_argument('build_log',
help='Path to a build log.')
parser.add_argument('--xml-out-path',
required=False,
help='Path to which to output '
'the JUnit-based XML with ResultStore links.')
parser.add_argument('--print',
action='store_true', dest='print', default=False,
help='Whether to print out a short summary with the '
'found ResultStore links (if any).')
parser.add_argument('-v', '--verbose',
action='store_true', dest='verbose', default=False,
help='Prints out lines helpful for debugging.')
parsed_args = parser.parse_args()
if not parsed_args.print and not parsed_args.xml_out_path:
raise TypeError('`--print` or `--xml-out-path` must be specified')
return parsed_args
def parse_log(file_path: str,
verbose: bool = False) -> ResultDictType:
"""Finds ResultStore links, and tries to determine their status."""
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
log_lines = f.read().splitlines()
result_store_links: ResultDictType = {}
current_url = None
for i in range(len(log_lines)):
line = log_lines[i]
result_store_line_match = re.search(RESULT_STORE_LINK_RE, line)
if not result_store_line_match:
continue
url = result_store_line_match.group(1)
url_lines = result_store_links.setdefault(url, {})
# Each bazel RBE invocation should produce two
# 'Streaming build results to: ...' lines, one at the start, and one at the
# end of the invocation.
# If there's a failure message, it will be found in-between these two.
if not current_url:
url_lines['start'] = i
elif current_url == url:
url_lines['end'] = i
else:
result_store_links[current_url]['next_url'] = i
url_lines['start'] = i
current_url = url
previous_end_line = None
for url, lines in result_store_links.items():
lines['status'] = InvokeStatus.passed # default to passed
start_line = lines['start']
end_line = lines.get('end', lines.get('next_url', len(log_lines))) - 1
k = end_line
while k > start_line:
backtrack_line = log_lines[k]
build_failed = backtrack_line.startswith(FAILED_BUILD_LINE)
if build_failed or not backtrack_line.startswith(BUILD_STATUS_LINE):
tests_failed = False
else:
tests_failed = re.search(TESTS_FAILED_RE, backtrack_line)
if build_failed or tests_failed:
log_fragment = '\n'.join(
log_lines[max(k - 20, 0):min(end_line + 1, len(log_lines) - 1)])
lines['log_fragment'] = log_fragment
lines['status'] = (InvokeStatus.build_failed if build_failed
else InvokeStatus.tests_failed)
if verbose:
print(f'Found failed invocation: {url.rsplit("/")[-1]}\n'
f'Log fragment:\n'
f'```\n{log_fragment}\n```\n'
f'{"=" * 140}')
break
k -= 1
# A low-effort attempt to find the bazel command that triggered the
# invocation.
bazel_comm_min_line_i = (previous_end_line if previous_end_line is not None
else 0)
while k > bazel_comm_min_line_i:
backtrack_line = log_lines[k]
# Don't attempt to parse multi-line commands broken up by backslashes
if 'bazel ' in backtrack_line and not backtrack_line.endswith('\\'):
bazel_line = BAZEL_COMMAND_RE.search(backtrack_line)
if bazel_line:
lines['command'] = bazel_line.group('command')
lines['command_type'] = bazel_line.group('type')
break
k -= 1
continue
previous_end_line = lines.get('end') or start_line
return result_store_links
def indent_xml(elem, level=0) -> None:
"""Indents and newlines the XML for better output."""
indent_str = '\n' + level * ' '
if len(elem): # pylint: disable=g-explicit-length-test # `if elem` not valid
if not elem.text or not elem.text.strip():
elem.text = indent_str + ' '
if not elem.tail or not elem.tail.strip():
elem.tail = indent_str
for elem in elem:
indent_xml(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = indent_str
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = indent_str
def create_xml_file(result_store_dict: ResultDictType,
output_path: str,
verbose: bool = False):
"""Creates a JUnit-based XML file, with each invocation as a testcase."""
os.makedirs(os.path.dirname(output_path), exist_ok=True)
failure_count = 0
error_count = 0
date_time = datetime.datetime
attrib = {'name': 'Bazel Invocations', 'time': '0.0',
'timestamp': date_time.isoformat(date_time.utcnow())}
testsuites = ElemTree.Element('testsuites')
testsuite = ElemTree.SubElement(testsuites, 'testsuite')
for url, invocation_results in result_store_dict.items():
invocation_id = url.rsplit('/')[-1]
if verbose:
print(f'Creating testcase for invocation {invocation_id}')
status = invocation_results['status']
command = invocation_results.get('command')
command_type = invocation_results.get('command_type')
case_attrib = attrib.copy()
if command_type:
command_type = command_type.title()
case_name = f'{command_type} invocation {invocation_id}'
else:
case_name = f' Invocation {invocation_id}'
case_attrib.update({'name': case_name,
'status': 'run', 'result': 'completed'})
testcase = ElemTree.SubElement(testsuite, 'testcase', attrib=case_attrib)
if status in (InvokeStatus.tests_failed, InvokeStatus.build_failed):
if status == InvokeStatus.tests_failed:
failure_count += 1
elem_name = 'failure'
else:
error_count += 1
elem_name = 'error'
if command:
failure_msg = (f'\nThe command was:\n\n'
f'{command}\n\n')
else:
failure_msg = ('\nCouldn\'t parse a bazel command '
'matching the invocation, inside the log. '
'Please look for it in the build log.\n\n')
failure_msg += (
f'See the ResultStore link for a detailed view of failed targets:\n'
f'{url}\n\n')
failure_msg += (
f'Here\'s a fragment of the log containing the failure:\n\n'
f'[ ... TRUNCATED ... ]\n\n'
f'{invocation_results["log_fragment"]}\n'
f'\n[ ... TRUNCATED ... ]\n'
)
failure = ElemTree.SubElement(
testcase, elem_name,
message=f'Bazel invocation {invocation_id} failed.')
failure.text = failure_msg
else:
properties = ElemTree.SubElement(testcase, 'properties')
success_msg = 'Build completed successfully.\n' f'See {url} for details.'
ElemTree.SubElement(properties, 'property',
name='description',
value=success_msg)
if command:
ElemTree.SubElement(properties, 'property',
name='bazel_command',
value=command)
suite_specific = {'tests': str(len(result_store_dict)),
'errors': str(error_count),
'failures': str(failure_count)}
suite_attrib = attrib.copy()
suite_attrib.update(suite_specific)
testsuites.attrib = suite_attrib
testsuite.attrib = suite_attrib
indent_xml(testsuites)
tree = ElemTree.ElementTree(testsuites)
file_path = os.path.join(output_path)
with open(file_path, 'wb') as f:
f.write(b'<?xml version="1.0"?>\n')
tree.write(f)
if verbose:
print(f'\nWrote XML with Bazel invocation results to {file_path}')
def print_invocation_results(result_store_dict: ResultDictType):
"""Prints out a short summary of the found ResultStore links (if any)."""
print()
if not result_store_dict:
print('Found no ResultStore links for Bazel build/test invocations.')
else:
print(f'Found {len(result_store_dict)} ResultStore link(s) for '
f'Bazel invocations.\n'
f'ResultStore contains individual representations of each target '
f'that were run/built during the invocation.\n'
f'These results are generally easier to read than looking through '
f'the entire build log:\n')
i = 1
for url, invocation_results in result_store_dict.items():
line_str = f'Invocation #{i} ({invocation_results["status"]}):\n'
command = invocation_results.get('command')
if command:
line_str += command
else:
line_str += ('Couldn\'t parse the bazel command, '
'check inside the build log instead')
line_str += f'\n{url}\n'
print(line_str)
i += 1
def main():
args = parse_args()
verbose = args.verbose
build_log_path = os.path.expandvars(args.build_log)
links = parse_log(build_log_path, verbose=verbose)
if args.xml_out_path:
output_path = os.path.expandvars(args.xml_out_path)
create_xml_file(links, output_path, verbose=verbose)
if args.print:
print_invocation_results(links)
if __name__ == '__main__':
main()
| InvokeStatus |
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_data_frame.py | {
"start": 2913,
"end": 3783
} | class ____:
def test_valid(self) -> None:
prop = bcpp.EagerSeries()
assert prop.is_valid(pd.Series(dtype='float64'))
def test_valid_polars(self) -> None:
polars = pytest.importorskip('polars')
prop = bcpp.EagerSeries()
assert prop.is_valid(polars.Series())
def test_valid_pyarrow(self) -> None:
pa = pytest.importorskip('pyarrow')
prop = bcpp.EagerSeries()
assert prop.is_valid(pa.chunked_array([], type=pa.int64()))
def test_invalid(self) -> None:
prop = bcpp.EagerSeries()
assert not prop.is_valid(None)
assert not prop.is_valid(1.0+1.0j)
assert not prop.is_valid(())
assert not prop.is_valid([])
assert not prop.is_valid({})
assert not prop.is_valid(_TestHasProps())
assert not prop.is_valid(_TestModel())
| Test_EagerSeries |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.