language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | astropy__astropy | astropy/cosmology/_src/tests/io/test_connect.py | {
"start": 6749,
"end": 9363
} | class ____(
test_cosmology.ToFromCosmologyTestMixin,
test_mapping.ToFromMappingTestMixin,
test_model.ToFromModelTestMixin,
test_row.ToFromRowTestMixin,
test_table.ToFromTableTestMixin,
test_yaml.ToFromYAMLTestMixin,
):
"""
Tests for a Cosmology[To/From]Format on a |Cosmology|.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
@pytest.mark.parametrize("format, totype", tofrom_formats)
def test_tofromformat_complete_info(
self, cosmo, format, totype, xfail_if_not_registered_with_yaml
):
"""Read tests happen later."""
# test to_format
obj = cosmo.to_format(format)
assert isinstance(obj, totype)
# test from_format
got = Cosmology.from_format(obj, format=format)
# Test autodetect, if enabled
if self.can_autodentify(format):
got2 = Cosmology.from_format(obj)
assert got2 == got # internal consistency
assert got == cosmo # external consistency
assert got.meta == cosmo.meta
@pytest.mark.parametrize("format, totype", tofrom_formats)
def test_fromformat_subclass_complete_info(
self, cosmo_cls, cosmo, format, totype, xfail_if_not_registered_with_yaml
):
"""
Test transforming an instance and parsing from that class, when there's
full information available.
Partial information tests are handled in the Mixin super classes.
"""
# test to_format
obj = cosmo.to_format(format)
assert isinstance(obj, totype)
# read with the same class that wrote.
got = cosmo_cls.from_format(obj, format=format)
if self.can_autodentify(format):
got2 = Cosmology.from_format(obj) # and autodetect
assert got2 == got # internal consistency
assert got == cosmo # external consistency
assert got.meta == cosmo.meta
# this should be equivalent to
got = Cosmology.from_format(obj, format=format, cosmology=cosmo_cls)
assert got == cosmo
assert got.meta == cosmo.meta
# and also
got = Cosmology.from_format(
obj, format=format, cosmology=cosmo_cls.__qualname__
)
assert got == cosmo
assert got.meta == cosmo.meta
| ToFromFormatTestMixin |
python | pallets__werkzeug | src/werkzeug/local.py | {
"start": 12345,
"end": 22184
} | class ____(t.Generic[T]):
"""A proxy to the object bound to a context-local object. All
operations on the proxy are forwarded to the bound object. If no
object is bound, a ``RuntimeError`` is raised.
:param local: The context-local object that provides the proxied
object.
:param name: Proxy this attribute from the proxied object.
:param unbound_message: The error message to show if the
context-local object is unbound.
Proxy a :class:`~contextvars.ContextVar` to make it easier to
access. Pass a name to proxy that attribute.
.. code-block:: python
_request_var = ContextVar("request")
request = LocalProxy(_request_var)
session = LocalProxy(_request_var, "session")
Proxy an attribute on a :class:`Local` namespace by calling the
local with the attribute name:
.. code-block:: python
data = Local()
user = data("user")
Proxy the top item on a :class:`LocalStack` by calling the local.
Pass a name to proxy that attribute.
.. code-block::
app_stack = LocalStack()
current_app = app_stack()
g = app_stack("g")
Pass a function to proxy the return value from that function. This
was previously used to access attributes of local objects before
that was supported directly.
.. code-block:: python
session = LocalProxy(lambda: request.session)
``__repr__`` and ``__class__`` are proxied, so ``repr(x)`` and
``isinstance(x, cls)`` will look like the proxied object. Use
``issubclass(type(x), LocalProxy)`` to check if an object is a
proxy.
.. code-block:: python
repr(user) # <User admin>
isinstance(user, User) # True
issubclass(type(user), LocalProxy) # True
.. versionchanged:: 2.2.2
``__wrapped__`` is set when wrapping an object, not only when
wrapping a function, to prevent doctest from failing.
.. versionchanged:: 2.2
Can proxy a ``ContextVar`` or ``LocalStack`` directly.
.. versionchanged:: 2.2
The ``name`` parameter can be used with any proxied object, not
only ``Local``.
.. versionchanged:: 2.2
Added the ``unbound_message`` parameter.
.. versionchanged:: 2.0
Updated proxied attributes and methods to reflect the current
data model.
.. versionchanged:: 0.6.1
The class can be instantiated with a callable.
"""
__slots__ = ("__wrapped", "_get_current_object")
_get_current_object: t.Callable[[], T]
"""Return the current object this proxy is bound to. If the proxy is
unbound, this raises a ``RuntimeError``.
This should be used if you need to pass the object to something that
doesn't understand the proxy. It can also be useful for performance
if you are accessing the object multiple times in a function, rather
than going through the proxy multiple times.
"""
def __init__(
self,
local: ContextVar[T] | Local | LocalStack[T] | t.Callable[[], T],
name: str | None = None,
*,
unbound_message: str | None = None,
) -> None:
if name is None:
get_name = _identity
else:
get_name = attrgetter(name) # type: ignore[assignment]
if unbound_message is None:
unbound_message = "object is not bound"
if isinstance(local, Local):
if name is None:
raise TypeError("'name' is required when proxying a 'Local' object.")
def _get_current_object() -> T:
try:
return get_name(local) # type: ignore[return-value]
except AttributeError:
raise RuntimeError(unbound_message) from None
elif isinstance(local, LocalStack):
def _get_current_object() -> T:
obj = local.top
if obj is None:
raise RuntimeError(unbound_message)
return get_name(obj)
elif isinstance(local, ContextVar):
def _get_current_object() -> T:
try:
obj = local.get()
except LookupError:
raise RuntimeError(unbound_message) from None
return get_name(obj)
elif callable(local):
def _get_current_object() -> T:
return get_name(local())
else:
raise TypeError(f"Don't know how to proxy '{type(local)}'.")
object.__setattr__(self, "_LocalProxy__wrapped", local)
object.__setattr__(self, "_get_current_object", _get_current_object)
__doc__ = _ProxyLookup(
class_value=__doc__, fallback=lambda self: type(self).__doc__, is_attr=True
)
__wrapped__ = _ProxyLookup(
fallback=lambda self: self._LocalProxy__wrapped, # type: ignore[attr-defined]
is_attr=True,
)
# __del__ should only delete the proxy
__repr__ = _ProxyLookup(
repr, fallback=lambda self: f"<{type(self).__name__} unbound>"
)
__str__ = _ProxyLookup(str)
__bytes__ = _ProxyLookup(bytes)
__format__ = _ProxyLookup()
__lt__ = _ProxyLookup(operator.lt)
__le__ = _ProxyLookup(operator.le)
__eq__ = _ProxyLookup(operator.eq)
__ne__ = _ProxyLookup(operator.ne)
__gt__ = _ProxyLookup(operator.gt)
__ge__ = _ProxyLookup(operator.ge)
__hash__ = _ProxyLookup(hash)
__bool__ = _ProxyLookup(bool, fallback=lambda self: False)
__getattr__ = _ProxyLookup(getattr)
# __getattribute__ triggered through __getattr__
__setattr__ = _ProxyLookup(setattr)
__delattr__ = _ProxyLookup(delattr)
__dir__ = _ProxyLookup(dir, fallback=lambda self: [])
# __get__ (proxying descriptor not supported)
# __set__ (descriptor)
# __delete__ (descriptor)
# __set_name__ (descriptor)
# __objclass__ (descriptor)
# __slots__ used by proxy itself
# __dict__ (__getattr__)
# __weakref__ (__getattr__)
# __init_subclass__ (proxying metaclass not supported)
# __prepare__ (metaclass)
__class__ = _ProxyLookup(fallback=lambda self: type(self), is_attr=True)
__instancecheck__ = _ProxyLookup(lambda self, other: isinstance(other, self))
__subclasscheck__ = _ProxyLookup(lambda self, other: issubclass(other, self))
# __class_getitem__ triggered through __getitem__
__call__ = _ProxyLookup(lambda self, *args, **kwargs: self(*args, **kwargs))
__len__ = _ProxyLookup(len)
__length_hint__ = _ProxyLookup(operator.length_hint)
__getitem__ = _ProxyLookup(operator.getitem)
__setitem__ = _ProxyLookup(operator.setitem)
__delitem__ = _ProxyLookup(operator.delitem)
# __missing__ triggered through __getitem__
__iter__ = _ProxyLookup(iter)
__next__ = _ProxyLookup(next)
__reversed__ = _ProxyLookup(reversed)
__contains__ = _ProxyLookup(operator.contains)
__add__ = _ProxyLookup(operator.add)
__sub__ = _ProxyLookup(operator.sub)
__mul__ = _ProxyLookup(operator.mul)
__matmul__ = _ProxyLookup(operator.matmul)
__truediv__ = _ProxyLookup(operator.truediv)
__floordiv__ = _ProxyLookup(operator.floordiv)
__mod__ = _ProxyLookup(operator.mod)
__divmod__ = _ProxyLookup(divmod)
__pow__ = _ProxyLookup(pow)
__lshift__ = _ProxyLookup(operator.lshift)
__rshift__ = _ProxyLookup(operator.rshift)
__and__ = _ProxyLookup(operator.and_)
__xor__ = _ProxyLookup(operator.xor)
__or__ = _ProxyLookup(operator.or_)
__radd__ = _ProxyLookup(_l_to_r_op(operator.add))
__rsub__ = _ProxyLookup(_l_to_r_op(operator.sub))
__rmul__ = _ProxyLookup(_l_to_r_op(operator.mul))
__rmatmul__ = _ProxyLookup(_l_to_r_op(operator.matmul))
__rtruediv__ = _ProxyLookup(_l_to_r_op(operator.truediv))
__rfloordiv__ = _ProxyLookup(_l_to_r_op(operator.floordiv))
__rmod__ = _ProxyLookup(_l_to_r_op(operator.mod))
__rdivmod__ = _ProxyLookup(_l_to_r_op(divmod))
__rpow__ = _ProxyLookup(_l_to_r_op(pow))
__rlshift__ = _ProxyLookup(_l_to_r_op(operator.lshift))
__rrshift__ = _ProxyLookup(_l_to_r_op(operator.rshift))
__rand__ = _ProxyLookup(_l_to_r_op(operator.and_))
__rxor__ = _ProxyLookup(_l_to_r_op(operator.xor))
__ror__ = _ProxyLookup(_l_to_r_op(operator.or_))
__iadd__ = _ProxyIOp(operator.iadd)
__isub__ = _ProxyIOp(operator.isub)
__imul__ = _ProxyIOp(operator.imul)
__imatmul__ = _ProxyIOp(operator.imatmul)
__itruediv__ = _ProxyIOp(operator.itruediv)
__ifloordiv__ = _ProxyIOp(operator.ifloordiv)
__imod__ = _ProxyIOp(operator.imod)
__ipow__ = _ProxyIOp(operator.ipow)
__ilshift__ = _ProxyIOp(operator.ilshift)
__irshift__ = _ProxyIOp(operator.irshift)
__iand__ = _ProxyIOp(operator.iand)
__ixor__ = _ProxyIOp(operator.ixor)
__ior__ = _ProxyIOp(operator.ior)
__neg__ = _ProxyLookup(operator.neg)
__pos__ = _ProxyLookup(operator.pos)
__abs__ = _ProxyLookup(abs)
__invert__ = _ProxyLookup(operator.invert)
__complex__ = _ProxyLookup(complex)
__int__ = _ProxyLookup(int)
__float__ = _ProxyLookup(float)
__index__ = _ProxyLookup(operator.index)
__round__ = _ProxyLookup(round)
__trunc__ = _ProxyLookup(math.trunc)
__floor__ = _ProxyLookup(math.floor)
__ceil__ = _ProxyLookup(math.ceil)
__enter__ = _ProxyLookup()
__exit__ = _ProxyLookup()
__await__ = _ProxyLookup()
__aiter__ = _ProxyLookup()
__anext__ = _ProxyLookup()
__aenter__ = _ProxyLookup()
__aexit__ = _ProxyLookup()
__copy__ = _ProxyLookup(copy.copy)
__deepcopy__ = _ProxyLookup(copy.deepcopy)
# __getnewargs_ex__ (pickle through proxy not supported)
# __getnewargs__ (pickle)
# __getstate__ (pickle)
# __setstate__ (pickle)
# __reduce__ (pickle)
# __reduce_ex__ (pickle)
| LocalProxy |
python | apache__airflow | providers/apache/hive/tests/unit/apache/hive/operators/test_hive_stats.py | {
"start": 1274,
"end": 1433
} | class ____:
def __init__(self, col_name, col_type):
self.name = col_name
self.type = col_type
fake_col = _FakeCol("col", "string")
| _FakeCol |
python | numpy__numpy | tools/swig/test/testFlat.py | {
"start": 314,
"end": 2622
} | class ____(unittest.TestCase):
def __init__(self, methodName="runTest"):
unittest.TestCase.__init__(self, methodName)
self.typeStr = "double"
self.typeCode = "d"
# Test the (type* INPLACE_ARRAY_FLAT, int DIM_FLAT) typemap
def testProcess1D(self):
"Test Process function 1D array"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
process = Flat.__dict__[self.typeStr + "Process"]
pack_output = b''
for i in range(10):
pack_output += struct.pack(self.typeCode, i)
x = np.frombuffer(pack_output, dtype=self.typeCode)
y = x.copy()
process(y)
self.assertEqual(np.all((x + 1) == y), True)
def testProcess3D(self):
"Test Process function 3D array"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
process = Flat.__dict__[self.typeStr + "Process"]
pack_output = b''
for i in range(24):
pack_output += struct.pack(self.typeCode, i)
x = np.frombuffer(pack_output, dtype=self.typeCode)
x = x.reshape((2, 3, 4))
y = x.copy()
process(y)
self.assertEqual(np.all((x + 1) == y), True)
def testProcess3DTranspose(self):
"Test Process function 3D array, FORTRAN order"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
process = Flat.__dict__[self.typeStr + "Process"]
pack_output = b''
for i in range(24):
pack_output += struct.pack(self.typeCode, i)
x = np.frombuffer(pack_output, dtype=self.typeCode)
x = x.reshape((2, 3, 4))
y = x.copy()
process(y.T)
self.assertEqual(np.all((x.T + 1) == y.T), True)
def testProcessNoncontiguous(self):
"Test Process function with non-contiguous array, which should raise an error"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
process = Flat.__dict__[self.typeStr + "Process"]
pack_output = b''
for i in range(24):
pack_output += struct.pack(self.typeCode, i)
x = np.frombuffer(pack_output, dtype=self.typeCode)
x = x.reshape((2, 3, 4))
self.assertRaises(TypeError, process, x[:, :, 0])
######################################################################
| FlatTestCase |
python | numpy__numpy | numpy/_core/tests/test_getlimits.py | {
"start": 311,
"end": 468
} | class ____:
def test_singleton(self):
ftype = finfo(float)
ftype2 = finfo(float)
assert_equal(id(ftype), id(ftype2))
| TestPythonFloat |
python | has2k1__plotnine | plotnine/stats/stat_hull.py | {
"start": 107,
"end": 1542
} | class ____(stat):
"""
2 Dimensional Convex Hull
{usage}
Parameters
----------
{common_parameters}
qhull_options: str, default=None
Additional options to pass to Qhull.
See `Qhull <http://www.qhull.org/>`__ documentation
for details.
Raises
------
scipy.spatial.QhullError
Raised when Qhull encounters an error condition,
such as geometrical degeneracy when options to resolve are
not enabled.
See Also
--------
plotnine.geom_path : The default `geom` for this `stat`.
"""
_aesthetics_doc = """
{aesthetics_table}
**Options for computed aesthetics**
```python
"area" # Area of the convex hull
```
"""
REQUIRED_AES = {"x", "y"}
DEFAULT_PARAMS = {
"geom": "path",
"position": "identity",
"na_rm": False,
"qhull_options": None,
}
CREATES = {"area"}
def compute_group(self, data, scales):
from scipy.spatial import ConvexHull
hull = ConvexHull(
data[["x", "y"]], qhull_options=self.params["qhull_options"]
)
idx = np.hstack([hull.vertices, hull.vertices[0]])
new_data = pd.DataFrame(
{
"x": data["x"].iloc[idx].to_numpy(),
"y": data["y"].iloc[idx].to_numpy(),
"area": hull.area,
}
)
return new_data
| stat_hull |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/strings_ops/base64_ops_test.py | {
"start": 1081,
"end": 5587
} | class ____(test_util.TensorFlowTestCase):
def setUp(self):
self._msg = array_ops.placeholder(dtype=dtypes.string)
self._encoded_f = string_ops.encode_base64(self._msg, pad=False)
self._decoded_f = string_ops.decode_base64(self._encoded_f)
self._encoded_t = string_ops.encode_base64(self._msg, pad=True)
self._decoded_t = string_ops.decode_base64(self._encoded_t)
def _RemovePad(self, msg, base64_msg):
if len(msg) % 3 == 1:
return base64_msg[:-2]
if len(msg) % 3 == 2:
return base64_msg[:-1]
return base64_msg
def _RunTest(self, msg, pad):
with self.cached_session() as sess:
if pad:
encoded, decoded = sess.run([self._encoded_t, self._decoded_t],
feed_dict={self._msg: msg})
else:
encoded, decoded = sess.run([self._encoded_f, self._decoded_f],
feed_dict={self._msg: msg})
if not isinstance(msg, (list, tuple)):
msg = [msg]
encoded = [encoded]
decoded = [decoded]
base64_msg = [base64.urlsafe_b64encode(m) for m in msg]
if not pad:
base64_msg = [self._RemovePad(m, b) for m, b in zip(msg, base64_msg)]
for i in range(len(msg)):
self.assertEqual(base64_msg[i], encoded[i])
self.assertEqual(msg[i], decoded[i])
def testWithPythonBase64(self):
for pad in (False, True):
self._RunTest(b"", pad=pad)
for _ in range(100):
length = np.random.randint(1024 * 1024)
msg = np.random.bytes(length)
self._RunTest(msg, pad=pad)
def testShape(self):
for pad in (False, True):
for _ in range(10):
msg = [np.random.bytes(np.random.randint(20))
for _ in range(np.random.randint(10))]
self._RunTest(msg, pad=pad)
# Zero-element, non-trivial shapes.
for _ in range(10):
k = np.random.randint(10)
msg = np.empty((0, k), dtype=bytes)
encoded = string_ops.encode_base64(msg, pad=pad)
decoded = string_ops.decode_base64(encoded)
with self.cached_session() as sess:
encoded_value, decoded_value = self.evaluate([encoded, decoded])
self.assertEqual(encoded_value.shape, msg.shape)
self.assertEqual(decoded_value.shape, msg.shape)
def testInvalidInput(self):
def try_decode(enc):
self._decoded_f.eval(feed_dict={self._encoded_f: enc})
with self.cached_session():
# Invalid length.
msg = np.random.bytes(99)
enc = base64.urlsafe_b64encode(msg)
with self.assertRaisesRegex(errors.InvalidArgumentError, "1 modulo 4"):
try_decode(enc + b"a")
# Invalid char used in encoding.
msg = np.random.bytes(34)
enc = base64.urlsafe_b64encode(msg)
for i in range(len(msg)):
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc[:i] + b"?" + enc[(i + 1):])
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc[:i] + b"\x80" + enc[(i + 1):]) # outside ascii range.
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc[:i] + b"+" + enc[(i + 1):]) # not url-safe.
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc[:i] + b"/" + enc[(i + 1):]) # not url-safe.
# Partial padding.
msg = np.random.bytes(34)
enc = base64.urlsafe_b64encode(msg)
with self.assertRaises(errors.InvalidArgumentError):
# enc contains == at the end. Partial padding is not allowed.
try_decode(enc[:-1])
# Unnecessary padding.
msg = np.random.bytes(33)
enc = base64.urlsafe_b64encode(msg)
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc + b"==")
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc + b"===")
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc + b"====")
# Padding in the middle. (Previous implementation was ok with this as long
# as padding char location was 2 or 3 (mod 4).
msg = np.random.bytes(33)
enc = base64.urlsafe_b64encode(msg)
for i in range(len(msg) - 1):
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc[:i] + b"=" + enc[(i + 1):])
for i in range(len(msg) - 2):
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc[:i] + b"==" + enc[(i + 2):])
if __name__ == "__main__":
test.main()
| Base64OpsTest |
python | streamlit__streamlit | lib/tests/streamlit/write_test.py | {
"start": 1441,
"end": 16567
} | class ____(unittest.TestCase):
"""Test st.write.
Unit tests for https://docs.streamlit.io/develop/api-reference/write-magic/st.write
Because we're going to test st.markdown, st.pyplot, st.altair_chart
later on, we don't have to test it in st.write In st.write, all we're
trying to check is that the right st.* method gets called
"""
def test_repr_html(self):
"""Test st.write with an object that defines _repr_html_."""
class FakeHTMLable:
def _repr_html_(self):
return "<strong>hello world</strong>"
with patch("streamlit.delta_generator.DeltaGenerator.help") as p:
fake = FakeHTMLable()
st.write(fake)
p.assert_called_once_with(fake)
def test_repr_html_allowing_html(self):
"""Test st.write with an object that defines _repr_html_ and allows
unsafe HTML explicitly."""
class FakeHTMLable:
def _repr_html_(self):
return "<strong>hello world</strong>"
with patch("streamlit.delta_generator.DeltaGenerator.html") as p:
st.write(FakeHTMLable(), unsafe_allow_html=True)
p.assert_called_once_with("<strong>hello world</strong>")
def test_repr_html_no_html_tags_in_string(self):
"""Test st.write with an object that defines _repr_html_ but does not have any
html tags in the returned string, when unsafe_allow_html=False. In that case,
we should just honor unsafe_allow_html even though the output of _repr_html_
actually doesn't have HTML. (The reason we're testing for this is because this
is a behavior change)
"""
class FakeHTMLable:
def _repr_html_(self):
return "hello **world**"
with (
patch("streamlit.delta_generator.DeltaGenerator.html") as p1,
patch("streamlit.delta_generator.DeltaGenerator.help") as p2,
):
obj = FakeHTMLable()
st.write(obj)
p1.assert_not_called()
p2.assert_called_once_with(obj)
def test_repr_html_not_callable(self):
"""Test st.write with an object that defines _repr_html_ but is not callable"""
class FakeHTMLable:
_repr_html_ = "hello **world**"
with patch("streamlit.delta_generator.DeltaGenerator.help") as p:
fake = FakeHTMLable()
st.write(fake)
p.assert_called_once_with(fake)
def test_string(self):
"""Test st.write with a string."""
with patch("streamlit.delta_generator.DeltaGenerator.markdown") as p:
st.write("some string")
p.assert_called_once()
with patch("streamlit.delta_generator.DeltaGenerator.markdown") as p:
st.write("more", "strings", "to", "pass")
p.assert_called_once_with("more strings to pass", unsafe_allow_html=False)
def test_exception_type(self):
"""Test st.write with exception."""
with patch("streamlit.delta_generator.DeltaGenerator.exception") as p:
st.write(Exception("some exception"))
p.assert_called_once()
def test_help(self):
"""Test st.write with help types."""
# Test module
with patch("streamlit.delta_generator.DeltaGenerator.help") as p:
st.write(np)
p.assert_called_once()
# Test function
with patch("streamlit.delta_generator.DeltaGenerator.help") as p:
st.write(st.set_option)
p.assert_called_once()
@patch("streamlit.type_util.is_type")
def test_altair_chart(self, is_type):
"""Test st.write with altair_chart."""
is_type.side_effect = make_is_type_mock(type_util._ALTAIR_RE)
class FakeChart:
pass
with patch("streamlit.delta_generator.DeltaGenerator.altair_chart") as p:
st.write(FakeChart())
p.assert_called_once()
@patch("streamlit.type_util.is_type")
def test_pyplot(self, is_type):
"""Test st.write with matplotlib."""
is_type.side_effect = make_is_type_mock("matplotlib.figure.Figure")
class FakePyplot:
pass
with patch("streamlit.delta_generator.DeltaGenerator.pyplot") as p:
st.write(FakePyplot())
p.assert_called_once()
@parameterized.expand(
SHARED_TEST_CASES,
)
def test_input_data(
self,
name: str,
input_data: Any,
metadata: CaseMetadata,
):
"""Test st.write with various input data and check that it uses
the expected command."""
with patch(
f"streamlit.delta_generator.DeltaGenerator.{metadata.expected_write_command}"
) as p:
st.write(input_data)
p.assert_called_once()
def test_plotly(self):
import plotly.graph_objs as go
"""Test st.write with plotly object."""
with patch("streamlit.delta_generator.DeltaGenerator.plotly_chart") as p:
st.write([go.Scatter(x=[1, 2], y=[10, 20])])
p.assert_called_once()
def test_pil_image(self):
"""Test st.write with PIL image objects."""
with patch("streamlit.delta_generator.DeltaGenerator.image") as p:
st.write(Image.new("L", (10, 10), "black"))
p.assert_called_once()
def test_generator(self):
"""Test st.write with generator function."""
def gen_function():
yield "hello"
yield "world"
# Should support it as a generator function
with patch("streamlit.delta_generator.DeltaGenerator.write_stream") as p:
st.write(gen_function)
p.assert_called_once()
# Should support it as a generator function call
with patch("streamlit.delta_generator.DeltaGenerator.write_stream") as p:
st.write(gen_function())
p.assert_called_once()
def test_async_generator(self):
"""Test st.write with async generator function."""
async def async_gen_function():
yield "hello"
yield "world"
# Should support it as a generator function
with patch("streamlit.delta_generator.DeltaGenerator.write_stream") as p:
st.write(async_gen_function)
p.assert_called_once()
with patch("streamlit.delta_generator.DeltaGenerator.write_stream") as p:
st.write(async_gen_function())
p.assert_called_once()
@patch("streamlit.type_util.is_type")
def test_openai_stream(self, is_type):
"""Test st.write with openai.Stream."""
is_type.side_effect = make_is_type_mock("openai.Stream")
class FakeOpenaiStream:
pass
with patch("streamlit.delta_generator.DeltaGenerator.write_stream") as p:
st.write(FakeOpenaiStream())
p.assert_called_once()
def test_namedtuple(self):
"""Test st.write with list."""
with patch("streamlit.delta_generator.DeltaGenerator.json") as p:
Boy = namedtuple("Boy", ("name", "age")) # noqa: PYI024
John = Boy("John", 29)
st.write(John)
p.assert_called_once()
def test_session_state(self):
"""Test st.write with st.session_state."""
with patch("streamlit.delta_generator.DeltaGenerator.json") as p:
st.write(SessionStateProxy())
p.assert_called_once()
def test_query_params(self):
"""Test st.write with st.query_params."""
with patch("streamlit.delta_generator.DeltaGenerator.json") as p:
st.write(QueryParamsProxy())
p.assert_called_once()
def test_delta_generator_input(self):
"""Test st.write with DeltaGenerator as input uses st.help."""
with patch("streamlit.delta_generator.DeltaGenerator.help") as p:
st.write(st.container())
p.assert_called_once()
@patch("builtins.open", new_callable=mock_open, read_data=MOCK_TOML)
def test_streamlit_secrets(self, *mocks):
"""Test st.write with st.secrets."""
with patch("streamlit.delta_generator.DeltaGenerator.json") as p:
st.write(st.secrets)
p.assert_called_once()
@patch("streamlit.delta_generator.DeltaGenerator.markdown")
@patch("streamlit.delta_generator.DeltaGenerator.json")
def test_dict_and_string(self, mock_json, mock_markdown):
"""Test st.write with dict."""
manager = Mock()
manager.attach_mock(mock_json, "json")
manager.attach_mock(mock_markdown, "markdown")
st.write("here is a dict", {"a": 1, "b": 2}, " and that is all")
expected_calls = [
call.markdown("here is a dict", unsafe_allow_html=False),
call.json({"a": 1, "b": 2}),
call.markdown(" and that is all", unsafe_allow_html=False),
]
assert manager.mock_calls == expected_calls
def test_default_object(self):
"""Test st.write with default clause ie some object."""
class SomeObject:
def __str__(self):
return "1 * 2 - 3 = 4 `ok` !"
with patch("streamlit.delta_generator.DeltaGenerator.markdown") as p:
st.write(SomeObject())
p.assert_called_once_with(
"``1 * 2 - 3 = 4 `ok` !``", unsafe_allow_html=False
)
def test_default_object_multiline(self):
"""Test st.write with default clause ie some object with multiline string."""
class SomeObject:
def __str__(self):
return "1 * 2\n - 3\n ``` = \n````\n4 `ok` !"
with patch("streamlit.delta_generator.DeltaGenerator.markdown") as p:
st.write(SomeObject())
p.assert_called_once_with(
"`````\n1 * 2\n - 3\n ``` = \n````\n4 `ok` !\n`````",
unsafe_allow_html=False,
)
def test_class(self):
"""Test st.write with a class."""
class SomeClass:
pass
with patch("streamlit.delta_generator.DeltaGenerator.help") as p:
st.write(SomeClass)
p.assert_called_once_with(SomeClass)
with patch("streamlit.delta_generator.DeltaGenerator.help") as p:
empty_df = pd.DataFrame()
st.write(type(empty_df))
p.assert_called_once_with(type(empty_df))
def test_obj_instance(self):
"""Test st.write with an object instance that doesn't know how to str()."""
class SomeClass:
pass
my_instance = SomeClass()
with patch("streamlit.delta_generator.DeltaGenerator.help") as p:
st.write(my_instance)
p.assert_called_once_with(my_instance)
def test_dataclass_instance(self):
"""Test st.write with a dataclass instance."""
@dataclasses.dataclass
class SomeClass:
pass
my_instance = SomeClass()
with patch("streamlit.delta_generator.DeltaGenerator.help") as p:
st.write(my_instance)
p.assert_called_once_with(my_instance)
# We use "looks like a memory address" as a test inside st.write, so here we're
# checking that that logic isn't broken.
def test_str_looking_like_mem_address(self):
"""Test calling st.write on a string that looks like a memory address."""
with patch("streamlit.delta_generator.DeltaGenerator.markdown") as p:
st.write("<__main__.MyObj object at 0x13d2d0bb0>")
p.assert_called_once()
def test_exception(self):
"""Test st.write that raises an exception."""
# We patch streamlit.exception to observe it, but we also make sure
# it's still called (via side_effect). This ensures that it's called
# with the proper arguments.
with (
patch("streamlit.delta_generator.DeltaGenerator.markdown") as m,
patch(
"streamlit.delta_generator.DeltaGenerator.exception",
side_effect=handle_uncaught_app_exception,
),
):
m.side_effect = Exception("some exception")
with pytest.raises(Exception, match="some exception"):
st.write("some text")
def test_spinner(self):
"""Test st.spinner."""
# TODO(armando): Test that the message is actually passed to
# message.warning
with patch("streamlit.delta_generator.DeltaGenerator.empty") as e:
with st.spinner("some message"):
time.sleep(0.15)
e.assert_called_once_with()
def test_sidebar(self):
"""Test st.write in the sidebar."""
with (
patch("streamlit.delta_generator.DeltaGenerator.markdown") as m,
patch("streamlit.delta_generator.DeltaGenerator.help") as h,
):
st.sidebar.write("markdown", st.help)
m.assert_called_once()
h.assert_called_once()
def test_empty(self):
"""Test st.write from a specific element."""
placeholder = st.empty()
with patch("streamlit.delta_generator.DeltaGenerator.markdown") as p:
placeholder.write("One argument is okay...")
p.assert_called_once()
with pytest.raises(StreamlitAPIException):
# Also override dg._is_top_level for this test.
with patch.object(
st.delta_generator.DeltaGenerator,
"_is_top_level",
new_callable=PropertyMock,
) as top_level:
top_level.return_value = False
placeholder.write("But", "multiple", "args", "should", "fail")
def test_single_string_optimization(self):
"""Test the optimization in st.write() for single string arguments.
When st.write() is called with a single string argument, it should
directly call markdown() without using the buffer logic.
"""
with (
patch("streamlit.delta_generator.DeltaGenerator.markdown") as markdown,
patch("streamlit.delta_generator.DeltaGenerator.empty") as empty,
):
# Test single string - should use optimization
st.write("Hello world")
markdown.assert_called_once_with("Hello world", unsafe_allow_html=False)
empty.assert_not_called() # Verify empty() is not called in optimized case
markdown.reset_mock()
empty.reset_mock()
# Test single string with unsafe_allow_html
st.write("Hello world", unsafe_allow_html=True)
markdown.assert_called_once_with("Hello world", unsafe_allow_html=True)
empty.assert_not_called() # Verify empty() is not called in optimized case
markdown.reset_mock()
empty.reset_mock()
# Test multiple strings - should not use optimization
st.write("Hello", "world")
empty.assert_called_once() # Verify empty() is called in non-optimized case
empty.reset_mock()
| StreamlitWriteTest |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/executor/in_process.py | {
"start": 1461,
"end": 4024
} | class ____(Executor):
def __init__(
self,
retries: RetryMode,
step_dependency_config: StepDependencyConfig = StepDependencyConfig.default(),
marker_to_close: Optional[str] = None,
):
self._retries = check.inst_param(retries, "retries", RetryMode)
self._step_dependency_config = check.inst_param(
step_dependency_config, "step_dependency_config", StepDependencyConfig
)
self.marker_to_close = check.opt_str_param(marker_to_close, "marker_to_close")
@property
def retries(self) -> RetryMode:
return self._retries
@property
def step_dependency_config(self) -> StepDependencyConfig:
return self._step_dependency_config
def execute(
self, plan_context: PlanOrchestrationContext, execution_plan: ExecutionPlan
) -> Iterator[DagsterEvent]:
check.inst_param(plan_context, "plan_context", PlanOrchestrationContext)
check.inst_param(execution_plan, "execution_plan", ExecutionPlan)
step_keys_to_execute = execution_plan.step_keys_to_execute
yield DagsterEvent.engine_event(
plan_context,
f"Executing steps in process (pid: {os.getpid()})",
event_specific_data=EngineEventData.in_process(os.getpid(), step_keys_to_execute),
)
with time_execution_scope() as timer_result:
yield from iter(
ExecuteRunWithPlanIterable(
execution_plan=plan_context.execution_plan,
iterator=inprocess_execution_iterator,
execution_context_manager=PlanExecutionContextManager(
job=plan_context.job,
retry_mode=plan_context.retry_mode,
execution_plan=plan_context.execution_plan,
run_config=plan_context.run_config,
dagster_run=plan_context.dagster_run,
instance=plan_context.instance,
raise_on_error=plan_context.raise_on_error,
output_capture=plan_context.output_capture,
step_dependency_config=self.step_dependency_config,
),
)
)
yield DagsterEvent.engine_event(
plan_context,
f"Finished steps in process (pid: {os.getpid()}) in {format_duration(timer_result.millis)}",
event_specific_data=EngineEventData.in_process(os.getpid(), step_keys_to_execute),
)
| InProcessExecutor |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVar5.py | {
"start": 245,
"end": 1551
} | class ____(Generic[_T1, _T2]):
async def func1(self, a: _T1):
# This should generate an error.
_ = a.temp
# This should generate an error.
_ = a(3)
# This should generate an error.
_ = a[0]
# This should generate an error.
_ = a.temp
# This should generate an error.
_ = a + 1
# This should generate an error.
_ = -a
# This should generate an error.
a += 3
# This should generate an error.
_ = await a
# This should generate an error.
for _ in a:
pass
_ = a.__class__
_ = a.__doc__
async def func2(self, a: _T2):
# This should generate an error.
_ = a.temp
# This should generate an error.
_ = a(3)
# This should generate an error.
_ = a[0]
# This should generate an error.
_ = a.temp
# This should generate an error.
_ = a + 1
# This should generate an error.
_ = -a
# This should generate an error.
a += 3
# This should generate an error.
_ = await a
# This should generate an error.
for _ in a:
pass
_ = a.__class__
_ = a.__doc__
| ClassA |
python | OmkarPathak__pygorithm | tests/test_sorting.py | {
"start": 3408,
"end": 3596
} | class ____(unittest.TestCase, TestSortingAlgorithm):
inplace = True
alph_support = False
@staticmethod
def sort(arr):
return counting_sort.sort(arr)
| TestCountingSort |
python | arrow-py__arrow | arrow/locales.py | {
"start": 131675,
"end": 136237
} | class ____(Locale):
names = ["si", "si-lk"]
past = "{0}ට පෙර"
future = "{0}"
and_word = "සහ"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[Mapping[str, str], str]]] = {
"now": "දැන්",
"second": {
"past": "තත්පරයක",
"future": "තත්පරයකින්",
}, # ක් is the article
"seconds": {
"past": "තත්පර {0} ක",
"future": "තත්පර {0} කින්",
},
"minute": {
"past": "විනාඩියක",
"future": "විනාඩියකින්",
},
"minutes": {
"past": "විනාඩි {0} ක",
"future": "මිනිත්තු {0} කින්",
},
"hour": {"past": "පැයක", "future": "පැයකින්"},
"hours": {
"past": "පැය {0} ක",
"future": "පැය {0} කින්",
},
"day": {"past": "දිනක", "future": "දිනකට"},
"days": {
"past": "දින {0} ක",
"future": "දින {0} කින්",
},
"week": {"past": "සතියක", "future": "සතියකින්"},
"weeks": {
"past": "සති {0} ක",
"future": "සති {0} කින්",
},
"month": {"past": "මාසයක", "future": "එය මාසය තුළ"},
"months": {
"past": "මාස {0} ක",
"future": "මාස {0} කින්",
},
"year": {"past": "වසරක", "future": "වසරක් තුළ"},
"years": {
"past": "අවුරුදු {0} ක",
"future": "අවුරුදු {0} තුළ",
},
}
# Sinhala: the general format to describe timeframe is different from past and future,
# so we do not copy the original timeframes dictionary
timeframes_only_distance = {}
timeframes_only_distance["second"] = "තත්පරයක්"
timeframes_only_distance["seconds"] = "තත්පර {0}"
timeframes_only_distance["minute"] = "මිනිත්තුවක්"
timeframes_only_distance["minutes"] = "විනාඩි {0}"
timeframes_only_distance["hour"] = "පැයක්"
timeframes_only_distance["hours"] = "පැය {0}"
timeframes_only_distance["day"] = "දවසක්"
timeframes_only_distance["days"] = "දවස් {0}"
timeframes_only_distance["week"] = "සතියක්"
timeframes_only_distance["weeks"] = "සති {0}"
timeframes_only_distance["month"] = "මාසයක්"
timeframes_only_distance["months"] = "මාස {0}"
timeframes_only_distance["year"] = "අවුරුද්දක්"
timeframes_only_distance["years"] = "අවුරුදු {0}"
def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str:
"""
Sinhala awares time frame format function, takes into account
the differences between general, past, and future forms (three different suffixes).
"""
abs_delta = abs(delta)
form = self.timeframes[timeframe]
if isinstance(form, str):
return form.format(abs_delta)
if delta > 0:
key = "future"
else:
key = "past"
form = form[key]
return form.format(abs_delta)
def describe(
self,
timeframe: TimeFrameLiteral,
delta: Union[float, int] = 1, # key is always future when only_distance=False
only_distance: bool = False,
) -> str:
"""Describes a delta within a timeframe in plain language.
:param timeframe: a string representing a timeframe.
:param delta: a quantity representing a delta in a timeframe.
:param only_distance: return only distance eg: "11 seconds" without "in" or "ago" keywords
"""
if not only_distance:
return super().describe(timeframe, delta, only_distance)
# Sinhala uses a different case without 'in' or 'ago'
humanized = self.timeframes_only_distance[timeframe].format(trunc(abs(delta)))
return humanized
month_names = [
"",
"ජනවාරි",
"පෙබරවාරි",
"මාර්තු",
"අප්රේල්",
"මැයි",
"ජූනි",
"ජූලි",
"අගෝස්තු",
"සැප්තැම්බර්",
"ඔක්තෝබර්",
"නොවැම්බර්",
"දෙසැම්බර්",
]
month_abbreviations = [
"",
"ජන",
"පෙබ",
"මාර්",
"අප්රේ",
"මැයි",
"ජුනි",
"ජූලි",
"අගෝ",
"සැප්",
"ඔක්",
"නොවැ",
"දෙසැ",
]
day_names = [
"",
"සදුදා",
"අඟහරැවදා",
"බදාදා",
"බ්රහස්පතින්දා",
"සිකුරාදා",
"සෙනසුරාදා",
"ඉරිදා",
]
day_abbreviations = [
"",
"සදුද",
"බදා",
"බදා",
"සිකු",
"සෙන",
"අ",
"ඉරිදා",
]
| SinhalaLocale |
python | catalyst-team__catalyst | catalyst/callbacks/misc.py | {
"start": 289,
"end": 1533
} | class ____(ABC, Callback):
"""Docs"""
def __init__(
self,
loader_key: str,
metric_key: str,
minimize: bool = True,
min_delta: float = 1e-6,
):
"""Docs"""
super().__init__(order=CallbackOrder.external)
self.is_better = MetricHandler(minimize=minimize, min_delta=min_delta)
self.loader_key = loader_key
self.metric_key = metric_key
self.best_score = None
@abstractmethod
def handle_score_is_better(self, runner: "IRunner"):
"""Event handler."""
pass
@abstractmethod
def handle_score_is_not_better(self, runner: "IRunner"):
"""Event handler."""
pass
def on_experiment_start(self, runner: "IRunner") -> None:
"""Event handler."""
self.best_score = None
def on_epoch_end(self, runner: "IRunner") -> None:
"""Event handler."""
score = runner.epoch_metrics[self.loader_key][self.metric_key]
if self.best_score is None or self.is_better(score, self.best_score):
self.best_score = score
self.handle_score_is_better(runner=runner)
else:
self.handle_score_is_not_better(runner=runner)
| IEpochMetricHandlerCallback |
python | numba__numba | numba/tests/test_dyn_array.py | {
"start": 40484,
"end": 45129
} | class ____(MemoryLeakMixin, BaseTest):
def test_0d(self):
def pyfunc(arg):
return np.array(arg)
cfunc = nrtjit(pyfunc)
got = cfunc(42)
self.assertPreciseEqual(got, np.array(42, dtype=np.intp))
got = cfunc(2.5)
self.assertPreciseEqual(got, np.array(2.5))
def test_0d_with_dtype(self):
def pyfunc(arg):
return np.array(arg, dtype=np.int16)
self.check_outputs(pyfunc, [(42,), (3.5,)])
def test_1d(self):
def pyfunc(arg):
return np.array(arg)
cfunc = nrtjit(pyfunc)
# A list
got = cfunc([2, 3, 42])
self.assertPreciseEqual(got, np.intp([2, 3, 42]))
# A heterogeneous tuple
got = cfunc((1.0, 2.5j, 42))
self.assertPreciseEqual(got, np.array([1.0, 2.5j, 42]))
# An empty tuple
got = cfunc(())
self.assertPreciseEqual(got, np.float64(()))
def test_1d_with_dtype(self):
def pyfunc(arg):
return np.array(arg, dtype=np.float32)
self.check_outputs(pyfunc,
[([2, 42],),
([3.5, 1.0],),
((1, 3.5, 42),),
((),),
])
def test_1d_with_str_dtype(self):
def pyfunc(arg):
return np.array(arg, dtype='float32')
self.check_outputs(pyfunc,
[([2, 42],),
([3.5, 1.0],),
((1, 3.5, 42),),
((),),
])
def test_1d_with_non_const_str_dtype(self):
@njit
def func(arg, dt):
return np.array(arg, dtype=dt)
with self.assertRaises(TypingError) as raises:
func((5, 3), 'int32')
excstr = str(raises.exception)
msg = (f"If np.array dtype is a string it must be a "
"string constant.")
self.assertIn(msg, excstr)
def test_2d(self):
def pyfunc(arg):
return np.array(arg)
cfunc = nrtjit(pyfunc)
# A list of tuples
got = cfunc([(1, 2), (3, 4)])
self.assertPreciseEqual(got, np.intp([[1, 2], [3, 4]]))
got = cfunc([(1, 2.5), (3, 4.5)])
self.assertPreciseEqual(got, np.float64([[1, 2.5], [3, 4.5]]))
# A tuple of lists
got = cfunc(([1, 2], [3, 4]))
self.assertPreciseEqual(got, np.intp([[1, 2], [3, 4]]))
got = cfunc(([1, 2], [3.5, 4.5]))
self.assertPreciseEqual(got, np.float64([[1, 2], [3.5, 4.5]]))
# A tuple of tuples
got = cfunc(((1.5, 2), (3.5, 4.5)))
self.assertPreciseEqual(got, np.float64([[1.5, 2], [3.5, 4.5]]))
got = cfunc(((), ()))
self.assertPreciseEqual(got, np.float64(((), ())))
def test_2d_with_dtype(self):
def pyfunc(arg):
return np.array(arg, dtype=np.int32)
cfunc = nrtjit(pyfunc)
got = cfunc([(1, 2.5), (3, 4.5)])
self.assertPreciseEqual(got, np.int32([[1, 2], [3, 4]]))
def test_raises(self):
def pyfunc(arg):
return np.array(arg)
cfunc = nrtjit(pyfunc)
@contextlib.contextmanager
def check_raises(msg):
with self.assertRaises(TypingError) as raises:
yield
self.assertIn(msg, str(raises.exception))
with check_raises(('array(float64, 1d, C) not allowed in a '
'homogeneous sequence')):
cfunc(np.array([1.]))
with check_raises(('type Tuple(int64, reflected list(int64)<iv=None>) '
'does not have a regular shape')):
cfunc((np.int64(1), [np.int64(2)]))
with check_raises(
"cannot convert Tuple(int64, Record(a[type=int32;offset=0],"
"b[type=float32;offset=4];8;False)) to a homogeneous type",
):
st = np.dtype([('a', 'i4'), ('b', 'f4')])
val = np.zeros(1, dtype=st)[0]
cfunc(((1, 2), (np.int64(1), val)))
def test_bad_array(self):
@njit
def func(obj):
return np.array(obj)
msg = '.*The argument "object" must be array-like.*'
with self.assertRaisesRegex(TypingError, msg) as raises:
func(None)
def test_bad_dtype(self):
@njit
def func(obj, dt):
return np.array(obj, dt)
msg = '.*The argument "dtype" must be a data-type if it is provided.*'
with self.assertRaisesRegex(TypingError, msg) as raises:
func(5, 4)
| TestNpArray |
python | MorvanZhou__Reinforcement-learning-with-tensorflow | experiments/Solve_BipedalWalker/DDPG.py | {
"start": 4406,
"end": 7579
} | class ____(object):
def __init__(self, sess, state_dim, action_dim, learning_rate, gamma, t_replace_iter, a, a_):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.lr = learning_rate
self.gamma = gamma
self.t_replace_iter = t_replace_iter
self.t_replace_counter = 0
with tf.variable_scope('Critic'):
# Input (s, a), output q
self.a = a
self.q = self._build_net(S, self.a, 'eval_net', trainable=True)
# Input (s_, a_), output q_ for q_target
self.q_ = self._build_net(S_, a_, 'target_net', trainable=False) # target_q is based on a_ from Actor's target_net
self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval_net')
self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target_net')
with tf.variable_scope('target_q'):
self.target_q = R + self.gamma * self.q_
with tf.variable_scope('abs_TD'):
self.abs_td = tf.abs(self.target_q - self.q)
self.ISWeights = tf.placeholder(tf.float32, [None, 1], name='IS_weights')
with tf.variable_scope('TD_error'):
self.loss = tf.reduce_mean(self.ISWeights * tf.squared_difference(self.target_q, self.q))
with tf.variable_scope('C_train'):
self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.loss, global_step=GLOBAL_STEP)
with tf.variable_scope('a_grad'):
self.a_grads = tf.gradients(self.q, a)[0] # tensor of gradients of each sample (None, a_dim)
def _build_net(self, s, a, scope, trainable):
with tf.variable_scope(scope):
init_w = tf.random_normal_initializer(0., 0.01)
init_b = tf.constant_initializer(0.01)
with tf.variable_scope('l1'):
n_l1 = 700
# combine the action and states together in this way
w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], initializer=init_w, trainable=trainable)
w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], initializer=init_w, trainable=trainable)
b1 = tf.get_variable('b1', [1, n_l1], initializer=init_b, trainable=trainable)
net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
with tf.variable_scope('l2'):
net = tf.layers.dense(net, 20, activation=tf.nn.relu, kernel_initializer=init_w,
bias_initializer=init_b, name='l2', trainable=trainable)
with tf.variable_scope('q'):
q = tf.layers.dense(net, 1, kernel_initializer=init_w, bias_initializer=init_b, trainable=trainable) # Q(s,a)
return q
def learn(self, s, a, r, s_, ISW):
_, abs_td = self.sess.run([self.train_op, self.abs_td], feed_dict={S: s, self.a: a, R: r, S_: s_, self.ISWeights: ISW})
if self.t_replace_counter % self.t_replace_iter == 0:
self.sess.run([tf.assign(t, e) for t, e in zip(self.t_params, self.e_params)])
self.t_replace_counter += 1
return abs_td
| Critic |
python | django-extensions__django-extensions | tests/testapp/models.py | {
"start": 8398,
"end": 8686
} | class ____(models.Model):
title = models.CharField(max_length=42)
slug = AutoSlugField(populate_from="get_readable_title")
class Meta:
app_label = "django_extensions"
def get_readable_title(self):
return get_readable_title(self)
| ModelMethodSluggedTestModel |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 320073,
"end": 321298
} | class ____(Response):
"""
Response of tasks.get_types endpoint.
:param types: Unique list of the task types used in the requested projects
:type types: Sequence[str]
"""
_service = "tasks"
_action = "get_types"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"types": {
"description": "Unique list of the task types used in the requested projects",
"items": {"type": "string"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, types: Optional[List[str]] = None, **kwargs: Any) -> None:
super(GetTypesResponse, self).__init__(**kwargs)
self.types = types
@schema_property("types")
def types(self) -> Optional[List[str]]:
return self._property_types
@types.setter
def types(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_types = None
return
self.assert_isinstance(value, "types", (list, tuple))
self.assert_isinstance(value, "types", six.string_types, is_array=True)
self._property_types = value
| GetTypesResponse |
python | scikit-image__scikit-image | src/skimage/transform/_geometric.py | {
"start": 45011,
"end": 53606
} | class ____(ProjectiveTransform):
"""Affine transformation.
Has the following form::
X = a0 * x + a1 * y + a2
= sx * x * [cos(rotation) + tan(shear_y) * sin(rotation)]
- sy * y * [tan(shear_x) * cos(rotation) + sin(rotation)]
+ translation_x
Y = b0 * x + b1 * y + b2
= sx * x * [sin(rotation) - tan(shear_y) * cos(rotation)]
- sy * y * [tan(shear_x) * sin(rotation) - cos(rotation)]
+ translation_y
where ``sx`` and ``sy`` are scale factors in the x and y directions.
This is equivalent to applying the operations in the following order:
1. Scale
2. Shear
3. Rotate
4. Translate
The homogeneous transformation matrix is::
[[a0 a1 a2]
[b0 b1 b2]
[0 0 1]]
In 2D, the transformation parameters can be given as the homogeneous
transformation matrix, above, or as the implicit parameters, scale,
rotation, shear, and translation in x (a2) and y (b2). For 3D and higher,
only the matrix form is allowed.
In narrower transforms, such as the Euclidean (only rotation and
translation) or Similarity (rotation, translation, and a global scale
factor) transforms, it is possible to specify 3D transforms using implicit
parameters also.
Parameters
----------
matrix : (D+1, D+1) array_like, optional
Homogeneous transformation matrix. If this matrix is provided, it is an
error to provide any of scale, rotation, shear, or translation.
scale : {s as float or (sx, sy) as array, list or tuple}, optional
Scale factor(s). If a single value, it will be assigned to both
sx and sy. Only available for 2D.
.. versionadded:: 0.17
Added support for supplying a single scalar value.
shear : float or 2-tuple of float, optional
The x and y shear angles, clockwise, by which these axes are
rotated around the origin [2].
If a single value is given, take that to be the x shear angle, with
the y angle remaining 0. Only available in 2D.
rotation : float, optional
Rotation angle, clockwise, as radians. Only available for 2D.
translation : (tx, ty) as array, list or tuple, optional
Translation parameters. Only available for 2D.
dimensionality : int, optional
Fallback number of dimensions for transform when none of `matrix`,
`scale`, `rotation`, `shear` or `translation` are specified. If any of
`scale`, `rotation`, `shear` or `translation` are specified, must equal
2 (the default).
Attributes
----------
params : (D+1, D+1) array
Homogeneous transformation matrix.
Raises
------
ValueError
If both ``matrix`` and any of the other parameters are provided.
Examples
--------
>>> import numpy as np
>>> import skimage as ski
Define a transform with an homogeneous transformation matrix:
>>> tform = ski.transform.AffineTransform(np.diag([2., 3., 1.]))
>>> tform.params
array([[2., 0., 0.],
[0., 3., 0.],
[0., 0., 1.]])
Define a transform with parameters:
>>> tform = ski.transform.AffineTransform(scale=4, rotation=0.2)
>>> np.round(tform.params, 2)
array([[ 3.92, -0.79, 0. ],
[ 0.79, 3.92, 0. ],
[ 0. , 0. , 1. ]])
You can estimate a transformation to map between source and destination
points:
>>> src = np.array([[150, 150],
... [250, 100],
... [150, 200]])
>>> dst = np.array([[200, 200],
... [300, 150],
... [150, 400]])
>>> tform = ski.transform.AffineTransform.from_estimate(src, dst)
>>> np.allclose(tform.params, [[ 0.5, -1. , 275. ],
... [ 1.5, 4. , -625. ],
... [ 0. , 0. , 1. ]])
True
Apply the transformation to some image data.
>>> img = ski.data.astronaut()
>>> warped = ski.transform.warp(img, inverse_map=tform.inverse)
The estimation can fail - for example, if all the input or output points
are the same. If this happens, you will get a transform that is not
"truthy" - meaning that ``bool(tform)`` is ``False``:
>>> # A successfully estimated model is truthy (applying ``bool()``
>>> # gives ``True``):
>>> if tform:
... print("Estimation succeeded.")
Estimation succeeded.
>>> # Not so for a degenerate transform with identical points.
>>> bad_src = np.ones((3, 2))
>>> bad_tform = ski.transform.AffineTransform.from_estimate(
... bad_src, dst)
>>> if not bad_tform:
... print("Estimation failed.")
Estimation failed.
Trying to use this failed estimation transform result will give a suitable
error:
>>> bad_tform.params # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
FailedEstimationAccessError: No attribute "params" for failed estimation ...
References
----------
.. [1] Wikipedia, "Affine transformation",
https://en.wikipedia.org/wiki/Affine_transformation#Image_transformation
.. [2] Wikipedia, "Shear mapping",
https://en.wikipedia.org/wiki/Shear_mapping
"""
def __init__(
self,
matrix=None,
*,
scale=None,
shear=None,
rotation=None,
translation=None,
dimensionality=None,
):
n_srst_none = sum(p is None for p in (scale, rotation, shear, translation))
if n_srst_none != 4:
if matrix is not None:
raise ValueError(
"Do not specify any implicit parameters when "
"matrix is specified."
)
if dimensionality is not None and dimensionality > 2:
raise ValueError('Implicit parameters only valid for 2D transforms')
# 2D parameter checks explicit or implicit in _srst2matrix.
matrix = self._srst2matrix(scale, rotation, shear, translation)
if matrix.shape[0] != 3:
raise ValueError('Implicit parameters must give 2D transforms')
super().__init__(matrix=matrix, dimensionality=dimensionality)
@property
def _coeff_inds(self):
"""Indices into flat ``self.params`` with coefficients to estimate"""
return range(self.dimensionality * (self.dimensionality + 1))
def _srst2matrix(self, scale, rotation, shear, translation):
scale = (1, 1) if scale is None else scale
sx, sy = (scale, scale) if np.isscalar(scale) else scale
rotation = 0 if rotation is None else rotation
if not np.isscalar(rotation):
raise ValueError('rotation must be scalar (2D rotation)')
shear = 0 if shear is None else shear
shear_x, shear_y = (shear, 0) if np.isscalar(shear) else shear
translation = (0, 0) if translation is None else translation
if np.isscalar(translation):
raise ValueError('translation must be length 2')
a2, b2 = translation
a0 = sx * (math.cos(rotation) + math.tan(shear_y) * math.sin(rotation))
a1 = -sy * (math.tan(shear_x) * math.cos(rotation) + math.sin(rotation))
b0 = sx * (math.sin(rotation) - math.tan(shear_y) * math.cos(rotation))
b1 = -sy * (math.tan(shear_x) * math.sin(rotation) - math.cos(rotation))
return np.array([[a0, a1, a2], [b0, b1, b2], [0, 0, 1]])
@property
def scale(self):
if self.dimensionality != 2:
return np.sqrt(np.sum(self.params**2, axis=0))[: self.dimensionality]
ss = np.sum(self.params**2, axis=0)
ss[1] = ss[1] / (math.tan(self.shear) ** 2 + 1)
return np.sqrt(ss)[: self.dimensionality]
@property
def rotation(self):
if self.dimensionality != 2:
raise NotImplementedError(
'The rotation property is only implemented for 2D transforms.'
)
return math.atan2(self.params[1, 0], self.params[0, 0])
@property
def shear(self):
if self.dimensionality != 2:
raise NotImplementedError(
'The shear property is only implemented for 2D transforms.'
)
beta = math.atan2(-self.params[0, 1], self.params[1, 1])
return beta - self.rotation
@property
def translation(self):
return self.params[0 : self.dimensionality, self.dimensionality]
| AffineTransform |
python | getsentry__sentry | src/sentry/web/frontend/debug/debug_recovery_codes_regenerated_email.py | {
"start": 390,
"end": 1156
} | class ____(View):
def get(self, request: AuthenticatedHttpRequest) -> HttpResponse:
authenticator = Authenticator(id=0, type=3, user_id=request.user.id) # u2f
email = generate_security_email(
account=request.user,
actor=request.user,
type="recovery-codes-regenerated",
ip_address=request.META["REMOTE_ADDR"],
context={"authenticator": authenticator},
# make this consistent for acceptance tests
current_datetime=datetime.datetime(2017, 1, 20, 21, 39, 23, 30723),
)
return MailPreview(
html_template=email.html_template, text_template=email.template, context=email.context
).render(request)
| DebugRecoveryCodesRegeneratedEmailView |
python | walkccc__LeetCode | solutions/1453. Maximum Number of Darts Inside of a Circular Dartboard/1453.py | {
"start": 0,
"end": 87
} | class ____:
def __init__(self, x: float, y: float):
self.x = x
self.y = y
| Point |
python | matplotlib__matplotlib | lib/mpl_toolkits/axisartist/axisline_style.py | {
"start": 3715,
"end": 6698
} | class ____(_Style):
"""
A container class which defines style classes for AxisArtists.
An instance of any axisline style class is a callable object,
whose call signature is ::
__call__(self, axis_artist, path, transform)
When called, this should return an `.Artist` with the following methods::
def set_path(self, path):
# set the path for axisline.
def set_line_mutation_scale(self, scale):
# set the scale
def draw(self, renderer):
# draw
"""
_style_list = {}
class _Base:
# The derived classes are required to be able to be initialized
# w/o arguments, i.e., all its argument (except self) must have
# the default values.
def __init__(self):
"""
initialization.
"""
super().__init__()
def __call__(self, axis_artist, transform):
"""
Given the AxisArtist instance, and transform for the path (set_path
method), return the Matplotlib artist for drawing the axis line.
"""
return self.new_line(axis_artist, transform)
class SimpleArrow(_Base):
"""
A simple arrow.
"""
ArrowAxisClass = _FancyAxislineStyle.SimpleArrow
def __init__(self, size=1):
"""
Parameters
----------
size : float
Size of the arrow as a fraction of the ticklabel size.
"""
self.size = size
super().__init__()
def new_line(self, axis_artist, transform):
linepath = Path([(0, 0), (0, 1)])
axisline = self.ArrowAxisClass(axis_artist, linepath, transform,
line_mutation_scale=self.size)
return axisline
_style_list["->"] = SimpleArrow
class FilledArrow(SimpleArrow):
"""
An arrow with a filled head.
"""
ArrowAxisClass = _FancyAxislineStyle.FilledArrow
def __init__(self, size=1, facecolor=None):
"""
Parameters
----------
size : float
Size of the arrow as a fraction of the ticklabel size.
facecolor : :mpltype:`color`, default: :rc:`axes.edgecolor`
Fill color.
.. versionadded:: 3.7
"""
facecolor = mpl._val_or_rc(facecolor, 'axes.edgecolor')
self.size = size
self._facecolor = facecolor
super().__init__(size=size)
def new_line(self, axis_artist, transform):
linepath = Path([(0, 0), (0, 1)])
axisline = self.ArrowAxisClass(axis_artist, linepath, transform,
line_mutation_scale=self.size,
facecolor=self._facecolor)
return axisline
_style_list["-|>"] = FilledArrow
| AxislineStyle |
python | getsentry__sentry | src/sentry/snuba/metrics/query.py | {
"start": 4859,
"end": 18583
} | class ____(MetricsQueryValidationRunner):
"""
Snuba provides a new language called MQL which has been designed to replace the old metrics language.
We intend to deprecate the old metrics language in the future. For any new features, we recommend using MQL.
Documentation of MQL can be found at https://getsentry.github.io/snuba/language/mql.html and
https://getsentry.github.io/snuba-sdk/snuba_sdk.html#MetricsQuery
Definition of a metrics query, inspired by snuba_sdk.Query
"""
org_id: int
project_ids: Sequence[int]
select: Sequence[MetricField]
granularity: Granularity
# ToDo(ahmed): In the future, once we start parsing conditions, the only conditions that should be here should be
# instances of MetricConditionField
start: datetime | None = None
end: datetime | None = None
where: Sequence[BooleanCondition | Condition | MetricConditionField] | None = None
having: ConditionGroup | None = None
groupby: Sequence[MetricGroupByField] | None = None
orderby: Sequence[MetricOrderByField] | None = None
limit: Limit | None = None
# In cases where limit involves calculation (eg. top N series), we want to cap the limit since it'll be blocked otherwise.
max_limit: Limit | None = None
offset: Offset | None = None
include_totals: bool = True
include_series: bool = True
interval: int | None = None
# This field is used as a temporary fix to allow the metrics layer to support alerts by generating snql that
# doesn't take into account time bounds as the alerts service uses subscriptable queries that react in real time
# to dataset changes.
is_alerts_query: bool = False
# Need to skip the orderby validation for ondemand queries, this is because ondemand fields are based on a spec
# instead of being direct fields
skip_orderby_validation: bool = False
@cached_property
def projects(self) -> QuerySet[Project]:
return Project.objects.filter(id__in=self.project_ids)
@cached_property
def use_case_id(self) -> UseCaseID:
return self._use_case_id(self.select[0].metric_mri)
@staticmethod
def _use_case_id(metric_mri: str) -> UseCaseID:
"""Find correct use_case_id based on metric_name"""
parsed_mri = parse_mri(metric_mri)
assert parsed_mri is not None
try:
return UseCaseID(parsed_mri.namespace)
except ValueError:
raise ValueError("Can't find correct use_case_id based on metric MRI")
@staticmethod
def _validate_field(field: MetricField) -> None:
all_derived_metrics = get_derived_metrics()
# Validate the validity of the expression meaning that if an operation is present, then it needs to be one of
# of the supported operations and that the metric mri should be one of the aggregated derived metrics
if field.op:
if field.op not in OPERATIONS:
raise InvalidParams(
f"Invalid operation '{field.op}'. Must be one of {', '.join(OPERATIONS)}"
)
if field.metric_mri in all_derived_metrics:
raise DerivedMetricParseException(
f"Failed to parse {field.op}({get_public_name_from_mri(field.metric_mri)}). No operations can be "
f"applied on this field as it is already a derived metric with an "
f"aggregation applied to it."
)
def validate_select(self) -> None:
if len(self.select) == 0:
raise InvalidParams('Request is missing a "field"')
use_case_ids = set()
for field in self.select:
use_case_ids.add(self._use_case_id(field.metric_mri))
self._validate_field(field)
if len(use_case_ids) > 1:
raise InvalidParams("All select fields should have the same use_case_id")
def validate_where(self) -> None:
if not self.where:
return
for condition in self.where:
if (
isinstance(condition, Condition)
and isinstance(condition.lhs, Column)
and condition.lhs.name in UNALLOWED_TAGS
):
# This is a special condition that holds only for the usage with alerts which requires the
# session.status to be injected in the where clause for performance reasons. This condition should
# be removed once we change how alerts uses the metrics layer.
if not (condition.lhs.name == "session.status" and self.is_alerts_query):
raise InvalidParams(
f"Tag name {condition.lhs.name} is not a valid query filter"
)
def validate_orderby(self) -> None:
if not self.orderby or self.skip_orderby_validation:
return
for metric_order_by_field in self.orderby:
# We filter all the fields that are strings because we don't require them for the order by validation and
# if they contain invalid strings, they will be catched during the snql generation.
if isinstance(metric_order_by_field.field, MetricField):
self._validate_field(metric_order_by_field.field)
orderby_metric_fields: set[MetricField] = set()
metric_entities: set[MetricEntity | None] = set()
group_by_str_fields: set[str] = self.action_by_str_fields(on_group_by=True)
for metric_order_by_field in self.orderby:
if isinstance(metric_order_by_field.field, MetricField):
orderby_metric_fields.add(metric_order_by_field.field)
# Construct a metrics expression
metric_field_obj = metric_object_factory(
metric_order_by_field.field.op, metric_order_by_field.field.metric_mri
)
use_case_id = self._use_case_id(metric_order_by_field.field.metric_mri)
entity = metric_field_obj.get_entity(self.projects, use_case_id)
if isinstance(entity, Mapping):
metric_entities.update(entity.keys())
else:
metric_entities.add(entity)
elif isinstance(metric_order_by_field.field, str):
if metric_order_by_field.field not in group_by_str_fields:
raise InvalidParams(
f"String field {metric_order_by_field.field} in the 'order by' must be also "
f"in the 'group by'"
)
# If metric entities set contains more than 1 metric, we can't orderBy these fields
if len(metric_entities) > 1:
raise InvalidParams("Selected 'orderBy' columns must belongs to the same entity")
# Validate all orderby columns are presented in provided 'fields'
if set(self.select).issuperset(orderby_metric_fields):
return
raise InvalidParams("'orderBy' must be one of the provided 'fields'")
def action_by_str_fields(self, on_group_by: bool) -> set[str]:
action_by_str_fields: set[str] = set()
for action_by_field in (self.groupby if on_group_by else self.orderby) or []:
if isinstance(action_by_field.field, str):
action_by_str_fields.add(action_by_field.field)
return action_by_str_fields
def validate_limit(self) -> None:
if self.limit is None:
return
intervals_len = get_num_intervals(
end=self.end,
start=self.start,
granularity=self.granularity.granularity,
interval=self.interval,
)
if self.max_limit and self.max_limit < MAX_POINTS:
return
if self.limit.limit > MAX_POINTS:
raise InvalidParams(
f"Requested limit exceeds the maximum allowed limit of {MAX_POINTS}"
)
if self.start and self.end and self.include_series:
if intervals_len * self.limit.limit > MAX_POINTS:
raise InvalidParams(
f"Requested intervals ({intervals_len}) of timedelta of "
f"{timedelta(seconds=self.granularity.granularity)} with statsPeriod "
f"timedelta of {self.end - self.start} is too granular for a per_page of "
f"{self.limit.limit} elements. Increase your interval, decrease your "
f"statsPeriod, or decrease your per_page parameter."
)
def validate_groupby(self) -> None:
    """Reject groupBy entries whose tag name is disallowed."""
    for entry in self.groupby or ():
        field = entry.field
        if isinstance(field, str) and field in UNALLOWED_TAGS:
            raise InvalidParams(
                f"Tag name {field} cannot be used in groupBy query"
            )
def validate_include_totals(self) -> None:
    """Require at least one of totals/series to be requested."""
    if not (self.include_totals or self.include_series):
        raise InvalidParams("Cannot omit both series and totals")
def get_default_limit(self) -> int:
    """Default totals limit: MAX_POINTS, scaled down for series queries.

    A series query multiplies every total by the number of intervals, so
    the limit shrinks accordingly while always allowing at least one group.
    """
    if not (self.start and self.end and self.include_series):
        return MAX_POINTS
    intervals_len = get_num_intervals(
        start=self.start,
        end=self.end,
        granularity=self.granularity.granularity,
        interval=self.interval,
    )
    return max(MAX_POINTS // intervals_len, 1)
def validate_end(self) -> None:
    """Ensure a non-empty time range: start must be strictly before end."""
    if self.start and self.end and not (self.start < self.end):
        raise InvalidParams("start must be before end")
def validate_granularity(self) -> None:
    """Validate (and possibly adjust) granularity versus interval.

    For transactions series queries the granularity may be lowered to fit
    the requested interval (the dataclass is frozen, hence
    ``object.__setattr__``). Also rejects granularities below the minimum
    resolution, those not dividing one day, and ranges producing more than
    MAX_POINTS results.
    """
    # Logic specific to how we handle time series in discover in terms of granularity and interval
    if (
        self.use_case_id == UseCaseID.TRANSACTIONS
        and self.include_series
        and self.interval is not None
    ):
        if self.granularity.granularity > self.interval:
            # If granularity is greater than interval, then we try to set granularity to the smallest allowed
            # granularity smaller than that interval
            # Copied from: sentry/search/events/builder.py::TimeseriesMetricQueryBuilder.__init__()
            for granularity in METRICS_LAYER_GRANULARITIES:
                if granularity < self.interval:
                    object.__setattr__(self, "granularity", Granularity(granularity))
                    break

    # hard code min. allowed resolution to 10 seconds
    allowed_resolution = AllowedResolution.ten_seconds

    smallest_interval, interval_str = allowed_resolution.value
    if (
        self.granularity.granularity % smallest_interval != 0
        or self.granularity.granularity < smallest_interval
    ):
        raise InvalidParams(
            f"The interval has to be a multiple of the minimum interval of {interval_str}."
        )

    if ONE_DAY % self.granularity.granularity != 0:
        raise InvalidParams("The interval should divide one day without a remainder.")

    # see what's our effective interval (either the one passed in or the one from the granularity)
    if self.interval is None:
        interval = self.granularity.granularity
    else:
        interval = self.interval

    if self.start and self.end and self.include_series:
        # For this calculation, we decided to round down to the integer since if we get 10.000,x we prefer to allow
        # the query and lose some data points. On the other hand, if we get 11.000,x we will not allow the query.
        if int((self.end - self.start).total_seconds() / interval) > MAX_POINTS:
            raise InvalidParams(
                "Your interval and date range would create too many results. "
                "Use a larger interval, or a smaller date range."
            )
def validate_interval(self) -> None:
    """Only timeseries performance (transactions) queries may set an interval."""
    if self.interval is None:
        return
    unsupported = self.use_case_id is UseCaseID.SESSIONS or (
        self.use_case_id is UseCaseID.TRANSACTIONS and not self.include_series
    )
    if unsupported:
        raise InvalidParams("Interval is only supported for timeseries performance queries")
def validate_is_alerts_query(self) -> None:
    """Require both start and end unless the query is issued for alerts.

    Raises InvalidParams when either bound is missing on a non-alerts query.
    """
    # We only allow the omission of start and end if this is an alerts query.
    if (self.start is None or self.end is None) and not self.is_alerts_query:
        # Fix: the message previously read "env" where the validated field is "end".
        raise InvalidParams(
            "start and end fields can only be None if the query is needed by alerts"
        )
def __post_init__(self) -> None:
    """Finish initialization: derive the default limit and interval.

    The dataclass is frozen, so derived fields are written via
    ``object.__setattr__``.
    """
    super().__post_init__()

    # Only if we have a start and end date we want to use the limit.
    if self.start and self.end and self.limit is None:
        # Cannot set attribute directly because dataclass is frozen:
        # https://docs.python.org/3/library/dataclasses.html#frozen-instances
        object.__setattr__(self, "limit", Limit(self.get_default_limit()))

    # Transactions series queries default the interval to the granularity.
    if (
        self.use_case_id
        in [
            UseCaseID.TRANSACTIONS,
        ]
        and self.include_series
        and self.interval is None
    ):
        object.__setattr__(self, "interval", self.granularity.granularity)
| DeprecatingMetricsQuery |
python | getsentry__sentry | src/sentry/apidocs/examples/sentry_app_examples.py | {
"start": 51,
"end": 6457
} | class ____:
# Canned response payloads used as OpenAPI examples for the Sentry App
# (custom integration) endpoints. Data only — no logic.

# Example response for retrieving a single custom integration.
RETRIEVE_SENTRY_APP = [
    OpenApiExample(
        "Retrieve a custom integration",
        value={
            "allowedOrigins": [],
            "author": "ACME Corp",
            "avatars": [
                {
                    "avatarType": "avatar",
                    "avatarUuid": "6c25b771-a576-4c18-a1c3-ab059c1d42ba",
                    "avatarUrl": "https://example.com/avatar.png",
                    "color": False,
                    "photoType": "icon",
                }
            ],
            "events": ["issue"],
            "isAlertable": False,
            "metadata": "",
            "name": "ACME Corp Integration",
            "overview": None,
            "popularity": 27,
            "redirectUrl": None,
            "featureData": [],
            "schema": "",
            "scopes": ["event:read", "org:read"],
            "slug": "acme-corp-integration",
            "status": "unpublished",
            "uuid": "77cebea3-019e-484d-8673-6c3969698827",
            "verifyInstall": True,
            "webhookUrl": "https://example.com/webhook",
            "clientId": "ed06141686bb60102d878c607eff449fa9907fa7a8cb70f0d337a8fb0b6566c3",
            "clientSecret": "**********",
            "owner": {"id": 42, "slug": "acme-corp"},
        },
        status_codes=["200"],
        response_only=True,
    )
]

# Example response for updating a custom integration (same shape as retrieve).
UPDATE_SENTRY_APP = [
    OpenApiExample(
        "Update a custom integration",
        value={
            "allowedOrigins": [],
            "author": "ACME Corp",
            "avatars": [
                {
                    "avatarType": "avatar",
                    "avatarUuid": "6c25b771-a576-4c18-a1c3-ab059c1d42ba",
                    "avatarUrl": "https://example.com/avatar.png",
                    "color": False,
                    "photoType": "icon",
                }
            ],
            "events": ["issue"],
            "isAlertable": False,
            "metadata": "",
            "name": "ACME Corp Integration",
            "overview": None,
            "popularity": 27,
            "redirectUrl": None,
            "featureData": [],
            "schema": "",
            "scopes": ["event:read", "org:read"],
            "slug": "acme-corp-integration",
            "status": "unpublished",
            "uuid": "77cebea3-019e-484d-8673-6c3969698827",
            "verifyInstall": True,
            "webhookUrl": "https://example.com/webhook",
            "clientId": "ed06141686bb60102d878c607eff449fa9907fa7a8cb70f0d337a8fb0b6566c3",
            "clientSecret": "**********",
            "owner": {"id": 42, "slug": "acme-corp"},
        },
        status_codes=["200"],
        response_only=True,
    )
]

# Example response listing external issues linked to an issue id.
GET_PLATFORM_EXTERNAL_ISSUE = [
    OpenApiExample(
        "Retrieve the custom integrations associated with an issue id",
        value=[
            {
                "id": "123456",
                "issueId": "1234567890",
                "serviceType": "example-app",
                "displayName": "example-issue#2",
                "webUrl": "https://example.com/my-test-project/issue/example-issue-2/this-is-an-example-python-exception",
            }
        ],
        status_codes=["200"],
        response_only=True,
    )
]

# Example response listing the integrations owned by an organization.
GET_ORGANIZATIONS_SENTRY_APPS = [
    OpenApiExample(
        "Retrieve the custom integrations created by the given organization",
        value=[
            {
                "allowedOrigins": [],
                "author": "ACME Corp",
                "avatars": [
                    {
                        "avatarType": "avatar",
                        "avatarUuid": "6c25b771-a576-4c18-a1c3-ab059c1d42ba",
                        "avatarUrl": "https://example.com/avatar.png",
                        "color": False,
                        "photoType": "icon",
                    }
                ],
                "events": ["issue"],
                "isAlertable": False,
                "metadata": "",
                "name": "ACME Corp Integration",
                "overview": None,
                "popularity": 27,
                "redirectUrl": None,
                "featureData": [],
                "schema": "",
                "scopes": ["event:read", "org:read"],
                "slug": "acme-corp-integration",
                "status": "unpublished",
                "uuid": "77cebea3-019e-484d-8673-6c3969698827",
                "verifyInstall": True,
                "webhookUrl": "https://example.com/webhook",
                "clientId": "ed06141686bb60102d878c607eff449fa9907fa7a8cb70f0d337a8fb0b6566c3",
                "clientSecret": "**********",
                "owner": {"id": 42, "slug": "acme-corp"},
            },
            {
                "allowedOrigins": [],
                "author": "ACME Corp",
                "avatars": [],
                "events": ["issue", "event"],
                "isAlertable": False,
                "metadata": "",
                "name": "ACME Corp Integration v2",
                "overview": None,
                "popularity": 0,
                "redirectUrl": "example.com",
                "featureData": [],
                "schema": "",
                "scopes": ["event:admin", "org:admin"],
                "slug": "acme-corp-integration-v2",
                "status": "unpublished",
                "uuid": "77cebea3-019e-484d-8673-123124234",
                "verifyInstall": True,
                "webhookUrl": "https://example.com/webhook",
                "clientId": "2730a92919437a7b052e6827cd2c9f119be37101asdasdad123131231231231",
                "clientSecret": "**********",
                "owner": {"id": 42, "slug": "acme-corp"},
            },
        ],
        status_codes=["200"],
        response_only=True,
    )
]
| SentryAppExamples |
python | python__mypy | mypy/test/testsemanal.py | {
"start": 3510,
"end": 4716
} | class ____(DataSuite):
# Every test case in the data file must provide an expected-output section.
required_out_section = True
# Data-driven test files exercised by this suite.
files = ["semanal-symtable.test"]
def run_case(self, testcase: DataDrivenTestCase) -> None:
    """Perform a test case.

    Builds the test-case source, renders each test module's symbol table as
    text, and compares it against the expected output. Compile errors are
    used as the actual output instead.
    """
    try:
        # Build test case input.
        src = "\n".join(testcase.input)
        result = build.build(
            sources=[BuildSource("main", None, src)],
            options=get_semanal_options(src, testcase),
            alt_lib_path=test_temp_dir,
        )
        # The output is the symbol table converted into a string.
        a = result.errors
        if a:
            raise CompileError(a)
        for module in sorted(result.files.keys()):
            if module in testcase.test_modules:
                a.append(f"{module}:")
                # Indent each symbol-table line under the module heading.
                for s in str(result.files[module].names).split("\n"):
                    a.append(" " + s)
    except CompileError as e:
        # Errors (from build or raised above) become the actual output.
        a = e.messages
    assert_string_arrays_equal(
        testcase.output,
        a,
        f"Invalid semantic analyzer output ({testcase.file}, line {testcase.line})",
    )
# Type info export test cases
| SemAnalSymtableSuite |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/signal/fft_ops_test.py | {
"start": 35338,
"end": 38599
} | class ____(test.TestCase, parameterized.TestCase):
def test_definition(self):
    # fftshift moves the zero-frequency term to the center of the spectrum;
    # ifftshift is its inverse. Checked for both odd and even lengths.
    with self.session():
        x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
        y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
        self.assertAllEqual(fft_ops.fftshift(x), y)
        self.assertAllEqual(fft_ops.ifftshift(y), x)
        x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
        y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
        self.assertAllEqual(fft_ops.fftshift(x), y)
        self.assertAllEqual(fft_ops.ifftshift(y), x)
def test_axes_keyword(self):
    # `axes` selects which dimensions to shift; a scalar axis must behave
    # like a one-element tuple, and omitting it shifts every axis.
    with self.session():
        freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
        shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
        self.assertAllEqual(fft_ops.fftshift(freqs, axes=(0, 1)), shifted)
        self.assertAllEqual(
            fft_ops.fftshift(freqs, axes=0),
            fft_ops.fftshift(freqs, axes=(0,)))
        self.assertAllEqual(fft_ops.ifftshift(shifted, axes=(0, 1)), freqs)
        self.assertAllEqual(
            fft_ops.ifftshift(shifted, axes=0),
            fft_ops.ifftshift(shifted, axes=(0,)))
        self.assertAllEqual(fft_ops.fftshift(freqs), shifted)
        self.assertAllEqual(fft_ops.ifftshift(shifted), freqs)
def test_numpy_compatibility(self):
    # TF's fftshift/ifftshift must agree with the NumPy reference
    # implementation for 1-D (odd/even lengths) and 2-D inputs.
    with self.session():
        x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
        y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
        self.assertAllEqual(fft_ops.fftshift(x), np.fft.fftshift(x))
        self.assertAllEqual(fft_ops.ifftshift(y), np.fft.ifftshift(y))
        x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
        y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
        self.assertAllEqual(fft_ops.fftshift(x), np.fft.fftshift(x))
        self.assertAllEqual(fft_ops.ifftshift(y), np.fft.ifftshift(y))
        freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
        shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
        self.assertAllEqual(
            fft_ops.fftshift(freqs, axes=(0, 1)),
            np.fft.fftshift(freqs, axes=(0, 1)))
        self.assertAllEqual(
            fft_ops.ifftshift(shifted, axes=(0, 1)),
            np.fft.ifftshift(shifted, axes=(0, 1)))
@parameterized.parameters(None, 1, ([1, 2],))
def test_placeholder(self, axes):
    # Graph-mode only: verifies the shift works when the static shape is
    # unknown at graph-construction time (placeholder input).
    if context.executing_eagerly():
        return
    x = array_ops.placeholder(shape=[None, None, None], dtype="float32")
    y_fftshift = fft_ops.fftshift(x, axes=axes)
    y_ifftshift = fft_ops.ifftshift(x, axes=axes)
    x_np = np.random.rand(16, 256, 256)
    with self.session() as sess:
        y_fftshift_res, y_ifftshift_res = sess.run(
            [y_fftshift, y_ifftshift],
            feed_dict={x: x_np})
    self.assertAllClose(y_fftshift_res, np.fft.fftshift(x_np, axes=axes))
    self.assertAllClose(y_ifftshift_res, np.fft.ifftshift(x_np, axes=axes))
def test_negative_axes(self):
    # Negative axis indices must resolve the same way as their positive
    # counterparts (e.g. -1 == last axis).
    with self.session():
        freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
        shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
        self.assertAllEqual(fft_ops.fftshift(freqs, axes=(0, -1)), shifted)
        self.assertAllEqual(fft_ops.ifftshift(shifted, axes=(0, -1)), freqs)
        self.assertAllEqual(
            fft_ops.fftshift(freqs, axes=-1), fft_ops.fftshift(freqs, axes=(1,)))
        self.assertAllEqual(
            fft_ops.ifftshift(shifted, axes=-1),
            fft_ops.ifftshift(shifted, axes=(1,)))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
    test.main()
| FFTShiftTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/visitors.py | {
"start": 4017,
"end": 12416
} | class ____(Enum):
r"""Defines visitor symbols used for internal traversal.
The :class:`.InternalTraversal` class is used in two ways. One is that
it can serve as the superclass for an object that implements the
various visit methods of the class. The other is that the symbols
themselves of :class:`.InternalTraversal` are used within
the ``_traverse_internals`` collection. Such as, the :class:`.Case`
object defines ``_traverse_internals`` as ::
class Case(ColumnElement[_T]):
_traverse_internals = [
("value", InternalTraversal.dp_clauseelement),
("whens", InternalTraversal.dp_clauseelement_tuples),
("else_", InternalTraversal.dp_clauseelement),
]
Above, the :class:`.Case` class indicates its internal state as the
attributes named ``value``, ``whens``, and ``else_``. They each
link to an :class:`.InternalTraversal` method which indicates the type
of datastructure to which each attribute refers.
Using the ``_traverse_internals`` structure, objects of type
:class:`.InternalTraversible` will have the following methods automatically
implemented:
* :meth:`.HasTraverseInternals.get_children`
* :meth:`.HasTraverseInternals._copy_internals`
* :meth:`.HasCacheKey._gen_cache_key`
Subclasses can also implement these methods directly, particularly for the
:meth:`.HasTraverseInternals._copy_internals` method, when special steps
are needed.
.. versionadded:: 1.4
"""
dp_has_cache_key = "HC"
"""Visit a :class:`.HasCacheKey` object."""
dp_has_cache_key_list = "HL"
"""Visit a list of :class:`.HasCacheKey` objects."""
dp_clauseelement = "CE"
"""Visit a :class:`_expression.ClauseElement` object."""
dp_fromclause_canonical_column_collection = "FC"
"""Visit a :class:`_expression.FromClause` object in the context of the
``columns`` attribute.
The column collection is "canonical", meaning it is the originally
defined location of the :class:`.ColumnClause` objects. Right now
this means that the object being visited is a
:class:`_expression.TableClause`
or :class:`_schema.Table` object only.
"""
dp_clauseelement_tuples = "CTS"
"""Visit a list of tuples which contain :class:`_expression.ClauseElement`
objects.
"""
dp_clauseelement_list = "CL"
"""Visit a list of :class:`_expression.ClauseElement` objects.
"""
dp_clauseelement_tuple = "CT"
"""Visit a tuple of :class:`_expression.ClauseElement` objects.
"""
dp_executable_options = "EO"
dp_compile_state_funcs = "WC"
dp_fromclause_ordered_set = "CO"
"""Visit an ordered set of :class:`_expression.FromClause` objects. """
dp_string = "S"
"""Visit a plain string value.
Examples include table and column names, bound parameter keys, special
keywords such as "UNION", "UNION ALL".
The string value is considered to be significant for cache key
generation.
"""
dp_string_list = "SL"
"""Visit a list of strings."""
dp_anon_name = "AN"
"""Visit a potentially "anonymized" string value.
The string value is considered to be significant for cache key
generation.
"""
dp_boolean = "B"
"""Visit a boolean value.
The boolean value is considered to be significant for cache key
generation.
"""
dp_operator = "O"
"""Visit an operator.
The operator is a function from the :mod:`sqlalchemy.sql.operators`
module.
The operator value is considered to be significant for cache key
generation.
"""
dp_type = "T"
"""Visit a :class:`.TypeEngine` object
The type object is considered to be significant for cache key
generation.
"""
dp_plain_dict = "PD"
"""Visit a dictionary with string keys.
The keys of the dictionary should be strings, the values should
be immutable and hashable. The dictionary is considered to be
significant for cache key generation.
"""
dp_dialect_options = "DO"
"""Visit a dialect options structure."""
dp_string_clauseelement_dict = "CD"
"""Visit a dictionary of string keys to :class:`_expression.ClauseElement`
objects.
"""
dp_string_multi_dict = "MD"
"""Visit a dictionary of string keys to values which may either be
plain immutable/hashable or :class:`.HasCacheKey` objects.
"""
dp_annotations_key = "AK"
"""Visit the _annotations_cache_key element.
This is a dictionary of additional information about a ClauseElement
that modifies its role. It should be included when comparing or caching
objects, however generating this key is relatively expensive. Visitors
should check the "_annotations" dict for non-None first before creating
this key.
"""
dp_plain_obj = "PO"
"""Visit a plain python object.
The value should be immutable and hashable, such as an integer.
The value is considered to be significant for cache key generation.
"""
dp_named_ddl_element = "DD"
"""Visit a simple named DDL element.
The current object used by this method is the :class:`.Sequence`.
The object is only considered to be important for cache key generation
as far as its name, but not any other aspects of it.
"""
dp_prefix_sequence = "PS"
"""Visit the sequence represented by :class:`_expression.HasPrefixes`
or :class:`_expression.HasSuffixes`.
"""
dp_table_hint_list = "TH"
"""Visit the ``_hints`` collection of a :class:`_expression.Select`
object.
"""
dp_setup_join_tuple = "SJ"
dp_memoized_select_entities = "ME"
dp_statement_hint_list = "SH"
"""Visit the ``_statement_hints`` collection of a
:class:`_expression.Select`
object.
"""
dp_unknown_structure = "UK"
"""Visit an unknown structure.
"""
dp_dml_ordered_values = "DML_OV"
"""Visit the values() ordered tuple list of an
:class:`_expression.Update` object."""
dp_dml_values = "DML_V"
"""Visit the values() dictionary of a :class:`.ValuesBase`
(e.g. Insert or Update) object.
"""
dp_dml_multi_values = "DML_MV"
"""Visit the values() multi-valued list of dictionaries of an
:class:`_expression.Insert` object.
"""
dp_propagate_attrs = "PA"
"""Visit the propagate attrs dict. This hardcodes to the particular
elements we care about right now."""
"""Symbols that follow are additional symbols that are useful in
caching applications.
Traversals for :class:`_expression.ClauseElement` objects only need to use
those symbols present in :class:`.InternalTraversal`. However, for
additional caching use cases within the ORM, symbols dealing with the
:class:`.HasCacheKey` class are added here.
"""
dp_ignore = "IG"
"""Specify an object that should be ignored entirely.
This currently applies function call argument caching where some
arguments should not be considered to be part of a cache key.
"""
dp_inspectable = "IS"
"""Visit an inspectable object where the return value is a
:class:`.HasCacheKey` object."""
dp_multi = "M"
"""Visit an object that may be a :class:`.HasCacheKey` or may be a
plain hashable object."""
dp_multi_list = "MT"
"""Visit a tuple containing elements that may be :class:`.HasCacheKey` or
may be a plain hashable object."""
dp_has_cache_key_tuples = "HT"
"""Visit a list of tuples which contain :class:`.HasCacheKey`
objects.
"""
dp_inspectable_list = "IL"
"""Visit a list of inspectable objects which upon inspection are
HasCacheKey objects."""
dp_params = "PM"
"""Visit the _params collection of ExecutableStatement"""
_TraverseInternalsType = List[Tuple[str, InternalTraversal]]
"""a structure that defines how a HasTraverseInternals should be
traversed.
This structure consists of a list of (attributename, internaltraversal)
tuples, where the "attributename" refers to the name of an attribute on an
instance of the HasTraverseInternals object, and "internaltraversal" refers
to an :class:`.InternalTraversal` enumeration symbol defining what kind
of data this attribute stores, which indicates to the traverser how it should
be handled.
"""
| InternalTraversal |
python | pytorch__pytorch | test/test_transformers.py | {
"start": 207107,
"end": 221571
} | class ____(NNTestCase):
""" Used to test XPU only functionality of scaled_dot_product_attention
Mostly migrate from TestSDPACudaOnly in test/test_transformers.py
"""
@parametrize("type", ["dense"])
@parametrize("dropout", [0.0, 0.7])
@parametrize("dtype", [torch.float64, torch.float32, torch.bfloat16, torch.half])
@skipIfTorchDynamo()
def test_fused_sdp_choice_xpu(self, device, type: str, dropout: float, dtype: torch.dtype):
# Migrate from test_fused_sdp_choice_cpu
make_tensor = partial(rand_sdpa_tensor, type=type, device=device, dtype=dtype)
size = SdpaShape(2, 8, 128, 64)
q, k, v = make_tensor(size), make_tensor(size), make_tensor(size)
if dropout > 0.0 or dtype not in [torch.float32, torch.bfloat16, torch.float16]:
assert torch._fused_sdp_choice(q, k, v, dropout_p=dropout) == SDPBackend.MATH.value
else:
assert torch._fused_sdp_choice(q, k, v, dropout_p=dropout) == SDPBackend.OVERRIDEABLE.value
def test_fused_attention_different_dk_dv(self, device):
dtype = torch.bfloat16
make_tensor = partial(torch.rand, device=device, dtype=dtype, requires_grad=False)
batch, num_heads, head_dim_k, head_dim_v = 32, 16, 128, 64
q_shape = SdpaShape(batch, num_heads, 1, head_dim_k)
k_shape = SdpaShape(batch, num_heads, 2, head_dim_k)
v_shape = SdpaShape(batch, num_heads, 2, head_dim_v)
query, key, value = make_tensor(q_shape), make_tensor(k_shape), make_tensor(v_shape)
actual = F.scaled_dot_product_attention(
query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False)
math_ref = torch.ops.aten._scaled_dot_product_attention_math(
query.float(), key.float(), value.float(), attn_mask=None, dropout_p=0.0, is_causal=False)[0]
self.assertEqual(actual.contiguous(), math_ref.contiguous().to(dtype), atol=1e-3, rtol=1e-2)
@parametrize("dtype", [torch.half, torch.bfloat16])
@parametrize("batch_size,n_head,n_head_kv,q_size,kv_size,head_dim", [
(2, 64, 16, 9216, 77, 64),
(2, 32, 4, 2304, 2304, 64),
(2, 32, 2, 2304, 77, 64),
(2, 20, 2, 576, 576, 64),
(2, 20, 2, 576, 77, 64),
(2, 20, 2, 144, 144, 64),
(2, 20, 2, 144, 77, 64),
(1, 32, 2, 1, 32, 128),
(4, 32, 4, 1, 32, 128),
(1, 32, 2, 32, 32, 128),
(4, 32, 4, 32, 32, 128),
(1, 32, 2, 2016, 2016, 128),
(4, 32, 4, 2016, 2016, 128),
])
@parametrize("is_causal", [True, False])
def test_fused_attention_gqa(self, device, dtype, batch_size, n_head, n_head_kv, q_size, kv_size, head_dim, is_causal):
tol = Tolerances(1e-5, 5e-6)
if dtype is torch.bfloat16:
tol = Tolerances(5e-2, 5e-2)
if dtype is torch.float16:
tol = Tolerances(1e-2, 1e-2)
make_tensor = partial(torch.rand, device=device, dtype=dtype, requires_grad=False)
q_shape = SdpaShape(batch_size, n_head, q_size, head_dim)
k_shape = SdpaShape(batch_size, n_head_kv, kv_size, head_dim)
v_shape = SdpaShape(batch_size, n_head_kv, kv_size, head_dim)
query, key, value = make_tensor(q_shape), make_tensor(k_shape), make_tensor(v_shape)
actual = F.scaled_dot_product_attention(
query, key, value, attn_mask=None, dropout_p=0.0, is_causal=is_causal, enable_gqa=True)
math_ref = torch.ops.aten._scaled_dot_product_attention_math(
query.float(), key.float(), value.float(), attn_mask=None, dropout_p=0.0, is_causal=is_causal, enable_gqa=True)[0]
self.assertEqual(actual.contiguous(), math_ref.contiguous().to(dtype), atol=tol.atol, rtol=tol.rtol)
def test_onednn_attention_fail_d576(self, device):
# Test that onednn graph attention dispatching correctly bails out on d > 576
b, h = 1, 2
s_q, s_kv = 128, 128
d_qk, d_v = 1024, 1024
q = torch.randn(b, h, s_q, d_qk, device=device, dtype=torch.bfloat16)
k = torch.randn(b, h, s_kv, d_qk, device=device, dtype=torch.bfloat16)
v = torch.randn(b, h, s_kv, d_v, device=device, dtype=torch.bfloat16)
with sdpa_kernel(backends=[SDPBackend.OVERRIDEABLE]):
with self.assertRaisesRegex(RuntimeError, "No available kernel."):
_ = F.scaled_dot_product_attention(q, k, v)
def test_fused_attention_broadcasted_input(self, device):
dtype = torch.bfloat16
make_tensor = partial(torch.rand, device=device, dtype=dtype, requires_grad=False)
batch, num_heads, seqlen, head_dim = 32, 16, 128, 32
q_shape = SdpaShape(batch, num_heads, seqlen, head_dim)
k_shape = SdpaShape(batch, num_heads, seqlen, head_dim)
v_shape = SdpaShape(batch, num_heads, seqlen, head_dim)
query, key, value = make_tensor(q_shape), make_tensor(k_shape), make_tensor(v_shape)
attn_mask_shape = (1, seqlen)
attn_mask = make_tensor(attn_mask_shape)
attn_mask = attn_mask.expand(1, 1, seqlen, seqlen)
# test that we do not dispatch to onednn for an unsupported case
actual = F.scaled_dot_product_attention(
query, key, value, attn_mask=attn_mask, dropout_p=0.0, is_causal=False)
math_ref = torch.ops.aten._scaled_dot_product_attention_math(
query.float(), key.float(), value.float(), attn_mask=attn_mask, dropout_p=0.0, is_causal=False)[0]
self.assertEqual(actual.contiguous(), math_ref.contiguous().to(dtype), atol=1e-3, rtol=1e-2)
def test_attention_preserves_query_layout(self, device):
    # The output's memory layout must mirror the query's (possibly permuted)
    # layout, for every permutation of the non-head-dim axes.

    def test_attention(permute_order: list[list[int]]):
        BHSqD = [4, 16, 256, 64]
        BHSkvD = [4, 16, 512, 64]

        shape_q = [BHSqD[idx] for idx in permute_order]
        shape_kv = [BHSkvD[idx] for idx in permute_order]
        reverse = [permute_order.index(idx) for idx in range(4)]
        q = torch.randn(*shape_q, dtype=torch.bfloat16, device=device, requires_grad=False).permute(reverse)
        k = torch.randn(*shape_kv, dtype=torch.bfloat16, device=device, requires_grad=False).permute(reverse)
        v = torch.randn(*shape_kv, dtype=torch.bfloat16, device=device, requires_grad=False).permute(reverse)
        self.assertEqual(q.shape, BHSqD)
        self.assertEqual(k.shape, BHSkvD)
        self.assertEqual(v.shape, BHSkvD)

        out = F.scaled_dot_product_attention(q, k, v)
        self.assertTrue(out.permute(permute_order).is_contiguous())

    permutable = [0, 1, 2]
    permute_orders = itertools.permutations(permutable)

    for permute_order in permute_orders:
        test_attention(list(permute_order) + [3])

def test_backends_set_to_math(self, device):
    # Inside sdpa_kernel([MATH]) only the math backend is enabled.
    dtype = torch.bfloat16
    q_shape = SdpaShape(1, 1, 8, 16)
    kv_shape = SdpaShape(1, 1, 12, 16)
    make_q = partial(torch.rand, q_shape, device=device, dtype=dtype)
    make_kv = partial(torch.rand, kv_shape, device=device, dtype=dtype)
    q, k, v = make_q(), make_kv(), make_kv()
    with sdpa_kernel(backends=[SDPBackend.MATH]):
        self.assertTrue(torch._C._get_math_sdp_enabled())
        self.assertFalse(torch._C._get_overrideable_sdp_enabled())
        _ = F.scaled_dot_product_attention(q, k, v)

def test_default_priority_order(self, device):
    # The default priority order of xpu is overridable, math, flash, efficient, cudnn
    # For xpu backend, we need to make sure that overridable > math > flash
    dtype = torch.bfloat16
    shape = SdpaShape(1, 1, 1, 1)
    make_tensor = partial(torch.rand, shape, device=device, dtype=dtype)
    t = make_tensor()
    # run sdp_choice to make sure priority_order is set by XPU default priority_order
    torch._fused_sdp_choice(t, t, t)
    from torch.nn.attention import _cur_sdpa_kernel_backends
    default_priority = _cur_sdpa_kernel_backends(with_priority=True)
    flash_index = default_priority.index(SDPBackend.FLASH_ATTENTION)
    overrideable_index = default_priority.index(SDPBackend.OVERRIDEABLE)
    math_index = default_priority.index(SDPBackend.MATH)
    self.assertTrue(overrideable_index < math_index < flash_index,
                    f"Expected overrideable < math < flash, got {overrideable_index}, {math_index}, {flash_index}")

def test_scaled_dot_product_attention_fused_kernels_safe_softmax(self, device):
    # A fully-masked (-inf) row must not produce NaNs: the "safe softmax"
    # path should match the math reference.
    dtype = torch.bfloat16
    make_tensor = partial(torch.rand, device=device, dtype=dtype, requires_grad=False)
    batch, num_heads, seqlen, head_dim = 32, 16, 32, 64
    q_shape = SdpaShape(batch, num_heads, seqlen, head_dim)
    k_shape = SdpaShape(batch, num_heads, seqlen, head_dim)
    v_shape = SdpaShape(batch, num_heads, seqlen, head_dim)
    query, key, value = make_tensor(q_shape), make_tensor(k_shape), make_tensor(v_shape)
    attn_mask = torch.full((seqlen, seqlen), float('-inf'), device=device, dtype=torch.bfloat16)

    actual = F.scaled_dot_product_attention(
        query, key, value, attn_mask=attn_mask, dropout_p=0.0, is_causal=False)
    math_ref = torch.ops.aten._scaled_dot_product_attention_math(
        query.float(), key.float(), value.float(), attn_mask=attn_mask, dropout_p=0.0, is_causal=False)[0]

    self.assertEqual(actual.contiguous(), math_ref.contiguous().to(dtype), atol=1e-3, rtol=1e-2)
@parametrize("type", ["dense"])
@parametrize("is_contiguous", [True, False])
def test_scaled_dot_product_attention_fused_kernels_packed(self, device, type: str, is_contiguous: bool):
make_tensor = partial(rand_sdpa_tensor, type=type, device=device, dtype=torch.float16, packed=True)
batch_size, seq_len, num_heads, head_dim = 32, 64, 16, 64
shape = SdpaShape(batch_size, num_heads, seq_len, head_dim)
# Test Packed
qkv = make_tensor(shape)
query, key, value = qkv.chunk(3, dim=-1)
query = query.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
if is_contiguous:
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
with sdpa_kernel(backends=[SDPBackend.OVERRIDEABLE]):
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False)
math_ref = torch.ops.aten._scaled_dot_product_attention_math(
query.contiguous(), key.contiguous(), value.contiguous(), attn_mask=None, dropout_p=0.0, is_causal=False)[0]
self.assertEqual(actual.contiguous(), math_ref.contiguous(), atol=2e-3, rtol=1e-2)
@parametrize("fused_kernel", [SDPBackend.MATH, SDPBackend.OVERRIDEABLE])
@parametrize("dtype", [torch.half, torch.bfloat16, torch.float32])
@parametrize("batch_size,n_head,q_size,kv_size,head_dim", [
(2, 5, 9216, 9216, 64),
(2, 5, 9216, 77, 64),
(2, 10, 2304, 2304, 64),
(2, 10, 2304, 77, 64),
(2, 20, 576, 576, 64),
(2, 20, 576, 77, 64),
(2, 20, 144, 144, 64),
(2, 20, 144, 77, 64),
(1, 32, 1, 32, 128),
(4, 32, 1, 32, 128),
(1, 32, 32, 32, 128),
(4, 32, 32, 32, 128),
(1, 32, 2016, 2016, 128),
(4, 32, 2016, 2016, 128),
])
@parametrize("mask_type", ["float", "causal"])
@parametrize("train", [False])
def test_scaled_dot_product_fused_attention_mask_vs_math(
self,
device,
fused_kernel,
dtype,
batch_size,
q_size,
kv_size,
n_head,
head_dim,
mask_type,
train,
):
# Migrate from TestSDPACpuOnly
tol = Tolerances(1e-5, 5e-6)
if dtype is torch.bfloat16:
tol = Tolerances(5e-2, 5e-2)
if dtype is torch.float16:
tol = Tolerances(1e-2, 1e-2)
mask_shape = [batch_size, 1, 1, kv_size]
make_tensor = partial(rand_sdpa_tensor, type="dense", device=device, dtype=dtype, requires_grad=False)
q_shape = SdpaShape(batch_size, n_head, q_size, head_dim)
kv_shape = SdpaShape(batch_size, n_head, kv_size, head_dim)
q = make_tensor(q_shape)
k = make_tensor(kv_shape)
v = make_tensor(kv_shape)
q2, k2, v2 = q.clone(), k.clone(), v.clone()
if train:
q.requires_grad_(True)
k.requires_grad_(True)
v.requires_grad_(True)
q2.requires_grad_(True)
k2.requires_grad_(True)
v2.requires_grad_(True)
# (B, nh, T, hs)
q = q.view(batch_size, q_size, n_head, head_dim).transpose(1, 2)
k = k.view(batch_size, kv_size, n_head, head_dim).transpose(1, 2)
v = v.view(batch_size, kv_size, n_head, head_dim).transpose(1, 2)
attn_mask = None
is_causal = False
if mask_type == "bool":
attn_mask = torch.randint(0, 2, size=mask_shape, dtype=torch.bool, device=device)
elif mask_type == "float":
attn_mask = torch.randn(mask_shape, dtype=dtype, device=device)
elif mask_type == "causal":
is_causal = True
q2, k2, v2 = q2.float(), k2.float(), v2.float()
q2 = q2.view(batch_size, q_size, n_head, head_dim).transpose(1, 2)
k2 = k2.view(batch_size, kv_size, n_head, head_dim).transpose(1, 2)
v2 = v2.view(batch_size, kv_size, n_head, head_dim).transpose(1, 2)
attn_mask2 = attn_mask.float() if attn_mask is not None else None
if fused_kernel == SDPBackend.MATH:
actual = torch.ops.aten._scaled_dot_product_attention_math(
q, k, v, attn_mask=attn_mask, dropout_p=0.0, is_causal=is_causal)[0]
elif fused_kernel == SDPBackend.OVERRIDEABLE:
actual = torch.ops.aten._scaled_dot_product_fused_attention_overrideable(
q, k, v, attn_bias=attn_mask, dropout_p=0.0, is_causal=is_causal)[0]
math_ref = torch.ops.aten._scaled_dot_product_attention_math(
q2, k2, v2, attn_mask=attn_mask2, dropout_p=0.0, is_causal=is_causal)[0]
self.assertEqual(actual.float(), math_ref, atol=tol.atol, rtol=tol.rtol)
| TestSDPAXpuOnly |
python | Pylons__pyramid | src/pyramid/events.py | {
"start": 4093,
"end": 5587
} | class ____:
"""An instance of this class is emitted as an :term:`event`
whenever any :app:`Pyramid` :term:`view` or :term:`exception
view` returns a :term:`response`.
The instance has two attributes:``request``, which is the request
which caused the response, and ``response``, which is the response
object returned by a view or renderer.
If the ``response`` was generated by an :term:`exception view`, the
request will have an attribute named ``exception``, which is the
exception object which caused the exception view to be executed. If the
response was generated by a 'normal' view, this attribute of the request
will be ``None``.
This event will not be generated if a response cannot be created due to
an exception that is not caught by an exception view (no response is
created under this circumstance).
This class implements the
:class:`pyramid.interfaces.INewResponse` interface.
.. note::
Postprocessing a response is usually better handled in a WSGI
:term:`middleware` component than in subscriber code that is
called by a :class:`pyramid.interfaces.INewResponse` event.
The :class:`pyramid.interfaces.INewResponse` event exists
almost purely for symmetry with the
:class:`pyramid.interfaces.INewRequest` event.
"""
def __init__(self, request, response):
self.request = request
self.response = response
@implementer(IBeforeTraversal)
| NewResponse |
python | streamlit__streamlit | lib/streamlit/elements/widgets/time_widgets.py | {
"start": 13558,
"end": 14357
} | class ____:
value: datetime | None
min: datetime
max: datetime
def deserialize(self, ui_value: list[str] | None) -> datetime | None:
if ui_value is not None and len(ui_value) > 0:
deserialized = _normalize_datetime_value(
datetime.strptime(ui_value[0], _DATETIME_UI_FORMAT)
)
# Validate against min/max bounds
# If the value is out of bounds, return the previous valid value
if deserialized < self.min or deserialized > self.max:
return self.value
return deserialized
return self.value
def serialize(self, v: datetime | None) -> list[str]:
if v is None:
return []
return [_datetime_to_proto_string(v)]
@dataclass
| DateTimeInputSerde |
python | cython__cython | Cython/Plex/Actions.py | {
"start": 74,
"end": 318
} | class ____:
def perform(self, token_stream, text):
pass # abstract
def __copy__(self):
return self # immutable, no need to copy
def __deepcopy__(self, memo):
return self # immutable, no need to copy
| Action |
python | scikit-learn__scikit-learn | examples/miscellaneous/plot_metadata_routing.py | {
"start": 18742,
"end": 21726
} | class ____(ClassifierMixin, BaseEstimator):
def __init__(self, transformer, classifier):
self.transformer = transformer
self.classifier = classifier
def get_metadata_routing(self):
router = (
MetadataRouter(owner=self)
# We add the routing for the transformer.
.add(
transformer=self.transformer,
method_mapping=MethodMapping()
# The metadata is routed such that it retraces how
# `SimplePipeline` internally calls the transformer's `fit` and
# `transform` methods in its own methods (`fit` and `predict`).
.add(caller="fit", callee="fit")
.add(caller="fit", callee="transform")
.add(caller="predict", callee="transform"),
)
# We add the routing for the classifier.
.add(
classifier=self.classifier,
method_mapping=MethodMapping()
.add(caller="fit", callee="fit")
.add(caller="predict", callee="predict"),
)
)
return router
def fit(self, X, y, **fit_params):
routed_params = process_routing(self, "fit", **fit_params)
self.transformer_ = clone(self.transformer).fit(
X, y, **routed_params.transformer.fit
)
X_transformed = self.transformer_.transform(
X, **routed_params.transformer.transform
)
self.classifier_ = clone(self.classifier).fit(
X_transformed, y, **routed_params.classifier.fit
)
return self
def predict(self, X, **predict_params):
routed_params = process_routing(self, "predict", **predict_params)
X_transformed = self.transformer_.transform(
X, **routed_params.transformer.transform
)
return self.classifier_.predict(
X_transformed, **routed_params.classifier.predict
)
# %%
# Note the usage of :class:`~utils.metadata_routing.MethodMapping` to
# declare which methods of the child estimator (callee) are used in which
# methods of the meta estimator (caller). As you can see, `SimplePipeline` uses
# the transformer's ``transform`` and ``fit`` methods in ``fit``, and its
# ``transform`` method in ``predict``, and that's what you see implemented in
# the routing structure of the pipeline class.
#
# Another difference in the above example with the previous ones is the usage
# of :func:`~utils.metadata_routing.process_routing`, which processes the input
# parameters, does the required validation, and returns the `routed_params`
# which we had created in previous examples. This reduces the boilerplate code
# a developer needs to write in each meta-estimator's method. Developers are
# strongly recommended to use this function unless there is a good reason
# against it.
#
# In order to test the above pipeline, let's add an example transformer.
| SimplePipeline |
python | automl__auto-sklearn | autosklearn/pipeline/components/data_preprocessing/__init__.py | {
"start": 815,
"end": 6369
} | class ____(AutoSklearnChoice):
@classmethod
def get_components(cls) -> OrderedDict:
components: OrderedDict = OrderedDict()
components.update(_preprocessors)
components.update(_addons.components)
return components
def get_available_components(
self,
dataset_properties: Optional[Dict] = None,
include: Optional[Dict] = None,
exclude: Optional[Dict] = None,
) -> OrderedDict:
if dataset_properties is None:
dataset_properties = {}
if include is not None and exclude is not None:
raise ValueError(
"The argument include and exclude cannot be used together."
)
available_comp = self.get_components()
if include is not None:
for incl in include:
if incl not in available_comp:
raise ValueError(
"Trying to include unknown component: " "%s" % incl
)
# TODO check for task type classification and/or regression!
components_dict = OrderedDict()
for name in available_comp:
if include is not None and name not in include:
continue
elif exclude is not None and name in exclude:
continue
entry = available_comp[name]
# Exclude itself to avoid infinite loop
if entry == DataPreprocessorChoice or hasattr(entry, "get_components"):
continue
target_type = dataset_properties["target_type"]
if target_type == "classification":
if entry.get_properties()["handles_classification"] is False:
continue
if (
dataset_properties.get("multiclass") is True
and entry.get_properties()["handles_multiclass"] is False
):
continue
if (
dataset_properties.get("multilabel") is True
and entry.get_properties()["handles_multilabel"] is False
):
continue
elif target_type == "regression":
if entry.get_properties()["handles_regression"] is False:
continue
if (
dataset_properties.get("multioutput") is True
and entry.get_properties()["handles_multioutput"] is False
):
continue
else:
raise ValueError("Unknown target type %s" % target_type)
components_dict[name] = entry
return components_dict
def get_hyperparameter_search_space(
self,
feat_type: Optional[FEAT_TYPE_TYPE] = None,
dataset_properties: Optional[Dict] = None,
default: str = None,
include: Optional[Dict] = None,
exclude: Optional[Dict] = None,
) -> ConfigurationSpace:
cs = ConfigurationSpace()
if dataset_properties is None:
dataset_properties = {}
# Compile a list of legal preprocessors for this problem
available_preprocessors = self.get_available_components(
dataset_properties=dataset_properties, include=include, exclude=exclude
)
if len(available_preprocessors) == 0:
raise ValueError("No preprocessors found, please add NoPreprocessing")
if default is None:
defaults = ["feature_type"]
for default_ in defaults:
if default_ in available_preprocessors:
default = default_
break
preprocessor = CategoricalHyperparameter(
"__choice__", list(available_preprocessors.keys()), default_value=default
)
cs.add_hyperparameter(preprocessor)
for name in available_preprocessors:
preprocessor_configuration_space = available_preprocessors[name](
feat_type=feat_type, dataset_properties=dataset_properties
).get_hyperparameter_search_space(dataset_properties=dataset_properties)
parent_hyperparameter = {"parent": preprocessor, "value": name}
cs.add_configuration_space(
name,
preprocessor_configuration_space,
parent_hyperparameter=parent_hyperparameter,
)
return cs
def transform(self, X: PIPELINE_DATA_DTYPE) -> PIPELINE_DATA_DTYPE:
return self.choice.transform(X)
def set_hyperparameters(
self,
configuration: ConfigurationSpace,
feat_type: Optional[FEAT_TYPE_TYPE] = None,
init_params: Optional[Dict] = None,
) -> "DataPreprocessorChoice":
config = {}
params = configuration.get_dictionary()
choice = params["__choice__"]
del params["__choice__"]
for param, value in params.items():
param = param.replace(choice, "").split(":", 1)[1]
config[param] = value
new_params = {}
if init_params is not None:
for param, value in init_params.items():
param = param.replace(choice, "").split(":", 1)[-1]
if "feat_type" in param:
feat_type = value
else:
new_params[param] = value
self.choice = self.get_components()[choice](
config=config, init_params=new_params, feat_type=feat_type
)
return self
| DataPreprocessorChoice |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 61596,
"end": 61880
} | class ____(themeable):
"""
Position of legend title
Parameters
----------
theme_element : Literal["top", "bottom", "left", "right"] | None
Position of the legend title. The default depends on the position
of the legend.
"""
| legend_title_position |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI049.py | {
"start": 203,
"end": 546
} | class ____(_UsedTypedDict):
bar: list[int]
_UnusedTypedDict3 = TypedDict("_UnusedTypedDict3", {"foo": int})
_UsedTypedDict3 = TypedDict("_UsedTypedDict3", {"bar": bytes})
def uses_UsedTypedDict3(arg: _UsedTypedDict3) -> None: ...
# In `.py` files, we don't flag unused definitions in class scopes (unlike in `.pyi`
# files).
| _CustomClass |
python | wandb__wandb | tests/system_tests/test_launch/test_launch_kubernetes.py | {
"start": 8197,
"end": 8704
} | class ____:
def __init__(self, mock_api_client, jobs):
self.jobs = jobs
async def read_namespaced_job(self, name, namespace):
return self.jobs[name]
async def list_namespaced_job(self, namespace, label_selector="", **kwargs):
ret = []
k, v = label_selector.split("=")
if k == "job-name":
for job in self.jobs.items():
if job.metadata.name == v:
ret.append(job)
return MockPodList(ret)
| MockBatchV1Api |
python | apache__airflow | airflow-core/tests/unit/core/test_otel_logger.py | {
"start": 1594,
"end": 11956
} | class ____:
def setup_method(self):
self.meter = mock.Mock(MeterProvider)
self.stats = SafeOtelLogger(otel_provider=self.meter)
self.map = self.stats.metrics_map.map
self.logger = logging.getLogger(__name__)
def test_is_up_down_counter_positive(self):
udc = next(iter(UP_DOWN_COUNTERS))
assert _is_up_down_counter(udc)
def test_is_up_down_counter_negative(self):
assert not _is_up_down_counter("this_is_not_a_udc")
def test_exemption_list_has_not_grown(self):
assert len(BACK_COMPAT_METRIC_NAMES) <= 26, (
"This test exists solely to ensure that nobody is adding names to the exemption list. "
"There are 26 names which are potentially too long for OTel and that number should "
"only ever go down as these names are deprecated. If this test is failing, please "
"adjust your new stat's name; do not add as exemption without a very good reason."
)
@pytest.mark.parametrize(
"invalid_stat_combo",
[
*[
pytest.param(("prefix", name), id=f"Stat name {msg}.")
for (name, msg) in INVALID_STAT_NAME_CASES
],
*[
pytest.param((prefix, "name"), id=f"Stat prefix {msg}.")
for (prefix, msg) in INVALID_STAT_NAME_CASES
],
],
)
def test_invalid_stat_names_are_caught(self, invalid_stat_combo):
prefix = invalid_stat_combo[0]
name = invalid_stat_combo[1]
self.stats.prefix = prefix
with pytest.raises(InvalidStatsNameException):
self.stats.incr(name)
self.meter.assert_not_called()
def test_old_name_exception_works(self, caplog):
name = "task_instance_created_OperatorNameWhichIsSuperLongAndExceedsTheOpenTelemetryCharacterLimit/task_instance_created_OperatorNameWhichIsSuperLongAndExceedsTheOpenTelemetryCharacterLimit/task_instance_created_OperatorNameWhichIsSuperLongAndExceedsTheOpenTelemetryCharacterLimit"
assert len(name) > OTEL_NAME_MAX_LENGTH
with pytest.warns(MetricNameLengthExemptionWarning):
self.stats.incr(name)
self.meter.get_meter().create_counter.assert_called_once_with(
name=(full_name(name)[:OTEL_NAME_MAX_LENGTH])
)
def test_incr_new_metric(self, name):
self.stats.incr(name)
self.meter.get_meter().create_counter.assert_called_once_with(name=full_name(name))
def test_incr_new_metric_with_tags(self, name):
tags = {"hello": "world"}
key = _generate_key_name(full_name(name), tags)
self.stats.incr(name, tags=tags)
self.meter.get_meter().create_counter.assert_called_once_with(name=full_name(name))
self.map[key].add.assert_called_once_with(1, attributes=tags)
def test_incr_existing_metric(self, name):
# Create the metric and set value to 1
self.stats.incr(name)
# Increment value to 2
self.stats.incr(name)
assert self.map[full_name(name)].add.call_count == 2
self.meter.get_meter().create_counter.assert_called_once_with(name=full_name(name))
@mock.patch("random.random", side_effect=[0.1, 0.9])
def test_incr_with_rate_limit_works(self, mock_random, name):
# Create the counter and set the value to 1
self.stats.incr(name, rate=0.5)
# This one should not increment because random() will return a value higher than `rate`
self.stats.incr(name, rate=0.5)
# This one should raise an exception for a negative `rate` value
with pytest.raises(ValueError, match=RATE_MUST_BE_POSITIVE_MSG):
self.stats.incr(name, rate=-0.5)
assert mock_random.call_count == 2
assert self.map[full_name(name)].add.call_count == 1
def test_decr_existing_metric(self, name):
expected_calls = [
mock.call(1, attributes=None),
mock.call(-1, attributes=None),
]
# Create the metric and set value to 1
self.stats.incr(name)
# Decrement value to 0
self.stats.decr(name)
self.map[full_name(name)].add.assert_has_calls(expected_calls)
assert self.map[full_name(name)].add.call_count == len(expected_calls)
@mock.patch("random.random", side_effect=[0.1, 0.9])
def test_decr_with_rate_limit_works(self, mock_random, name):
expected_calls = [
mock.call(1, attributes=None),
mock.call(-1, attributes=None),
]
# Create the metric and set value to 1
self.stats.incr(name)
# Decrement the counter to 0
self.stats.decr(name, rate=0.5)
# This one should not decrement because random() will return a value higher than `rate`
self.stats.decr(name, rate=0.5)
# This one should raise an exception for a negative `rate` value
with pytest.raises(ValueError, match=RATE_MUST_BE_POSITIVE_MSG):
self.stats.decr(name, rate=-0.5)
assert mock_random.call_count == 2
# add() is called once in the initial stats.incr and once for the decr that passed the rate check.
self.map[full_name(name)].add.assert_has_calls(expected_calls)
self.map[full_name(name)].add.call_count == 2
def test_gauge_new_metric(self, name):
self.stats.gauge(name, value=1)
self.meter.get_meter().create_gauge.assert_called_once_with(name=full_name(name))
assert self.map[full_name(name)].value == 1
def test_gauge_new_metric_with_tags(self, name):
tags = {"hello": "world"}
key = _generate_key_name(full_name(name), tags)
self.stats.gauge(name, value=1, tags=tags)
self.meter.get_meter().create_gauge.assert_called_once_with(name=full_name(name))
self.map[key].attributes == tags
def test_gauge_existing_metric(self, name):
self.stats.gauge(name, value=1)
self.stats.gauge(name, value=2)
self.meter.get_meter().create_gauge.assert_called_once_with(name=full_name(name))
assert self.map[full_name(name)].value == 2
def test_gauge_existing_metric_with_delta(self, name):
self.stats.gauge(name, value=1)
self.stats.gauge(name, value=2, delta=True)
self.meter.get_meter().create_gauge.assert_called_once_with(name=full_name(name))
assert self.map[full_name(name)].value == 3
@mock.patch("random.random", side_effect=[0.1, 0.9])
@mock.patch.object(MetricsMap, "set_gauge_value")
def test_gauge_with_rate_limit_works(self, mock_set_value, mock_random, name):
# Create the gauge and set the value to 1
self.stats.gauge(name, value=1, rate=0.5)
# This one should not increment because random() will return a value higher than `rate`
self.stats.gauge(name, value=1, rate=0.5)
with pytest.raises(ValueError, match=RATE_MUST_BE_POSITIVE_MSG):
self.stats.gauge(name, value=1, rate=-0.5)
assert mock_random.call_count == 2
assert mock_set_value.call_count == 1
def test_gauge_value_is_correct(self, name):
self.stats.gauge(name, value=1)
assert self.map[full_name(name)].value == 1
def test_timing_new_metric(self, name):
import datetime
self.stats.timing(name, dt=datetime.timedelta(seconds=123))
self.meter.get_meter().create_gauge.assert_called_once_with(name=full_name(name))
expected_value = 123000.0
assert self.map[full_name(name)].value == expected_value
def test_timing_new_metric_with_tags(self, name):
tags = {"hello": "world"}
key = _generate_key_name(full_name(name), tags)
self.stats.timing(name, dt=1, tags=tags)
self.meter.get_meter().create_gauge.assert_called_once_with(name=full_name(name))
self.map[key].attributes == tags
def test_timing_existing_metric(self, name):
self.stats.timing(name, dt=1)
self.stats.timing(name, dt=2)
self.meter.get_meter().create_gauge.assert_called_once_with(name=full_name(name))
assert self.map[full_name(name)].value == 2
# For the four test_timer_foo tests below:
# time.perf_count() is called once to get the starting timestamp and again
# to get the end timestamp. timer() should return the difference as a float.
@mock.patch.object(time, "perf_counter", side_effect=[0.0, 3.14])
def test_timer_with_name_returns_float_and_stores_value(self, mock_time, name):
with self.stats.timer(name) as timer:
pass
assert isinstance(timer.duration, float)
expected_duration = 3140.0
assert timer.duration == expected_duration
assert mock_time.call_count == 2
self.meter.get_meter().create_gauge.assert_called_once_with(name=full_name(name))
@mock.patch.object(time, "perf_counter", side_effect=[0.0, 3.14])
def test_timer_no_name_returns_float_but_does_not_store_value(self, mock_time, name):
with self.stats.timer() as timer:
pass
assert isinstance(timer.duration, float)
expected_duration = 3140.0
assert timer.duration == expected_duration
assert mock_time.call_count == 2
self.meter.get_meter().create_gauge.assert_not_called()
@mock.patch.object(time, "perf_counter", side_effect=[0.0, 3.14])
def test_timer_start_and_stop_manually_send_false(self, mock_time, name):
timer = self.stats.timer(name)
timer.start()
# Perform some task
timer.stop(send=False)
assert isinstance(timer.duration, float)
expected_value = 3140.0
assert timer.duration == expected_value
assert mock_time.call_count == 2
self.meter.get_meter().create_gauge.assert_not_called()
@mock.patch.object(time, "perf_counter", side_effect=[0.0, 3.14])
def test_timer_start_and_stop_manually_send_true(self, mock_time, name):
timer = self.stats.timer(name)
timer.start()
# Perform some task
timer.stop(send=True)
assert isinstance(timer.duration, float)
expected_value = 3140.0
assert timer.duration == expected_value
assert mock_time.call_count == 2
self.meter.get_meter().create_gauge.assert_called_once_with(name=full_name(name))
| TestOtelMetrics |
python | miyuchina__mistletoe | test/test_block_token.py | {
"start": 24390,
"end": 24983
} | class ____(unittest.TestCase):
def test_store_footnote(self):
lines = ['[key 1]: value1\n',
'[key 2]: value2\n']
document = block_token.Document(lines)
self.assertEqual(document.footnotes['key 1'], ('value1', ''))
self.assertEqual(document.footnotes['key 2'], ('value2', ''))
def test_auto_splitlines(self):
lines = "some\ncontinual\nlines\n"
document = block_token.Document(lines)
self.assertIsInstance(document.children[0], block_token.Paragraph)
self.assertEqual(len(document.children), 1)
| TestDocument |
python | apache__airflow | airflow-core/tests/unit/ti_deps/deps/test_dag_unpaused_dep.py | {
"start": 1059,
"end": 1735
} | class ____:
def test_concurrency_reached(self, mock_is_dag_paused):
"""
Test paused DAG should fail dependency
"""
mock_is_dag_paused.return_value = True
task = mock.Mock()
ti = TaskInstance(task=task, dag_version_id=mock.MagicMock())
assert not DagUnpausedDep().is_met(ti=ti)
def test_all_conditions_met(self, mock_is_dag_paused):
"""
Test all conditions met should pass dep
"""
mock_is_dag_paused.return_value = False
task = mock.Mock()
ti = TaskInstance(task=task, dag_version_id=mock.MagicMock())
assert DagUnpausedDep().is_met(ti=ti)
| TestDagUnpausedDep |
python | apache__airflow | providers/common/sql/tests/unit/common/sql/operators/test_sql.py | {
"start": 28277,
"end": 32112
} | class ____:
def setup_method(self):
self.task_id = "test_task"
self.conn_id = "sql_default"
self._operator = SQLCheckOperator(task_id=self.task_id, conn_id=self.conn_id, sql="sql")
@pytest.mark.parametrize("database", [None, "test-db"])
def test_get_hook(self, database):
with mock.patch(
"airflow.providers.common.sql.operators.sql.BaseHook.get_connection",
return_value=Connection(conn_id="sql_default", conn_type="postgres"),
) as mock_get_conn:
if database:
self._operator.database = database
assert isinstance(self._operator._hook, PostgresHook)
mock_get_conn.assert_called_once_with(self.conn_id)
@skip_if_force_lowest_dependencies_marker
def test_not_allowed_conn_type(self):
with mock.patch(
"airflow.providers.common.sql.operators.sql.BaseHook.get_connection",
return_value=Connection(conn_id="sql_default", conn_type="postgres"),
) as mock_get_conn:
mock_get_conn.return_value = Connection(conn_id="sql_default", conn_type="airbyte")
with pytest.raises(AirflowException, match=r"You are trying to use `common-sql`"):
self._operator._hook
@skip_if_force_lowest_dependencies_marker
def test_sql_operator_hook_params_snowflake(self):
with mock.patch(
"airflow.providers.common.sql.operators.sql.BaseHook.get_connection",
return_value=Connection(conn_id="sql_default", conn_type="postgres"),
) as mock_get_conn:
mock_get_conn.return_value = Connection(conn_id="snowflake_default", conn_type="snowflake")
self._operator.hook_params = {
"warehouse": "warehouse",
"database": "database",
"role": "role",
"schema": "schema",
"log_sql": False,
}
assert self._operator._hook.conn_type == "snowflake"
assert self._operator._hook.warehouse == "warehouse"
assert self._operator._hook.database == "database"
assert self._operator._hook.role == "role"
assert self._operator._hook.schema == "schema"
assert not self._operator._hook.log_sql
@skip_if_force_lowest_dependencies_marker
def test_sql_operator_hook_params_biguery(self):
with mock.patch(
"airflow.providers.common.sql.operators.sql.BaseHook.get_connection",
return_value=Connection(conn_id="sql_default", conn_type="postgres"),
) as mock_get_conn:
mock_get_conn.return_value = Connection(
conn_id="google_cloud_bigquery_default", conn_type="gcpbigquery"
)
self._operator.hook_params = {"use_legacy_sql": True, "location": "us-east1"}
assert self._operator._hook.conn_type == "gcpbigquery"
assert self._operator._hook.use_legacy_sql
assert self._operator._hook.location == "us-east1"
@skip_if_force_lowest_dependencies_marker
def test_sql_operator_hook_params_templated(self):
with mock.patch(
"airflow.providers.common.sql.operators.sql.BaseHook.get_connection",
return_value=Connection(conn_id="sql_default", conn_type="postgres"),
) as mock_get_conn:
mock_get_conn.return_value = Connection(conn_id="snowflake_default", conn_type="snowflake")
self._operator.hook_params = {"session_parameters": {"query_tag": "{{ ds }}"}}
logical_date = "2024-04-02"
self._operator.render_template_fields({"ds": logical_date})
assert self._operator._hook.conn_type == "snowflake"
assert self._operator._hook.session_parameters == {"query_tag": logical_date}
| TestSQLCheckOperatorDbHook |
python | crytic__slither | slither/slithir/tmp_operations/tmp_new_contract.py | {
"start": 134,
"end": 947
} | class ____(OperationWithLValue):
def __init__(self, contract_name: str, lvalue: TemporaryVariable) -> None:
super().__init__()
self._contract_name = contract_name
self._lvalue = lvalue
self._call_value = None
self._call_salt = None
@property
def contract_name(self) -> str:
return self._contract_name
@property
def call_value(self):
return self._call_value
@call_value.setter
def call_value(self, v):
self._call_value = v
@property
def call_salt(self):
return self._call_salt
@call_salt.setter
def call_salt(self, s):
self._call_salt = s
@property
def read(self):
return []
def __str__(self):
return f"{self.lvalue} = new {self.contract_name}"
| TmpNewContract |
python | django__django | tests/model_forms/tests.py | {
"start": 121604,
"end": 126418
} | class ____(TestCase):
"""
Tests the functionality of ``limit_choices_to``.
"""
@classmethod
def setUpTestData(cls):
cls.threepwood = Character.objects.create(
username="threepwood",
last_action=datetime.datetime.today() + datetime.timedelta(days=1),
)
cls.marley = Character.objects.create(
username="marley",
last_action=datetime.datetime.today() - datetime.timedelta(days=1),
)
def test_limit_choices_to_callable_for_fk_rel(self):
"""
A ForeignKey can use limit_choices_to as a callable (#2554).
"""
stumpjokeform = StumpJokeForm()
self.assertSequenceEqual(
stumpjokeform.fields["most_recently_fooled"].queryset, [self.threepwood]
)
def test_limit_choices_to_callable_for_m2m_rel(self):
"""
A ManyToManyField can use limit_choices_to as a callable (#2554).
"""
stumpjokeform = StumpJokeForm()
self.assertSequenceEqual(
stumpjokeform.fields["most_recently_fooled"].queryset, [self.threepwood]
)
def test_custom_field_with_queryset_but_no_limit_choices_to(self):
"""
A custom field with a `queryset` attribute but no `limit_choices_to`
works (#23795).
"""
f = StumpJokeWithCustomFieldForm()
self.assertEqual(f.fields["custom"].queryset, 42)
def test_fields_for_model_applies_limit_choices_to(self):
fields = fields_for_model(StumpJoke, ["has_fooled_today"])
self.assertSequenceEqual(fields["has_fooled_today"].queryset, [self.threepwood])
def test_callable_called_each_time_form_is_instantiated(self):
field = StumpJokeForm.base_fields["most_recently_fooled"]
with mock.patch.object(field, "limit_choices_to") as today_callable_dict:
StumpJokeForm()
self.assertEqual(today_callable_dict.call_count, 1)
StumpJokeForm()
self.assertEqual(today_callable_dict.call_count, 2)
StumpJokeForm()
self.assertEqual(today_callable_dict.call_count, 3)
@isolate_apps("model_forms")
def test_limit_choices_to_no_duplicates(self):
joke1 = StumpJoke.objects.create(
funny=True,
most_recently_fooled=self.threepwood,
)
joke2 = StumpJoke.objects.create(
funny=True,
most_recently_fooled=self.threepwood,
)
joke3 = StumpJoke.objects.create(
funny=True,
most_recently_fooled=self.marley,
)
StumpJoke.objects.create(funny=False, most_recently_fooled=self.marley)
joke1.has_fooled_today.add(self.marley, self.threepwood)
joke2.has_fooled_today.add(self.marley)
joke3.has_fooled_today.add(self.marley, self.threepwood)
class CharacterDetails(models.Model):
character1 = models.ForeignKey(
Character,
models.CASCADE,
limit_choices_to=models.Q(
jokes__funny=True,
jokes_today__funny=True,
),
related_name="details_fk_1",
)
character2 = models.ForeignKey(
Character,
models.CASCADE,
limit_choices_to={
"jokes__funny": True,
"jokes_today__funny": True,
},
related_name="details_fk_2",
)
character3 = models.ManyToManyField(
Character,
limit_choices_to=models.Q(
jokes__funny=True,
jokes_today__funny=True,
),
related_name="details_m2m_1",
)
class CharacterDetailsForm(forms.ModelForm):
class Meta:
model = CharacterDetails
fields = "__all__"
form = CharacterDetailsForm()
self.assertCountEqual(
form.fields["character1"].queryset,
[self.marley, self.threepwood],
)
self.assertCountEqual(
form.fields["character2"].queryset,
[self.marley, self.threepwood],
)
self.assertCountEqual(
form.fields["character3"].queryset,
[self.marley, self.threepwood],
)
def test_limit_choices_to_m2m_through(self):
class DiceForm(forms.ModelForm):
class Meta:
model = Dice
fields = ["numbers"]
Number.objects.create(value=0)
n1 = Number.objects.create(value=1)
n2 = Number.objects.create(value=2)
form = DiceForm()
self.assertCountEqual(form.fields["numbers"].queryset, [n1, n2])
| LimitChoicesToTests |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-vectorx/tests/test_vector_stores_vectorx.py | {
"start": 965,
"end": 3678
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.vecx_api_token = os.getenv("VECTORX_API_TOKEN")
if not cls.vecx_api_token:
raise ValueError(
"Missing VECTORX_API_TOKEN. Please set it in your environment."
)
cls.vx = VectorX(token=cls.vecx_api_token)
cls.encryption_key = cls.vx.generate_key()
timestamp = int(time.time())
cls.test_index_name = f"test_index_{timestamp}"
cls.dimension = 384
cls.space_type = "cosine"
cls.test_indexes = {cls.test_index_name}
cls.test_documents = [
Document(
text="Python is a high-level, interpreted programming language known for its readability and simplicity.",
metadata={
"category": "programming",
"language": "python",
"difficulty": "beginner",
},
),
Document(
text="Machine learning algorithms learn patterns from data to make predictions.",
metadata={
"category": "ai",
"field": "machine_learning",
"difficulty": "intermediate",
},
),
Document(
text="Deep learning uses neural networks with multiple layers for complex pattern recognition.",
metadata={
"category": "ai",
"field": "deep_learning",
"difficulty": "advanced",
},
),
]
@classmethod
def tearDownClass(cls):
for index_name in cls.test_indexes:
try:
cls.vx.delete_index(name=index_name)
except Exception as e:
if "not found" not in str(e).lower():
print(f"Error deleting test index {index_name}: {e}")
def tearDown(self):
try:
indexes = self.vx.list_indexes()
if isinstance(indexes, list):
for index in indexes:
if isinstance(index, dict) and "name" in index:
index_name = index["name"]
if index_name.startswith("test_index_"):
try:
self.vx.delete_index(name=index_name)
except Exception as e:
print(f"Error cleaning up test index {index_name}: {e}")
except Exception as e:
print(f"Error listing indexes for cleanup: {e}")
# ------------------ VectorX VectorStore Tests ------------------
| VectorXTestSetup |
python | OmkarPathak__pygorithm | pygorithm/data_structures/quadtree.py | {
"start": 2185,
"end": 23006
} | class ____(object):
"""
A quadtree is a sorting tool for two-dimensional space, most
commonly used to reduce the number of required collision
calculations in a two-dimensional scene. In this context,
the scene is stepped without collision detection, then a
quadtree is constructed from all of the boundaries
.. caution::
Just because a quad tree has split does not mean entities will be empty. Any
entities which overlay any of the lines of the split will be included in the
parent of the quadtree.
.. tip::
It is important to tweak bucket size and depth to the problem, but a common error
is too small a bucket size. It is typically not reasonable to have a bucket size
smaller than 16; A good starting point is 64, then modify as appropriate. Larger
buckets reduce the overhead of the quad tree which could easily exceed the improvement
from reduced collision checks. The max depth is typically just a sanity check since
depth greater than 4 or 5 would either indicate a badly performing quadtree (too
dense objects, use an r-tree or kd-tree) or a very large world (where an iterative
quadtree implementation would be appropriate).
:ivar bucket_size: maximum number objects per bucket (before :py:attr:`.max_depth`)
:type bucket_size: int
:ivar max_depth: maximum depth of the quadtree
:type max_depth: int
:ivar depth: the depth of this node (0 being the topmost)
:type depth: int
:ivar location: where this quad tree node is situated
:type location: :class:`pygorithm.geometry.rect2.Rect2`
:ivar entities: the entities in this quad tree and in NO OTHER related quad tree
:type entities: list of :class:`.QuadTreeEntity`
:ivar children: either None or the 4 :class:`.QuadTree` children of this node
:type children: None or list of :class:`.QuadTree`
"""
def __init__(self, bucket_size, max_depth, location, depth = 0, entities = None):
"""
Initialize a new quad tree.
.. warning::
Passing entities to this quadtree will NOT cause it to split automatically!
You must call :py:meth:`.think` for that. This allows for more predictable
performance per line.
:param bucket_size: the number of entities in this quadtree
:type bucket_size: int
:param max_depth: the maximum depth for automatic splitting
:type max_depth: int
:param location: where this quadtree is located
:type location: :class:`pygorithm.geometry.rect2.Rect2`
:param depth: the depth of this node
:type depth: int
:param entities: the entities to initialize this quadtree with
:type entities: list of :class:`.QuadTreeEntity` or None for empty list
"""
self.bucket_size = bucket_size
self.max_depth = max_depth
self.location = location
self.depth = depth
self.entities = entities if entities is not None else []
self.children = None
def think(self, recursive = False):
"""
Call :py:meth:`.split` if appropriate
Split this quad tree if it has not split already and it has more
entities than :py:attr:`.bucket_size` and :py:attr:`.depth` is
less than :py:attr:`.max_depth`.
If `recursive` is True, think is called on the :py:attr:`.children` with
recursive set to True after splitting.
:param recursive: if `think(True)` should be called on :py:attr:`.children` (if there are any)
:type recursive: bool
"""
if not self.children and self.depth < self.max_depth and len(self.entities) > self.bucket_size:
self.split()
if recursive:
if self.children:
for child in self.children:
child.think(True)
def split(self):
"""
Split this quadtree.
.. caution::
A call to split will always split the tree or raise an error. Use
:py:meth:`.think` if you want to ensure the quadtree is operating
efficiently.
.. caution::
This function will not respect :py:attr:`.bucket_size` or
:py:attr:`.max_depth`.
:raises ValueError: if :py:attr:`.children` is not empty
"""
if self.children:
raise ValueError("cannot split twice")
_cls = type(self)
def _cstr(r):
return _cls(self.bucket_size, self.max_depth, r, self.depth + 1)
_halfwidth = self.location.width / 2
_halfheight = self.location.height / 2
_x = self.location.mincorner.x
_y = self.location.mincorner.y
self.children = [
_cstr(rect2.Rect2(_halfwidth, _halfheight, vector2.Vector2(_x, _y))),
_cstr(rect2.Rect2(_halfwidth, _halfheight, vector2.Vector2(_x + _halfwidth, _y))),
_cstr(rect2.Rect2(_halfwidth, _halfheight, vector2.Vector2(_x + _halfwidth, _y + _halfheight))),
_cstr(rect2.Rect2(_halfwidth, _halfheight, vector2.Vector2(_x, _y + _halfheight))) ]
_newents = []
for ent in self.entities:
quad = self.get_quadrant(ent)
if quad < 0:
_newents.append(ent)
else:
self.children[quad].entities.append(ent)
self.entities = _newents
def get_quadrant(self, entity):
"""
Calculate the quadrant that the specified entity belongs to.
Touching a line is considered overlapping a line. Touching is
determined using :py:meth:`math.isclose`
Quadrants are:
- -1: None (it overlaps 2 or more quadrants)
- 0: Top-left
- 1: Top-right
- 2: Bottom-right
- 3: Bottom-left
.. caution::
This function does not verify the entity is contained in this quadtree.
This operation takes O(1) time.
:param entity: the entity to place
:type entity: :class:`.QuadTreeEntity`
:returns: quadrant
:rtype: int
"""
_aabb = entity.aabb
_halfwidth = self.location.width / 2
_halfheight = self.location.height / 2
_x = self.location.mincorner.x
_y = self.location.mincorner.y
if math.isclose(_aabb.mincorner.x, _x + _halfwidth):
return -1
if math.isclose(_aabb.mincorner.x + _aabb.width, _x + _halfwidth):
return -1
if math.isclose(_aabb.mincorner.y, _y + _halfheight):
return -1
if math.isclose(_aabb.mincorner.y + _aabb.height, _y + _halfheight):
return -1
_leftside_isleft = _aabb.mincorner.x < _x + _halfwidth
_rightside_isleft = _aabb.mincorner.x + _aabb.width < _x + _halfwidth
if _leftside_isleft != _rightside_isleft:
return -1
_topside_istop = _aabb.mincorner.y < _y + _halfheight
_botside_istop = _aabb.mincorner.y + _aabb.height < _y + _halfheight
if _topside_istop != _botside_istop:
return -1
_left = _leftside_isleft
_top = _topside_istop
if _left:
if _top:
return 0
else:
return 3
else:
if _top:
return 1
else:
return 2
def insert_and_think(self, entity):
"""
Insert the entity into this or the appropriate child.
This also acts as thinking (recursively). Using :py:meth:`.insert_and_think`
iteratively is slightly less efficient but has more predictable performance
than initializing with a large number of entities then thinking is slightly
faster but may hang. Both may exceed recursion depth if :py:attr:`.max_depth`
is too large.
:param entity: the entity to insert
:type entity: :class:`.QuadTreeEntity`
"""
if not self.children and len(self.entities) == self.bucket_size and self.depth < self.max_depth:
self.split()
quad = self.get_quadrant(entity) if self.children else -1
if quad < 0:
self.entities.append(entity)
else:
self.children[quad].insert_and_think(entity)
def retrieve_collidables(self, entity, predicate = None):
"""
Find all entities that could collide with the specified entity.
.. warning::
If entity is, itself, in the quadtree, it will be returned. The
predicate may be used to prevent this using your preferred equality
method.
The predicate takes 1 positional argument (the entity being considered)
and returns `False` if the entity should never be returned, even if it
might collide with the entity. It should return `True` otherwise.
:param entity: the entity to find collidables for
:type entity: :class:`.QuadTreeEntity`
:param predicate: the predicate
:type predicate: :class:`types.FunctionType` or None
:returns: potential collidables (never `None)
:rtype: list of :class:`.QuadTreeEntity`
"""
result = list(filter(predicate, self.entities))
quadrant = self.get_quadrant(entity) if self.children else -1
if quadrant >= 0:
result.extend(self.children[quadrant].retrieve_collidables(entity, predicate))
elif self.children:
for child in self.children:
touching, overlapping, alwaysNone = rect2.Rect2.find_intersection(entity.aabb, child.location, find_mtv=False)
if touching or overlapping:
result.extend(child.retrieve_collidables(entity, predicate))
return result
def _iter_helper(self, pred):
"""
Calls pred on each child and childs child, iteratively.
pred takes one positional argument (the child).
:param pred: function to call
:type pred: `types.FunctionType`
"""
_stack = deque()
_stack.append(self)
while _stack:
curr = _stack.pop()
if curr.children:
for child in curr.children:
_stack.append(child)
pred(curr)
def find_entities_per_depth(self):
"""
Calculate the number of nodes and entities at each depth level in this
quad tree. Only returns for depth levels at or equal to this node.
This is implemented iteratively. See :py:meth:`.__str__` for usage example.
:returns: dict of depth level to number of entities
:rtype: dict int: int
"""
container = { 'result': {} }
def handler(curr, container=container):
container['result'][curr.depth] = container['result'].get(curr.depth, 0) + len(curr.entities)
self._iter_helper(handler)
return container['result']
def find_nodes_per_depth(self):
"""
Calculate the number of nodes at each depth level.
This is implemented iteratively. See :py:meth:`.__str__` for usage example.
:returns: dict of depth level to number of nodes
:rtype: dict int: int
"""
nodes_per_depth = {}
self._iter_helper(lambda curr, d=nodes_per_depth: d.update({ (curr.depth, d.get(curr.depth, 0) + 1) }))
return nodes_per_depth
def sum_entities(self, entities_per_depth=None):
"""
Sum the number of entities in this quad tree and all lower quad trees.
If `entities_per_depth` is not None, that array is used to calculate the sum
of entities rather than traversing the tree. Either way, this is implemented
iteratively. See :py:meth:`.__str__` for usage example.
:param entities_per_depth: the result of :py:meth:`.find_entities_per_depth`
:type entities_per_depth: `dict int: (int, int)` or None
:returns: number of entities in this and child nodes
:rtype: int
"""
if entities_per_depth is not None:
return sum(entities_per_depth.values())
container = { 'result': 0 }
def handler(curr, container=container):
container['result'] += len(curr.entities)
self._iter_helper(handler)
return container['result']
def calculate_avg_ents_per_leaf(self):
"""
Calculate the average number of entities per leaf node on this and child
quad trees.
In the ideal case, the average entities per leaf is equal to the bucket size,
implying maximum efficiency. Note that, as always with averages, this might
be misleading if this tree has reached its max depth.
This is implemented iteratively. See :py:meth:`.__str__` for usage example.
:returns: average number of entities at each leaf node
:rtype: :class:`numbers.Number`
"""
container = { 'leafs': 0, 'total': 0 }
def handler(curr, container=container):
if not curr.children:
container['leafs'] += 1
container['total'] += len(curr.entities)
self._iter_helper(handler)
return container['total'] / container['leafs']
def calculate_weight_misplaced_ents(self, sum_entities=None):
"""
Calculate a rating for misplaced entities.
A misplaced entity is one that is not on a leaf node. That weight is multiplied
by 4*remaining maximum depth of that node, to indicate approximately how
many additional calculations are required.
The result is then divided by the total number of entities on this node (either
calculated using :py:meth:`.sum_entities` or provided) to get the approximate
cost of the misplaced nodes in comparison with the placed nodes. A value greater
than 1 implies a different tree type (such as r-tree or kd-tree) should probably be
used.
This is implemented iteratively. See :py:meth:`.__str__` for usage example.
:param sum_entities: the number of entities on this node
:type sum_entities: int or None
:returns: weight of misplaced entities
:rtype: :class:`numbers.Number`
"""
# this iteration requires more context than _iter_helper provides.
# we must keep track of parents as well in order to correctly update
# weights
nonleaf_to_max_child_depth_dict = {}
# stack will be (quadtree, list (of parents) or None)
_stack = deque()
_stack.append((self, None))
while _stack:
curr, parents = _stack.pop()
if parents:
for p in parents:
nonleaf_to_max_child_depth_dict[p] = max(nonleaf_to_max_child_depth_dict.get(p, 0), curr.depth)
if curr.children:
new_parents = list(parents) if parents else []
new_parents.append(curr)
for child in curr.children:
_stack.append((child, new_parents))
_weight = 0
for nonleaf, maxchilddepth in nonleaf_to_max_child_depth_dict.items():
_weight += len(nonleaf.entities) * 4 * (maxchilddepth - nonleaf.depth)
_sum = self.sum_entities() if sum_entities is None else sum_entities
return _weight / _sum
def __repr__(self):
"""
Create an unambiguous representation of this quad tree.
This is implemented iteratively.
Example:
.. code-block:: python
from pygorithm.geometry import (vector2, rect2)
from pygorithm.data_structures import quadtree
# create a tree with a up to 2 entities in a bucket that
# can have a depth of up to 5.
_tree = quadtree.QuadTree(1, 5, rect2.Rect2(100, 100))
# add a few entities to the tree
_tree.insert_and_think(quadtree.QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(5, 5))))
_tree.insert_and_think(quadtree.QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(95, 5))))
# prints quadtree(bucket_size=1, max_depth=5, location=rect2(width=100, height=100, mincorner=vector2(x=0, y=0)), depth=0, entities=[], children=[quadtree(bucket_size=1, max_depth=5, location=rect2(width=50.0, height=50.0, mincorner=vector2(x=0, y=0)), depth=1, entities=[quadtreeentity(aabb=rect2(width=2, height=2, mincorner=vector2(x=5, y=5)))], children=None), quadtree(bucket_size=1, max_depth=5, location=rect2(width=50.0, height=50.0, mincorner=vector2(x=50.0, y=0)), depth=1, entities=[quadtreeentity(aabb=rect2(width=2, height=2, mincorner=vector2(x=95, y=5)))], children=None), quadtree(bucket_size=1, max_depth=5, location=rect2(width=50.0, height=50.0, mincorner=vector2(x=50.0, y=50.0)), depth=1, entities=[], children=None), quadtree(bucket_size=1, max_depth=5, location=rect2(width=50.0, height=50.0, mincorner=vector2(x=0, y=50.0)), depth=1, entities=[], children=None)])
:returns: unambiguous, recursive representation of this quad tree
:rtype: string
"""
return "quadtree(bucket_size={}, max_depth={}, location={}, depth={}, entities={}, children={})".format(self.bucket_size, self.max_depth, repr(self.location), self.depth, self.entities, self.children)
def __str__(self):
"""
Create a human-readable representation of this quad tree
.. caution::
Because of the complexity of quadtrees it takes a fair amount of calculation to
produce something somewhat legible. All returned statistics have paired functions.
This uses only iterative algorithms to calculate statistics.
Example:
.. code-block:: python
from pygorithm.geometry import (vector2, rect2)
from pygorithm.data_structures import quadtree
# create a tree with a up to 2 entities in a bucket that
# can have a depth of up to 5.
_tree = quadtree.QuadTree(2, 5, rect2.Rect2(100, 100))
# add a few entities to the tree
_tree.insert_and_think(quadtree.QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(5, 5))))
_tree.insert_and_think(quadtree.QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(95, 5))))
# prints quadtree(at rect(100x100 at <0, 0>) with 0 entities here (2 in total); (nodes, entities) per depth: [ 0: (1, 0), 1: (4, 2) ] (allowed max depth: 5, actual: 1), avg ent/leaf: 0.5 (target 1), misplaced weight 0.0 (0 best, >1 bad)
print(_tree)
:returns: human-readable representation of this quad tree
:rtype: string
"""
nodes_per_depth = self.find_nodes_per_depth()
_ents_per_depth = self.find_entities_per_depth()
_nodes_ents_per_depth_str = "[ {} ]".format(', '.join("{}: ({}, {})".format(dep, nodes_per_depth[dep], _ents_per_depth[dep]) for dep in nodes_per_depth.keys()))
_sum = self.sum_entities(entities_per_depth=_ents_per_depth)
_max_depth = max(_ents_per_depth.keys())
_avg_ent_leaf = self.calculate_avg_ents_per_leaf()
_mispl_weight = self.calculate_weight_misplaced_ents(sum_entities=_sum)
return "quadtree(at {} with {} entities here ({} in total); (nodes, entities) per depth: {} (allowed max depth: {}, actual: {}), avg ent/leaf: {} (target {}), misplaced weight {} (0 best, >1 bad)".format(self.location, len(self.entities), _sum, _nodes_ents_per_depth_str, self.max_depth, _max_depth, _avg_ent_leaf, self.bucket_size, _mispl_weight)
@staticmethod
def get_code():
"""
Get the code for the QuadTree class
:returns: code for QuadTree
:rtype: string
"""
return inspect.getsource(QuadTree) | QuadTree |
python | numpy__numpy | benchmarks/benchmarks/bench_linalg.py | {
"start": 92,
"end": 1911
} | class ____(Benchmark):
def setup(self):
self.a = np.arange(60000.0).reshape(150, 400)
self.ac = self.a.copy()
self.at = self.a.T
self.atc = self.a.T.copy()
self.b = np.arange(240000.0).reshape(400, 600)
self.c = np.arange(600)
self.d = np.arange(400)
self.a3 = np.arange(480000.).reshape(60, 80, 100)
self.b3 = np.arange(192000.).reshape(80, 60, 40)
def time_dot_a_b(self):
np.dot(self.a, self.b)
def time_dot_d_dot_b_c(self):
np.dot(self.d, np.dot(self.b, self.c))
def time_dot_trans_a_at(self):
np.dot(self.a, self.at)
def time_dot_trans_a_atc(self):
np.dot(self.a, self.atc)
def time_dot_trans_at_a(self):
np.dot(self.at, self.a)
def time_dot_trans_atc_a(self):
np.dot(self.atc, self.a)
def time_einsum_i_ij_j(self):
np.einsum('i,ij,j', self.d, self.b, self.c)
def time_einsum_ij_jk_a_b(self):
np.einsum('ij,jk', self.a, self.b)
def time_einsum_ijk_jil_kl(self):
np.einsum('ijk,jil->kl', self.a3, self.b3)
def time_inner_trans_a_a(self):
np.inner(self.a, self.a)
def time_inner_trans_a_ac(self):
np.inner(self.a, self.ac)
def time_matmul_a_b(self):
np.matmul(self.a, self.b)
def time_matmul_d_matmul_b_c(self):
np.matmul(self.d, np.matmul(self.b, self.c))
def time_matmul_trans_a_at(self):
np.matmul(self.a, self.at)
def time_matmul_trans_a_atc(self):
np.matmul(self.a, self.atc)
def time_matmul_trans_at_a(self):
np.matmul(self.at, self.a)
def time_matmul_trans_atc_a(self):
np.matmul(self.atc, self.a)
def time_tensordot_a_b_axes_1_0_0_1(self):
np.tensordot(self.a3, self.b3, axes=([1, 0], [0, 1]))
| Eindot |
python | fastapi__sqlmodel | docs_src/tutorial/fastapi/relationships/tutorial001.py | {
"start": 1093,
"end": 1256
} | class ____(SQLModel):
name: Optional[str] = None
secret_name: Optional[str] = None
age: Optional[int] = None
team_id: Optional[int] = None
| HeroUpdate |
python | astropy__astropy | astropy/visualization/transform.py | {
"start": 118,
"end": 355
} | class ____:
"""
A transformation object.
This is used to construct transformations such as scaling, stretching, and
so on.
"""
def __add__(self, other):
return CompositeTransform(other, self)
| BaseTransform |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/settings.py | {
"start": 2928,
"end": 3079
} | class ____(Enum):
CONSTANT = "constant"
LINEAR = "linear"
# TODO add support for lesson based scheduling
# LESSON = "lesson"
| ScheduleType |
python | google__jax | docs/autodidax.py | {
"start": 23525,
"end": 25723
} | class ____: pass
leaf = Leaf()
def tree_flatten(x: Any) -> tuple[list[Any], PyTreeDef]:
children_iter, treedef = _tree_flatten(x)
return list(children_iter), treedef
def _tree_flatten(x: Any) -> tuple[Iterable, PyTreeDef]:
node_type = node_types.get(type(x))
if node_type:
node_metadata, children = node_type.to_iterable(x)
children_flat, child_trees = unzip2(map(_tree_flatten, children))
flattened = it.chain.from_iterable(children_flat)
return flattened, PyTreeDef(node_type, node_metadata, tuple(child_trees))
else:
return [x], leaf
def tree_unflatten(treedef: PyTreeDef, xs: list[Any]) -> Any:
return _tree_unflatten(treedef, iter(xs))
def _tree_unflatten(treedef: PyTreeDef, xs: Iterator) -> Any:
if treedef is leaf:
return next(xs)
else:
children = (_tree_unflatten(t, xs) for t in treedef.child_treedefs)
return treedef.node_type.from_iterable(treedef.node_metadata, children)
# -
# With this pytree-handling `jvp` implementation, we can now handle arbitrary
# input and output containers. That'll come in handy with future transformations
# too!
# +
def f(x):
y = sin(x) * 2.
z = - y + x
return {'hi': z, 'there': [x, y]}
x, xdot = 3., 1.
y, ydot = jvp(f, (x,), (xdot,))
print(y)
print(ydot)
# -
# ### Vectorized batching with `vmap`
#
# First, a couple helper functions, one for producing mapped abstract values
# from unmapped ones (by removing an axis), and one for moving batch dimensions
# around:
# +
def mapped_aval(batch_dim, aval):
shape = list(aval.shape)
del shape[batch_dim]
return ShapedArray(tuple(shape), aval.dtype)
def move_batch_axis(axis_size, src, dst, x):
if src is not_mapped:
target_shape = list(np.shape(x))
target_shape.insert(dst, axis_size)
return broadcast(x, target_shape, [dst])
elif src == dst:
return x
else:
return moveaxis(x, src, dst)
def moveaxis(x, src: int, dst: int):
perm = [i for i in range(np.ndim(x)) if i != src]
perm.insert(dst, src)
return transpose(x, perm)
# -
# The `Tracer` for vectorized batching carries a batched value and an optional
# integer indicating which axis (if any) is the batch axis.
# +
from typing import Union
| Leaf |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/mariadbconnector.py | {
"start": 3842,
"end": 10356
} | class ____(MySQLDialect):
driver = "mariadbconnector"
supports_statement_cache = True
# set this to True at the module level to prevent the driver from running
# against a backend that server detects as MySQL. currently this appears to
# be unnecessary as MariaDB client libraries have always worked against
# MySQL databases. However, if this changes at some point, this can be
# adjusted, but PLEASE ADD A TEST in test/dialect/mysql/test_dialect.py if
# this change is made at some point to ensure the correct exception
# is raised at the correct point when running the driver against
# a MySQL backend.
# is_mariadb = True
supports_unicode_statements = True
encoding = "utf8mb4"
convert_unicode = True
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
supports_native_decimal = True
default_paramstyle = "qmark"
execution_ctx_cls = MySQLExecutionContext_mariadbconnector
statement_compiler = MySQLCompiler_mariadbconnector
supports_server_side_cursors = True
colspecs = util.update_copy(
MySQLDialect.colspecs, {sqltypes.Uuid: _MariaDBUUID}
)
@util.memoized_property
def _dbapi_version(self) -> tuple[int, ...]:
if self.dbapi and hasattr(self.dbapi, "__version__"):
return tuple(
[
int(x)
for x in re.findall(
r"(\d+)(?:[-\.]?|$)", self.dbapi.__version__
)
]
)
else:
return (99, 99, 99)
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.paramstyle = "qmark"
if self.dbapi is not None:
if self._dbapi_version < mariadb_cpy_minimum_version:
raise NotImplementedError(
"The minimum required version for MariaDB "
"Connector/Python is %s"
% ".".join(str(x) for x in mariadb_cpy_minimum_version)
)
@classmethod
def import_dbapi(cls) -> DBAPIModule:
return __import__("mariadb")
def is_disconnect(
self,
e: DBAPIModule.Error,
connection: Optional[Union[PoolProxiedConnection, DBAPIConnection]],
cursor: Optional[DBAPICursor],
) -> bool:
if super().is_disconnect(e, connection, cursor):
return True
elif isinstance(e, self.loaded_dbapi.Error):
str_e = str(e).lower()
return "not connected" in str_e or "isn't valid" in str_e
else:
return False
def create_connect_args(self, url: URL) -> ConnectArgsType:
opts = url.translate_connect_args()
opts.update(url.query)
int_params = [
"connect_timeout",
"read_timeout",
"write_timeout",
"client_flag",
"port",
"pool_size",
]
bool_params = [
"local_infile",
"ssl_verify_cert",
"ssl",
"pool_reset_connection",
"compress",
]
for key in int_params:
util.coerce_kw_type(opts, key, int)
for key in bool_params:
util.coerce_kw_type(opts, key, bool)
# FOUND_ROWS must be set in CLIENT_FLAGS to enable
# supports_sane_rowcount.
client_flag = opts.get("client_flag", 0)
if self.dbapi is not None:
try:
CLIENT_FLAGS = __import__(
self.dbapi.__name__ + ".constants.CLIENT"
).constants.CLIENT
client_flag |= CLIENT_FLAGS.FOUND_ROWS
except (AttributeError, ImportError):
self.supports_sane_rowcount = False
opts["client_flag"] = client_flag
return [], opts
def _extract_error_code(self, exception: DBAPIModule.Error) -> int:
try:
rc: int = exception.errno
except:
rc = -1
return rc
def _detect_charset(self, connection: Connection) -> str:
return "utf8mb4"
def get_isolation_level_values(
self, dbapi_conn: DBAPIConnection
) -> Sequence[IsolationLevel]:
return (
"SERIALIZABLE",
"READ UNCOMMITTED",
"READ COMMITTED",
"REPEATABLE READ",
"AUTOCOMMIT",
)
def detect_autocommit_setting(self, dbapi_conn: DBAPIConnection) -> bool:
return bool(dbapi_conn.autocommit)
def set_isolation_level(
self, dbapi_connection: DBAPIConnection, level: IsolationLevel
) -> None:
if level == "AUTOCOMMIT":
dbapi_connection.autocommit = True
else:
dbapi_connection.autocommit = False
super().set_isolation_level(dbapi_connection, level)
def do_begin_twophase(self, connection: Connection, xid: Any) -> None:
connection.execute(
sql.text("XA BEGIN :xid").bindparams(
sql.bindparam("xid", xid, literal_execute=True)
)
)
def do_prepare_twophase(self, connection: Connection, xid: Any) -> None:
connection.execute(
sql.text("XA END :xid").bindparams(
sql.bindparam("xid", xid, literal_execute=True)
)
)
connection.execute(
sql.text("XA PREPARE :xid").bindparams(
sql.bindparam("xid", xid, literal_execute=True)
)
)
def do_rollback_twophase(
self,
connection: Connection,
xid: Any,
is_prepared: bool = True,
recover: bool = False,
) -> None:
if not is_prepared:
connection.execute(
sql.text("XA END :xid").bindparams(
sql.bindparam("xid", xid, literal_execute=True)
)
)
connection.execute(
sql.text("XA ROLLBACK :xid").bindparams(
sql.bindparam("xid", xid, literal_execute=True)
)
)
def do_commit_twophase(
self,
connection: Connection,
xid: Any,
is_prepared: bool = True,
recover: bool = False,
) -> None:
if not is_prepared:
self.do_prepare_twophase(connection, xid)
connection.execute(
sql.text("XA COMMIT :xid").bindparams(
sql.bindparam("xid", xid, literal_execute=True)
)
)
| MySQLDialect_mariadbconnector |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/bigquery.py | {
"start": 19980,
"end": 26787
} | class ____(
_BigQueryDbHookMixin, SQLIntervalCheckOperator, _BigQueryOperatorsEncryptionConfigurationMixin
):
"""
Check that the values of metrics given as SQL expressions are within a tolerance of the older ones.
This method constructs a query like so ::
SELECT {metrics_threshold_dict_key} FROM {table}
WHERE {date_filter_column}=<date>
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryIntervalCheckOperator`
:param table: the table name
:param days_back: number of days between ds and the ds we want to check
against. Defaults to 7 days
:param metrics_thresholds: a dictionary of ratios indexed by metrics, for
example 'COUNT(*)': 1.5 would require a 50 percent or less difference
between the current day, and the prior days_back.
:param use_legacy_sql: Whether to use legacy SQL (true)
or standard SQL (false).
:param encryption_configuration: (Optional) Custom encryption configuration (e.g., Cloud KMS keys).
.. code-block:: python
encryption_configuration = {
"kmsKeyName": "projects/PROJECT/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY",
}
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param location: The geographic location of the job. See details at:
https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param labels: a dictionary containing labels for the table, passed to BigQuery
:param deferrable: Run operator in the deferrable mode
:param poll_interval: (Deferrable mode only) polling period in seconds to check for the status of job.
Defaults to 4 seconds.
:param project_id: a string represents the BigQuery projectId
"""
template_fields: Sequence[str] = (
"table",
"gcp_conn_id",
"sql1",
"sql2",
"impersonation_chain",
"labels",
)
ui_color = BigQueryUIColors.CHECK.value
conn_id_field = "gcp_conn_id"
def __init__(
self,
*,
table: str,
metrics_thresholds: dict,
date_filter_column: str = "ds",
days_back: SupportsAbs[int] = -7,
gcp_conn_id: str = "google_cloud_default",
use_legacy_sql: bool = True,
location: str | None = None,
encryption_configuration: dict | None = None,
impersonation_chain: str | Sequence[str] | None = None,
labels: dict | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval: float = 4.0,
project_id: str = PROVIDE_PROJECT_ID,
**kwargs,
) -> None:
super().__init__(
table=table,
metrics_thresholds=metrics_thresholds,
date_filter_column=date_filter_column,
days_back=days_back,
**kwargs,
)
self.gcp_conn_id = gcp_conn_id
self.use_legacy_sql = use_legacy_sql
self.location = location
self.encryption_configuration = encryption_configuration
self.impersonation_chain = impersonation_chain
self.labels = labels
self.project_id = project_id
self.deferrable = deferrable
self.poll_interval = poll_interval
def _submit_job(
self,
hook: BigQueryHook,
sql: str,
job_id: str,
) -> BigQueryJob:
"""Submit a new job and get the job id for polling the status using Triggerer."""
configuration = {"query": {"query": sql, "useLegacySql": self.use_legacy_sql}}
self.include_encryption_configuration(configuration, "query")
return hook.insert_job(
configuration=configuration,
project_id=self.project_id or hook.project_id,
location=self.location,
job_id=job_id,
nowait=True,
)
def execute(self, context: Context):
if not self.deferrable:
super().execute(context)
else:
hook = BigQueryHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
self.log.info("Using ratio formula: %s", self.ratio_formula)
if self.project_id is None:
self.project_id = hook.project_id
self.log.info("Executing SQL check: %s", self.sql1)
job_1 = self._submit_job(hook, sql=self.sql1, job_id="")
context["ti"].xcom_push(key="job_id", value=job_1.job_id)
self.log.info("Executing SQL check: %s", self.sql2)
job_2 = self._submit_job(hook, sql=self.sql2, job_id="")
self.defer(
timeout=self.execution_timeout,
trigger=BigQueryIntervalCheckTrigger(
conn_id=self.gcp_conn_id,
first_job_id=job_1.job_id,
second_job_id=job_2.job_id,
project_id=self.project_id,
table=self.table,
location=self.location or hook.location,
metrics_thresholds=self.metrics_thresholds,
date_filter_column=self.date_filter_column,
days_back=self.days_back,
ratio_formula=self.ratio_formula,
ignore_zero=self.ignore_zero,
poll_interval=self.poll_interval,
impersonation_chain=self.impersonation_chain,
),
method_name="execute_complete",
)
def execute_complete(self, context: Context, event: dict[str, Any]) -> None:
"""
Act as a callback for when the trigger fires.
This returns immediately. It relies on trigger to throw an exception,
otherwise it assumes execution was successful.
"""
if event["status"] == "error":
raise AirflowException(event["message"])
self.log.info(
"%s completed with response %s ",
self.task_id,
event["message"],
)
| BigQueryIntervalCheckOperator |
python | nedbat__coveragepy | tests/test_files.py | {
"start": 28336,
"end": 30081
} | class ____(CoverageTest):
"""Tests of `find_python_files`."""
def test_find_python_files(self) -> None:
self.make_file("sub/a.py")
self.make_file("sub/b.py")
self.make_file("sub/x.c") # nope: not .py
self.make_file("sub/ssub/__init__.py")
self.make_file("sub/ssub/s.py")
self.make_file("sub/ssub/~s.py") # nope: editor effluvia
self.make_file("sub/lab/exp.py") # nope: no __init__.py
self.make_file("sub/windows.pyw")
py_files = set(find_python_files("sub", include_namespace_packages=False))
self.assert_same_files(
py_files,
[
"sub/a.py",
"sub/b.py",
"sub/ssub/__init__.py",
"sub/ssub/s.py",
"sub/windows.pyw",
],
)
def test_find_python_files_include_namespace_packages(self) -> None:
self.make_file("sub/a.py")
self.make_file("sub/b.py")
self.make_file("sub/x.c") # nope: not .py
self.make_file("sub/ssub/__init__.py")
self.make_file("sub/ssub/s.py")
self.make_file("sub/ssub/~s.py") # nope: editor effluvia
self.make_file("sub/lab/exp.py")
self.make_file("sub/windows.pyw")
py_files = set(find_python_files("sub", include_namespace_packages=True))
self.assert_same_files(
py_files,
[
"sub/a.py",
"sub/b.py",
"sub/ssub/__init__.py",
"sub/ssub/s.py",
"sub/lab/exp.py",
"sub/windows.pyw",
],
)
@pytest.mark.skipif(not env.WINDOWS, reason="Only need to run Windows tests on Windows.")
| FindPythonFilesTest |
python | tiangolo__fastapi | tests/test_include_router_defaults_overrides.py | {
"start": 413,
"end": 492
} | class ____(JSONResponse):
media_type = "application/x-level-3"
| ResponseLevel3 |
python | mkdocs__mkdocs | mkdocs/structure/files.py | {
"start": 626,
"end": 1633
} | class ____(enum.Enum):
EXCLUDED = -3
"""The file is excluded and will not be processed."""
DRAFT = -2
"""The file is excluded from the final site, but will still be populated during `mkdocs serve`."""
NOT_IN_NAV = -1
"""The file is part of the site, but doesn't produce nav warnings."""
UNDEFINED = 0
"""Still needs to be computed based on the config. If the config doesn't kick in, acts the same as `included`."""
INCLUDED = 1
"""The file is part of the site. Documentation pages that are omitted from the nav will produce warnings."""
def all(self):
return True
def is_included(self):
return self.value > self.DRAFT.value
def is_excluded(self):
return self.value <= self.DRAFT.value
def is_in_serve(self):
return self.value >= self.DRAFT.value
def is_in_nav(self):
return self.value > self.NOT_IN_NAV.value
def is_not_in_nav(self):
return self.value <= self.NOT_IN_NAV.value
| InclusionLevel |
python | pydata__xarray | xarray/backends/scipy_.py | {
"start": 10788,
"end": 13995
} | class ____(BackendEntrypoint):
"""
Backend for netCDF files based on the scipy package.
It can open ".nc", ".cdf", and "nc..gz" files but will only be
selected as the default if the "netcdf4" and "h5netcdf" engines are
not available. It has the advantage that is is a lightweight engine
that has no system requirements (unlike netcdf4 and h5netcdf).
Additionally it can open gzip compressed (".gz") files.
For more information about the underlying library, visit:
https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.netcdf_file.html
See Also
--------
backends.ScipyDataStore
backends.NetCDF4BackendEntrypoint
backends.H5netcdfBackendEntrypoint
"""
description = "Open netCDF files (.nc, .cdf and .nc.gz) using scipy in Xarray"
url = "https://docs.xarray.dev/en/stable/generated/xarray.backends.ScipyBackendEntrypoint.html"
def guess_can_open(
self,
filename_or_obj: T_PathFileOrDataStore,
) -> bool:
from xarray.core.utils import is_remote_uri
filename_or_obj = _normalize_filename_or_obj(filename_or_obj)
# scipy can only handle local files - check this before trying to read magic number
if isinstance(filename_or_obj, str) and is_remote_uri(filename_or_obj):
return False
magic_number = try_read_magic_number_from_file_or_path(filename_or_obj)
if magic_number is not None and magic_number.startswith(b"\x1f\x8b"):
with gzip.open(filename_or_obj) as f: # type: ignore[arg-type]
magic_number = try_read_magic_number_from_file_or_path(f)
if magic_number is not None:
return magic_number.startswith(b"CDF")
if isinstance(filename_or_obj, str | os.PathLike):
from pathlib import Path
suffix = "".join(Path(filename_or_obj).suffixes)
return suffix in {".nc", ".cdf", ".nc.gz"}
return False
def open_dataset(
self,
filename_or_obj: T_PathFileOrDataStore,
*,
mask_and_scale=True,
decode_times=True,
concat_characters=True,
decode_coords=True,
drop_variables: str | Iterable[str] | None = None,
use_cftime=None,
decode_timedelta=None,
mode="r",
format=None,
group=None,
mmap=None,
lock=None,
) -> Dataset:
filename_or_obj = _normalize_filename_or_obj(filename_or_obj)
store = ScipyDataStore(
filename_or_obj, mode=mode, format=format, group=group, mmap=mmap, lock=lock
)
store_entrypoint = StoreBackendEntrypoint()
with close_on_error(store):
ds = store_entrypoint.open_dataset(
store,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
drop_variables=drop_variables,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
)
return ds
BACKEND_ENTRYPOINTS["scipy"] = ("scipy", ScipyBackendEntrypoint)
| ScipyBackendEntrypoint |
python | pytorch__pytorch | torch/ao/nn/quantized/reference/modules/rnn.py | {
"start": 23654,
"end": 29848
} | class ____(RNNBase):
"""Reference Quantized GRU Module
We'll store weight_qparams for all the weights in _flat_weights, we need to pass in
a `weight_qparams_dict` that maps from weight name, e.g. weight_ih_l0,
to the weight_qparams for that weight
"""
def __init__(self, *args, **kwargs):
if "proj_size" in kwargs:
raise ValueError(
"proj_size argument is only supported for LSTM, not RNN or GRU"
)
super().__init__("GRU", *args, **kwargs)
def get_quantized_weight_bias_dict(self):
"""dictionary from flat_weight_name to quantized weight or (unquantized) bias
e.g.
{
"weight_ih_l0": quantized_weight,
"bias_ih_l0": unquantized_bias,
...
}
"""
quantized_weight_bias_dict = {}
for wn in self._flat_weights_names:
if hasattr(self, wn):
if wn.startswith("weight"):
weight_or_bias = get_quantized_weight(self, wn)
else:
weight_or_bias = getattr(self, wn)
else:
weight_or_bias = None
quantized_weight_bias_dict[wn] = weight_or_bias
return quantized_weight_bias_dict
def get_flat_weights(self):
flat_weights = []
for wn in self._flat_weights_names:
if hasattr(self, wn):
weight = getattr(self, wn)
if wn.startswith("weight"):
params = _get_weight_and_quantization_params(self, wn)
weight = _quantize_and_dequantize_weight(*params)
else:
weight = None
flat_weights.append(weight)
return flat_weights
def forward(self, input, hx=None): # noqa: F811
# Note: this is copied from the forward of GRU in https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/rnn.py
# only changed self._flat_weights to self.get_flat_weights()
# TODO: maybe we can try inheriting from that class and define get_flat_weights
# as a @property? this might interfere with TorchScript, if we remove that
# requirement in the future we should be able to do this
orig_input = input
# xxx: isinstance check needs to be in conditional for TorchScript to compile
if isinstance(orig_input, PackedSequence):
input, batch_sizes, sorted_indices, unsorted_indices = input
max_batch_size = int(batch_sizes[0])
else:
batch_sizes = None
assert input.dim() in (
2,
3,
), (
f"GRU: Expected input to be 2-D or 3-D but received {input.dim()}-D tensor"
)
is_batched = input.dim() == 3
batch_dim = 0 if self.batch_first else 1
if not is_batched:
input = input.unsqueeze(batch_dim)
if hx is not None:
if hx.dim() != 2:
raise RuntimeError(
f"For unbatched 2-D input, hx should also be 2-D but got {hx.dim()}-D tensor"
)
hx = hx.unsqueeze(1)
else:
if hx is not None and hx.dim() != 3:
raise RuntimeError(
f"For batched 3-D input, hx should also be 3-D but got {hx.dim()}-D tensor"
)
max_batch_size = input.size(0) if self.batch_first else input.size(1)
sorted_indices = None
unsorted_indices = None
if hx is None:
num_directions = 2 if self.bidirectional else 1
hx = torch.zeros(
self.num_layers * num_directions,
max_batch_size,
self.hidden_size,
dtype=input.dtype,
device=input.device,
)
else:
# Each batch of the hidden state should match the input sequence that
# the user believes he/she is passing in.
hx = self.permute_hidden(hx, sorted_indices)
self.check_forward_args(input, hx, batch_sizes)
if batch_sizes is None:
result = _VF.gru(
input,
hx,
self.get_flat_weights(),
self.bias,
self.num_layers,
self.dropout,
self.training,
self.bidirectional,
self.batch_first,
)
else:
result = _VF.gru(
input,
batch_sizes,
hx,
self.get_flat_weights(),
self.bias,
self.num_layers,
self.dropout,
self.training,
self.bidirectional,
)
output = result[0]
hidden = result[1]
# xxx: isinstance check needs to be in conditional for TorchScript to compile
if isinstance(orig_input, PackedSequence):
output_packed = PackedSequence(
output,
# pyrefly: ignore [bad-argument-type]
batch_sizes,
sorted_indices,
unsorted_indices,
)
return output_packed, self.permute_hidden(hidden, unsorted_indices)
else:
if not is_batched: # type: ignore[possibly-undefined]
output = output.squeeze(batch_dim) # type: ignore[possibly-undefined]
hidden = hidden.squeeze(1)
return output, self.permute_hidden(hidden, unsorted_indices)
def _get_name(self):
return "QuantizedGRU(Reference)"
@classmethod
def from_float(cls, mod, weight_qparams_dict):
ref_mod = cls(
mod.input_size,
mod.hidden_size,
mod.num_layers,
mod.bias,
mod.batch_first,
mod.dropout,
mod.bidirectional,
weight_qparams_dict=weight_qparams_dict,
)
for wn in mod._flat_weights_names:
setattr(ref_mod, wn, getattr(mod, wn))
return ref_mod
| GRU |
python | ray-project__ray | rllib/utils/framework.py | {
"start": 7391,
"end": 8755
} | class ____:
def __init__(self, *a, **kw):
raise ImportError("Could not import `tensorflow`. Try pip install tensorflow.")
@DeveloperAPI
def tf_function(tf_module):
"""Conditional decorator for @tf.function.
Use @tf_function(tf) instead to avoid errors if tf is not installed."""
# The actual decorator to use (pass in `tf` (which could be None)).
def decorator(func):
# If tf not installed -> return function as is (won't be used anyways).
if tf_module is None or tf_module.executing_eagerly():
return func
# If tf installed, return @tf.function-decorated function.
return tf_module.function(func)
return decorator
@PublicAPI
def try_import_tfp(error: bool = False):
"""Tries importing tfp and returns the module (or None).
Args:
error: Whether to raise an error if tfp cannot be imported.
Returns:
The tfp module.
Raises:
ImportError: If error=True and tfp is not installed.
"""
if "RLLIB_TEST_NO_TF_IMPORT" in os.environ:
logger.warning("Not importing TensorFlow Probability for test purposes.")
return None
try:
import tensorflow_probability as tfp
return tfp
except ImportError as e:
if error:
raise e
return None
# Fake module for torch.nn.
| _FakeTfClassStub |
python | great-expectations__great_expectations | tests/integration/fluent/test_sql_datasources.py | {
"start": 23755,
"end": 38089
} | class ____:
@pytest.mark.parametrize(
"column_name",
[
# DDL: unquoted_lower_col ----------------------------------
param("unquoted_lower_col", id="str unquoted_lower_col"),
param("UNQUOTED_LOWER_COL", id="str UNQUOTED_LOWER_COL"),
# DDL: UNQUOTED_UPPER_COL ----------------------------------
param("unquoted_upper_col", id="str unquoted_upper_col"),
param("UNQUOTED_UPPER_COL", id="str UNQUOTED_UPPER_COL"),
# DDL: "quoted_lower_col"-----------------------------------
param("quoted_lower_col", id="str quoted_lower_col"),
param("QUOTED_LOWER_COL", id="str QUOTED_LOWER_COL"),
# DDL: "QUOTED_UPPER_COL" ----------------------------------
param("quoted_upper_col", id="str quoted_upper_col"),
param("QUOTED_UPPER_COL", id="str QUOTED_UPPER_COL"),
# DDL: "quotedMixed" -------------------------------------
param("quotedmixed", id="str quotedmixed"),
param("quotedMixed", id="str quotedMixed"),
param("QUOTEDMIXED", id="str QUOTEDMIXED"),
# DDL: "quoted.w.dots" -------------------------------------
param("quoted.w.dots", id="str quoted.w.dots"),
param("QUOTED.W.DOTS", id="str QUOTED.W.DOTS"),
],
)
def test_unquoted_params(
self,
context: EphemeralDataContext,
self_hosted_sql_datasources: SQLDatasource,
table_factory: TableFactory,
column_name: str | quoted_name,
request: pytest.FixtureRequest,
):
"""
Test column expectations when using unquoted column name parameters
(actual column may have DDL with quotes).
Test fails if the expectation fails regardless of dialect.
"""
param_id = request.node.callspec.id
datasource = self_hosted_sql_datasources
if column_name[0] in ("'", '"', "`"):
pytest.skip(f"see _desired_state tests for {column_name!r}")
elif _fails_expectation(param_id):
# apply marker this way so that xpasses can be seen in the report
request.applymarker(pytest.mark.xfail(run=False))
print(f"\ncolumn DDL:\n {COLUMN_DDL[column_name]}") # type: ignore[index] # FIXME
print(f"\n`column_name` parameter __repr__:\n {column_name!r}")
print(f"type:\n {type(column_name)}\n")
table_factory(
gx_engine=datasource.get_execution_engine(),
table_names={TEST_TABLE_NAME},
data=[
{
"id": 1,
"name": param_id,
"quoted_upper_col": "my column is uppercase",
"quoted_lower_col": "my column is lowercase",
"unquoted_upper_col": "whatever",
"unquoted_lower_col": "whatever",
"quoted_mixed_case": "Whatever",
"quoted_w_dots": "what.ever",
},
],
)
asset = datasource.add_table_asset("my_asset", table_name=TEST_TABLE_NAME)
print(f"asset:\n{asset!r}\n")
suite = context.suites.add(ExpectationSuite(name=f"{datasource.name}-{asset.name}"))
suite.add_expectation_configuration(
expectation_configuration=ExpectationConfiguration(
type="expect_column_values_to_match_regex",
kwargs={"column": column_name, "regex": r".*"},
)
)
suite.save()
batch_definition = asset.add_batch_definition_whole_table("my_batch_def")
validation_definition = context.validation_definitions.add(
ValidationDefinition(name="my_validation_def", suite=suite, data=batch_definition)
)
checkpoint = context.checkpoints.add(
checkpoint=Checkpoint(
name=f"{datasource.name}-{asset.name}_checkpoint",
validation_definitions=[validation_definition],
)
)
result = checkpoint.run()
_ = _get_exception_details(result, prettyprint=True)
assert result.success is True, "validation failed"
@pytest.mark.parametrize(
"column_name",
[
# DDL: unquoted_lower_col ----------------------------------
param('"unquoted_lower_col"', id='str "unquoted_lower_col"'),
# DDL: UNQUOTED_UPPER_COL ----------------------------------
param('"UNQUOTED_UPPER_COL"', id='str "UNQUOTED_UPPER_COL"'),
# DDL: "quoted_lower_col"-----------------------------------
param('"quoted_lower_col"', id='str "quoted_lower_col"'),
# DDL: "QUOTED_UPPER_COL" ----------------------------------
param('"QUOTED_UPPER_COL"', id='str "QUOTED_UPPER_COL"'),
# DDL: "quotedMixed" -------------------------------------
param('"quotedMixed"', id='str "quotedMixed"'),
# DDL: "quoted.w.dots" -------------------------------------
param('"quoted.w.dots"', id='str "quoted.w.dots"'),
],
)
def test_quoted_params(
self,
context: EphemeralDataContext,
self_hosted_sql_datasources: SQLDatasource,
table_factory: TableFactory,
column_name: str | quoted_name,
request: pytest.FixtureRequest,
):
"""
Test column expectations when using quoted column name parameters
(actual column may have DDL without quotes).
Test fails if the expectation fails regardless of dialect.
"""
param_id = request.node.callspec.id
datasource = self_hosted_sql_datasources
dialect = GXSqlDialect(datasource.get_engine().dialect.name)
if column_name[0] not in ("'", '"', "`"):
pytest.skip(f"see test_unquoted_params for {column_name!r}")
elif _is_quote_char_dialect_mismatch(dialect, column_name):
pytest.skip(f"quote char dialect mismatch: {column_name[0]}")
elif _fails_expectation(param_id):
# apply marker this way so that xpasses can be seen in the report
request.applymarker(pytest.mark.xfail(run=False))
schema: str | None = None
print(f"\ncolumn DDL:\n {COLUMN_DDL[column_name]}") # type: ignore[index] # FIXME
print(f"\n`column_name` parameter __repr__:\n {column_name!r}")
print(f"type:\n {type(column_name)}\n")
table_factory(
gx_engine=datasource.get_execution_engine(),
table_names={TEST_TABLE_NAME},
schema=schema,
data=[
{
"id": 1,
"name": param_id,
"quoted_upper_col": "my column is uppercase",
"quoted_lower_col": "my column is lowercase",
"unquoted_upper_col": "whatever",
"unquoted_lower_col": "whatever",
"quoted_mixed_case": "Whatever",
"quoted_w_dots": "what.ever",
},
],
)
asset = datasource.add_table_asset(
"my_asset", table_name=TEST_TABLE_NAME, schema_name=schema
)
print(f"asset:\n{asset!r}\n")
suite = context.suites.add(ExpectationSuite(name=f"{datasource.name}-{asset.name}"))
suite.add_expectation_configuration(
expectation_configuration=ExpectationConfiguration(
type="expect_column_values_to_match_regex",
kwargs={"column": column_name, "regex": r".*"},
)
)
suite.save()
batch_definition = asset.add_batch_definition_whole_table("my_batch_def")
validation_definition = context.validation_definitions.add(
ValidationDefinition(name="my_validation_def", suite=suite, data=batch_definition)
)
checkpoint = context.checkpoints.add(
checkpoint=Checkpoint(
name=f"{datasource.name}-{asset.name}_checkpoint",
validation_definitions=[validation_definition],
)
)
result = checkpoint.run()
assert result.success is True, "validation failed"
@pytest.mark.parametrize(
"column_name",
[
# DDL: unquoted_lower_col ----------------------------------
param("unquoted_lower_col", id="str unquoted_lower_col"),
param('"unquoted_lower_col"', id='str "unquoted_lower_col"'),
param("UNQUOTED_LOWER_COL", id="str UNQUOTED_LOWER_COL"),
param('"UNQUOTED_LOWER_COL"', id='str "UNQUOTED_LOWER_COL"'),
# DDL: UNQUOTED_UPPER_COL ----------------------------------
param("unquoted_upper_col", id="str unquoted_upper_col"),
param('"unquoted_upper_col"', id='str "unquoted_upper_col"'),
param("UNQUOTED_UPPER_COL", id="str UNQUOTED_UPPER_COL"),
param('"UNQUOTED_UPPER_COL"', id='str "UNQUOTED_UPPER_COL"'),
# DDL: "quoted_lower_col"-----------------------------------
param("quoted_lower_col", id="str quoted_lower_col"),
param('"quoted_lower_col"', id='str "quoted_lower_col"'),
param("QUOTED_LOWER_COL", id="str QUOTED_LOWER_COL"),
param('"QUOTED_LOWER_COL"', id='str "QUOTED_LOWER_COL"'),
# DDL: "QUOTED_UPPER_COL" ----------------------------------
param("quoted_upper_col", id="str quoted_upper_col"),
param('"quoted_upper_col"', id='str "quoted_upper_col"'),
param("QUOTED_UPPER_COL", id="str QUOTED_UPPER_COL"),
param('"QUOTED_UPPER_COL"', id='str "QUOTED_UPPER_COL"'),
# DDL: "quotedMixed" ---------------------------------------
param("quotedmixed", id="str quotedmixed"),
param("quotedMixed", id="str quotedMixed"),
param('"quotedMixed"', id='str "quotedMixed"'),
param("QUOTEDMIXED", id="str QUOTEDMIXED"),
# DDL: "quoted.w.dots" -------------------------------------
param("quoted.w.dots", id="str quoted.w.dots"),
param('"quoted.w.dots"', id='str "quoted.w.dots"'), # TODO: fix me
param("QUOTED.W.DOTS", id="str QUOTED.W.DOTS"),
param('"QUOTED.W.DOTS"', id='str "QUOTED.W.DOTS"'),
],
)
def test_desired_state(
self,
context: EphemeralDataContext,
self_hosted_sql_datasources: SQLDatasource,
table_factory: TableFactory,
column_name: str | quoted_name,
request: pytest.FixtureRequest,
):
"""
Perform a raw query to check if the column exists before running the expectation.
This is used to determine how the identifier behaves natively in the database and
therefore determine if the expectation should pass or fail.
An expectation is expected to succeed if the column 'exists' and fail if it does not.
If we want GX to behave the same way as each dialect/database,
these tests should be our guide.
However currently, GX does not behave the same way as the databases in all cases.
"""
param_id = request.node.callspec.id
datasource = self_hosted_sql_datasources
dialect = GXSqlDialect(datasource.get_engine().dialect.name)
original_column_name = column_name
if column_name.startswith('"') and column_name.endswith('"'):
# databricks uses backticks for quoting
column_name = quote_str(column_name[1:-1], dialect=dialect)
print(f"\ncolumn DDL:\n {COLUMN_DDL[original_column_name]}") # type: ignore[index] # FIXME
print(f"\n`column_name` parameter __repr__:\n {column_name!r}")
print(f"type:\n {type(column_name)}\n")
table_factory(
gx_engine=datasource.get_execution_engine(),
table_names={TEST_TABLE_NAME},
data=[
{
"id": 1,
"name": param_id,
"quoted_upper_col": "my column is uppercase",
"quoted_lower_col": "my column is lowercase",
"unquoted_upper_col": "whatever",
"unquoted_lower_col": "whatever",
"quoted_mixed_case": "Whatever",
"quoted_w_dots": "what.ever",
},
],
)
qualified_table_name: str = TEST_TABLE_NAME
# check the column exists so that we know what if the expectation should succeed or fail
column_exists = _raw_query_check_column_exists(
column_name,
qualified_table_name,
datasource.get_execution_engine(),
)
asset = datasource.add_table_asset("my_asset", table_name=TEST_TABLE_NAME)
print(f"asset:\n{asset!r}\n")
suite = context.suites.add(ExpectationSuite(name=f"{datasource.name}-{asset.name}"))
suite.add_expectation_configuration(
expectation_configuration=ExpectationConfiguration(
type="expect_column_values_to_match_regex",
kwargs={"column": column_name, "regex": r".*"},
)
)
suite.save()
batch_definition = asset.add_batch_definition_whole_table("my_batch_def")
validation_definition = context.validation_definitions.add(
ValidationDefinition(name="my_validation_def", suite=suite, data=batch_definition)
)
checkpoint = context.checkpoints.add(
checkpoint=Checkpoint(
name=f"{datasource.name}-{asset.name}_checkpoint",
validation_definitions=[validation_definition],
)
)
result = checkpoint.run()
try:
if column_exists:
assert result.success is True, "column exists but validation failed"
else:
assert result.success is False, "column does not exist but validation succeeded"
except AssertionError as ae:
reason = str(ae).splitlines()[0]
print(reason)
# xfail if the expectation doesn't behave as the dialect would
pytest.xfail(reason=reason)
if __name__ == "__main__":
pytest.main([__file__, "-vv", "-rXf"])
| TestColumnExpectations |
python | Textualize__textual | tests/command_palette/test_declare_sources.py | {
"start": 381,
"end": 594
} | class ____(Provider):
async def search(self, _: str) -> Hits:
def goes_nowhere_does_nothing() -> None:
pass
yield Hit(1, "Hit", goes_nowhere_does_nothing, "Hit")
| ExampleCommandSource |
python | django__django | tests/servers/tests.py | {
"start": 1188,
"end": 1598
} | class ____(ThreadedWSGIServer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# This event is set right after the first time a request closes its
# database connections.
self._connections_closed = threading.Event()
def _close_connections(self):
super()._close_connections()
self._connections_closed.set()
| CloseConnectionTestServer |
python | scrapy__scrapy | tests/test_engine.py | {
"start": 2882,
"end": 3070
} | class ____(MySpider):
custom_settings = {
"ITEM_PIPELINES": {
"tests.pipelines.ProcessWithZeroDivisionErrorPipeline": 300,
}
}
| ItemZeroDivisionErrorSpider |
python | spack__spack | lib/spack/spack/binary_distribution.py | {
"start": 38143,
"end": 39977
} | class ____(Uploader):
def __init__(
self,
mirror: spack.mirrors.mirror.Mirror,
force: bool,
update_index: bool,
base_image: Optional[str],
) -> None:
super().__init__(mirror, force, update_index)
self.target_image = spack.oci.oci.image_from_mirror(mirror)
self.base_image = ImageReference.from_string(base_image) if base_image else None
def push(
self, specs: List[spack.spec.Spec]
) -> Tuple[List[spack.spec.Spec], List[Tuple[spack.spec.Spec, BaseException]]]:
skipped, base_images, checksums, upload_errors = _oci_push(
target_image=self.target_image,
base_image=self.base_image,
installed_specs_with_deps=specs,
force=self.force,
tmpdir=self.tmpdir,
executor=self.executor,
)
self._base_images = base_images
self._checksums = checksums
# only update index if any binaries were uploaded
if self.update_index and len(skipped) + len(upload_errors) < len(specs):
_oci_update_index(self.target_image, self.tmpdir, self.executor)
return skipped, upload_errors
def tag(self, tag: str, roots: List[spack.spec.Spec]):
tagged_image = self.target_image.with_tag(tag)
# _push_oci may not populate self._base_images if binaries were already in the registry
for spec in roots:
_oci_update_base_images(
base_image=self.base_image,
target_image=self.target_image,
spec=spec,
base_image_cache=self._base_images,
)
_oci_put_manifest(
self._base_images, self._checksums, tagged_image, self.tmpdir, None, None, *roots
)
tty.info(f"Tagged {tagged_image}")
| OCIUploader |
python | huggingface__transformers | src/transformers/data/processors/squad.py | {
"start": 25360,
"end": 28133
} | class ____:
"""
Single squad example features to be fed to a model. Those features are model-specific and can be crafted from
[`~data.processors.squad.SquadExample`] using the
:method:*~transformers.data.processors.squad.squad_convert_examples_to_features* method.
Args:
input_ids: Indices of input sequence tokens in the vocabulary.
attention_mask: Mask to avoid performing attention on padding token indices.
token_type_ids: Segment token indices to indicate first and second portions of the inputs.
cls_index: the index of the CLS token.
p_mask: Mask identifying tokens that can be answers vs. tokens that cannot.
Mask with 1 for tokens than cannot be in the answer and 0 for token that can be in an answer
example_index: the index of the example
unique_id: The unique Feature identifier
paragraph_len: The length of the context
token_is_max_context:
List of booleans identifying which tokens have their maximum context in this feature object. If a token
does not have their maximum context in this feature object, it means that another feature object has more
information related to that token and should be prioritized over this feature for that token.
tokens: list of tokens corresponding to the input ids
token_to_orig_map: mapping between the tokens and the original text, needed in order to identify the answer.
start_position: start of the answer token index
end_position: end of the answer token index
encoding: optionally store the BatchEncoding with the fast-tokenizer alignment methods.
"""
def __init__(
self,
input_ids,
attention_mask,
token_type_ids,
cls_index,
p_mask,
example_index,
unique_id,
paragraph_len,
token_is_max_context,
tokens,
token_to_orig_map,
start_position,
end_position,
is_impossible,
qas_id: str | None = None,
encoding: BatchEncoding | None = None,
):
self.input_ids = input_ids
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.cls_index = cls_index
self.p_mask = p_mask
self.example_index = example_index
self.unique_id = unique_id
self.paragraph_len = paragraph_len
self.token_is_max_context = token_is_max_context
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
self.qas_id = qas_id
self.encoding = encoding
| SquadFeatures |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataproc.py | {
"start": 55142,
"end": 56374
} | class ____(_DataprocStartStopClusterBaseOperator):
"""Start a cluster in a project."""
operator_extra_links = (DataprocClusterLink(),)
def execute(self, context: Context) -> dict | None:
self.log.info("Starting the cluster: %s", self.cluster_name)
cluster = super().execute(context)
DataprocClusterLink.persist(
context=context,
cluster_id=self.cluster_name,
project_id=self._get_project_id(),
region=self.region,
)
self.log.info("Cluster started")
return cluster
def _check_desired_cluster_state(self, cluster: Cluster) -> tuple[bool, str | None]:
if cluster.status.state == cluster.status.State.RUNNING:
return True, f'The cluster "{self.cluster_name}" already running!'
return False, None
def _get_operation(self) -> operation.Operation:
return self.hook.start_cluster(
region=self.region,
project_id=self._get_project_id(),
cluster_name=self.cluster_name,
cluster_uuid=self.cluster_uuid,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
| DataprocStartClusterOperator |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_annotations/annotation_presence.py | {
"start": 2925,
"end": 2993
} | class ____:
def __init__(self):
print(f"{self.attr=}")
| Class |
python | pytoolz__toolz | toolz/tests/test_dicttoolz.py | {
"start": 8048,
"end": 9037
} | class ____(TestDict):
"""Test CustomMapping as input and factory
Class attributes:
D: callable that inputs a dict and creates or returns a MutableMapping
kw: kwargs dict to specify "factory" keyword (if applicable)
"""
D = CustomMapping
kw = {'factory': lambda: CustomMapping()}
def test_environ():
# See: https://github.com/pytoolz/cytoolz/issues/127
assert keymap(identity, os.environ) == os.environ
assert valmap(identity, os.environ) == os.environ
assert itemmap(identity, os.environ) == os.environ
def test_merge_with_non_dict_mappings():
class Foo(Mapping):
def __init__(self, d):
self.d = d
def __iter__(self):
return iter(self.d)
def __getitem__(self, key):
return self.d[key]
def __len__(self):
return len(self.d)
d = Foo({1: 1})
assert merge(d) is d or merge(d) == {1: 1}
assert merge_with(sum, d) == {1: 1}
| TestCustomMapping |
python | walkccc__LeetCode | solutions/1058. Minimize Rounding Error to Meet Target/1058.py | {
"start": 0,
"end": 773
} | class ____:
def minimizeError(self, prices: list[str], target: int) -> str:
# A[i] := (costCeil - costFloor, costCeil, costFloor)
# The lower the costCeil - costFloor is, the cheaper to ceil it.
A = []
sumFloored = 0
sumCeiled = 0
for price in map(float, prices):
floored = math.floor(price)
ceiled = math.ceil(price)
sumFloored += floored
sumCeiled += ceiled
costFloor = price - floored
costCeil = ceiled - price
A.append((costCeil - costFloor, costCeil, costFloor))
if not sumFloored <= target <= sumCeiled:
return '-1'
A.sort()
nCeiled = target - sumFloored
return '{:.3f}'.format(sum(a[1] for a in A[:nCeiled]) +
sum(a[2] for a in A[nCeiled:]))
| Solution |
python | conda__conda | conda/core/path_actions.py | {
"start": 17805,
"end": 20955
} | class ____(LinkPathAction):
def __init__(
self,
transaction_context,
package_info,
extracted_package_dir,
source_short_path,
target_prefix,
target_short_path,
link_type,
prefix_placeholder,
file_mode,
source_path_data,
):
# This link_type used in execute(). Make sure we always respect LinkType.copy request.
link_type = LinkType.copy if link_type == LinkType.copy else LinkType.hardlink
super().__init__(
transaction_context,
package_info,
extracted_package_dir,
source_short_path,
target_prefix,
target_short_path,
link_type,
source_path_data,
)
self.prefix_placeholder = prefix_placeholder
self.file_mode = file_mode
self.intermediate_path = None
def verify(self):
validation_error = super().verify()
if validation_error:
return validation_error
if islink(self.source_full_path):
log.log(
TRACE,
"ignoring prefix update for symlink with source path %s",
self.source_full_path,
)
# return
raise RuntimeError(
f"Ignoring prefix update for symlink with source path {self.source_full_path}"
)
mkdir_p(self.transaction_context["temp_dir"])
self.intermediate_path = join(
self.transaction_context["temp_dir"], str(uuid4())
)
log.log(
TRACE, "copying %s => %s", self.source_full_path, self.intermediate_path
)
create_link(self.source_full_path, self.intermediate_path, LinkType.copy)
make_writable(self.intermediate_path)
try:
log.log(TRACE, "rewriting prefixes in %s", self.target_full_path)
update_prefix(
self.intermediate_path,
context.target_prefix_override or self.target_prefix,
self.prefix_placeholder,
self.file_mode,
subdir=self.package_info.repodata_record.subdir,
)
except _PaddingError:
raise PaddingError(
self.target_full_path,
self.prefix_placeholder,
len(self.prefix_placeholder),
)
sha256_in_prefix = compute_sum(self.intermediate_path, "sha256")
self.prefix_path_data = PathDataV1.from_objects(
self.prefix_path_data,
file_mode=self.file_mode,
path_type=PathType.hardlink,
prefix_placeholder=self.prefix_placeholder,
sha256_in_prefix=sha256_in_prefix,
)
self._verified = True
def execute(self):
if not self._verified:
self.verify()
source_path = self.intermediate_path or self.source_full_path
log.log(TRACE, "linking %s => %s", source_path, self.target_full_path)
create_link(source_path, self.target_full_path, self.link_type)
self._execute_successful = True
| PrefixReplaceLinkAction |
python | tensorflow__tensorflow | tensorflow/python/distribute/failure_handling/failure_handling.py | {
"start": 8307,
"end": 8940
} | class ____(TerminationConfig):
"""Configurations for GCP GPU VM."""
def __init__( # pylint: disable=super-init-not-called
self,
termination_watcher_fn=None,
exit_fn=None,
grace_period=None,
save_fn=None,
):
self.termination_watcher_fn = (
termination_watcher_fn
or failure_handling_util.termination_watcher_function_gce
)
self.exit_fn = exit_fn or failure_handling_util.gce_exit_fn
self.grace_period = (
grace_period if grace_period or grace_period == 0 else
failure_handling_util.GRACE_PERIOD_GCE)
self.save_fn = save_fn
| GcpGpuTerminationConfig |
python | astropy__astropy | astropy/io/votable/converters.py | {
"start": 3307,
"end": 7522
} | class ____:
"""
The base class for all converters. Each subclass handles
converting a specific VOTABLE data type to/from the TABLEDATA_ and
BINARY_ on-disk representations.
Parameters
----------
field : `~astropy.io.votable.tree.Field`
object describing the datatype
config : dict
The parser configuration dictionary
pos : tuple
The position in the XML file where the FIELD object was
found. Used for error messages.
"""
def __init__(self, field, config=None, pos=None):
pass
@staticmethod
def _parse_length(read):
return struct.unpack(">I", read(4))[0]
@staticmethod
def _write_length(length):
return struct.pack(">I", int(length))
def supports_empty_values(self, config):
"""
Returns True when the field can be completely empty.
"""
return config.get("version_1_3_or_later")
def parse(self, value, config=None, pos=None):
"""
Convert the string *value* from the TABLEDATA_ format into an
object with the correct native in-memory datatype and mask flag.
Parameters
----------
value : str
value in TABLEDATA format
Returns
-------
native : tuple
A two-element tuple of: value, mask.
The value as a Numpy array or scalar, and *mask* is True
if the value is missing.
"""
raise NotImplementedError("This datatype must implement a 'parse' method.")
def parse_scalar(self, value, config=None, pos=None):
"""
Parse a single scalar of the underlying type of the converter.
For non-array converters, this is equivalent to parse. For
array converters, this is used to parse a single
element of the array.
Parameters
----------
value : str
value in TABLEDATA format
Returns
-------
native : (2,) tuple
(value, mask)
The value as a Numpy array or scalar, and *mask* is True
if the value is missing.
"""
return self.parse(value, config, pos)
def output(self, value, mask):
"""
Convert the object *value* (in the native in-memory datatype)
to a unicode string suitable for serializing in the TABLEDATA_
format.
Parameters
----------
value
The value, the native type corresponding to this converter
mask : bool
If `True`, will return the string representation of a
masked value.
Returns
-------
tabledata_repr : unicode
"""
raise NotImplementedError("This datatype must implement a 'output' method.")
def binparse(self, read):
"""
Reads some number of bytes from the BINARY_ format
representation by calling the function *read*, and returns the
native in-memory object representation for the datatype
handled by *self*.
Parameters
----------
read : function
A function that given a number of bytes, returns a byte
string.
Returns
-------
native : (2,) tuple
(value, mask). The value as a Numpy array or scalar, and *mask* is
True if the value is missing.
"""
raise NotImplementedError("This datatype must implement a 'binparse' method.")
def binoutput(self, value, mask):
"""
Convert the object *value* in the native in-memory datatype to
a string of bytes suitable for serialization in the BINARY_
format.
Parameters
----------
value
The value, the native type corresponding to this converter
mask : bool
If `True`, will return the string representation of a
masked value.
Returns
-------
bytes : bytes
The binary representation of the value, suitable for
serialization in the BINARY_ format.
"""
raise NotImplementedError("This datatype must implement a 'binoutput' method.")
| Converter |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_internal/resolution/resolvelib/candidates.py | {
"start": 18562,
"end": 19823
} | class ____(Candidate):
is_installed = False
source_link = None
def __init__(self, py_version_info: Optional[Tuple[int, ...]]) -> None:
if py_version_info is not None:
version_info = normalize_version_info(py_version_info)
else:
version_info = sys.version_info[:3]
self._version = Version(".".join(str(c) for c in version_info))
# We don't need to implement __eq__() and __ne__() since there is always
# only one RequiresPythonCandidate in a resolution, i.e. the host Python.
# The built-in object.__eq__() and object.__ne__() do exactly what we want.
def __str__(self) -> str:
return f"Python {self._version}"
@property
def project_name(self) -> NormalizedName:
return REQUIRES_PYTHON_IDENTIFIER
@property
def name(self) -> str:
return REQUIRES_PYTHON_IDENTIFIER
@property
def version(self) -> Version:
return self._version
def format_for_error(self) -> str:
return f"Python {self.version}"
def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:
return ()
def get_install_requirement(self) -> Optional[InstallRequirement]:
return None
| RequiresPythonCandidate |
python | vyperlang__vyper | vyper/codegen/memory_allocator.py | {
"start": 1003,
"end": 5437
} | class ____:
"""
Low-level memory allocator. Used to allocate and de-allocate memory slots.
This object should not be accessed directly. Memory allocation happens via
declaring variables within `Context`.
"""
next_mem: int
_ALLOCATION_LIMIT: int = 2**64
def __init__(self, start_position: int = MemoryPositions.RESERVED_MEMORY):
"""
Initializer.
Arguments
---------
start_position : int, optional
The initial offset to use as the free memory pointer. Offsets
prior to this value are considered permanently allocated.
"""
self.next_mem = start_position
self.size_of_mem = start_position
self.deallocated_mem: List[FreeMemory] = []
# Get the next unused memory location
def get_next_memory_position(self) -> int:
return self.next_mem
def allocate_memory(self, size: int) -> int:
"""
Allocate `size` bytes in memory.
*** No guarantees are made that allocated memory is clean! ***
If no memory was previously de-allocated, memory is expanded
and the free memory pointer is increased.
If sufficient space is available within de-allocated memory, the lowest
available offset is returned and that memory is now marked as allocated.
Arguments
---------
size : int
The number of bytes to allocate. Must be divisible by 32.
Returns
-------
int
Start offset of the newly allocated memory.
"""
if size % 32 != 0: # pragma: nocover
raise CompilerPanic(f"tried to allocate {size} bytes, only multiples of 32 supported.")
if size < 0: # pragma: nocover
# sanity check
raise CompilerPanic(f"tried to allocate {size} bytes")
# check for deallocated memory prior to expanding
for i, free_memory in enumerate(self.deallocated_mem):
if free_memory.size == size:
del self.deallocated_mem[i]
return free_memory.position
if free_memory.size > size:
return free_memory.partially_allocate(size)
# if no deallocated slots are available, expand memory
return self._expand_memory(size)
def _expand_memory(self, size: int) -> int:
"""
Allocate `size` bytes in memory, starting from the free memory pointer.
"""
if size % 32 != 0:
raise CompilerPanic("Memory misaligment, only multiples of 32 supported.")
before_value = self.next_mem
self.next_mem += size
self.size_of_mem = max(self.size_of_mem, self.next_mem)
if self.size_of_mem >= self._ALLOCATION_LIMIT:
# this should not be caught
raise MemoryAllocationException(
f"Tried to allocate {self.size_of_mem} bytes! "
f"(limit is {self._ALLOCATION_LIMIT} (2**64) bytes)"
)
return before_value
def deallocate_memory(self, pos: int, size: int) -> None:
"""
De-allocate memory.
Arguments
---------
pos : int
The initial memory position to de-allocate.
size : int
The number of bytes to de-allocate. Must be divisible by 32.
"""
if size % 32 != 0:
raise CompilerPanic("Memory misaligment, only multiples of 32 supported.")
self.deallocated_mem.append(FreeMemory(position=pos, size=size))
self.deallocated_mem.sort(key=lambda k: k.position)
if not self.deallocated_mem:
return
# iterate over deallocated memory and merge slots where possible
i = 1
active = self.deallocated_mem[0]
while len(self.deallocated_mem) > i:
next_slot = self.deallocated_mem[i]
if next_slot.position == active.position + active.size:
active.size += next_slot.size
self.deallocated_mem.remove(next_slot)
else:
active = next_slot
i += 1
# if the highest free memory slot ends at the edge of the
# allocated memory, reduce the free memory pointer
last = self.deallocated_mem[-1]
if last.position + last.size == self.next_mem:
self.next_mem = last.position
del self.deallocated_mem[-1]
| MemoryAllocator |
python | readthedocs__readthedocs.org | readthedocs/projects/views/private.py | {
"start": 30188,
"end": 30444
} | class ____(DomainMixin, ListViewWithForm):
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
# Get the default docs domain
ctx["default_domain"] = settings.PUBLIC_DOMAIN
return ctx
| DomainList |
python | fluentpython__example-code-2e | 18-with-match/lispy/original/lis.py | {
"start": 460,
"end": 4234
} | class ____(object):
"A user-defined Scheme procedure."
def __init__(self, parms, body, env):
self.parms, self.body, self.env = parms, body, env
def __call__(self, *args):
env = Environment(dict(zip(self.parms, args)), self.env)
return eval(self.body, env)
################ Global Environment
def standard_env():
"An environment with some Scheme standard procedures."
env = {}
env.update(vars(math)) # sin, cos, sqrt, pi, ...
env.update({
'+':op.add, '-':op.sub, '*':op.mul, '/':op.truediv,
'>':op.gt, '<':op.lt, '>=':op.ge, '<=':op.le, '=':op.eq,
'abs': abs,
'append': op.add,
'apply': lambda proc, args: proc(*args),
'begin': lambda *x: x[-1],
'car': lambda x: x[0],
'cdr': lambda x: x[1:],
'cons': lambda x,y: [x] + y,
'eq?': op.is_,
'equal?': op.eq,
'length': len,
'list': lambda *x: list(x),
'list?': lambda x: isinstance(x,list),
'map': lambda *args: list(map(*args)),
'max': max,
'min': min,
'not': op.not_,
'null?': lambda x: x == [],
'number?': lambda x: isinstance(x, Number),
'procedure?': callable,
'round': round,
'symbol?': lambda x: isinstance(x, Symbol),
})
return env
global_env = standard_env()
################ Parsing: parse, tokenize, and read_from_tokens
def parse(program):
"Read a Scheme expression from a string."
return read_from_tokens(tokenize(program))
def tokenize(s):
"Convert a string into a list of tokens."
return s.replace('(',' ( ').replace(')',' ) ').split()
def read_from_tokens(tokens):
"Read an expression from a sequence of tokens."
if len(tokens) == 0:
raise SyntaxError('unexpected EOF while reading')
token = tokens.pop(0)
if '(' == token:
L = []
while tokens[0] != ')':
L.append(read_from_tokens(tokens))
tokens.pop(0) # pop off ')'
return L
elif ')' == token:
raise SyntaxError('unexpected )')
else:
return atom(token)
def atom(token):
"Numbers become numbers; every other token is a symbol."
try: return int(token)
except ValueError:
try: return float(token)
except ValueError:
return Symbol(token)
################ Interaction: A REPL
def repl(prompt='lis.py> '):
"A prompt-read-eval-print loop."
while True:
val = eval(parse(input(prompt)))
if val is not None:
print(lispstr(val))
def lispstr(exp):
"Convert a Python object back into a Lisp-readable string."
if isinstance(exp, List):
return '(' + ' '.join(map(lispstr, exp)) + ')'
else:
return str(exp)
################ eval
def eval(x, env=global_env):
"Evaluate an expression in an environment."
if isinstance(x, Symbol): # variable reference
return env[x]
elif not isinstance(x, List): # constant literal
return x
elif x[0] == 'quote': # (quote exp)
(_, exp) = x
return exp
elif x[0] == 'if': # (if test conseq alt)
(_, test, conseq, alt) = x
exp = (conseq if eval(test, env) else alt)
return eval(exp, env)
elif x[0] == 'define': # (define var exp)
(_, var, exp) = x
env[var] = eval(exp, env)
elif x[0] == 'lambda': # (lambda (var...) body)
(_, parms, body) = x
return Procedure(parms, body, env)
else: # (proc arg...)
proc = eval(x[0], env)
args = [eval(exp, env) for exp in x[1:]]
return proc(*args)
| Procedure |
python | allegroai__clearml | clearml/backend_api/services/v2_23/queues.py | {
"start": 89463,
"end": 90646
} | class ____(Response):
"""
Response of queues.remove_task endpoint.
:param removed: Number of tasks removed (0 or 1)
:type removed: int
"""
_service = "queues"
_action = "remove_task"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"removed": {
"description": "Number of tasks removed (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, removed: Optional[int] = None, **kwargs: Any) -> None:
super(RemoveTaskResponse, self).__init__(**kwargs)
self.removed = removed
@schema_property("removed")
def removed(self) -> Optional[int]:
return self._property_removed
@removed.setter
def removed(self, value: Optional[int]) -> None:
if value is None:
self._property_removed = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "removed", six.integer_types)
self._property_removed = value
| RemoveTaskResponse |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/properties.py | {
"start": 4071,
"end": 5096
} | class ____:
def __init__(self, my_property: str) -> None:
self.my_property = my_property
def test_union_property_attribute_source():
obj: Union[TaintedGetterAndSetter, RegularAttribute]
if 1 > 2:
obj = TaintedGetterAndSetter()
else:
obj = RegularAttribute(_test_source())
return obj.my_property
def test_union_property_attribute_sink(x):
obj: Union[TaintedGetterAndSetter, RegularAttribute]
if 1 > 2:
obj = TaintedGetterAndSetter()
else:
obj = RegularAttribute(x)
_test_sink(obj.my_property)
def test_getattr_on_property(x: TaintedGetterAndSetter):
_test_sink(getattr(x, "my_property", ""))
def foo():
pass
def bar():
pass
def function_with_nested_properties():
# Property setters within a function, not a class
@property
def my_property(self) -> int:
foo()
return 0
@my_property.setter
def my_property(self, value) -> None:
_test_sink(_test_source())
bar()
| RegularAttribute |
python | GoogleCloudPlatform__python-docs-samples | dataflow/gemma/custom_model_gemma.py | {
"start": 1085,
"end": 2871
} | class ____(ModelHandler[str, PredictionResult, GemmaCausalLM]):
def __init__(
self,
model_name: str = "gemma_2B",
):
""" Implementation of the ModelHandler interface for Gemma using text as input.
Example Usage::
pcoll | RunInference(GemmaModelHandler())
Args:
model_name: The Gemma model name. Default is gemma_2B.
"""
self._model_name = model_name
self._env_vars = {}
def share_model_across_processes(self) -> bool:
""" Indicates if the model should be loaded once-per-VM rather than
once-per-worker-process on a VM. Because Gemma is a large language model,
this will always return True to avoid OOM errors.
"""
return True
def load_model(self) -> GemmaCausalLM:
"""Loads and initializes a model for processing."""
return keras_nlp.models.GemmaCausalLM.from_preset(self._model_name)
def run_inference(
self,
batch: Sequence[str],
model: GemmaCausalLM,
inference_args: Optional[dict[str, Any]] = None
) -> Iterable[PredictionResult]:
"""Runs inferences on a batch of text strings.
Args:
batch: A sequence of examples as text strings.
model: The Gemma model being used.
inference_args: Any additional arguments for an inference.
Returns:
An Iterable of type PredictionResult.
"""
# Loop each text string, and use a tuple to store the inference results.
predictions = []
for one_text in batch:
result = model.generate(one_text, max_length=64)
predictions.append(result)
return utils._convert_to_result(batch, predictions, self._model_name)
| GemmaModelHandler |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess4.py | {
"start": 1281,
"end": 1506
} | class ____:
def __new__(cls):
return object.__new__(cls)
def curry[First, *Rest, Result](
function: Callable[[First, *Rest], Result],
) -> Callable[[*Rest], Callable[[First], Result]]: ...
| ClassWithNewOverride |
python | django__django | tests/generic_views/views.py | {
"start": 7969,
"end": 8028
} | class ____:
id = "non_model_1"
_meta = None
| NonModel |
python | apache__avro | lang/py/avro/errors.py | {
"start": 3274,
"end": 3399
} | class ____(AvroException):
"""Raised when there's a problem reading or writing file object containers."""
| DataFileException |
python | great-expectations__great_expectations | great_expectations/data_context/data_context/abstract_data_context.py | {
"start": 4382,
"end": 100136
} | class ____(ConfigPeer, ABC):
"""Base class for all Data Contexts that contains shared functionality.
The class encapsulates most store / core components and convenience methods used to access them, meaning the
majority of Data Context functionality lives here.
One of the primary responsibilities of the DataContext is managing CRUD operations for core GX objects:
.. list-table:: Supported CRUD Methods
:widths: 10 18 18 18 18
:header-rows: 1
* -
- Stores
- Datasources
- ExpectationSuites
- Checkpoints
* - `get`
- ❌
- ✅
- ✅
- ✅
* - `add`
- ✅
- ✅
- ✅
- ✅
* - `update`
- ❌
- ✅
- ✅
- ✅
* - `add_or_update`
- ❌
- ✅
- ✅
- ✅
* - `delete`
- ✅
- ✅
- ✅
- ✅
""" # noqa: E501 # FIXME CoP
# NOTE: <DataContextRefactor> These can become a property like ExpectationsStore.__name__ or placed in a separate # noqa: E501 # FIXME CoP
# test_yml_config module so AbstractDataContext is not so cluttered.
_ROOT_CONF_DIR = pathlib.Path.home() / ".great_expectations"
_ROOT_CONF_FILE = _ROOT_CONF_DIR / "great_expectations.conf"
_ETC_CONF_DIR = pathlib.Path("/etc")
_ETC_CONF_FILE = _ETC_CONF_DIR / "great_expectations.conf"
GLOBAL_CONFIG_PATHS = [_ROOT_CONF_FILE, _ETC_CONF_FILE]
DOLLAR_SIGN_ESCAPE_STRING = r"\$"
# instance attribute type annotations
fluent_config: GxConfig
def __init__(
self,
runtime_environment: Optional[dict] = None,
user_agent_str: Optional[str] = None,
) -> None:
"""
Constructor for AbstractDataContext. Will handle instantiation logic that is common to all DataContext objects
Args:
runtime_environment (dict): a dictionary of config variables that
override those set in config_variables.yml and the environment
user_agent_str (str | None): UserAgent string to be used in analytics events
""" # noqa: E501 # FIXME CoP
if runtime_environment is None:
runtime_environment = {}
self.runtime_environment = runtime_environment
self._user_agent_str = user_agent_str
self._config_provider = self._init_config_provider()
self._config_variables = self._load_config_variables()
self._variables = self._init_variables()
# config providers must be provisioned before loading zep_config
self.fluent_config = self._load_fluent_config(self._config_provider)
# Init plugin support
if self.plugins_directory is not None and os.path.exists( # noqa: PTH110 # FIXME CoP
self.plugins_directory
):
sys.path.append(self.plugins_directory)
# We want to have directories set up before initializing usage statistics so
# that we can obtain a context instance id
self._in_memory_instance_id: str | None = (
None # This variable *may* be used in case we cannot save an instance id
)
# Init stores
self._stores: dict = {}
self._init_primary_stores(self.project_config_with_variables_substituted.stores)
# The DatasourceStore is inherent to all DataContexts but is not an explicit part of the project config. # noqa: E501 # FIXME CoP
# As such, it must be instantiated separately.
self._datasource_store = self._init_datasource_store()
self._init_datasources()
# Init data_context_id
self._data_context_id = self._construct_data_context_id()
# Override the project_config data_context_id if an expectations_store was already set up
self.config.data_context_id = self._data_context_id
self._suite_parameter_dependencies: dict = {}
self._init_data_source_manager()
self._attach_fluent_config_datasources_and_build_data_connectors(self.fluent_config)
def _init_data_source_manager(self) -> None:
self._data_sources: DataSourceManager = DataSourceManager(self)
self._suites: SuiteFactory | None = None
if expectations_store := self.stores.get(self.expectations_store_name):
self._suites = SuiteFactory(
store=expectations_store,
)
self._checkpoints: CheckpointFactory | None = None
if checkpoint_store := self.stores.get(self.checkpoint_store_name):
self._checkpoints = CheckpointFactory(
store=checkpoint_store,
)
self._validation_definitions: ValidationDefinitionFactory = ValidationDefinitionFactory(
store=self.validation_definition_store
)
@property
@abstractmethod
def mode(self) -> Literal["cloud", "file", "ephemeral"]:
"""Context mode. Should correspond to the modes passed to `get_context`"""
...
def _init_config_provider(self) -> _ConfigurationProvider:
config_provider = _ConfigurationProvider()
self._register_providers(config_provider)
return config_provider
def _register_providers(self, config_provider: _ConfigurationProvider) -> None:
"""
Registers any relevant ConfigurationProvider instances to self._config_provider.
Note that order matters here - if there is a namespace collision, later providers will overwrite
the values derived from previous ones. The order of precedence is as follows:
- Config variables
- Environment variables
- Runtime environment
""" # noqa: E501 # FIXME CoP
config_variables_file_path = self._project_config.config_variables_file_path
if config_variables_file_path:
config_provider.register_provider(
_ConfigurationVariablesConfigurationProvider(
config_variables_file_path=config_variables_file_path,
root_directory=self.root_directory,
)
)
config_provider.register_provider(_EnvironmentConfigurationProvider())
config_provider.register_provider(
_RuntimeEnvironmentConfigurationProvider(self.runtime_environment)
)
@abstractmethod
def _init_project_config(
self, project_config: DataContextConfig | Mapping
) -> DataContextConfig:
raise NotImplementedError
@abstractmethod
def _init_variables(self) -> DataContextVariables:
raise NotImplementedError
def _save_project_config(self) -> None:
"""
Each DataContext will define how its project_config will be saved through its internal 'variables'.
- FileDataContext : Filesystem.
- CloudDataContext : Cloud endpoint
- Ephemeral : not saved, and logging message outputted
""" # noqa: E501 # FIXME CoP
return self.variables.save()
@public_api
def enable_analytics(self, enable: Optional[bool]) -> None:
"""
Enable or disable analytics for this DataContext.
With non-ephemeral contexts, this can be preserved via context.variables.save().
If set to None, the `GX_ANALYTICS_ENABLED` environment variable will be used.
"""
self.config.analytics_enabled = enable
self.variables.save()
def set_user_agent_str(self, user_agent_str: Optional[str]) -> None:
"""
Set the user agent string for this DataContext.
This method is used by GX internally for analytics tracking.
"""
self._user_agent_str = user_agent_str
@public_api
def update_project_config(
self, project_config: DataContextConfig | Mapping
) -> DataContextConfig:
"""Update the context's config with the values from another config object.
Args:
project_config: The config to use to update the context's internal state.
Returns:
The updated project config.
"""
self.config.update(project_config)
return self.config
# Properties
@property
def instance_id(self) -> str:
instance_id: Optional[str] = self.config_variables.get("instance_id")
if instance_id is None:
if self._in_memory_instance_id is not None:
return self._in_memory_instance_id
instance_id = str(uuid.uuid4())
self._in_memory_instance_id = instance_id
return instance_id
@property
def config_variables(self) -> Dict:
"""Loads config variables into cache, by calling _load_config_variables()
Returns: A dictionary containing config_variables from file or empty dictionary.
"""
if not self._config_variables:
self._config_variables = self._load_config_variables()
return self._config_variables
@property
@override
def config(self) -> DataContextConfig:
"""
Returns current DataContext's project_config
"""
# NOTE: <DataContextRefactor> _project_config is currently only defined in child classes.
# See if this can this be also defined in AbstractDataContext as abstract property
return self.variables.config
@property
def config_provider(self) -> _ConfigurationProvider:
return self._config_provider
@property
def root_directory(self) -> Optional[str]: # TODO: This should be a `pathlib.Path`
"""The root directory for configuration objects in the data context; the location in which
``great_expectations.yml`` is located.
"""
# NOTE: <DataContextRefactor> Why does this exist in AbstractDataContext? CloudDataContext and # noqa: E501 # FIXME CoP
# FileDataContext both use it. Determine whether this should stay here or in child classes
return getattr(self, "_context_root_directory", None)
@property
def project_config_with_variables_substituted(self) -> DataContextConfig:
return self.get_config_with_variables_substituted()
@property
def plugins_directory(self) -> Optional[str]:
"""The directory in which custom plugin modules should be placed."""
# NOTE: <DataContextRefactor> Why does this exist in AbstractDataContext? CloudDataContext and # noqa: E501 # FIXME CoP
# FileDataContext both use it. Determine whether this should stay here or in child classes
return self._normalize_absolute_or_relative_path(self.variables.plugins_directory)
@property
def stores(self) -> dict:
"""A single holder for all Stores in this context"""
return self._stores
@property
def datasource_store(self) -> DatasourceStore:
return self._datasource_store
@property
@public_api
def suites(self) -> SuiteFactory:
"""
Responsible for basic CRUD operations on a context's ExpectationSuites.
"""
if not self._suites:
raise gx_exceptions.DataContextError( # noqa: TRY003 # FIXME CoP
"DataContext requires a configured ExpectationsStore to persist ExpectationSuites."
)
return self._suites
@property
@public_api
def checkpoints(self) -> CheckpointFactory:
"""
Responsible for basic CRUD operations on a context's Checkpoints.
"""
if not self._checkpoints:
raise gx_exceptions.DataContextError( # noqa: TRY003 # FIXME CoP
"DataContext requires a configured CheckpointStore to persist Checkpoints."
)
return self._checkpoints
@property
@public_api
def validation_definitions(self) -> ValidationDefinitionFactory:
"""
Responsible for basic CRUD operations on a context's ValidationDefinitions.
"""
if not self._validation_definitions:
raise gx_exceptions.DataContextError( # noqa: TRY003 # FIXME CoP
"DataContext requires a configured ValidationDefinitionStore to persist "
"Validations."
)
return self._validation_definitions
@property
def expectations_store_name(self) -> Optional[str]:
return self.variables.expectations_store_name
@expectations_store_name.setter
@new_method_or_class(version="0.17.2")
def expectations_store_name(self, value: str) -> None:
"""Set the name of the expectations store.
Args:
value: New value for the expectations store name.
"""
self.variables.expectations_store_name = value
self._save_project_config()
@property
def expectations_store(self) -> ExpectationsStore:
return self.stores[self.expectations_store_name]
@property
def validation_results_store_name(self) -> Optional[str]:
return self.variables.validation_results_store_name
@validation_results_store_name.setter
@new_method_or_class(version="0.17.2")
def validation_results_store_name(self, value: str) -> None:
"""Set the name of the validations store.
Args:
value: New value for the validations store name.
"""
self.variables.validation_results_store_name = value
self._save_project_config()
@property
def validation_results_store(self) -> ValidationResultsStore:
return self.stores[self.validation_results_store_name]
@property
def validation_definition_store(self) -> ValidationDefinitionStore:
# Purposely not exposing validation_definition_store_name as a user-configurable property
return self.stores[DataContextConfigDefaults.DEFAULT_VALIDATION_DEFINITION_STORE_NAME.value]
@property
def checkpoint_store_name(self) -> Optional[str]:
from great_expectations.data_context.store.checkpoint_store import (
CheckpointStore,
)
if name := self.variables.checkpoint_store_name:
return name
if CheckpointStore.default_checkpoints_exist(
directory_path=self.root_directory # type: ignore[arg-type] # FIXME CoP
):
return DataContextConfigDefaults.DEFAULT_CHECKPOINT_STORE_NAME.value
return None
@checkpoint_store_name.setter
@new_method_or_class(version="0.17.2")
def checkpoint_store_name(self, value: str) -> None:
"""Set the name of the checkpoint store.
Args:
value: New value for the checkpoint store name.
"""
self.variables.checkpoint_store_name = value
self._save_project_config()
@property
def checkpoint_store(self) -> CheckpointStore:
return self.stores[self.checkpoint_store_name]
@property
@public_api
def data_sources(self) -> DataSourceManager:
"""
Responsible for basic CRUD operations on a context's DataSources.
"""
return self._data_sources
@property
def _include_rendered_content(self) -> bool:
return False
def _add_fluent_datasource(
self, datasource: Optional[FluentDatasource] = None, save_changes: bool = True, **kwargs
) -> FluentDatasource:
if datasource:
datasource_name = datasource.name
else:
datasource_name = kwargs.get("name", "")
if not datasource_name:
raise gx_exceptions.DataContextError( # noqa: TRY003 # FIXME CoP
"Can not write the fluent datasource, because no name was provided."
)
# We currently don't allow one to overwrite a datasource with this internal method
if datasource_name in self.data_sources.all():
raise gx_exceptions.DataContextError( # noqa: TRY003 # FIXME CoP
f"Can not write the fluent datasource {datasource_name} because a datasource of that " # noqa: E501 # FIXME CoP
"name already exists in the data context."
)
if not datasource:
ds_type = DataSourceManager.type_lookup[kwargs["type"]]
datasource = ds_type(**kwargs)
assert isinstance(datasource, FluentDatasource)
return_obj = self.data_sources.all().set_datasource(name=datasource_name, ds=datasource)
assert isinstance(return_obj, FluentDatasource)
return_obj._data_context = self
if save_changes:
self._save_project_config()
self.config.fluent_datasources[return_obj.name] = return_obj
return return_obj
def _update_fluent_datasource(
self, datasource: Optional[FluentDatasource] = None, **kwargs
) -> FluentDatasource:
if datasource:
datasource_name = datasource.name
else:
datasource_name = kwargs.get("name", "")
if not datasource_name:
raise gx_exceptions.DataContextError( # noqa: TRY003 # FIXME CoP
"Can not write the fluent datasource, because no name was provided."
)
if not datasource:
ds_type = DataSourceManager.type_lookup[kwargs["type"]]
updated_datasource = ds_type(**kwargs)
else:
updated_datasource = datasource
updated_datasource._rebuild_asset_data_connectors()
updated_datasource = self.data_sources.all().set_datasource(
name=datasource_name, ds=updated_datasource
)
if updated_datasource:
updated_datasource._data_context = self
self._save_project_config()
assert isinstance(updated_datasource, FluentDatasource)
self.config.fluent_datasources[datasource_name] = updated_datasource
return updated_datasource
def _delete_fluent_datasource(self, name: str, _call_store: bool = True) -> None:
"""
_call_store = False allows for local deletes without deleting the persisted storage datasource.
This should generally be avoided.
""" # noqa: E501 # FIXME CoP
self.fluent_config.pop_datasource(name, None)
datasource = self.data_sources.all().get(name)
if datasource:
if self._datasource_store.cloud_mode and _call_store:
self._datasource_store.delete(datasource)
else:
# Raise key error instead?
logger.info(f"No Datasource '{name}' to delete")
self.data_sources.all().pop(name, None)
del self.config.fluent_datasources[name]
def set_config(self, project_config: DataContextConfig) -> None:
self._project_config = project_config
self.variables.config = project_config
@overload
def add_datasource(
self,
name: str = ...,
initialize: bool = ...,
datasource: None = ...,
**kwargs,
) -> FluentDatasource | None:
"""
A `name` is provided.
`datasource` should not be provided.
"""
...
@overload
def add_datasource(
self,
name: None = ...,
initialize: bool = ...,
datasource: FluentDatasource = ...,
**kwargs,
) -> FluentDatasource | None:
"""
A `datasource` is provided.
`name` should not be provided.
"""
...
@new_argument(
argument_name="datasource",
version="0.15.49",
message="Pass in an existing Datasource instead of individual constructor arguments",
)
def add_datasource(
self,
name: str | None = None,
initialize: bool = True,
datasource: FluentDatasource | None = None,
**kwargs,
) -> FluentDatasource | None:
"""Add a new Datasource to the data context, with configuration provided as kwargs.
--Documentation--
- https://docs.greatexpectations.io/docs/terms/datasource
Args:
name: the name of the new Datasource to add
initialize: if False, add the Datasource to the config, but do not
initialize it, for example if a user needs to debug database connectivity.
datasource: an existing Datasource you wish to persist
kwargs: the configuration for the new Datasource
Returns:
Datasource instance added.
"""
return self._add_datasource(
name=name,
initialize=initialize,
datasource=datasource,
**kwargs,
)
@staticmethod
def _validate_add_datasource_args(
name: str | None,
datasource: FluentDatasource | None,
**kwargs,
) -> None:
if not ((datasource is None) ^ (name is None)):
error_message = (
"Must either pass in an existing 'datasource' or individual constructor arguments"
)
if datasource and name:
error_message += " (but not both)"
raise TypeError(error_message)
# "type" is only used in FDS so we check for its existence (equivalent for block-style would be "class_name" and "module_name") # noqa: E501 # FIXME CoP
if "type" in kwargs:
raise TypeError( # noqa: TRY003 # FIXME CoP
"Creation of fluent-datasources with individual arguments is not supported and should be done through the `context.data_sources` API." # noqa: E501 # FIXME CoP
)
def _add_datasource(
self,
name: str | None = None,
initialize: bool = True,
datasource: FluentDatasource | None = None,
**kwargs,
) -> FluentDatasource | None:
self._validate_add_datasource_args(name=name, datasource=datasource, **kwargs)
if isinstance(datasource, FluentDatasource):
self._add_fluent_datasource(
datasource=datasource,
)
else:
raise DataContextError("Datasource is not a FluentDatasource") # noqa: TRY003 # FIXME CoP
return datasource
def update_datasource(
self,
datasource: FluentDatasource,
) -> FluentDatasource:
"""Updates a Datasource that already exists in the store.
Args:
datasource: The Datasource object to update.
Returns:
The updated Datasource.
"""
return self._update_fluent_datasource(datasource=datasource)
    @overload
    def add_or_update_datasource(
        self,
        name: str = ...,
        datasource: None = ...,
        **kwargs,
    ) -> FluentDatasource:
        """Overload: a `name` (plus config kwargs) is provided.

        `datasource` should not be provided.
        """
        ...
    @overload
    def add_or_update_datasource(
        self,
        name: None = ...,
        datasource: FluentDatasource = ...,
        **kwargs,
    ) -> FluentDatasource:
        """Overload: an existing `datasource` object is provided.

        `name` should not be provided.
        """
        ...
@new_method_or_class(version="0.15.48")
@deprecated_method_or_class(version="1.3.0")
def add_or_update_datasource(
self,
name: str | None = None,
datasource: FluentDatasource | None = None,
**kwargs,
) -> FluentDatasource:
"""Add a new Datasource or update an existing one on the context depending on whether
it already exists or not. The configuration is provided as kwargs.
Args:
name: The name of the Datasource to add or update.
datasource: an existing Datasource you wish to persist.
kwargs: Any relevant keyword args to use when adding or updating the target Datasource named `name`.
Returns:
The Datasource added or updated by the input `kwargs`.
""" # noqa: E501 # FIXME CoP
# deprecated-v1.3.0
warnings.warn(
"add_or_update_datasource() from the DataContext is deprecated and will be removed "
"in a future version of GX. Please use `context.data_sources.add_or_update` instead.",
category=DeprecationWarning,
)
self._validate_add_datasource_args(name=name, datasource=datasource)
return_datasource: FluentDatasource
if "type" in kwargs:
assert name, 'Fluent Datasource kwargs must include the keyword "name"'
kwargs["name"] = name
if name in self.data_sources.all():
self._update_fluent_datasource(**kwargs)
else:
self._add_fluent_datasource(**kwargs)
return_datasource = self.data_sources.all()[name]
else:
if datasource is None:
raise ValueError("Either datasource or kwargs are required") # noqa: TRY003 # FIXME CoP
if datasource.name in self.data_sources.all():
self._update_fluent_datasource(datasource=datasource)
else:
self._add_fluent_datasource(datasource=datasource)
return_datasource = self.data_sources.all()[datasource.name]
return return_datasource
def get_site_names(self) -> List[str]:
"""Get a list of configured site names."""
return list(self.variables.data_docs_sites.keys()) # type: ignore[union-attr] # FIXME CoP
def get_config_with_variables_substituted(
self, config: Optional[DataContextConfig] = None
) -> DataContextConfig:
"""
Substitute vars in config of form ${var} or $(var) with values found in the following places,
in order of precedence: gx_cloud_config (for Data Contexts in GX Cloud mode), runtime_environment,
environment variables, config_variables, or gx_cloud_config_variable_defaults (allows certain variables to
be optional in GX Cloud mode).
""" # noqa: E501 # FIXME CoP
if not config:
config = self._project_config
return DataContextConfig(**self.config_provider.substitute_config(config))
def list_stores(self) -> List[Store]:
"""List currently-configured Stores on this context"""
stores = []
for (
name,
value,
) in self.variables.stores.items(): # type: ignore[union-attr] # FIXME CoP
store_config = copy.deepcopy(value)
store_config["name"] = name
masked_config = PasswordMasker.sanitize_config(store_config)
stores.append(masked_config)
return stores # type: ignore[return-value] # FIXME CoP
def list_active_stores(self) -> List[Store]:
"""
List active Stores on this context. Active stores are identified by setting the following parameters:
expectations_store_name,
validation_results_store_name,
checkpoint_store_name
""" # noqa: E501 # FIXME CoP
active_store_names: List[str] = [
self.expectations_store_name, # type: ignore[list-item] # FIXME CoP
self.validation_results_store_name, # type: ignore[list-item] # FIXME CoP
]
try:
active_store_names.append(self.checkpoint_store_name) # type: ignore[arg-type] # FIXME CoP
except (AttributeError, gx_exceptions.InvalidTopLevelConfigKeyError):
logger.info("Checkpoint store is not configured; omitting it from active stores")
return [
store
for store in self.list_stores()
if store.get("name") in active_store_names # type: ignore[arg-type,operator] # FIXME CoP
]
def get_datasource(self, name: str = "default") -> FluentDatasource:
"""Retrieve a given Datasource by name from the context's underlying DatasourceStore.
Args:
name: The name of the target datasource.
Returns:
The target datasource.
Raises:
ValueError: The input `datasource_name` is None.
"""
# deprecated-v1.2.0
warnings.warn(
"context.get_datasource is deprecated as of v1.2.0. "
"Please use context.data_sources.get instead",
category=DeprecationWarning,
)
try:
return self.data_sources.get(name)
except KeyError as e:
raise ValueError(str(e)) from e
def add_store(self, name: str, config: StoreConfigTypedDict) -> Store:
"""Add a new Store to the DataContext.
Args:
name: the name to associate with the created store.
config: the config to use to construct the store.
Returns:
The instantiated Store.
"""
store = self._build_store_from_config(name, config)
# Both the config and the actual stores need to be kept in sync
self.config.stores[name] = config
self._stores[name] = store
self._save_project_config()
return store
@public_api
@new_method_or_class(version="0.17.2")
def add_data_docs_site(self, site_name: str, site_config: DataDocsSiteConfigTypedDict) -> None:
"""Add a new Data Docs Site to the DataContext.
Example site config dicts can be found in our "Host and share Data Docs" guides.
Args:
site_name: New site name to add.
site_config: Config dict for the new site.
"""
if self.config.data_docs_sites is not None:
if site_name in self.config.data_docs_sites:
raise gx_exceptions.InvalidKeyError( # noqa: TRY003 # FIXME CoP
f"Data Docs Site `{site_name}` already exists in the Data Context."
)
sites = self.config.data_docs_sites
sites[site_name] = site_config
self.variables.data_docs_sites = sites
self._save_project_config()
@public_api
@new_method_or_class(version="0.17.2")
def list_data_docs_sites(
self,
) -> dict[str, DataDocsSiteConfigTypedDict]:
"""List all Data Docs Sites with configurations."""
if self.config.data_docs_sites is None:
return {}
else:
return self.config.data_docs_sites
@public_api
@new_method_or_class(version="0.17.2")
def update_data_docs_site(
self, site_name: str, site_config: DataDocsSiteConfigTypedDict
) -> None:
"""Update an existing Data Docs Site.
Example site config dicts can be found in our "Host and share Data Docs" guides.
Args:
site_name: Site name to update.
site_config: Config dict that replaces the existing.
"""
if self.config.data_docs_sites is not None:
if site_name not in self.config.data_docs_sites:
raise gx_exceptions.InvalidKeyError( # noqa: TRY003 # FIXME CoP
f"Data Docs Site `{site_name}` does not already exist in the Data Context."
)
sites = self.config.data_docs_sites
sites[site_name] = site_config
self.variables.data_docs_sites = sites
self._save_project_config()
@public_api
@new_method_or_class(version="0.17.2")
def delete_data_docs_site(self, site_name: str):
"""Delete an existing Data Docs Site.
Args:
site_name: Site name to delete.
"""
if self.config.data_docs_sites is not None:
if site_name not in self.config.data_docs_sites:
raise gx_exceptions.InvalidKeyError( # noqa: TRY003 # FIXME CoP
f"Data Docs Site `{site_name}` does not already exist in the Data Context."
)
sites = self.config.data_docs_sites
sites.pop(site_name)
self.variables.data_docs_sites = sites
self._save_project_config()
@new_method_or_class(version="0.15.48")
def delete_store(self, name: str) -> None:
"""Delete an existing Store from the DataContext.
Args:
name: The name of the Store to be deleted.
Raises:
StoreConfigurationError if the target Store is not found.
"""
if name not in self.config.stores and name not in self._stores:
raise gx_exceptions.StoreConfigurationError( # noqa: TRY003 # FIXME CoP
f'Attempted to delete a store named: "{name}". It is not a configured store.'
)
# Both the config and the actual stores need to be kept in sync
self.config.stores.pop(name, None)
self._stores.pop(name, None)
self._save_project_config()
def list_datasources(self) -> List[dict]:
"""List the configurations of the datasources associated with this context.
Note that any sensitive values are obfuscated before being returned.
Returns:
A list of dictionaries representing datasource configurations.
"""
return [ds.dict() for ds in self.data_sources.all().values()]
def delete_datasource(self, name: Optional[str]) -> None:
"""Delete a given Datasource by name.
Note that this method causes deletion from the underlying DatasourceStore.
Args:
name: The name of the target datasource.
Raises:
ValueError: The `datasource_name` isn't provided or cannot be found.
"""
if not name:
raise ValueError("Datasource names must be a datasource name") # noqa: TRY003 # FIXME CoP
self._delete_fluent_datasource(name)
self._save_project_config()
    def get_validator(  # noqa: PLR0913 # FIXME CoP
        self,
        datasource_name: Optional[str] = None,
        data_connector_name: Optional[str] = None,
        data_asset_name: Optional[str] = None,
        batch: Optional[Batch] = None,
        batch_list: Optional[List[Batch]] = None,
        batch_request: Optional[Union[BatchRequestBase, FluentBatchRequest]] = None,
        batch_request_list: Optional[List[BatchRequestBase]] = None,
        batch_data: Optional[Any] = None,
        data_connector_query: Optional[Union[IDDict, dict]] = None,
        batch_identifiers: Optional[dict] = None,
        limit: Optional[int] = None,
        index: Optional[Union[int, list, tuple, slice, str]] = None,
        custom_filter_function: Optional[Callable] = None,
        sampling_method: Optional[str] = None,
        sampling_kwargs: Optional[dict] = None,
        partitioner_method: Optional[str] = None,
        partitioner_kwargs: Optional[dict] = None,
        runtime_parameters: Optional[dict] = None,
        query: Optional[str] = None,
        path: Optional[str] = None,
        batch_filter_parameters: Optional[dict] = None,
        batch_spec_passthrough: Optional[dict] = None,
        expectation_suite_id: Optional[str] = None,
        expectation_suite_name: Optional[str] = None,
        expectation_suite: Optional[ExpectationSuite] = None,
        create_expectation_suite_with_name: Optional[str] = None,
        **kwargs,
    ) -> Validator:
        """Retrieve a Validator with a batch list and an `ExpectationSuite`.
        `get_validator` first resolves an `ExpectationSuite`, then retrieves a batch list used to
        validate the Batches in the list.
        Args:
            datasource_name: The name of the Datasource that defines the Data Asset to retrieve the batch for
            data_connector_name: The Data Connector within the datasource for the Data Asset
            data_asset_name: The name of the Data Asset within the Data Connector
            batch: The Batch to use with the Validator
            batch_list: The List of Batches to use with the Validator
            batch_request: Encapsulates all the parameters used here to retrieve a BatchList. Use either
                `batch_request` or the other params (but not both)
            batch_request_list: A List of `BatchRequest` to use with the Validator
            batch_data: Provides runtime data for the batch; is added as the key `batch_data` to
                the `runtime_parameters` dictionary of a BatchRequest
            query: Provides runtime data for the batch; is added as the key `query` to
                the `runtime_parameters` dictionary of a BatchRequest
            path: Provides runtime data for the batch; is added as the key `path` to
                the `runtime_parameters` dictionary of a BatchRequest
            runtime_parameters: Specifies runtime parameters for the BatchRequest; can includes keys `batch_data`,
                `query`, and `path`
            data_connector_query: Used to specify connector query parameters; specifically `batch_filter_parameters`,
                `limit`, `index`, and `custom_filter_function`
            batch_identifiers: Any identifiers of batches for the BatchRequest
            batch_filter_parameters: Filter parameters used in the data connector query
            limit: Part of the data_connector_query, limits the number of batches in the batch list
            index: Part of the data_connector_query, used to specify the index of which batch to return. Negative
                numbers retrieve from the end of the list (ex: `-1` retrieves the last or latest batch)
            custom_filter_function: A `Callable` function that accepts `batch_identifiers` and returns a `bool`
            sampling_method: The method used to sample Batch data (see: Partitioning and Sampling)
            sampling_kwargs: Arguments for the sampling method
            partitioner_method: The method used to partition the Data Asset into Batches
            partitioner_kwargs: Arguments for the partitioning method
            batch_spec_passthrough: Arguments specific to the `ExecutionEngine` that aid in Batch retrieval
            expectation_suite_id: The identifier of the ExpectationSuite to retrieve from the DataContext
                (can be used in place of `expectation_suite_name`)
            expectation_suite_name: The name of the ExpectationSuite to retrieve from the DataContext
            expectation_suite: The ExpectationSuite to use with the validator
            create_expectation_suite_with_name: Creates a Validator with a new ExpectationSuite with the provided name
            **kwargs: Used to specify either `batch_identifiers` or `batch_filter_parameters`
        Returns:
            Validator: A Validator with the specified Batch list and ExpectationSuite
        Raises:
            DatasourceError: If the specified `datasource_name` does not exist in the DataContext
            TypeError: If the specified types of the `batch_request` are not supported, or if the
                `datasource_name` is not a `str`
            ValueError: If more than one exclusive parameter is specified (ex: specifing more than one
                of `batch_data`, `query` or `path`), or if the `ExpectationSuite` cannot be created or
                retrieved using either the provided name or identifier
        """  # noqa: E501 # FIXME CoP
        # Resolve the suite first so that mutually-exclusive suite arguments fail fast,
        # before any (potentially expensive) batch retrieval happens.
        expectation_suite = self._get_expectation_suite_from_inputs(
            expectation_suite=expectation_suite,
            expectation_suite_name=expectation_suite_name,
            create_expectation_suite_with_name=create_expectation_suite_with_name,
            expectation_suite_id=expectation_suite_id,
        )
        # All batch-selection arguments are forwarded verbatim; mutual-exclusivity
        # validation happens inside the helper.
        batch_list = self._get_batch_list_from_inputs(
            datasource_name=datasource_name,
            data_connector_name=data_connector_name,
            data_asset_name=data_asset_name,
            batch=batch,
            batch_list=batch_list,
            batch_request=batch_request,
            batch_request_list=batch_request_list,
            batch_data=batch_data,
            data_connector_query=data_connector_query,
            batch_identifiers=batch_identifiers,
            limit=limit,
            index=index,
            custom_filter_function=custom_filter_function,
            sampling_method=sampling_method,
            sampling_kwargs=sampling_kwargs,
            partitioner_method=partitioner_method,
            partitioner_kwargs=partitioner_kwargs,
            runtime_parameters=runtime_parameters,
            query=query,
            path=path,
            batch_filter_parameters=batch_filter_parameters,
            batch_spec_passthrough=batch_spec_passthrough,
            **kwargs,
        )
        return self.get_validator_using_batch_list(
            expectation_suite=expectation_suite,
            batch_list=batch_list,
        )
    def _get_batch_list_from_inputs(  # noqa: PLR0913 # FIXME CoP
        self,
        datasource_name: str | None,
        data_connector_name: str | None,
        data_asset_name: str | None,
        batch: Batch | None,
        batch_list: List[Batch] | None,
        batch_request: BatchRequestBase | FluentBatchRequest | None,
        batch_request_list: List[BatchRequestBase] | None,
        batch_data: Any,
        data_connector_query: Union[IDDict, dict] | None,
        batch_identifiers: dict | None,
        limit: int | None,
        index: int | list | tuple | slice | str | None,
        custom_filter_function: Callable | None,
        sampling_method: str | None,
        sampling_kwargs: dict | None,
        partitioner_method: str | None,
        partitioner_kwargs: dict | None,
        runtime_parameters: dict | None,
        query: str | None,
        path: str | None,
        batch_filter_parameters: dict | None,
        batch_spec_passthrough: dict | None,
        **kwargs,
    ) -> List[Batch]:
        """Resolve the mutually-exclusive batch arguments into a list of Batches.

        At most one of ``batch``, ``batch_list``, ``batch_request`` or
        ``batch_request_list`` may be supplied; any remaining keyword arguments are
        forwarded to ``get_last_batch`` for each request.

        Raises:
            ValueError: If more than one of the exclusive batch arguments is given.
        """
        if (
            sum(
                bool(x)
                for x in [
                    batch is not None,
                    batch_list is not None,
                    batch_request is not None,
                    batch_request_list is not None,
                ]
            )
            > 1
        ):
            raise ValueError(  # noqa: TRY003 # FIXME CoP
                "No more than one of batch, batch_list, batch_request, or batch_request_list can be specified"  # noqa: E501 # FIXME CoP
            )
        # Pre-assembled batches short-circuit any request resolution below.
        if batch_list:
            return batch_list
        if batch:
            return [batch]
        computed_batch_list: List[Batch] = []
        if not batch_request_list:
            # batch_request could actually be None here since we do explicit None checks in the
            # sum check above while here we do a truthy check.
            batch_request_list = [batch_request]  # type: ignore[list-item] # FIXME CoP
        # One resolved Batch per request; each request is combined with the shared
        # selection arguments passed to this method.
        for batch_req in batch_request_list:
            computed_batch_list.append(
                self.get_last_batch(
                    datasource_name=datasource_name,
                    data_connector_name=data_connector_name,
                    data_asset_name=data_asset_name,
                    batch_request=batch_req,
                    batch_data=batch_data,
                    data_connector_query=data_connector_query,
                    batch_identifiers=batch_identifiers,
                    limit=limit,
                    index=index,
                    custom_filter_function=custom_filter_function,
                    sampling_method=sampling_method,
                    sampling_kwargs=sampling_kwargs,
                    partitioner_method=partitioner_method,
                    partitioner_kwargs=partitioner_kwargs,
                    runtime_parameters=runtime_parameters,
                    query=query,
                    path=path,
                    batch_filter_parameters=batch_filter_parameters,
                    batch_spec_passthrough=batch_spec_passthrough,
                    **kwargs,
                )
            )
        return computed_batch_list
def _get_expectation_suite_from_inputs(
self,
expectation_suite: ExpectationSuite | None = None,
expectation_suite_name: str | None = None,
create_expectation_suite_with_name: str | None = None,
expectation_suite_id: str | None = None,
) -> ExpectationSuite | None:
"""Get an expectation suite from optional inputs. Also validates inputs.
Args:
expectation_suite: An ExpectationSuite object
expectation_suite_name: The name of the ExpectationSuite to retrieve from the DataContext
create_expectation_suite_with_name: Creates a new ExpectationSuite with the provided name
expectation_suite_id: The identifier of the ExpectationSuite to retrieve from the DataContext
(can be used in place of `expectation_suite_name`)
Returns:
An ExpectationSuite instance
Raises:
ValueError if the inputs are not valid
""" # noqa: E501 # FIXME CoP
if (
sum(
bool(x)
for x in [
expectation_suite is not None,
expectation_suite_name is not None,
create_expectation_suite_with_name is not None,
expectation_suite_id is not None,
]
)
> 1
):
raise ValueError( # noqa: TRY003 # FIXME CoP
"No more than one of expectation_suite_name, "
f"{'expectation_suite_id, ' if expectation_suite_id else ''}"
"expectation_suite, or create_expectation_suite_with_name can be specified"
)
if expectation_suite_id is not None:
expectation_suite = next(
suite for suite in self.suites.all() if suite.id == expectation_suite_id
)
if expectation_suite_name is not None:
expectation_suite = self.suites.get(
expectation_suite_name,
)
if create_expectation_suite_with_name is not None:
expectation_suite = self.suites.add(
ExpectationSuite(name=create_expectation_suite_with_name)
)
return expectation_suite
    # noinspection PyUnusedLocal
    def get_validator_using_batch_list(
        self,
        expectation_suite: ExpectationSuite | None,
        batch_list: Sequence[Union[Batch, FluentBatch]],
        **kwargs: Optional[dict],
    ) -> Validator:
        """
        Build a Validator for an already-retrieved list of Batches.

        Args:
            expectation_suite: The suite the Validator should validate against (may be None).
            batch_list: Non-empty sequence of Batches; the last one supplies the
                ExecutionEngine shared by all of them.
            **kwargs: Unused; accepted for call-site compatibility.

        Returns:
            A Validator wired to the batches' shared ExecutionEngine.

        Raises:
            InvalidBatchRequestError: If ``batch_list`` is empty.
        """
        if len(batch_list) == 0:
            raise gx_exceptions.InvalidBatchRequestError(  # noqa: TRY003 # FIXME CoP
                """Validator could not be created because BatchRequest returned an empty batch_list.
                Please check your parameters and try again."""
            )
        # We get a single batch_definition so we can get the execution_engine here. All batches will share the same one  # noqa: E501 # FIXME CoP
        # So the batch itself doesn't matter. But we use -1 because that will be the latest batch loaded.  # noqa: E501 # FIXME CoP
        execution_engine: ExecutionEngine
        batch = batch_list[-1]
        assert isinstance(batch, FluentBatch)
        execution_engine = batch.data.execution_engine
        validator = Validator(
            execution_engine=execution_engine,
            interactive_evaluation=True,
            expectation_suite=expectation_suite,
            data_context=self,
            batches=batch_list,
        )
        return validator
    def get_last_batch(  # noqa: PLR0913 # FIXME CoP
        self,
        datasource_name: Optional[str] = None,
        data_connector_name: Optional[str] = None,
        data_asset_name: Optional[str] = None,
        batch_request: Optional[BatchRequestBase] = None,
        batch_data: Optional[Any] = None,
        data_connector_query: Optional[dict] = None,
        batch_identifiers: Optional[dict] = None,
        limit: Optional[int] = None,
        index: Optional[Union[int, list, tuple, slice, str]] = None,
        custom_filter_function: Optional[Callable] = None,
        sampling_method: Optional[str] = None,
        sampling_kwargs: Optional[dict] = None,
        partitioner_method: Optional[str] = None,
        partitioner_kwargs: Optional[dict] = None,
        runtime_parameters: Optional[dict] = None,
        query: Optional[str] = None,
        path: Optional[str] = None,
        batch_filter_parameters: Optional[dict] = None,
        batch_spec_passthrough: Optional[dict] = None,
        batch_parameters: Optional[Union[dict, BatchParameters]] = None,
        **kwargs: Optional[dict],
    ) -> Batch:
        """Get the single latest Batch selected by a variety of flexible input types.

        NOTE: the previous docstring described a list-returning `get_batch_list`; this
        method builds one batch request from its arguments and returns the last
        (latest) Batch the resolved datasource produces for it.

        In contrast to virtually all other methods in the class, it does not require
        typed or nested inputs. The batch-selection keyword arguments mirror those of
        `get_validator` (either pass `batch_request`, or the individual selection
        params -- but not both); `batch_parameters` additionally carries options for a
        `FluentBatchRequest`.

        Args:
            datasource_name: The name of the Datasource that defines the Data Asset to retrieve the batch for
            data_connector_name: The Data Connector within the datasource for the Data Asset
            data_asset_name: The name of the Data Asset within the Data Connector
            batch_request: Encapsulates all the parameters used here to retrieve a Batch. Use either
                `batch_request` or the other params (but not both)
            batch_data: Runtime data for the batch (`runtime_parameters["batch_data"]`)
            query: Runtime data for the batch (`runtime_parameters["query"]`)
            path: Runtime data for the batch (`runtime_parameters["path"]`)
            runtime_parameters: Runtime parameters for the BatchRequest; can include keys `batch_data`,
                `query`, and `path`
            data_connector_query: Connector query parameters; specifically `batch_filter_parameters`,
                `limit`, `index`, and `custom_filter_function`
            batch_identifiers: Any identifiers of batches for the BatchRequest
            batch_filter_parameters: Filter parameters used in the data connector query
            limit: Limits the number of batches considered
            index: Specifies which batch to select; negative numbers count from the end
                (ex: `-1` selects the last or latest batch)
            custom_filter_function: A `Callable` that accepts `batch_identifiers` and returns a `bool`
            sampling_method: The method used to sample Batch data (see: Partitioning and Sampling)
            sampling_kwargs: Arguments for the sampling method
            partitioner_method: The method used to partition the Data Asset into Batches
            partitioner_kwargs: Arguments for the partitioning method
            batch_spec_passthrough: Arguments specific to the `ExecutionEngine` that aid in Batch retrieval
            batch_parameters: Options for `FluentBatchRequest`
            **kwargs: Used to specify either `batch_identifiers` or `batch_filter_parameters`

        Returns:
            Batch: The single latest Batch matching the request.

        Raises:
            DatasourceError: If the specified `datasource_name` does not exist in the DataContext
            TypeError: If the specified types of the `batch_request` are not supported, or if the
                `datasource_name` is not a `str`
            ValueError: If more than one exclusive parameter is specified (ex: specifing more than one
                of `batch_data`, `query` or `path`)
        """  # noqa: E501 # FIXME CoP
        return self._get_last_batch(
            datasource_name=datasource_name,
            data_connector_name=data_connector_name,
            data_asset_name=data_asset_name,
            batch_request=batch_request,
            batch_data=batch_data,
            data_connector_query=data_connector_query,
            batch_identifiers=batch_identifiers,
            limit=limit,
            index=index,
            custom_filter_function=custom_filter_function,
            sampling_method=sampling_method,
            sampling_kwargs=sampling_kwargs,
            partitioner_method=partitioner_method,
            partitioner_kwargs=partitioner_kwargs,
            runtime_parameters=runtime_parameters,
            query=query,
            path=path,
            batch_filter_parameters=batch_filter_parameters,
            batch_spec_passthrough=batch_spec_passthrough,
            batch_parameters=batch_parameters,
            **kwargs,
        )
    def _get_last_batch(  # noqa: PLR0913 # FIXME CoP
        self,
        datasource_name: Optional[str] = None,
        data_connector_name: Optional[str] = None,
        data_asset_name: Optional[str] = None,
        batch_request: Optional[BatchRequestBase] = None,
        batch_data: Optional[Any] = None,
        data_connector_query: Optional[dict] = None,
        batch_identifiers: Optional[dict] = None,
        limit: Optional[int] = None,
        index: Optional[Union[int, list, tuple, slice, str]] = None,
        custom_filter_function: Optional[Callable] = None,
        sampling_method: Optional[str] = None,
        sampling_kwargs: Optional[dict] = None,
        partitioner_method: Optional[str] = None,
        partitioner_kwargs: Optional[dict] = None,
        runtime_parameters: Optional[dict] = None,
        query: Optional[str] = None,
        path: Optional[str] = None,
        batch_filter_parameters: Optional[dict] = None,
        batch_spec_passthrough: Optional[dict] = None,
        batch_parameters: Optional[Union[dict, BatchParameters]] = None,
        **kwargs: Optional[dict],
    ) -> Batch:
        """Build a batch request from the acceptable arguments and fetch its Batch.

        Raises:
            DatasourceError: If the resolved datasource name is not registered on
                this context.
        """
        result = get_batch_request_from_acceptable_arguments(
            datasource_name=datasource_name,
            data_connector_name=data_connector_name,
            data_asset_name=data_asset_name,
            batch_request=batch_request,
            batch_data=batch_data,
            data_connector_query=data_connector_query,
            batch_identifiers=batch_identifiers,
            limit=limit,
            index=index,
            custom_filter_function=custom_filter_function,
            sampling_method=sampling_method,
            sampling_kwargs=sampling_kwargs,
            partitioner_method=partitioner_method,
            partitioner_kwargs=partitioner_kwargs,
            runtime_parameters=runtime_parameters,
            query=query,
            path=path,
            batch_filter_parameters=batch_filter_parameters,
            batch_spec_passthrough=batch_spec_passthrough,
            batch_parameters=batch_parameters,
            **kwargs,
        )
        # Use the request's datasource name: it may have been inferred/normalized
        # while building the request above.
        datasource_name = result.datasource_name
        datasource = self.data_sources.all().get(datasource_name)
        if not datasource:
            raise gx_exceptions.DatasourceError(
                datasource_name,
                "The given datasource could not be retrieved from the DataContext; "
                "please confirm that your configuration is accurate.",
            )
        return datasource.get_batch(batch_request=result)
def _validate_datasource_names(self, datasource_names: list[str] | str | None) -> list[str]:
if datasource_names is None:
datasource_names = [datasource["name"] for datasource in self.list_datasources()]
elif isinstance(datasource_names, str):
datasource_names = [datasource_names]
elif not isinstance(datasource_names, list):
raise ValueError( # noqa: TRY003 # FIXME CoP
"Datasource names must be a datasource name, list of datasource names or None (to list all datasources)" # noqa: E501 # FIXME CoP
)
return datasource_names
    def get_available_data_asset_names(
        self,
        datasource_names: str | list[str] | None = None,
        batch_kwargs_generator_names: str | list[str] | None = None,
    ) -> dict[str, BlockConfigDataAssetNames | FluentDataAssetNames]:
        """Inspect datasource and batch kwargs generators to provide available data_asset objects.

        Args:
            datasource_names: List of datasources for which to provide available data asset name objects.
                              If None, return available data assets for all datasources.
            batch_kwargs_generator_names: List of batch kwargs generators for which to provide available data_asset_name objects.

        Returns:
            data_asset_names: Dictionary describing available data assets

        Raises:
            ValueError: `datasource_names` is not None, a string, or list of strings.
        """  # noqa: E501 # FIXME CoP
        fluent_data_asset_names: dict[str, BlockConfigDataAssetNames | FluentDataAssetNames] = {}
        datasource_names = self._validate_datasource_names(datasource_names)
        # TODO: V1-222 batch_kwargs_generator_names is legacy and should be removed for V1
        # TODO: conditional FDS vs BDS datasource logic should be removed for V1
        if batch_kwargs_generator_names is not None:
            if isinstance(batch_kwargs_generator_names, str):
                batch_kwargs_generator_names = [batch_kwargs_generator_names]
            # Generator names are accepted only as one-per-datasource or a single one
            # with a single datasource; any other pairing is ambiguous.
            if len(batch_kwargs_generator_names) == len(datasource_names):
                for datasource_name in datasource_names:
                    datasource = self.data_sources.get(datasource_name)
                    fluent_data_asset_names[datasource_name] = sorted(datasource.get_asset_names())
            elif len(batch_kwargs_generator_names) == 1:
                datasource = self.data_sources.get(datasource_names[0])
                fluent_data_asset_names[datasource_names[0]] = sorted(datasource.get_asset_names())
            else:
                raise ValueError(  # noqa: TRY003 # FIXME CoP
                    "If providing batch kwargs generator, you must either specify one for each datasource or only "  # noqa: E501 # FIXME CoP
                    "one datasource."
                )
        else:  # generator_names is None
            for datasource_name in datasource_names:
                try:
                    datasource = self.data_sources.get(datasource_name)
                    fluent_data_asset_names[datasource_name] = sorted(datasource.get_asset_names())
                except KeyError:
                    # handle the edge case of a non-existent datasource
                    # NOTE(review): this fallback stores an empty dict while the success
                    # path stores a sorted list -- presumably it should be an empty list;
                    # confirm with callers before changing.
                    fluent_data_asset_names[datasource_name] = {}
        return fluent_data_asset_names
def build_batch_kwargs(
self,
datasource,
batch_kwargs_generator,
data_asset_name=None,
partition_id=None,
**kwargs,
):
"""Builds batch kwargs using the provided datasource, batch kwargs generator, and batch_parameters.
Args:
datasource (str): the name of the datasource for which to build batch_kwargs
batch_kwargs_generator (str): the name of the batch kwargs generator to use to build batch_kwargs
data_asset_name (str): an optional name batch_parameter
**kwargs: additional batch_parameters
Returns:
BatchKwargs
""" # noqa: E501 # FIXME CoP
datasource_obj = self.data_sources.get(datasource)
batch_kwargs = datasource_obj.build_batch_kwargs(
batch_kwargs_generator=batch_kwargs_generator,
data_asset_name=data_asset_name,
partition_id=partition_id,
**kwargs,
)
return batch_kwargs
def open_data_docs(
self,
resource_identifier: Optional[str] = None,
site_name: Optional[str] = None,
only_if_exists: bool = True,
) -> None:
"""
A stdlib cross-platform way to open a file in a browser.
Args:
resource_identifier: ExpectationSuiteIdentifier,
ValidationResultIdentifier or any other type's identifier. The
argument is optional - when not supplied, the method returns the
URL of the index page.
site_name: Optionally specify which site to open. If not specified,
open all docs found in the project.
only_if_exists: Optionally specify flag to pass to "self.get_docs_sites_urls()".
"""
return self._open_data_docs(
resource_identifier=resource_identifier,
site_name=site_name,
only_if_exists=only_if_exists,
)
def _open_data_docs(
self,
resource_identifier: Optional[str] = None,
site_name: Optional[str] = None,
only_if_exists: bool = True,
) -> None:
data_docs_urls = self.get_docs_sites_urls(
resource_identifier=resource_identifier,
site_name=site_name,
only_if_exists=only_if_exists,
)
nullable_urls = [site["site_url"] for site in data_docs_urls]
urls_to_open = [url for url in nullable_urls if url is not None]
if not urls_to_open:
raise gx.exceptions.NoDataDocsError
for url in urls_to_open:
logger.debug(f"Opening Data Docs found here: {url}")
self._open_url_in_browser(url)
    @staticmethod
    def _open_url_in_browser(url: str) -> None:
        """Open ``url`` in the user's default web browser via the stdlib ``webbrowser`` module."""
        webbrowser.open(url)
    def get_docs_sites_urls(
        self,
        resource_identifier: ExpectationSuiteIdentifier
        | ValidationResultIdentifier
        | str
        | None = None,
        site_name: Optional[str] = None,
        only_if_exists: bool = True,
        site_names: Optional[List[str]] = None,
    ) -> List[Dict[str, Optional[str]]]:
        """
        Get URLs for a resource for all data docs sites.

        This function will return URLs for any configured site even if the sites
        have not been built yet.

        Args:
            resource_identifier (object): optional. It can be an identifier of
                ExpectationSuite's, ValidationResults and other resources that
                have typed identifiers. If not provided, the method will return
                the URLs of the index page.
            site_name: Optionally specify which site to open. If not specified,
                return all urls in the project.
            only_if_exists: flag forwarded to the site builder's get_resource_url.
            site_names: Optionally specify which sites are active. Sites not in
                this list are not processed, even if specified in site_name.

        Returns:
            list: a list of URLs. Each item is the URL for the resource for a
                data docs site

        Raises:
            DataContextError: if site_name is given but not configured.
        """
        unfiltered_sites = self.variables.data_docs_sites

        # Filter out sites that are not in site_names
        sites = (
            {k: v for k, v in unfiltered_sites.items() if k in site_names}  # type: ignore[union-attr] # FIXME CoP
            if site_names
            else unfiltered_sites
        )

        if not sites:
            logger.debug("Found no data_docs_sites.")
            return []
        logger.debug(f"Found {len(sites)} data_docs_sites.")

        if site_name:
            if site_name not in sites:
                raise gx_exceptions.DataContextError(  # noqa: TRY003 # FIXME CoP
                    f"Could not find site named {site_name}. Please check your configurations"
                )
            # Single-site fast path: return a one-element list for the named site.
            site = sites[site_name]
            site_builder = self._load_site_builder_from_site_config(site)
            url = site_builder.get_resource_url(
                resource_identifier=resource_identifier, only_if_exists=only_if_exists
            )
            return [{"site_name": site_name, "site_url": url}]

        # No site_name filter: collect a URL entry for every remaining site.
        site_urls: List[Dict[str, Optional[str]]] = []
        for _site_name, site_config in sites.items():
            site_builder = self._load_site_builder_from_site_config(site_config)
            url = site_builder.get_resource_url(
                resource_identifier=resource_identifier, only_if_exists=only_if_exists
            )
            site_urls.append({"site_name": _site_name, "site_url": url})

        return site_urls
def _load_site_builder_from_site_config(self, site_config) -> SiteBuilder:
default_module_name = "great_expectations.render.renderer.site_builder"
site_builder = instantiate_class_from_config(
config=site_config,
runtime_environment={
"data_context": self,
"root_directory": self.root_directory,
},
config_defaults={"module_name": default_module_name},
)
if not site_builder:
raise gx_exceptions.ClassInstantiationError(
module_name=default_module_name,
package_name=None,
class_name=site_config["class_name"],
)
return site_builder
def clean_data_docs(self, site_name=None) -> bool:
"""
Clean a given data docs site.
This removes all files from the configured Store.
Args:
site_name (str): Optional, the name of the site to clean. If not
specified, all sites will be cleaned.
"""
data_docs_sites = self.variables.data_docs_sites
if not data_docs_sites:
raise gx_exceptions.DataContextError( # noqa: TRY003 # FIXME CoP
"No data docs sites were found on this DataContext, therefore no sites will be cleaned.", # noqa: E501 # FIXME CoP
)
data_docs_site_names = list(data_docs_sites.keys())
if site_name:
if site_name not in data_docs_site_names:
raise gx_exceptions.DataContextError( # noqa: TRY003 # FIXME CoP
f"The specified site name `{site_name}` does not exist in this project."
)
return self._clean_data_docs_site(site_name)
cleaned = []
for existing_site_name in data_docs_site_names:
cleaned.append(self._clean_data_docs_site(existing_site_name))
return all(cleaned)
    def _clean_data_docs_site(self, site_name: str) -> bool:
        """Remove all rendered files for one data docs site.

        Returns:
            False when no sites are configured at all; True after cleaning.
        """
        sites = self.variables.data_docs_sites
        if not sites:
            return False
        # NOTE(review): if site_name is absent, site_config is None and
        # instantiate_class_from_config receives config=None — callers are
        # expected to validate the name first (see clean_data_docs); confirm.
        site_config = sites.get(site_name)

        site_builder = instantiate_class_from_config(
            config=site_config,
            runtime_environment={
                "data_context": self,
                "root_directory": self.root_directory,
            },
            config_defaults={"module_name": "great_expectations.render.renderer.site_builder"},
        )
        site_builder.clean_site()
        return True
@staticmethod
def _get_global_config_value(
environment_variable: str,
conf_file_section: Optional[str] = None,
conf_file_option: Optional[str] = None,
) -> Optional[str]:
"""
Method to retrieve config value.
Looks for config value in environment_variable and config file section
Args:
environment_variable (str): name of environment_variable to retrieve
conf_file_section (str): section of config
conf_file_option (str): key in section
Returns:
Optional string representing config value
"""
assert (conf_file_section and conf_file_option) or (
not conf_file_section and not conf_file_option
), "Must pass both 'conf_file_section' and 'conf_file_option' or neither."
if environment_variable and os.environ.get( # noqa: TID251 # FIXME CoP
environment_variable, ""
):
return os.environ.get(environment_variable) # noqa: TID251 # FIXME CoP
if conf_file_section and conf_file_option:
for config_path in AbstractDataContext.GLOBAL_CONFIG_PATHS:
config: configparser.ConfigParser = configparser.ConfigParser()
config.read(config_path)
config_value: Optional[str] = config.get(
conf_file_section, conf_file_option, fallback=None
)
if config_value:
return config_value
return None
@staticmethod
def _get_metric_configuration_tuples( # noqa: C901 # FIXME CoP
metric_configuration: Union[str, dict], base_kwargs: Optional[dict] = None
) -> List[Tuple[str, Union[dict, Any]]]:
if base_kwargs is None:
base_kwargs = {}
if isinstance(metric_configuration, str):
return [(metric_configuration, base_kwargs)]
metric_configurations_list = []
for kwarg_name in metric_configuration:
if not isinstance(metric_configuration[kwarg_name], dict):
raise gx_exceptions.DataContextError( # noqa: TRY003 # FIXME CoP
"Invalid metric_configuration: each key must contain a dictionary."
)
if (
kwarg_name == "metric_kwargs_id"
): # this special case allows a hash of multiple kwargs
for metric_kwargs_id in metric_configuration[kwarg_name]:
if base_kwargs != {}:
raise gx_exceptions.DataContextError( # noqa: TRY003 # FIXME CoP
"Invalid metric_configuration: when specifying "
"metric_kwargs_id, no other keys or values may be defined."
)
if not isinstance(metric_configuration[kwarg_name][metric_kwargs_id], list):
raise gx_exceptions.DataContextError( # noqa: TRY003 # FIXME CoP
"Invalid metric_configuration: each value must contain a list."
)
metric_configurations_list += [
(metric_name, {"metric_kwargs_id": metric_kwargs_id})
for metric_name in metric_configuration[kwarg_name][metric_kwargs_id]
]
else:
for kwarg_value in metric_configuration[kwarg_name]:
base_kwargs.update({kwarg_name: kwarg_value})
if not isinstance(metric_configuration[kwarg_name][kwarg_value], list):
raise gx_exceptions.DataContextError( # noqa: TRY003 # FIXME CoP
"Invalid metric_configuration: each value must contain a list."
)
for nested_configuration in metric_configuration[kwarg_name][kwarg_value]:
metric_configurations_list += (
AbstractDataContext._get_metric_configuration_tuples(
nested_configuration, base_kwargs=base_kwargs
)
)
return metric_configurations_list
    @classmethod
    def get_or_create_data_context_config(
        cls, project_config: DataContextConfig | Mapping
    ) -> DataContextConfig:
        """Utility method to take in an input config and ensure its conversion to a rich
        DataContextConfig. If the input is already of the appropriate type, the function
        exits early.

        Args:
            project_config: The input config to be evaluated.

        Returns:
            An instance of DataContextConfig.

        Raises:
            ValidationError if the input config does not adhere to the required shape of a DataContextConfig.
        """  # noqa: E501 # FIXME CoP
        if isinstance(project_config, DataContextConfig):
            return project_config
        # Roundtrip through schema validation to remove any illegal fields add/or restore any missing fields.  # noqa: E501 # FIXME CoP
        project_config_dict = dataContextConfigSchema.dump(project_config)
        project_config_dict = dataContextConfigSchema.load(project_config_dict)
        context_config: DataContextConfig = DataContextConfig(**project_config_dict)
        return context_config
@overload
def _normalize_absolute_or_relative_path(self, path: str) -> str: ...
@overload
def _normalize_absolute_or_relative_path(self, path: None) -> None: ...
def _normalize_absolute_or_relative_path(self, path: Optional[str]) -> Optional[str]:
"""
Why does this exist in AbstractDataContext? CloudDataContext and FileDataContext both use it
"""
if path is None:
return None
if os.path.isabs(path): # noqa: PTH117 # FIXME CoP
return path
else:
return os.path.join(self.root_directory, path) # type: ignore[arg-type] # noqa: PTH118 # FIXME CoP
def _load_config_variables(self) -> Dict:
config_var_provider = self.config_provider.get_provider(
_ConfigurationVariablesConfigurationProvider
)
if config_var_provider:
return config_var_provider.get_values()
return {}
    def _build_store_from_config(self, name: str, config: dict | StoreConfigTypedDict) -> Store:
        """Build a Store from its config, register it on self._stores, and return it.

        Note: this mutates `config["store_backend"]` in place for the two special
        cases below.
        """
        module_name = "great_expectations.data_context.store"
        # Set expectations_store.store_backend_id to the data_context_id from the project_config if
        # the expectations_store does not yet exist by:
        # adding the data_context_id from the project_config
        # to the store_config under the key manually_initialize_store_backend_id
        if (name == self.expectations_store_name) and config.get("store_backend"):
            config["store_backend"].update(
                {"manually_initialize_store_backend_id": self.variables.data_context_id}
            )

        # Set suppress_store_backend_id = True if store is inactive and has a store_backend.
        if (
            name not in [store["name"] for store in self.list_active_stores()]  # type: ignore[index] # FIXME CoP
            and config.get("store_backend") is not None
        ):
            config["store_backend"].update({"suppress_store_backend_id": True})

        new_store = Store.build_store_from_config(
            name=name,
            config=config,
            module_name=module_name,
            runtime_environment={
                "root_directory": self.root_directory,
            },
        )
        self._stores[name] = new_store
        return new_store
# properties
    @property
    def variables(self) -> DataContextVariables:
        """Lazily build and cache the DataContextVariables for this context."""
        if self._variables is None:
            self._variables = self._init_variables()
        return self._variables
    @property
    def progress_bars(self) -> Optional[ProgressBarsConfig]:
        """Progress-bar configuration from the context variables, if any."""
        return self.variables.progress_bars
    # TODO: All datasources should now be fluent so we should be able to delete this
    @property
    def fluent_datasources(self) -> Dict[str, FluentDatasource]:
        """All registered datasources that are FluentDatasource instances, keyed by name."""
        return {
            name: ds
            for (name, ds) in self.data_sources.all().items()
            if isinstance(ds, FluentDatasource)
        }
    @property
    def data_context_id(self) -> uuid.UUID | None:
        """The unique id of this data context, as stored in the context variables."""
        return self.variables.data_context_id
    def _init_primary_stores(self, store_configs: Dict[str, StoreConfigTypedDict]) -> None:
        """Initialize all Stores for this DataContext.

        Stores are a good fit for reading/writing objects that:
        1. follow a clear key-value pattern, and
        2. are usually edited programmatically, using the Context

        Note that stores do NOT manage plugins.
        """
        for store_name, store_config in store_configs.items():
            # Registers each store on self._stores as a side effect.
            self._build_store_from_config(store_name, store_config)
    @abstractmethod
    def _init_datasource_store(self) -> DatasourceStore:
        """Internal utility responsible for creating a DatasourceStore to persist and manage a user's Datasources.

        Please note that the DatasourceStore lacks the same extensibility that other analogous Stores do; a default
        implementation is provided based on the user's environment but is not customizable.
        """  # noqa: E501 # FIXME CoP
        raise NotImplementedError
    def _update_config_variables(self) -> None:
        """Updates config_variables cache by re-calling _load_config_variables().

        Necessary after running methods that modify config AND could contain config_variables for credentials
        (example is add_datasource())
        """  # noqa: E501 # FIXME CoP
        self._config_variables = self._load_config_variables()
    @classmethod
    def _scaffold_root_conf(cls) -> bool:
        """
        Set up an empty root conf file ($HOME/.great_expectations/great_expectations.conf)

        Returns:
            Whether or not directory/file creation was successful.
        """
        try:
            # mkdir(exist_ok=True) keeps this idempotent; touch() creates an empty
            # file (or just updates mtime if it already exists).
            cls._ROOT_CONF_DIR.mkdir(exist_ok=True)
            cls._ROOT_CONF_FILE.touch()
        except OSError as e:
            # Best-effort: report failure to the caller rather than raising.
            logger.info(
                f"Something went wrong when trying to write the user's conf file to disk: {e}"
            )
            return False
        return True
    def _init_datasources(self) -> None:
        """Initialize the datasources in store"""
        # CacheableDatasourceDict lazily loads and caches datasources backed by
        # the datasource store.
        self._datasources: DatasourceDict = CacheableDatasourceDict(
            context=self,
            datasource_store=self._datasource_store,
        )

        config: DataContextConfig = self.config
        if self._datasource_store.cloud_mode:
            # In cloud mode, eagerly materialize fluent datasources from config so
            # their asset data connectors are rebuilt up front.
            for fds in config.fluent_datasources.values():
                datasource = self._add_fluent_datasource(**fds)
                datasource._rebuild_asset_data_connectors()
    def _construct_data_context_id(self) -> uuid.UUID | None:
        """Determine this context's id, preferring the expectations store's backend id."""
        # Choose the id of the currently-configured expectations store, if it is a persistent store
        expectations_store = self.stores[self.expectations_store_name]
        if isinstance(expectations_store.store_backend, TupleStoreBackend):
            # suppress_warnings since a warning will already have been issued during the store creation  # noqa: E501 # FIXME CoP
            # if there was an invalid store config
            return expectations_store.store_backend_id_warnings_suppressed
        # Otherwise choose the id stored in the project_config
        else:
            return self.variables.data_context_id
    def get_validation_result(  # noqa: C901 # FIXME CoP
        self,
        expectation_suite_name,
        run_id=None,
        batch_identifier=None,
        validation_results_store_name=None,
        failed_only=False,
    ) -> ExpectationValidationResult | dict:
        """Get validation results from a configured store.

        Args:
            expectation_suite_name: expectation_suite name for which to get validation result (default: "default")
            run_id: run_id for which to get validation result (if None, fetch the latest result by alphanumeric sort)
            batch_identifier: batch to fetch (if None, taken from the latest matching key)
            validation_results_store_name: the name of the store from which to get validation results
            failed_only: if True, filter the result to return only failed expectations

        Returns:
            validation_result, or {} when no stored key matches the filters
        """  # noqa: E501 # FIXME CoP
        if validation_results_store_name is None:
            validation_results_store_name = self.validation_results_store_name
        selected_store = self.stores[validation_results_store_name]
        if run_id is None or batch_identifier is None:
            # Get most recent run id
            # NOTE : This method requires a (potentially very inefficient) list_keys call.
            # It should probably move to live in an appropriate Store class,
            # but when we do so, that Store will need to function as more than just a key-value Store.  # noqa: E501 # FIXME CoP
            key_list = selected_store.list_keys()
            filtered_key_list = []
            for key in key_list:
                # Keep only keys matching whichever of run_id/batch_identifier was given.
                if run_id is not None and key.run_id != run_id:
                    continue
                if batch_identifier is not None and key.batch_identifier != batch_identifier:
                    continue
                filtered_key_list.append(key)

            # run_id_set = set([key.run_id for key in filtered_key_list])
            if len(filtered_key_list) == 0:
                logger.warning("No valid run_id values found.")
                return {}

            # "Latest" means last by alphanumeric sort of run_id — this assumes
            # run ids sort chronologically (e.g. ISO timestamps); TODO confirm.
            filtered_key_list = sorted(filtered_key_list, key=lambda x: x.run_id)

            if run_id is None:
                run_id = filtered_key_list[-1].run_id
            if batch_identifier is None:
                batch_identifier = filtered_key_list[-1].batch_identifier

        key = ValidationResultIdentifier(
            expectation_suite_identifier=ExpectationSuiteIdentifier(name=expectation_suite_name),
            run_id=run_id,
            batch_identifier=batch_identifier,
        )
        results_dict = selected_store.get(key)

        validation_result = (
            results_dict.get_failed_validation_results() if failed_only else results_dict
        )

        if self._include_rendered_content:
            # Render inline so consumers (e.g. Data Docs) receive rendered
            # content without a separate rendering pass.
            for expectation_validation_result in validation_result.results:
                expectation_validation_result.render()

        return validation_result
    def store_validation_result_metrics(
        self, requested_metrics, validation_results, target_store_name
    ) -> None:
        """Public wrapper around _store_metrics; see that method for the
        expected shape of requested_metrics."""
        self._store_metrics(
            requested_metrics=requested_metrics,
            validation_results=validation_results,
            target_store_name=target_store_name,
        )
    def _store_metrics(self, requested_metrics, validation_results, target_store_name) -> None:
        """
        Extract requested metrics from a validation result and persist them in a store.

        requested_metrics is a dictionary like this:

              requested_metrics:
                *: The asterisk here matches *any* expectation suite name
                   use the 'kwargs' key to request metrics that are defined by kwargs,
                   for example because they are defined only for a particular column
                   - column:
                       Age:
                           - expect_column_min_to_be_between.result.observed_value
                - statistics.evaluated_expectations
                - statistics.successful_expectations
        """
        expectation_suite_name = validation_results.meta["expectation_suite_name"]
        run_id = validation_results.meta["run_id"]
        data_asset_name = validation_results.meta.get("active_batch_definition", {}).get(
            "data_asset_name"
        )

        for expectation_suite_dependency, metrics_list in requested_metrics.items():
            # "*" matches every suite; otherwise only store metrics configured
            # for the suite that produced these validation results.
            if (expectation_suite_dependency != "*") and (  # noqa: PLR1714 # FIXME CoP
                expectation_suite_dependency != expectation_suite_name
            ):
                continue

            if not isinstance(metrics_list, list):
                raise gx_exceptions.DataContextError(  # noqa: TRY003 # FIXME CoP
                    "Invalid requested_metrics configuration: metrics requested for "
                    "each expectation suite must be a list."
                )

            for metric_configuration in metrics_list:
                # Flatten nested configuration into (metric_name, kwargs) pairs.
                metric_configurations = AbstractDataContext._get_metric_configuration_tuples(
                    metric_configuration
                )
                for metric_name, metric_kwargs in metric_configurations:
                    try:
                        metric_value = validation_results.get_metric(metric_name, **metric_kwargs)
                        self.stores[target_store_name].set(
                            ValidationMetricIdentifier(
                                run_id=run_id,
                                data_asset_name=data_asset_name,
                                expectation_suite_identifier=ExpectationSuiteIdentifier(
                                    expectation_suite_name
                                ),
                                metric_name=metric_name,
                                metric_kwargs_id=get_metric_kwargs_id(metric_kwargs=metric_kwargs),
                            ),
                            metric_value,
                        )
                    except gx_exceptions.UnavailableMetricError:
                        # This will happen frequently in larger pipelines
                        logger.debug(
                            f"metric {metric_name} was requested by another expectation suite but is not available in "  # noqa: E501 # FIXME CoP
                            "this validation result."
                        )
    @public_api
    def build_data_docs(
        self,
        site_names: list[str] | None = None,
        resource_identifiers: list[ExpectationSuiteIdentifier]
        | list[ValidationResultIdentifier]
        | None = None,
        dry_run: bool = False,
        build_index: bool = True,
    ) -> dict[str, str]:
        """Build Data Docs for your project.

        --Documentation--
            - https://docs.greatexpectations.io/docs/terms/data_docs/

        Args:
            site_names: if specified, build data docs only for these sites, otherwise,
                build all the sites specified in the context's config
            resource_identifiers: a list of resource identifiers (ExpectationSuiteIdentifier,
                ValidationResultIdentifier). If specified, rebuild HTML
                (or other views the data docs sites are rendering) only for
                the resources in this list. This supports incremental build
                of data docs sites (e.g., when a new validation result is created)
                and avoids full rebuild.
            dry_run: a flag, if True, the method returns a structure containing the
                URLs of the sites that *would* be built, but it does not build
                these sites.
            build_index: a flag if False, skips building the index page

        Returns:
            A dictionary with the names of the updated data documentation sites as keys and the location info
            of their index.html files as values

        Raises:
            ClassInstantiationError: Site config in your Data Context config is not valid.
        """  # noqa: E501 # FIXME CoP
        # Thin public wrapper; the build logic lives in _build_data_docs().
        return self._build_data_docs(
            site_names=site_names,
            resource_identifiers=resource_identifiers,
            dry_run=dry_run,
            build_index=build_index,
        )
    def _build_data_docs(
        self,
        site_names: list[str] | None = None,
        resource_identifiers: list | None = None,
        dry_run: bool = False,
        build_index: bool = True,
    ) -> dict:
        """Implementation for build_data_docs; see that method for argument docs.

        Returns:
            Mapping of site name -> index page locator info (or prospective URL
            when dry_run=True).
        """
        logger.debug("Starting DataContext.build_data_docs")
        index_page_locator_infos = {}

        sites = self.variables.data_docs_sites
        if sites:
            logger.debug("Found data_docs_sites. Building sites...")

            for site_name, site_config in sites.items():
                logger.debug(
                    f"Building Data Docs Site {site_name}",
                )

                # Build when no filter was given, or when this site is selected.
                if (site_names and (site_name in site_names)) or not site_names:
                    complete_site_config = site_config
                    module_name = "great_expectations.render.renderer.site_builder"
                    site_builder: SiteBuilder = self._init_site_builder_for_data_docs_site_creation(
                        site_name=site_name,
                        site_config=site_config,
                    )
                    if not site_builder:
                        raise gx_exceptions.ClassInstantiationError(
                            module_name=module_name,
                            package_name=None,
                            class_name=complete_site_config["class_name"],
                        )
                    if dry_run:
                        # Only report where the index *would* live; build nothing.
                        index_page_locator_infos[site_name] = site_builder.get_resource_url(
                            only_if_exists=False
                        )
                    else:
                        index_page_resource_identifier_tuple = site_builder.build(
                            resource_identifiers,
                            build_index=build_index,
                        )
                        if index_page_resource_identifier_tuple:
                            index_page_locator_infos[site_name] = (
                                index_page_resource_identifier_tuple[0]
                            )
        else:
            logger.debug("No data_docs_config found. No site(s) built.")

        return index_page_locator_infos
    def _init_site_builder_for_data_docs_site_creation(
        self,
        site_name: str,
        site_config: dict,
    ) -> SiteBuilder:
        """Instantiate the SiteBuilder used by _build_data_docs for one site."""
        site_builder: SiteBuilder = instantiate_class_from_config(
            config=site_config,
            runtime_environment={
                "data_context": self,
                "root_directory": self.root_directory,
                "site_name": site_name,
            },
            config_defaults={
                "class_name": "SiteBuilder",
                "module_name": "great_expectations.render.renderer.site_builder",
            },
        )
        return site_builder
    @public_api
    @new_method_or_class(version="0.16.15")
    def view_validation_result(self, result: CheckpointResult) -> None:
        """
        Opens a validation result in a browser.

        Args:
            result: The result of a Checkpoint run.
        """
        self._view_validation_result(result)
def _view_validation_result(self, result: CheckpointResult) -> None:
validation_result_identifier = tuple(result.run_results.keys())[0]
self.open_data_docs(resource_identifier=validation_result_identifier) # type: ignore[arg-type] # FIXME CoP
def escape_all_config_variables(
self,
value: T,
dollar_sign_escape_string: str = DOLLAR_SIGN_ESCAPE_STRING,
skip_if_substitution_variable: bool = True,
) -> T:
"""
Replace all `$` characters with the DOLLAR_SIGN_ESCAPE_STRING
Args:
value: config variable value
dollar_sign_escape_string: replaces instances of `$`
skip_if_substitution_variable: skip if the value is of the form ${MYVAR} or $MYVAR
Returns:
input value with all `$` characters replaced with the escape string
"""
if isinstance(value, (dict, OrderedDict)):
return {
k: self.escape_all_config_variables(
value=v,
dollar_sign_escape_string=dollar_sign_escape_string,
skip_if_substitution_variable=skip_if_substitution_variable,
)
for k, v in value.items()
}
elif isinstance(value, list):
return [
self.escape_all_config_variables(
value=v,
dollar_sign_escape_string=dollar_sign_escape_string,
skip_if_substitution_variable=skip_if_substitution_variable,
)
for v in value
]
if skip_if_substitution_variable:
if parse_substitution_variable(value) is None:
return value.replace("$", dollar_sign_escape_string)
return value
return value.replace("$", dollar_sign_escape_string)
    def save_config_variable(
        self,
        name: str,
        value: Any,
        skip_if_substitution_variable: bool = True,
    ) -> None:
        r"""Save config variable value
        Escapes $ unless they are used in substitution variables e.g. the $ characters in ${SOME_VAR} or $SOME_VAR are not escaped

        Args:
            name: name of the property
            value: the value to save for the property
            skip_if_substitution_variable: set to False to escape $ in values in substitution variable form e.g. ${SOME_VAR} -> r"\${SOME_VAR}" or $SOME_VAR -> r"\$SOME_VAR"

        Returns:
            None
        """  # noqa: E501 # FIXME CoP
        config_variables = self.config_variables
        value = self.escape_all_config_variables(
            value,
            self.DOLLAR_SIGN_ESCAPE_STRING,
            skip_if_substitution_variable=skip_if_substitution_variable,
        )
        config_variables[name] = value

        # Required to call _variables instead of variables property because we don't want to trigger substitutions  # noqa: E501 # FIXME CoP
        config = self._variables.config

        config_variables_filepath = config.config_variables_file_path
        if not config_variables_filepath:
            raise gx_exceptions.InvalidConfigError(  # noqa: TRY003 # FIXME CoP
                "'config_variables_file_path' property is not found in config - setting it is required to use this feature"  # noqa: E501 # FIXME CoP
            )

        config_variables_filepath = os.path.join(  # noqa: PTH118 # FIXME CoP
            self.root_directory,  # type: ignore[arg-type] # FIXME CoP
            config_variables_filepath,
        )

        os.makedirs(  # noqa: PTH103 # FIXME CoP
            os.path.dirname(config_variables_filepath),  # noqa: PTH120 # FIXME CoP
            exist_ok=True,
        )
        if not os.path.isfile(config_variables_filepath):  # noqa: PTH113 # FIXME CoP
            logger.info(f"Creating new substitution_variables file at {config_variables_filepath}")
            # NOTE(review): this template content is immediately overwritten by the
            # yaml.dump below (both opens use mode "w"), so only the log line has a
            # lasting effect — confirm whether writing CONFIG_VARIABLES_TEMPLATE
            # first is still intended.
            with open(config_variables_filepath, "w") as template:
                template.write(CONFIG_VARIABLES_TEMPLATE)

        with open(config_variables_filepath, "w") as config_variables_file:
            yaml.dump(config_variables, config_variables_file)
    def _load_fluent_config(self, config_provider: _ConfigurationProvider) -> GxConfig:
        """Called at beginning of DataContext __init__ after config_providers init.

        Base implementation returns an empty GxConfig; subclasses override this
        to load real fluent configuration.
        """
        logger.debug(
            f"{self.__class__.__name__} has not implemented `_load_fluent_config()` returning empty `GxConfig`"  # noqa: E501 # FIXME CoP
        )
        return GxConfig(fluent_datasources=[])
    def _attach_fluent_config_datasources_and_build_data_connectors(self, config: GxConfig):
        """Called at end of __init__; registers fluent datasources already present in config."""
        for datasource in config.datasources:
            ds_name = datasource.name
            logger.info(f"Loaded '{ds_name}' from fluent config")

            datasource._rebuild_asset_data_connectors()
            # since we are loading the datasource from existing config, we do not need to save it
            self._add_fluent_datasource(datasource=datasource, save_changes=False)
    def _synchronize_fluent_datasources(self) -> Dict[str, FluentDatasource]:
        """
        Update `self.fluent_config.fluent_datasources` with any newly added datasources.
        Should be called before serializing `fluent_config`.

        Returns:
            The synchronized datasources, keyed by name.
        """
        fluent_datasources = self.fluent_datasources
        if fluent_datasources:
            self.fluent_config.update_datasources(datasources=fluent_datasources)

        return self.fluent_config.get_datasources_as_dict()
    def prepare_checkpoint_run(
        self,
        checkpoint: gx.Checkpoint,
        batch_parameters: Dict[str, Any],
        expectation_parameters: SuiteParameterDict,
    ) -> None:
        """Context specific preparation for a checkpoint run.

        Defaults to a no-op but can be overriden for context specific checkpoint run preparation.
        The preparation can update the input arguments in place.
        """
        ...
| AbstractDataContext |
python | readthedocs__readthedocs.org | readthedocs/api/v3/serializers.py | {
"start": 15228,
"end": 18157
} | class ____(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
versions = serializers.SerializerMethodField()
builds = serializers.SerializerMethodField()
environmentvariables = serializers.SerializerMethodField()
redirects = serializers.SerializerMethodField()
subprojects = serializers.SerializerMethodField()
superproject = serializers.SerializerMethodField()
translations = serializers.SerializerMethodField()
notifications = serializers.SerializerMethodField()
sync_versions = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse("projects-detail", kwargs={"project_slug": obj.slug})
return self._absolute_url(path)
def get_versions(self, obj):
path = reverse(
"projects-versions-list",
kwargs={
"parent_lookup_project__slug": obj.slug,
},
)
return self._absolute_url(path)
def get_environmentvariables(self, obj):
path = reverse(
"projects-environmentvariables-list",
kwargs={
"parent_lookup_project__slug": obj.slug,
},
)
return self._absolute_url(path)
def get_redirects(self, obj):
path = reverse(
"projects-redirects-list",
kwargs={
"parent_lookup_project__slug": obj.slug,
},
)
return self._absolute_url(path)
def get_builds(self, obj):
path = reverse(
"projects-builds-list",
kwargs={
"parent_lookup_project__slug": obj.slug,
},
)
return self._absolute_url(path)
def get_subprojects(self, obj):
path = reverse(
"projects-subprojects-list",
kwargs={
"parent_lookup_parent__slug": obj.slug,
},
)
return self._absolute_url(path)
def get_superproject(self, obj):
path = reverse(
"projects-superproject",
kwargs={
"project_slug": obj.slug,
},
)
return self._absolute_url(path)
def get_sync_versions(self, obj):
path = reverse(
"projects-sync-versions",
kwargs={
"project_slug": obj.slug,
},
)
return self._absolute_url(path)
def get_translations(self, obj):
path = reverse(
"projects-translations-list",
kwargs={
"parent_lookup_main_language_project__slug": obj.slug,
},
)
return self._absolute_url(path)
def get_notifications(self, obj):
path = reverse(
"projects-notifications-list",
kwargs={
"parent_lookup_project__slug": obj.slug,
},
)
return self._absolute_url(path)
| ProjectLinksSerializer |
python | django__django | tests/fixtures_regress/models.py | {
"start": 1605,
"end": 1681
} | class ____(CommonFeature):
pass
# Models to regression test #11428
| Feature |
python | tensorflow__tensorflow | tensorflow/python/compiler/xla/experimental/xla_sharding_test.py | {
"start": 2749,
"end": 8194
} | class ____(test_util.TensorFlowTestCase):
"""Tests for non-member functions in the module xla_sharding.py."""
def setUp(self):
super().setUp()
context.enable_xla_sharding_for_resource_variables()
def _graph_has_xla_sharding_op(self, graph):
for node in graph.node:
if node.op == 'XlaSharding' and any(
'ReadVariableOp' in input for input in node.input
):
return True
return False
def test_replicate_annotates_tensor_correctly(self):
@def_function.function
def replicate_helper(tensor):
self.assertIsNone(xla_sharding.get_tensor_sharding(tensor))
replicated_tensor = xla_sharding.replicate(tensor)
replicated_sharding = xla_sharding.get_tensor_sharding(replicated_tensor)
self.assertIsNotNone(replicated_sharding)
self.assertIsNone(
xla_sharding.get_sharding_tile_shape(replicated_sharding))
return replicated_tensor
in_tensor = array_ops.ones([4, 5, 6], dtype=dtypes.float32)
result = replicate_helper(array_ops.ones([4, 5, 6], dtype=dtypes.float32))
self.assertAllEqual(in_tensor, result)
var = variables.Variable(initial_value=in_tensor, name='var')
graph = replicate_helper.get_concrete_function(var).graph.as_graph_def()
self.assertTrue(self._graph_has_xla_sharding_op(graph))
def test_tile_annotates_tensor_correctly(self):
@def_function.function
def tile_helper(tensor):
self.assertIsNone(xla_sharding.get_tensor_sharding(tensor))
tiled_tensor = xla_sharding.tile(tensor, np.array([2, 1, 6]))
self.assertIsInstance(tiled_tensor, type(tensor))
tiled_sharding = xla_sharding.get_tensor_sharding(tiled_tensor)
tile_shape = xla_sharding.get_sharding_tile_shape(tiled_sharding)
# This is the shape of the tile assignment [2, 1, 6]
expected_shape = [3]
self.assertEqual(expected_shape, tile_shape)
return tiled_tensor
in_tensor = array_ops.ones([4, 5, 6], dtype=dtypes.float32)
result = tile_helper(array_ops.ones([4, 5, 6], dtype=dtypes.float32))
self.assertAllEqual(in_tensor, result)
var = variables.Variable(initial_value=in_tensor, name='var')
graph = tile_helper.get_concrete_function(var).graph.as_graph_def()
self.assertTrue(self._graph_has_xla_sharding_op(graph))
def test_split_annotates_tensor_correctly(self):
@def_function.function
def split_helper(tensor):
self.assertIsNone(xla_sharding.get_tensor_sharding(tensor))
split_tensor = xla_sharding.split(tensor, 2, 3)
self.assertIsInstance(split_tensor, type(tensor))
split_sharding = xla_sharding.get_tensor_sharding(split_tensor)
split_shape = xla_sharding.get_sharding_tile_shape(split_sharding)
expected_shape = [1, 1, 3]
self.assertEqual(expected_shape, split_shape)
return split_tensor
in_tensor = array_ops.ones([4, 5, 6], dtype=dtypes.float32)
result = split_helper(array_ops.ones([4, 5, 6], dtype=dtypes.float32))
self.assertAllEqual(in_tensor, result)
var = variables.Variable(initial_value=in_tensor, name='var')
graph = split_helper.get_concrete_function(var).graph.as_graph_def()
self.assertTrue(self._graph_has_xla_sharding_op(graph))
def test_split_raises_error_with_incommensurate_dimensions(self):
@def_function.function
def split_helper(tensor):
split_tensor = xla_sharding.split(tensor, 0, 8)
return split_tensor
with self.assertRaises(ValueError):
_ = split_helper(array_ops.ones([4, 5, 6], dtype=dtypes.float32))
# TODO(drm): Modify split() so that this call raises an error since
# 8 does not divide 9 (currently only checks that 8 is smaller than 9,
# which it is, but this is not good for splitting).
# with self.assertRaises(ValueError):
# _ = split_helper(array_ops.ones([9, 5, 6], dtype=dtypes.float32))
def test_copy_sharding_succeeds_with_identically_shaped_tensors(self):
@def_function.function
def copy_helper(tensor):
tensor_src = array_ops.identity(tensor)
tensor_src = xla_sharding.split(tensor, 2, 3)
sharding_src = xla_sharding.get_tensor_sharding(tensor_src)
shape_src = xla_sharding.get_sharding_tile_shape(sharding_src)
self.assertEqual([1, 1, 3], shape_src)
tensor_dest = array_ops.identity(tensor)
self.assertIsNone(xla_sharding.get_tensor_sharding(tensor_dest))
xla_sharding.copy_sharding(tensor_src, tensor_dest)
sharding_dest = xla_sharding.get_tensor_sharding(tensor_dest)
shape_dest = xla_sharding.get_sharding_tile_shape(sharding_dest)
self.assertEqual([1, 1, 3], shape_dest)
return tensor_dest
in_tensor = array_ops.ones([4, 5, 6], dtype=dtypes.float32)
result = copy_helper(array_ops.ones([4, 5, 6], dtype=dtypes.float32))
self.assertAllEqual(in_tensor, result)
var = variables.Variable(initial_value=in_tensor, name='var')
graph = copy_helper.get_concrete_function(var).graph.as_graph_def()
self.assertTrue(self._graph_has_xla_sharding_op(graph))
def test_get_sharding_tile_shape_returns_none_on_none_input(self):
self.assertIsNone(xla_sharding.get_sharding_tile_shape(None))
def test_get_sharding_tile_shape_raises_error_on_nonparsable_input(self):
bad_proto_data = b'\x0f'
with self.assertRaises(DecodeError):
xla_sharding.get_sharding_tile_shape(bad_proto_data)
if __name__ == '__main__':
absltest.main()
| XlaShardingTest |
python | getsentry__sentry | tests/sentry/tasks/test_post_process.py | {
"start": 154041,
"end": 163407
} | class ____(BasePostProgressGroupMixin, SnubaTestCase):
DEFAULT_FORWARDER_CONFIGS = {
DataForwarderProviderSlug.SQS: {
"queue_url": "https://sqs.us-east-1.amazonaws.com/123456789/test-queue",
"region": "us-east-1",
"access_key": "test-key",
"secret_key": "test-secret",
},
DataForwarderProviderSlug.SPLUNK: {
"instance_url": "https://splunk.example.com",
"token": "test-token",
"index": "main",
},
DataForwarderProviderSlug.SEGMENT: {
"write_key": "test-write-key",
},
}
def create_event(self, data, project_id, assert_no_errors=True):
return self.store_event(data=data, project_id=project_id, assert_no_errors=assert_no_errors)
def call_post_process_group(
self, is_new, is_regression, is_new_group_environment, event, cache_key=None
):
if cache_key is None:
cache_key = write_event_to_cache(event)
post_process_group(
is_new=is_new,
is_regression=is_regression,
is_new_group_environment=is_new_group_environment,
cache_key=cache_key,
group_id=event.group_id,
project_id=event.project_id,
)
return cache_key
def setup_forwarder(self, provider, is_enabled=True, **config_overrides):
config = self.DEFAULT_FORWARDER_CONFIGS[provider].copy()
config.update(config_overrides)
data_forwarder = self.create_data_forwarder(
organization=self.project.organization,
provider=provider.value, # Convert enum to string value
config=config,
is_enabled=is_enabled,
)
data_forwarder_project = self.create_data_forwarder_project(
data_forwarder=data_forwarder,
project=self.project,
is_enabled=True,
)
return data_forwarder, data_forwarder_project
@with_feature("organizations:data-forwarding-revamp-access")
def test_process_data_forwarding_no_forwarders(self):
event = self.create_event(
data={"message": "test message", "level": "error"},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=False,
event=event,
)
@with_feature("organizations:data-forwarding-revamp-access")
@patch("data_forwarding.amazon_sqs.forwarder.AmazonSQSForwarder.forward_event")
def test_process_data_forwarding_sqs_enabled(self, mock_forward):
mock_forward.return_value = True
_, data_forwarder_project = self.setup_forwarder(DataForwarderProviderSlug.SQS)
event = self.create_event(
data={"message": "test message", "level": "error"},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=False,
event=event,
)
assert mock_forward.call_count == 1
call_args = mock_forward.call_args
assert call_args[0][1] == data_forwarder_project
@with_feature("organizations:data-forwarding-revamp-access")
@patch("data_forwarding.amazon_sqs.forwarder.AmazonSQSForwarder.forward_event")
def test_process_data_forwarding_sqs_with_s3_bucket(self, mock_forward):
"""Test SQS forwarder with S3 bucket configured for large payloads."""
mock_forward.return_value = True
_, data_forwarder_project = self.setup_forwarder(
DataForwarderProviderSlug.SQS, s3_bucket="my-sentry-events-bucket"
)
event = self.create_event(
data={"message": "test message", "level": "error"},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=False,
event=event,
)
# Verify the forwarder was called
assert mock_forward.call_count == 1
call_args = mock_forward.call_args
assert call_args[0][1] == data_forwarder_project
# Verify the config includes S3 bucket
assert call_args[0][1].get_config()["s3_bucket"] == "my-sentry-events-bucket"
@with_feature("organizations:data-forwarding-revamp-access")
@patch("data_forwarding.splunk.forwarder.SplunkForwarder.forward_event")
def test_process_data_forwarding_splunk_enabled(self, mock_forward):
mock_forward.return_value = True
self.setup_forwarder(DataForwarderProviderSlug.SPLUNK)
event = self.create_event(
data={"message": "test message", "level": "error"},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=False,
event=event,
)
assert mock_forward.call_count == 1
@with_feature("organizations:data-forwarding-revamp-access")
@patch("data_forwarding.segment.forwarder.SegmentForwarder.forward_event")
def test_process_data_forwarding_segment_enabled(self, mock_forward):
mock_forward.return_value = True
self.setup_forwarder(DataForwarderProviderSlug.SEGMENT)
event = self.create_event(
data={"message": "test message", "level": "error"},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=False,
event=event,
)
assert mock_forward.call_count == 1
@with_feature("organizations:data-forwarding-revamp-access")
@patch("data_forwarding.amazon_sqs.forwarder.AmazonSQSForwarder.forward_event")
def test_process_data_forwarding_disabled_forwarder(self, mock_forward):
self.setup_forwarder(DataForwarderProviderSlug.SQS, is_enabled=False)
event = self.create_event(
data={"message": "test message", "level": "error"},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=False,
event=event,
)
assert mock_forward.call_count == 0
@with_feature("organizations:data-forwarding-revamp-access")
@patch("data_forwarding.amazon_sqs.forwarder.AmazonSQSForwarder.forward_event")
@patch("data_forwarding.splunk.forwarder.SplunkForwarder.forward_event")
def test_process_data_forwarding_multiple_forwarders(
self, mock_splunk_forward, mock_sqs_forward
):
mock_sqs_forward.return_value = True
mock_splunk_forward.return_value = True
self.setup_forwarder(DataForwarderProviderSlug.SQS)
self.setup_forwarder(DataForwarderProviderSlug.SPLUNK)
event = self.create_event(
data={"message": "test message", "level": "error"},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=False,
event=event,
)
assert mock_sqs_forward.call_count == 1
assert mock_splunk_forward.call_count == 1
@with_feature("organizations:data-forwarding-revamp-access")
@patch("data_forwarding.amazon_sqs.forwarder.AmazonSQSForwarder.forward_event")
@patch("data_forwarding.splunk.forwarder.SplunkForwarder.forward_event")
def test_process_data_forwarding_one_forwarder_fails(
self, mock_splunk_forward, mock_sqs_forward
):
"""Test that when one forwarder fails, other forwarders still execute."""
mock_sqs_forward.side_effect = Exception("SQS connection failed")
mock_splunk_forward.return_value = True
self.setup_forwarder(DataForwarderProviderSlug.SQS)
self.setup_forwarder(DataForwarderProviderSlug.SPLUNK)
event = self.create_event(
data={"message": "test message", "level": "error"},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=False,
event=event,
)
# Both forwarders should be called despite SQS failure
assert mock_sqs_forward.call_count == 1
assert mock_splunk_forward.call_count == 1
@patch("data_forwarding.amazon_sqs.forwarder.AmazonSQSForwarder.forward_event")
def test_process_data_forwarding_revamp_access_flag_disabled(self, mock_forward):
"""Test that data forwarding is skipped when the revamp-access feature flag is disabled."""
self.setup_forwarder(DataForwarderProviderSlug.SQS)
event = self.create_event(
data={"message": "test message", "level": "error"},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=False,
event=event,
)
# should not be called when feature flag is disabled
assert mock_forward.call_count == 0
| ProcessDataForwardingTest |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/cli/scaffold/branch/planning.py | {
"start": 1387,
"end": 13375
} | class ____:
"""Main orchestrator for plan generation and refinement."""
def __init__(self, claude_client: "ClaudeSDKClient", diagnostics: ClaudeDiagnostics):
"""Initialize the plan generator.
Args:
claude_client: Client for AI interactions
diagnostics: Diagnostics service for logging
"""
self.claude_client = claude_client
self.diagnostics = diagnostics
def generate_initial_plan(
self,
context: PlanningContext,
output_channel: "OutputChannel",
) -> GeneratedPlan:
"""Generate initial implementation plan from user input.
Args:
context: Planning context with user input and project information
Returns:
Generated implementation plan as markdown content
Raises:
Exception: If plan generation fails
"""
self.diagnostics.info(
category="planning_generation_start",
message="Starting initial plan generation",
data={
"user_input_length": len(context.prompt_text),
},
)
prompt = self._create_initial_plan_prompt(context)
allowed_tools = ALLOWED_COMMANDS_PLANNING.copy()
# Use Claude to generate structured plan as markdown
messages = asyncio.run(
self.claude_client.scaffold_with_streaming(
prompt=prompt,
model=context.model,
allowed_tools=allowed_tools,
output_channel=output_channel,
disallowed_tools=["Bash(python:*)", "WebSearch", "WebFetch"],
verbose=context.verbose,
)
)
# Extract the plan content from Claude's response
plan_content = self._extract_plan_from_messages(messages, output_channel)
plan = GeneratedPlan(
markdown_content=plan_content,
metadata={
"generation_method": "prompt_driven_claude_generation",
"messages_count": len(messages),
"user_input": context.prompt_text,
},
)
self.diagnostics.info(
category="planning_generation_completed",
message="Initial plan generation completed",
data={
"plan_content_length": len(plan_content),
"messages_count": len(messages),
},
)
return plan
def refine_plan(
self,
context: PlanningContext,
current_plan: GeneratedPlan,
user_feedback: str,
output_channel: "OutputChannel",
) -> GeneratedPlan:
"""Refine existing plan based on user feedback.
Args:
current_plan: The current plan to refine
user_feedback: User feedback for plan improvements
Returns:
Refined implementation plan
"""
self.diagnostics.info(
category="planning_refinement_start",
message="Starting plan refinement",
data={
"current_plan_length": len(current_plan.markdown_content),
"feedback_length": len(user_feedback),
},
)
prompt = self._create_refinement_prompt(current_plan, user_feedback)
allowed_tools = ALLOWED_COMMANDS_PLANNING.copy()
messages = asyncio.run(
self.claude_client.scaffold_with_streaming(
prompt=prompt,
model=context.model,
allowed_tools=allowed_tools,
output_channel=output_channel,
disallowed_tools=["Bash(python:*)", "WebSearch", "WebFetch"],
verbose=context.verbose,
)
)
# Extract refined plan content from Claude's response
refined_content = self._extract_plan_from_messages(messages, output_channel)
refined_plan = GeneratedPlan(
markdown_content=refined_content,
metadata={
**current_plan.metadata,
"refinement_method": "prompt_driven_claude_refinement",
"refinement_messages": len(messages),
"user_feedback": user_feedback,
},
)
self.diagnostics.info(
category="planning_refinement_completed",
message="Plan refinement completed",
data={
"refined_content_length": len(refined_content),
},
)
return refined_plan
def _create_initial_plan_prompt(self, context: PlanningContext) -> str:
"""Create a natural prompt for initial plan generation by loading from template file.
Args:
context: Planning context with user input and project information
Returns:
Natural language prompt for Claude to generate a plan
"""
from pathlib import Path
# Load prompt template from external file
prompt_path = Path(__file__).parent / "prompts" / "planning_prompt.md"
template = prompt_path.read_text()
context_info = ""
if context.project_structure:
context_info = f"\n\nProject Structure Overview: {context.project_structure}"
# Format template with actual values
return template.format(
user_input=context.prompt_text,
context_info=context_info,
)
def _create_refinement_prompt(self, current_plan: GeneratedPlan, user_feedback: str) -> str:
"""Create a natural prompt for plan refinement.
Args:
current_plan: The current plan to refine
user_feedback: User's feedback for improvements
Returns:
Natural language prompt for Claude to refine the plan
"""
return f"""You are refining an implementation plan based on user feedback.
Here is the current plan:
{current_plan.markdown_content}
---
User Feedback: {user_feedback}
---
Please generate an improved version of the implementation plan that addresses the user's feedback. Keep the same markdown structure and format, but incorporate the requested changes. Be specific about what you're modifying and why.
Provide the complete updated plan in the same markdown format as before."""
def _extract_plan_from_messages(self, messages: list, output_channel: "OutputChannel") -> str:
"""Extract plan content from Claude's response messages.
Args:
messages: List of message objects from Claude client
Returns:
The plan content as markdown text
"""
ensure_claude_sdk_python_version()
from claude_code_sdk.types import ResultMessage
self.diagnostics.debug(
category="plan_extraction_start",
message="Extracting plan from Claude messages",
data={"message_count": len(messages)},
)
# Look specifically for ResultMessage and extract the text content
plan_content = None
success_result_found = False
result_message = None
for i, message in enumerate(messages):
self.diagnostics.debug(
category="plan_message_processing",
message=f"Processing message {i}",
data={
"message_class": type(message).__name__,
},
)
if isinstance(message, ResultMessage):
success_result_found = True
result_message = message
if message.result and message.result.strip():
plan_content = message.result.strip()
self.diagnostics.info(
category="plan_success_result_found",
message=f"Found ResultMessage with {len(plan_content)} chars of content",
data={
"message_index": i,
},
)
break # Found what we need, stop processing
else:
self.diagnostics.error(
category="plan_success_result_empty",
message="Found ResultMessage but result is empty",
data={"message_index": i},
)
# If we didn't find AssistantMessage, error and exit
if not success_result_found:
message_types = [type(msg).__name__ for msg in messages]
self.diagnostics.error(
category="plan_no_success_result",
message="No ResultMessage found in Claude response",
data={
"message_count": len(messages),
"message_types": message_types,
},
)
raise Exception(
f"Expected ResultMessage from Claude SDK but got {len(messages)} message(s) "
f"of types: {message_types}. This indicates a problem with Claude SDK communication "
f"or model response. Check your Claude SDK installation and try again."
)
# If we found ResultMessage but no content, error and exit
if not plan_content:
raise Exception(
"Found ResultMessage but the text content was empty. "
"Claude generated a response but provided no plan content."
)
combined_content = plan_content
# Display success summary to user
if result_message:
output_channel.write(
f"✅ Plan generation completed ({len(plan_content):,} characters, ${result_message.total_cost_usd:.2f}, {format_duration(result_message.duration_ms)})."
)
self.diagnostics.debug(
category="plan_extraction_result",
message="Plan extraction completed",
data={
"content_length": len(combined_content),
"has_implementation_plan": "# Implementation Plan" in combined_content,
},
)
# Final validation - this should not happen since we validated above, but just in case
if not combined_content or len(combined_content) < 100:
raise Exception(
f"AssistantMessage contained only {len(combined_content)} characters of content, "
f"which is too short for a meaningful plan. Expected at least 100 characters."
)
return combined_content
def get_user_plan_approval(plan: GeneratedPlan) -> tuple[bool, Optional[str]]:
"""Get user approval for a generated plan.
Args:
plan: The plan to review
Returns:
Tuple of (approved, feedback) where approved indicates if the user
approved the plan, and feedback contains refinement suggestions
"""
click.echo("\n" + "=" * 60)
click.echo("IMPLEMENTATION PLAN REVIEW")
click.echo("=" * 60)
click.echo("")
# Display the plan content directly
click.echo(plan.markdown_content)
click.echo("")
click.echo("=" * 60)
while True:
choice = click.prompt(
"Plan Review Options:\n"
" [a]pprove - Execute this plan as-is\n"
" [r]efine - Provide feedback to improve the plan\n"
" [c]ancel - Cancel the operation\n\n"
"Your choice",
type=click.Choice(["a", "r", "c", "approve", "refine", "cancel"], case_sensitive=False),
default="approve",
).lower()
if choice in ("a", "approve"):
return True, None
elif choice in ("r", "refine"):
feedback = click.prompt(
"\nWhat would you like to change about this plan?\n"
"Be specific about steps, files, or approaches you'd like modified",
type=str,
)
return False, feedback
elif choice in ("c", "cancel"):
raise click.ClickException("Operation cancelled by user")
| PlanGenerator |
python | pallets__werkzeug | src/werkzeug/datastructures/structures.py | {
"start": 19922,
"end": 27446
} | class ____(MultiDict[K, V]):
"""Works like a regular :class:`MultiDict` but preserves the
order of the fields. To convert the ordered multi dict into a
list you can use the :meth:`items` method and pass it ``multi=True``.
In general an :class:`OrderedMultiDict` is an order of magnitude
slower than a :class:`MultiDict`.
.. admonition:: note
Due to a limitation in Python you cannot convert an ordered
multi dict into a regular dict by using ``dict(multidict)``.
Instead you have to use the :meth:`to_dict` method, otherwise
the internal bucket objects are exposed.
.. deprecated:: 3.1
Will be removed in Werkzeug 3.2. Use ``MultiDict`` instead.
"""
def __init__(
self,
mapping: (
MultiDict[K, V]
| cabc.Mapping[K, V | list[V] | tuple[V, ...] | set[V]]
| cabc.Iterable[tuple[K, V]]
| None
) = None,
) -> None:
import warnings
warnings.warn(
"'OrderedMultiDict' is deprecated and will be removed in Werkzeug"
" 3.2. Use 'MultiDict' instead.",
DeprecationWarning,
stacklevel=2,
)
super().__init__()
self._first_bucket: _omd_bucket[K, V] | None = None
self._last_bucket: _omd_bucket[K, V] | None = None
if mapping is not None:
self.update(mapping)
def __eq__(self, other: object) -> bool:
if not isinstance(other, MultiDict):
return NotImplemented
if isinstance(other, _OrderedMultiDict):
iter1 = iter(self.items(multi=True))
iter2 = iter(other.items(multi=True))
try:
for k1, v1 in iter1:
k2, v2 = next(iter2)
if k1 != k2 or v1 != v2:
return False
except StopIteration:
return False
try:
next(iter2)
except StopIteration:
return True
return False
if len(self) != len(other):
return False
for key, values in self.lists():
if other.getlist(key) != values:
return False
return True
__hash__ = None # type: ignore[assignment]
def __reduce_ex__(self, protocol: t.SupportsIndex) -> t.Any:
return type(self), (list(self.items(multi=True)),)
def __getstate__(self) -> t.Any:
return list(self.items(multi=True))
def __setstate__(self, values: t.Any) -> None:
self.clear()
for key, value in values:
self.add(key, value)
def __getitem__(self, key: K) -> V:
if key in self:
return dict.__getitem__(self, key)[0].value # type: ignore[index,no-any-return]
raise exceptions.BadRequestKeyError(key)
def __setitem__(self, key: K, value: V) -> None:
self.poplist(key)
self.add(key, value)
def __delitem__(self, key: K) -> None:
self.pop(key)
def keys(self) -> cabc.Iterable[K]: # type: ignore[override]
return (key for key, _ in self.items())
def __iter__(self) -> cabc.Iterator[K]:
return iter(self.keys())
def values(self) -> cabc.Iterable[V]: # type: ignore[override]
return (value for key, value in self.items())
def items(self, multi: bool = False) -> cabc.Iterable[tuple[K, V]]: # type: ignore[override]
ptr = self._first_bucket
if multi:
while ptr is not None:
yield ptr.key, ptr.value
ptr = ptr.next
else:
returned_keys = set()
while ptr is not None:
if ptr.key not in returned_keys:
returned_keys.add(ptr.key)
yield ptr.key, ptr.value
ptr = ptr.next
def lists(self) -> cabc.Iterable[tuple[K, list[V]]]:
returned_keys = set()
ptr = self._first_bucket
while ptr is not None:
if ptr.key not in returned_keys:
yield ptr.key, self.getlist(ptr.key)
returned_keys.add(ptr.key)
ptr = ptr.next
def listvalues(self) -> cabc.Iterable[list[V]]:
for _key, values in self.lists():
yield values
def add(self, key: K, value: V) -> None:
dict.setdefault(self, key, []).append(_omd_bucket(self, key, value)) # type: ignore[misc]
@t.overload
def getlist(self, key: K) -> list[V]: ...
@t.overload
def getlist(self, key: K, type: cabc.Callable[[V], T]) -> list[T]: ...
def getlist(
self, key: K, type: cabc.Callable[[V], T] | None = None
) -> list[V] | list[T]:
rv: list[_omd_bucket[K, V]]
try:
rv = dict.__getitem__(self, key) # type: ignore[index]
except KeyError:
return []
if type is None:
return [x.value for x in rv]
result = []
for item in rv:
try:
result.append(type(item.value))
except (ValueError, TypeError):
pass
return result
def setlist(self, key: K, new_list: cabc.Iterable[V]) -> None:
self.poplist(key)
for value in new_list:
self.add(key, value)
def setlistdefault(self, key: t.Any, default_list: t.Any = None) -> t.NoReturn:
raise TypeError("setlistdefault is unsupported for ordered multi dicts")
def update( # type: ignore[override]
self,
mapping: (
MultiDict[K, V]
| cabc.Mapping[K, V | list[V] | tuple[V, ...] | set[V]]
| cabc.Iterable[tuple[K, V]]
),
) -> None:
for key, value in iter_multi_items(mapping):
self.add(key, value)
def poplist(self, key: K) -> list[V]:
buckets: cabc.Iterable[_omd_bucket[K, V]] = dict.pop(self, key, ()) # type: ignore[arg-type]
for bucket in buckets:
bucket.unlink(self)
return [x.value for x in buckets]
@t.overload
def pop(self, key: K) -> V: ...
@t.overload
def pop(self, key: K, default: V) -> V: ...
@t.overload
def pop(self, key: K, default: T) -> V | T: ...
def pop(
self,
key: K,
default: V | T = _missing, # type: ignore[assignment]
) -> V | T:
buckets: list[_omd_bucket[K, V]]
try:
buckets = dict.pop(self, key) # type: ignore[arg-type]
except KeyError:
if default is not _missing:
return default
raise exceptions.BadRequestKeyError(key) from None
for bucket in buckets:
bucket.unlink(self)
return buckets[0].value
def popitem(self) -> tuple[K, V]:
key: K
buckets: list[_omd_bucket[K, V]]
try:
key, buckets = dict.popitem(self) # type: ignore[arg-type]
except KeyError as e:
raise exceptions.BadRequestKeyError(e.args[0]) from None
for bucket in buckets:
bucket.unlink(self)
return key, buckets[0].value
def popitemlist(self) -> tuple[K, list[V]]:
key: K
buckets: list[_omd_bucket[K, V]]
try:
key, buckets = dict.popitem(self) # type: ignore[arg-type]
except KeyError as e:
raise exceptions.BadRequestKeyError(e.args[0]) from None
for bucket in buckets:
bucket.unlink(self)
return key, [x.value for x in buckets]
| _OrderedMultiDict |
python | dagster-io__dagster | python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/ui.py | {
"start": 136,
"end": 2819
} | class ____(typer.Exit):
def __init__(self, message=None, *args, **kwargs):
self.message = message
super().__init__(*args, **kwargs)
def as_code(text: str) -> str:
"""Returns the given text colored and in backticks."""
return typer.style(f"`{text}`", fg=typer.colors.CYAN)
def red(text: str) -> str:
return typer.style(text, fg=typer.colors.RED)
def blue(text: str) -> str:
return typer.style(text, fg=typer.colors.BLUE)
def yellow(text: str) -> str:
return typer.style(text, fg=typer.colors.YELLOW)
def green(text: str) -> str:
return typer.style(text, fg=typer.colors.GREEN)
def warn(message: str) -> None:
"""Prints a warning message."""
print(f"{yellow('Warning:')} {message}")
def error(message: str, code: int = 1) -> Exception:
"""Prints an error message and returns an exception."""
print(f"{red('Error:')} {message}")
return ExitWithMessage(message=message, code=code)
def censor_token(token: str) -> str:
return ("*" * (len(token) - 6)) + token[-6:]
def list_input(
prompt: str,
choices: list[Any],
default: Optional[Union[str, questionary.Choice, dict[str, Any]]] = None,
) -> str:
"""Presents the user with a list of choices that can be navigated with
the up and down arrows.
"""
return questionary.select(prompt, choices=choices, default=default).unsafe_ask()
def choice(value: Any, name: str) -> Any:
return questionary.Choice(title=name, value=value)
def input(prompt: str, default: str = "", validate: Optional[Callable] = None) -> str: # noqa: A001
"""Prompts the user for text input."""
return questionary.text(prompt, default=default, validate=validate).unsafe_ask()
def password_input(prompt: str, default: str = "") -> str:
"""Prompts the user for text input, hidden by dots."""
return questionary.password(prompt, default=default).unsafe_ask()
def print(*args, **kwargs): # noqa: A001
"""Echos text to the console."""
return typer.echo(*args, **kwargs)
def print_json(data: Any):
return typer.echo(json.dumps(data, indent=2))
def print_yaml(data: Any):
return typer.echo(yaml.dump(data))
def erase_previous_line(number_of_lines: int = 1) -> None:
"""Erases the previous line of output, which can then be printed over."""
return typer.echo("\033[F\033[K" * number_of_lines + "\033[F")
def confirm(prompt: str, default: bool = True) -> bool:
"""Asks the user to respond yes or no to the prompt."""
return questionary.confirm(prompt, default=default).unsafe_ask()
def bool_input(prompt: str, default: bool = False) -> bool:
return confirm(prompt, default=default)
| ExitWithMessage |
python | sympy__sympy | sympy/plotting/pygletplot/plot_surface.py | {
"start": 118,
"end": 3803
} | class ____(PlotModeBase):
default_rot_preset = 'perspective'
def _on_calculate_verts(self):
self.u_interval = self.intervals[0]
self.u_set = list(self.u_interval.frange())
self.v_interval = self.intervals[1]
self.v_set = list(self.v_interval.frange())
self.bounds = [[S.Infinity, S.NegativeInfinity, 0],
[S.Infinity, S.NegativeInfinity, 0],
[S.Infinity, S.NegativeInfinity, 0]]
evaluate = self._get_evaluator()
self._calculating_verts_pos = 0.0
self._calculating_verts_len = float(
self.u_interval.v_len*self.v_interval.v_len)
verts = []
b = self.bounds
for u in self.u_set:
column = []
for v in self.v_set:
try:
_e = evaluate(u, v) # calculate vertex
except ZeroDivisionError:
_e = None
if _e is not None: # update bounding box
for axis in range(3):
b[axis][0] = min([b[axis][0], _e[axis]])
b[axis][1] = max([b[axis][1], _e[axis]])
column.append(_e)
self._calculating_verts_pos += 1.0
verts.append(column)
for axis in range(3):
b[axis][2] = b[axis][1] - b[axis][0]
if b[axis][2] == 0.0:
b[axis][2] = 1.0
self.verts = verts
self.push_wireframe(self.draw_verts(False, False))
self.push_solid(self.draw_verts(False, True))
def _on_calculate_cverts(self):
if not self.verts or not self.color:
return
def set_work_len(n):
self._calculating_cverts_len = float(n)
def inc_work_pos():
self._calculating_cverts_pos += 1.0
set_work_len(1)
self._calculating_cverts_pos = 0
self.cverts = self.color.apply_to_surface(self.verts,
self.u_set,
self.v_set,
set_len=set_work_len,
inc_pos=inc_work_pos)
self.push_solid(self.draw_verts(True, True))
def calculate_one_cvert(self, u, v):
vert = self.verts[u][v]
return self.color(vert[0], vert[1], vert[2],
self.u_set[u], self.v_set[v])
def draw_verts(self, use_cverts, use_solid_color):
def f():
for u in range(1, len(self.u_set)):
pgl.glBegin(pgl.GL_QUAD_STRIP)
for v in range(len(self.v_set)):
pa = self.verts[u - 1][v]
pb = self.verts[u][v]
if pa is None or pb is None:
pgl.glEnd()
pgl.glBegin(pgl.GL_QUAD_STRIP)
continue
if use_cverts:
ca = self.cverts[u - 1][v]
cb = self.cverts[u][v]
if ca is None:
ca = (0, 0, 0)
if cb is None:
cb = (0, 0, 0)
else:
if use_solid_color:
ca = cb = self.default_solid_color
else:
ca = cb = self.default_wireframe_color
pgl.glColor3f(*ca)
pgl.glVertex3f(*pa)
pgl.glColor3f(*cb)
pgl.glVertex3f(*pb)
pgl.glEnd()
return f
| PlotSurface |
python | eventlet__eventlet | tests/greendns_test.py | {
"start": 609,
"end": 7735
} | class ____(tests.LimitedTestCase):
def test_default_fname(self):
hr = greendns.HostsResolver()
assert os.path.exists(hr.fname)
def test_readlines_lines(self):
hr = _make_host_resolver()
hr.hosts.write(b'line0\n')
hr.hosts.flush()
assert list(hr._readlines()) == ['line0']
hr._last_stat = 0
hr.hosts.write(b'line1\n')
hr.hosts.flush()
assert list(hr._readlines()) == ['line0', 'line1']
# Test reading of varied newline styles
hr._last_stat = 0
hr.hosts.seek(0)
hr.hosts.truncate()
hr.hosts.write(b'\naa\r\nbb\r cc \n\n\tdd ee')
hr.hosts.flush()
assert list(hr._readlines()) == ['aa', 'bb', 'cc', 'dd ee']
# Test comments, including inline comments
hr._last_stat = 0
hr.hosts.seek(0)
hr.hosts.truncate()
hr.hosts.write(b'''\
# First couple lines
# are comments.
line1
#comment
line2 # inline comment
''')
hr.hosts.flush()
assert list(hr._readlines()) == ['line1', 'line2']
def test_readlines_missing_file(self):
hr = _make_host_resolver()
hr.hosts.close()
hr._last_stat = 0
assert list(hr._readlines()) == []
def test_load_no_contents(self):
hr = _make_host_resolver()
hr._load()
assert not hr._v4
assert not hr._v6
assert not hr._aliases
def test_load_v4_v6_cname_aliases(self):
hr = _make_host_resolver()
hr.hosts.write(b'1.2.3.4 v4.example.com v4\n'
b'dead:beef::1 v6.example.com v6\n')
hr.hosts.flush()
hr._load()
assert hr._v4 == {'v4.example.com': '1.2.3.4', 'v4': '1.2.3.4'}
assert hr._v6 == {'v6.example.com': 'dead:beef::1',
'v6': 'dead:beef::1'}
assert hr._aliases == {'v4': 'v4.example.com',
'v6': 'v6.example.com'}
def test_load_v6_link_local(self):
hr = _make_host_resolver()
hr.hosts.write(b'fe80:: foo\n'
b'fe80:dead:beef::1 bar\n')
hr.hosts.flush()
hr._load()
assert not hr._v4
assert not hr._v6
def test_query_A(self):
hr = _make_host_resolver()
hr._v4 = {'v4.example.com': '1.2.3.4'}
ans = hr.query('v4.example.com')
assert ans[0].address == '1.2.3.4'
ans = hr.query('v4.example.com')
assert ans[0].address == '1.2.3.4'
ans = hr.query(b'v4.example.com')
assert ans[0].address == '1.2.3.4'
def test_query_ans_types(self):
# This assumes test_query_A above succeeds
hr = _make_host_resolver()
hr._v4 = {'v4.example.com': '1.2.3.4'}
hr._last_stat = time.time()
ans = hr.query('v4.example.com')
assert isinstance(ans, greendns.dns.resolver.Answer)
assert ans.response is None
assert ans.qname == dns.name.from_text('v4.example.com')
assert ans.rdtype == dns.rdatatype.A
assert ans.rdclass == dns.rdataclass.IN
assert ans.canonical_name == dns.name.from_text('v4.example.com')
assert ans.expiration
assert isinstance(ans.rrset, dns.rrset.RRset)
assert ans.rrset.rdtype == dns.rdatatype.A
assert ans.rrset.rdclass == dns.rdataclass.IN
ttl = greendns.HOSTS_TTL
assert ttl - 1 <= ans.rrset.ttl <= ttl + 1
rr = ans.rrset[0]
assert isinstance(rr, greendns.dns.rdtypes.IN.A.A)
assert rr.rdtype == dns.rdatatype.A
assert rr.rdclass == dns.rdataclass.IN
assert rr.address == '1.2.3.4'
def test_query_AAAA(self):
hr = _make_host_resolver()
hr._v6 = {'v6.example.com': 'dead:beef::1'}
ans = hr.query('v6.example.com', dns.rdatatype.AAAA)
assert ans[0].address == 'dead:beef::1'
def test_query_unknown_raises(self):
hr = _make_host_resolver()
with tests.assert_raises(greendns.dns.resolver.NoAnswer):
hr.query('example.com')
def test_query_unknown_no_raise(self):
hr = _make_host_resolver()
ans = hr.query('example.com', raise_on_no_answer=False)
assert isinstance(ans, greendns.dns.resolver.Answer)
assert ans.response is None
assert ans.qname == dns.name.from_text('example.com')
assert ans.rdtype == dns.rdatatype.A
assert ans.rdclass == dns.rdataclass.IN
assert ans.canonical_name == dns.name.from_text('example.com')
assert ans.expiration
assert isinstance(ans.rrset, greendns.dns.rrset.RRset)
assert ans.rrset.rdtype == dns.rdatatype.A
assert ans.rrset.rdclass == dns.rdataclass.IN
assert len(ans.rrset) == 0
def test_query_CNAME(self):
hr = _make_host_resolver()
hr._aliases = {'host': 'host.example.com'}
ans = hr.query('host', dns.rdatatype.CNAME)
assert ans[0].target == dns.name.from_text('host.example.com')
assert str(ans[0].target) == 'host.example.com.'
def test_query_unknown_type(self):
hr = _make_host_resolver()
with tests.assert_raises(greendns.dns.resolver.NoAnswer):
hr.query('example.com', dns.rdatatype.MX)
def test_getaliases(self):
hr = _make_host_resolver()
hr._aliases = {'host': 'host.example.com',
'localhost': 'host.example.com'}
res = set(hr.getaliases('host'))
assert res == {'host.example.com', 'localhost'}
def test_getaliases_unknown(self):
hr = _make_host_resolver()
assert hr.getaliases('host.example.com') == []
def test_getaliases_fqdn(self):
hr = _make_host_resolver()
hr._aliases = {'host': 'host.example.com'}
res = set(hr.getaliases('host.example.com'))
assert res == {'host'}
def test_hosts_case_insensitive(self):
name = 'example.com'
hr = _make_host_resolver()
hr.hosts.write(b'1.2.3.4 ExAmPlE.CoM\n')
hr.hosts.flush()
hr._load()
ans = hr.query(name)
rr = ans.rrset[0]
assert isinstance(rr, greendns.dns.rdtypes.IN.A.A)
assert rr.rdtype == dns.rdatatype.A
assert rr.rdclass == dns.rdataclass.IN
assert rr.address == '1.2.3.4'
def _make_mock_base_resolver():
"""A mocked base resolver class"""
class RR:
pass
class Resolver:
aliases = ['cname.example.com']
raises = None
rr = RR()
rr6 = RR()
def query(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
if self.raises:
raise self.raises()
if hasattr(self, 'rrset'):
rrset = self.rrset
else:
if self.rr6 and self.args[1] == dns.rdatatype.AAAA:
rrset = [self.rr6]
else:
rrset = [self.rr]
return greendns.HostsAnswer('foo', 1, 1, rrset, False)
def getaliases(self, *args, **kwargs):
return self.aliases
return Resolver
| TestHostsResolver |
python | jina-ai__jina | tests/integration/runtime_signal_handling/test_runtime_captures_signals.py | {
"start": 385,
"end": 2515
} | class ____(Executor):
def __init__(self, dir=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dir = dir
self.request_count = 0
@requests
def slow_count(self, **kwargs):
time.sleep(0.5)
self.request_count += 1
def close(self):
super().close()
with open(f'{self.dir}/test.txt', 'w', encoding='utf-8') as fp:
fp.write(f'proper close;{self.request_count}')
def _create_test_data_message():
req = list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
return req
@pytest.mark.parametrize('signal', [signal.SIGTERM, signal.SIGINT])
def test_executor_runtimes(signal, tmpdir):
import time
args = _generate_pod_args()
def run(args):
args.uses = {
'jtype': 'DummyExecutor',
'with': {'dir': str(tmpdir)},
'metas': {'workspace': str(tmpdir)},
}
executor_native(args)
process = multiprocessing.Process(target=run, args=(args,))
process.start()
time.sleep(0.5)
send_request_sync(_create_test_data_message(), target=f'{args.host}:{args.port[0]}')
time.sleep(0.1)
os.kill(process.pid, signal)
process.join()
with open(f'{tmpdir}/test.txt', 'r', encoding='utf-8') as fp:
output = fp.read()
split = output.split(';')
assert split[0] == 'proper close'
assert split[1] == '1'
@pytest.mark.parametrize('signal', [signal.SIGTERM, signal.SIGINT])
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_gateway(signal, protocol):
import time
def run():
args = set_gateway_parser().parse_args(
[
'--protocol',
protocol,
'--graph-description',
'{}',
'--deployments-addresses',
'{}',
]
)
gateway(args)
process = multiprocessing.Process(target=run)
process.start()
time.sleep(0.5)
os.kill(process.pid, signal)
process.join()
| DummyExecutor |
python | crytic__slither | slither/__main__.py | {
"start": 24342,
"end": 33904
} | class ____(logging.Formatter):
def format(self, record: logging.LogRecord) -> str:
# for i, msg in enumerate(record.msg):
if record.msg.startswith("Compilation warnings/errors on "):
txt = record.args[1] # type:ignore
txt = txt.split("\n") # type:ignore
txt = [red(x) if "Error" in x else x for x in txt]
txt = "\n".join(txt)
record.args = (record.args[0], txt) # type:ignore
return super().format(record)
# endregion
###################################################################################
###################################################################################
# region Main
###################################################################################
###################################################################################
def main() -> None:
# Codebase with complex domninators can lead to a lot of SSA recursive call
sys.setrecursionlimit(1500)
detectors, printers = get_detectors_and_printers()
main_impl(all_detector_classes=detectors, all_printer_classes=printers)
# pylint: disable=too-many-statements,too-many-branches,too-many-locals
def main_impl(
all_detector_classes: List[Type[AbstractDetector]],
all_printer_classes: List[Type[AbstractPrinter]],
) -> None:
"""
:param all_detector_classes: A list of all detectors that can be included/excluded.
:param all_printer_classes: A list of all printers that can be included.
"""
# Set logger of Slither to info, to catch warnings related to the arg parsing
logger.setLevel(logging.INFO)
args = parse_args(all_detector_classes, all_printer_classes)
cp: Optional[cProfile.Profile] = None
if args.perf:
cp = cProfile.Profile()
cp.enable()
# Set colorization option
set_colorization_enabled(False if args.disable_color else sys.stdout.isatty())
# Define some variables for potential JSON output
json_results: Dict[str, Any] = {}
output_error = None
outputting_json = args.json is not None
outputting_json_stdout = args.json == "-"
outputting_sarif = args.sarif is not None
outputting_sarif_stdout = args.sarif == "-"
outputting_zip = args.zip is not None
if args.zip_type not in ZIP_TYPES_ACCEPTED:
to_log = f'Zip type not accepted, it must be one of {",".join(ZIP_TYPES_ACCEPTED.keys())}'
logger.error(to_log)
# If we are outputting JSON, capture all standard output. If we are outputting to stdout, we block typical stdout
# output.
if outputting_json or outputting_sarif:
StandardOutputCapture.enable(outputting_json_stdout or outputting_sarif_stdout)
printer_classes = choose_printers(args, all_printer_classes)
detector_classes = choose_detectors(args, all_detector_classes)
default_log = logging.INFO if not args.debug else logging.DEBUG
for (l_name, l_level) in [
("Slither", default_log),
("Contract", default_log),
("Function", default_log),
("Node", default_log),
("Parsing", default_log),
("Detectors", default_log),
("FunctionSolc", default_log),
("ExpressionParsing", default_log),
("TypeParsing", default_log),
("SSA_Conversion", default_log),
("Printers", default_log),
# ('CryticCompile', default_log)
]:
logger_level = logging.getLogger(l_name)
logger_level.setLevel(l_level)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(FormatterCryticCompile())
crytic_compile_error = logging.getLogger(("CryticCompile"))
crytic_compile_error.addHandler(console_handler)
crytic_compile_error.propagate = False
crytic_compile_error.setLevel(logging.INFO)
results_detectors: List[Dict] = []
results_printers: List[Output] = []
try:
filename = args.filename
# Determine if we are handling ast from solc
if args.solc_ast or (filename.endswith(".json") and not is_supported(filename)):
globbed_filenames = glob.glob(filename, recursive=True)
filenames = glob.glob(os.path.join(filename, "*.json"))
if not filenames:
filenames = globbed_filenames
number_contracts = 0
slither_instances = []
for filename in filenames:
(
slither_instance,
results_detectors_tmp,
results_printers_tmp,
number_contracts_tmp,
) = process_single(filename, args, detector_classes, printer_classes)
number_contracts += number_contracts_tmp
results_detectors += results_detectors_tmp
results_printers += results_printers_tmp
slither_instances.append(slither_instance)
# Rely on CryticCompile to discern the underlying type of compilations.
else:
(
slither_instances,
results_detectors,
results_printers,
number_contracts,
) = process_all(filename, args, detector_classes, printer_classes)
# Determine if we are outputting JSON
if outputting_json or outputting_zip or output_to_sarif:
# Add our compilation information to JSON
if "compilations" in args.json_types:
compilation_results = []
for slither_instance in slither_instances:
assert slither_instance.crytic_compile
compilation_results.append(
generate_standard_export(slither_instance.crytic_compile)
)
json_results["compilations"] = compilation_results
# Add our detector results to JSON if desired.
if results_detectors and "detectors" in args.json_types:
json_results["detectors"] = results_detectors
# Add our printer results to JSON if desired.
if results_printers and "printers" in args.json_types:
json_results["printers"] = results_printers
# Add our detector types to JSON
if "list-detectors" in args.json_types:
detectors, _ = get_detectors_and_printers()
json_results["list-detectors"] = output_detectors_json(detectors)
# Add our detector types to JSON
if "list-printers" in args.json_types:
_, printers = get_detectors_and_printers()
json_results["list-printers"] = output_printers_json(printers)
# Output our results to markdown if we wish to compile a checklist.
if args.checklist:
output_results_to_markdown(
results_detectors, args.checklist_limit, args.show_ignored_findings
)
# Don't print the number of result for printers
if number_contracts == 0:
logger.warning(red("No contract was analyzed"))
if printer_classes:
logger.info("%s analyzed (%d contracts)", filename, number_contracts)
else:
logger.info(
"%s analyzed (%d contracts with %d detectors), %d result(s) found",
filename,
number_contracts,
len(detector_classes),
len(results_detectors),
)
except SlitherException as slither_exception:
output_error = str(slither_exception)
traceback.print_exc()
logging.error(red("Error:"))
logging.error(red(output_error))
logging.error("Please report an issue to https://github.com/crytic/slither/issues")
# If we are outputting JSON, capture the redirected output and disable the redirect to output the final JSON.
if outputting_json:
if "console" in args.json_types:
json_results["console"] = {
"stdout": StandardOutputCapture.get_stdout_output(),
"stderr": StandardOutputCapture.get_stderr_output(),
}
StandardOutputCapture.disable()
output_to_json(None if outputting_json_stdout else args.json, output_error, json_results)
if outputting_sarif:
StandardOutputCapture.disable()
output_to_sarif(
None if outputting_sarif_stdout else args.sarif, json_results, detector_classes
)
if outputting_zip:
output_to_zip(args.zip, output_error, json_results, args.zip_type)
if args.perf and cp:
cp.disable()
stats = pstats.Stats(cp).sort_stats("cumtime")
stats.print_stats()
fail_on = FailOnLevel(args.fail_on)
if fail_on == FailOnLevel.HIGH:
fail_on_detection = any(result["impact"] == "High" for result in results_detectors)
elif fail_on == FailOnLevel.MEDIUM:
fail_on_detection = any(
result["impact"] in ["Medium", "High"] for result in results_detectors
)
elif fail_on == FailOnLevel.LOW:
fail_on_detection = any(
result["impact"] in ["Low", "Medium", "High"] for result in results_detectors
)
elif fail_on == FailOnLevel.PEDANTIC:
fail_on_detection = bool(results_detectors)
else:
fail_on_detection = False
# Exit with them appropriate status code
if output_error or fail_on_detection:
sys.exit(-1)
else:
sys.exit(0)
if __name__ == "__main__":
main()
# endregion
| FormatterCryticCompile |
python | huggingface__transformers | src/transformers/models/evolla/modeling_evolla.py | {
"start": 19325,
"end": 20460
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([EvollaSaProtLayer(config) for _ in range(config.num_hidden_layers)])
self.emb_layer_norm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.gradient_checkpointing = False
@can_return_tuple
def forward(
self,
hidden_states,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
**kwargs: Unpack[TransformersKwargs],
):
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
**kwargs,
)
if self.emb_layer_norm_after:
hidden_states = self.emb_layer_norm_after(hidden_states)
return BaseModelOutputWithCrossAttentions(last_hidden_state=hidden_states)
| EvollaSaProtEncoder |
python | pytorch__pytorch | torch/fx/experimental/migrate_gradual_types/constraint.py | {
"start": 16240,
"end": 17089
} | class ____:
"""
Boolean variable
"""
def __init__(self, c):
"""
:param c: character or number
"""
self.c = c
def __repr__(self):
return f"BV({self.c})"
def __eq__(self, other):
if isinstance(other, BVar):
return self.c == other.c
else:
return False
def is_algebraic_expression(constraint):
if isinstance(constraint, BinConstraintD):
return constraint.op in [op_add, op_sub, op_div, op_mul, op_mod]
else:
return isinstance(constraint, Prod)
def is_bool_expr(constraint):
if isinstance(constraint, BinConstraintD):
return constraint.op in [op_gt, op_lt, op_neq, op_eq]
else:
return isinstance(constraint, (BVar, Conj, Disj))
def is_dim(d):
return isinstance(d, (DVar, int)) or d == Dyn
| BVar |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-iterable/source_iterable/streams.py | {
"start": 16093,
"end": 16178
} | class ____(IterableExportStreamAdjustableRange):
data_field = "emailSend"
| EmailSend |
python | celery__celery | t/unit/app/test_schedules.py | {
"start": 19208,
"end": 39088
} | class ____:
def setup_method(self):
self.now = self.app.now()
self.next_minute = 60 - self.now.second - 1e-6 * self.now.microsecond
self.every_minute = self.crontab()
self.quarterly = self.crontab(minute='*/15')
self.hourly = self.crontab(minute=30)
self.daily = self.crontab(hour=7, minute=30)
self.weekly = self.crontab(hour=7, minute=30, day_of_week='thursday')
self.monthly = self.crontab(
hour=7, minute=30, day_of_week='thursday', day_of_month='8-14',
)
self.monthly_moy = self.crontab(
hour=22, day_of_week='*', month_of_year='2',
day_of_month='26,27,28',
)
self.yearly = self.crontab(
hour=7, minute=30, day_of_week='thursday',
day_of_month='8-14', month_of_year=3,
)
def crontab(self, *args, **kwargs):
return crontab(*args, app=self.app, **kwargs)
def test_default_crontab_spec(self):
c = self.crontab()
assert c.minute == set(range(60))
assert c.hour == set(range(24))
assert c.day_of_week == set(range(7))
assert c.day_of_month == set(range(1, 32))
assert c.month_of_year == set(range(1, 13))
def test_simple_crontab_spec(self):
c = self.crontab(minute=30)
assert c.minute == {30}
assert c.hour == set(range(24))
assert c.day_of_week == set(range(7))
assert c.day_of_month == set(range(1, 32))
assert c.month_of_year == set(range(1, 13))
@pytest.mark.parametrize('minute,expected', [
(30, {30}),
('30', {30}),
((30, 40, 50), {30, 40, 50}),
((30, 40, 50, 51), {30, 40, 50, 51})
])
def test_crontab_spec_minute_formats(self, minute, expected):
c = self.crontab(minute=minute)
assert c.minute == expected
@pytest.mark.parametrize('minute', [60, '0-100'])
def test_crontab_spec_invalid_minute(self, minute):
with pytest.raises(ValueError):
self.crontab(minute=minute)
@pytest.mark.parametrize('hour,expected', [
(6, {6}),
('5', {5}),
((4, 8, 12), {4, 8, 12}),
])
def test_crontab_spec_hour_formats(self, hour, expected):
c = self.crontab(hour=hour)
assert c.hour == expected
@pytest.mark.parametrize('hour', [24, '0-30'])
def test_crontab_spec_invalid_hour(self, hour):
with pytest.raises(ValueError):
self.crontab(hour=hour)
@pytest.mark.parametrize('day_of_week,expected', [
(5, {5}),
('5', {5}),
('fri', {5}),
('tuesday,sunday,fri', {0, 2, 5}),
('mon-fri', {1, 2, 3, 4, 5}),
('*/2', {0, 2, 4, 6}),
])
def test_crontab_spec_dow_formats(self, day_of_week, expected):
c = self.crontab(day_of_week=day_of_week)
assert c.day_of_week == expected
@pytest.mark.parametrize('day_of_week', [
'fooday-barday', '1,4,foo', '7', '12',
])
def test_crontab_spec_invalid_dow(self, day_of_week):
with pytest.raises(ValueError):
self.crontab(day_of_week=day_of_week)
@pytest.mark.parametrize('day_of_month,expected', [
(5, {5}),
('5', {5}),
('2,4,6', {2, 4, 6}),
('*/5', {1, 6, 11, 16, 21, 26, 31}),
])
def test_crontab_spec_dom_formats(self, day_of_month, expected):
c = self.crontab(day_of_month=day_of_month)
assert c.day_of_month == expected
@pytest.mark.parametrize('day_of_month', [0, '0-10', 32, '31,32'])
def test_crontab_spec_invalid_dom(self, day_of_month):
with pytest.raises(ValueError):
self.crontab(day_of_month=day_of_month)
@pytest.mark.parametrize('month_of_year,expected', [
(1, {1}),
('1', {1}),
('feb', {2}),
('Mar', {3}),
('april', {4}),
('may,jun,jul', {5, 6, 7}),
('aug-oct', {8, 9, 10}),
('2,4,6', {2, 4, 6}),
('*/2', {1, 3, 5, 7, 9, 11}),
('2-12/2', {2, 4, 6, 8, 10, 12}),
])
def test_crontab_spec_moy_formats(self, month_of_year, expected):
c = self.crontab(month_of_year=month_of_year)
assert c.month_of_year == expected
@pytest.mark.parametrize('month_of_year', [0, '0-5', 13, '12,13', 'jaan', 'sebtember'])
def test_crontab_spec_invalid_moy(self, month_of_year):
with pytest.raises(ValueError):
self.crontab(month_of_year=month_of_year)
def seconds_almost_equal(self, a, b, precision):
for index, skew in enumerate((+1, -1, 0)):
try:
assertions.assertAlmostEqual(a, b + skew, precision)
except Exception as exc:
# AssertionError != builtins.AssertionError in pytest
if 'AssertionError' in str(exc):
if index + 1 >= 3:
raise
else:
break
def test_every_minute_execution_is_due(self):
last_ran = self.now - timedelta(seconds=61)
due, remaining = self.every_minute.is_due(last_ran)
self.assert_relativedelta(self.every_minute, last_ran)
assert due
self.seconds_almost_equal(remaining, self.next_minute, 1)
def assert_relativedelta(self, due, last_ran):
try:
from dateutil.relativedelta import relativedelta
except ImportError:
return
l1, d1, n1 = due.remaining_delta(last_ran)
l2, d2, n2 = due.remaining_delta(last_ran, ffwd=relativedelta)
if not isinstance(d1, relativedelta):
assert l1 == l2
for field, value in d1._fields().items():
assert getattr(d1, field) == value
assert not d2.years
assert not d2.months
assert not d2.days
assert not d2.leapdays
assert not d2.hours
assert not d2.minutes
assert not d2.seconds
assert not d2.microseconds
def test_every_minute_execution_is_not_due(self):
last_ran = self.now - timedelta(seconds=self.now.second)
due, remaining = self.every_minute.is_due(last_ran)
assert not due
self.seconds_almost_equal(remaining, self.next_minute, 1)
def test_execution_is_due_on_saturday(self):
# 29th of May 2010 is a saturday
with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 29, 10, 30)):
last_ran = self.now - timedelta(seconds=61)
due, remaining = self.every_minute.is_due(last_ran)
assert due
self.seconds_almost_equal(remaining, self.next_minute, 1)
def test_execution_is_due_on_sunday(self):
# 30th of May 2010 is a sunday
with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 30, 10, 30)):
last_ran = self.now - timedelta(seconds=61)
due, remaining = self.every_minute.is_due(last_ran)
assert due
self.seconds_almost_equal(remaining, self.next_minute, 1)
def test_execution_is_due_on_monday(self):
# 31st of May 2010 is a monday
with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 31, 10, 30)):
last_ran = self.now - timedelta(seconds=61)
due, remaining = self.every_minute.is_due(last_ran)
assert due
self.seconds_almost_equal(remaining, self.next_minute, 1)
def test_every_hour_execution_is_due(self):
with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 10, 10, 30)):
due, remaining = self.hourly.is_due(datetime(2010, 5, 10, 6, 30))
assert due
assert remaining == 60 * 60
def test_every_hour_execution_is_not_due(self):
with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 10, 10, 29)):
due, remaining = self.hourly.is_due(datetime(2010, 5, 10, 9, 30))
assert not due
assert remaining == 60
def test_first_quarter_execution_is_due(self):
with patch_crontab_nowfun(
self.quarterly, datetime(2010, 5, 10, 10, 15)):
due, remaining = self.quarterly.is_due(
datetime(2010, 5, 10, 6, 30),
)
assert due
assert remaining == 15 * 60
def test_second_quarter_execution_is_due(self):
with patch_crontab_nowfun(
self.quarterly, datetime(2010, 5, 10, 10, 30)):
due, remaining = self.quarterly.is_due(
datetime(2010, 5, 10, 6, 30),
)
assert due
assert remaining == 15 * 60
def test_first_quarter_execution_is_not_due(self):
with patch_crontab_nowfun(
self.quarterly, datetime(2010, 5, 10, 10, 14)):
due, remaining = self.quarterly.is_due(
datetime(2010, 5, 10, 10, 0),
)
assert not due
assert remaining == 60
def test_second_quarter_execution_is_not_due(self):
with patch_crontab_nowfun(
self.quarterly, datetime(2010, 5, 10, 10, 29)):
due, remaining = self.quarterly.is_due(
datetime(2010, 5, 10, 10, 15),
)
assert not due
assert remaining == 60
def test_daily_execution_is_due(self):
with patch_crontab_nowfun(self.daily, datetime(2010, 5, 10, 7, 30)):
due, remaining = self.daily.is_due(datetime(2010, 5, 9, 7, 30))
assert due
assert remaining == 24 * 60 * 60
def test_daily_execution_is_not_due(self):
with patch_crontab_nowfun(self.daily, datetime(2010, 5, 10, 10, 30)):
due, remaining = self.daily.is_due(datetime(2010, 5, 10, 7, 30))
assert not due
assert remaining == 21 * 60 * 60
def test_weekly_execution_is_due(self):
with patch_crontab_nowfun(self.weekly, datetime(2010, 5, 6, 7, 30)):
due, remaining = self.weekly.is_due(datetime(2010, 4, 30, 7, 30))
assert due
assert remaining == 7 * 24 * 60 * 60
def test_weekly_execution_is_not_due(self):
with patch_crontab_nowfun(self.weekly, datetime(2010, 5, 7, 10, 30)):
due, remaining = self.weekly.is_due(datetime(2010, 5, 6, 7, 30))
assert not due
assert remaining == 6 * 24 * 60 * 60 - 3 * 60 * 60
def test_monthly_execution_is_due(self):
with patch_crontab_nowfun(self.monthly, datetime(2010, 5, 13, 7, 30)):
due, remaining = self.monthly.is_due(datetime(2010, 4, 8, 7, 30))
assert due
assert remaining == 28 * 24 * 60 * 60
def test_monthly_execution_is_not_due(self):
with patch_crontab_nowfun(self.monthly, datetime(2010, 5, 9, 10, 30)):
due, remaining = self.monthly.is_due(datetime(2010, 4, 8, 7, 30))
assert not due
assert remaining == 4 * 24 * 60 * 60 - 3 * 60 * 60
def test_monthly_moy_execution_is_due(self):
with patch_crontab_nowfun(
self.monthly_moy, datetime(2014, 2, 26, 22, 0)):
due, remaining = self.monthly_moy.is_due(
datetime(2013, 7, 4, 10, 0),
)
assert due
assert remaining == 60.0
@pytest.mark.skip('TODO: unstable test')
def test_monthly_moy_execution_is_not_due(self):
with patch_crontab_nowfun(
self.monthly_moy, datetime(2013, 6, 28, 14, 30)):
due, remaining = self.monthly_moy.is_due(
datetime(2013, 6, 28, 22, 14),
)
assert not due
attempt = (
time.mktime(datetime(2014, 2, 26, 22, 0).timetuple()) -
time.mktime(datetime(2013, 6, 28, 14, 30).timetuple()) -
60 * 60
)
assert remaining == attempt
def test_monthly_moy_execution_is_due2(self):
with patch_crontab_nowfun(
self.monthly_moy, datetime(2014, 2, 26, 22, 0)):
due, remaining = self.monthly_moy.is_due(
datetime(2013, 2, 28, 10, 0),
)
assert due
assert remaining == 60.0
def test_monthly_moy_execution_is_not_due2(self):
with patch_crontab_nowfun(
self.monthly_moy, datetime(2014, 2, 26, 21, 0)):
due, remaining = self.monthly_moy.is_due(
datetime(2013, 6, 28, 22, 14),
)
assert not due
attempt = 60 * 60
assert remaining == attempt
def test_yearly_execution_is_due(self):
with patch_crontab_nowfun(self.yearly, datetime(2010, 3, 11, 7, 30)):
due, remaining = self.yearly.is_due(datetime(2009, 3, 12, 7, 30))
assert due
assert remaining == 364 * 24 * 60 * 60
def test_yearly_execution_is_not_due(self):
with patch_crontab_nowfun(self.yearly, datetime(2010, 3, 7, 10, 30)):
due, remaining = self.yearly.is_due(datetime(2009, 3, 12, 7, 30))
assert not due
assert remaining == 4 * 24 * 60 * 60 - 3 * 60 * 60
def test_execution_not_due_if_task_not_run_at_last_feasible_time_outside_deadline(
self):
"""If the crontab schedule was added after the task was due, don't
immediately fire the task again"""
# could have feasibly been run on 12/5 at 7:30, but wasn't.
self.app.conf.beat_cron_starting_deadline = 3600
last_run = datetime(2022, 12, 4, 10, 30)
now = datetime(2022, 12, 5, 10, 30)
expected_next_execution_time = datetime(2022, 12, 6, 7, 30)
expected_remaining = (
expected_next_execution_time - now).total_seconds()
# Run the daily (7:30) crontab with the current date
with patch_crontab_nowfun(self.daily, now):
due, remaining = self.daily.is_due(last_run)
assert remaining == expected_remaining
assert not due
def test_execution_not_due_if_task_not_run_at_last_feasible_time_no_deadline_set(
self):
"""Same as above test except there's no deadline set, so it should be
due"""
last_run = datetime(2022, 12, 4, 10, 30)
now = datetime(2022, 12, 5, 10, 30)
expected_next_execution_time = datetime(2022, 12, 6, 7, 30)
expected_remaining = (
expected_next_execution_time - now).total_seconds()
# Run the daily (7:30) crontab with the current date
with patch_crontab_nowfun(self.daily, now):
due, remaining = self.daily.is_due(last_run)
assert remaining == expected_remaining
assert due
def test_execution_due_if_task_not_run_at_last_feasible_time_within_deadline(
self):
# Could have feasibly been run on 12/5 at 7:30, but wasn't. We are
# still within a 1 hour deadline from the
# last feasible run, so the task should still be due.
self.app.conf.beat_cron_starting_deadline = 3600
last_run = datetime(2022, 12, 4, 10, 30)
now = datetime(2022, 12, 5, 8, 0)
expected_next_execution_time = datetime(2022, 12, 6, 7, 30)
expected_remaining = (
expected_next_execution_time - now).total_seconds()
# run the daily (7:30) crontab with the current date
with patch_crontab_nowfun(self.daily, now):
due, remaining = self.daily.is_due(last_run)
assert remaining == expected_remaining
assert due
def test_execution_due_if_task_not_run_at_any_feasible_time_within_deadline(
self):
# Could have feasibly been run on 12/4 at 7:30, or 12/5 at 7:30,
# but wasn't. We are still within a 1 hour
# deadline from the last feasible run (12/5), so the task should
# still be due.
self.app.conf.beat_cron_starting_deadline = 3600
last_run = datetime(2022, 12, 3, 10, 30)
now = datetime(2022, 12, 5, 8, 0)
expected_next_execution_time = datetime(2022, 12, 6, 7, 30)
expected_remaining = (
expected_next_execution_time - now).total_seconds()
# Run the daily (7:30) crontab with the current date
with patch_crontab_nowfun(self.daily, now):
due, remaining = self.daily.is_due(last_run)
assert remaining == expected_remaining
assert due
def test_execution_not_due_if_task_not_run_at_any_feasible_time_outside_deadline(
self):
"""Verifies that remaining is still the time to the next
feasible run date even though the original feasible date
was passed over in favor of a newer one."""
# Could have feasibly been run on 12/4 or 12/5 at 7:30,
# but wasn't.
self.app.conf.beat_cron_starting_deadline = 3600
last_run = datetime(2022, 12, 3, 10, 30)
now = datetime(2022, 12, 5, 11, 0)
expected_next_execution_time = datetime(2022, 12, 6, 7, 30)
expected_remaining = (
expected_next_execution_time - now).total_seconds()
# run the daily (7:30) crontab with the current date
with patch_crontab_nowfun(self.daily, now):
due, remaining = self.daily.is_due(last_run)
assert remaining == expected_remaining
assert not due
def test_execution_not_due_if_last_run_in_future(self):
# Should not run if the last_run hasn't happened yet.
last_run = datetime(2022, 12, 6, 7, 30)
now = datetime(2022, 12, 5, 10, 30)
expected_next_execution_time = datetime(2022, 12, 7, 7, 30)
expected_remaining = (
expected_next_execution_time - now).total_seconds()
# Run the daily (7:30) crontab with the current date
with patch_crontab_nowfun(self.daily, now):
due, remaining = self.daily.is_due(last_run)
assert not due
assert remaining == expected_remaining
def test_execution_not_due_if_last_run_at_last_feasible_time(self):
# Last feasible time is 12/5 at 7:30
last_run = datetime(2022, 12, 5, 7, 30)
now = datetime(2022, 12, 5, 10, 30)
expected_next_execution_time = datetime(2022, 12, 6, 7, 30)
expected_remaining = (
expected_next_execution_time - now).total_seconds()
# Run the daily (7:30) crontab with the current date
with patch_crontab_nowfun(self.daily, now):
due, remaining = self.daily.is_due(last_run)
assert remaining == expected_remaining
assert not due
def test_execution_not_due_if_last_run_past_last_feasible_time(self):
# Last feasible time is 12/5 at 7:30
last_run = datetime(2022, 12, 5, 8, 30)
now = datetime(2022, 12, 5, 10, 30)
expected_next_execution_time = datetime(2022, 12, 6, 7, 30)
expected_remaining = (
expected_next_execution_time - now).total_seconds()
# Run the daily (7:30) crontab with the current date
with patch_crontab_nowfun(self.daily, now):
due, remaining = self.daily.is_due(last_run)
assert remaining == expected_remaining
assert not due
def test_execution_due_for_negative_utc_timezone_with_day_of_month(self):
# UTC-8
tzname = "America/Los_Angeles"
self.app.timezone = tzname
tz = ZoneInfo(tzname)
# set day_of_month to test on _delta_to_next
crontab = self.crontab(minute=0, day_of_month='27-31')
# last_run_at: '2023/01/28T23:00:00-08:00'
last_run_at = datetime(2023, 1, 28, 23, 0, tzinfo=tz)
# now: '2023/01/29T00:00:00-08:00'
now = datetime(2023, 1, 29, 0, 0, tzinfo=tz)
with patch_crontab_nowfun(crontab, now):
due, remaining = crontab.is_due(last_run_at)
assert (due, remaining) == (True, 3600)
| test_crontab_is_due |
python | doocs__leetcode | solution/1300-1399/1351.Count Negative Numbers in a Sorted Matrix/Solution2.py | {
"start": 0,
"end": 138
} | class ____:
def countNegatives(self, grid: List[List[int]]) -> int:
return sum(bisect_left(row[::-1], 0) for row in grid)
| Solution |
python | pypa__warehouse | warehouse/legacy/action_routing.py | {
"start": 40,
"end": 937
} | class ____:
def __init__(self, action: str, info):
self.action_name = action
def text(self) -> str:
return f"pypi_action = {self.action_name}"
phash = text
def __call__(self, context, request) -> bool:
return self.action_name == request.params.get(":action", None)
def add_pypi_action_route(config, name, action, **kwargs):
config.add_route(name, "/pypi", pypi_action=action, **kwargs)
def add_pypi_action_redirect(config, action, target, **kwargs):
config.add_redirect("/pypi", target, pypi_action=action, **kwargs)
def includeme(config):
config.add_route_predicate("pypi_action", PyPIActionPredicate)
config.add_directive(
"add_pypi_action_route", add_pypi_action_route, action_wrap=False
)
config.add_directive(
"add_pypi_action_redirect", add_pypi_action_redirect, action_wrap=False
)
| PyPIActionPredicate |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/validators/test_base_workflow.py | {
"start": 741,
"end": 3919
} | class ____(TestCase):
def setUp(self) -> None:
self.context = {
"organization": self.organization,
"request": self.make_request(),
}
self.valid_data = {
"name": "test",
"enabled": True,
"actionFilters": [],
"config": {
"frequency": 30,
},
"triggers": {
"logicType": "any",
"conditions": [],
},
}
def test_valid_data(self) -> None:
validator = WorkflowValidator(data=self.valid_data, context=self.context)
assert validator.is_valid() is True
@mock.patch(
"sentry.workflow_engine.registry.action_handler_registry.get",
return_value=MockActionHandler,
)
@mock.patch(
"sentry.notifications.notification_action.registry.action_validator_registry.get",
return_value=MockActionValidatorTranslator,
)
def test_valid_data__with_action_filters(
self, mock_action_handler: mock.MagicMock, mock_action_validator: mock.MagicMock
) -> None:
self.valid_data["actionFilters"] = [
{
"logicType": "any",
"conditions": [],
"actions": [
{
"type": Action.Type.SLACK,
"config": {"foo": "bar"},
"data": {"baz": "bar"},
"integrationId": self.integration.id,
}
],
}
]
validator = WorkflowValidator(data=self.valid_data, context=self.context)
assert validator.is_valid() is True
@mock.patch(
"sentry.workflow_engine.registry.action_handler_registry.get",
return_value=MockActionHandler,
)
def test_valid_data__with_invalid_action_filters(
self, mock_action_handler: mock.MagicMock
) -> None:
self.valid_data["actionFilters"] = [
{
"logicType": "any",
"conditions": [],
"actions": [
{
"type": Action.Type.SLACK,
"config": {},
"integrationId": self.integration.id,
}
],
}
]
validator = WorkflowValidator(data=self.valid_data, context=self.context)
assert validator.is_valid() is False
def test_invalid_data__no_name(self) -> None:
self.valid_data["name"] = ""
validator = WorkflowValidator(data=self.valid_data, context=self.context)
assert validator.is_valid() is False
def test_invalid_data__incorrect_config(self) -> None:
self.valid_data["config"] = {"foo": "bar"}
validator = WorkflowValidator(data=self.valid_data, context=self.context)
assert validator.is_valid() is False
def test_invalid_data__invalid_trigger(self) -> None:
self.valid_data["triggers"] = {"foo": "bar"}
validator = WorkflowValidator(data=self.valid_data, context=self.context)
assert validator.is_valid() is False
| TestWorkflowValidator |
python | dagster-io__dagster | python_modules/libraries/dagster-omni/dagster_omni_tests/test_translation.py | {
"start": 1874,
"end": 4291
} | class ____(TestTranslation):
def test_translation(
self,
attributes: Mapping[str, Any],
assertion: Callable[[AssetSpec], bool],
key_modifier: Optional[Callable[[dg.AssetKey], dg.AssetKey]],
) -> None:
body = {
"type": "dagster_omni.OmniComponent",
"attributes": {
"workspace": {
"base_url": "https://test.omniapp.co",
"api_key": "test-key",
},
},
}
body["attributes"]["translation"] = attributes
with setup_omni_component(defs_yaml_contents=body) as (_, defs):
specs = [
spec
for spec in defs.get_all_asset_specs()
if "dagster/auto_created_stub_asset" not in spec.metadata
]
assert len(specs) == 1
spec = specs[0]
expected_key = dg.AssetKey(["analytics", "reports", "User Analysis"])
if key_modifier:
expected_key = key_modifier(expected_key)
assert assertion(spec)
assert spec.key == expected_key
def test_per_object_type_translation() -> None:
body = {
"type": "dagster_omni.OmniComponent",
"attributes": {
"workspace": {
"base_url": "https://test.omniapp.co",
"api_key": "test-key",
},
"translation": {
"metadata": {"foo_global": "bar_global", "foo_document": "OVERRIDE_ME"},
"for_document": {
"key_prefix": "document_prefix",
"metadata": {"foo": "bar", "foo_document": "bar_document"},
},
"for_query": {"key_prefix": "query_prefix"},
},
},
}
with setup_omni_component(defs_yaml_contents=body) as (_, defs):
specs = [
spec
for spec in defs.get_all_asset_specs()
if "dagster/auto_created_stub_asset" not in spec.metadata
]
assert len(specs) == 1
spec = specs[0]
assert spec.key == dg.AssetKey(["document_prefix", "analytics", "reports", "User Analysis"])
assert spec.metadata["foo_global"] == "bar_global"
assert spec.metadata["foo_document"] == "bar_document"
assert spec.deps == [dg.AssetDep(dg.AssetKey(["query_prefix", "users"]))]
| TestOmniTranslation |
python | getsentry__sentry | fixtures/safe_migrations_apps/bad_flow_delete_field_pending_with_fk_constraint_app/migrations/0002_delete_without_pending.py | {
"start": 190,
"end": 541
} | class ____(CheckedMigration):
atomic = False
dependencies = [
("bad_flow_delete_field_pending_with_fk_constraint_app", "0001_initial"),
]
operations = [
SafeRemoveField(
model_name="testtable",
name="fk_table",
deletion_action=DeletionAction.MOVE_TO_PENDING,
),
]
| Migration |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.