language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | sphinx-doc__sphinx | sphinx/builders/linkcheck.py | {
"start": 1446,
"end": 1986
} | class ____(StrEnum):
BROKEN = 'broken'
IGNORED = 'ignored'
RATE_LIMITED = 'rate-limited'
REDIRECTED = 'redirected'
TIMEOUT = 'timeout'
UNCHECKED = 'unchecked'
UNKNOWN = 'unknown'
WORKING = 'working'
logger = logging.getLogger(__name__)
# matches to foo:// and // (a protocol relative URL)
uri_re = re.compile('([a-z]+:)?//')
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml;q=0.9,*/*;q=0.8',
}
CHECK_IMMEDIATELY = 0
QUEUE_POLL_SECS = 1
DEFAULT_DELAY = 60.0
@object.__new__
| _Status |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/pubsub.py | {
"start": 27517,
"end": 32447
} | class ____(GoogleCloudBaseOperator):
"""
Publish messages to a PubSub topic.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PubSubPublishMessageOperator`
Each Task publishes all provided messages to the same topic
in a single Google Cloud project. If the topic does not exist, this
task will fail. ::
m1 = {"data": b"Hello, World!", "attributes": {"type": "greeting"}}
m2 = {"data": b"Knock, knock"}
m3 = {"attributes": {"foo": ""}}
m4 = {"data": b"Who's there?", "attributes": {"ordering_key": "knock_knock"}}
t1 = PubSubPublishMessageOperator(
project_id="my-project",
topic="my_topic",
messages=[m1, m2, m3],
create_topic=True,
dag=dag,
)
t2 = PubSubPublishMessageOperator(
project_id="my-project",
topic="my_topic",
messages=[m4],
create_topic=True,
enable_message_ordering=True,
dag=dag,
)
``project_id``, ``topic``, and ``messages`` are templated so you can use Jinja templating
in their values.
:param project_id: Optional, the Google Cloud project ID in which to work (templated).
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param topic: the topic to which to publish. Do not include the
full topic path. In other words, instead of
``projects/{project}/topics/{topic}``, provide only
``{topic}``. (templated)
:param messages: a list of messages to be published to the
topic. Each message is a dict with one or more of the
following keys-value mappings:
* 'data': a bytestring (utf-8 encoded)
* 'attributes': {'key1': 'value1', ...}
Each message must contain at least a non-empty 'data' value
or an attribute dict with at least one key (templated). See
https://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage
:param gcp_conn_id: The connection ID to use connecting to
Google Cloud.
:param enable_message_ordering: If true, messages published with the same
ordering_key in PubsubMessage will be delivered to the subscribers in the order
in which they are received by the Pub/Sub system. Otherwise, they may be
delivered in any order. Default is False.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"project_id",
"topic",
"messages",
"enable_message_ordering",
"impersonation_chain",
)
ui_color = "#0273d4"
def __init__(
self,
*,
topic: str,
messages: list,
project_id: str = PROVIDE_PROJECT_ID,
gcp_conn_id: str = "google_cloud_default",
enable_message_ordering: bool = False,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.topic = topic
self.messages = messages
self.gcp_conn_id = gcp_conn_id
self.enable_message_ordering = enable_message_ordering
self.impersonation_chain = impersonation_chain
@cached_property
def pubsub_hook(self):
return PubSubHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
enable_message_ordering=self.enable_message_ordering,
)
def execute(self, context: Context) -> None:
self.log.info("Publishing to topic %s", self.topic)
self.pubsub_hook.publish(project_id=self.project_id, topic=self.topic, messages=self.messages)
self.log.info("Published to topic %s", self.topic)
def get_openlineage_facets_on_complete(self, _) -> OperatorLineage:
from airflow.providers.common.compat.openlineage.facet import Dataset
from airflow.providers.openlineage.extractors import OperatorLineage
project_id = self.project_id or self.pubsub_hook.project_id
output_dataset = [Dataset(namespace="pubsub", name=f"topic:{project_id}:{self.topic}")]
return OperatorLineage(outputs=output_dataset)
| PubSubPublishMessageOperator |
python | scipy__scipy | scipy/io/tests/test_wavfile.py | {
"start": 12301,
"end": 19094
} | class ____:
def __init__(self, fp):
self.fp = fp
def seekable(self):
return False
def read(self, size=-1, /):
return self.fp.read(size)
def close(self):
self.fp.close()
def test_streams():
for filename in ['test-44100Hz-le-1ch-4bytes.wav',
'test-8000Hz-le-2ch-1byteu.wav',
'test-44100Hz-2ch-32bit-float-le.wav',
'test-44100Hz-2ch-32bit-float-be.wav',
'test-8000Hz-le-5ch-9S-5bit.wav',
'test-8000Hz-le-4ch-9S-12bit.wav',
'test-8000Hz-le-3ch-5S-24bit.wav',
'test-1234Hz-le-1ch-10S-20bit-extra.wav',
'test-8000Hz-le-3ch-5S-36bit.wav',
'test-8000Hz-le-3ch-5S-45bit.wav',
'test-8000Hz-le-3ch-5S-53bit.wav',
'test-8000Hz-le-3ch-5S-64bit.wav',
'test-44100Hz-be-1ch-4bytes.wav', # RIFX
'test-44100Hz-le-1ch-4bytes-rf64.wav']:
dfname = datafile(filename)
with open(dfname, 'rb') as fp1, open(dfname, 'rb') as fp2:
rate1, data1 = wavfile.read(fp1)
rate2, data2 = wavfile.read(Nonseekable(fp2))
rate3, data3 = wavfile.read(dfname, mmap=False)
assert_equal(rate1, rate3)
assert_equal(rate2, rate3)
assert_equal(data1, data3)
assert_equal(data2, data3)
def test_read_unknown_filetype_fail():
# Not an RIFF
for mmap in [False, True]:
filename = 'example_1.nc'
with open(datafile(filename), 'rb') as fp:
with raises(ValueError, match="CDF.*'RIFF', 'RIFX', and 'RF64' supported"):
wavfile.read(fp, mmap=mmap)
def test_read_unknown_riff_form_type():
# RIFF, but not WAVE form
for mmap in [False, True]:
filename = 'Transparent Busy.ani'
with open(datafile(filename), 'rb') as fp:
with raises(ValueError, match='Not a WAV file.*ACON'):
wavfile.read(fp, mmap=mmap)
def test_read_unknown_wave_format():
# RIFF and WAVE, but not supported format
for mmap in [False, True]:
filename = 'test-8000Hz-le-1ch-1byte-ulaw.wav'
with open(datafile(filename), 'rb') as fp:
with raises(ValueError, match='Unknown wave file format.*MULAW.*'
'Supported formats'):
wavfile.read(fp, mmap=mmap)
def test_read_early_eof_with_data():
# File ends inside 'data' chunk, but we keep incomplete data
for mmap in [False, True]:
filename = 'test-44100Hz-le-1ch-4bytes-early-eof.wav'
with open(datafile(filename), 'rb') as fp:
with warns(wavfile.WavFileWarning, match='Reached EOF'):
rate, data = wavfile.read(fp, mmap=mmap)
assert data.size > 0
assert rate == 44100
# also test writing (gh-12176)
data[0] = 0
def test_read_early_eof():
# File ends after 'fact' chunk at boundary, no data read
for mmap in [False, True]:
filename = 'test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav'
with open(datafile(filename), 'rb') as fp:
with raises(ValueError, match="Unexpected end of file."):
wavfile.read(fp, mmap=mmap)
def test_read_incomplete_chunk():
# File ends inside 'fmt ' chunk ID, no data read
for mmap in [False, True]:
filename = 'test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav'
with open(datafile(filename), 'rb') as fp:
with raises(ValueError, match="Incomplete chunk ID.*b'f'"):
wavfile.read(fp, mmap=mmap)
def test_read_inconsistent_header():
# File header's size fields contradict each other
for mmap in [False, True]:
filename = 'test-8000Hz-le-3ch-5S-24bit-inconsistent.wav'
with open(datafile(filename), 'rb') as fp:
with raises(ValueError, match="header is invalid"):
wavfile.read(fp, mmap=mmap)
# signed 8-bit integer PCM is not allowed
# unsigned > 8-bit integer PCM is not allowed
# 8- or 16-bit float PCM is not expected
# g and q are platform-dependent, so not included
@pytest.mark.parametrize("dt_str", ["<i2", "<i4", "<i8", "<f4", "<f8",
">i2", ">i4", ">i8", ">f4", ">f8", '|u1'])
@pytest.mark.parametrize("channels", [1, 2, 5])
@pytest.mark.parametrize("rate", [8000, 32000])
@pytest.mark.parametrize("mmap", [False, True])
@pytest.mark.parametrize("realfile", [False, True])
def test_write_roundtrip(realfile, mmap, rate, channels, dt_str, tmpdir):
dtype = np.dtype(dt_str)
if realfile:
tmpfile = str(tmpdir.join(str(threading.get_native_id()), 'temp.wav'))
os.makedirs(os.path.dirname(tmpfile), exist_ok=True)
else:
tmpfile = BytesIO()
data = np.random.rand(100, channels)
if channels == 1:
data = data[:, 0]
if dtype.kind == 'f':
# The range of the float type should be in [-1, 1]
data = data.astype(dtype)
else:
data = (data*128).astype(dtype)
wavfile.write(tmpfile, rate, data)
rate2, data2 = wavfile.read(tmpfile, mmap=mmap)
assert_equal(rate, rate2)
assert_(data2.dtype.byteorder in ('<', '=', '|'), msg=data2.dtype)
assert_array_equal(data, data2)
# also test writing (gh-12176)
if realfile:
data2[0] = 0
else:
with pytest.raises(ValueError, match='read-only'):
data2[0] = 0
if realfile and mmap and IS_PYPY and sys.platform == 'win32':
# windows cannot remove a dead file held by a mmap but not collected
# in PyPy; since the filename gets reused in this test, clean this up
break_cycles()
break_cycles()
@pytest.mark.parametrize("dtype", [np.float16])
def test_wavfile_dtype_unsupported(tmpdir, dtype):
tmpfile = str(tmpdir.join('temp.wav'))
rng = np.random.default_rng(1234)
data = rng.random((100, 5)).astype(dtype)
rate = 8000
with pytest.raises(ValueError, match="Unsupported"):
wavfile.write(tmpfile, rate, data)
def test_seek_emulating_reader_invalid_seek():
# Dummy data for the reader
reader = wavfile.SeekEmulatingReader(BytesIO(b'\x00\x00'))
# Test SEEK_END with an invalid whence value
with pytest.raises(UnsupportedOperation):
reader.seek(0, 5) # Invalid whence value
# Test with negative seek value
with pytest.raises(UnsupportedOperation):
reader.seek(-1, 0) # Negative position with SEEK_SET
# Test SEEK_END with valid parameters (should not raise)
pos = reader.seek(0, os.SEEK_END) # Valid usage
assert pos == 2, f"Failed to seek to end, got position {pos}"
| Nonseekable |
python | google__pytype | pytype/tests/test_methods1.py | {
"start": 134,
"end": 25909
} | class ____(test_base.BaseTest):
"""Tests for methods."""
def test_flow_and_replacement_sanity(self):
self.Check("""
def f(x):
if x:
x = 42
y = x
x = 1
return x + 4
assert_type(f(4), int)
""")
def test_multiple_returns(self):
self.Check("""
def f(x):
if x:
return 1
else:
return 1.5
assert_type(f(0), float)
assert_type(f(1), int)
""")
def test_loops_sanity(self):
ty = self.Infer("""
def f():
x = 4
y = -10
for i in range(1000):
x = x + (i+y)
y = i
return x
f()
""")
self.assertTypesMatchPytd(ty, "def f() -> int: ...")
def test_add_int(self):
self.Check("""
def f(x):
return x + 1
assert_type(f(3.2), float)
assert_type(f(3), int)
""")
def test_conjugate(self):
self.Check("""
def f(x, y):
return x.conjugate()
assert_type(f(int(), int()), int)
""")
def test_class_sanity(self):
ty = self.Infer("""
class A:
def __init__(self):
self.x = 1
def get_x(self):
return self.x
def set_x(self, x):
self.x = x
a = A()
y = a.x
x1 = a.get_x()
a.set_x(1.2)
x2 = a.get_x()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Union
a: A
x1: int
x2: float
y: int
class A:
x: Any
def __init__(self) -> None : ...
def get_x(self) -> int: ...
def set_x(self, x) -> None: ...
""",
)
def test_boolean_op(self):
self.Check("""
def f(x, y):
return 1 < x < 10
return 1 > x > 10
assert_type(f(1, 2), bool)
""")
def test_is(self):
self.Check("""
def f(a, b):
return a is b
assert_type(f(1, 2), bool)
""")
def test_is_not(self):
self.Check("""
def f(a, b):
return a is not b
assert_type(f(1, 2), bool)
""")
def test_unpack(self):
self.Check("""
from typing import Tuple
def f(x):
a, b = x
return (a, b)
assert_type(f((1, 2)), Tuple[int, int])
""")
def test_convert(self):
ty = self.Infer("""
def f(x):
return repr(x)
f(1)
""")
self.assertTypesMatchPytd(ty, "def f(x) -> str: ...")
def test_not(self):
ty = self.Infer("""
def f(x):
return not x
f(1)
""")
self.assertTypesMatchPytd(ty, "def f(x) -> bool: ...")
def test_positive(self):
self.Check("""
def f(x):
return +x
assert_type(f(1), int)
""")
def test_negative(self):
self.Check("""
def f(x):
return -x
assert_type(f(1), int)
""")
def test_invert(self):
self.Check("""
def f(x):
return ~x
assert_type(f(1), int)
""")
def test_inheritance(self):
ty = self.Infer("""
class Base:
def get_suffix(self):
return u""
class Leaf(Base):
def __init__(self):
pass
def test():
l1 = Leaf()
return l1.get_suffix()
if __name__ == "__main__":
test()
""")
self.assertTypesMatchPytd(
ty,
"""
class Base:
def get_suffix(self) -> str: ...
class Leaf(Base):
def __init__(self) -> None: ...
def test() -> str: ...
""",
)
def test_property(self):
ty = self.Infer("""
class A:
@property
def my_property(self):
return 1
def foo(self):
return self.my_property
def test():
x = A()
return x.foo()
test()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Annotated
class A:
my_property: Annotated[int, 'property']
def foo(self) -> int: ...
def test() -> int: ...
""",
)
def test_explicit_property(self):
ty = self.Infer("""
class B:
def _my_getter(self):
return 1
def _my_setter(self):
pass
my_property = property(_my_getter, _my_setter)
def test():
b = B()
b.my_property = 3
return b.my_property
test()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Annotated
class B:
def _my_getter(self) -> int: ...
def _my_setter(self) -> None: ...
my_property: Annotated[int, 'property']
def test() -> int: ...
""",
)
def test_inherited_property(self):
self.Check("""
class A:
@property
def bar(self):
return 42
class B(A):
def foo(self):
return super(B, self).bar + 42
""")
def test_error_in_property(self):
self.CheckWithErrors("""
class Foo:
@property
def f(self):
return self.nonexistent # attribute-error
""")
def test_generators(self):
ty = self.Infer("""
def f():
yield 3
def g():
for x in f():
return x
g()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Generator
def f() -> Generator[int, Any, None]: ...
def g() -> int | None: ...
""",
)
def test_list_generator(self):
ty = self.Infer("""
def f():
yield 3
def g():
for x in list(f()):
return x
g()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Generator
def f() -> Generator[int, Any, None]: ...
def g() -> int | None: ...
""",
)
def test_recursion(self):
ty = self.Infer("""
def f():
if __random__:
return f()
else:
return 3
f()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
def f() -> Any: ...
""",
)
def test_in_not_in(self):
ty = self.Infer("""
def f(x):
if __random__:
return x in [x]
else:
return x not in [x]
f(3)
""")
self.assertTypesMatchPytd(ty, "def f(x) -> bool: ...")
def test_complex_cfg(self):
ty = self.Infer("""
def g(h):
return 2
def h():
return 1
def f(x):
if x:
while x:
pass
while x:
pass
assert x
return g(h())
if __name__ == "__main__":
f(0)
""")
self.assertTypesMatchPytd(
ty,
"""
def g(h) -> int: ...
def h() -> int: ...
def f(x) -> int: ...
""",
)
def test_branch_and_loop_cfg(self):
ty = self.Infer("""
def g():
pass
def f():
if True:
while True:
pass
return False
g()
f()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
def g() -> None: ...
def f() -> Any: ...
""",
)
def test_closure(self):
self.Check("""
def f(x, y):
closure = lambda: x + y
return closure()
assert_type(f(1, 2), int)
""")
def test_deep_closure(self):
ty = self.Infer("""
def f():
x = 3
def g():
def h():
return x
return h
return g()()
f()
""")
self.assertTypesMatchPytd(ty, "def f() -> int: ...")
def test_two_closures(self):
ty = self.Infer("""
def f():
def g():
return 3
def h():
return g
return h()()
f()
""")
self.assertTypesMatchPytd(ty, "def f() -> int: ...")
def test_closure_binding_arguments(self):
self.Check("""
def f(x):
y = 1
def g(z):
return x + y + z
return g(1)
assert_type(f(1), int)
""")
def test_closure_on_multi_type(self):
ty = self.Infer("""
def f():
if __random__:
x = 1
else:
x = 3.5
return (lambda: x)()
f()
""")
self.assertTypesMatchPytd(ty, "def f() -> int | float: ...")
def test_call_kwargs(self):
self.Check("""
def f(x, y=3):
return x + y
assert_type(f(40, **{"y": 2}), int)
""")
def test_call_args(self):
self.Check("""
def f(x):
return x
args = (3,)
assert_type(f(*args), int)
""")
def test_call_args_kwargs(self):
self.Check("""
def f(x):
return x
args = (3,)
kwargs = {}
assert_type(f(*args, **kwargs), int)
""")
def test_call_positional_as_keyword(self):
self.Check("""
def f(named):
return named
assert_type(f(named=3), int)
""")
def test_two_keywords(self):
self.Check("""
def f(x, y):
return x if x else y
assert_type(f(x=3, y=4), int)
""")
def test_two_distinct_keyword_params(self):
f = """
def f(x, y):
return x if x else y
"""
self.Check(f + """
assert_type(f(x=3, y="foo"), int)
""")
self.Check(f + """
assert_type(f(y="foo", x=3), int)
""")
def test_starstar(self):
self.Check("""
def f(x):
return x
assert_type(f(**{"x": 3}), int)
""")
def test_starstar2(self):
self.Check("""
def f(x):
return x
kwargs = {}
kwargs['x'] = 3
assert_type(f(**kwargs), int)
""")
def test_starstar3(self):
self.Check("""
def f(x):
return x
kwargs = dict(x=3)
assert_type(f(**kwargs), int)
""")
def test_starargs_type(self):
self.Check("""
from typing import Tuple
def f(*args, **kwds):
return args
assert_type(f(3), Tuple[int])
""")
def test_starargs_type2(self):
self.Check("""
from typing import Tuple
def f(nr, *args):
return args
assert_type(f("foo", 4), Tuple[int])
""")
def test_starargs_deep(self):
ty = self.Infer("""
def f(*args):
return args
def g(x, *args):
return args
def h(x, y, *args):
return args
""")
self.assertTypesMatchPytd(
ty,
"""
def f(*args) -> tuple: ...
def g(x, *args) -> tuple: ...
def h(x, y, *args) -> tuple: ...
""",
)
def test_starargs_pass_through(self):
ty = self.Infer("""
class Foo:
def __init__(self, *args, **kwargs):
super(Foo, self).__init__(*args, **kwargs)
""")
self.assertTypesMatchPytd(
ty,
"""
class Foo:
def __init__(self, *args, **kwargs) -> NoneType: ...
""",
)
def test_empty_starargs_type(self):
self.Check("""
from typing import Tuple
def f(nr, *args):
return args
assert_type(f(3), Tuple[()])
""")
def test_starstar_kwargs_type(self):
self.Check("""
from typing import Dict
def f(*args, **kwargs):
return kwargs
assert_type(f(foo=3, bar=4), Dict[str, int])
""")
def test_starstar_kwargs_type2(self):
self.Check("""
from typing import Dict
def f(x, y, **kwargs):
return kwargs
assert_type(f("foo", "bar", z=3), Dict[str, int])
""")
def test_empty_starstar_kwargs_type(self):
self.Check("""
def f(nr, **kwargs):
return kwargs
assert_type(f(3), "dict[nothing, nothing]")
""")
def test_starstar_deep(self):
ty = self.Infer("""
class Foo:
def __init__(self, **kwargs):
self.kwargs = kwargs
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
class Foo:
def __init__(self, **kwargs) -> NoneType: ...
kwargs = ... # type: dict[str, Any]
""",
)
def test_starstar_deep2(self):
ty = self.Infer("""
def f(**kwargs):
return kwargs
def g(x, **kwargs):
return kwargs
def h(x, y, **kwargs):
return kwargs
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
def f(**kwargs) -> dict[str, Any]: ...
def g(x, **kwargs) -> dict[str, Any]: ...
def h(x, y, **kwargs) -> dict[str, Any]: ...
""",
)
def test_builtin_starargs(self):
with test_utils.Tempdir() as d:
d.create_file(
"myjson.pyi",
"""
from typing import Any
def loads(s: str, encoding: Any = ...) -> Any: ...
""",
)
ty = self.Infer(
"""
import myjson
def f(*args):
return myjson.loads(*args)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import myjson
from typing import Any
def f(*args) -> Any: ...
""",
)
def test_builtin_starstarargs(self):
with test_utils.Tempdir() as d:
d.create_file(
"myjson.pyi",
"""
from typing import Any
def loads(s: str, encoding: Any = ...) -> Any: ...
""",
)
ty = self.Infer(
"""
import myjson
def f(**args):
return myjson.loads(**args)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import myjson
from typing import Any
def f(**args) -> Any: ...
""",
)
def test_builtin_keyword(self):
with test_utils.Tempdir() as d:
d.create_file(
"myjson.pyi",
"""
from typing import Any
def loads(s: str, encoding: Any = ...) -> Any: ...
""",
)
ty = self.Infer(
"""
import myjson
def f():
return myjson.loads(s="{}")
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import myjson
from typing import Any
def f() -> Any: ...
""",
)
def test_none_or_function(self):
ty, _ = self.InferWithErrors("""
def g():
return 3
def f():
if __random__:
x = None
else:
x = g
if __random__:
return x() # not-callable
f()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Optional
def g() -> int: ...
def f() -> Optional[int]: ...
""",
)
def test_define_classmethod(self):
ty = self.Infer("""
class A:
@classmethod
def myclassmethod(*args):
return 3
def f():
a = A()
return a.myclassmethod
f()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Callable
class A:
@classmethod
def myclassmethod(*args) -> int: ...
def f() -> Callable: ...
""",
)
def test_classmethod_smoke(self):
self.Check("""
class A:
@classmethod
def mystaticmethod(x, y):
return x + y
""")
def test_invalid_classmethod(self):
ty, err = self.InferWithErrors("""
def f(x):
return 42
class A:
@classmethod # not-callable[e]>=3.11
@f
def myclassmethod(*args): # not-callable[e]<3.11
return 3
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
def f(x) -> int: ...
class A:
myclassmethod: Any
""",
)
self.assertErrorSequences(
err,
{
"e": [
"int",
"not callable",
"@classmethod applied",
"not a function",
]
},
)
def test_staticmethod_smoke(self):
self.Check("""
class A:
@staticmethod
def mystaticmethod(x, y):
return x + y
""")
def test_classmethod(self):
ty = self.Infer("""
class A:
@classmethod
def myclassmethod(cls):
return 3
def f():
return A().myclassmethod()
f()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Type
class A:
@classmethod
def myclassmethod(cls: Type[A]) -> int: ...
def f() -> int: ...
""",
)
def test_inherited_classmethod(self):
self.Check("""
class A:
@classmethod
def myclassmethod(cls):
return 3
class B(A):
@classmethod
def myclassmethod(cls):
return super(B, cls).myclassmethod()
""")
def test_staticmethod(self):
ty = self.Infer("""
class A:
@staticmethod
def mystaticmethod(x, y):
return x + y
def f():
return A.mystaticmethod(1, 2)
f()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
class A:
@staticmethod
def mystaticmethod(x, y) -> Any: ...
def f() -> int: ...
""",
)
def test_simple_staticmethod(self):
self.Check("""
class MyClass:
@staticmethod
def static_method():
return None
MyClass().static_method()
""")
def test_default_return_type(self):
ty = self.Infer("""
def f(x=""):
x = list(x)
f()
""")
self.assertTypesMatchPytd(ty, "def f(x=...) -> None: ...")
def test_lookup(self):
ty = self.Infer("""
class Cloneable:
def __init__(self):
pass
def clone(self):
return type(self)()
Cloneable().clone()
""")
cls = ty.Lookup("Cloneable")
method = cls.Lookup("clone")
self.assertEqual(
pytd_utils.Print(method),
"def clone(self: _TCloneable) -> _TCloneable: ...",
)
@test_base.skip("pytype thinks 'clone' returns a TypeVar(bound=Cloneable)")
def test_simple_clone(self):
ty = self.Infer("""
class Cloneable:
def clone(self):
return Cloneable()
""")
self.assertTypesMatchPytd(
ty,
"""
class Cloneable:
def clone(self) -> Cloneable: ...
""",
)
def test_decorator(self):
ty = self.Infer("""
class MyStaticMethodDecorator:
def __init__(self, func):
self.__func__ = func
def __get__(self, obj, cls):
return self.__func__
class A:
@MyStaticMethodDecorator
def mystaticmethod(x, y):
return x + y
def f():
return A.mystaticmethod(1, 2)
f()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
class MyStaticMethodDecorator:
__func__: Any
def __init__(self, func) -> None: ...
def __get__(self, obj, cls) -> Any: ...
class A:
mystaticmethod: Any
def f() -> int: ...
""",
)
def test_unknown_decorator(self):
ty = self.Infer("""
@__any_object__
def f():
return 3j
f()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
f: Any
""",
)
def test_func_name(self):
ty = self.Infer("""
def f():
pass
f.func_name = 3.1415
def g():
return f.func_name
g()
""")
self.assertTypesMatchPytd(
ty,
"""
def f() -> None: ...
def g() -> float: ...
""",
)
def test_register(self):
ty, _ = self.InferWithErrors("""
class Foo:
pass
def f():
lookup = {}
lookup[''] = Foo
return lookup.get('')() # not-callable
""")
self.assertTypesMatchPytd(
ty,
"""
class Foo: ...
def f() -> Foo: ...
""",
)
def test_copy_method(self):
ty = self.Infer("""
class Foo:
def mymethod(self, x, y):
return 3
myfunction = Foo.mymethod
""")
self.assertTypesMatchPytd(
ty,
"""
class Foo:
def mymethod(self, x, y) -> int: ...
def myfunction(self: Foo, x, y) -> int: ...
""",
)
def test_assign_method(self):
ty = self.Infer("""
class Foo:
pass
def myfunction(self, x, y):
return 3
Foo.mymethod = myfunction
""")
self.assertTypesMatchPytd(
ty,
"""
class Foo:
def mymethod(self, x, y) -> int: ...
def myfunction(self: Foo, x, y) -> int: ...
""",
)
def test_function_attr(self):
ty = self.Infer("""
import os
def f():
pass
class Foo:
def method(self):
pass
foo = Foo()
f.x = 3
Foo.method.x = "bar"
foo.method.x = 3j # overwrites previous line
os.chmod.x = 3.14
a = f.x
b = Foo.method.x
c = foo.method.x
d = os.chmod.x
""")
self.assertTypesMatchPytd(
ty,
"""
import os
def f() -> NoneType: ...
class Foo:
def method(self) -> NoneType: ...
foo = ... # type: Foo
a = ... # type: int
b = ... # type: complex
c = ... # type: complex
d = ... # type: float
""",
)
def test_json(self):
ty = self.Infer("""
import json
""")
self.assertTypesMatchPytd(
ty,
"""
import json
""",
)
def test_new(self):
ty = self.Infer("""
x = str.__new__(str)
""")
self.assertTypesMatchPytd(
ty,
"""
x = ... # type: str
""",
)
def test_override_new(self):
ty = self.Infer("""
class Foo(str):
def __new__(cls, string):
return str.__new__(cls, string)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Type, TypeVar
_TFoo = TypeVar("_TFoo", bound=Foo)
class Foo(str):
def __new__(cls: Type[_TFoo], string) -> _TFoo: ...
""",
)
def test_inherit_new(self):
ty = self.Infer("""
class Foo(str): pass
foo = Foo()
""")
self.assertTypesMatchPytd(
ty,
"""
class Foo(str): ...
foo = ... # type: Foo
""",
)
def test_attribute_in_new(self):
ty = self.Infer("""
class Foo:
def __new__(cls, name):
self = super(Foo, cls).__new__(cls)
self.name = name
return self
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Type, TypeVar
_TFoo = TypeVar("_TFoo", bound=Foo)
class Foo:
name = ... # type: Any
def __new__(cls: Type[_TFoo], name) -> _TFoo: ...
""",
)
def test_attributes_in_new_and_init(self):
ty = self.Infer("""
class Foo:
def __new__(cls):
self = super(Foo, cls).__new__(cls)
self.name = "Foo"
return self
def __init__(self):
self.nickname = 400
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Type, TypeVar
_TFoo = TypeVar("_TFoo", bound=Foo)
class Foo:
name = ... # type: str
nickname = ... # type: int
def __new__(cls: Type[_TFoo]) -> _TFoo: ...
def __init__(self) -> None : ...
""",
)
def test_variable_product_complexity_limit(self):
ty = self.Infer("""
class A:
def __new__(cls, w, x, y, z):
pass
class B(A):
pass
class C(A):
pass
class D(A):
pass
options = [
(1, 2, 3, 4),
(5, 6, 7, 8),
(9, 10, 11, 12),
(13, 14, 15, 16),
(17, 18, 19, 20),
]
for w, x, y, z in options:
A(w, x, y, z)
B(w, x, y, z)
C(w, x, y, z)
D(w, x, y, z)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List, Tuple
class A:
def __new__(cls, w, x, y, z) -> None: ...
class B(A): ...
class C(A): ...
class D(A): ...
options = ... # type: List[Tuple[int, int, int, int]]
w = ... # type: int
x = ... # type: int
y = ... # type: int
z = ... # type: int
""",
)
def test_return_self(self):
ty = self.Infer("""
class Foo:
def __enter__(self):
return self
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import TypeVar
_TFoo = TypeVar("_TFoo", bound=Foo)
class Foo:
def __enter__(self: _TFoo) -> _TFoo: ...
""",
)
def test_attribute_in_inherited_new(self):
ty = self.Infer("""
class Foo:
def __new__(cls, name):
self = super(Foo, cls).__new__(cls)
self.name = name
return self
class Bar(Foo):
def __new__(cls):
return super(Bar, cls).__new__(cls, "")
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Type, TypeVar
_TFoo = TypeVar("_TFoo", bound=Foo)
_TBar = TypeVar("_TBar", bound=Bar)
class Foo:
name = ... # type: Any
def __new__(cls: Type[_TFoo], name) -> _TFoo: ...
class Bar(Foo):
name = ... # type: str
def __new__(cls: Type[_TBar]) -> _TBar: ...
""",
)
def test_pyi_classmethod_and_staticmethod(self):
# Test that we can access method properties on imported classmethods.
with self.DepTree([(
"t.pyi",
"""
class A:
@classmethod
def foo(): ...
@staticmethod
def bar(): ...
""",
)]):
self.Check("""
import t
a = t.A.foo.__name__
b = t.A.bar.__name__
assert_type(a, str)
assert_type(b, str)
""")
if __name__ == "__main__":
test_base.main()
| MethodsTest |
python | apache__airflow | airflow-core/tests/unit/models/test_cleartasks.py | {
"start": 1800,
"end": 27998
} | class ____:
@pytest.fixture(autouse=True, scope="class")
def clean(self):
db.clear_db_runs()
db.clear_db_serialized_dags()
yield
db.clear_db_runs()
db.clear_db_serialized_dags()
def test_clear_task_instances(self, dag_maker):
# Explicitly needs catchup as True as test is creating history runs
with dag_maker(
"test_clear_task_instances",
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10),
catchup=True,
) as dag:
task0 = EmptyOperator(task_id="0")
task1 = EmptyOperator(task_id="1", retries=2)
dr = dag_maker.create_dagrun(
state=State.RUNNING,
run_type=DagRunType.SCHEDULED,
)
ti0, ti1 = sorted(dr.task_instances, key=lambda ti: ti.task_id)
ti0.refresh_from_task(task0)
ti1.refresh_from_task(task1)
ti0.run()
ti1.run()
with create_session() as session:
# do the incrementing of try_number ordinarily handled by scheduler
ti0.try_number += 1
ti1.try_number += 1
ti0 = session.merge(ti0)
ti1 = session.merge(ti1)
session.commit()
# we use order_by(task_id) here because for the test DAG structure of ours
# this is equivalent to topological sort. It would not work in general case
# but it works for our case because we specifically constructed test DAGS
# in the way that those two sort methods are equivalent
qry = session.query(TI).filter(TI.dag_id == dag.dag_id).order_by(TI.task_id).all()
clear_task_instances(qry, session)
ti0.refresh_from_db(session)
ti1.refresh_from_db(session)
# Next try to run will be try 2
assert ti0.state is None
assert ti0.try_number == 1
assert ti0.max_tries == 1
assert ti1.state is None
assert ti1.try_number == 1
assert ti1.max_tries == 3
def test_clear_task_instances_external_executor_id(self, dag_maker):
with dag_maker(
"test_clear_task_instances_external_executor_id",
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10),
) as dag:
EmptyOperator(task_id="task0")
ti0 = dag_maker.create_dagrun().task_instances[0]
ti0.state = State.SUCCESS
ti0.external_executor_id = "some_external_executor_id"
with create_session() as session:
session.add(ti0)
session.commit()
# we use order_by(task_id) here because for the test DAG structure of ours
# this is equivalent to topological sort. It would not work in general case
# but it works for our case because we specifically constructed test DAGS
# in the way that those two sort methods are equivalent
qry = session.query(TI).filter(TI.dag_id == dag.dag_id).order_by(TI.task_id).all()
clear_task_instances(qry, session)
ti0.refresh_from_db()
assert ti0.state is None
assert ti0.external_executor_id is None
def test_clear_task_instances_next_method(self, dag_maker, session):
with dag_maker(
"test_clear_task_instances_next_method",
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10),
):
EmptyOperator(task_id="task0")
ti0 = dag_maker.create_dagrun().task_instances[0]
ti0.state = State.DEFERRED
ti0.next_method = "next_method"
ti0.next_kwargs = {}
session.add(ti0)
session.commit()
clear_task_instances([ti0], session)
ti0.refresh_from_db()
assert ti0.next_method is None
assert ti0.next_kwargs is None
@pytest.mark.parametrize(
("state", "last_scheduling"), [(DagRunState.QUEUED, None), (DagRunState.RUNNING, DEFAULT_DATE)]
)
def test_clear_task_instances_dr_state(self, state, last_scheduling, dag_maker):
"""
Test that DR state is set to None after clear.
And that DR.last_scheduling_decision is handled OK.
start_date is also set to None
"""
# Explicitly needs catchup as True as test is creating history runs
with dag_maker(
"test_clear_task_instances",
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10),
catchup=True,
serialized=True,
) as dag:
EmptyOperator(task_id="0")
EmptyOperator(task_id="1", retries=2)
dr = dag_maker.create_dagrun(
state=DagRunState.SUCCESS,
run_type=DagRunType.SCHEDULED,
)
ti0, ti1 = sorted(dr.task_instances, key=lambda ti: ti.task_id)
dr.last_scheduling_decision = DEFAULT_DATE
ti0.state = TaskInstanceState.SUCCESS
ti1.state = TaskInstanceState.SUCCESS
session = dag_maker.session
session.flush()
# we use order_by(task_id) here because for the test DAG structure of ours
# this is equivalent to topological sort. It would not work in general case
# but it works for our case because we specifically constructed test DAGS
# in the way that those two sort methods are equivalent
qry = session.query(TI).filter(TI.dag_id == dag.dag_id).order_by(TI.task_id).all()
assert session.query(TaskInstanceHistory).count() == 0
clear_task_instances(qry, session, dag_run_state=state)
session.flush()
# 2 TIs were cleared so 2 history records should be created
assert session.query(TaskInstanceHistory).count() == 2
session.refresh(dr)
assert dr.state == state
assert dr.start_date is None if state == DagRunState.QUEUED else dr.start_date
assert dr.last_scheduling_decision == last_scheduling
@pytest.mark.parametrize("state", [DagRunState.QUEUED, DagRunState.RUNNING])
def test_clear_task_instances_on_running_dr(self, state, dag_maker):
"""
Test that DagRun state, start_date and last_scheduling_decision
are not changed after clearing TI in an unfinished DagRun.
"""
# Explicitly needs catchup as True as test is creating history runs
with dag_maker(
"test_clear_task_instances",
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10),
catchup=True,
) as dag:
EmptyOperator(task_id="0")
EmptyOperator(task_id="1", retries=2)
dr = dag_maker.create_dagrun(
state=state,
run_type=DagRunType.SCHEDULED,
)
ti0, ti1 = sorted(dr.task_instances, key=lambda ti: ti.task_id)
dr.last_scheduling_decision = DEFAULT_DATE
ti0.state = TaskInstanceState.SUCCESS
ti1.state = TaskInstanceState.SUCCESS
session = dag_maker.session
session.flush()
# we use order_by(task_id) here because for the test DAG structure of ours
# this is equivalent to topological sort. It would not work in general case
# but it works for our case because we specifically constructed test DAGS
# in the way that those two sort methods are equivalent
qry = session.query(TI).filter(TI.dag_id == dag.dag_id).order_by(TI.task_id).all()
clear_task_instances(qry, session)
session.flush()
session.refresh(dr)
assert dr.state == state
if state == DagRunState.QUEUED:
assert dr.start_date is None
if state == DagRunState.RUNNING:
assert dr.start_date
assert dr.last_scheduling_decision == DEFAULT_DATE
@pytest.mark.parametrize(
("state", "last_scheduling"),
[
(DagRunState.SUCCESS, None),
(DagRunState.SUCCESS, DEFAULT_DATE),
(DagRunState.FAILED, None),
(DagRunState.FAILED, DEFAULT_DATE),
],
)
def test_clear_task_instances_on_finished_dr(self, state, last_scheduling, dag_maker):
"""
Test that DagRun state, start_date and last_scheduling_decision
are changed after clearing TI in a finished DagRun.
"""
# Explicitly needs catchup as True as test is creating history runs
with dag_maker(
"test_clear_task_instances",
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10),
catchup=True,
serialized=True,
) as dag:
EmptyOperator(task_id="0")
EmptyOperator(task_id="1", retries=2)
dr = dag_maker.create_dagrun(
state=state,
run_type=DagRunType.SCHEDULED,
)
ti0, ti1 = sorted(dr.task_instances, key=lambda ti: ti.task_id)
dr.last_scheduling_decision = DEFAULT_DATE
ti0.state = TaskInstanceState.SUCCESS
ti1.state = TaskInstanceState.SUCCESS
session = dag_maker.session
session.flush()
# we use order_by(task_id) here because for the test DAG structure of ours
# this is equivalent to topological sort. It would not work in general case
# but it works for our case because we specifically constructed test DAGS
# in the way that those two sort methods are equivalent
qry = session.query(TI).filter(TI.dag_id == dag.dag_id).order_by(TI.task_id).all()
clear_task_instances(qry, session)
session.flush()
session.refresh(dr)
assert dr.state == DagRunState.QUEUED
assert dr.start_date is None
assert dr.last_scheduling_decision is None
    @pytest.mark.parametrize("delete_tasks", [True, False])
    def test_clear_task_instances_maybe_task_removed(self, delete_tasks, dag_maker, session):
        """This verifies the behavior of clear_task_instances re task removal.

        When clearing a TI, if the best available serdag for that task doesn't have the
        task anymore, then it has different logic re setting max tries."""
        with dag_maker("test_clear_task_instances_without_task") as dag:
            task0 = EmptyOperator(task_id="task0")
            task1 = EmptyOperator(task_id="task1", retries=2)
        dr = dag_maker.create_dagrun(
            state=State.RUNNING,
            run_type=DagRunType.SCHEDULED,
        )
        ti0, ti1 = sorted(dr.task_instances, key=lambda ti: ti.task_id)
        ti0.refresh_from_task(task0)
        ti1.refresh_from_task(task1)
        # simulate running this task
        # do the incrementing of try_number ordinarily handled by scheduler
        ti0.try_number += 1
        ti1.try_number += 1
        ti0.state = "success"
        ti1.state = "success"
        dr.state = "success"
        session.commit()
        # apparently max tries starts out at task.retries
        # doesn't really make sense
        # then, it later gets updated depending on what happens
        assert ti0.max_tries == 0
        assert ti1.max_tries == 2
        if delete_tasks:
            # Remove the task from dag.
            dag.task_dict.clear()
            dag.task_group.children.clear()
            assert ti1.max_tries == 2
            # Persist the now-empty DAG so clearing sees a serdag without the tasks.
            sync_dag_to_db(dag, session=session)
        session.refresh(ti1)
        # Sanity-check the pre-clear counters before exercising the clear.
        assert ti0.try_number == 1
        assert ti0.max_tries == 0
        assert ti1.try_number == 1
        assert ti1.max_tries == 2
        clear_task_instances([ti0, ti1], session)
        # When no task is found, max_tries will be maximum of original max_tries or try_number.
        session.refresh(ti0)
        session.refresh(ti1)
        assert ti0.try_number == 1
        assert ti0.max_tries == 1
        assert ti0.state is None
        assert ti1.try_number == 1
        assert ti1.state is None
        if delete_tasks:
            # Task gone from the serdag: max_tries stays max(max_tries, try_number).
            assert ti1.max_tries == 2
        else:
            # Task still present: clearing adds the task's retry budget again.
            assert ti1.max_tries == 3
        session.refresh(dr)
        assert dr.state == "queued"
    def test_clear_task_instances_without_dag_param(self, dag_maker, session):
        """Clearing TIs queried straight from the DB (no DAG object passed to
        clear_task_instances) still resets them and extends max_tries."""
        # Explicitly needs catchup as True as test is creating history runs
        with dag_maker(
            "test_clear_task_instances_without_dag_param",
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE + datetime.timedelta(days=10),
            session=session,
            catchup=True,
        ) as dag:
            task0 = EmptyOperator(task_id="task0")
            task1 = EmptyOperator(task_id="task1", retries=2)
        dr = dag_maker.create_dagrun(
            state=State.RUNNING,
            run_type=DagRunType.SCHEDULED,
        )
        ti0, ti1 = sorted(dr.task_instances, key=lambda ti: ti.task_id)
        ti0.refresh_from_task(task0)
        ti1.refresh_from_task(task1)
        # NOTE: this `session` deliberately shadows the fixture session for the
        # rest of the test body.
        with create_session() as session:
            # do the incrementing of try_number ordinarily handled by scheduler
            ti0.try_number += 1
            ti1.try_number += 1
            session.merge(ti0)
            session.merge(ti1)
            session.commit()
            ti0.run(session=session)
            ti1.run(session=session)
            # we use order_by(task_id) here because for the test DAG structure of ours
            # this is equivalent to topological sort. It would not work in general case
            # but it works for our case because we specifically constructed test DAGS
            # in the way that those two sort methods are equivalent
            qry = session.query(TI).filter(TI.dag_id == dag.dag_id).order_by(TI.task_id).all()
            clear_task_instances(qry, session)
            ti0.refresh_from_db(session=session)
            ti1.refresh_from_db(session=session)
            # Each TI used one try; clearing grants retries + 1 more on top.
            assert ti0.try_number == 1
            assert ti0.max_tries == 1
            assert ti1.try_number == 1
            assert ti1.max_tries == 3
def test_clear_task_instances_in_multiple_dags(self, dag_maker, session):
with dag_maker("test_clear_task_instances_in_multiple_dags0", session=session):
EmptyOperator(task_id="task0")
dr0 = dag_maker.create_dagrun(
state=State.RUNNING,
run_type=DagRunType.SCHEDULED,
)
with dag_maker("test_clear_task_instances_in_multiple_dags1", session=session):
EmptyOperator(task_id="task1", retries=2)
dr1 = dag_maker.create_dagrun(
state=State.RUNNING,
run_type=DagRunType.SCHEDULED,
)
ti0 = dr0.task_instances[0]
ti1 = dr1.task_instances[0]
# simulate running the task
# do the incrementing of try_number ordinarily handled by scheduler
ti0.try_number += 1
ti1.try_number += 1
session.commit()
clear_task_instances([ti0, ti1], session)
session.refresh(ti0)
session.refresh(ti1)
assert ti0.try_number == 1
assert ti0.max_tries == 1
assert ti1.try_number == 1
assert ti1.max_tries == 3
    def test_clear_task_instances_with_task_reschedule(self, dag_maker):
        """Test that TaskReschedules are deleted correctly when TaskInstances are cleared"""
        # Explicitly needs catchup as True as test is creating history runs
        with dag_maker(
            "test_clear_task_instances_with_task_reschedule",
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE + datetime.timedelta(days=10),
            catchup=True,
        ) as dag:
            # Always-False sensors in reschedule mode: each run leaves a
            # TaskReschedule row behind instead of finishing.
            task0 = PythonSensor(task_id="0", python_callable=lambda: False, mode="reschedule")
            task1 = PythonSensor(task_id="1", python_callable=lambda: False, mode="reschedule")
        dr = dag_maker.create_dagrun(
            state=State.RUNNING,
            run_type=DagRunType.SCHEDULED,
        )
        ti0, ti1 = sorted(dr.task_instances, key=lambda ti: ti.task_id)
        ti0.refresh_from_task(task0)
        ti1.refresh_from_task(task1)
        with create_session() as session:
            # do the incrementing of try_number ordinarily handled by scheduler
            ti0.try_number += 1
            ti1.try_number += 1
            session.merge(ti0)
            session.merge(ti1)
            session.commit()
        ti0.run()
        ti1.run()
        with create_session() as session:

            def count_task_reschedule(ti):
                # Number of TaskReschedule rows currently recorded for this TI.
                return session.query(TaskReschedule).filter(TaskReschedule.ti_id == ti.id).count()

            assert count_task_reschedule(ti0) == 1
            assert count_task_reschedule(ti1) == 1
            # we use order_by(task_id) here because for the test DAG structure of ours
            # this is equivalent to topological sort. It would not work in general case
            # but it works for our case because we specifically constructed test DAGS
            # in the way that those two sort methods are equivalent
            qry = (
                session.query(TI)
                .filter(TI.dag_id == dag.dag_id, TI.task_id == ti0.task_id)
                .order_by(TI.task_id)
                .all()
            )
            # Only ti0 is selected for clearing, so only its reschedule rows
            # should be deleted; ti1's must survive.
            clear_task_instances(qry, session)
            assert count_task_reschedule(ti0) == 0
            assert count_task_reschedule(ti1) == 1
@pytest.mark.parametrize(
("state", "state_recorded"),
[
(TaskInstanceState.SUCCESS, TaskInstanceState.SUCCESS),
(TaskInstanceState.FAILED, TaskInstanceState.FAILED),
(TaskInstanceState.SKIPPED, TaskInstanceState.SKIPPED),
(TaskInstanceState.UP_FOR_RETRY, TaskInstanceState.FAILED),
(TaskInstanceState.UP_FOR_RESCHEDULE, TaskInstanceState.FAILED),
(TaskInstanceState.RUNNING, TaskInstanceState.FAILED),
(TaskInstanceState.QUEUED, TaskInstanceState.FAILED),
(TaskInstanceState.SCHEDULED, TaskInstanceState.FAILED),
(None, TaskInstanceState.FAILED),
(TaskInstanceState.RESTARTING, TaskInstanceState.FAILED),
],
)
def test_task_instance_history_record(self, state, state_recorded, dag_maker):
"""Test that task instance history record is created with approapriate state"""
# Explicitly needs catchup as True as test is creating history runs
with dag_maker(
"test_clear_task_instances",
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10),
catchup=True,
) as dag:
EmptyOperator(task_id="0")
EmptyOperator(task_id="1", retries=2)
dr = dag_maker.create_dagrun(
state=DagRunState.RUNNING,
run_type=DagRunType.SCHEDULED,
)
ti0, ti1 = sorted(dr.task_instances, key=lambda ti: ti.task_id)
ti0.state = state
ti1.state = state
session = dag_maker.session
session.flush()
qry = session.query(TI).filter(TI.dag_id == dag.dag_id).order_by(TI.task_id).all()
clear_task_instances(qry, session)
session.flush()
session.refresh(dr)
ti_history = session.scalars(select(TaskInstanceHistory.state)).all()
assert [ti_history[0], ti_history[1]] == [str(state_recorded), str(state_recorded)]
def test_dag_clear(self, dag_maker, session):
with dag_maker("test_dag_clear") as dag:
EmptyOperator(task_id="test_dag_clear_task_0")
EmptyOperator(task_id="test_dag_clear_task_1", retries=2)
dr = dag_maker.create_dagrun(
state=State.RUNNING,
run_type=DagRunType.SCHEDULED,
)
ti0, ti1 = sorted(dr.task_instances, key=lambda ti: ti.task_id)
ti0.try_number += 1
session.commit()
# Next try to run will be try 1
assert ti0.try_number == 1
dag.clear(session=session)
session.commit()
assert ti0.try_number == 1
assert ti0.state == State.NONE
assert ti0.max_tries == 1
assert ti1.max_tries == 2
ti1.try_number += 1
session.commit()
assert ti1.try_number == 1
assert ti1.max_tries == 2
dag.clear(session=session)
# after clear dag, we have 2 remaining tries
assert ti1.max_tries == 3
assert ti1.try_number == 1
# after clear dag, ti0 has no remaining tries
assert ti0.try_number == 1
assert ti0.max_tries == 1
    def test_dags_clear(self, dag_maker, session):
        """Exercise SerializedDAG.clear_dags across several DAGs: a full clear,
        a dry_run (no-op), and an only_failed clear."""
        # Build several independent single-task DAGs, each with one scheduled run.
        dags, tis = [], []
        num_of_dags = 5
        for i in range(num_of_dags):
            with dag_maker(
                f"test_dag_clear_{i}",
                schedule=datetime.timedelta(days=1),
                serialized=True,
                start_date=DEFAULT_DATE,
                end_date=DEFAULT_DATE + datetime.timedelta(days=10),
            ):
                task = EmptyOperator(task_id=f"test_task_clear_{i}", owner="test")

            dr = dag_maker.create_dagrun(
                run_id=f"scheduled_{i}",
                logical_date=DEFAULT_DATE,
                state=State.RUNNING,
                run_type=DagRunType.SCHEDULED,
                session=session,
                data_interval=(DEFAULT_DATE, DEFAULT_DATE),
                run_after=DEFAULT_DATE,
                triggered_by=DagRunTriggeredByType.TEST,
            )
            ti = dr.task_instances[0]
            ti.task = task
            dags.append(dag_maker.dag)
            tis.append(ti)

        # test clear all dags
        for i in range(num_of_dags):
            # do the try_number increment ordinarily handled by the scheduler
            session.get(TaskInstance, tis[i].id).try_number += 1
            session.commit()
            tis[i].run()
            assert tis[i].state == State.SUCCESS
            assert tis[i].try_number == 1
            assert tis[i].max_tries == 0

        session.commit()

        def _get_ti(old_ti):
            # Re-fetch a fresh TI row by its natural key; clearing may have
            # expired or replaced the in-memory objects.
            return session.scalar(
                select(TI).where(
                    TI.dag_id == old_ti.dag_id,
                    TI.task_id == old_ti.task_id,
                    TI.map_index == old_ti.map_index,
                    TI.run_id == old_ti.run_id,
                )
            )

        SerializedDAG.clear_dags(dags)
        session.commit()
        for i in range(num_of_dags):
            ti = _get_ti(tis[i])
            # Cleared: state reset, one extra try granted.
            assert ti.state == State.NONE
            assert ti.try_number == 1
            assert ti.max_tries == 1

        # test dry_run
        for i, dag in enumerate(dags):
            ti = _get_ti(tis[i])
            ti.try_number += 1
            session.commit()
            ti.refresh_from_task(dag.get_task(ti.task_id))
            ti.run(session=session)
            assert ti.state == State.SUCCESS
            assert ti.try_number == 2
            assert ti.max_tries == 1

        session.commit()
        SerializedDAG.clear_dags(dags, dry_run=True)
        session.commit()
        for i in range(num_of_dags):
            ti = _get_ti(tis[i])
            # dry_run must leave everything untouched.
            assert ti.state == State.SUCCESS
            assert ti.try_number == 2
            assert ti.max_tries == 1

        # test only_failed
        ti_fail = random.choice(tis)
        ti_fail = _get_ti(ti_fail)
        ti_fail.state = State.FAILED
        session.commit()

        SerializedDAG.clear_dags(dags, only_failed=True)

        for ti_in in tis:
            ti = _get_ti(ti_in)
            if ti.dag_id == ti_fail.dag_id:
                # Only the failed TI's DAG gets cleared.
                assert ti.state == State.NONE
                assert ti.try_number == 2
                assert ti.max_tries == 2
            else:
                assert ti.state == State.SUCCESS
                assert ti.try_number == 2
                assert ti.max_tries == 1
    @pytest.mark.parametrize("run_on_latest_version", [True, False])
    def test_clear_task_instances_with_run_on_latest_version(self, run_on_latest_version, dag_maker, session):
        """Clearing with run_on_latest_version=True repoints the DagRun and its TIs
        at the newest DAG version/bundle; with False they stay on the old one."""
        # Explicitly needs catchup as True as test is creating history runs
        with dag_maker(
            "test_clear_task_instances",
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE + datetime.timedelta(days=10),
            catchup=True,
            bundle_version="v1",
        ):
            task0 = EmptyOperator(task_id="0")
            task1 = EmptyOperator(task_id="1", retries=2)
        dr = dag_maker.create_dagrun(
            state=State.RUNNING,
            run_type=DagRunType.SCHEDULED,
        )
        old_dag_version = DagVersion.get_latest_version(dr.dag_id)
        ti0, ti1 = sorted(dr.task_instances, key=lambda ti: ti.task_id)
        ti0.refresh_from_task(task0)
        ti1.refresh_from_task(task1)
        ti0.run()
        ti1.run()
        dr.state = DagRunState.SUCCESS
        session.merge(dr)
        session.flush()
        # Re-register the same dag_id under a new bundle version, dropping task
        # "1"; this creates a second DagVersion row.
        with dag_maker(
            "test_clear_task_instances",
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE + datetime.timedelta(days=10),
            catchup=True,
            bundle_version="v2",
        ) as dag:
            EmptyOperator(task_id="0")
        new_dag_version = DagVersion.get_latest_version(dag.dag_id)
        assert old_dag_version.id != new_dag_version.id
        qry = session.query(TI).filter(TI.dag_id == dag.dag_id).order_by(TI.task_id).all()
        clear_task_instances(qry, session, run_on_latest_version=run_on_latest_version)
        session.commit()
        dr = session.query(DagRun).filter(DagRun.dag_id == dag.dag_id).one()
        if run_on_latest_version:
            # The run moves to v2; task "1" no longer exists there, so its TI
            # is marked REMOVED and everything points at the new version.
            assert dr.created_dag_version_id == new_dag_version.id
            assert dr.bundle_version == new_dag_version.bundle_version
            assert TaskInstanceState.REMOVED in [ti.state for ti in dr.task_instances]
            for ti in dr.task_instances:
                assert ti.dag_version_id == new_dag_version.id
        else:
            # The run stays pinned to v1, so both TIs survive on the old version.
            assert dr.created_dag_version_id == old_dag_version.id
            assert dr.bundle_version == old_dag_version.bundle_version
            assert TaskInstanceState.REMOVED not in [ti.state for ti in dr.task_instances]
            for ti in dr.task_instances:
                assert ti.dag_version_id == old_dag_version.id
| TestClearTasks |
python | kamyu104__LeetCode-Solutions | Python/find-resultant-array-after-removing-anagrams.py | {
"start": 516,
"end": 947
} | class ____(object):
def removeAnagrams(self, words):
"""
:type words: List[str]
:rtype: List[str]
"""
result = []
prev = None
for x in words:
s = sorted(x)
if prev and prev == s:
continue
prev = s
result.append(x)
return result
# Time: O(n * llogl)
# Space: O(l)
import collections
# sort
| Solution2 |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/json.py | {
"start": 7597,
"end": 14430
} | class ____(JSON):
"""Represent the PostgreSQL JSONB type.
The :class:`_postgresql.JSONB` type stores arbitrary JSONB format data,
e.g.::
data_table = Table(
"data_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", JSONB),
)
with engine.connect() as conn:
conn.execute(
data_table.insert(), data={"key1": "value1", "key2": "value2"}
)
The :class:`_postgresql.JSONB` type includes all operations provided by
:class:`_types.JSON`, including the same behaviors for indexing
operations.
It also adds additional operators specific to JSONB, including
:meth:`.JSONB.Comparator.has_key`, :meth:`.JSONB.Comparator.has_all`,
:meth:`.JSONB.Comparator.has_any`, :meth:`.JSONB.Comparator.contains`,
:meth:`.JSONB.Comparator.contained_by`,
:meth:`.JSONB.Comparator.delete_path`,
:meth:`.JSONB.Comparator.path_exists` and
:meth:`.JSONB.Comparator.path_match`.
Like the :class:`_types.JSON` type, the :class:`_postgresql.JSONB`
type does not detect
in-place changes when used with the ORM, unless the
:mod:`sqlalchemy.ext.mutable` extension is used.
Custom serializers and deserializers
are shared with the :class:`_types.JSON` class,
using the ``json_serializer``
and ``json_deserializer`` keyword arguments. These must be specified
at the dialect level using :func:`_sa.create_engine`. When using
psycopg2, the serializers are associated with the jsonb type using
``psycopg2.extras.register_default_jsonb`` on a per-connection basis,
in the same way that ``psycopg2.extras.register_default_json`` is used
to register these handlers with the json type.
.. seealso::
:class:`_types.JSON`
.. warning::
**For applications that have indexes against JSONB subscript
expressions**
SQLAlchemy 2.0.42 made a change in how the subscript operation for
:class:`.JSONB` is rendered, from ``-> 'element'`` to ``['element']``,
for PostgreSQL versions greater than 14. This change caused an
unintended side effect for indexes that were created against
expressions that use subscript notation, e.g.
``Index("ix_entity_json_ab_text", data["a"]["b"].astext)``. If these
indexes were generated with the older syntax e.g. ``((entity.data ->
'a') ->> 'b')``, they will not be used by the PostgreSQL query planner
when a query is made using SQLAlchemy 2.0.42 or higher on PostgreSQL
versions 14 or higher. This occurs because the new text will resemble
``(entity.data['a'] ->> 'b')`` which will fail to produce the exact
textual syntax match required by the PostgreSQL query planner.
Therefore, for users upgrading to SQLAlchemy 2.0.42 or higher, existing
indexes that were created against :class:`.JSONB` expressions that use
subscripting would need to be dropped and re-created in order for them
to work with the new query syntax, e.g. an expression like
``((entity.data -> 'a') ->> 'b')`` would become ``(entity.data['a'] ->>
'b')``.
.. seealso::
:ticket:`12868` - discussion of this issue
"""
__visit_name__ = "JSONB"
operator_classes = OperatorClass.JSON | OperatorClass.CONCATENABLE
class Comparator(JSON.Comparator[_T]):
"""Define comparison operations for :class:`_types.JSON`."""
type: JSONB
def has_key(self, other: Any) -> ColumnElement[bool]:
"""Boolean expression. Test for presence of a key (equivalent of
the ``?`` operator). Note that the key may be a SQLA expression.
"""
return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean)
def has_all(self, other: Any) -> ColumnElement[bool]:
"""Boolean expression. Test for presence of all keys in jsonb
(equivalent of the ``?&`` operator)
"""
return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean)
def has_any(self, other: Any) -> ColumnElement[bool]:
"""Boolean expression. Test for presence of any key in jsonb
(equivalent of the ``?|`` operator)
"""
return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean)
def contains(self, other: Any, **kwargs: Any) -> ColumnElement[bool]:
"""Boolean expression. Test if keys (or array) are a superset
of/contained the keys of the argument jsonb expression
(equivalent of the ``@>`` operator).
kwargs may be ignored by this operator but are required for API
conformance.
"""
return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
def contained_by(self, other: Any) -> ColumnElement[bool]:
"""Boolean expression. Test if keys are a proper subset of the
keys of the argument jsonb expression
(equivalent of the ``<@`` operator).
"""
return self.operate(
CONTAINED_BY, other, result_type=sqltypes.Boolean
)
def delete_path(
self, array: Union[List[str], _pg_array[str]]
) -> ColumnElement[JSONB]:
"""JSONB expression. Deletes field or array element specified in
the argument array (equivalent of the ``#-`` operator).
The input may be a list of strings that will be coerced to an
``ARRAY`` or an instance of :meth:`_postgres.array`.
.. versionadded:: 2.0
"""
if not isinstance(array, _pg_array):
array = _pg_array(array)
right_side = cast(array, ARRAY(sqltypes.TEXT))
return self.operate(DELETE_PATH, right_side, result_type=JSONB)
def path_exists(self, other: Any) -> ColumnElement[bool]:
"""Boolean expression. Test for presence of item given by the
argument JSONPath expression (equivalent of the ``@?`` operator).
.. versionadded:: 2.0
"""
return self.operate(
PATH_EXISTS, other, result_type=sqltypes.Boolean
)
def path_match(self, other: Any) -> ColumnElement[bool]:
"""Boolean expression. Test if JSONPath predicate given by the
argument JSONPath expression matches
(equivalent of the ``@@`` operator).
Only the first item of the result is taken into account.
.. versionadded:: 2.0
"""
return self.operate(
PATH_MATCH, other, result_type=sqltypes.Boolean
)
comparator_factory = Comparator
| JSONB |
python | modin-project__modin | modin/core/execution/python/common/engine_wrapper.py | {
"start": 849,
"end": 2810
} | class ____:
"""Python engine wrapper serving for the compatibility purpose with other engines."""
@classmethod
def deploy(cls, func, f_args=None, f_kwargs=None, num_returns=1):
"""
Run the passed function.
Parameters
----------
func : callable
f_args : sequence, optional
Positional arguments to pass to the `func`.
f_kwargs : dict, optional
Keyword arguments to pass to the `func`.
num_returns : int, default: 1
Number of return values from the `func`.
Returns
-------
object
Returns the result of the `func`.
"""
args = [] if f_args is None else f_args
kwargs = {} if f_kwargs is None else f_kwargs
return func(*args, **kwargs)
@classmethod
def is_future(cls, item):
"""
Check if the item is a Future.
Parameters
----------
item : object
Returns
-------
boolean
Always return false.
"""
return False
@classmethod
def materialize(cls, obj_id):
"""
Get the data from the data storage.
The method only serves for the compatibility purpose, what it actually
does is just return the passed value as is.
Parameters
----------
obj_id : object
Returns
-------
object
The passed `obj_id` itself.
"""
return obj_id
@classmethod
def put(cls, data, **kwargs):
"""
Put data into the data storage.
The method only serves for the compatibility purpose, what it actually
does is just return the passed value as is.
Parameters
----------
data : object
**kwargs : dict
Returns
-------
object
The passed `data` itself.
"""
return data
| PythonWrapper |
python | pexpect__pexpect | tests/test_which.py | {
"start": 169,
"end": 10913
} | class ____(PexpectTestCase.PexpectTestCase):
" Tests for pexpect.which(). "
def test_which_finds_ls(self):
" which() can find ls(1). "
exercise = pexpect.which("ls")
assert exercise is not None
assert exercise.startswith('/')
def test_path_from_env(self):
" executable found from optional env argument "
bin_name = 'pexpect-test-path-from-env'
tempdir = tempfile.mkdtemp()
try:
bin_path = os.path.join(tempdir, bin_name)
with open(bin_path, 'w') as f:
f.write('# test file not to be run')
try:
os.chmod(bin_path, 0o700)
found_path = pexpect.which(bin_name, env={'PATH': tempdir})
finally:
os.remove(bin_path)
self.assertEqual(bin_path, found_path)
finally:
os.rmdir(tempdir)
def test_os_defpath_which(self):
" which() finds an executable in $PATH and returns its abspath. "
bin_dir = tempfile.mkdtemp()
if sys.getfilesystemencoding() in ('ascii', 'ANSI_X3.4-1968'):
prefix = 'ascii-'
else:
prefix = u'ǝpoɔıun-'
temp_obj = tempfile.NamedTemporaryFile(
suffix=u'.sh', prefix=prefix,
dir=bin_dir, delete=False)
bin_path = temp_obj.name
fname = os.path.basename(temp_obj.name)
save_path = os.environ['PATH']
save_defpath = os.defpath
try:
# setup
os.environ['PATH'] = ''
os.defpath = bin_dir
with open(bin_path, 'w') as fp:
pass
# given non-executable,
os.chmod(bin_path, 0o400)
# exercise absolute and relative,
assert pexpect.which(bin_path) is None
assert pexpect.which(fname) is None
# given executable,
os.chmod(bin_path, 0o700)
# exercise absolute and relative,
assert pexpect.which(bin_path) == bin_path
assert pexpect.which(fname) == bin_path
finally:
# restore,
os.environ['PATH'] = save_path
os.defpath = save_defpath
# destroy scratch files and folders,
if os.path.exists(bin_path):
os.unlink(bin_path)
if os.path.exists(bin_dir):
os.rmdir(bin_dir)
def test_path_search_which(self):
" which() finds an executable in $PATH and returns its abspath. "
fname = 'gcc'
bin_dir = tempfile.mkdtemp()
bin_path = os.path.join(bin_dir, fname)
save_path = os.environ['PATH']
try:
# setup
os.environ['PATH'] = bin_dir
with open(bin_path, 'w') as fp:
pass
# given non-executable,
os.chmod(bin_path, 0o400)
# exercise absolute and relative,
assert pexpect.which(bin_path) is None
assert pexpect.which(fname) is None
# given executable,
os.chmod(bin_path, 0o700)
# exercise absolute and relative,
assert pexpect.which(bin_path) == bin_path
assert pexpect.which(fname) == bin_path
finally:
# restore,
os.environ['PATH'] = save_path
# destroy scratch files and folders,
if os.path.exists(bin_path):
os.unlink(bin_path)
if os.path.exists(bin_dir):
os.rmdir(bin_dir)
def test_which_follows_symlink(self):
" which() follows symlinks and returns its path. "
fname = 'original'
symname = 'extra-crispy'
bin_dir = tempfile.mkdtemp()
bin_path = os.path.join(bin_dir, fname)
sym_path = os.path.join(bin_dir, symname)
save_path = os.environ['PATH']
try:
# setup
os.environ['PATH'] = bin_dir
with open(bin_path, 'w') as fp:
pass
os.chmod(bin_path, 0o400)
os.symlink(bin_path, sym_path)
# should not be found because symlink points to non-executable
assert pexpect.which(symname) is None
# but now it should -- because it is executable
os.chmod(bin_path, 0o700)
assert pexpect.which(symname) == sym_path
finally:
# restore,
os.environ['PATH'] = save_path
# destroy scratch files, symlinks, and folders,
if os.path.exists(sym_path):
os.unlink(sym_path)
if os.path.exists(bin_path):
os.unlink(bin_path)
if os.path.exists(bin_dir):
os.rmdir(bin_dir)
def test_which_should_not_match_folders(self):
" Which does not match folders, even though they are executable. "
# make up a path and insert a folder that is 'executable', a naive
# implementation might match (previously pexpect versions 3.2 and
# sh versions 1.0.8, reported by @lcm337.)
fname = 'g++'
bin_dir = tempfile.mkdtemp()
bin_dir2 = os.path.join(bin_dir, fname)
save_path = os.environ['PATH']
try:
os.environ['PATH'] = bin_dir
os.mkdir(bin_dir2, 0o755)
# should not be found because it is not executable *file*,
# but rather, has the executable bit set, as a good folder
# should -- it should not be returned because it fails isdir()
exercise = pexpect.which(fname)
assert exercise is None
finally:
# restore,
os.environ['PATH'] = save_path
# destroy scratch folders,
for _dir in (bin_dir2, bin_dir,):
if os.path.exists(_dir):
os.rmdir(_dir)
def test_which_should_match_other_group_user(self):
" which() returns executables by other, group, and user ownership. "
# create an executable and test that it is found using which() for
# each of the 'other', 'group', and 'user' permission bits.
fname = 'g77'
bin_dir = tempfile.mkdtemp()
bin_path = os.path.join(bin_dir, fname)
save_path = os.environ['PATH']
try:
# setup
os.environ['PATH'] = bin_dir
# an interpreted script requires the ability to read,
# whereas a binary program requires only to be executable.
#
# to gain access to a binary program, we make a copy of
# the existing system program echo(1).
bin_echo = None
for pth in ('/bin/echo', '/usr/bin/echo'):
if os.path.exists(pth):
bin_echo = pth
break
bin_which = None
for pth in ('/bin/which', '/usr/bin/which'):
if os.path.exists(pth):
bin_which = pth
break
if not bin_echo or not bin_which:
pytest.skip('needs `echo` and `which` binaries')
shutil.copy(bin_echo, bin_path)
isroot = os.getuid() == 0
for should_match, mode in (
# note that although the file may have matching 'group' or
# 'other' executable permissions, it is *not* executable
# because the current uid is the owner of the file -- which
# takes precedence
(False, 0o000), # ----------, no
(isroot, 0o001), # ---------x, no
(isroot, 0o010), # ------x---, no
(True, 0o100), # ---x------, yes
(False, 0o002), # --------w-, no
(False, 0o020), # -----w----, no
(False, 0o200), # --w-------, no
(isroot, 0o003), # --------wx, no
(isroot, 0o030), # -----wx---, no
(True, 0o300), # --wx------, yes
(False, 0o004), # -------r--, no
(False, 0o040), # ----r-----, no
(False, 0o400), # -r--------, no
(isroot, 0o005), # -------r-x, no
(isroot, 0o050), # ----r-x---, no
(True, 0o500), # -r-x------, yes
(False, 0o006), # -------rw-, no
(False, 0o060), # ----rw----, no
(False, 0o600), # -rw-------, no
(isroot, 0o007), # -------rwx, no
(isroot, 0o070), # ----rwx---, no
(True, 0o700), # -rwx------, yes
(isroot, 0o4001), # ---S-----x, no
(isroot, 0o4010), # ---S--x---, no
(True, 0o4100), # ---s------, yes
(isroot, 0o4003), # ---S----wx, no
(isroot, 0o4030), # ---S-wx---, no
(True, 0o4300), # --ws------, yes
(isroot, 0o2001), # ------S--x, no
(isroot, 0o2010), # ------s---, no
(True, 0o2100), # ---x--S---, yes
):
mode_str = '{0:0>4o}'.format(mode)
# given file mode,
os.chmod(bin_path, mode)
# exercise whether we may execute
can_execute = True
try:
subprocess.Popen(fname).wait() == 0
except OSError as err:
if err.errno != errno.EACCES:
raise
# permission denied
can_execute = False
assert should_match == can_execute, (
should_match, can_execute, mode_str)
# exercise whether which(1) would match
proc = subprocess.Popen((bin_which, fname),
env={'PATH': bin_dir},
stdout=subprocess.PIPE)
bin_which_match = bool(not proc.wait())
assert should_match == bin_which_match, (
should_match, bin_which_match, mode_str)
# finally, exercise pexpect's which(1) matches
# the same.
pexpect_match = bool(pexpect.which(fname))
assert should_match == pexpect_match == bin_which_match, (
should_match, pexpect_match, bin_which_match, mode_str)
finally:
# restore,
os.environ['PATH'] = save_path
# destroy scratch files and folders,
if os.path.exists(bin_path):
os.unlink(bin_path)
if os.path.exists(bin_dir):
os.rmdir(bin_dir)
| TestCaseWhich |
python | apache__airflow | task-sdk/src/airflow/sdk/api/datamodels/_generated.py | {
"start": 1191,
"end": 1437
} | class ____(BaseModel):
"""
Schema for AssetAliasModel used in AssetEventDagRunReference.
"""
model_config = ConfigDict(
extra="forbid",
)
name: Annotated[str, Field(title="Name")]
| AssetAliasReferenceAssetEventDagRun |
python | google__pytype | pytype/tests/test_tracebacks1.py | {
"start": 95,
"end": 2363
} | class ____(test_base.BaseTest):
"""Tests for tracebacks in error messages."""
def test_no_traceback(self):
errors = self.CheckWithErrors("""
def f(x):
"hello" + 42 # unsupported-operands[e]
f("world")
""")
self.assertErrorRegexes(errors, {"e": r"expects str$"})
def test_same_traceback(self):
errors = self.CheckWithErrors("""
def f(x, _):
x + 42 # unsupported-operands[e]
def g(x):
f("hello", x)
g("world")
""")
self.assertErrorRegexes(errors, {"e": r"Called from.*:\n line 4, in g"})
def test_different_tracebacks(self):
errors = self.CheckWithErrors("""
def f(x):
x + 42 # unsupported-operands[e1] # unsupported-operands[e2]
f("hello")
f("world")
""")
self.assertErrorRegexes(
errors,
{
"e1": r"Called from.*:\n line 3, in current file",
"e2": r"Called from.*:\n line 4, in current file",
},
)
def test_comprehension(self):
errors = self.CheckWithErrors("""
def f():
return {x.upper() for x in range(10)} # attribute-error[e]
""")
self.assertErrorRegexes(errors, {"e": r"upper.*int$"})
(error,) = errors.errorlog
self.assertEqual(error.methodname, "f")
def test_comprehension_in_traceback(self):
errors = self.CheckWithErrors("""
def f(x):
return x.upper() # attribute-error[e]
def g():
return {f(x) for x in range(10)}
""")
self.assertErrorRegexes(errors, {"e": r"Called from.*:\n line 4, in g$"})
def test_no_argument_function(self):
errors = self.CheckWithErrors("""
def f():
return None.attr # attribute-error[e]
f()
""")
self.assertErrorRegexes(errors, {"e": r"attr.*None$"})
def test_max_callsites(self):
errors = self.CheckWithErrors("""
def f(s):
return "hello, " + s # unsupported-operands[e1] # unsupported-operands[e2] # unsupported-operands[e3]
f(0)
f(1)
f(2)
f(3)
""")
# We limit the number of tracebacks shown for the same error.
self.assertErrorRegexes(
errors, {"e1": r"line 3", "e2": r"line 4", "e3": r"line 5"}
)
if __name__ == "__main__":
test_base.main()
| TracebackTest |
python | Textualize__textual | docs/examples/guide/screens/screen01.py | {
"start": 643,
"end": 840
} | class ____(App):
CSS_PATH = "screen01.tcss"
SCREENS = {"bsod": BSOD}
BINDINGS = [("b", "push_screen('bsod')", "BSOD")]
if __name__ == "__main__":
app = BSODApp()
app.run()
| BSODApp |
python | doocs__leetcode | solution/1100-1199/1136.Parallel Courses/Solution.py | {
"start": 0,
"end": 659
} | class ____:
def minimumSemesters(self, n: int, relations: List[List[int]]) -> int:
g = defaultdict(list)
indeg = [0] * n
for prev, nxt in relations:
prev, nxt = prev - 1, nxt - 1
g[prev].append(nxt)
indeg[nxt] += 1
q = deque(i for i, v in enumerate(indeg) if v == 0)
ans = 0
while q:
ans += 1
for _ in range(len(q)):
i = q.popleft()
n -= 1
for j in g[i]:
indeg[j] -= 1
if indeg[j] == 0:
q.append(j)
return -1 if n else ans
| Solution |
python | getsentry__sentry | src/sentry/flags/models.py | {
"start": 2092,
"end": 3494
} | class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
ACTION_TYPES = (
(ActionEnum.CREATED, "created"),
(ActionEnum.UPDATED, "updated"),
(ActionEnum.DELETED, "deleted"),
)
CREATED_BY_TYPE_TYPES = (
(CreatedByTypeEnum.EMAIL, "email"),
(CreatedByTypeEnum.NAME, "name"),
(CreatedByTypeEnum.ID, "id"),
)
PROVIDER_TYPES = (
(ProviderEnum.GENERIC, "generic"),
(ProviderEnum.FLAGPOLE, "flagpole"),
(ProviderEnum.LAUNCHDARKLY, "launchdarkly"),
(ProviderEnum.UNLEASH, "unleash"),
(ProviderEnum.STATSIG, "statsig"),
)
action = models.PositiveSmallIntegerField(choices=ACTION_TYPES)
created_at = models.DateTimeField(default=timezone.now)
created_by = models.CharField(max_length=100, null=True)
created_by_type = models.PositiveSmallIntegerField(choices=CREATED_BY_TYPE_TYPES, null=True)
flag = models.CharField(max_length=100)
organization_id = HybridCloudForeignKey("sentry.Organization", null=False, on_delete="CASCADE")
provider = models.PositiveSmallIntegerField(choices=PROVIDER_TYPES, null=True)
tags = models.JSONField()
class Meta:
app_label = "flags"
db_table = "flags_audit_log"
indexes = (models.Index(fields=("flag",)),)
__repr__ = sane_repr("organization_id", "flag")
@region_silo_model
| FlagAuditLogModel |
python | apache__airflow | shared/secrets_masker/src/airflow_shared/secrets_masker/secrets_masker.py | {
"start": 6182,
"end": 22123
} | class ____(logging.Filter):
"""Redact secrets from logs."""
replacer: Pattern | None = None
patterns: set[str]
ALREADY_FILTERED_FLAG = "__SecretsMasker_filtered"
MAX_RECURSION_DEPTH = 5
_has_warned_short_secret = False
mask_secrets_in_logs = False
min_length_to_mask = 5
secret_mask_adapter = None
def __init__(self):
super().__init__()
self.patterns = set()
self.sensitive_variables_fields = []
@classmethod
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
if cls._redact is not SecretsMasker._redact:
sig = inspect.signature(cls._redact)
# Compat for older versions of the OpenLineage plugin which subclasses this -- call the method
# without the replacement character
for param in sig.parameters.values():
if param.name == "replacement" or param.kind == param.VAR_KEYWORD:
break
else:
# Block only runs if no break above.
f = cls._redact
@functools.wraps(f)
def _redact(*args, replacement: str = "***", **kwargs):
return f(*args, **kwargs)
cls._redact = _redact
...
@classmethod
def enable_log_masking(cls) -> None:
"""Enable secret masking in logs."""
cls.mask_secrets_in_logs = True
@classmethod
def disable_log_masking(cls) -> None:
"""Disable secret masking in logs."""
cls.mask_secrets_in_logs = False
@classmethod
def is_log_masking_enabled(cls) -> bool:
"""Check if secret masking in logs is enabled."""
return cls.mask_secrets_in_logs
@cached_property
def _record_attrs_to_ignore(self) -> Iterable[str]:
# Doing log.info(..., extra={'foo': 2}) sets extra properties on
# record, i.e. record.foo. And we need to filter those too. Fun
#
# Create a record, and look at what attributes are on it, and ignore
# all the default ones!
record = logging.getLogRecordFactory()(
# name, level, pathname, lineno, msg, args, exc_info, func=None, sinfo=None,
"x",
logging.INFO,
__file__,
1,
"",
(),
exc_info=None,
func="funcname",
)
return frozenset(record.__dict__).difference({"msg", "args"})
def _redact_exception_with_context(self, exception):
# Exception class may not be modifiable (e.g. declared by an
# extension module such as JDBC).
with contextlib.suppress(AttributeError):
exception.args = (self.redact(v) for v in exception.args)
if exception.__context__:
self._redact_exception_with_context(exception.__context__)
if exception.__cause__ and exception.__cause__ is not exception.__context__:
self._redact_exception_with_context(exception.__cause__)
def filter(self, record) -> bool:
if not self.is_log_masking_enabled():
return True
if self.ALREADY_FILTERED_FLAG in record.__dict__:
# Filters are attached to multiple handlers and logs, keep a
# "private" flag that stops us needing to process it more than once
return True
if self.replacer:
for k, v in record.__dict__.items():
if k not in self._record_attrs_to_ignore:
record.__dict__[k] = self.redact(v)
if record.exc_info and record.exc_info[1] is not None:
exc = record.exc_info[1]
self._redact_exception_with_context(exc)
record.__dict__[self.ALREADY_FILTERED_FLAG] = True
return True
# Default on `max_depth` is to support versions of the OpenLineage plugin (not the provider) which called
# this function directly. New versions of that provider, and this class itself call it with a value
def _redact_all(
self,
item: Redactable,
depth: int,
max_depth: int = MAX_RECURSION_DEPTH,
*,
replacement: str = "***",
) -> Redacted:
if depth > max_depth or isinstance(item, str):
return replacement
if isinstance(item, dict):
return {
dict_key: self._redact_all(subval, depth + 1, max_depth, replacement=replacement)
for dict_key, subval in item.items()
}
if isinstance(item, (tuple, set)):
# Turn set in to tuple!
return tuple(
self._redact_all(subval, depth + 1, max_depth, replacement=replacement) for subval in item
)
if isinstance(item, list):
return list(
self._redact_all(subval, depth + 1, max_depth, replacement=replacement) for subval in item
)
return item
def _redact(
self, item: Redactable, name: str | None, depth: int, max_depth: int, replacement: str = "***"
) -> Redacted:
# Avoid spending too much effort on redacting on deeply nested
# structures. This also avoid infinite recursion if a structure has
# reference to self.
if depth > max_depth:
return item
try:
if name and self.should_hide_value_for_key(name):
return self._redact_all(item, depth, max_depth, replacement=replacement)
if isinstance(item, dict):
to_return = {
dict_key: self._redact(
subval, name=dict_key, depth=(depth + 1), max_depth=max_depth, replacement=replacement
)
for dict_key, subval in item.items()
}
return to_return
if isinstance(item, Enum):
return self._redact(
item=item.value, name=name, depth=depth, max_depth=max_depth, replacement=replacement
)
if _is_v1_env_var(item):
tmp = item.to_dict()
if self.should_hide_value_for_key(tmp.get("name", "")) and "value" in tmp:
tmp["value"] = replacement
else:
return self._redact(
item=tmp, name=name, depth=depth, max_depth=max_depth, replacement=replacement
)
return tmp
if isinstance(item, str):
if self.replacer:
# We can't replace specific values, but the key-based redacting
# can still happen, so we can't short-circuit, we need to walk
# the structure.
return self.replacer.sub(replacement, str(item))
return item
if isinstance(item, (tuple, set)):
# Turn set in to tuple!
return tuple(
self._redact(
subval, name=None, depth=(depth + 1), max_depth=max_depth, replacement=replacement
)
for subval in item
)
if isinstance(item, list):
return [
self._redact(
subval, name=None, depth=(depth + 1), max_depth=max_depth, replacement=replacement
)
for subval in item
]
return item
# I think this should never happen, but it does not hurt to leave it just in case
# Well. It happened (see https://github.com/apache/airflow/issues/19816#issuecomment-983311373)
# but it caused infinite recursion, to avoid this we mark the log as already filtered.
except Exception as exc:
log.warning(
"Unable to redact value of type %s, please report this via "
"<https://github.com/apache/airflow/issues>. Error was: %s: %s",
type(item),
type(exc).__name__,
exc,
extra={self.ALREADY_FILTERED_FLAG: True},
)
# Rather than expose sensitive info, lets play it safe
return "<redaction-failed>"
def _merge(
self,
new_item: Redacted,
old_item: Redactable,
*,
name: str | None,
depth: int,
max_depth: int,
force_sensitive: bool = False,
replacement: str,
) -> Redacted:
"""Merge a redacted item with its original unredacted counterpart."""
if depth > max_depth:
if isinstance(new_item, str) and new_item == "***":
return old_item
return new_item
try:
# Determine if we should treat this as sensitive
is_sensitive = force_sensitive or (name is not None and self.should_hide_value_for_key(name))
if isinstance(new_item, dict) and isinstance(old_item, dict):
merged = {}
for key in new_item.keys():
if key in old_item:
# For dicts, pass the key as name unless we're in sensitive mode
child_name = None if is_sensitive else key
merged[key] = self._merge(
new_item[key],
old_item[key],
name=child_name,
depth=depth + 1,
max_depth=max_depth,
force_sensitive=is_sensitive,
replacement=replacement,
)
else:
merged[key] = new_item[key]
return merged
if isinstance(new_item, (list, tuple)) and type(old_item) is type(new_item):
merged_list = []
for i in range(len(new_item)):
if i < len(old_item):
# In sensitive mode, check if individual item is redacted
if is_sensitive and isinstance(new_item[i], str) and new_item[i] == "***":
merged_list.append(old_item[i])
else:
merged_list.append(
self._merge(
new_item[i],
old_item[i],
name=None,
depth=depth + 1,
max_depth=max_depth,
force_sensitive=is_sensitive,
replacement=replacement,
)
)
else:
merged_list.append(new_item[i])
if isinstance(new_item, list):
return list(merged_list)
return tuple(merged_list)
if isinstance(new_item, set) and isinstance(old_item, set):
# Sets are unordered, we cannot restore original items.
return new_item
if _is_v1_env_var(new_item) and _is_v1_env_var(old_item):
# TODO: Handle Kubernetes V1EnvVar objects if needed
return new_item
if is_sensitive and isinstance(new_item, str) and new_item == "***":
return old_item
return new_item
except (TypeError, AttributeError, ValueError):
return new_item
def redact(
self,
item: Redactable,
name: str | None = None,
max_depth: int | None = None,
replacement: str = "***",
) -> Redacted:
"""
Redact an any secrets found in ``item``, if it is a string.
If ``name`` is given, and it's a "sensitive" name (see
:func:`should_hide_value_for_key`) then all string values in the item
is redacted.
"""
return self._redact(
item, name, depth=0, max_depth=max_depth or self.MAX_RECURSION_DEPTH, replacement=replacement
)
def merge(
self,
new_item: Redacted,
old_item: Redactable,
name: str | None = None,
max_depth: int | None = None,
replacement: str = "***",
) -> Redacted:
"""
Merge a redacted item with its original unredacted counterpart.
Takes a user-modified redacted item and merges it with the original unredacted item.
For sensitive fields that still contain "***" (or whatever the ``replacement`` is specified as), the
original value is restored. For fields that have been updated, the new value is preserved.
"""
return self._merge(
new_item,
old_item,
name=name,
depth=0,
max_depth=max_depth or self.MAX_RECURSION_DEPTH,
force_sensitive=False,
replacement=replacement,
)
def _adaptations(self, secret: str) -> Generator[str, None, None]:
"""Yield the secret along with any adaptations to the secret that should be masked."""
yield secret
if self.secret_mask_adapter:
# This can return an iterable of secrets to mask OR a single secret as a string
secret_or_secrets = self.secret_mask_adapter(secret)
if not isinstance(secret_or_secrets, str):
# if its not a string, it must be an iterable
yield from secret_or_secrets
else:
yield secret_or_secrets
def should_hide_value_for_key(self, name):
"""
Return if the value for this given name should be hidden.
Name might be a Variable name, or key in conn.extra_dejson, for example.
"""
from airflow import settings
if isinstance(name, str) and settings.HIDE_SENSITIVE_VAR_CONN_FIELDS:
name = name.strip().lower()
return any(s in name for s in self.sensitive_variables_fields)
return False
def add_mask(self, secret: JsonValue, name: str | None = None):
"""Add a new secret to be masked to this filter instance."""
if isinstance(secret, dict):
for k, v in secret.items():
self.add_mask(v, k)
elif isinstance(secret, str):
if not secret:
return
if secret.lower() in SECRETS_TO_SKIP_MASKING:
return
min_length = self.min_length_to_mask
if len(secret) < min_length:
if not SecretsMasker._has_warned_short_secret:
log.warning(
"Skipping masking for a secret as it's too short (<%d chars)",
min_length,
extra={self.ALREADY_FILTERED_FLAG: True},
)
SecretsMasker._has_warned_short_secret = True
return
new_mask = False
for s in self._adaptations(secret):
if s:
if len(s) < min_length:
continue
if s.lower() in SECRETS_TO_SKIP_MASKING:
continue
pattern = re.escape(s)
if pattern not in self.patterns and (not name or self.should_hide_value_for_key(name)):
self.patterns.add(pattern)
new_mask = True
if new_mask:
self.replacer = re.compile("|".join(self.patterns))
elif isinstance(secret, collections.abc.Iterable):
for v in secret:
self.add_mask(v, name)
def reset_masker(self):
"""Reset the patterns and the replacer in the masker instance."""
self.patterns = set()
self.replacer = None
| SecretsMasker |
python | pytest-dev__pytest | src/_pytest/_code/code.py | {
"start": 45900,
"end": 46703
} | class ____(ExceptionRepr):
chain: Sequence[tuple[ReprTraceback, ReprFileLocation | None, str | None]]
def __init__(
self,
chain: Sequence[tuple[ReprTraceback, ReprFileLocation | None, str | None]],
) -> None:
# reprcrash and reprtraceback of the outermost (the newest) exception
# in the chain.
super().__init__(
reprtraceback=chain[-1][0],
reprcrash=chain[-1][1],
)
self.chain = chain
def toterminal(self, tw: TerminalWriter) -> None:
for element in self.chain:
element[0].toterminal(tw)
if element[2] is not None:
tw.line("")
tw.line(element[2], yellow=True)
super().toterminal(tw)
@dataclasses.dataclass(eq=False)
| ExceptionChainRepr |
python | doocs__leetcode | solution/1100-1199/1156.Swap For Longest Repeated Character Substring/Solution.py | {
"start": 0,
"end": 475
} | class ____:
def maxRepOpt1(self, text: str) -> int:
cnt = Counter(text)
n = len(text)
ans = i = 0
while i < n:
j = i
while j < n and text[j] == text[i]:
j += 1
l = j - i
k = j + 1
while k < n and text[k] == text[i]:
k += 1
r = k - j - 1
ans = max(ans, min(l + r + 1, cnt[text[i]]))
i = j
return ans
| Solution |
python | huggingface__transformers | tests/models/qwen3_vl/test_modeling_qwen3_vl.py | {
"start": 6196,
"end": 11860
} | class ____(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
"""
Model tester for `Qwen3VLForConditionalGeneration`.
"""
all_model_classes = (
(
Qwen3VLModel,
Qwen3VLForConditionalGeneration,
)
if is_torch_available()
else ()
)
def setUp(self):
self.model_tester = Qwen3VLVisionText2TextModelTester(self)
self.config_tester = ConfigTester(self, config_class=Qwen3VLConfig, has_text_modality=False)
def test_config(self):
self.config_tester.run_common_tests()
def test_mismatching_num_image_tokens(self):
"""
Tests that VLMs through an error with explicit message saying what is wrong
when number of images don't match number of image tokens in the text.
Also we need to test multi-image cases when one prompr has multiple image tokens.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
model.eval()
_ = model(**input_dict) # successful forward with no modifications
curr_input_dict = copy.deepcopy(input_dict)
# remove one image but leave the image token in text
patch_size = config.vision_config.patch_size
one_img_length = (self.model_tester.image_size**2) // (patch_size**2)
curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-one_img_length:, ...]
curr_input_dict["image_grid_thw"] = curr_input_dict["image_grid_thw"][-1:, ...]
with self.assertRaises(ValueError):
_ = model(**curr_input_dict)
# simulate multi-image case by concatenating inputs where each has exactly one image/image-token
input_ids = curr_input_dict["input_ids"][:1]
pixel_values = curr_input_dict["pixel_values"][:one_img_length]
image_grid_thw = curr_input_dict["image_grid_thw"][:1]
input_ids = torch.cat([input_ids, input_ids], dim=0)
# one image and two image tokens raise an error
with self.assertRaises(ValueError):
_ = model(
input_ids=input_ids,
pixel_values=pixel_values,
image_grid_thw=image_grid_thw,
)
# two images and two image tokens don't raise an error
pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
image_grid_thw = torch.cat([image_grid_thw, image_grid_thw], dim=0)
_ = model(
input_ids=input_ids,
pixel_values=pixel_values,
image_grid_thw=image_grid_thw,
)
def test_video_forward(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
B = self.model_tester.batch_size
C = config.vision_config.in_chans
T = config.vision_config.temporal_patch_size
P = config.vision_config.patch_size
input_ids = ids_tensor([B, self.model_tester.seq_length], self.model_tester.vocab_size)
F = 4
patch_H = self.model_tester.image_size // P
patch_W = self.model_tester.image_size // P
patch_T = F // T
patches_per_video = patch_T * patch_H * patch_W
pathed_per_frame = patch_H * patch_W
pixel_values_videos = floats_tensor(
[
# first dim: batch_size * num_patches
B * patches_per_video,
# second dim: in_channels * temporal_patch_size * patch_size^2
C * T * (P**2),
]
)
# qwen3vl use timestamps for video, so split it into patch_T sub-videos
video_grid_thw = torch.tensor([[1, patch_H, patch_W] for _ in range(patch_T)] * B)
# sanity check
self.assertEqual(pixel_values_videos.shape[0], video_grid_thw.prod(dim=1).sum().item())
# Insert video token sequence
input_ids[:, -1] = self.model_tester.pad_token_id
input_ids[input_ids == self.model_tester.video_token_id] = self.model_tester.pad_token_id
input_ids[input_ids == self.model_tester.image_token_id] = self.model_tester.pad_token_id
input_ids[input_ids == self.model_tester.vision_start_token_id] = self.model_tester.pad_token_id
input_ids[:, self.model_tester.num_image_tokens] = self.model_tester.video_token_id
insertion_point = self.model_tester.num_image_tokens
self.assertLessEqual((B * patches_per_video) + insertion_point, self.model_tester.seq_length)
for b in range(B):
# each frame is separated by a vision_start_token_id
for frame_idx in range(patch_T):
input_ids[b, insertion_point + frame_idx * (pathed_per_frame + 1)] = (
self.model_tester.vision_start_token_id
)
input_ids[
b,
insertion_point + frame_idx * (pathed_per_frame + 1) + 1 : insertion_point
+ (frame_idx + 1) * (pathed_per_frame + 1),
] = self.model_tester.video_token_id
for model_class in self.all_model_classes:
# TODO:we should remove this because we use timestamps for video
model = model_class(config).to(torch_device)
outputs = model(
input_ids=input_ids,
pixel_values_videos=pixel_values_videos,
video_grid_thw=video_grid_thw,
)
self.assertIsNotNone(outputs)
| Qwen3VLModelTest |
python | pola-rs__polars | py-polars/src/polars/lazyframe/engine_config.py | {
"start": 214,
"end": 1925
} | class ____:
"""
Configuration options for the GPU execution engine.
Use this if you want control over details of the execution.
Parameters
----------
device : int, default None
Select the GPU used to run the query. If not provided, the
query uses the current CUDA device.
memory_resource : rmm.mr.DeviceMemoryResource, default None
Provide a memory resource for GPU memory allocations.
.. warning::
If passing a `memory_resource`, you must ensure that it is valid
for the selected `device`. See the `RMM documentation
<https://github.com/rapidsai/rmm?tab=readme-ov-file#multiple-devices>`_
for more details.
raise_on_fail : bool, default False
If True, do not fall back to the Polars CPU engine if the GPU
engine cannot execute the query, but instead raise an error.
"""
device: int | None
"""Device on which to run query."""
memory_resource: DeviceMemoryResource | None
"""Memory resource to use for device allocations."""
raise_on_fail: bool
"""
Whether unsupported queries should raise an error, rather than falling
back to the CPU engine.
"""
config: Mapping[str, Any]
"""Additional configuration options for the engine."""
def __init__(
self,
*,
device: int | None = None,
memory_resource: Any | None = None,
raise_on_fail: bool = False,
**kwargs: Any,
) -> None:
self.device = device
self.memory_resource = memory_resource
# Avoids need for changes in cudf-polars
kwargs["raise_on_fail"] = raise_on_fail
self.config = kwargs
| GPUEngine |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess14.py | {
"start": 1174,
"end": 1440
} | class ____(C[float]): ...
reveal_type(C.prop, expected_text="CachedSlotProperty[C[Unknown], int]")
reveal_type(D.prop, expected_text="CachedSlotProperty[D, int]")
c = C("")
reveal_type(c.prop, expected_text="int")
d = D(1)
reveal_type(d.prop, expected_text="int")
| D |
python | huggingface__transformers | tests/models/blt/test_modeling_blt.py | {
"start": 8812,
"end": 18339
} | class ____(unittest.TestCase):
def setup(self):
cleanup(torch_device, gc_collect=True)
def tearDown(self):
# TODO (joao): automatic compilation, i.e. compilation when `cache_implementation="static"` is used, leaves
# some memory allocated in the cache, which means some object is not being released properly. This causes some
# unoptimal memory usage, e.g. after certain tests a 7B model in FP16 no longer fits in a 24GB GPU.
# Investigate the root cause.
cleanup(torch_device, gc_collect=True)
@slow
@require_read_token
def test_model(self):
NUM_TOKENS_TO_GENERATE = 200
EXPECTED_TEXT = "my name is alex and i am a student at the university of michigan. i am a senior majoring in computer science and minoring in mathematics. i am also a member of the michigan math club and the michigan computer s"
prompt = "my name is"
model = BltForCausalLM.from_pretrained("itazap/blt-1b-hf", device_map="auto", attn_implementation="sdpa")
tokenizer = AutoTokenizer.from_pretrained("itazap/blt-1b-hf")
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
generated_ids = model.generate(
**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, use_cache=False
)
output_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXT)
@slow
@require_read_token
def test_model_logits(self):
EXPECTED_OUTPUT = torch.tensor(
[
[
-10.4948,
-10.7065,
-6.1813,
-10.5545,
-10.3428,
-9.1493,
-8.4937,
-8.6382,
-9.2159,
-9.5907,
-9.3679,
-8.4184,
-9.0655,
-3.4436,
2.9616,
-10.3157,
-6.3723,
-6.0133,
-9.7100,
-9.2128,
-8.8064,
-9.8179,
-9.7516,
-9.4681,
-9.7715,
-9.4897,
-9.0491,
-9.8098,
-9.4648,
-9.3294,
],
[
-13.3010,
-13.1910,
-5.7230,
-13.2895,
-13.4864,
-8.7140,
-7.0275,
-7.0182,
-10.1362,
-10.3762,
-9.9086,
-7.8049,
-8.8660,
-5.2711,
-3.5778,
-12.5346,
-9.1609,
-6.7925,
-10.3717,
-9.2650,
-10.6393,
-11.4807,
-11.2128,
-10.9615,
-10.5806,
-10.8873,
-11.0651,
-11.3471,
-10.5437,
-9.9688,
],
]
).to(torch_device)
input_ids = [1, 42, 21, 12, 43, 23, 1, 4]
model = BltForCausalLM.from_pretrained("itazap/blt-1b-hf", attn_implementation="sdpa", device_map="auto")
with torch.no_grad():
output = model(torch.tensor([input_ids]).to(torch_device))[0]
torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-4, atol=1e-4)
@slow
@require_read_token
@require_torch_bf16
def test_model_bf16(self):
"""Test Blt model with bfloat16 precision."""
NUM_TOKENS_TO_GENERATE = 200
EXPECTED_TEXT = "my name is alex and i am a student at the university of michigan in the college of arts and sciences. i am a senior majoring in computer science and minoring in mathematics. i am also a member of the michigan m"
prompt = "my name is"
model = BltForCausalLM.from_pretrained(
"itazap/blt-1b-hf", device_map="auto", attn_implementation="sdpa", torch_dtype=torch.bfloat16
)
tokenizer = AutoTokenizer.from_pretrained("itazap/blt-1b-hf")
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
generated_ids = model.generate(
**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, use_cache=False
)
output_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXT)
@slow
@require_read_token
@require_torch_bf16
def test_model_logits_bf16(self):
"""Test Blt model logits with bfloat16 precision."""
EXPECTED_OUTPUT = torch.tensor(
[
[
-10.5000,
-10.6875,
-6.1875,
-10.5625,
-10.3125,
-9.1875,
-8.5000,
-8.6875,
-9.1875,
-9.5625,
-9.3750,
-8.5000,
-9.0625,
-3.4219,
2.9531,
-10.3125,
-6.4062,
-6.0000,
-9.6875,
-9.1875,
-8.8125,
-9.8125,
-9.7500,
-9.4375,
-9.8125,
-9.5000,
-9.0000,
-9.8125,
-9.4375,
-9.3125,
],
[
-13.2500,
-13.1875,
-5.6875,
-13.3125,
-13.5000,
-8.7500,
-7.0625,
-7.0312,
-10.1250,
-10.3750,
-9.8750,
-7.8438,
-8.8750,
-5.2812,
-3.5625,
-12.5000,
-9.1875,
-6.8125,
-10.3750,
-9.3125,
-10.6250,
-11.5000,
-11.2500,
-11.0000,
-10.5625,
-10.8750,
-11.0625,
-11.3750,
-10.5625,
-10.0000,
],
]
).to(torch_device)
input_ids = [1, 42, 21, 12, 43, 23, 1, 4]
model = BltForCausalLM.from_pretrained(
"itazap/blt-1b-hf", device_map="auto", attn_implementation="sdpa", torch_dtype=torch.bfloat16
)
with torch.no_grad():
output = model(torch.tensor([input_ids]).to(torch_device))[0]
torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-3, atol=1e-3)
@slow
@require_read_token
def test_model_eager(self):
"""Test Blt model with bfloat16 precision using eager attention implementation."""
NUM_TOKENS_TO_GENERATE = 200
EXPECTED_TEXT = "my name is alex and i am a student at the university of michigan. i am a senior majoring in computer science and minoring in mathematics. i am also a member of the michigan math club and the michigan computer s"
prompt = "my name is"
model = BltForCausalLM.from_pretrained("itazap/blt-1b-hf", device_map="auto", attn_implementation="eager")
tokenizer = AutoTokenizer.from_pretrained("itazap/blt-1b-hf")
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
generated_ids = model.generate(
**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, use_cache=False
)
output_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXT)
@slow
@require_read_token
@require_torch_bf16
def test_model_bf16_static_cache(self):
"""Test Blt model with bfloat16 precision and static cache."""
NUM_TOKENS_TO_GENERATE = 200
EXPECTED_TEXT = "my name is alex and i am a student at the university of michigan in the college of arts and sciences. i am a senior majoring in computer science and minoring in mathematics. i am also a member of the michigan m"
prompt = "my name is"
model = BltForCausalLM.from_pretrained(
"itazap/blt-1b-hf", device_map="auto", attn_implementation="sdpa", torch_dtype=torch.bfloat16
)
model.generation_config.cache_implementation = "static"
tokenizer = AutoTokenizer.from_pretrained("itazap/blt-1b-hf")
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
generated_ids = model.generate(
**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, use_cache=False
)
output_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXT)
| BltIntegrationTest |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 23968,
"end": 24333
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear1 = torch.nn.Linear(10, 10)
def forward(self, x):
if self.__class__.__name__ == "ABC":
return 10
if self.linear1.__class__.__name__ == "Linear":
return F.relu(self.linear1(x) + 10)
return 11
| ModuleNameString |
python | FactoryBoy__factory_boy | factory/builder.py | {
"start": 10091,
"end": 12923
} | class ____:
"""Resolve a set of declarations.
Attributes are set at instantiation time, values are computed lazily.
Attributes:
__initialized (bool): whether this object's __init__ as run. If set,
setting any attribute will be prevented.
__declarations (dict): maps attribute name to their declaration
__values (dict): maps attribute name to computed value
__pending (str list): names of the attributes whose value is being
computed. This allows to detect cyclic lazy attribute definition.
__step (BuildStep): the BuildStep related to this resolver.
This allows to have the value of a field depend on the value of
another field
"""
__initialized = False
def __init__(self, declarations, step, sequence):
self.__declarations = declarations
self.__step = step
self.__values = {}
self.__pending = []
self.__initialized = True
@property
def factory_parent(self):
return self.__step.parent_step.stub if self.__step.parent_step else None
def __repr__(self):
return '<Resolver for %r>' % self.__step
def __getattr__(self, name):
"""Retrieve an attribute's value.
This will compute it if needed, unless it is already on the list of
attributes being computed.
"""
if name in self.__pending:
raise errors.CyclicDefinitionError(
"Cyclic lazy attribute definition for %r; cycle found in %r." %
(name, self.__pending))
elif name in self.__values:
return self.__values[name]
elif name in self.__declarations:
declaration = self.__declarations[name]
value = declaration.declaration
if enums.get_builder_phase(value) == enums.BuilderPhase.ATTRIBUTE_RESOLUTION:
self.__pending.append(name)
try:
value = value.evaluate_pre(
instance=self,
step=self.__step,
overrides=declaration.context,
)
finally:
last = self.__pending.pop()
assert name == last
self.__values[name] = value
return value
else:
raise AttributeError(
"The parameter %r is unknown. Evaluated attributes are %r, "
"definitions are %r." % (name, self.__values, self.__declarations))
def __setattr__(self, name, value):
"""Prevent setting attributes once __init__ is done."""
if not self.__initialized:
return super().__setattr__(name, value)
else:
raise AttributeError('Setting of object attributes is not allowed')
| Resolver |
python | Netflix__metaflow | metaflow/flowspec.py | {
"start": 2176,
"end": 2378
} | class ____(Enum):
FLOW_MUTATORS = 1
FLOW_DECORATORS = 2
CONFIGS = 3
CACHED_PARAMETERS = 4
SET_CONFIG_PARAMETERS = 5 # Parameters that now have a ConfigValue (converted)
| FlowStateItems |
python | optuna__optuna | optuna/samplers/_partial_fixed.py | {
"start": 472,
"end": 3862
} | class ____(BaseSampler):
"""Sampler with partially fixed parameters.
Example:
After several steps of optimization, you can fix the value of ``y`` and re-optimize it.
.. testcode::
import optuna
def objective(trial):
x = trial.suggest_float("x", -1, 1)
y = trial.suggest_int("y", -1, 1)
return x**2 + y
study = optuna.create_study()
study.optimize(objective, n_trials=10)
best_params = study.best_params
fixed_params = {"y": best_params["y"]}
partial_sampler = optuna.samplers.PartialFixedSampler(fixed_params, study.sampler)
study.sampler = partial_sampler
study.optimize(objective, n_trials=10)
Args:
fixed_params:
A dictionary of parameters to be fixed.
base_sampler:
A sampler which samples unfixed parameters.
"""
def __init__(self, fixed_params: dict[str, Any], base_sampler: BaseSampler) -> None:
self._fixed_params = fixed_params
self._base_sampler = base_sampler
def reseed_rng(self) -> None:
self._base_sampler.reseed_rng()
def infer_relative_search_space(
self, study: Study, trial: FrozenTrial
) -> dict[str, BaseDistribution]:
search_space = self._base_sampler.infer_relative_search_space(study, trial)
# Remove fixed params from relative search space to return fixed values.
for param_name in self._fixed_params.keys():
if param_name in search_space:
del search_space[param_name]
return search_space
def sample_relative(
self,
study: Study,
trial: FrozenTrial,
search_space: dict[str, BaseDistribution],
) -> dict[str, Any]:
# Fixed params are never sampled here.
return self._base_sampler.sample_relative(study, trial, search_space)
def sample_independent(
self,
study: Study,
trial: FrozenTrial,
param_name: str,
param_distribution: BaseDistribution,
) -> Any:
if param_name not in self._fixed_params:
# Unfixed params are sampled here.
return self._base_sampler.sample_independent(
study, trial, param_name, param_distribution
)
else:
# Fixed params are sampled here.
# Check if a parameter value is contained in the range of this distribution.
param_value = self._fixed_params[param_name]
param_value_in_internal_repr = param_distribution.to_internal_repr(param_value)
contained = param_distribution._contains(param_value_in_internal_repr)
if not contained:
optuna_warn(
f"Fixed parameter '{param_name}' with value {param_value} is out of range "
f"for distribution {param_distribution}."
)
return param_value
def before_trial(self, study: Study, trial: FrozenTrial) -> None:
self._base_sampler.before_trial(study, trial)
def after_trial(
self,
study: Study,
trial: FrozenTrial,
state: TrialState,
values: Sequence[float] | None,
) -> None:
self._base_sampler.after_trial(study, trial, state, values)
| PartialFixedSampler |
python | encode__django-rest-framework | tests/test_permissions.py | {
"start": 941,
"end": 1209
} | class ____(generics.RetrieveUpdateDestroyAPIView):
queryset = BasicModel.objects.all()
serializer_class = BasicSerializer
authentication_classes = [authentication.BasicAuthentication]
permission_classes = [permissions.DjangoModelPermissions]
| InstanceView |
python | falconry__falcon | tests/asgi/test_request_body_asgi.py | {
"start": 308,
"end": 3597
} | class ____:
def test_empty_body(self, client, resource):
client.app.add_route('/', resource)
client.simulate_request(path='/', body='')
stream = resource.captured_req.stream
assert stream.tell() == 0
def test_tiny_body(self, client, resource):
client.app.add_route('/', resource)
expected_body = '.'
headers = {'capture-req-body-bytes': '1'}
client.simulate_request(path='/', body=expected_body, headers=headers)
stream = resource.captured_req.stream
assert resource.captured_req_body == expected_body.encode('utf-8')
assert stream.tell() == 1
def test_tiny_body_overflow(self, client, resource):
client.app.add_route('/', resource)
expected_body = '.'
expected_len = len(expected_body)
# Read too many bytes; shouldn't block
headers = {'capture-req-body-bytes': str(len(expected_body) + 1)}
client.simulate_request(path='/', body=expected_body, headers=headers)
stream = resource.captured_req.stream
assert resource.captured_req_body == expected_body.encode('utf-8')
assert stream.tell() == expected_len
@pytest.mark.parametrize(
'body_length, content_length',
[
(1, 0),
(2, 1),
(3, 2),
(100, None),
(100, 50),
(8192, 50),
],
)
async def test_content_length_smaller_than_body(self, body_length, content_length):
body_in = os.urandom(body_length)
scope = testing.create_scope(content_length=content_length)
req_event_emitter = testing.ASGIRequestEventEmitter(body=body_in)
req_event_emitter._emit_empty_chunks = False
first_event = await req_event_emitter.emit()
req = falcon.asgi.Request(scope, req_event_emitter, first_event=first_event)
body_out = await req.bounded_stream.read()
assert body_out == body_in[:content_length]
def test_read_body(self, client, resource):
client.app.add_route('/', resource)
expected_body = testing.rand_string(SIZE_1_KB // 2, SIZE_1_KB)
expected_len = len(expected_body)
headers = {
'Content-Length': str(expected_len),
'Capture-Req-Body-Bytes': '-1',
}
client.simulate_request(path='/', body=expected_body, headers=headers)
content_len = resource.captured_req.get_header('content-length')
assert content_len == str(expected_len)
stream = resource.captured_req.stream
assert resource.captured_req_body == expected_body.encode('utf-8')
assert stream.tell() == expected_len
def test_bounded_stream_alias(self):
scope = testing.create_scope()
req_event_emitter = testing.ASGIRequestEventEmitter(b'', disconnect_at=0)
req = falcon.asgi.Request(scope, req_event_emitter)
assert req.bounded_stream is req.stream
def test_request_repr(self):
scope = testing.create_scope()
req_event_emitter = testing.ASGIRequestEventEmitter(b'', disconnect_at=0)
req = falcon.asgi.Request(scope, req_event_emitter)
_repr = f'<{req.__class__.__name__}: {req.method} {req.url!r}>'
assert req.__repr__() == _repr
| TestRequestBody |
python | sqlalchemy__sqlalchemy | test/sql/test_metadata.py | {
"start": 75060,
"end": 80909
} | class ____(fixtures.TestBase):
def test_multi_integer_no_autoinc(self):
pk = PrimaryKeyConstraint(Column("a", Integer), Column("b", Integer))
t = Table("t", MetaData())
t.append_constraint(pk)
is_(pk._autoincrement_column, None)
def test_multi_integer_multi_autoinc(self):
pk = PrimaryKeyConstraint(
Column("a", Integer, autoincrement=True),
Column("b", Integer, autoincrement=True),
)
t = Table("t", MetaData())
t.append_constraint(pk)
assert_raises_message(
exc.ArgumentError,
"Only one Column may be marked",
lambda: pk._autoincrement_column,
)
def test_single_integer_no_autoinc(self):
pk = PrimaryKeyConstraint(Column("a", Integer))
t = Table("t", MetaData())
t.append_constraint(pk)
is_(pk._autoincrement_column, pk.columns["a"])
def test_single_string_no_autoinc(self):
pk = PrimaryKeyConstraint(Column("a", String))
t = Table("t", MetaData())
t.append_constraint(pk)
is_(pk._autoincrement_column, None)
def test_single_string_illegal_autoinc(self):
t = Table("t", MetaData(), Column("a", String, autoincrement=True))
pk = PrimaryKeyConstraint(t.c.a)
t.append_constraint(pk)
assert_raises_message(
exc.ArgumentError,
"Column type VARCHAR on column 't.a'",
lambda: pk._autoincrement_column,
)
def test_float_illegal_autoinc(self):
"""test that Float is not acceptable if autoincrement=True
note this changed in 2.1 with #5252 where Numeric/Float were split out
"""
t = Table("t", MetaData(), Column("a", Float, autoincrement=True))
pk = PrimaryKeyConstraint(t.c.a)
t.append_constraint(pk)
with expect_raises_message(
exc.ArgumentError,
"Column type FLOAT on column 't.a' is not compatible "
"with autoincrement=True",
):
pk._autoincrement_column,
def test_numeric_nonzero_scale_illegal_autoinc(self):
"""test that Numeric() with non-zero scale is not acceptable if
autoincrement=True"""
t = Table(
"t", MetaData(), Column("a", Numeric(10, 5), autoincrement=True)
)
pk = PrimaryKeyConstraint(t.c.a)
t.append_constraint(pk)
with expect_raises_message(
exc.ArgumentError,
r"Column type NUMERIC\(10, 5\) with non-zero scale 5",
):
pk._autoincrement_column,
def test_numeric_zero_scale_autoinc_not_auto(self):
"""test that Numeric() is not automatically assigned to
autoincrement"""
t = Table(
"t", MetaData(), Column("a", Numeric(10, 0), primary_key=True)
)
is_(t.autoincrement_column, None)
def test_integer_autoinc_is_auto(self):
"""test that Integer() is automatically assigned to autoincrement"""
t = Table("t", MetaData(), Column("a", Integer, primary_key=True))
is_(t.autoincrement_column, t.c.a)
def test_numeric_zero_scale_autoinc_explicit_ok(self):
"""test that Numeric() with zero scale is acceptable if
autoincrement=True"""
t = Table(
"t",
MetaData(),
Column("a", Numeric(10, 0), autoincrement=True, primary_key=True),
)
is_(t.autoincrement_column, t.c.a)
def test_single_integer_default(self):
t = Table(
"t",
MetaData(),
Column("a", Integer, autoincrement=True, default=lambda: 1),
)
pk = PrimaryKeyConstraint(t.c.a)
t.append_constraint(pk)
is_(pk._autoincrement_column, t.c.a)
def test_single_integer_server_default(self):
# new as of 1.1; now that we have three states for autoincrement,
# if the user puts autoincrement=True with a server_default, trust
# them on it
t = Table(
"t",
MetaData(),
Column(
"a", Integer, autoincrement=True, server_default=func.magic()
),
)
pk = PrimaryKeyConstraint(t.c.a)
t.append_constraint(pk)
is_(pk._autoincrement_column, t.c.a)
def test_implicit_autoinc_but_fks(self):
m = MetaData()
Table("t1", m, Column("id", Integer, primary_key=True))
t2 = Table("t2", MetaData(), Column("a", Integer, ForeignKey("t1.id")))
pk = PrimaryKeyConstraint(t2.c.a)
t2.append_constraint(pk)
is_(pk._autoincrement_column, None)
def test_explicit_autoinc_but_fks(self):
m = MetaData()
Table("t1", m, Column("id", Integer, primary_key=True))
t2 = Table(
"t2",
MetaData(),
Column("a", Integer, ForeignKey("t1.id"), autoincrement=True),
)
pk = PrimaryKeyConstraint(t2.c.a)
t2.append_constraint(pk)
is_(pk._autoincrement_column, t2.c.a)
t3 = Table(
"t3",
MetaData(),
Column(
"a", Integer, ForeignKey("t1.id"), autoincrement="ignore_fk"
),
)
pk = PrimaryKeyConstraint(t3.c.a)
t3.append_constraint(pk)
is_(pk._autoincrement_column, t3.c.a)
def test_no_kw_args(self):
with expect_raises_message(
TypeError,
r"Table\(\) takes at least two positional-only arguments",
check_context=False,
):
Table(name="foo", metadata=MetaData())
with expect_raises_message(
TypeError,
r"Table\(\) takes at least two positional-only arguments",
check_context=False,
):
Table("foo", metadata=MetaData())
| PKAutoIncrementTest |
python | plotly__plotly.py | plotly/graph_objs/layout/mapbox/_bounds.py | {
"start": 235,
"end": 4635
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.mapbox"
_path_str = "layout.mapbox.bounds"
_valid_props = {"east", "north", "south", "west"}
@property
def east(self):
"""
Sets the maximum longitude of the map (in degrees East) if
`west`, `south` and `north` are declared.
The 'east' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["east"]
@east.setter
def east(self, val):
self["east"] = val
@property
def north(self):
"""
Sets the maximum latitude of the map (in degrees North) if
`east`, `west` and `south` are declared.
The 'north' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["north"]
@north.setter
def north(self, val):
self["north"] = val
@property
def south(self):
"""
Sets the minimum latitude of the map (in degrees North) if
`east`, `west` and `north` are declared.
The 'south' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["south"]
@south.setter
def south(self, val):
self["south"] = val
@property
def west(self):
"""
Sets the minimum longitude of the map (in degrees East) if
`east`, `south` and `north` are declared.
The 'west' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["west"]
@west.setter
def west(self, val):
self["west"] = val
@property
def _prop_descriptions(self):
return """\
east
Sets the maximum longitude of the map (in degrees East)
if `west`, `south` and `north` are declared.
north
Sets the maximum latitude of the map (in degrees North)
if `east`, `west` and `south` are declared.
south
Sets the minimum latitude of the map (in degrees North)
if `east`, `west` and `north` are declared.
west
Sets the minimum longitude of the map (in degrees East)
if `east`, `south` and `north` are declared.
"""
def __init__(
self, arg=None, east=None, north=None, south=None, west=None, **kwargs
):
"""
Construct a new Bounds object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.mapbox.Bounds`
east
Sets the maximum longitude of the map (in degrees East)
if `west`, `south` and `north` are declared.
north
Sets the maximum latitude of the map (in degrees North)
if `east`, `west` and `south` are declared.
south
Sets the minimum latitude of the map (in degrees North)
if `east`, `west` and `north` are declared.
west
Sets the minimum longitude of the map (in degrees East)
if `east`, `south` and `north` are declared.
Returns
-------
Bounds
"""
super().__init__("bounds")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.mapbox.Bounds
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.mapbox.Bounds`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("east", arg, east)
self._set_property("north", arg, north)
self._set_property("south", arg, south)
self._set_property("west", arg, west)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Bounds |
python | readthedocs__readthedocs.org | readthedocs/notifications/migrations/0003_notification_indexes.py | {
"start": 148,
"end": 691
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("notifications", "0002_notification_format_values"),
]
operations = [
migrations.AlterModelOptions(
name="notification",
options={},
),
migrations.AddIndex(
model_name="notification",
index=models.Index(
fields=["attached_to_content_type", "attached_to_id"],
name="notificatio_attache_c6aa1d_idx",
),
),
]
| Migration |
python | pytorch__pytorch | tools/test/heuristics/test_interface.py | {
"start": 349,
"end": 1169
} | class ____(unittest.TestCase):
def assert_test_scores_almost_equal(
self, d1: dict[TestRun, float], d2: dict[TestRun, float]
) -> None:
# Check that dictionaries are the same, except for floating point errors
self.assertEqual(set(d1.keys()), set(d2.keys()))
for k, v in d1.items():
self.assertAlmostEqual(v, d2[k], msg=f"{k}: {v} != {d2[k]}")
def make_heuristic(self, classname: str) -> Any:
# Create a dummy heuristic class
class Heuristic(interface.HeuristicInterface):
def get_prediction_confidence(
self, tests: list[str]
) -> interface.TestPrioritizations:
# Return junk
return interface.TestPrioritizations([], {})
return type(classname, (Heuristic,), {})
| TestTD |
python | django-import-export__django-import-export | tests/core/tests/test_resources/test_modelresource/test_resource_transactions.py | {
"start": 340,
"end": 4414
} | class ____(TransactionTestCase):
@skipUnlessDBFeature("supports_transactions")
def test_m2m_import_with_transactions(self):
resource = BookResource()
cat1 = Category.objects.create(name="Cat 1")
headers = ["id", "name", "categories"]
row = [None, "FooBook", str(cat1.pk)]
dataset = tablib.Dataset(row, headers=headers)
result = resource.import_data(dataset, dry_run=True, use_transactions=True)
row_diff = result.rows[0].diff
id_diff = row_diff[0]
# id diff should exist because in rollbacked transaction
# FooBook has been saved
self.assertTrue(id_diff)
categories_diff = row_diff[8]
self.assertEqual(strip_tags(categories_diff), force_str(cat1.pk))
# check that it is really rollbacked
self.assertFalse(Book.objects.filter(name="FooBook"))
@skipUnlessDBFeature("supports_transactions")
def test_m2m_import_with_transactions_error(self):
resource = ProfileResource()
headers = ["id", "user"]
# 'user' is a required field, the database will raise an error.
row = [None, None]
dataset = tablib.Dataset(row, headers=headers)
result = resource.import_data(dataset, dry_run=True, use_transactions=True)
# Ensure the error raised by the database has been saved.
self.assertTrue(result.has_errors())
# Ensure the rollback has worked properly.
self.assertEqual(Profile.objects.count(), 0)
@skipUnlessDBFeature("supports_transactions")
def test_integrity_error_rollback_on_savem2m(self):
# savepoint_rollback() after an IntegrityError gives
# TransactionManagementError (#399)
class CategoryResourceRaisesIntegrityError(CategoryResource):
def save_m2m(self, instance, *args, **kwargs):
# force raising IntegrityError
Category.objects.create(name=instance.name)
resource = CategoryResourceRaisesIntegrityError()
headers = ["id", "name"]
rows = [
[None, "foo"],
]
dataset = tablib.Dataset(*rows, headers=headers)
result = resource.import_data(
dataset,
use_transactions=True,
)
self.assertTrue(result.has_errors())
def test_rollback_on_validation_errors_false(self):
"""Should create only one instance as the second one
raises a ``ValidationError``"""
resource = AuthorResource()
headers = ["id", "name", "birthday"]
rows = [
["", "A.A.Milne", ""],
["", "123", "1992test-01-18"], # raises ValidationError
]
dataset = tablib.Dataset(*rows, headers=headers)
result = resource.import_data(
dataset,
use_transactions=True,
rollback_on_validation_errors=False,
)
# Ensure the validation error raised by the database has been saved.
self.assertTrue(result.has_validation_errors())
# Ensure that valid row resulted in an instance created.
self.assertEqual(Author.objects.count(), 1)
def test_rollback_on_validation_errors_true(self):
"""
Should not create any instances as the second one raises a ``ValidationError``
and ``rollback_on_validation_errors`` flag is set
"""
resource = AuthorResource()
headers = ["id", "name", "birthday"]
rows = [
["", "A.A.Milne", ""],
["", "123", "1992test-01-18"], # raises ValidationError
]
dataset = tablib.Dataset(*rows, headers=headers)
result = resource.import_data(
dataset,
use_transactions=True,
rollback_on_validation_errors=True,
)
# Ensure the validation error raised by the database has been saved.
self.assertTrue(result.has_validation_errors())
# Ensure the rollback has worked properly, no instances were created.
self.assertFalse(Author.objects.exists())
| ModelResourceTransactionTest |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/dagster_types.py | {
"start": 3639,
"end": 3792
} | class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneDagsterType,)
name = "RegularDagsterType"
| GrapheneRegularDagsterType |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/bulk_persistence.py | {
"start": 65889,
"end": 73478
} | class ____(_BulkUDCompileState, DeleteDMLState):
@classmethod
def create_for_statement(cls, statement, compiler, **kw):
self = cls.__new__(cls)
dml_strategy = statement._annotations.get(
"dml_strategy", "unspecified"
)
if (
dml_strategy == "core_only"
or dml_strategy == "unspecified"
and "parententity" not in statement.table._annotations
):
DeleteDMLState.__init__(self, statement, compiler, **kw)
return self
toplevel = not compiler.stack
orm_level_statement = statement
ext_info = statement.table._annotations["parententity"]
self.mapper = mapper = ext_info.mapper
self._init_global_attributes(
statement,
compiler,
toplevel=toplevel,
process_criteria_for_toplevel=toplevel,
)
new_stmt = statement._clone()
if new_stmt.table._annotations["parententity"] is mapper:
new_stmt.table = mapper.local_table
new_crit = cls._adjust_for_extra_criteria(
self.global_attributes, mapper
)
if new_crit:
new_stmt = new_stmt.where(*new_crit)
# do this first as we need to determine if there is
# DELETE..FROM
DeleteDMLState.__init__(self, new_stmt, compiler, **kw)
use_supplemental_cols = False
if not toplevel:
synchronize_session = None
else:
synchronize_session = compiler._annotations.get(
"synchronize_session", None
)
can_use_returning = compiler._annotations.get(
"can_use_returning", None
)
if can_use_returning is not False:
# even though pre_exec has determined basic
# can_use_returning for the dialect, if we are to use
# RETURNING we need to run can_use_returning() at this level
# unconditionally because is_delete_using was not known
# at the pre_exec level
can_use_returning = (
synchronize_session == "fetch"
and self.can_use_returning(
compiler.dialect,
mapper,
is_multitable=self.is_multitable,
is_delete_using=compiler._annotations.get(
"is_delete_using", False
),
)
)
if can_use_returning:
use_supplemental_cols = True
new_stmt = new_stmt.return_defaults(*new_stmt.table.primary_key)
if toplevel:
new_stmt = self._setup_orm_returning(
compiler,
orm_level_statement,
new_stmt,
dml_mapper=mapper,
use_supplemental_cols=use_supplemental_cols,
)
self.statement = new_stmt
return self
@classmethod
def orm_execute_statement(
cls,
session: Session,
statement: dml.Delete,
params: _CoreAnyExecuteParams,
execution_options: OrmExecuteOptionsParameter,
bind_arguments: _BindArguments,
conn: Connection,
) -> _result.Result:
update_options = execution_options.get(
"_sa_orm_update_options", cls.default_update_options
)
if update_options._dml_strategy == "bulk":
raise sa_exc.InvalidRequestError(
"Bulk ORM DELETE not supported right now. "
"Statement may be invoked at the "
"Core level using "
"session.connection().execute(stmt, parameters)"
)
if update_options._dml_strategy not in ("orm", "auto", "core_only"):
raise sa_exc.ArgumentError(
"Valid strategies for ORM DELETE strategy are 'orm', 'auto', "
"'core_only'"
)
return super().orm_execute_statement(
session, statement, params, execution_options, bind_arguments, conn
)
@classmethod
def can_use_returning(
cls,
dialect: Dialect,
mapper: Mapper[Any],
*,
is_multitable: bool = False,
is_update_from: bool = False,
is_delete_using: bool = False,
is_executemany: bool = False,
) -> bool:
# normal answer for "should we use RETURNING" at all.
normal_answer = (
dialect.delete_returning and mapper.local_table.implicit_returning
)
if not normal_answer:
return False
# now get into special workarounds because MariaDB supports
# DELETE...RETURNING but not DELETE...USING...RETURNING.
if is_delete_using:
# is_delete_using hint was passed. use
# additional dialect feature (True for PG, False for MariaDB)
return dialect.delete_returning_multifrom
elif is_multitable and not dialect.delete_returning_multifrom:
# is_delete_using hint was not passed, but we determined
# at compile time that this is in fact a DELETE..USING.
# it's too late to continue since we did not pre-SELECT.
# raise that we need that hint up front.
raise sa_exc.CompileError(
f'Dialect "{dialect.name}" does not support RETURNING '
"with DELETE..USING; for synchronize_session='fetch', "
"please add the additional execution option "
"'is_delete_using=True' to the statement to indicate that "
"a separate SELECT should be used for this backend."
)
return True
@classmethod
def _do_post_synchronize_evaluate(
cls, session, statement, result, update_options
):
matched_objects = cls._get_matched_objects_on_criteria(
update_options,
session.identity_map.all_states(),
)
to_delete = []
for _, state, dict_, is_partially_expired in matched_objects:
if is_partially_expired:
state._expire(dict_, session.identity_map._modified)
else:
to_delete.append(state)
if to_delete:
session._remove_newly_deleted(to_delete)
@classmethod
def _do_post_synchronize_fetch(
cls, session, statement, result, update_options
):
target_mapper = update_options._subject_mapper
returned_defaults_rows = result.returned_defaults_rows
if returned_defaults_rows:
pk_rows = cls._interpret_returning_rows(
result, target_mapper, returned_defaults_rows
)
matched_rows = [
tuple(row) + (update_options._identity_token,)
for row in pk_rows
]
else:
matched_rows = update_options._matched_rows
for row in matched_rows:
primary_key = row[0:-1]
identity_token = row[-1]
# TODO: inline this and call remove_newly_deleted
# once
identity_key = target_mapper.identity_key_from_primary_key(
list(primary_key),
identity_token=identity_token,
)
if identity_key in session.identity_map:
session._remove_newly_deleted(
[
attributes.instance_state(
session.identity_map[identity_key]
)
]
)
| _BulkORMDelete |
python | rapidsai__cudf | python/dask_cudf/dask_cudf/_expr/collection.py | {
"start": 567,
"end": 2452
} | class ____(FrameBase):
def _prepare_cov_corr(self, min_periods, numeric_only):
# Upstream version of this method sets min_periods
# to 2 by default (which is not supported by cudf)
# TODO: Remove when cudf supports both min_periods
# and numeric_only
# See: https://github.com/rapidsai/cudf/issues/12626
# See: https://github.com/rapidsai/cudf/issues/9009
self._meta.cov(min_periods=min_periods)
frame = self
if numeric_only:
numerics = self._meta._get_numeric_data()
if len(numerics.columns) != len(self.columns):
frame = frame[list(numerics.columns)]
return frame, min_periods
# var can be removed if cudf#15179 is addressed.
# See: https://github.com/rapidsai/cudf/issues/14935
def var(
self,
axis=0,
skipna=True,
ddof=1,
numeric_only=False,
split_every=False,
**kwargs,
):
_raise_if_object_series(self, "var")
axis = self._validate_axis(axis)
self._meta.var(axis=axis, skipna=skipna, numeric_only=numeric_only)
frame = self
if is_dataframe_like(self._meta) and numeric_only:
# Convert to pandas - cudf does something weird here
index = self._meta.to_pandas().var(numeric_only=True).index
frame = frame[list(index)]
return new_collection(
frame.expr.var(
axis, skipna, ddof, numeric_only, split_every=split_every
)
)
def rename_axis(
self, mapper=no_default, index=no_default, columns=no_default, axis=0
):
from dask_cudf._expr.expr import RenameAxisCudf
return new_collection(
RenameAxisCudf(
self, mapper=mapper, index=index, columns=columns, axis=axis
)
)
| CudfFrameBase |
python | mkdocs__mkdocs | mkdocs/config/config_options.py | {
"start": 10199,
"end": 10858
} | class ____(ListOfItems[LegacyConfig]):
"""
Deprecated: Use `ListOfItems(SubConfig(...))` instead of `ConfigItems(...)`.
Validates a list of mappings that all must match the same set of
options.
"""
@overload
def __init__(self, *config_options: PlainConfigSchemaItem):
...
@overload
def __init__(self, *config_options: PlainConfigSchemaItem, required: bool):
...
def __init__(self, *config_options: PlainConfigSchemaItem, required=None) -> None:
super().__init__(SubConfig(*config_options), default=[])
self._legacy_required = required
self.required = bool(required)
| ConfigItems |
python | kamyu104__LeetCode-Solutions | Python/rectangle-overlap.py | {
"start": 29,
"end": 455
} | class ____(object):
def isRectangleOverlap(self, rec1, rec2):
"""
:type rec1: List[int]
:type rec2: List[int]
:rtype: bool
"""
def intersect(p_left, p_right, q_left, q_right):
return max(p_left, q_left) < min(p_right, q_right)
return (intersect(rec1[0], rec1[2], rec2[0], rec2[2]) and
intersect(rec1[1], rec1[3], rec2[1], rec2[3]))
| Solution |
python | kubernetes-client__python | kubernetes/client/models/v1_weighted_pod_affinity_term.py | {
"start": 383,
"end": 4882
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'pod_affinity_term': 'V1PodAffinityTerm',
'weight': 'int'
}
attribute_map = {
'pod_affinity_term': 'podAffinityTerm',
'weight': 'weight'
}
def __init__(self, pod_affinity_term=None, weight=None, local_vars_configuration=None): # noqa: E501
"""V1WeightedPodAffinityTerm - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._pod_affinity_term = None
self._weight = None
self.discriminator = None
self.pod_affinity_term = pod_affinity_term
self.weight = weight
@property
def pod_affinity_term(self):
"""Gets the pod_affinity_term of this V1WeightedPodAffinityTerm. # noqa: E501
:return: The pod_affinity_term of this V1WeightedPodAffinityTerm. # noqa: E501
:rtype: V1PodAffinityTerm
"""
return self._pod_affinity_term
@pod_affinity_term.setter
def pod_affinity_term(self, pod_affinity_term):
"""Sets the pod_affinity_term of this V1WeightedPodAffinityTerm.
:param pod_affinity_term: The pod_affinity_term of this V1WeightedPodAffinityTerm. # noqa: E501
:type: V1PodAffinityTerm
"""
if self.local_vars_configuration.client_side_validation and pod_affinity_term is None: # noqa: E501
raise ValueError("Invalid value for `pod_affinity_term`, must not be `None`") # noqa: E501
self._pod_affinity_term = pod_affinity_term
@property
def weight(self):
"""Gets the weight of this V1WeightedPodAffinityTerm. # noqa: E501
weight associated with matching the corresponding podAffinityTerm, in the range 1-100. # noqa: E501
:return: The weight of this V1WeightedPodAffinityTerm. # noqa: E501
:rtype: int
"""
return self._weight
@weight.setter
def weight(self, weight):
"""Sets the weight of this V1WeightedPodAffinityTerm.
weight associated with matching the corresponding podAffinityTerm, in the range 1-100. # noqa: E501
:param weight: The weight of this V1WeightedPodAffinityTerm. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and weight is None: # noqa: E501
raise ValueError("Invalid value for `weight`, must not be `None`") # noqa: E501
self._weight = weight
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1WeightedPodAffinityTerm):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1WeightedPodAffinityTerm):
return True
return self.to_dict() != other.to_dict()
| V1WeightedPodAffinityTerm |
python | openai__openai-python | tests/test_transform.py | {
"start": 1247,
"end": 1286
} | class ____(TypedDict):
bar: Bar2
| Foo2 |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 347596,
"end": 349337
} | class ____(rv_continuous):
r"""A Tukey-Lamdba continuous random variable.
%(before_notes)s
Notes
-----
A flexible distribution, able to represent and interpolate between the
following distributions:
- Cauchy (:math:`lambda = -1`)
- logistic (:math:`lambda = 0`)
- approx Normal (:math:`lambda = 0.14`)
- uniform from -1 to 1 (:math:`lambda = 1`)
`tukeylambda` takes a real number :math:`lambda` (denoted ``lam``
in the implementation) as a shape parameter.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _argcheck(self, lam):
return np.isfinite(lam)
def _shape_info(self):
return [_ShapeInfo("lam", False, (-np.inf, np.inf), (False, False))]
def _get_support(self, lam):
b = xpx.apply_where(lam > 0, lam,
lambda lam: 1/lam,
fill_value=np.inf)
return -b, b
def _pdf(self, x, lam):
Fx = np.asarray(sc.tklmbda(x, lam))
Px = Fx**(lam-1.0) + (np.asarray(1-Fx))**(lam-1.0)
with np.errstate(divide='ignore'):
Px = 1.0/np.asarray(Px)
return np.where((lam <= 0) | (abs(x) < 1.0/np.asarray(lam)), Px, 0.0)
def _cdf(self, x, lam):
return sc.tklmbda(x, lam)
def _ppf(self, q, lam):
return sc.boxcox(q, lam) - sc.boxcox1p(-q, lam)
def _stats(self, lam):
return 0, _tlvar(lam), 0, _tlkurt(lam)
def _entropy(self, lam):
def integ(p):
return np.log(pow(p, lam-1)+pow(1-p, lam-1))
return integrate.quad(integ, 0, 1)[0]
tukeylambda = tukeylambda_gen(name='tukeylambda')
| tukeylambda_gen |
python | getlogbook__logbook | src/logbook/ticketing.py | {
"start": 3187,
"end": 10725
} | class ____(BackendBase):
"""Implements a backend that is writing into a database SQLAlchemy can
interface.
This backend takes some additional options:
`table_prefix`
an optional table prefix for all tables created by
the logbook ticketing handler.
`metadata`
an optional SQLAlchemy metadata object for the table creation.
`autocreate_tables`
can be set to `False` to disable the automatic
creation of the logbook tables.
"""
def setup_backend(self):
from sqlalchemy import MetaData, create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
engine_or_uri = self.options.pop("uri", None)
metadata = self.options.pop("metadata", None)
table_prefix = self.options.pop("table_prefix", "logbook_")
if hasattr(engine_or_uri, "execute"):
self.engine = engine_or_uri
else:
# Pool recycle keeps connections from going stale,
# which happens in MySQL Databases
# Pool size is more custom for out stack
self.engine = create_engine(engine_or_uri, pool_recycle=360, pool_size=1000)
# Create session factory using session maker
session = sessionmaker()
# Bind to the engined
session.configure(bind=self.engine)
# Scoped session is a thread safe solution for
# interaction with the Database
self.session = scoped_session(session)
if metadata is None:
metadata = MetaData()
self.table_prefix = table_prefix
self.metadata = metadata
self.create_tables()
if self.options.get("autocreate_tables", True):
self.metadata.create_all(bind=self.engine)
def create_tables(self):
"""Creates the tables required for the handler on the class and
metadata.
"""
import sqlalchemy as db
def table(name, *args, **kwargs):
return db.Table(self.table_prefix + name, self.metadata, *args, **kwargs)
self.tickets = table(
"tickets",
db.Column("ticket_id", db.Integer, primary_key=True),
db.Column("record_hash", db.String(40), unique=True),
db.Column("level", db.Integer),
db.Column("channel", db.String(120)),
db.Column("location", db.String(512)),
db.Column("module", db.String(256)),
db.Column("last_occurrence_time", db.DateTime),
db.Column("occurrence_count", db.Integer),
db.Column("solved", db.Boolean),
db.Column("app_id", db.String(80)),
)
self.occurrences = table(
"occurrences",
db.Column("occurrence_id", db.Integer, primary_key=True),
db.Column(
"ticket_id",
db.Integer,
db.ForeignKey(self.table_prefix + "tickets.ticket_id"),
),
db.Column("time", db.DateTime),
db.Column("data", db.Text),
db.Column("app_id", db.String(80)),
)
def _order(self, q, table, order_by):
if order_by[0] == "-":
return q.order_by(table.c[order_by[1:]].desc())
return q.order_by(table.c[order_by])
def record_ticket(self, record, data, hash, app_id):
"""Records a log record as ticket."""
# Can use the session instead engine.connection and transaction
s = self.session
try:
q = self.tickets.select().where(self.tickets.c.record_hash == hash)
row = s.execute(q).one_or_none()
if row is None:
row = s.execute(
self.tickets.insert().values(
record_hash=hash,
level=record.level,
channel=record.channel or "",
location="%s:%d" % (record.filename, record.lineno), # noqa: UP031
module=record.module or "<unknown>",
occurrence_count=0,
solved=False,
app_id=app_id,
)
)
ticket_id = row.inserted_primary_key[0]
else:
ticket_id = row.ticket_id
s.execute(
self.occurrences.insert().values(
ticket_id=ticket_id,
time=record.time,
app_id=app_id,
data=json.dumps(data),
)
)
s.execute(
self.tickets.update()
.where(self.tickets.c.ticket_id == ticket_id)
.values(
occurrence_count=self.tickets.c.occurrence_count + 1,
last_occurrence_time=record.time,
solved=False,
)
)
s.commit()
except Exception:
s.rollback()
raise
# Closes the session and removes it from the pool
s.remove()
def count_tickets(self):
"""Returns the number of tickets."""
from sqlalchemy import func, select
with self.engine.begin() as conn:
return conn.scalar(select(func.count()).select_from(self.tickets))
def get_tickets(self, order_by="-last_occurrence_time", limit=50, offset=0):
"""Selects tickets from the database."""
with self.engine.begin() as conn:
return [
Ticket(self, row)
for row in conn.execute(
self._order(self.tickets.select(), self.tickets, order_by)
.limit(limit)
.offset(offset)
)
]
def solve_ticket(self, ticket_id):
"""Marks a ticket as solved."""
with self.engine.begin() as conn:
conn.execute(
self.tickets.update()
.where(self.tickets.c.ticket_id == ticket_id)
.values(solved=True)
)
def delete_ticket(self, ticket_id):
"""Deletes a ticket from the database."""
with self.engine.begin() as conn:
conn.execute(
self.occurrences.delete().where(
self.occurrences.c.ticket_id == ticket_id
)
)
conn.execute(
self.tickets.delete().where(self.tickets.c.ticket_id == ticket_id)
)
def get_ticket(self, ticket_id):
"""Return a single ticket with all occurrences."""
with self.engine.begin() as conn:
row = conn.execute(
self.tickets.select().where(self.tickets.c.ticket_id == ticket_id)
).one_or_none()
if row is not None:
return Ticket(self, row)
def get_occurrences(self, ticket, order_by="-time", limit=50, offset=0):
"""Selects occurrences from the database for a ticket."""
with self.engine.begin() as conn:
return [
Occurrence(self, row)
for row in conn.execute(
self._order(
self.occurrences.select().where(
self.occurrences.c.ticket_id == ticket
),
self.occurrences,
order_by,
)
.limit(limit)
.offset(offset)
)
]
| SQLAlchemyBackend |
python | facebookresearch__faiss | tests/test_clustering.py | {
"start": 5332,
"end": 7765
} | class ____(unittest.TestCase):
def test_redo(self):
d = 64
n = 1000
rs = np.random.RandomState(123)
x = rs.uniform(size=(n, d)).astype('float32')
# make sure that doing 10 redos yields a better objective than just 1
clus = faiss.Clustering(d, 20)
clus.nredo = 1
clus.train(x, faiss.IndexFlatL2(d))
obj1 = clus.iteration_stats.at(clus.iteration_stats.size() - 1).obj
clus = faiss.Clustering(d, 20)
clus.nredo = 10
clus.train(x, faiss.IndexFlatL2(d))
obj10 = clus.iteration_stats.at(clus.iteration_stats.size() - 1).obj
self.assertGreater(obj1, obj10)
def test_redo_cosine(self):
# test redo with cosine distance (inner prod, so objectives are reversed)
d = 64
n = 1000
rs = np.random.RandomState(123)
x = rs.uniform(size=(n, d)).astype('float32')
faiss.normalize_L2(x)
# make sure that doing 10 redos yields a better objective than just 1
# for cosine distance, it is IP so higher is better
clus = faiss.Clustering(d, 20)
clus.nredo = 1
clus.train(x, faiss.IndexFlatIP(d))
obj1 = clus.iteration_stats.at(clus.iteration_stats.size() - 1).obj
clus = faiss.Clustering(d, 20)
clus.nredo = 10
clus.train(x, faiss.IndexFlatIP(d))
obj10 = clus.iteration_stats.at(clus.iteration_stats.size() - 1).obj
self.assertGreater(obj10, obj1)
def test_progressive_dim(self):
d = 32
n = 10000
k = 50
xt, _, _ = get_dataset_2(d, n, 0, 0)
# basic kmeans
kmeans = faiss.Kmeans(d, k)
kmeans.train(xt)
clus = faiss.ProgressiveDimClustering(d, k)
clus.verbose
clus.verbose = True
clus.progressive_dim_steps
clus.progressive_dim_steps = 5
fac = faiss.ProgressiveDimIndexFactory()
clus.train(n, faiss.swig_ptr(xt), fac)
stats = clus.iteration_stats
stats = [stats.at(i) for i in range(stats.size())]
obj = np.array([st.obj for st in stats])
# clustering objective should be a tad better
self.assertLess(obj[-1], kmeans.obj[-1])
# same test w/ Kmeans wrapper
kmeans2 = faiss.Kmeans(d, k, progressive_dim_steps=5)
kmeans2.train(xt)
self.assertLess(kmeans2.obj[-1], kmeans.obj[-1])
| TestCompositeClustering |
python | PrefectHQ__prefect | src/prefect/serializers.py | {
"start": 4517,
"end": 5445
} | class ____(Serializer[D]):
"""
Serializes objects using the pickle protocol.
- Uses `cloudpickle` by default. See `picklelib` for using alternative libraries.
- Stores the version of the pickle library to check for compatibility during
deserialization.
- Wraps pickles in base64 for safe transmission.
"""
type: str = Field(default="pickle", frozen=True)
picklelib: str = "cloudpickle"
picklelib_version: Optional[str] = None
@field_validator("picklelib")
def check_picklelib(cls, value: str) -> str:
return validate_picklelib(value)
def dumps(self, obj: D) -> bytes:
pickler = from_qualified_name(self.picklelib)
blob = pickler.dumps(obj)
return base64.encodebytes(blob)
def loads(self, blob: bytes) -> D:
pickler = from_qualified_name(self.picklelib)
return pickler.loads(base64.decodebytes(blob))
| PickleSerializer |
python | huggingface__transformers | src/transformers/models/switch_transformers/configuration_switch_transformers.py | {
"start": 777,
"end": 9054
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`SwitchTransformersModel`]. It is used to
instantiate a SwitchTransformers model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the
SwitchTransformers [google/switch-base-8](https://huggingface.co/google/switch-base-8) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Arguments:
vocab_size (`int`, *optional*, defaults to 32128):
Vocabulary size of the SwitchTransformers model. Defines the number of different tokens that can be
represented by the `inputs_ids` passed when calling [`SwitchTransformersModel`].
d_model (`int`, *optional*, defaults to 768):
Size of the encoder layers and the pooler layer.
d_kv (`int`, *optional*, defaults to 64):
Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model //
num_heads`.
d_ff (`int`, *optional*, defaults to 2048):
Size of the intermediate feed forward layer in each `SwitchTransformersBlock`.
expert_capacity (`int`, *optional*, defaults to 64):
Number of tokens that can be stored in each expert. If set to 1, the model will behave like a regular
Transformer.
num_layers (`int`, *optional*, defaults to 12):
Number of dense hidden layers in the Transformer encoder layer.
num_sparse_encoder_layers (`int`, *optional*, defaults to 3):
Number of sparse (MoE) dense hidden layers in the Transformer encoder layer.
num_decoder_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
num_sparse_decoder_layers (`int`, *optional*, defaults to 3):
Number of sparse (MoE) dense hidden layers in the Transformer decoder layer.
num_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_experts (`int`, *optional*, defaults to 8):
Number of experts for each SwitchTransformer layer.
router_bias (`bool`, *optional*, defaults to `False`):
Whether to add a bias to the router.
router_jitter_noise (`float`, *optional*, defaults to 0.01):
Amount of noise to add to the router.
router_dtype (`str`, *optional*, default to `"float32"`):
The `dtype` used for the routers. It is preferable to keep the `dtype` to `"float32"` as specified in the
*selective precision* discussion in [the paper](https://huggingface.co/papers/2101.03961).
router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`):
Whether to ignore padding tokens when routing.
relative_attention_num_buckets (`int`, *optional*, defaults to 32):
The number of buckets to use for each attention layer.
relative_attention_max_distance (`int`, *optional*, defaults to 128):
The maximum distance of the longer sequences for the bucket separation.
dropout_rate (`float`, *optional*, defaults to 0.1):
The ratio for all dropout layers.
layer_norm_eps (`float`, *optional*, defaults to 1e-6):
The epsilon used by the layer normalization layers.
router_z_loss_coef (`float`, *optional*, defaults to 0.001):
The z loss factor for the total loss.
router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
The aux loss factor for the total loss.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
dense_act_fn (`string`, *optional*, defaults to `"relu"`):
Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. SwitchTransformersv1.1
uses the `"gated-gelu"` feed forward projection. Original SwitchTransformers uses `"relu"`.
add_router_probs (`bool`, *optional*, defaults to `False`):
Whether to output router probabilities to compute router auxiliary loss.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
"""
model_type = "switch_transformers"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__(
self,
vocab_size=32128,
d_model=768,
d_kv=64,
d_ff=2048,
expert_capacity=64,
num_layers=12,
num_sparse_encoder_layers=3,
num_decoder_layers=12,
num_sparse_decoder_layers=3,
num_heads=12,
num_experts=8,
router_bias=False,
router_jitter_noise=0.01,
router_dtype="float32",
router_ignore_padding_tokens=False,
relative_attention_num_buckets=32,
relative_attention_max_distance=128,
dropout_rate=0.1,
layer_norm_epsilon=1e-6,
router_z_loss_coef=0.001,
router_aux_loss_coef=0.001,
initializer_factor=1.0,
dense_act_fn="relu",
is_encoder_decoder=True,
add_router_probs=False,
use_cache=True,
pad_token_id=0,
eos_token_id=1,
**kwargs,
):
self.vocab_size = vocab_size
self.d_model = d_model
self.d_kv = d_kv
self.d_ff = d_ff
self.num_sparse_encoder_layers = num_sparse_encoder_layers
self.num_layers = num_layers
self.num_decoder_layers = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
self.num_sparse_decoder_layers = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
else:
self.encoder_sparse_step = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
self.decoder_sparse_step = self.num_decoder_layers # HACK: this will create 0 sparse layers
self.num_heads = num_heads
self.num_experts = num_experts
self.expert_capacity = expert_capacity
self.router_bias = router_bias
self.router_jitter_noise = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
self.router_dtype = router_dtype
self.router_ignore_padding_tokens = router_ignore_padding_tokens
self.relative_attention_num_buckets = relative_attention_num_buckets
self.relative_attention_max_distance = relative_attention_max_distance
self.dropout_rate = dropout_rate
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_factor = initializer_factor
self.use_cache = use_cache
self.add_router_probs = add_router_probs
self.router_z_loss_coef = router_z_loss_coef
self.router_aux_loss_coef = router_aux_loss_coef
self.dense_act_fn = dense_act_fn
super().__init__(
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
**kwargs,
)
__all__ = ["SwitchTransformersConfig"]
| SwitchTransformersConfig |
python | urllib3__urllib3 | test/test_no_ssl.py | {
"start": 380,
"end": 729
} | class ____:
@classmethod
def setup_class(cls) -> None:
sys.modules.pop("ssl", None)
sys.modules.pop("_ssl", None)
module_stash.stash()
sys.meta_path.insert(0, ssl_blocker)
@classmethod
def teardown_class(cls) -> None:
sys.meta_path.remove(ssl_blocker)
module_stash.pop()
| TestWithoutSSL |
python | pydata__xarray | xarray/backends/memory.py | {
"start": 207,
"end": 1525
} | class ____(AbstractWritableDataStore):
"""
Stores dimensions, variables and attributes in ordered dictionaries, making
this store fast compared to stores which save to disk.
This store exists purely for internal testing purposes.
"""
def __init__(self, variables=None, attributes=None):
self._variables = {} if variables is None else variables
self._attributes = {} if attributes is None else attributes
def get_attrs(self):
return self._attributes
def get_variables(self):
res = {}
for k, v in self._variables.items():
v = v.copy(deep=True)
res[k] = v
v._data = indexing.LazilyIndexedArray(v._data)
return res
def get_dimensions(self):
return {d: s for v in self._variables.values() for d, s in v.dims.items()}
def prepare_variable(self, k, v, *args, **kwargs):
new_var = Variable(v.dims, np.empty_like(v), v.attrs)
self._variables[k] = new_var
return new_var, v.data
def set_attribute(self, k, v):
# copy to imitate writing to disk.
self._attributes[k] = copy.deepcopy(v)
def set_dimension(self, dim, length, unlimited_dims=None):
# in this model, dimensions are accounted for in the variables
pass
| InMemoryDataStore |
python | django-compressor__django-compressor | compressor/exceptions.py | {
"start": 684,
"end": 812
} | class ____(Exception):
"""
This exception is raised when a template does not exist.
"""
pass
| TemplateDoesNotExist |
python | plotly__plotly.py | plotly/graph_objs/_deprecations.py | {
"start": 4554,
"end": 5461
} | class ____(dict):
"""
plotly.graph_objs.ColorBar is deprecated.
Please replace it with one of the following more specific types
- plotly.graph_objs.scatter.marker.ColorBar
- plotly.graph_objs.surface.ColorBar
- etc.
"""
def __init__(self, *args, **kwargs):
"""
plotly.graph_objs.ColorBar is deprecated.
Please replace it with one of the following more specific types
- plotly.graph_objs.scatter.marker.ColorBar
- plotly.graph_objs.surface.ColorBar
- etc.
"""
warnings.warn(
"""plotly.graph_objs.ColorBar is deprecated.
Please replace it with one of the following more specific types
- plotly.graph_objs.scatter.marker.ColorBar
- plotly.graph_objs.surface.ColorBar
- etc.
""",
DeprecationWarning,
)
super().__init__(*args, **kwargs)
| ColorBar |
python | getsentry__sentry | src/sentry/integrations/opsgenie/integration.py | {
"start": 4922,
"end": 9533
} | class ____(IntegrationInstallation):
def get_keyring_client(self, keyid: int | str) -> OpsgenieClient:
org_integration = self.org_integration
assert org_integration, "OrganizationIntegration is required"
team = get_team(team_id=keyid, org_integration=org_integration)
assert team, "Cannot get client for unknown team"
return OpsgenieClient(
integration=self.model,
integration_key=team["integration_key"],
)
def get_client(self) -> Any:
raise NotImplementedError("Use get_keyring_client instead.")
def get_organization_config(self) -> Sequence[Any]:
fields = [
{
"name": "team_table",
"type": "table",
"label": "Opsgenie integrations",
"help": "Your keys have to be associated with a Sentry integration in Opsgenie. You can update, delete, or add them here. You’ll need to update alert rules individually for any added or deleted keys.",
"addButtonText": "",
"columnLabels": {
"team": "Label",
"integration_key": "Integration Key",
},
"columnKeys": ["team", "integration_key"],
"confirmDeleteMessage": "Any alert rules associated with this integration will stop working. The rules will still exist but will show a `removed` team.",
}
]
return fields
def update_organization_config(self, data: MutableMapping[str, Any]) -> None:
from sentry.integrations.services.integration import integration_service
# add the integration ID to a newly added row
if not self.org_integration:
return
teams = data["team_table"]
unsaved_teams = [team for team in teams if team["id"] == ""]
# this is not instantaneous, so you could add the same team a bunch of times in a row
# but I don't anticipate this being too much of an issue
added_names = {team["team"] for team in teams if team not in unsaved_teams}
existing_team_key_pairs = {
(team["team"], team["integration_key"]) for team in teams if team not in unsaved_teams
}
integration = integration_service.get_integration(
organization_integration_id=self.org_integration.id, status=ObjectStatus.ACTIVE
)
if not integration:
raise IntegrationError("Integration does not exist")
for team in unsaved_teams:
if team["team"] in added_names:
raise ValidationError({"duplicate_name": ["Duplicate team name."]})
team["id"] = str(self.org_integration.id) + "-" + team["team"]
invalid_keys = []
with record_event(OnCallInteractionType.VERIFY_KEYS).capture() as lifecycle:
for team in teams:
# skip if team, key pair already exist in config
if (team["team"], team["integration_key"]) in existing_team_key_pairs:
continue
integration_key = team["integration_key"]
# validate integration keys
client = OpsgenieClient(
integration=integration,
integration_key=integration_key,
)
# call an API to test the integration key
try:
client.get_alerts()
except ApiError as e:
if e.code == 429:
raise ApiRateLimitedError(
"Too many requests. Please try updating one team/key at a time."
)
elif e.code == 401:
invalid_keys.append(integration_key)
elif e.json and e.json.get("message"):
raise ApiError(e.json["message"])
else:
raise
if invalid_keys:
lifecycle.record_halt(
OnCallIntegrationsHaltReason.INVALID_KEY,
extra={"invalid_keys": invalid_keys, "integration_id": integration.id},
)
raise ApiUnauthorized(f"Invalid integration key: {str(invalid_keys)}")
return super().update_organization_config(data)
def schedule_migrate_opsgenie_plugin(self):
migrate_opsgenie_plugin.apply_async(
kwargs={
"integration_id": self.model.id,
"organization_id": self.organization_id,
}
)
| OpsgenieIntegration |
python | astropy__astropy | astropy/convolution/tests/test_convolve.py | {
"start": 14928,
"end": 24946
} | class ____:
def test_list(self):
"""
Test that convolve works correctly when inputs are lists
"""
x = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
z = convolve(x, x, boundary="fill", fill_value=1, normalize_kernel=True)
assert_array_almost_equal_nulp(z, x, 10)
z = convolve(x, x, boundary="fill", fill_value=1, normalize_kernel=False)
assert_array_almost_equal_nulp(z, np.array(x, float) * 9, 10)
@pytest.mark.parametrize("dtype_array", VALID_DTYPES)
@pytest.mark.parametrize("dtype_kernel", VALID_DTYPES)
def test_dtype(self, dtype_array, dtype_kernel):
"""
Test that 32- and 64-bit floats are correctly handled
"""
x = np.array(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype=dtype_array
)
y = np.array(
[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], dtype=dtype_kernel
)
z = convolve(x, y)
assert x.dtype == z.dtype
@pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS)
def test_unity_1x1_none(self, boundary):
"""
Test that a 1x1 unit kernel returns the same array
"""
x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype=">f8")
y = np.array([[1.0]], dtype=">f8")
z = convolve(x, y, boundary=boundary)
assert np.all(z == x)
@pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS)
def test_unity_3x3(self, boundary):
"""
Test that a 3x3 unit kernel returns the same array (except when
boundary is None).
"""
x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype=">f8")
y = np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], dtype=">f8")
z = convolve(x, y, boundary=boundary)
if boundary is None:
assert np.all(
z
== np.array(
[[0.0, 0.0, 0.0], [0.0, 5.0, 0.0], [0.0, 0.0, 0.0]], dtype=">f8"
)
)
else:
assert np.all(z == x)
@pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS)
def test_uniform_3x3(self, boundary):
"""
Test that the different modes are producing the correct results using
a 3x3 uniform kernel.
"""
x = np.array([[0.0, 0.0, 3.0], [1.0, 0.0, 0.0], [0.0, 2.0, 0.0]], dtype=">f8")
y = np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], dtype=">f8")
z = convolve(x, y, boundary=boundary, normalize_kernel=False)
if boundary is None:
assert_array_almost_equal_nulp(
z,
np.array(
[[0.0, 0.0, 0.0], [0.0, 6.0, 0.0], [0.0, 0.0, 0.0]], dtype=">f8"
),
10,
)
elif boundary == "fill":
assert_array_almost_equal_nulp(
z,
np.array(
[[1.0, 4.0, 3.0], [3.0, 6.0, 5.0], [3.0, 3.0, 2.0]], dtype=">f8"
),
10,
)
elif boundary == "wrap":
assert_array_almost_equal_nulp(
z,
np.array(
[[6.0, 6.0, 6.0], [6.0, 6.0, 6.0], [6.0, 6.0, 6.0]], dtype=">f8"
),
10,
)
else:
assert_array_almost_equal_nulp(
z,
np.array(
[[2.0, 7.0, 12.0], [4.0, 6.0, 8.0], [6.0, 5.0, 4.0]], dtype=">f8"
),
10,
)
@pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS)
def test_unity_3x3_withnan(self, boundary):
"""
Test that a 3x3 unit kernel returns the same array (except when
boundary is None). This version includes a NaN value in the original
array.
"""
x = np.array(
[[1.0, 2.0, 3.0], [4.0, np.nan, 6.0], [7.0, 8.0, 9.0]], dtype=">f8"
)
y = np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], dtype=">f8")
z = convolve(x, y, boundary=boundary, nan_treatment="fill", preserve_nan=True)
assert np.isnan(z[1, 1])
x = np.nan_to_num(z)
z = np.nan_to_num(z)
if boundary is None:
assert np.all(
z
== np.array(
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=">f8"
)
)
else:
assert np.all(z == x)
@pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS)
def test_uniform_3x3_withnanfilled(self, boundary):
"""
Test that the different modes are producing the correct results using
a 3x3 uniform kernel. This version includes a NaN value in the
original array.
"""
x = np.array(
[[0.0, 0.0, 4.0], [1.0, np.nan, 0.0], [0.0, 3.0, 0.0]], dtype=">f8"
)
y = np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], dtype=">f8")
z = convolve(
x, y, boundary=boundary, nan_treatment="fill", normalize_kernel=False
)
if boundary is None:
assert_array_almost_equal_nulp(
z,
np.array(
[[0.0, 0.0, 0.0], [0.0, 8.0, 0.0], [0.0, 0.0, 0.0]], dtype=">f8"
),
10,
)
elif boundary == "fill":
assert_array_almost_equal_nulp(
z,
np.array(
[[1.0, 5.0, 4.0], [4.0, 8.0, 7.0], [4.0, 4.0, 3.0]], dtype=">f8"
),
10,
)
elif boundary == "wrap":
assert_array_almost_equal_nulp(
z,
np.array(
[[8.0, 8.0, 8.0], [8.0, 8.0, 8.0], [8.0, 8.0, 8.0]], dtype=">f8"
),
10,
)
elif boundary == "extend":
assert_array_almost_equal_nulp(
z,
np.array(
[[2.0, 9.0, 16.0], [5.0, 8.0, 11.0], [8.0, 7.0, 6.0]], dtype=">f8"
),
10,
)
else:
raise ValueError("Invalid boundary specification")
@pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS)
def test_uniform_3x3_withnaninterped(self, boundary):
"""
Test that the different modes are producing the correct results using
a 3x3 uniform kernel. This version includes a NaN value in the
original array.
"""
x = np.array(
[[0.0, 0.0, 4.0], [1.0, np.nan, 0.0], [0.0, 3.0, 0.0]], dtype=">f8"
)
y = np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], dtype=">f8")
z = convolve(
x, y, boundary=boundary, nan_treatment="interpolate", normalize_kernel=True
)
if boundary is None:
assert_array_almost_equal_nulp(
z,
np.array(
[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], dtype=">f8"
),
10,
)
elif boundary == "fill":
assert_array_almost_equal_nulp(
z,
np.array(
[
[1.0 / 8, 5.0 / 8, 4.0 / 8],
[4.0 / 8, 8.0 / 8, 7.0 / 8],
[4.0 / 8, 4.0 / 8, 3.0 / 8],
],
dtype=">f8",
),
10,
)
elif boundary == "wrap":
assert_array_almost_equal_nulp(
z,
np.array(
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], dtype=">f8"
),
10,
)
elif boundary == "extend":
assert_array_almost_equal_nulp(
z,
np.array(
[
[2.0 / 8, 9.0 / 8, 16.0 / 8],
[5.0 / 8, 8.0 / 8, 11.0 / 8],
[8.0 / 8, 7.0 / 8, 6.0 / 8],
],
dtype=">f8",
),
10,
)
else:
raise ValueError("Invalid boundary specification")
@pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS)
def test_non_normalized_kernel_2D(self, boundary):
x = np.array([[0.0, 0.0, 4.0], [1.0, 2.0, 0.0], [0.0, 3.0, 0.0]], dtype="float")
y = np.array(
[[1.0, -1.0, 1.0], [-1.0, 0.0, -1.0], [1.0, -1.0, 1.0]], dtype="float"
)
z = convolve(
x, y, boundary=boundary, nan_treatment="fill", normalize_kernel=False
)
if boundary is None:
assert_array_almost_equal_nulp(
z,
np.array(
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype="float"
),
10,
)
elif boundary == "fill":
assert_array_almost_equal_nulp(
z,
np.array(
[[1.0, -5.0, 2.0], [1.0, 0.0, -3.0], [-2.0, -1.0, -1.0]],
dtype="float",
),
10,
)
elif boundary == "wrap":
assert_array_almost_equal_nulp(
z,
np.array(
[[0.0, -8.0, 6.0], [5.0, 0.0, -4.0], [2.0, 3.0, -4.0]],
dtype="float",
),
10,
)
elif boundary == "extend":
assert_array_almost_equal_nulp(
z,
np.array(
[[2.0, -1.0, -2.0], [0.0, 0.0, 1.0], [2.0, -4.0, 2.0]],
dtype="float",
),
10,
)
else:
raise ValueError("Invalid boundary specification")
| TestConvolve2D |
python | walkccc__LeetCode | solutions/461. Hamming Distance/461.py | {
"start": 0,
"end": 182
} | class ____:
def hammingDistance(self, x: int, y: int) -> int:
ans = 0
while x > 0 or y > 0:
ans += (x & 1) ^ (y & 1)
x >>= 1
y >>= 1
return ans
| Solution |
python | jina-ai__jina | jina/proto/docarray_v1/pb/jina_pb2_grpc.py | {
"start": 4250,
"end": 5205
} | class ____(object):
"""*
jina gRPC service for DataRequests.
This is used to send requests to Executors when a list of requests is not needed
"""
@staticmethod
def process_single_data(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
'/jina.JinaSingleDataRequestRPC/process_single_data',
jina__pb2.DataRequestProto.SerializeToString,
jina__pb2.DataRequestProto.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
| JinaSingleDataRequestRPC |
python | Lightning-AI__lightning | src/lightning/fabric/plugins/precision/amp.py | {
"start": 1155,
"end": 5052
} | class ____(Precision):
"""Plugin for Automatic Mixed Precision (AMP) training with ``torch.autocast``.
Args:
precision: Whether to use ``torch.float16`` (``'16-mixed'``) or ``torch.bfloat16`` (``'bf16-mixed'``).
device: The device for ``torch.autocast``.
scaler: An optional :class:`torch.cuda.amp.GradScaler` to use.
"""
def __init__(
self,
precision: Literal["16-mixed", "bf16-mixed"],
device: str,
scaler: Optional["torch.amp.GradScaler"] = None,
) -> None:
if precision not in ("16-mixed", "bf16-mixed"):
raise ValueError(
f"Passed `{type(self).__name__}(precision={precision!r})`."
" Precision must be '16-mixed' or 'bf16-mixed'."
)
self.precision = precision
if scaler is None and self.precision == "16-mixed":
scaler = torch.amp.GradScaler(device=device) if _TORCH_GREATER_EQUAL_2_4 else torch.cuda.amp.GradScaler()
if scaler is not None and self.precision == "bf16-mixed":
raise ValueError(f"`precision='bf16-mixed'` does not use a scaler, found {scaler}.")
self.device = device
self.scaler = scaler
self._desired_input_dtype = torch.bfloat16 if self.precision == "bf16-mixed" else torch.float16
@override
def forward_context(self) -> AbstractContextManager:
return torch.autocast(self.device, dtype=self._desired_input_dtype)
@override
def convert_input(self, data: Any) -> Any:
return apply_to_collection(data, function=_convert_fp_tensor, dtype=Tensor, dst_type=self._desired_input_dtype)
@override
def convert_output(self, data: Any) -> Any:
return apply_to_collection(data, function=_convert_fp_tensor, dtype=Tensor, dst_type=torch.get_default_dtype())
@override
def backward(self, tensor: Tensor, model: Optional[Module], *args: Any, **kwargs: Any) -> None:
if self.scaler is not None:
tensor = self.scaler.scale(tensor)
super().backward(tensor, model, *args, **kwargs)
@override
def optimizer_step(
self,
optimizer: Optimizable,
**kwargs: Any,
) -> Any:
if self.scaler is None:
# skip scaler logic, as bfloat16 does not require scaler
return super().optimizer_step(optimizer, **kwargs)
if isinstance(optimizer, LBFGS):
raise TypeError("AMP and the LBFGS optimizer are not compatible.")
# note: the scaler will skip the `optimizer.step` if nonfinite gradients are found
step_output = self.scaler.step(optimizer, **kwargs) # type: ignore[arg-type]
self.scaler.update()
return step_output
@override
def state_dict(self) -> dict[str, Any]:
if self.scaler is not None:
return self.scaler.state_dict()
return {}
@override
def load_state_dict(self, state_dict: dict[str, Any]) -> None:
if self.scaler is not None:
self.scaler.load_state_dict(state_dict)
@override
def unscale_gradients(self, optimizer: Optimizer) -> None:
scaler = self.scaler
if scaler is not None:
if _optimizer_handles_unscaling(optimizer):
raise NotImplementedError("Gradient clipping is not implemented for optimizers handling the unscaling.")
scaler.unscale_(optimizer)
def _optimizer_handles_unscaling(optimizer: Any) -> bool:
"""Determines whether a PyTorch optimizer handles unscaling gradients in the step method rather than through the
:class:`torch.cuda.amp.GradScaler`.
Since, the current implementation of this function checks a PyTorch internal variable on the optimizer, the return
value will only be reliable for built-in PyTorch optimizers.
"""
return getattr(optimizer, "_step_supports_amp_scaling", False)
| MixedPrecision |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 104717,
"end": 106018
} | class ____(GeneratedAirbyteSource):
@public
def __init__(
self,
name: str,
username: str,
password: str,
database: str,
account: Optional[str] = None,
host: Optional[str] = None,
engine: Optional[str] = None,
):
"""Airbyte Source for Firebolt.
Documentation can be found at https://docs.airbyte.com/integrations/sources/firebolt
Args:
name (str): The name of the destination.
username (str): Firebolt email address you use to login.
password (str): Firebolt password.
account (Optional[str]): Firebolt account to login.
host (Optional[str]): The host name of your Firebolt database.
database (str): The database to connect to.
engine (Optional[str]): Engine name or url to connect to.
"""
self.username = check.str_param(username, "username")
self.password = check.str_param(password, "password")
self.account = check.opt_str_param(account, "account")
self.host = check.opt_str_param(host, "host")
self.database = check.str_param(database, "database")
self.engine = check.opt_str_param(engine, "engine")
super().__init__("Firebolt", name)
| FireboltSource |
python | jazzband__django-polymorphic | example/orders/models.py | {
"start": 175,
"end": 517
} | class ____(models.Model):
"""
An example order that has polymorphic relations
"""
title = models.CharField(_("Title"), max_length=200)
class Meta:
verbose_name = _("Organisation")
verbose_name_plural = _("Organisations")
ordering = ("title",)
def __str__(self):
return self.title
| Order |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/concepts/ops_jobs_graphs/unit_tests.py | {
"start": 49,
"end": 99
} | class ____(dg.Config):
num: int = 1
| AddOneConfig |
python | PrefectHQ__prefect | tests/utilities/schema_tools/test_validation.py | {
"start": 14841,
"end": 15998
} | class ____:
@pytest.fixture
def schema(self) -> dict:
return {
"title": "Parameters",
"type": "object",
"properties": {
"param": {"title": "param", "position": 0, "type": "array", "items": {}}
},
"required": ["param"],
}
@pytest.mark.parametrize(
"obj, expected",
[
({"param": [1, 2, 3]}, True),
({"param": "not an array"}, False),
({}, False),
({"param": None}, False),
],
)
def test_is_valid(self, schema, obj, expected):
assert is_valid(obj, schema) == expected
@pytest.mark.parametrize(
"obj, expected_errors",
[
({"param": [1, 2, 3]}, []),
({"param": "not an array"}, ["'not an array' is not of type 'array'"]),
({}, ["'param' is a required property"]),
({"param": None}, ["None is not of type 'array'"]),
],
)
def test_validate(self, schema, obj, expected_errors):
errors = validate(obj, schema)
assert [e.message for e in errors] == expected_errors
| TestArray |
python | fluentpython__example-code-2e | 11-pythonic-obj/vector2d_v3_slots.py | {
"start": 1749,
"end": 3319
} | class ____:
__match_args__ = ('x', 'y') # <1>
__slots__ = ('__x', '__y') # <2>
typecode = 'd'
# end::VECTOR2D_V3_SLOTS[]
def __init__(self, x, y):
self.__x = float(x)
self.__y = float(y)
@property
def x(self):
return self.__x
@property
def y(self):
return self.__y
def __iter__(self):
return (i for i in (self.x, self.y))
def __repr__(self):
class_name = type(self).__name__
return '{}({!r}, {!r})'.format(class_name, *self)
def __str__(self):
return str(tuple(self))
def __bytes__(self):
return (bytes([ord(self.typecode)]) +
bytes(array(self.typecode, self)))
def __eq__(self, other):
return tuple(self) == tuple(other)
def __hash__(self):
return hash((self.x, self.y))
def __abs__(self):
return math.hypot(self.x, self.y)
def __bool__(self):
return bool(abs(self))
def angle(self):
return math.atan2(self.y, self.x)
def __format__(self, fmt_spec=''):
if fmt_spec.endswith('p'):
fmt_spec = fmt_spec[:-1]
coords = (abs(self), self.angle())
outer_fmt = '<{}, {}>'
else:
coords = self
outer_fmt = '({}, {})'
components = (format(c, fmt_spec) for c in coords)
return outer_fmt.format(*components)
@classmethod
def frombytes(cls, octets):
typecode = chr(octets[0])
memv = memoryview(octets[1:]).cast(typecode)
return cls(*memv)
| Vector2d |
python | walkccc__LeetCode | solutions/2134. Minimum Swaps to Group All 1's Together II/2134.py | {
"start": 0,
"end": 415
} | class ____:
def minSwaps(self, nums: list[int]) -> int:
n = len(nums)
k = nums.count(1)
ones = 0 # the number of ones in the window
maxOnes = 0 # the maximum number of ones in the window
for i in range(n * 2):
if i >= k and nums[i % n - k]: # Magic in Python :)
ones -= 1
if nums[i % n]:
ones += 1
maxOnes = max(maxOnes, ones)
return k - maxOnes
| Solution |
python | doocs__leetcode | solution/2300-2399/2341.Maximum Number of Pairs in Array/Solution.py | {
"start": 0,
"end": 187
} | class ____:
def numberOfPairs(self, nums: List[int]) -> List[int]:
cnt = Counter(nums)
s = sum(v // 2 for v in cnt.values())
return [s, len(nums) - s * 2]
| Solution |
python | langchain-ai__langchain | libs/partners/huggingface/langchain_huggingface/chat_models/huggingface.py | {
"start": 10862,
"end": 44566
} | class ____(BaseChatModel):
r"""Hugging Face LLM's as ChatModels.
Works with `HuggingFaceTextGenInference`, `HuggingFaceEndpoint`,
`HuggingFaceHub`, and `HuggingFacePipeline` LLMs.
Upon instantiating this class, the model_id is resolved from the url
provided to the LLM, and the appropriate tokenizer is loaded from
the HuggingFace Hub.
Setup:
Install `langchain-huggingface` and ensure your Hugging Face token
is saved.
```bash
pip install langchain-huggingface
```
```python
from huggingface_hub import login
login() # You will be prompted for your HF key, which will then be saved locally
```
Key init args — completion params:
llm:
LLM to be used.
Key init args — client params:
custom_get_token_ids:
Optional encoder to use for counting tokens.
metadata:
Metadata to add to the run trace.
tags:
Tags to add to the run trace.
verbose:
Whether to print out response text.
See full list of supported init args and their descriptions in the params
section.
Instantiate:
```python
from langchain_huggingface import HuggingFaceEndpoint,
ChatHuggingFace
model = HuggingFaceEndpoint(
repo_id="microsoft/Phi-3-mini-4k-instruct",
task="text-generation",
max_new_tokens=512,
do_sample=False,
repetition_penalty=1.03,
)
chat = ChatHuggingFace(llm=model, verbose=True)
```
Invoke:
```python
messages = [
("system", "You are a helpful translator. Translate the user
sentence to French."),
("human", "I love programming."),
]
chat(...).invoke(messages)
```
```python
AIMessage(content='Je ai une passion pour le programme.\n\nIn
French, we use "ai" for masculine subjects and "a" for feminine
subjects. Since "programming" is gender-neutral in English, we
will go with the masculine "programme".\n\nConfirmation: "J\'aime
le programme." is more commonly used. The sentence above is
technically accurate, but less commonly used in spoken French as
"ai" is used less frequently in everyday speech.',
response_metadata={'token_usage': ChatCompletionOutputUsage
(completion_tokens=100, prompt_tokens=55, total_tokens=155),
'model': '', 'finish_reason': 'length'},
id='run-874c24b7-0272-4c99-b259-5d6d7facbc56-0')
```
Stream:
```python
for chunk in chat.stream(messages):
print(chunk)
```
```python
content='Je ai une passion pour le programme.\n\nIn French, we use
"ai" for masculine subjects and "a" for feminine subjects.
Since "programming" is gender-neutral in English,
we will go with the masculine "programme".\n\nConfirmation:
"J\'aime le programme." is more commonly used. The sentence
above is technically accurate, but less commonly used in spoken
French as "ai" is used less frequently in everyday speech.'
response_metadata={'token_usage': ChatCompletionOutputUsage
(completion_tokens=100, prompt_tokens=55, total_tokens=155),
'model': '', 'finish_reason': 'length'}
id='run-7d7b1967-9612-4f9a-911a-b2b5ca85046a-0'
```
Async:
```python
await chat.ainvoke(messages)
```
```python
AIMessage(content='Je déaime le programming.\n\nLittérale : Je
(j\'aime) déaime (le) programming.\n\nNote: "Programming" in
French is "programmation". But here, I used "programming" instead
of "programmation" because the user said "I love programming"
instead of "I love programming (in French)", which would be
"J\'aime la programmation". By translating the sentence
literally, I preserved the original meaning of the user\'s
sentence.', id='run-fd850318-e299-4735-b4c6-3496dc930b1d-0')
```
Tool calling:
```python
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
'''Get the current weather in a given location'''
location: str = Field(..., description="The city and state,
e.g. San Francisco, CA")
class GetPopulation(BaseModel):
'''Get the current population in a given location'''
location: str = Field(..., description="The city and state,
e.g. San Francisco, CA")
chat_with_tools = chat.bind_tools([GetWeather, GetPopulation])
ai_msg = chat_with_tools.invoke("Which city is hotter today and
which is bigger: LA or NY?")
ai_msg.tool_calls
```
```python
[
{
"name": "GetPopulation",
"args": {"location": "Los Angeles, CA"},
"id": "0",
}
]
```
Response metadata
```python
ai_msg = chat.invoke(messages)
ai_msg.response_metadata
```
```python
{
"token_usage": ChatCompletionOutputUsage(
completion_tokens=100, prompt_tokens=8, total_tokens=108
),
"model": "",
"finish_reason": "length",
}
```
""" # noqa: E501
llm: Any
"""LLM, must be of type HuggingFaceTextGenInference, HuggingFaceEndpoint,
HuggingFaceHub, or HuggingFacePipeline."""
tokenizer: Any = None
"""Tokenizer for the model. Only used for HuggingFacePipeline."""
model_id: str | None = None
"""Model ID for the model. Only used for HuggingFaceEndpoint."""
temperature: float | None = None
"""What sampling temperature to use."""
stop: str | list[str] | None = Field(default=None, alias="stop_sequences")
"""Default stop sequences."""
presence_penalty: float | None = None
"""Penalizes repeated tokens."""
frequency_penalty: float | None = None
"""Penalizes repeated tokens according to frequency."""
seed: int | None = None
"""Seed for generation"""
logprobs: bool | None = None
"""Whether to return logprobs."""
top_logprobs: int | None = None
"""Number of most likely tokens to return at each token position, each with
an associated log probability. `logprobs` must be set to true
if this parameter is used."""
logit_bias: dict[int, int] | None = None
"""Modify the likelihood of specified tokens appearing in the completion."""
streaming: bool = False
"""Whether to stream the results or not."""
stream_usage: bool | None = None
"""Whether to include usage metadata in streaming output. If True, an additional
message chunk will be generated during the stream including usage metadata."""
n: int | None = None
"""Number of chat completions to generate for each prompt."""
top_p: float | None = None
"""Total probability mass of tokens to consider at each step."""
max_tokens: int | None = None
"""Maximum number of tokens to generate."""
model_kwargs: dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
def __init__(self, **kwargs: Any):
super().__init__(**kwargs)
# Inherit properties from the LLM if they weren't explicitly set
self._inherit_llm_properties()
self._resolve_model_id()
def _inherit_llm_properties(self) -> None:
"""Inherit properties from the wrapped LLM instance if not explicitly set."""
if not hasattr(self, "llm") or self.llm is None:
return
# Map of ChatHuggingFace properties to LLM properties
property_mappings = {
"temperature": "temperature",
"max_tokens": "max_new_tokens", # Different naming convention
"top_p": "top_p",
"seed": "seed",
"streaming": "streaming",
"stop": "stop_sequences",
}
# Inherit properties from LLM and not explicitly set here
for chat_prop, llm_prop in property_mappings.items():
if hasattr(self.llm, llm_prop):
llm_value = getattr(self.llm, llm_prop)
chat_value = getattr(self, chat_prop, None)
if not chat_value and llm_value:
setattr(self, chat_prop, llm_value)
# Handle special cases for HuggingFaceEndpoint
if _is_huggingface_endpoint(self.llm):
# Inherit additional HuggingFaceEndpoint specific properties
endpoint_mappings = {
"frequency_penalty": "repetition_penalty",
}
for chat_prop, llm_prop in endpoint_mappings.items():
if hasattr(self.llm, llm_prop):
llm_value = getattr(self.llm, llm_prop)
chat_value = getattr(self, chat_prop, None)
if chat_value is None and llm_value is not None:
setattr(self, chat_prop, llm_value)
# Inherit model_kwargs if not explicitly set
if (
not self.model_kwargs
and hasattr(self.llm, "model_kwargs")
and isinstance(self.llm.model_kwargs, dict)
):
self.model_kwargs = self.llm.model_kwargs.copy()
@model_validator(mode="after")
def validate_llm(self) -> Self:
if (
not _is_huggingface_hub(self.llm)
and not _is_huggingface_textgen_inference(self.llm)
and not _is_huggingface_endpoint(self.llm)
and not _is_huggingface_pipeline(self.llm)
):
msg = (
"Expected llm to be one of HuggingFaceTextGenInference, "
"HuggingFaceEndpoint, HuggingFaceHub, HuggingFacePipeline "
f"received {type(self.llm)}"
)
raise TypeError(msg)
return self
@model_validator(mode="after")
def _set_model_profile(self) -> Self:
"""Set model profile if not overridden."""
if self.profile is None and self.model_id:
self.profile = _get_default_model_profile(self.model_id)
return self
def _create_chat_result(self, response: dict) -> ChatResult:
generations = []
token_usage = response.get("usage", {})
for res in response["choices"]:
message = _convert_dict_to_message(res["message"])
if token_usage and isinstance(message, AIMessage):
message.usage_metadata = {
"input_tokens": token_usage.get("prompt_tokens", 0),
"output_tokens": token_usage.get("completion_tokens", 0),
"total_tokens": token_usage.get("total_tokens", 0),
}
generation_info = {"finish_reason": res.get("finish_reason")}
if "logprobs" in res:
generation_info["logprobs"] = res["logprobs"]
gen = ChatGeneration(
message=message,
generation_info=generation_info,
)
generations.append(gen)
llm_output = {
"token_usage": token_usage,
"model_name": self.model_id,
"system_fingerprint": response.get("system_fingerprint", ""),
}
return ChatResult(generations=generations, llm_output=llm_output)
def _generate(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
stream: bool | None = None, # noqa: FBT001
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if _is_huggingface_textgen_inference(self.llm):
message_dicts, params = self._create_message_dicts(messages, stop)
answer = self.llm.client.chat(messages=message_dicts, **kwargs)
return self._create_chat_result(answer)
if _is_huggingface_endpoint(self.llm):
if should_stream:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {
"stop": stop,
**params,
**({"stream": stream} if stream is not None else {}),
**kwargs,
}
answer = self.llm.client.chat_completion(messages=message_dicts, **params)
return self._create_chat_result(answer)
llm_input = self._to_chat_prompt(messages)
if should_stream:
stream_iter = self.llm._stream(
llm_input, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
llm_result = self.llm._generate(
prompts=[llm_input], stop=stop, run_manager=run_manager, **kwargs
)
return self._to_chat_result(llm_result)
async def _agenerate(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: AsyncCallbackManagerForLLMRun | None = None,
stream: bool | None = None, # noqa: FBT001
**kwargs: Any,
) -> ChatResult:
if _is_huggingface_textgen_inference(self.llm):
message_dicts, params = self._create_message_dicts(messages, stop)
answer = await self.llm.async_client.chat(messages=message_dicts, **kwargs)
return self._create_chat_result(answer)
if _is_huggingface_endpoint(self.llm):
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {
**params,
**({"stream": stream} if stream is not None else {}),
**kwargs,
}
answer = await self.llm.async_client.chat_completion(
messages=message_dicts, **params
)
return self._create_chat_result(answer)
if _is_huggingface_pipeline(self.llm):
msg = "async generation is not supported with HuggingFacePipeline"
raise NotImplementedError(msg)
llm_input = self._to_chat_prompt(messages)
llm_result = await self.llm._agenerate(
prompts=[llm_input], stop=stop, run_manager=run_manager, **kwargs
)
return self._to_chat_result(llm_result)
def _should_stream_usage(
self, *, stream_usage: bool | None = None, **kwargs: Any
) -> bool | None:
"""Determine whether to include usage metadata in streaming output.
For backwards compatibility, we check for `stream_options` passed
explicitly to kwargs or in the model_kwargs and override self.stream_usage.
"""
stream_usage_sources = [ # order of precedence
stream_usage,
kwargs.get("stream_options", {}).get("include_usage"),
self.model_kwargs.get("stream_options", {}).get("include_usage"),
self.stream_usage,
]
for source in stream_usage_sources:
if isinstance(source, bool):
return source
return self.stream_usage
def _stream(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
*,
stream_usage: bool | None = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
if _is_huggingface_endpoint(self.llm):
stream_usage = self._should_stream_usage(
stream_usage=stream_usage, **kwargs
)
if stream_usage:
kwargs["stream_options"] = {"include_usage": stream_usage}
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class: type[BaseMessageChunk] = AIMessageChunk
for chunk in self.llm.client.chat_completion(
messages=message_dicts, **params
):
if len(chunk["choices"]) == 0:
if usage := chunk.get("usage"):
usage_msg = AIMessageChunk(
content="",
additional_kwargs={},
response_metadata={},
usage_metadata={
"input_tokens": usage.get("prompt_tokens", 0),
"output_tokens": usage.get("completion_tokens", 0),
"total_tokens": usage.get("total_tokens", 0),
},
)
yield ChatGenerationChunk(message=usage_msg)
continue
choice = chunk["choices"][0]
message_chunk = _convert_chunk_to_message_chunk(
chunk, default_chunk_class
)
generation_info = {}
if finish_reason := choice.get("finish_reason"):
generation_info["finish_reason"] = finish_reason
generation_info["model_name"] = self.model_id
logprobs = choice.get("logprobs")
if logprobs:
generation_info["logprobs"] = logprobs
default_chunk_class = message_chunk.__class__
generation_chunk = ChatGenerationChunk(
message=message_chunk, generation_info=generation_info or None
)
if run_manager:
run_manager.on_llm_new_token(
generation_chunk.text, chunk=generation_chunk, logprobs=logprobs
)
yield generation_chunk
else:
llm_input = self._to_chat_prompt(messages)
stream_iter = self.llm._stream(
llm_input, stop=stop, run_manager=run_manager, **kwargs
)
for chunk in stream_iter: # chunk is a GenerationChunk
chat_chunk = ChatGenerationChunk(
message=AIMessageChunk(content=chunk.text),
generation_info=chunk.generation_info,
)
yield chat_chunk
async def _astream(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: AsyncCallbackManagerForLLMRun | None = None,
*,
stream_usage: bool | None = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
stream_usage = self._should_stream_usage(stream_usage=stream_usage, **kwargs)
if stream_usage:
kwargs["stream_options"] = {"include_usage": stream_usage}
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class: type[BaseMessageChunk] = AIMessageChunk
async for chunk in await self.llm.async_client.chat_completion(
messages=message_dicts, **params
):
if len(chunk["choices"]) == 0:
if usage := chunk.get("usage"):
usage_msg = AIMessageChunk(
content="",
additional_kwargs={},
response_metadata={},
usage_metadata={
"input_tokens": usage.get("prompt_tokens", 0),
"output_tokens": usage.get("completion_tokens", 0),
"total_tokens": usage.get("total_tokens", 0),
},
)
yield ChatGenerationChunk(message=usage_msg)
continue
choice = chunk["choices"][0]
message_chunk = _convert_chunk_to_message_chunk(chunk, default_chunk_class)
generation_info = {}
if finish_reason := choice.get("finish_reason"):
generation_info["finish_reason"] = finish_reason
generation_info["model_name"] = self.model_id
logprobs = choice.get("logprobs")
if logprobs:
generation_info["logprobs"] = logprobs
default_chunk_class = message_chunk.__class__
generation_chunk = ChatGenerationChunk(
message=message_chunk, generation_info=generation_info or None
)
if run_manager:
await run_manager.on_llm_new_token(
token=generation_chunk.text,
chunk=generation_chunk,
logprobs=logprobs,
)
yield generation_chunk
def _to_chat_prompt(
self,
messages: list[BaseMessage],
) -> str:
"""Convert a list of messages into a prompt format expected by wrapped LLM."""
if not messages:
msg = "At least one HumanMessage must be provided!"
raise ValueError(msg)
if not isinstance(messages[-1], HumanMessage):
msg = "Last message must be a HumanMessage!"
raise ValueError(msg)
messages_dicts = [self._to_chatml_format(m) for m in messages]
return self.tokenizer.apply_chat_template(
messages_dicts, tokenize=False, add_generation_prompt=True
)
def _to_chatml_format(self, message: BaseMessage) -> dict:
"""Convert LangChain message to ChatML format."""
if isinstance(message, SystemMessage):
role = "system"
elif isinstance(message, AIMessage):
role = "assistant"
elif isinstance(message, HumanMessage):
role = "user"
else:
msg = f"Unknown message type: {type(message)}"
raise ValueError(msg)
return {"role": role, "content": message.content}
@staticmethod
def _to_chat_result(llm_result: LLMResult) -> ChatResult:
chat_generations = []
for g in llm_result.generations[0]:
chat_generation = ChatGeneration(
message=AIMessage(content=g.text), generation_info=g.generation_info
)
chat_generations.append(chat_generation)
return ChatResult(
generations=chat_generations, llm_output=llm_result.llm_output
)
def _resolve_model_id(self) -> None:
"""Resolve the model_id from the LLM's inference_server_url."""
from huggingface_hub import list_inference_endpoints # type: ignore[import]
if _is_huggingface_hub(self.llm) or (
hasattr(self.llm, "repo_id") and self.llm.repo_id
):
self.model_id = self.llm.repo_id
return
if _is_huggingface_textgen_inference(self.llm):
endpoint_url: str | None = self.llm.inference_server_url
if _is_huggingface_pipeline(self.llm):
from transformers import AutoTokenizer # type: ignore[import]
self.model_id = self.model_id or self.llm.model_id
self.tokenizer = (
AutoTokenizer.from_pretrained(self.model_id)
if self.tokenizer is None
else self.tokenizer
)
return
if _is_huggingface_endpoint(self.llm):
self.model_id = self.llm.repo_id or self.llm.model
return
endpoint_url = self.llm.endpoint_url
available_endpoints = list_inference_endpoints("*")
for endpoint in available_endpoints:
if endpoint.url == endpoint_url:
self.model_id = endpoint.repository
if not self.model_id:
msg = (
"Failed to resolve model_id:"
f"Could not find model id for inference server: {endpoint_url}"
"Make sure that your Hugging Face token has access to the endpoint."
)
raise ValueError(msg)
def bind_tools(
self,
tools: Sequence[dict[str, Any] | type | Callable | BaseTool],
*,
tool_choice: dict | str | bool | None = None,
**kwargs: Any,
) -> Runnable[LanguageModelInput, AIMessage]:
"""Bind tool-like objects to this chat model.
Assumes model is compatible with OpenAI tool-calling API.
Args:
tools: A list of tool definitions to bind to this chat model.
Supports any tool definition handled by
`langchain_core.utils.function_calling.convert_to_openai_tool`.
tool_choice: Which tool to require the model to call.
Must be the name of the single provided function or
`'auto'` to automatically determine which function to call
(if any), or a dict of the form:
{"type": "function", "function": {"name": <<tool_name>>}}.
**kwargs: Any additional parameters to pass to the
`langchain.runnable.Runnable` constructor.
"""
formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
if tool_choice is not None and tool_choice:
if len(formatted_tools) != 1:
msg = (
"When specifying `tool_choice`, you must provide exactly one "
f"tool. Received {len(formatted_tools)} tools."
)
raise ValueError(msg)
if isinstance(tool_choice, str):
if tool_choice not in ("auto", "none", "required"):
tool_choice = {
"type": "function",
"function": {"name": tool_choice},
}
elif isinstance(tool_choice, bool):
tool_choice = formatted_tools[0]
elif isinstance(tool_choice, dict):
if (
formatted_tools[0]["function"]["name"]
!= tool_choice["function"]["name"]
):
msg = (
f"Tool choice {tool_choice} was specified, but the only "
f"provided tool was {formatted_tools[0]['function']['name']}."
)
raise ValueError(msg)
else:
msg = (
f"Unrecognized tool_choice type. Expected str, bool or dict. "
f"Received: {tool_choice}"
)
raise ValueError(msg)
kwargs["tool_choice"] = tool_choice
return super().bind(tools=formatted_tools, **kwargs)
def with_structured_output(
self,
schema: dict | type[BaseModel] | None = None,
*,
method: Literal[
"function_calling", "json_mode", "json_schema"
] = "function_calling",
include_raw: bool = False,
**kwargs: Any,
) -> Runnable[LanguageModelInput, dict | BaseModel]:
"""Model wrapper that returns outputs formatted to match the given schema.
Args:
schema: The output schema. Can be passed in as:
- An OpenAI function/tool schema,
- A JSON Schema,
- A `TypedDict` class
Pydantic class is currently supported.
method: The method for steering model generation, one of:
- `'function_calling'`: uses tool-calling features.
- `'json_schema'`: uses dedicated structured output features.
- `'json_mode'`: uses JSON mode.
include_raw:
If `False` then only the parsed structured output is returned.
If an error occurs during model output parsing it will be raised.
If `True` then both the raw model response (a `BaseMessage`) and the
parsed model response will be returned.
If an error occurs during output parsing it will be caught and returned
as well.
The final output is always a `dict` with keys `'raw'`, `'parsed'`, and
`'parsing_error'`.
kwargs:
Additional parameters to pass to the underlying LLM's
`langchain_core.language_models.chat.BaseChatModel.bind`
method, such as `response_format` or `ls_structured_output_format`.
Returns:
A `Runnable` that takes same inputs as a
`langchain_core.language_models.chat.BaseChatModel`. If `include_raw` is
`False` and `schema` is a Pydantic class, `Runnable` outputs an instance
of `schema` (i.e., a Pydantic object). Otherwise, if `include_raw` is
`False` then `Runnable` outputs a `dict`.
If `include_raw` is `True`, then `Runnable` outputs a `dict` with keys:
- `'raw'`: `BaseMessage`
- `'parsed'`: `None` if there was a parsing error, otherwise the type
depends on the `schema` as described above.
- `'parsing_error'`: `BaseException | None`
"""
_ = kwargs.pop("strict", None)
if kwargs:
msg = f"Received unsupported arguments {kwargs}"
raise ValueError(msg)
is_pydantic_schema = isinstance(schema, type) and is_basemodel_subclass(schema)
if method == "function_calling":
if schema is None:
msg = (
"schema must be specified when method is 'function_calling'. "
"Received None."
)
raise ValueError(msg)
formatted_tool = convert_to_openai_tool(schema)
tool_name = formatted_tool["function"]["name"]
llm = self.bind_tools(
[schema],
tool_choice=tool_name,
ls_structured_output_format={
"kwargs": {"method": "function_calling"},
"schema": formatted_tool,
},
)
if is_pydantic_schema:
msg = "Pydantic schema is not supported for function calling"
raise NotImplementedError(msg)
output_parser: JsonOutputKeyToolsParser | JsonOutputParser = (
JsonOutputKeyToolsParser(key_name=tool_name, first_tool_only=True)
)
elif method == "json_schema":
if schema is None:
msg = (
"schema must be specified when method is 'json_schema'. "
"Received None."
)
raise ValueError(msg)
formatted_schema = convert_to_json_schema(schema)
llm = self.bind(
response_format={"type": "json_object", "schema": formatted_schema},
ls_structured_output_format={
"kwargs": {"method": "json_schema"},
"schema": schema,
},
)
output_parser = JsonOutputParser() # type: ignore[arg-type]
elif method == "json_mode":
llm = self.bind(
response_format={"type": "json_object"},
ls_structured_output_format={
"kwargs": {"method": "json_mode"},
"schema": schema,
},
)
output_parser = JsonOutputParser() # type: ignore[arg-type]
else:
msg = (
f"Unrecognized method argument. Expected one of 'function_calling' or "
f"'json_mode'. Received: '{method}'"
)
raise ValueError(msg)
if include_raw:
parser_assign = RunnablePassthrough.assign(
parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
)
parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
parser_with_fallback = parser_assign.with_fallbacks(
[parser_none], exception_key="parsing_error"
)
return RunnableMap(raw=llm) | parser_with_fallback
return llm | output_parser
def _create_message_dicts(
self, messages: list[BaseMessage], stop: list[str] | None
) -> tuple[list[dict[str, Any]], dict[str, Any]]:
params = self._default_params
if stop is not None:
params["stop"] = stop
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts, params
@property
def _default_params(self) -> dict[str, Any]:
"""Get default parameters for calling Hugging Face Inference Providers API."""
params = {
"model": self.model_id,
"stream": self.streaming,
"n": self.n,
"temperature": self.temperature,
"stop": self.stop,
**(self.model_kwargs if self.model_kwargs else {}),
}
if self.max_tokens is not None:
params["max_tokens"] = self.max_tokens
return params
@property
def _llm_type(self) -> str:
return "huggingface-chat-wrapper"
| ChatHuggingFace |
python | celery__celery | t/unit/tasks/test_canvas.py | {
"start": 12956,
"end": 29452
} | class ____(CanvasCase):
def test_chain_of_chain_with_a_single_task(self):
s = self.add.s(1, 1)
assert chain([chain(s)]).tasks == list(chain(s).tasks)
@pytest.mark.parametrize("chain_type", (_chain, chain_subclass))
def test_clone_preserves_state(self, chain_type):
x = chain_type(self.add.s(i, i) for i in range(10))
assert x.clone().tasks == x.tasks
assert x.clone().kwargs == x.kwargs
assert x.clone().args == x.args
assert isinstance(x.clone(), chain_type)
def test_repr(self):
x = self.add.s(2, 2) | self.add.s(2)
assert repr(x) == f'{self.add.name}(2, 2) | add(2)'
def test_apply_async(self):
c = self.add.s(2, 2) | self.add.s(4) | self.add.s(8)
result = c.apply_async()
assert result.parent
assert result.parent.parent
assert result.parent.parent.parent is None
@pytest.mark.parametrize("chain_type", (_chain, chain_subclass))
def test_splices_chains(self, chain_type):
c = chain_type(
self.add.s(5, 5),
chain_type(self.add.s(6), self.add.s(7), self.add.s(8), app=self.app),
app=self.app,
)
c.freeze()
tasks, _ = c._frozen
assert len(tasks) == 4
assert isinstance(c, chain_type)
@pytest.mark.parametrize("chain_type", [_chain, chain_subclass])
def test_from_dict_no_tasks(self, chain_type):
assert chain_type.from_dict(dict(chain_type(app=self.app)), app=self.app)
assert isinstance(chain_type.from_dict(dict(chain_type(app=self.app)), app=self.app), chain_type)
@pytest.mark.parametrize("chain_type", [_chain, chain_subclass])
def test_from_dict_full_subtasks(self, chain_type):
c = chain_type(self.add.si(1, 2), self.add.si(3, 4), self.add.si(5, 6))
serialized = json.loads(json.dumps(c))
deserialized = chain_type.from_dict(serialized)
assert all(isinstance(task, Signature) for task in deserialized.tasks)
assert isinstance(deserialized, chain_type)
@pytest.mark.usefixtures('depends_on_current_app')
def test_app_falls_back_to_default(self):
from celery._state import current_app
assert chain().app is current_app
def test_handles_dicts(self):
c = chain(
self.add.s(5, 5), dict(self.add.s(8)), app=self.app,
)
c.freeze()
tasks, _ = c._frozen
assert all(isinstance(task, Signature) for task in tasks)
assert all(task.app is self.app for task in tasks)
def test_groups_in_chain_to_chord(self):
g1 = group([self.add.s(2, 2), self.add.s(4, 4)])
g2 = group([self.add.s(3, 3), self.add.s(5, 5)])
c = g1 | g2
assert isinstance(c, chord)
def test_prepare_steps_set_last_task_id_to_chain(self):
last_task = self.add.s(2).set(task_id='42')
c = self.add.s(4) | last_task
assert c.id is None
tasks, _ = c.prepare_steps((), {}, c.tasks, last_task_id=last_task.id)
assert c.id == last_task.id
def test_group_to_chord(self):
c = (
self.add.s(5) |
group([self.add.s(i, i) for i in range(5)], app=self.app) |
self.add.s(10) |
self.add.s(20) |
self.add.s(30)
)
c._use_link = True
tasks, results = c.prepare_steps((), {}, c.tasks)
assert tasks[-1].args[0] == 5
assert isinstance(tasks[-2], chord)
assert len(tasks[-2].tasks) == 5
body = tasks[-2].body
assert len(body.tasks) == 3
assert body.tasks[0].args[0] == 10
assert body.tasks[1].args[0] == 20
assert body.tasks[2].args[0] == 30
c2 = self.add.s(2, 2) | group(self.add.s(i, i) for i in range(10))
c2._use_link = True
tasks2, _ = c2.prepare_steps((), {}, c2.tasks)
assert isinstance(tasks2[0], group)
def test_group_to_chord__protocol_2__or(self):
c = (
group([self.add.s(i, i) for i in range(5)], app=self.app) |
self.add.s(10) |
self.add.s(20) |
self.add.s(30)
)
assert isinstance(c, chord)
def test_group_to_chord__protocol_2(self):
c = chain(
group([self.add.s(i, i) for i in range(5)], app=self.app),
self.add.s(10),
self.add.s(20),
self.add.s(30)
)
assert isinstance(c, chord)
assert isinstance(c.body, _chain)
assert len(c.body.tasks) == 3
c2 = self.add.s(2, 2) | group(self.add.s(i, i) for i in range(10))
c2._use_link = False
tasks2, _ = c2.prepare_steps((), {}, c2.tasks)
assert isinstance(tasks2[0], group)
def test_chord_to_chain(self):
c = (
chord([self.add.s('x0', 'y0'), self.add.s('x1', 'y1')],
self.add.s(['foo'])) |
chain(self.add.s(['y']), self.add.s(['z']))
)
assert isinstance(c, _chain)
assert c.apply().get() == ['x0y0', 'x1y1', 'foo', 'y', 'z']
def test_chord_to_group(self):
c = (
chord([self.add.s('x0', 'y0'), self.add.s('x1', 'y1')],
self.add.s(['foo'])) |
group([self.add.s(['y']), self.add.s(['z'])])
)
assert isinstance(c, _chain)
assert c.apply().get() == [
['x0y0', 'x1y1', 'foo', 'y'],
['x0y0', 'x1y1', 'foo', 'z']
]
def test_chain_of_chord__or__group_of_single_task(self):
c = chord([signature('header')], signature('body'))
c = chain(c)
g = group(signature('t'))
new_chain = c | g # g should be chained with the body of c[0]
assert isinstance(new_chain, _chain)
assert isinstance(new_chain.tasks[0].body, _chain)
def test_chain_of_chord_upgrade_on_chaining(self):
c = chord([signature('header')], group(signature('body')))
c = chain(c)
t = signature('t')
new_chain = c | t # t should be chained with the body of c[0] and create a new chord
assert isinstance(new_chain, _chain)
assert isinstance(new_chain.tasks[0].body, chord)
@pytest.mark.parametrize(
"group_last_task",
[False, True],
)
def test_chain_of_chord_upgrade_on_chaining__protocol_2(
self, group_last_task):
c = chain(
group([self.add.s(i, i) for i in range(5)], app=self.app),
group([self.add.s(i, i) for i in range(10, 15)], app=self.app),
group([self.add.s(i, i) for i in range(20, 25)], app=self.app),
self.add.s(30) if not group_last_task else group(self.add.s(30),
app=self.app))
assert isinstance(c, _chain)
assert len(
c.tasks
) == 1, "Consecutive chords should be further upgraded to a single chord."
assert isinstance(c.tasks[0], chord)
def test_chain_of_chord_upgrade_on_chaining__protocol_3(self):
c = chain(
chain([self.add.s(i, i) for i in range(5)]),
group([self.add.s(i, i) for i in range(10, 15)], app=self.app),
chord([signature('header')], signature('body'), app=self.app),
group([self.add.s(i, i) for i in range(20, 25)], app=self.app))
assert isinstance(c, _chain)
assert isinstance(
c.tasks[-1], chord
), "Chord followed by a group should be upgraded to a single chord with chained body."
assert len(c.tasks) == 6
def test_apply_options(self):
class static(Signature):
def clone(self, *args, **kwargs):
return self
def s(*args, **kwargs):
return static(self.add, args, kwargs, type=self.add, app=self.app)
c = s(2, 2) | s(4) | s(8)
r1 = c.apply_async(task_id='some_id')
assert r1.id == 'some_id'
c.apply_async(group_id='some_group_id')
assert c.tasks[-1].options['group_id'] == 'some_group_id'
c.apply_async(chord='some_chord_id')
assert c.tasks[-1].options['chord'] == 'some_chord_id'
c.apply_async(link=[s(32)])
assert c.tasks[-1].options['link'] == [s(32)]
c.apply_async(link_error=[s('error')])
for task in c.tasks:
assert task.options['link_error'] == [s('error')]
def test_apply_options_none(self):
class static(Signature):
def clone(self, *args, **kwargs):
return self
def _apply_async(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
c = static(self.add, (2, 2), type=self.add, app=self.app, priority=5)
c.apply_async(priority=4)
assert c.kwargs['priority'] == 4
c.apply_async(priority=None)
assert c.kwargs['priority'] == 5
def test_reverse(self):
x = self.add.s(2, 2) | self.add.s(2)
assert isinstance(signature(x), _chain)
assert isinstance(signature(dict(x)), _chain)
def test_always_eager(self):
self.app.conf.task_always_eager = True
assert ~(self.add.s(4, 4) | self.add.s(8)) == 16
def test_chain_always_eager(self):
self.app.conf.task_always_eager = True
from celery import _state, result
fixture_task_join_will_block = _state.task_join_will_block
try:
_state.task_join_will_block = _state.orig_task_join_will_block
result.task_join_will_block = _state.orig_task_join_will_block
@self.app.task(shared=False)
def chain_add():
return (self.add.s(4, 4) | self.add.s(8)).apply_async()
r = chain_add.apply_async(throw=True).get()
assert r.get() == 16
finally:
_state.task_join_will_block = fixture_task_join_will_block
result.task_join_will_block = fixture_task_join_will_block
def test_apply(self):
x = chain(self.add.s(4, 4), self.add.s(8), self.add.s(10))
res = x.apply()
assert isinstance(res, EagerResult)
assert res.get() == 26
assert res.parent.get() == 16
assert res.parent.parent.get() == 8
assert res.parent.parent.parent is None
def test_kwargs_apply(self):
x = chain(self.add.s(), self.add.s(8), self.add.s(10))
res = x.apply(kwargs={'x': 1, 'y': 1}).get()
assert res == 20
def test_single_expresion(self):
x = chain(self.add.s(1, 2)).apply()
assert x.get() == 3
assert x.parent is None
def test_empty_chain_returns_none(self):
assert chain(app=self.app)() is None
assert chain(app=self.app).apply_async() is None
def test_call_no_tasks(self):
x = chain()
assert not x()
def test_call_with_tasks(self):
x = self.add.s(2, 2) | self.add.s(4)
x.apply_async = Mock()
x(2, 2, foo=1)
x.apply_async.assert_called_with((2, 2), {'foo': 1})
def test_from_dict_no_args__with_args(self):
x = dict(self.add.s(2, 2) | self.add.s(4))
x['args'] = None
assert isinstance(chain.from_dict(x), _chain)
x['args'] = (2,)
assert isinstance(chain.from_dict(x), _chain)
def test_accepts_generator_argument(self):
x = chain(self.add.s(i) for i in range(10))
assert x.tasks[0].type, self.add
assert x.type
def test_chord_sets_result_parent(self):
g = (self.add.s(0, 0) |
group(self.add.s(i, i) for i in range(1, 10)) |
self.add.s(2, 2) |
self.add.s(4, 4))
res = g.freeze()
assert isinstance(res, AsyncResult)
assert not isinstance(res, GroupResult)
assert isinstance(res.parent, AsyncResult)
assert not isinstance(res.parent, GroupResult)
assert isinstance(res.parent.parent, GroupResult)
assert isinstance(res.parent.parent.parent, AsyncResult)
assert not isinstance(res.parent.parent.parent, GroupResult)
assert res.parent.parent.parent.parent is None
seen = set()
node = res
while node:
assert node.id not in seen
seen.add(node.id)
node = node.parent
def test_append_to_empty_chain(self):
x = chain()
x |= self.add.s(1, 1)
x |= self.add.s(1)
x.freeze()
tasks, _ = x._frozen
assert len(tasks) == 2
assert x.apply().get() == 3
@pytest.mark.usefixtures('depends_on_current_app')
def test_chain_single_child_result(self):
child_sig = self.add.si(1, 1)
chain_sig = chain(child_sig)
assert chain_sig.tasks[0] is child_sig
with patch.object(
# We want to get back the result of actually applying the task
child_sig, "apply_async",
) as mock_apply, patch.object(
# The child signature may be clone by `chain.prepare_steps()`
child_sig, "clone", return_value=child_sig,
):
res = chain_sig()
# `_prepare_chain_from_options()` sets this `chain` kwarg with the
# subsequent tasks which would be run - nothing in this case
mock_apply.assert_called_once_with(chain=[])
assert res is mock_apply.return_value
@pytest.mark.usefixtures('depends_on_current_app')
def test_chain_single_child_group_result(self):
child_sig = self.add.si(1, 1)
# The group will `clone()` the child during instantiation so mock it
with patch.object(child_sig, "clone", return_value=child_sig):
group_sig = group(child_sig)
# Now we can construct the chain signature which is actually under test
chain_sig = chain(group_sig)
assert chain_sig.tasks[0].tasks[0] is child_sig
with patch.object(
# We want to get back the result of actually applying the task
child_sig, "apply_async",
) as mock_apply, patch.object(
# The child signature may be clone by `chain.prepare_steps()`
child_sig, "clone", return_value=child_sig,
):
res = chain_sig()
# `_prepare_chain_from_options()` sets this `chain` kwarg with the
# subsequent tasks which would be run - nothing in this case
mock_apply.assert_called_once_with(chain=[])
assert res is mock_apply.return_value
def test_chain_flattening_keep_links_of_inner_chain(self):
def link_chain(sig):
sig.link(signature('link_b'))
sig.link_error(signature('link_ab'))
return sig
inner_chain = link_chain(chain(signature('a'), signature('b')))
assert inner_chain.options['link'][0] == signature('link_b')
assert inner_chain.options['link_error'][0] == signature('link_ab')
assert inner_chain.tasks[0] == signature('a')
assert inner_chain.tasks[0].options == {}
assert inner_chain.tasks[1] == signature('b')
assert inner_chain.tasks[1].options == {}
flat_chain = chain(inner_chain, signature('c'))
assert flat_chain.options == {}
assert flat_chain.tasks[0].name == 'a'
assert 'link' not in flat_chain.tasks[0].options
assert signature(flat_chain.tasks[0].options['link_error'][0]) == signature('link_ab')
assert flat_chain.tasks[1].name == 'b'
assert 'link' in flat_chain.tasks[1].options, "b is missing the link from inner_chain.options['link'][0]"
assert signature(flat_chain.tasks[1].options['link'][0]) == signature('link_b')
assert signature(flat_chain.tasks[1].options['link_error'][0]) == signature('link_ab')
def test_group_in_center_of_chain(self):
t1 = chain(self.add.si(1, 1), group(self.add.si(1, 1), self.add.si(1, 1)),
self.add.si(1, 1) | self.add.si(1, 1))
t2 = chord([self.add.si(1, 1), self.add.si(1, 1)], t1)
t2.freeze() # should not raise
def test_upgrade_to_chord_on_chain(self):
group1 = group(self.add.si(10, 10), self.add.si(10, 10))
group2 = group(self.xsum.s(), self.xsum.s())
chord1 = group1 | group2
chain1 = (self.xsum.si([5]) | self.add.s(1))
final_task = chain(chord1, chain1)
assert len(final_task.tasks) == 1 and isinstance(final_task.tasks[0], chord)
assert isinstance(final_task.tasks[0].body, chord)
assert final_task.tasks[0].body.body == chain1
| test_chain |
python | mlflow__mlflow | dev/proto_to_graphql/code_generator.py | {
"start": 443,
"end": 1839
} | class ____:
def __init__(self):
self.queries = set() # method_descriptor
self.mutations = set() # method_descriptor
self.inputs = [] # field_descriptor
self.outputs = set() # field_descriptor
self.types = [] # field_descriptor
self.enums = set() # enum_descriptor
self.method_names = set() # package_name_method_name
# Entry point for generating the GraphQL schema.
def generate_code():
state = GenerateSchemaState()
for file_descriptor in ONBOARDED_DESCRIPTORS:
for service_name, service_descriptor in file_descriptor.services_by_name.items():
for method_name, method_descriptor in service_descriptor.methods_by_name.items():
process_method(method_descriptor, state)
generated_schema = generate_schema(state)
os.makedirs(os.path.dirname(AUTOGENERATED_SCHEMA), exist_ok=True)
with open(AUTOGENERATED_SCHEMA, "w") as file:
file.write(generated_schema)
# Generate the sdl schema for typescript type generation.
sdl_schema = str(schema)
sdl_schema = f"""# GENERATED FILE. PLEASE DON'T MODIFY.
# Run uv run ./dev/proto_to_graphql/code_generator.py to regenerate.
{sdl_schema}
"""
with open(AUTOGENERATED_SDL_SCHEMA, "w") as f:
f.write(sdl_schema)
def main():
generate_code()
if __name__ == "__main__":
main()
| GenerateSchemaState |
python | getsentry__sentry | src/sentry/identity/slack/provider.py | {
"start": 284,
"end": 2778
} | class ____(OAuth2Provider):
key = IntegrationProviderSlug.SLACK.value
name = "Slack"
# This identity provider is used for authorizing the Slack application
# through their Bot token (or legacy Workspace Token if enabled) flow.
oauth_scopes = ("identity.basic", "identity.email")
# Only used during installation for Bot apps in order to request "links:read"
# user_scope, needed for unfurling.
user_scopes = ()
def get_oauth_authorize_url(self) -> str:
return "https://slack.com/oauth/v2/authorize"
# XXX(epurkhiser): While workspace tokens _do_ support the oauth.access
# endpoint, it will not include the authorizing_user, so we continue to use
# the deprecated oauth.token endpoint until we are able to migrate to a bot
# app which uses oauth.access.
def get_oauth_access_token_url(self) -> str:
return "https://slack.com/api/oauth.v2.access"
def get_oauth_client_id(self):
return options.get("slack.client-id")
def get_oauth_client_secret(self):
return options.get("slack.client-secret")
def get_user_scopes(self):
return self.config.get("user_scopes", self.user_scopes)
def get_pipeline_views(self) -> list[PipelineView[IdentityPipeline]]:
return [
SlackOAuth2LoginView(
authorize_url=self.get_oauth_authorize_url(),
client_id=self.get_oauth_client_id(),
scope=" ".join(self.get_oauth_scopes()),
user_scope=" ".join(self.get_user_scopes()),
),
OAuth2CallbackView(
access_token_url=self.get_oauth_access_token_url(),
client_id=self.get_oauth_client_id(),
client_secret=self.get_oauth_client_secret(),
),
]
def get_oauth_data(self, payload):
# TODO(epurkhiser): This flow isn't actually used right now in sentry.
# In slack-bot world we would need to make an API call to the 'me'
# endpoint to get their user ID here.
return super().get_oauth_data(payload)
def build_identity(self, data):
data = data["data"]
return {
"type": IntegrationProviderSlug.SLACK.value,
# TODO(epurkhiser): See note above
"id": data["user"]["id"],
"email": data["user"]["email"],
"scopes": sorted(data["scope"].split(",")),
"data": self.get_oauth_data(data),
}
| SlackIdentityProvider |
python | walkccc__LeetCode | solutions/932. Beautiful Array/932.py | {
"start": 0,
"end": 576
} | class ____:
def beautifulArray(self, n: int) -> list[int]:
arr = [i for i in range(1, n + 1)]
def partition(l: int, r: int, mask: int) -> int:
nextSwapped = l
for i in range(l, r + 1):
if arr[i] & mask:
arr[i], arr[nextSwapped] = arr[nextSwapped], arr[i]
nextSwapped += 1
return nextSwapped - 1
def divide(l: int, r: int, mask: int) -> None:
if l >= r:
return
m = partition(l, r, mask)
divide(l, m, mask << 1)
divide(m + 1, r, mask << 1)
divide(0, n - 1, 1)
return arr
| Solution |
python | numba__numba | numba/core/typing/mathdecl.py | {
"start": 1960,
"end": 2039
} | class ____(Math_converter):
pass
@infer_global(math.copysign)
| Math_floor_ceil |
python | pytorch__pytorch | test/export/test_export.py | {
"start": 5287,
"end": 5371
} | class ____:
x: Tensor
y: List[Tensor]
z: Dict[str, Tensor]
@dataclass
| Inp1 |
python | tensorflow__tensorflow | tensorflow/python/ops/gradients_test.py | {
"start": 27335,
"end": 28589
} | class ____(test_util.TensorFlowTestCase):
@test_util.run_v1_only("b/120545219")
def testHessianVectorProduct(self):
# Manually compute the Hessian explicitly for a low-dimensional problem
# and check that HessianVectorProduct matches multiplication by the
# explicit Hessian.
# Specifically, the Hessian of f(x) = x^T A x is
# H = A + A^T.
# We expect HessianVectorProduct(f(x), x, v) to be H v.
m = 4
rng = np.random.RandomState([1, 2, 3])
mat_value = rng.randn(m, m).astype("float32")
v_value = rng.randn(m, 1).astype("float32")
x_value = rng.randn(m, 1).astype("float32")
hess_value = mat_value + mat_value.T
hess_v_value = np.dot(hess_value, v_value)
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
mat = constant_op.constant(mat_value)
v = constant_op.constant(v_value)
x = constant_op.constant(x_value)
mat_x = math_ops.matmul(mat, x, name="Ax")
x_mat_x = math_ops.matmul(array_ops.transpose(x), mat_x, name="xAx")
hess_v = gradients_impl._hessian_vector_product(x_mat_x, [x], [v])[0]
hess_v_actual = self.evaluate(hess_v)
self.assertAllClose(hess_v_value, hess_v_actual)
| HessianVectorProductTest |
python | getsentry__sentry | src/sentry/backup/comparators.py | {
"start": 25172,
"end": 26466
} | class ____(JSONScrubbingComparator):
"""
Some exports from earlier sentry versions encode simple option values
as string integers, while newer versions of sentry encode those values as string.
If either side is a string, cast both to strings and compare.
"""
def compare(self, on: InstanceID, left: Any, right: Any) -> list[ComparatorFinding]:
findings = []
fields = sorted(self.fields)
for f in fields:
left_field = left["fields"].get(f)
right_field = right["fields"].get(f)
if left_field == right_field:
continue
if isinstance(left_field, str):
right_field = str(right_field)
elif isinstance(right_field, str):
left_field = str(left_field)
if left_field != right_field:
findings.append(
ComparatorFinding(
kind=self.get_kind(),
on=on,
left_pk=left["pk"],
right_pk=right["pk"],
reason=f"""the left value ({left_field}) of `{f}` was not equal to the right value ({right_field})""",
)
)
return findings
| OptionValueComparator |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_sagemaker_notebook.py | {
"start": 4480,
"end": 5626
} | class ____:
@mock.patch.object(SageMakerHook, "conn")
def test_stop_notebook_without_wait_for_completion(self, mock_hook_conn, hook):
operator = SageMakerStopNotebookOperator(
task_id="stop_test", instance_name=INSTANCE_NAME, wait_for_completion=False
)
operator.execute(None)
hook.conn.stop_notebook_instance.assert_called_once()
mock_hook_conn.get_waiter.assert_not_called()
@mock.patch.object(SageMakerHook, "conn")
def test_stop_notebook_wait_for_completion(self, mock_hook_conn, hook):
operator = SageMakerStopNotebookOperator(
task_id="stop_test", instance_name=INSTANCE_NAME, wait_for_completion=True
)
operator.execute(None)
hook.conn.stop_notebook_instance.assert_called_once()
mock_hook_conn.get_waiter.assert_called_once_with("notebook_instance_stopped")
def test_template_fields(self):
operator = SageMakerStopNotebookOperator(
task_id="stop_test", instance_name=INSTANCE_NAME, wait_for_completion=False
)
validate_template_fields(operator)
| TestSageMakerStopNotebookOperator |
python | encode__django-rest-framework | tests/test_filters.py | {
"start": 13780,
"end": 13924
} | class ____(serializers.ModelSerializer):
class Meta:
model = SearchFilterModelM2M
fields = '__all__'
| SearchFilterM2MSerializer |
python | apache__airflow | airflow-e2e-tests/tests/airflow_e2e_tests/e2e_test_utils/clients.py | {
"start": 4723,
"end": 5207
} | class ____:
"""Client for interacting with the Task SDK API."""
def __init__(self):
pass
@cached_property
def client(self):
from airflow.sdk.api.client import Client
client = Client(base_url=f"http://{DOCKER_COMPOSE_HOST_PORT}/execution", token="not-a-token")
return client
def health_check(self):
response = self.client.get("health/ping", headers={"Airflow-API-Version": "2025-08-10"})
return response
| TaskSDKClient |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/snapshot_test.py | {
"start": 16923,
"end": 38451
} | class ____(tf_record_test_base.TFRecordTestBase,
parameterized.TestCase):
def setUp(self):
super(LegacySnapshotTest, self).setUp()
self.removeTFRecords()
tmpdir = self.get_temp_dir()
tmpdir = os.path.join(tmpdir, "snapshot")
os.mkdir(tmpdir)
self.snapshot_dir = tmpdir
def tearDown(self):
super(LegacySnapshotTest, self).tearDown()
shutil.rmtree(self.snapshot_dir)
def removeTFRecords(self):
for filename in self._filenames:
os.remove(filename)
self._filenames = []
def setUpTFRecord(self, num_files=10, num_records=10):
self._num_files = num_files
self._num_records = num_records
self._filenames = self._createFiles()
def makeSnapshotDirectory(self):
return self.snapshot_dir
def assertSnapshotDirectoryContains(self, directory, num_fingerprints,
num_runs_per_fp, num_snapshot_files):
# Ignore the graphdef pbtxts we write for debugging purposes and temporary
# files that are an artifact of how TF writes files.
dirlist = listdir_and_filter(
directory, lambda p: not (is_graphdef_file(p) or is_temp_file(p)))
self.assertLen(dirlist, num_fingerprints)
for i in range(num_fingerprints):
fingerprint_dir = os.path.join(directory, dirlist[i])
fingerprint_dir_list = listdir_and_filter(fingerprint_dir,
lambda p: not is_temp_file(p))
self.assertLen(fingerprint_dir_list, num_runs_per_fp + 1)
self.assertEqual(fingerprint_dir_list[num_runs_per_fp],
"snapshot.metadata")
for j in range(num_runs_per_fp):
run_dir = os.path.join(fingerprint_dir, fingerprint_dir_list[j])
run_dirlist = sorted(os.listdir(run_dir))
self.assertLen(run_dirlist, num_snapshot_files)
file_counter = 0
for filename in run_dirlist:
self.assertEqual(filename, "%08d.snapshot" % file_counter)
file_counter += 1
@combinations.generate(test_base.default_test_combinations())
def testWriteDifferentPipelinesInOneDirectory(self):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(1000)
dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))
self.assertDatasetProduces(dataset, list(range(1000)))
dataset = dataset_ops.Dataset.range(1001)
dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))
self.assertDatasetProduces(dataset, list(range(1001)))
self.assertSnapshotDirectoryContains(tmpdir, 2, 1, 1)
@combinations.generate(test_base.default_test_combinations())
def testWriteSnapshotMultipleSimultaneous(self):
tmpdir = self.snapshot_dir
dataset1 = dataset_ops.Dataset.range(1000)
dataset1 = dataset1.apply(snapshot.legacy_snapshot(tmpdir))
next1 = self.getNext(dataset1)
dataset2 = dataset_ops.Dataset.range(1000)
dataset2 = dataset2.apply(snapshot.legacy_snapshot(tmpdir))
next2 = self.getNext(dataset2)
for i in range(0, 1000):
self.assertEqual(i, self.evaluate(next1()))
self.assertEqual(i, self.evaluate(next2()))
# we check that only one copy of the metadata has been written, and the
# one that lost the race would be in passthrough mode.
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
@combinations.generate(test_base.default_test_combinations())
def testGetNextCreatesDir(self):
tmpdir = self.snapshot_dir
# We create two iterators but call getNext on only one.
dataset1 = dataset_ops.Dataset.range(1000)
dataset1 = dataset1.apply(snapshot.legacy_snapshot(tmpdir))
next1 = self.getNext(dataset1)
dataset2 = dataset_ops.Dataset.range(1001)
dataset2 = dataset2.apply(snapshot.legacy_snapshot(tmpdir))
_ = self.getNext(dataset2)
for _ in range(1000):
self.evaluate(next1())
# We check that only one directory is created.
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(compression=[
snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
snapshot.COMPRESSION_SNAPPY
])))
def testWriteSnapshotSimpleSuccessful(self, compression):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(1000)
dataset = dataset.apply(
snapshot.legacy_snapshot(tmpdir, compression=compression))
self.assertDatasetProduces(dataset, list(range(1000)))
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(compression=[
snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
snapshot.COMPRESSION_SNAPPY
])))
def testWriteSnapshotRepeatAfterwards(self, compression):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(
snapshot.legacy_snapshot(tmpdir, compression=compression))
dataset = dataset.repeat(10)
self.assertDatasetProduces(dataset, list(range(10)) * 10)
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(compression=[
snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
snapshot.COMPRESSION_SNAPPY
])))
def testWriteSnapshotMixTypes(self, compression):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(10)
def map_fn(x):
return (x, string_ops.as_string(x), string_ops.as_string(2 * x), 2 * x)
dataset = dataset.map(map_fn)
dataset = dataset.apply(
snapshot.legacy_snapshot(tmpdir, compression=compression))
dataset = dataset.repeat(10)
expected = []
for i in range(10):
expected.append((i, str(i), str(2 * i), 2 * i))
self.assertDatasetProduces(dataset, expected * 10)
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
@combinations.generate(test_base.default_test_combinations())
def testSpecifySnapshotNameWriteAndRead(self):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(
snapshot.legacy_snapshot(tmpdir, snapshot_name="my_custom_snapshot"))
dataset = dataset.repeat(10)
self.assertDatasetProduces(dataset, list(range(10)) * 10)
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
self.assertTrue(
os.path.exists(os.path.join(tmpdir, "custom-my_custom_snapshot")))
self.assertTrue(
os.path.exists(
os.path.join(tmpdir, "custom-my_custom_snapshot", "custom")))
@combinations.generate(test_base.default_test_combinations())
def testForcePassthroughMode(self):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(
snapshot.legacy_snapshot(tmpdir, mode="passthrough"))
dataset = dataset.repeat(10)
self.assertDatasetProduces(dataset, list(range(10)) * 10)
self.assertSnapshotDirectoryContains(tmpdir, 0, 0, 0)
@combinations.generate(test_base.default_test_combinations())
def testForceWriteMode(self):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir, mode="write"))
dataset = dataset.repeat(10)
self.assertDatasetProduces(dataset, list(range(10)) * 10)
# We will end up writing 10 different runs.
self.assertSnapshotDirectoryContains(tmpdir, 1, 10, 1)
@combinations.generate(test_base.default_test_combinations())
def testForceReadMode(self):
tmpdir = self.snapshot_dir
# We write a copy of the snapshot first.
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(
snapshot.legacy_snapshot(
tmpdir, mode="write", snapshot_name="my_custom_snapshot"))
self.assertDatasetProduces(dataset, list(range(10)))
# We move the run to a new name.
shutil.move(
os.path.join(tmpdir, "custom-my_custom_snapshot"),
os.path.join(tmpdir, "custom-my_custom_snapshot_2"))
# Even though the snapshot.metadata is pointing to the old run that no
# longer exists after we moved, we force it to read from the run we specify.
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(
snapshot.legacy_snapshot(
tmpdir, mode="read", snapshot_name="my_custom_snapshot_2"))
self.assertDatasetProduces(dataset, list(range(10)))
# We should still have one snapshot and one run.
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
@combinations.generate(test_base.default_test_combinations())
def testForceReadNonexistentSnapshot(self):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(10)
with self.assertRaises(errors.NotFoundError):
dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir, mode="read"))
get_next = self.getNext(dataset)
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testForceReadNonexistentNamedSnapshot(self):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(10)
with self.assertRaises(errors.NotFoundError):
dataset = dataset.apply(
snapshot.legacy_snapshot(
tmpdir, mode="read", snapshot_name="my_nonexistent_snapshot"))
get_next = self.getNext(dataset)
self.evaluate(get_next())
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(compression=[
snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
snapshot.COMPRESSION_SNAPPY
])))
def testReadSnapshotBackAfterWrite(self, compression):
self.setUpTFRecord()
filenames = self._filenames
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 10)
]
tmpdir = self.snapshot_dir
dataset = core_readers._TFRecordDataset(filenames)
dataset = dataset.apply(
snapshot.legacy_snapshot(tmpdir, compression=compression))
self.assertDatasetProduces(dataset, expected)
# remove the original files and try to read the data back only from snapshot
self.removeTFRecords()
dataset2 = core_readers._TFRecordDataset(filenames)
dataset2 = dataset2.apply(
snapshot.legacy_snapshot(tmpdir, compression=compression))
self.assertDatasetProduces(dataset2, expected)
@combinations.generate(test_base.default_test_combinations())
def testReadShuffledSnapshotAfterWrite(self):
self.setUpTFRecord(num_files=10, num_records=50)
filenames = self._filenames
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 50)
]
tmpdir = self.snapshot_dir
dataset = core_readers._TFRecordDataset(filenames)
dataset = dataset.apply(
snapshot.legacy_snapshot(tmpdir, shard_size_bytes=100))
self.assertDatasetProduces(dataset, expected)
# remove the original files and try to read the data back only from snapshot
self.removeTFRecords()
dataset2 = core_readers._TFRecordDataset(filenames)
dataset2 = dataset2.apply(
snapshot.legacy_snapshot(
tmpdir, shard_size_bytes=100, shuffle_on_read=True))
shuffled_elements = self.getDatasetOutput(dataset2)
# make sure that we don't read the file back in the same order.
self.assertNotEqual(shuffled_elements, expected)
self.assertCountEqual(shuffled_elements, expected)
# make sure all the elements are still there
dataset3 = core_readers._TFRecordDataset(filenames)
dataset3 = dataset3.apply(
snapshot.legacy_snapshot(
tmpdir, shard_size_bytes=100, shuffle_on_read=True))
self.assertDatasetProduces(dataset3, expected, assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testReadShuffledSnapshotWithSeedAfterWrite(self):
self.setUpTFRecord(num_files=10, num_records=50)
filenames = self._filenames
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 50)
]
tmpdir = self.snapshot_dir
dataset = core_readers._TFRecordDataset(filenames)
dataset = dataset.apply(
snapshot.legacy_snapshot(tmpdir, shard_size_bytes=10))
self.assertDatasetProduces(dataset, expected)
# remove the original files and try to read the data back only from snapshot
self.removeTFRecords()
dataset2 = core_readers._TFRecordDataset(filenames)
dataset2 = dataset2.apply(
snapshot.legacy_snapshot(
tmpdir,
shard_size_bytes=10,
shuffle_on_read=True,
shuffle_seed=123456))
next2 = self.getNext(dataset2)
dataset3 = core_readers._TFRecordDataset(filenames)
dataset3 = dataset3.apply(
snapshot.legacy_snapshot(
tmpdir,
shard_size_bytes=10,
shuffle_on_read=True,
shuffle_seed=123456))
next3 = self.getNext(dataset3)
# make sure that the items are read back in the same order for both datasets
for _ in range(500):
res2 = self.evaluate(next2())
res3 = self.evaluate(next3())
self.assertEqual(res2, res3)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(compression=[
snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
snapshot.COMPRESSION_SNAPPY
])))
def testReadSnapshotParallelAfterWrite(self, compression):
self.setUpTFRecord(5, 500)
filenames = self._filenames
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 5)
for r in range(0, 500)
]
tmpdir = self.snapshot_dir
dataset = core_readers._TFRecordDataset(filenames)
dataset = dataset.apply(
snapshot.legacy_snapshot(
tmpdir,
shard_size_bytes=1024 * 1024,
num_reader_threads=2,
reader_buffer_size=10,
compression=compression))
self.assertDatasetProduces(dataset, expected, assert_items_equal=True)
# remove the original files and try to read the data back only from
# snapshot.
self.removeTFRecords()
dataset2 = core_readers._TFRecordDataset(filenames)
dataset2 = dataset2.apply(
snapshot.legacy_snapshot(
tmpdir,
shard_size_bytes=1024 * 1024,
num_reader_threads=2,
reader_buffer_size=10,
compression=compression))
self.assertDatasetProduces(dataset2, expected, assert_items_equal=True)
# Not testing Snappy here because Snappy reads currently require a lot of
# memory.
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.times(
combinations.combine(compression=[
snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP
]),
combinations.combine(threads=2, size=[1, 2]) +
combinations.combine(threads=8, size=[1, 4, 8]))))
def testReadSnapshotBackAfterMultiThreadedWrite(self, compression, threads,
size):
self.setUpTFRecord()
filenames = self._filenames
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 10)
]
tmpdir = self.snapshot_dir
dataset = core_readers._TFRecordDataset(filenames)
dataset = dataset.apply(
snapshot.legacy_snapshot(
tmpdir,
compression=compression,
num_writer_threads=threads,
writer_buffer_size=size))
self.assertDatasetProduces(dataset, expected)
# remove the original files and try to read the data back only from
# snapshot
self.removeTFRecords()
dataset2 = core_readers._TFRecordDataset(filenames)
dataset2 = dataset2.apply(
snapshot.legacy_snapshot(tmpdir, compression=compression))
self.assertDatasetProduces(dataset2, expected, assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testSameFingerprintWithDifferentInitializationOrder(self):
tmpdir = self.snapshot_dir
dataset1 = dataset_ops.Dataset.range(0, 100)
dataset2 = dataset_ops.Dataset.range(100, 200)
dataset3 = dataset_ops.Dataset.range(200, 300)
dataset = dataset1.concatenate(dataset2).concatenate(dataset3)
dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))
self.assertDatasetProduces(dataset, list(range(300)))
dataset4 = dataset_ops.Dataset.range(200, 300)
dataset5 = dataset_ops.Dataset.range(100, 200)
dataset6 = dataset_ops.Dataset.range(0, 100)
dataset = dataset6.concatenate(dataset5).concatenate(dataset4)
dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))
self.assertDatasetProduces(dataset, list(range(300)))
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
@combinations.generate(test_base.default_test_combinations())
def testExpiredSnapshotRewrite(self):
tmpdir = self.snapshot_dir
dataset1 = dataset_ops.Dataset.range(1000)
dataset1 = dataset1.apply(
snapshot.legacy_snapshot(tmpdir, pending_snapshot_expiry_seconds=1))
next1 = self.getNext(dataset1)
# Don't finish reading dataset1, so it is never finalized
for _ in range(500):
self.evaluate(next1())
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
time.sleep(2)
# Creating dataset2 after we run through dataset1 due to eager mode, where
# the snapshot state is determined immediately upon dataset creation. We
# only want to determine the snapshot state for dataset2 after the first
# snapshot has expired.
dataset2 = dataset_ops.Dataset.range(1000)
dataset2 = dataset2.apply(
snapshot.legacy_snapshot(tmpdir, pending_snapshot_expiry_seconds=1))
next2 = self.getNext(dataset2)
for _ in range(500):
self.evaluate(next2())
self.assertSnapshotDirectoryContains(tmpdir, 1, 2, 1)
@combinations.generate(test_base.default_test_combinations())
def testSnapshotArgsCreateNewSnapshot(self):
tmpdir = self.snapshot_dir
dataset1 = dataset_ops.Dataset.range(1000)
dataset1 = dataset1.apply(
snapshot.legacy_snapshot(tmpdir, shard_size_bytes=10000))
next1 = self.getNext(dataset1)
for _ in range(1000):
self.evaluate(next1())
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
# Create second snapshot with a different shard_size_bytes
dataset2 = dataset_ops.Dataset.range(1000)
dataset2 = dataset1.apply(
snapshot.legacy_snapshot(tmpdir, shard_size_bytes=20000))
next2 = self.getNext(dataset2)
for _ in range(1000):
self.evaluate(next2())
self.assertSnapshotDirectoryContains(tmpdir, 2, 1, 1)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(compression=[
snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
snapshot.COMPRESSION_SNAPPY
])))
def testSpecifyShardSize(self, compression):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.from_tensor_slices([1.0])
dataset = dataset.map(lambda x: gen_array_ops.broadcast_to(x, [1024, 1024]))
dataset = dataset.repeat(10)
dataset = dataset.apply(
snapshot.legacy_snapshot(
tmpdir, shard_size_bytes=10 * 1024 * 1024, compression=compression))
next_fn = self.getNext(dataset)
for _ in range(10):
self.evaluate(next_fn())
num_files = 1
if compression == snapshot.COMPRESSION_NONE:
num_files = 3
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, num_files)
@combinations.generate(test_base.default_test_combinations())
def testAdditionalOperationsAfterReadBack(self):
self.setUpTFRecord()
filenames = self._filenames
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 10)
]
tmpdir = self.snapshot_dir
dataset = core_readers._TFRecordDataset(filenames)
dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))
self.assertDatasetProduces(dataset, expected)
# remove the original files and try to read the data back only from snapshot
self.removeTFRecords()
dataset2 = core_readers._TFRecordDataset(filenames)
dataset2 = dataset2.apply(snapshot.legacy_snapshot(tmpdir))
self.assertDatasetProduces(dataset2, expected)
expected_after = [
b"cord %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 10)
]
dataset3 = core_readers._TFRecordDataset(filenames)
dataset3 = dataset3.apply(snapshot.legacy_snapshot(tmpdir))
dataset3 = dataset3.map(lambda x: string_ops.substr_v2(x, 2, 1000))
self.assertDatasetProduces(dataset3, expected_after)
| LegacySnapshotTest |
python | simonw__datasette | datasette/views/__init__.py | {
"start": 0,
"end": 60
} | class ____:
"Base class for all documented contexts"
| Context |
python | dask__dask | dask/dataframe/dask_expr/_groupby.py | {
"start": 21207,
"end": 22107
} | class ____(GroupByReduction):
_parameters = [
"frame",
"ddof",
"numeric_only",
"split_out",
"split_every",
"sort",
"dropna",
"observed",
"shuffle_method",
]
_defaults = {
"split_out": 1,
"sort": None,
"observed": None,
"dropna": None,
"split_every": None,
"shuffle_method": None,
}
reduction_aggregate = staticmethod(_var_agg)
reduction_combine = staticmethod(_var_combine)
chunk = staticmethod(_var_chunk)
@functools.cached_property
def aggregate_kwargs(self):
return {
"ddof": self.ddof,
"numeric_only": self.numeric_only,
**super().aggregate_kwargs,
}
@functools.cached_property
def chunk_kwargs(self):
return {"numeric_only": self.numeric_only, **super().chunk_kwargs}
| Var |
python | eventlet__eventlet | tests/greendns_test.py | {
"start": 19872,
"end": 30730
} | class ____(tests.LimitedTestCase):
def _make_mock_resolve_cname(self):
"""A stubbed out cname function"""
class ResolveCname:
qname = None
cname = 'cname.example.com'
def __call__(self, host):
self.qname = host
return self.cname
resolve_cname = ResolveCname()
return resolve_cname
def setUp(self):
self._old_resolve = greendns.resolve
self._old_resolve_cname = greendns.resolve_cname
self._old_orig_getaddrinfo = greendns.socket.getaddrinfo
def tearDown(self):
greendns.resolve = self._old_resolve
greendns.resolve_cname = self._old_resolve_cname
greendns.socket.getaddrinfo = self._old_orig_getaddrinfo
def test_getaddrinfo(self):
greendns.resolve = _make_mock_resolve()
greendns.resolve.add('example.com', '127.0.0.2')
greendns.resolve.add('example.com', '::1')
res = greendns.getaddrinfo('example.com', 'domain')
addr = ('127.0.0.2', 53)
tcp = (socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, addr)
udp = (socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP, addr)
addr = ('::1', 53, 0, 0)
tcp6 = (socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_TCP, addr)
udp6 = (socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDP, addr)
filt_res = [ai[:3] + (ai[4],) for ai in res]
assert tcp in filt_res
assert udp in filt_res
assert tcp6 in filt_res
assert udp6 in filt_res
def test_getaddrinfo_idn(self):
greendns.resolve = _make_mock_resolve()
idn_name = 'евентлет.com'
greendns.resolve.add(idn_name.encode('idna').decode('ascii'), '127.0.0.2')
res = greendns.getaddrinfo(idn_name, 'domain')
addr = ('127.0.0.2', 53)
tcp = (socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, addr)
udp = (socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP, addr)
filt_res = [ai[:3] + (ai[4],) for ai in res]
assert tcp in filt_res
assert udp in filt_res
def test_getaddrinfo_inet(self):
greendns.resolve = _make_mock_resolve()
greendns.resolve.add('example.com', '127.0.0.2')
res = greendns.getaddrinfo('example.com', 'domain', socket.AF_INET)
addr = ('127.0.0.2', 53)
tcp = (socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, addr)
udp = (socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP, addr)
assert tcp in [ai[:3] + (ai[4],) for ai in res]
assert udp in [ai[:3] + (ai[4],) for ai in res]
def test_getaddrinfo_inet6(self):
greendns.resolve = _make_mock_resolve()
greendns.resolve.add('example.com', '::1')
res = greendns.getaddrinfo('example.com', 'domain', socket.AF_INET6)
addr = ('::1', 53, 0, 0)
tcp = (socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_TCP, addr)
udp = (socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDP, addr)
assert tcp in [ai[:3] + (ai[4],) for ai in res]
assert udp in [ai[:3] + (ai[4],) for ai in res]
def test_getaddrinfo_only_a_ans(self):
greendns.resolve = _make_mock_resolve()
greendns.resolve.add('example.com', '1.2.3.4')
res = greendns.getaddrinfo('example.com', None)
addr = [('1.2.3.4', 0)] * len(res)
assert addr == [ai[-1] for ai in res]
def test_getaddrinfo_only_aaaa_ans(self):
greendns.resolve = _make_mock_resolve()
greendns.resolve.add('example.com', 'dead:beef::1')
res = greendns.getaddrinfo('example.com', None)
addr = [('dead:beef::1', 0, 0, 0)] * len(res)
assert addr == [ai[-1] for ai in res]
def test_getaddrinfo_hosts_only_ans_with_timeout(self):
def clear_raises(res_self):
res_self.raises = None
return greendns.dns.resolver.NoAnswer()
hostsres = _make_mock_base_resolver()
hostsres.raises = clear_raises
hostsres.rr.address = '1.2.3.4'
greendns.resolver = greendns.ResolverProxy(hostsres())
res = _make_mock_base_resolver()
res.raises = greendns.dns.exception.Timeout
greendns.resolver._resolver = res()
result = greendns.getaddrinfo('example.com', None, 0)
addr = [('1.2.3.4', 0)] * len(result)
assert addr == [ai[-1] for ai in result]
def test_getaddrinfo_hosts_only_ans_with_error(self):
def clear_raises(res_self):
res_self.raises = None
return greendns.dns.resolver.NoAnswer()
hostsres = _make_mock_base_resolver()
hostsres.raises = clear_raises
hostsres.rr.address = '1.2.3.4'
greendns.resolver = greendns.ResolverProxy(hostsres())
res = _make_mock_base_resolver()
res.raises = greendns.dns.exception.DNSException
greendns.resolver._resolver = res()
result = greendns.getaddrinfo('example.com', None, 0)
addr = [('1.2.3.4', 0)] * len(result)
assert addr == [ai[-1] for ai in result]
def test_getaddrinfo_bytes(self):
greendns.resolve = _make_mock_resolve()
greendns.resolve.add('example.com', '1.2.3.4')
res = greendns.getaddrinfo(b'example.com', b'0')
addr = [('1.2.3.4', 0)] * len(res)
assert addr == [ai[-1] for ai in res]
def test_getaddrinfo_hosts_only_timeout(self):
hostsres = _make_mock_base_resolver()
hostsres.raises = greendns.dns.resolver.NoAnswer
greendns.resolver = greendns.ResolverProxy(hostsres())
res = _make_mock_base_resolver()
res.raises = greendns.dns.exception.Timeout
greendns.resolver._resolver = res()
with tests.assert_raises(socket.gaierror):
greendns.getaddrinfo('example.com', None, 0)
def test_getaddrinfo_hosts_only_dns_error(self):
hostsres = _make_mock_base_resolver()
hostsres.raises = greendns.dns.resolver.NoAnswer
greendns.resolver = greendns.ResolverProxy(hostsres())
res = _make_mock_base_resolver()
res.raises = greendns.dns.exception.DNSException
greendns.resolver._resolver = res()
with tests.assert_raises(socket.gaierror):
greendns.getaddrinfo('example.com', None, 0)
def test_canonname(self):
greendns.resolve = _make_mock_resolve()
greendns.resolve.add('host.example.com', '1.2.3.4')
greendns.resolve_cname = self._make_mock_resolve_cname()
res = greendns.getaddrinfo('host.example.com', None,
0, 0, 0, socket.AI_CANONNAME)
assert res[0][3] == 'cname.example.com'
def test_host_none(self):
res = greendns.getaddrinfo(None, 80)
for addr in {ai[-1] for ai in res}:
assert addr in [('127.0.0.1', 80), ('::1', 80, 0, 0)]
def test_host_none_passive(self):
res = greendns.getaddrinfo(None, 80, 0, 0, 0, socket.AI_PASSIVE)
for addr in {ai[-1] for ai in res}:
assert addr in [('0.0.0.0', 80), ('::', 80, 0, 0)]
def test_v4mapped(self):
greendns.resolve = _make_mock_resolve()
greendns.resolve.add('example.com', '1.2.3.4')
res = greendns.getaddrinfo('example.com', 80,
socket.AF_INET6, 0, 0, socket.AI_V4MAPPED)
addrs = {ai[-1] for ai in res}
assert addrs == {('::ffff:1.2.3.4', 80, 0, 0)}
def test_v4mapped_all(self):
greendns.resolve = _make_mock_resolve()
greendns.resolve.add('example.com', '1.2.3.4')
greendns.resolve.add('example.com', 'dead:beef::1')
res = greendns.getaddrinfo('example.com', 80, socket.AF_INET6, 0, 0,
socket.AI_V4MAPPED | socket.AI_ALL)
addrs = {ai[-1] for ai in res}
for addr in addrs:
assert addr in [('::ffff:1.2.3.4', 80, 0, 0),
('dead:beef::1', 80, 0, 0)]
def test_numericserv(self):
greendns.resolve = _make_mock_resolve()
greendns.resolve.add('example.com', '1.2.3.4')
with tests.assert_raises(socket.gaierror):
greendns.getaddrinfo('example.com', 'www', 0, 0, 0, socket.AI_NUMERICSERV)
def test_numerichost(self):
greendns.resolve = _make_mock_resolve()
greendns.resolve.add('example.com', '1.2.3.4')
with tests.assert_raises(socket.gaierror):
greendns.getaddrinfo('example.com', 80, 0, 0, 0, socket.AI_NUMERICHOST)
def test_noport(self):
greendns.resolve = _make_mock_resolve()
greendns.resolve.add('example.com', '1.2.3.4')
ai = greendns.getaddrinfo('example.com', None)
assert ai[0][-1][1] == 0
def test_AI_ADDRCONFIG(self):
# When the users sets AI_ADDRCONFIG but only has an IPv4
# address configured we will iterate over the results, but the
# call for the IPv6 address will fail rather then return an
# empty list. In that case we should catch the exception and
# only return the ones which worked.
def getaddrinfo(addr, port, family, socktype, proto, aiflags):
if addr == '127.0.0.1':
return [(socket.AF_INET, 1, 0, '', ('127.0.0.1', 0))]
elif addr == '::1' and aiflags & socket.AI_ADDRCONFIG:
raise OSError(socket.EAI_ADDRFAMILY,
'Address family for hostname not supported')
elif addr == '::1' and not aiflags & socket.AI_ADDRCONFIG:
return [(socket.AF_INET6, 1, 0, '', ('::1', 0, 0, 0))]
greendns.socket.getaddrinfo = getaddrinfo
greendns.resolve = _make_mock_resolve()
greendns.resolve.add('localhost', '127.0.0.1')
greendns.resolve.add('localhost', '::1')
res = greendns.getaddrinfo('localhost', None,
0, 0, 0, socket.AI_ADDRCONFIG)
assert res == [(socket.AF_INET, 1, 0, '', ('127.0.0.1', 0))]
def test_AI_ADDRCONFIG_noaddr(self):
# If AI_ADDRCONFIG is used but there is no address we need to
# get an exception, not an empty list.
def getaddrinfo(addr, port, family, socktype, proto, aiflags):
raise OSError(socket.EAI_ADDRFAMILY,
'Address family for hostname not supported')
greendns.socket.getaddrinfo = getaddrinfo
greendns.resolve = _make_mock_resolve()
try:
greendns.getaddrinfo('::1', None, 0, 0, 0, socket.AI_ADDRCONFIG)
except OSError as e:
assert e.errno == socket.EAI_ADDRFAMILY
def test_getaddrinfo_type_parameter(self):
greendns.resolve = _make_mock_resolve()
greendns.resolve.add('localhost', '127.0.0.1')
greendns.getaddrinfo('localhost', None, type=0)
| TestGetaddrinfo |
python | mwaskom__seaborn | seaborn/_core/properties.py | {
"start": 5305,
"end": 5709
} | class ____(Property):
"""The position of visual marks with respect to the axes of the plot."""
legend = False
normed = False
# =================================================================================== #
# Properties with numeric values where scale range can be defined as an interval
# =================================================================================== #
| Coordinate |
python | huggingface__transformers | src/transformers/utils/generic.py | {
"start": 35614,
"end": 37088
} | class ____(MutableMapping):
"""
Dict-like object keeping track of a class-wide mapping, as well as a local one. Allows to have library-wide
modifications though the class mapping, as well as local modifications in a single file with the local mapping.
"""
# Class instance object, so that a call to `register` can be reflected into all other files correctly, even if
# a new instance is created (in order to locally override a given function)
_global_mapping = {}
def __init__(self):
self._local_mapping = {}
def __getitem__(self, key):
# First check if instance has a local override
if key in self._local_mapping:
return self._local_mapping[key]
return self._global_mapping[key]
def __setitem__(self, key, value):
# Allow local update of the default functions without impacting other instances
self._local_mapping.update({key: value})
def __delitem__(self, key):
del self._local_mapping[key]
def __iter__(self):
# Ensure we use all keys, with the overwritten ones on top
return iter({**self._global_mapping, **self._local_mapping})
def __len__(self):
return len(self._global_mapping.keys() | self._local_mapping.keys())
@classmethod
def register(cls, key: str, value: Callable):
cls._global_mapping.update({key: value})
def valid_keys(self) -> list[str]:
return list(self.keys())
| GeneralInterface |
python | automl__auto-sklearn | autosklearn/experimental/askl2.py | {
"start": 4547,
"end": 21391
} | class ____(AutoSklearnClassifier):
def __init__(
self,
time_left_for_this_task: int = 3600,
per_run_time_limit=None,
ensemble_size: int | None = None,
ensemble_class: AbstractEnsemble | None = EnsembleSelection,
ensemble_kwargs: Dict[str, Any] | None = None,
ensemble_nbest: Union[float, int] = 50,
max_models_on_disc: int = 50,
seed: int = 1,
memory_limit: int = 3072,
tmp_folder: Optional[str] = None,
delete_tmp_folder_after_terminate: bool = True,
n_jobs: Optional[int] = None,
dask_client: Optional[dask.distributed.Client] = None,
disable_evaluator_output: bool = False,
smac_scenario_args: Optional[Dict[str, Any]] = None,
logging_config: Optional[Dict[str, Any]] = None,
metric: Optional[Scorer] = None,
scoring_functions: Optional[List[Scorer]] = None,
load_models: bool = True,
dataset_compression: Union[bool, Mapping[str, Any]] = True,
allow_string_features: bool = True,
disable_progress_bar: bool = False,
):
"""
Parameters
----------
time_left_for_this_task : int, optional (default=3600)
Time limit in seconds for the search of appropriate
models. By increasing this value, *auto-sklearn* has a higher
chance of finding better models.
per_run_time_limit : int, optional (default=1/10 of time_left_for_this_task)
Time limit for a single call to the machine learning model.
Model fitting will be terminated if the machine learning
algorithm runs over the time limit. Set this value high enough so
that typical machine learning algorithms can be fit on the
training data.
ensemble_size : int, optional
Number of models added to the ensemble built by *Ensemble
selection from libraries of models*. Models are drawn with
replacement. If set to ``0`` no ensemble is fit.
Deprecated - will be removed in Auto-sklearn 0.16. Please pass
this argument via ``ensemble_kwargs={"ensemble_size": int}``
if you want to change the ensemble size for ensemble selection.
ensemble_class : Type[AbstractEnsemble], optional (default=EnsembleSelection)
Class implementing the post-hoc ensemble algorithm. Set to
``None`` to disable ensemble building or use ``SingleBest``
to obtain only use the single best model instead of an
ensemble.
ensemble_kwargs : Dict, optional
Keyword arguments that are passed to the ensemble class upon
initialization.
max_models_on_disc: int, optional (default=50),
Defines the maximum number of models that are kept in the disc.
The additional number of models are permanently deleted. Due to the
nature of this variable, it sets the upper limit on how many models
can be used for an ensemble.
It must be an integer greater or equal than 1.
If set to None, all models are kept on the disc.
seed : int, optional (default=1)
Used to seed SMAC. Will determine the output file names.
memory_limit : int, optional (3072)
Memory limit in MB for the machine learning algorithm.
`auto-sklearn` will stop fitting the machine learning algorithm if
it tries to allocate more than ``memory_limit`` MB.
**Important notes:**
* If ``None`` is provided, no memory limit is set.
* In case of multi-processing, ``memory_limit`` will be *per job*, so the total usage is
``n_jobs x memory_limit``.
* The memory limit also applies to the ensemble creation process.
tmp_folder : string, optional (None)
folder to store configuration output and log files, if ``None``
automatically use ``/tmp/autosklearn_tmp_$pid_$random_number``
delete_tmp_folder_after_terminate: string, optional (True)
remove tmp_folder, when finished. If tmp_folder is None
tmp_dir will always be deleted
n_jobs : int, optional, experimental
The number of jobs to run in parallel for ``fit()``. ``-1`` means
using all processors.
**Important notes**:
* By default, Auto-sklearn uses one core.
* Ensemble building is not affected by ``n_jobs`` but can be controlled by the number
of models in the ensemble.
* ``predict()`` is not affected by ``n_jobs`` (in contrast to most scikit-learn models)
* If ``dask_client`` is ``None``, a new dask client is created.
dask_client : dask.distributed.Client, optional
User-created dask client, can be used to start a dask cluster and then
attach auto-sklearn to it.
disable_evaluator_output: bool or list, optional (False)
If True, disable model and prediction output. Cannot be used
together with ensemble building. ``predict()`` cannot be used when
setting this True. Can also be used as a list to pass more
fine-grained information on what to save. Allowed elements in the
list are:
* ``'y_optimization'`` : do not save the predictions for the
optimization/validation set, which would later on be used to build
an ensemble.
* ``model`` : do not save any model files
smac_scenario_args : dict, optional (None)
Additional arguments inserted into the scenario of SMAC. See the
`SMAC documentation <https://automl.github.io/SMAC3/main/api/smac.scenario.html#smac.scenario.Scenario>`_
for a list of available arguments.
logging_config : dict, optional (None)
dictionary object specifying the logger configuration. If None,
the default logging.yaml file is used, which can be found in
the directory ``util/logging.yaml`` relative to the installation.
metric : Scorer, optional (None)
An instance of :class:`autosklearn.metrics.Scorer` as created by
:meth:`autosklearn.metrics.make_scorer`. These are the `Built-in
Metrics`_.
If None is provided, a default metric is selected depending on the task.
scoring_functions : List[Scorer], optional (None)
List of scorers which will be calculated for each pipeline and results will be
available via ``cv_results``
load_models : bool, optional (True)
Whether to load the models after fitting Auto-sklearn.
disable_progress_bar: bool = False
Whether to disable the progress bar that is displayed in the console
while fitting to the training data.
Attributes
----------
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
Not all keys returned by scikit-learn are supported yet.
""" # noqa (links are too long)
self.required_training = False # Boolean to indicate if selectors were trained.
include_estimators = [
"extra_trees",
"passive_aggressive",
"random_forest",
"sgd",
"gradient_boosting",
"mlp",
]
include_preprocessors = ["no_preprocessing"]
include = {
"classifier": include_estimators,
"feature_preprocessor": include_preprocessors,
}
self.train_selectors(selected_metric=metric)
super().__init__(
time_left_for_this_task=time_left_for_this_task,
per_run_time_limit=per_run_time_limit,
initial_configurations_via_metalearning=0,
ensemble_size=ensemble_size,
ensemble_class=ensemble_class,
ensemble_kwargs=ensemble_kwargs,
ensemble_nbest=ensemble_nbest,
max_models_on_disc=max_models_on_disc,
seed=seed,
memory_limit=memory_limit,
include=include,
exclude=None,
resampling_strategy=None,
resampling_strategy_arguments=None,
tmp_folder=tmp_folder,
delete_tmp_folder_after_terminate=delete_tmp_folder_after_terminate,
n_jobs=n_jobs,
dask_client=dask_client,
disable_evaluator_output=disable_evaluator_output,
get_smac_object_callback=None,
smac_scenario_args=smac_scenario_args,
logging_config=logging_config,
metadata_directory=None,
metric=metric,
scoring_functions=scoring_functions,
load_models=load_models,
allow_string_features=allow_string_features,
disable_progress_bar=disable_progress_bar,
)
def train_selectors(self, selected_metric=None):
self.selector_metrics = (balanced_accuracy, roc_auc, log_loss)
self.selector_files = {}
self.this_directory = pathlib.Path(__file__).resolve().parent
if selected_metric is not None:
metric_list = [selected_metric]
else:
metric_list = self.selector_metrics
for metric in metric_list:
training_data_file = (
self.this_directory / metric.name / "askl2_training_data.json"
)
with open(training_data_file) as fh:
training_data = json.load(fh)
fh.seek(0)
m = hashlib.md5()
m.update(fh.read().encode("utf8"))
training_data_hash = m.hexdigest()[:10]
selector_filename = "askl2_selector_%s_%s_%s_%s.pkl" % (
autosklearn.__version__,
sklearn.__version__,
metric.name,
training_data_hash,
)
selector_directory = os.environ.get("XDG_CACHE_HOME")
if selector_directory is None:
selector_directory = pathlib.Path.home()
selector_directory = (
pathlib.Path(selector_directory).joinpath("auto-sklearn").expanduser()
)
self.selector_files[metric.name] = selector_directory / selector_filename
metafeatures = pd.DataFrame(training_data["metafeatures"])
self.strategies = training_data["strategies"]
y_values = pd.DataFrame(
training_data["y_values"],
columns=self.strategies,
index=metafeatures.index,
)
minima_for_methods = training_data["minima_for_methods"]
maxima_for_methods = training_data["maxima_for_methods"]
default_strategies = training_data["tie_break_order"]
if not self.selector_files[metric.name].exists():
self.required_training = True
selector = autosklearn.experimental.selector.OVORF(
configuration=training_data["configuration"],
random_state=np.random.RandomState(1),
n_estimators=500,
tie_break_order=default_strategies,
)
selector = autosklearn.experimental.selector.FallbackWrapper(
selector, default_strategies
)
selector.fit(
X=metafeatures,
y=y_values,
minima=minima_for_methods,
maxima=maxima_for_methods,
)
self.selector_files[metric.name].parent.mkdir(
exist_ok=True, parents=True
)
try:
with open(self.selector_files[metric.name], "wb") as fh:
pickle.dump(selector, fh)
except Exception as e:
print(
"AutoSklearn2Classifier needs to create a selector file under "
"the user's home directory or XDG_CACHE_HOME. Nevertheless "
"the path {} is not writable.".format(
self.selector_files[metric.name]
)
)
raise e
def fit(
self,
X,
y,
X_test=None,
y_test=None,
metric=None,
feat_type=None,
dataset_name=None,
):
# TODO
# regularly check https://github.com/scikit-learn/scikit-learn/issues/15336
# whether histogram gradient boosting in scikit-learn finally support
# sparse data
is_sparse = scipy.sparse.issparse(X)
if is_sparse:
include_estimators = [
"extra_trees",
"passive_aggressive",
"random_forest",
"sgd",
"mlp",
]
else:
include_estimators = [
"extra_trees",
"passive_aggressive",
"random_forest",
"sgd",
"gradient_boosting",
"mlp",
]
self.include["classifier"] = include_estimators
if self.metric is None:
if len(y.shape) == 1 or y.shape[1] == 1:
self.metric = accuracy
else:
self.metric = log_loss
if self.metric in self.selector_metrics:
metric_name = self.metric.name
selector_file = self.selector_files[metric_name]
else:
metric_name = "balanced_accuracy"
selector_file = self.selector_files[metric_name]
with open(selector_file, "rb") as fh:
selector = pickle.load(fh)
metafeatures = pd.DataFrame(
{dataset_name: [X.shape[1], X.shape[0]]}
).transpose()
selection = np.argmax(selector.predict(metafeatures))
automl_policy = self.strategies[selection]
setting = {
"RF_None_holdout_iterative_es_if": {
"resampling_strategy": "holdout-iterative-fit",
"fidelity": None,
},
"RF_None_3CV_iterative_es_if": {
"resampling_strategy": "cv-iterative-fit",
"folds": 3,
"fidelity": None,
},
"RF_None_5CV_iterative_es_if": {
"resampling_strategy": "cv-iterative-fit",
"folds": 5,
"fidelity": None,
},
"RF_None_10CV_iterative_es_if": {
"resampling_strategy": "cv-iterative-fit",
"folds": 10,
"fidelity": None,
},
"RF_SH-eta4-i_holdout_iterative_es_if": {
"resampling_strategy": "holdout-iterative-fit",
"fidelity": "SH",
},
"RF_SH-eta4-i_3CV_iterative_es_if": {
"resampling_strategy": "cv-iterative-fit",
"folds": 3,
"fidelity": "SH",
},
"RF_SH-eta4-i_5CV_iterative_es_if": {
"resampling_strategy": "cv-iterative-fit",
"folds": 5,
"fidelity": "SH",
},
"RF_SH-eta4-i_10CV_iterative_es_if": {
"resampling_strategy": "cv-iterative-fit",
"folds": 10,
"fidelity": "SH",
},
}[automl_policy]
resampling_strategy = setting["resampling_strategy"]
if resampling_strategy == "cv-iterative-fit":
resampling_strategy_kwargs = {"folds": setting["folds"]}
else:
resampling_strategy_kwargs = None
portfolio_file = (
self.this_directory
/ metric_name
/ "askl2_portfolios"
/ ("%s.json" % automl_policy)
)
with open(portfolio_file) as fh:
portfolio_json = json.load(fh)
portfolio = portfolio_json["portfolio"]
if setting["fidelity"] == "SH":
smac_callback = SHObjectCallback("iterations", 4, 5.0, portfolio)
else:
smac_callback = SmacObjectCallback(portfolio)
self.resampling_strategy = resampling_strategy
self.resampling_strategy_arguments = resampling_strategy_kwargs
self.get_smac_object_callback = smac_callback
return super().fit(
X=X,
y=y,
X_test=X_test,
y_test=y_test,
feat_type=feat_type,
dataset_name=dataset_name,
)
| AutoSklearn2Classifier |
python | apache__airflow | providers/fab/tests/unit/fab/auth_manager/test_fab_auth_manager.py | {
"start": 5479,
"end": 30235
} | class ____:
@mock.patch("flask_login.utils._get_user")
def test_get_user(self, mock_current_user, minimal_app_for_auth_api, auth_manager):
user = Mock()
user.is_anonymous.return_value = True
mock_current_user.return_value = user
with minimal_app_for_auth_api.app_context():
assert auth_manager.get_user() == user
@mock.patch("flask_login.utils._get_user")
def test_get_user_from_flask_g(self, mock_current_user, minimal_app_for_auth_api, auth_manager):
session_user = Mock()
session_user.is_anonymous = True
mock_current_user.return_value = session_user
flask_g_user = Mock()
flask_g_user.is_anonymous = False
with minimal_app_for_auth_api.app_context():
with user_set(minimal_app_for_auth_api, flask_g_user):
assert auth_manager.get_user() == flask_g_user
def test_deserialize_user(self, flask_app, auth_manager_with_appbuilder):
user = create_user(flask_app, "test")
result = auth_manager_with_appbuilder.deserialize_user({"sub": str(user.id)})
assert user.get_id() == result.get_id()
def test_serialize_user(self, flask_app, auth_manager_with_appbuilder):
user = create_user(flask_app, "test")
result = auth_manager_with_appbuilder.serialize_user(user)
assert result == {"sub": str(user.id)}
@mock.patch.object(FabAuthManager, "get_user")
def test_is_logged_in(self, mock_get_user, auth_manager_with_appbuilder):
user = Mock()
user.is_anonymous.return_value = True
mock_get_user.return_value = user
assert auth_manager_with_appbuilder.is_logged_in() is False
@mock.patch.object(FabAuthManager, "get_user")
def test_is_logged_in_with_inactive_user(self, mock_get_user, auth_manager_with_appbuilder):
user = Mock()
user.is_anonymous.return_value = False
user.is_active.return_value = True
mock_get_user.return_value = user
assert auth_manager_with_appbuilder.is_logged_in() is False
@pytest.mark.parametrize(
("api_name", "method", "user_permissions", "expected_result"),
chain(
*[
(
# With permission
(
api_name,
"POST",
[(ACTION_CAN_CREATE, resource_type)],
True,
),
# With permission
(
api_name,
"GET",
[(ACTION_CAN_READ, resource_type)],
True,
),
# With permission (with several user permissions)
(
api_name,
"DELETE",
[(ACTION_CAN_DELETE, resource_type), (ACTION_CAN_CREATE, "resource_test")],
True,
),
# With permission
(
api_name,
"MENU",
[(ACTION_CAN_ACCESS_MENU, resource_type)],
True,
),
# Without permission
(
api_name,
"POST",
[(ACTION_CAN_READ, resource_type), (ACTION_CAN_CREATE, "resource_test")],
False,
),
)
for api_name, resource_type in IS_AUTHORIZED_METHODS_SIMPLE.items()
]
),
)
def test_is_authorized(self, api_name, method, user_permissions, expected_result, auth_manager):
user = Mock()
user.perms = user_permissions
result = getattr(auth_manager, api_name)(
method=method,
user=user,
)
assert result == expected_result
@pytest.mark.parametrize(
("method", "dag_access_entity", "dag_details", "user_permissions", "expected_result"),
[
# Scenario 1 #
# With global permissions on Dags
(
"GET",
None,
None,
[(ACTION_CAN_READ, RESOURCE_DAG)],
True,
),
# Without permission on DAGs
(
"GET",
None,
None,
[(ACTION_CAN_READ, "resource_test")],
False,
),
# With specific DAG permissions but no specific DAG requested
(
"GET",
None,
None,
[(ACTION_CAN_READ, "DAG:test_dag_id")],
True,
),
# With multiple specific DAG permissions, no specific DAG requested
(
"GET",
None,
None,
[(ACTION_CAN_READ, "DAG:test_dag_id"), (ACTION_CAN_READ, "DAG:test_dag_id2")],
True,
),
# With specific DAG permissions and wrong method
(
"POST",
None,
None,
[(ACTION_CAN_READ, "DAG:test_dag_id")],
False,
),
# With correct POST permissions
(
"POST",
None,
None,
[(ACTION_CAN_CREATE, RESOURCE_DAG)],
True,
),
# Mixed permissions - some DAG, some non-DAG
(
"GET",
None,
None,
[(ACTION_CAN_READ, "DAG:test_dag_id"), (ACTION_CAN_READ, "resource_test")],
True,
),
# DAG sub-entity with specific DAG permissions but no specific DAG requested
(
"GET",
DagAccessEntity.RUN,
None,
[(ACTION_CAN_READ, "DAG:test_dag_id"), (ACTION_CAN_READ, RESOURCE_DAG_RUN)],
True,
),
# DAG sub-entity access with no DAG permissions, no specific DAG requested
(
"GET",
DagAccessEntity.RUN,
None,
[(ACTION_CAN_READ, RESOURCE_DAG_RUN)],
False,
),
# DAG sub-entity with specific DAG permissions but missing sub-entity permission
(
"GET",
DagAccessEntity.TASK_INSTANCE,
None,
[(ACTION_CAN_READ, "DAG:test_dag_id")],
False,
),
# Multiple DAG access entities with proper permissions
(
"DELETE",
DagAccessEntity.TASK,
None,
[(ACTION_CAN_EDIT, "DAG:test_dag_id"), (ACTION_CAN_DELETE, RESOURCE_TASK_INSTANCE)],
True,
),
# User with specific DAG permissions but wrong method for sub-entity
(
"POST",
DagAccessEntity.RUN,
None,
[(ACTION_CAN_READ, "DAG:test_dag_id"), (ACTION_CAN_READ, RESOURCE_DAG_RUN)],
False,
),
# Scenario 2 #
# On specific DAG with global permissions on Dags
(
"GET",
None,
DagDetails(id="test_dag_id"),
[(ACTION_CAN_READ, RESOURCE_DAG)],
True,
),
# With permission on a specific DAG
(
"GET",
None,
DagDetails(id="test_dag_id"),
[(ACTION_CAN_READ, "DAG:test_dag_id"), (ACTION_CAN_READ, "DAG:test_dag_id2")],
True,
),
# Without permission on a specific DAG (wrong method)
(
"POST",
None,
DagDetails(id="test_dag_id"),
[(ACTION_CAN_READ, "DAG:test_dag_id")],
False,
),
# Without permission on a specific DAG
(
"GET",
None,
DagDetails(id="test_dag_id2"),
[(ACTION_CAN_READ, "DAG:test_dag_id")],
False,
),
# Scenario 3 #
# With global permissions on DAGs
(
"GET",
DagAccessEntity.RUN,
DagDetails(id="test_dag_id"),
[(ACTION_CAN_READ, RESOURCE_DAG), (ACTION_CAN_READ, RESOURCE_DAG_RUN)],
True,
),
# Without read permissions on a specific DAG
(
"GET",
DagAccessEntity.TASK_INSTANCE,
DagDetails(id="test_dag_id"),
[(ACTION_CAN_READ, RESOURCE_TASK_INSTANCE)],
False,
),
# With read permissions on a specific DAG but not on the DAG run
(
"GET",
DagAccessEntity.TASK_INSTANCE,
DagDetails(id="test_dag_id"),
[(ACTION_CAN_READ, "DAG:test_dag_id"), (ACTION_CAN_READ, RESOURCE_TASK_INSTANCE)],
False,
),
# With read permissions on a specific DAG and on the DAG run
(
"GET",
DagAccessEntity.TASK_INSTANCE,
DagDetails(id="test_dag_id"),
[
(ACTION_CAN_READ, "DAG:test_dag_id"),
(ACTION_CAN_READ, RESOURCE_TASK_INSTANCE),
(ACTION_CAN_READ, RESOURCE_DAG_RUN),
],
True,
),
# With edit permissions on a specific DAG and delete on the DAG access entity
(
"DELETE",
DagAccessEntity.TASK,
DagDetails(id="test_dag_id"),
[(ACTION_CAN_EDIT, "DAG:test_dag_id"), (ACTION_CAN_DELETE, RESOURCE_TASK_INSTANCE)],
True,
),
# With edit permissions on a specific DAG and create on the DAG access entity
(
"POST",
DagAccessEntity.RUN,
DagDetails(id="test_dag_id"),
[(ACTION_CAN_EDIT, "DAG:test_dag_id"), (ACTION_CAN_CREATE, RESOURCE_DAG_RUN)],
True,
),
# Without permissions to edit the DAG
(
"POST",
DagAccessEntity.RUN,
DagDetails(id="test_dag_id"),
[(ACTION_CAN_CREATE, RESOURCE_DAG_RUN)],
False,
),
# Without read permissions on a specific DAG
(
"GET",
DagAccessEntity.TASK_LOGS,
DagDetails(id="test_dag_id"),
[(ACTION_CAN_READ, RESOURCE_TASK_INSTANCE)],
False,
),
# Use deprecated prefix "DAG Run" to assign permissions specifically on dag runs
(
"GET",
DagAccessEntity.RUN,
DagDetails(id="test_dag_id"),
[(ACTION_CAN_READ, "DAG:test_dag_id"), (ACTION_CAN_READ, "DAG Run:test_dag_id")],
True,
),
],
)
def test_is_authorized_dag(
self,
method,
dag_access_entity,
dag_details,
user_permissions,
expected_result,
auth_manager_with_appbuilder,
):
user = Mock()
user.perms = user_permissions
user.id = 1
result = auth_manager_with_appbuilder.is_authorized_dag(
method=method, access_entity=dag_access_entity, details=dag_details, user=user
)
assert result == expected_result
@pytest.mark.skipif(
AIRFLOW_V_3_1_PLUS is not True, reason="HITL test will be skipped if Airflow version < 3.1.0"
)
@pytest.mark.parametrize(
("method", "dag_access_entity", "dag_details", "user_permissions", "expected_result"),
HITL_ENDPOINT_TESTS if AIRFLOW_V_3_1_PLUS else [],
)
@mock.patch.object(FabAuthManager, "get_authorized_dag_ids")
def test_is_authorized_dag_hitl_detail(
self,
mock_get_authorized_dag_ids,
method,
dag_access_entity,
dag_details,
user_permissions,
expected_result,
auth_manager_with_appbuilder,
):
dag_permissions = [perm[1] for perm in user_permissions if perm[1].startswith("DAG:")]
dag_ids = {perm.replace("DAG:", "") for perm in dag_permissions}
mock_get_authorized_dag_ids.return_value = dag_ids
user = Mock()
user.perms = user_permissions
user.id = 1
result = auth_manager_with_appbuilder.is_authorized_dag(
method=method, access_entity=dag_access_entity, details=dag_details, user=user
)
assert result == expected_result
@pytest.mark.parametrize(
("access_view", "user_permissions", "expected_result"),
[
# With permission (jobs)
(
AccessView.JOBS,
[(ACTION_CAN_READ, RESOURCE_JOB)],
True,
),
# With permission (plugins)
(
AccessView.PLUGINS,
[(ACTION_CAN_READ, RESOURCE_PLUGIN)],
True,
),
# With permission (providers)
(
AccessView.PROVIDERS,
[(ACTION_CAN_READ, RESOURCE_PROVIDER)],
True,
),
# With permission (triggers)
(
AccessView.TRIGGERS,
[(ACTION_CAN_READ, RESOURCE_TRIGGER)],
True,
),
# With permission (website)
(
AccessView.WEBSITE,
[(ACTION_CAN_READ, RESOURCE_WEBSITE)],
True,
),
# Without permission
(
AccessView.WEBSITE,
[(ACTION_CAN_READ, "resource_test"), (ACTION_CAN_CREATE, RESOURCE_WEBSITE)],
False,
),
# Without permission
(
AccessView.WEBSITE,
[(ACTION_CAN_READ, RESOURCE_TRIGGER)],
False,
),
# Docs (positive)
(
AccessView.DOCS,
[(ACTION_CAN_ACCESS_MENU, RESOURCE_DOCS)],
True,
),
# Without permission
(
AccessView.DOCS,
[(ACTION_CAN_READ, RESOURCE_DOCS)],
False,
),
],
)
def test_is_authorized_view(self, access_view, user_permissions, expected_result, auth_manager):
user = Mock()
user.perms = user_permissions
result = auth_manager.is_authorized_view(access_view=access_view, user=user)
assert result == expected_result
@pytest.mark.parametrize(
("method", "resource_name", "user_permissions", "expected_result"),
[
(
"GET",
"custom_resource",
[(ACTION_CAN_READ, "custom_resource")],
True,
),
(
"GET",
"custom_resource",
[(ACTION_CAN_EDIT, "custom_resource")],
False,
),
(
"GET",
"custom_resource",
[(ACTION_CAN_READ, "custom_resource2")],
False,
),
(
"DUMMY",
"custom_resource",
[("DUMMY", "custom_resource")],
True,
),
],
)
def test_is_authorized_custom_view(
self,
method: ResourceMethod | str,
resource_name: str,
user_permissions,
expected_result,
auth_manager,
):
user = Mock()
user.perms = user_permissions
result = auth_manager.is_authorized_custom_view(method=method, resource_name=resource_name, user=user)
assert result == expected_result
@pytest.mark.parametrize(
("menu_items", "user_permissions", "expected_result"),
[
(
[MenuItem.ASSETS, MenuItem.DAGS],
[(ACTION_CAN_ACCESS_MENU, RESOURCE_ASSET), (ACTION_CAN_ACCESS_MENU, RESOURCE_DAG)],
[MenuItem.ASSETS, MenuItem.DAGS],
),
(
[MenuItem.ASSETS, MenuItem.DAGS],
[(ACTION_CAN_READ, RESOURCE_ASSET), (ACTION_CAN_READ, RESOURCE_DAG)],
[],
),
(
[MenuItem.AUDIT_LOG, MenuItem.VARIABLES],
[(ACTION_CAN_ACCESS_MENU, RESOURCE_AUDIT_LOG), (ACTION_CAN_READ, RESOURCE_VARIABLE)],
[MenuItem.AUDIT_LOG],
),
(
[],
[],
[],
),
],
)
def test_filter_authorized_menu_items(
self,
menu_items: list[MenuItem],
user_permissions,
expected_result,
auth_manager,
):
user = Mock()
user.perms = user_permissions
result = auth_manager.filter_authorized_menu_items(menu_items, user=user)
assert result == expected_result
def test_get_authorized_connections(self, auth_manager):
session = Mock()
session.execute.return_value.scalars.return_value.all.return_value = ["conn1", "conn2"]
result = auth_manager.get_authorized_connections(user=Mock(), method="GET", session=session)
assert result == {"conn1", "conn2"}
@pytest.mark.parametrize(
("method", "user_permissions", "expected_results"),
[
# Scenario 1
# With global read permissions on Dags
(
"GET",
[(ACTION_CAN_READ, RESOURCE_DAG)],
{"test_dag1", "test_dag2", "Connections"},
),
# Scenario 2
# With global edit permissions on Dags
(
"PUT",
[(ACTION_CAN_EDIT, RESOURCE_DAG)],
{"test_dag1", "test_dag2", "Connections"},
),
# Scenario 3
# With DAG-specific permissions
(
"GET",
[(ACTION_CAN_READ, "DAG:test_dag1")],
{"test_dag1"},
),
# Scenario 4
# With no permissions
(
"GET",
[],
set(),
),
# Scenario 5
# With read permissions but edit is requested
(
"PUT",
[(ACTION_CAN_READ, RESOURCE_DAG)],
set(),
),
# Scenario 7
# With read permissions but edit is requested
(
"PUT",
[(ACTION_CAN_READ, "DAG:test_dag1")],
set(),
),
# Scenario 8
# With DAG-specific permissions
(
"PUT",
[(ACTION_CAN_EDIT, "DAG:test_dag1"), (ACTION_CAN_EDIT, "DAG:test_dag2")],
{"test_dag1", "test_dag2"},
),
# Scenario 9
# With non-DAG related permissions
(
"GET",
[(ACTION_CAN_READ, "DAG:test_dag1"), (ACTION_CAN_READ, RESOURCE_CONNECTION)],
{"test_dag1"},
),
],
)
def test_get_authorized_dag_ids(
self, method, user_permissions, expected_results, auth_manager_with_appbuilder, flask_app, dag_maker
):
with dag_maker("test_dag1"):
EmptyOperator(task_id="task1")
if AIRFLOW_V_3_1_PLUS:
sync_dag_to_db(dag_maker.dag)
with dag_maker("test_dag2"):
EmptyOperator(task_id="task2")
if AIRFLOW_V_3_1_PLUS:
sync_dag_to_db(dag_maker.dag)
with dag_maker("Connections"):
EmptyOperator(task_id="task3")
if AIRFLOW_V_3_1_PLUS:
sync_dag_to_db(dag_maker.dag)
dag_maker.session.commit()
dag_maker.session.close()
user = create_user(
flask_app,
username="username",
role_name="test",
permissions=user_permissions,
)
auth_manager_with_appbuilder.security_manager.sync_perm_for_dag("test_dag1")
auth_manager_with_appbuilder.security_manager.sync_perm_for_dag("test_dag2")
results = auth_manager_with_appbuilder.get_authorized_dag_ids(user=user, method=method)
assert results == expected_results
delete_user(flask_app, "username")
def test_get_authorized_pools(self, auth_manager):
session = Mock()
session.execute.return_value.scalars.return_value.all.return_value = ["pool1", "pool2"]
result = auth_manager.get_authorized_pools(user=Mock(), method="GET", session=session)
assert result == {"pool1", "pool2"}
def test_get_authorized_variables(self, auth_manager):
session = Mock()
session.execute.return_value.scalars.return_value.all.return_value = ["var1", "var2"]
result = auth_manager.get_authorized_variables(user=Mock(), method="GET", session=session)
assert result == {"var1", "var2"}
def test_security_manager_return_fab_security_manager_override(self, auth_manager_with_appbuilder):
assert isinstance(auth_manager_with_appbuilder.security_manager, FabAirflowSecurityManagerOverride)
def test_security_manager_return_custom_provided(self, flask_app, auth_manager_with_appbuilder):
class TestSecurityManager(FabAirflowSecurityManagerOverride):
pass
flask_app.config["SECURITY_MANAGER_CLASS"] = TestSecurityManager
# Invalidate the cache
del auth_manager_with_appbuilder.__dict__["security_manager"]
assert isinstance(auth_manager_with_appbuilder.security_manager, TestSecurityManager)
def test_security_manager_wrong_inheritance_raise_exception(
self, flask_app, auth_manager_with_appbuilder
):
class TestSecurityManager:
pass
flask_app.config["SECURITY_MANAGER_CLASS"] = TestSecurityManager
# Invalidate the cache
del auth_manager_with_appbuilder.__dict__["security_manager"]
with pytest.raises(
AirflowConfigException,
match="Your CUSTOM_SECURITY_MANAGER must extend FabAirflowSecurityManagerOverride.",
):
auth_manager_with_appbuilder.security_manager
def test_get_url_login(self, auth_manager):
result = auth_manager.get_url_login()
assert result == f"{AUTH_MANAGER_FASTAPI_APP_PREFIX}/login/"
def test_get_url_logout(self, auth_manager):
result = auth_manager.get_url_logout()
assert result == f"{AUTH_MANAGER_FASTAPI_APP_PREFIX}/logout"
@mock.patch.object(FabAuthManager, "_is_authorized", return_value=True)
def test_get_extra_menu_items(self, _, auth_manager_with_appbuilder, flask_app):
result = auth_manager_with_appbuilder.get_extra_menu_items(user=Mock())
assert len(result) == 5
assert all(item.href.startswith(AUTH_MANAGER_FASTAPI_APP_PREFIX) for item in result)
def test_get_db_manager(self, auth_manager):
result = auth_manager.get_db_manager()
assert result == "airflow.providers.fab.auth_manager.models.db.FABDBManager"
@pytest.mark.db_test
@pytest.mark.parametrize("skip_init", [False, True])
@conf_vars(
{("database", "external_db_managers"): "airflow.providers.fab.auth_manager.models.db.FABDBManager"}
)
@mock.patch("airflow.providers.fab.auth_manager.models.db.FABDBManager")
@mock.patch("airflow.utils.db.create_global_lock", new=MagicMock)
@mock.patch("airflow.utils.db.drop_airflow_models")
@mock.patch("airflow.utils.db.drop_airflow_moved_tables")
@mock.patch("airflow.utils.db.initdb")
@mock.patch("airflow.settings.engine.connect")
def test_resetdb(
mock_connect,
mock_init,
mock_drop_moved,
mock_drop_airflow,
mock_fabdb_manager,
skip_init,
):
session_mock = MagicMock()
resetdb(session_mock, skip_init=skip_init)
mock_drop_airflow.assert_called_once_with(mock_connect.return_value)
mock_drop_moved.assert_called_once_with(mock_connect.return_value)
if skip_init:
mock_init.assert_not_called()
else:
mock_init.assert_called_once_with(session=session_mock)
| TestFabAuthManager |
python | jazzband__django-simple-history | simple_history/tests/tests/test_utils.py | {
"start": 14600,
"end": 19021
} | class ____(TestCase):
def setUp(self):
self.data = [
Poll(id=1, question="Question 1", pub_date=timezone.now()),
Poll(id=2, question="Question 2", pub_date=timezone.now()),
Poll(id=3, question="Question 3", pub_date=timezone.now()),
Poll(id=4, question="Question 4", pub_date=timezone.now()),
Poll(id=5, question="Question 5", pub_date=timezone.now()),
]
bulk_create_with_history(self.data, Poll)
self.data[3].question = "Updated question"
def test_bulk_update_history(self):
bulk_update_with_history(
self.data,
Poll,
fields=["question"],
)
self.assertEqual(Poll.objects.count(), 5)
self.assertEqual(Poll.objects.get(id=4).question, "Updated question")
self.assertEqual(Poll.history.count(), 10)
self.assertEqual(Poll.history.filter(history_type="~").count(), 5)
@override_settings(SIMPLE_HISTORY_ENABLED=False)
def test_bulk_update_history_without_history_enabled(self):
self.assertEqual(Poll.history.count(), 5)
# because setup called with enabled settings
bulk_update_with_history(
self.data,
Poll,
fields=["question"],
)
self.assertEqual(Poll.objects.count(), 5)
self.assertEqual(Poll.objects.get(id=4).question, "Updated question")
self.assertEqual(Poll.history.count(), 5)
self.assertEqual(Poll.history.filter(history_type="~").count(), 0)
def test_bulk_update_history_with_default_user(self):
user = User.objects.create_user("tester", "tester@example.com")
bulk_update_with_history(
self.data, Poll, fields=["question"], default_user=user
)
self.assertTrue(
all(
[
history.history_user == user
for history in Poll.history.filter(history_type="~")
]
)
)
def test_bulk_update_history_with_default_change_reason(self):
bulk_update_with_history(
self.data,
Poll,
fields=["question"],
default_change_reason="my change reason",
)
self.assertTrue(
all(
[
history.history_change_reason == "my change reason"
for history in Poll.history.filter(history_type="~")
]
)
)
def test_bulk_update_history_with_default_date(self):
date = datetime(2020, 7, 1)
bulk_update_with_history(
self.data, Poll, fields=["question"], default_date=date
)
self.assertTrue(
all(
[
history.history_date == date
for history in Poll.history.filter(history_type="~")
]
)
)
def test_bulk_update_history_num_queries_is_two(self):
with self.assertNumQueries(2):
bulk_update_with_history(
self.data,
Poll,
fields=["question"],
)
def test_bulk_update_history_on_model_without_history_raises_error(self):
self.data = [
Place(id=1, name="Place 1"),
Place(id=2, name="Place 2"),
Place(id=3, name="Place 3"),
]
Place.objects.bulk_create(self.data)
self.data[0].name = "test"
with self.assertRaises(NotHistoricalModelError):
bulk_update_with_history(self.data, Place, fields=["name"])
def test_num_queries_when_batch_size_is_less_than_total(self):
with self.assertNumQueries(6):
bulk_update_with_history(self.data, Poll, fields=["question"], batch_size=2)
def test_bulk_update_history_with_batch_size(self):
bulk_update_with_history(self.data, Poll, fields=["question"], batch_size=2)
self.assertEqual(Poll.objects.count(), 5)
self.assertEqual(Poll.history.filter(history_type="~").count(), 5)
@skipUnless(django.VERSION >= (4, 0), "Requires Django 4.0 or above")
def test_bulk_update_with_history_returns_rows_updated(self):
rows_updated = bulk_update_with_history(
self.data,
Poll,
fields=["question"],
)
self.assertEqual(rows_updated, 5)
| BulkUpdateWithHistoryTestCase |
python | scrapy__scrapy | tests/test_downloadermiddleware_useragent.py | {
"start": 181,
"end": 2114
} | class ____:
def get_spider_and_mw(self, default_useragent):
crawler = get_crawler(Spider, {"USER_AGENT": default_useragent})
spider = crawler._create_spider("foo")
return spider, UserAgentMiddleware.from_crawler(crawler)
def test_default_agent(self):
_, mw = self.get_spider_and_mw("default_useragent")
req = Request("http://scrapytest.org/")
assert mw.process_request(req) is None
assert req.headers["User-Agent"] == b"default_useragent"
def test_remove_agent(self):
# settings USER_AGENT to None should remove the user agent
spider, mw = self.get_spider_and_mw("default_useragent")
spider.user_agent = None
mw.spider_opened(spider)
req = Request("http://scrapytest.org/")
assert mw.process_request(req) is None
assert req.headers.get("User-Agent") is None
def test_spider_agent(self):
spider, mw = self.get_spider_and_mw("default_useragent")
spider.user_agent = "spider_useragent"
mw.spider_opened(spider)
req = Request("http://scrapytest.org/")
assert mw.process_request(req) is None
assert req.headers["User-Agent"] == b"spider_useragent"
def test_header_agent(self):
spider, mw = self.get_spider_and_mw("default_useragent")
spider.user_agent = "spider_useragent"
mw.spider_opened(spider)
req = Request(
"http://scrapytest.org/", headers={"User-Agent": "header_useragent"}
)
assert mw.process_request(req) is None
assert req.headers["User-Agent"] == b"header_useragent"
def test_no_agent(self):
spider, mw = self.get_spider_and_mw(None)
spider.user_agent = None
mw.spider_opened(spider)
req = Request("http://scrapytest.org/")
assert mw.process_request(req) is None
assert "User-Agent" not in req.headers
| TestUserAgentMiddleware |
python | django__django | tests/model_fields/models.py | {
"start": 4069,
"end": 4172
} | class ____(models.Model):
title = models.CharField(max_length=100)
body = models.TextField()
| Post |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 578235,
"end": 578554
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("ReleaseAsset", graphql_name="node")
| ReleaseAssetEdge |
python | weaviate__weaviate-python-client | weaviate/collections/classes/internal.py | {
"start": 6620,
"end": 6883
} | class ____(Generic[P, R]):
"""A group of objects returned in a group by query."""
name: str
min_distance: float
max_distance: float
number_of_objects: int
objects: List[GroupByObject[P, R]]
rerank_score: Optional[float]
@dataclass
| Group |
python | pypa__pip | tests/lib/__init__.py | {
"start": 6084,
"end": 6875
} | class ____(Mapping[StrPath, FoundFile]):
def __init__(self, paths: Mapping[str, FoundFile]) -> None:
self._paths = {pathlib.Path(k): v for k, v in paths.items()}
def __contains__(self, o: object) -> bool:
if isinstance(o, pathlib.Path):
return o in self._paths
elif isinstance(o, str):
return pathlib.Path(o) in self._paths
return False
def __len__(self) -> int:
return len(self._paths)
def __getitem__(self, k: StrPath) -> FoundFile:
if isinstance(k, pathlib.Path):
return self._paths[k]
elif isinstance(k, str):
return self._paths[pathlib.Path(k)]
raise KeyError(k)
def __iter__(self) -> Iterator[pathlib.Path]:
return iter(self._paths)
| FoundFiles |
python | bottlepy__bottle | bottle.py | {
"start": 5527,
"end": 6442
} | class ____:
""" Property that maps to a key in a local dict-like attribute. """
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
| DictProperty |
python | PyCQA__bandit | tests/unit/formatters/test_yaml.py | {
"start": 386,
"end": 3605
} | class ____(testtools.TestCase):
def setUp(self):
super().setUp()
conf = config.BanditConfig()
self.manager = manager.BanditManager(conf, "file")
(tmp_fd, self.tmp_fname) = tempfile.mkstemp()
self.context = {
"filename": self.tmp_fname,
"lineno": 4,
"linerange": [4],
}
self.check_name = "hardcoded_bind_all_interfaces"
self.issue = issue.Issue(
bandit.MEDIUM,
123,
bandit.MEDIUM,
"Possible binding to all interfaces.",
)
self.candidates = [
issue.Issue(bandit.LOW, 123, bandit.LOW, "Candidate A", lineno=1),
issue.Issue(bandit.HIGH, 123, bandit.HIGH, "Candiate B", lineno=2),
]
self.manager.out_file = self.tmp_fname
self.issue.fname = self.context["filename"]
self.issue.lineno = self.context["lineno"]
self.issue.linerange = self.context["linerange"]
self.issue.test = self.check_name
self.manager.results.append(self.issue)
self.manager.metrics = metrics.Metrics()
# mock up the metrics
for key in ["_totals", "binding.py"]:
self.manager.metrics.data[key] = {"loc": 4, "nosec": 2}
for criteria, default in constants.CRITERIA:
for rank in constants.RANKING:
self.manager.metrics.data[key][f"{criteria}.{rank}"] = 0
@mock.patch("bandit.core.manager.BanditManager.get_issue_list")
def test_report(self, get_issue_list):
self.manager.files_list = ["binding.py"]
self.manager.scores = [
{
"SEVERITY": [0] * len(constants.RANKING),
"CONFIDENCE": [0] * len(constants.RANKING),
}
]
get_issue_list.return_value = collections.OrderedDict(
[(self.issue, self.candidates)]
)
with open(self.tmp_fname, "w") as tmp_file:
b_json.report(
self.manager,
tmp_file,
self.issue.severity,
self.issue.confidence,
)
with open(self.tmp_fname) as f:
data = yaml.load(f.read(), Loader=yaml.SafeLoader)
self.assertIsNotNone(data["generated_at"])
self.assertEqual(self.tmp_fname, data["results"][0]["filename"])
self.assertEqual(
self.issue.severity, data["results"][0]["issue_severity"]
)
self.assertEqual(
self.issue.confidence, data["results"][0]["issue_confidence"]
)
self.assertEqual(self.issue.text, data["results"][0]["issue_text"])
self.assertEqual(
self.context["lineno"], data["results"][0]["line_number"]
)
self.assertEqual(
self.context["linerange"], data["results"][0]["line_range"]
)
self.assertEqual(self.check_name, data["results"][0]["test_name"])
self.assertIn("candidates", data["results"][0])
self.assertIn("more_info", data["results"][0])
self.assertIsNotNone(data["results"][0]["more_info"])
| YamlFormatterTests |
python | psf__black | tests/data/cases/preview_long_strings__regression.py | {
"start": 37239,
"end": 39299
} | class ____(xxxx.xxxxxxxxxxxxx):
def xxxxxxx_xxxxxx(xxxx):
assert xxxxxxx_xxxx in [
x.xxxxx.xxxxxx.xxxxx.xxxxxx,
x.xxxxx.xxxxxx.xxxxx.xxxx,
], (
"xxxxxxxxxxx xxxxxxx xxxx (xxxxxx xxxx) %x xxx xxxxx" % xxxxxxx_xxxx
)
value.__dict__[key] = (
"test" # set some Thrift field to non-None in the struct aa bb cc dd ee
)
RE_ONE_BACKSLASH = {
"asdf_hjkl_jkl": re.compile(
r"(?<!([0-9]\ ))(?<=(^|\ ))([A-Z]+(\ )?|[0-9](\ )|[a-z](\ )){4,7}([A-Z]|[0-9]|[a-z])($|\b)(?!(\ ?([0-9]\ )|(\.)))"
),
}
RE_TWO_BACKSLASHES = {
"asdf_hjkl_jkl": re.compile(
r"(?<!([0-9]\ ))(?<=(^|\ ))([A-Z]+(\ )?|[0-9](\ )|[a-z](\\"
r" )){4,7}([A-Z]|[0-9]|[a-z])($|\b)(?!(\ ?([0-9]\ )|(\.)))"
),
}
RE_THREE_BACKSLASHES = {
"asdf_hjkl_jkl": re.compile(
r"(?<!([0-9]\ ))(?<=(^|\ ))([A-Z]+(\ )?|[0-9](\ )|[a-z](\\\ )){4,7}([A-Z]|[0-9]|[a-z])($|\b)(?!(\ ?([0-9]\ )|(\.)))"
),
}
# We do NOT split on f-string expressions.
print(
"Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam."
f" {[f'{i}' for i in range(10)]}"
)
x = (
"This is a long string which contains an f-expr that should not split"
f" {{{[i for i in range(5)]}}}."
)
# The parens should NOT be removed in this case.
(
"my very long string that should get formatted if I'm careful to make sure it goes"
" over 88 characters which it has now"
)
# The parens should NOT be removed in this case.
(
"my very long string that should get formatted if I'm careful to make sure it goes"
" over 88 characters which it has now"
)
# The parens should NOT be removed in this case.
(
"my very long string"
" that should get formatted"
" if I'm careful to make sure"
" it goes over 88 characters which"
" it has now"
)
def _legacy_listen_examples():
text += (
" \"listen for the '%(event_name)s' event\"\n"
"\n # ... (event logic logic logic) ...\n"
% {
"since": since,
}
)
| xxxxxxxxxxxxxxxxxxxxx |
python | sphinx-doc__sphinx | sphinx/domains/cpp/__init__.py | {
"start": 16694,
"end": 16757
} | class ____(CPPObject):
object_type = 'member'
| CPPMemberObject |
python | spyder-ide__spyder | spyder/plugins/application/widgets/status.py | {
"start": 2376,
"end": 5180
} | class ____(BaseTimerStatus):
"""Status bar widget for current file read/write mode."""
ID = "inapp_appeal_status"
CONF_SECTION = "main"
INTERACT_ON_CLICK = True
DAYS_TO_SHOW_AGAIN = 15
def __init__(self, parent=None):
super().__init__(parent)
self._is_shown = False
self._appeal_dialog = None
# We don't need to show a label for this widget
self.label_value.setVisible(False)
# Update status every hour
self.set_interval(60 * 60 * 1000)
# Show appeal on click
self.sig_clicked.connect(self._on_click)
# ---- Private API
# -------------------------------------------------------------------------
def _on_click(self):
"""Handle widget clicks."""
if self._appeal_dialog is None:
self._appeal_dialog = InAppAppealDialog(self)
if self._appeal_dialog.isVisible():
self._appeal_dialog.hide()
else:
self._appeal_dialog.show()
# ---- Public API
# -------------------------------------------------------------------------
def show_appeal(self):
try:
if self._appeal_dialog is None:
self._appeal_dialog = InAppAppealDialog(self)
if not self._appeal_dialog.isVisible():
self._appeal_dialog.show()
except QtModuleNotInstalledError:
# QtWebEngineWidgets is optional, so just open the URL in the
# default browser.
# See spyder-ide/spyder#24905 for the details.
webbrowser.open("https://www.spyder-ide.org/donate")
# ---- StatusBarWidget API
# -------------------------------------------------------------------------
def get_icon(self):
return self.create_icon("inapp_appeal")
def update_status(self):
"""
Show widget for a day after a certain number of days, then hide it.
"""
today = datetime.date.today()
last_date = self.get_conf("last_inapp_appeal", default="")
if last_date:
delta = today - datetime.date.fromisoformat(last_date)
if 0 < delta.days < self.DAYS_TO_SHOW_AGAIN:
self.setVisible(False)
else:
self.setVisible(True)
self.set_conf("last_inapp_appeal", str(today))
else:
self.set_conf("last_inapp_appeal", str(today))
def get_tooltip(self):
return _("Help Spyder!")
# ---- Qt methods
# -------------------------------------------------------------------------
def showEvent(self, event):
super().showEvent(event)
# Hide widget if necessary at startup
if not self._is_shown:
self.update_status()
self._is_shown = True
| InAppAppealStatus |
python | plotly__plotly.py | plotly/graph_objs/violin/_legendgrouptitle.py | {
"start": 233,
"end": 2932
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "violin"
_path_str = "violin.legendgrouptitle"
_valid_props = {"font", "text"}
@property
def font(self):
"""
Sets this legend group's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.violin.legendgrouptitle.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.violin.legendgrouptitle.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
"""
Sets the title of the legend group.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this legend group's title font.
text
Sets the title of the legend group.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Legendgrouptitle object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.violin.Legendgrouptitle`
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
Legendgrouptitle
"""
super().__init__("legendgrouptitle")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.violin.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.violin.Legendgrouptitle`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Legendgrouptitle |
python | pandas-dev__pandas | asv_bench/benchmarks/frame_methods.py | {
"start": 22553,
"end": 22937
} | class ____:
params = (
[True, False],
["float64", "Float64", "float64[pyarrow]"],
)
param_names = ["dtype"]
def setup(self, inplace, dtype):
self.df = DataFrame(np.random.randn(100_000, 10), dtype=dtype)
self.mask = self.df < 0
def time_where(self, inplace, dtype):
self.df.where(self.mask, other=0.0, inplace=inplace)
| Where |
python | pypa__warehouse | tests/unit/macaroons/test_caveats.py | {
"start": 5078,
"end": 6048
} | class ____:
def test_verify_not_before(self):
not_before = int(time.time()) + 60
expiry = not_before + 60
caveat = Expiration(expires_at=expiry, not_before=not_before)
result = caveat.verify(pretend.stub(), pretend.stub(), pretend.stub())
assert result == Failure("token is expired")
def test_verify_already_expired(self):
not_before = int(time.time()) - 10
expiry = not_before - 5
caveat = Expiration(expires_at=expiry, not_before=not_before)
result = caveat.verify(pretend.stub(), pretend.stub(), pretend.stub())
assert result == Failure("token is expired")
def test_verify_ok(self):
not_before = int(time.time()) - 10
expiry = int(time.time()) + 60
caveat = Expiration(expires_at=expiry, not_before=not_before)
result = caveat.verify(pretend.stub(), pretend.stub(), pretend.stub())
assert result == Success()
| TestExpirationCaveat |
python | Lightning-AI__lightning | tests/tests_pytorch/checkpointing/test_model_checkpoint.py | {
"start": 35546,
"end": 35718
} | class ____(BoringModel):
def on_validation_end(self):
if not self.trainer.sanity_checking:
raise RuntimeError("Trouble!")
| TroubledModelOnValidationEnd |
python | tiangolo__fastapi | docs_src/dependencies/tutorial008c.py | {
"start": 71,
"end": 657
} | class ____(Exception):
pass
def get_username():
try:
yield "Rick"
except InternalError:
print("Oops, we didn't raise again, Britney 😱")
@app.get("/items/{item_id}")
def get_item(item_id: str, username: str = Depends(get_username)):
if item_id == "portal-gun":
raise InternalError(
f"The portal gun is too dangerous to be owned by {username}"
)
if item_id != "plumbus":
raise HTTPException(
status_code=404, detail="Item not found, there's only a plumbus here"
)
return item_id
| InternalError |
python | getsentry__sentry | tests/sentry/preprod/test_models.py | {
"start": 708,
"end": 7363
} | class ____(PreprodArtifactModelTestBase):
"""Tests for get_sibling_artifacts_for_commit method."""
def test_get_sibling_artifacts_for_commit_single_artifact(self):
"""Test getting artifacts when there's only one artifact for the commit."""
commit_comparison = CommitComparison.objects.create(
organization_id=self.organization.id,
head_sha="a" * 40,
base_sha="b" * 40,
provider="github",
head_repo_name="owner/repo",
base_repo_name="owner/repo",
head_ref="feature/test",
base_ref="main",
)
artifact = PreprodArtifact.objects.create(
project=self.project,
state=PreprodArtifact.ArtifactState.PROCESSED,
app_id="com.example.app",
commit_comparison=commit_comparison,
)
artifacts = list(artifact.get_sibling_artifacts_for_commit())
assert len(artifacts) == 1
assert artifacts[0] == artifact
def test_get_sibling_artifacts_for_commit_multiple_artifacts_same_commit(self):
"""Test getting artifacts when multiple artifacts exist for the same commit (monorepo)."""
commit_comparison = CommitComparison.objects.create(
organization_id=self.organization.id,
head_sha="a" * 40,
base_sha="b" * 40,
provider="github",
head_repo_name="owner/repo",
base_repo_name="owner/repo",
head_ref="feature/test",
base_ref="main",
)
artifacts = []
app_ids = ["com.example.android", "com.example.ios", "com.example.web"]
for app_id in app_ids:
artifact = PreprodArtifact.objects.create(
project=self.project,
state=PreprodArtifact.ArtifactState.PROCESSED,
app_id=app_id,
commit_comparison=commit_comparison,
)
artifacts.append(artifact)
sibling_artifacts = list(artifacts[0].get_sibling_artifacts_for_commit())
assert len(sibling_artifacts) == 3
assert set(sibling_artifacts) == set(artifacts)
def test_get_sibling_artifacts_for_commit_different_commits_excluded(self):
"""Test that artifacts from different commits are excluded."""
commit_comparison_1 = CommitComparison.objects.create(
organization_id=self.organization.id,
head_sha="a" * 40,
base_sha="b" * 40,
provider="github",
head_repo_name="owner/repo",
base_repo_name="owner/repo",
head_ref="feature/test1",
base_ref="main",
)
commit_comparison_2 = CommitComparison.objects.create(
organization_id=self.organization.id,
head_sha="c" * 40,
base_sha="d" * 40,
provider="github",
head_repo_name="owner/repo",
base_repo_name="owner/repo",
head_ref="feature/test2",
base_ref="main",
)
artifact_1 = PreprodArtifact.objects.create(
project=self.project,
state=PreprodArtifact.ArtifactState.PROCESSED,
app_id="com.example.app1",
commit_comparison=commit_comparison_1,
)
artifact_2 = PreprodArtifact.objects.create(
project=self.project,
state=PreprodArtifact.ArtifactState.PROCESSED,
app_id="com.example.app2",
commit_comparison=commit_comparison_2,
)
artifacts_1 = list(artifact_1.get_sibling_artifacts_for_commit())
assert len(artifacts_1) == 1
assert artifacts_1[0] == artifact_1
artifacts_2 = list(artifact_2.get_sibling_artifacts_for_commit())
assert len(artifacts_2) == 1
assert artifacts_2[0] == artifact_2
def test_get_sibling_artifacts_for_commit_cross_org_security(self):
"""Test that artifacts from different organizations are excluded for security."""
# Create second organization
other_org = self.create_organization(name="other_org")
other_project = self.create_project(organization=other_org, name="other_project")
# Create commit comparison for first org
commit_comparison_org1 = CommitComparison.objects.create(
organization_id=self.organization.id,
head_sha="a" * 40,
base_sha="b" * 40,
provider="github",
head_repo_name="owner/repo",
base_repo_name="owner/repo",
head_ref="feature/test",
base_ref="main",
)
# Create commit comparison for second org with same commit SHA
commit_comparison_org2 = CommitComparison.objects.create(
organization_id=other_org.id,
head_sha="a" * 40, # Same SHA as org1
base_sha="b" * 40,
provider="github",
head_repo_name="owner/repo",
base_repo_name="owner/repo",
head_ref="feature/test",
base_ref="main",
)
# Create artifacts in each org
artifact_org1 = PreprodArtifact.objects.create(
project=self.project,
state=PreprodArtifact.ArtifactState.PROCESSED,
app_id="com.example.app1",
commit_comparison=commit_comparison_org1,
)
artifact_org2 = PreprodArtifact.objects.create(
project=other_project,
state=PreprodArtifact.ArtifactState.PROCESSED,
app_id="com.example.app2",
commit_comparison=commit_comparison_org2,
)
# Query for org1 artifacts should only return org1 artifacts
artifacts_org1 = list(artifact_org1.get_sibling_artifacts_for_commit())
assert len(artifacts_org1) == 1
assert artifacts_org1[0] == artifact_org1
# Query for org2 artifacts should only return org2 artifacts
artifacts_org2 = list(artifact_org2.get_sibling_artifacts_for_commit())
assert len(artifacts_org2) == 1
assert artifacts_org2[0] == artifact_org2
def test_get_sibling_artifacts_for_commit_no_commit_comparison(self):
"""Test that method returns empty queryset when artifact has no commit_comparison."""
artifact = PreprodArtifact.objects.create(
project=self.project,
state=PreprodArtifact.ArtifactState.PROCESSED,
app_id="com.example.app",
commit_comparison=None,
)
artifacts = list(artifact.get_sibling_artifacts_for_commit())
assert len(artifacts) == 0
@region_silo_test
| PreprodArtifactSiblingArtifactsTest |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/partition_sets.py | {
"start": 2584,
"end": 2902
} | class ____(graphene.Union):
class Meta:
types = (
GrapheneAddDynamicPartitionSuccess,
GrapheneUnauthorizedError,
GraphenePythonError,
GrapheneDuplicateDynamicPartitionError,
)
name = "AddDynamicPartitionResult"
| GrapheneAddDynamicPartitionResult |
python | realpython__materials | build-a-rest-api-frontend/source_code_final/models.py | {
"start": 101,
"end": 422
} | class ____(db.Model):
__tablename__ = "note"
id = db.Column(db.Integer, primary_key=True)
person_id = db.Column(db.Integer, db.ForeignKey("person.id"))
content = db.Column(db.String, nullable=False)
timestamp = db.Column(
db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow
)
| Note |
python | kamyu104__LeetCode-Solutions | Python/finding-the-number-of-visible-mountains.py | {
"start": 46,
"end": 670
} | class ____(object):
def visibleMountains(self, peaks):
"""
:type peaks: List[List[int]]
:rtype: int
"""
peaks.sort(key=lambda x: (x[0]-x[1], -(x[0]+x[1]))) # rotate points by 45 degrees and we only care the largest new y in the same new x
result = mx = 0
for i in xrange(len(peaks)):
if peaks[i][0]+peaks[i][1] <= mx:
continue
mx = peaks[i][0]+peaks[i][1]
if i+1 == len(peaks) or peaks[i+1] != peaks[i]:
result += 1
return result
# Time: O(nlogn)
# Space: O(n)
# sort, mono stack
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.