language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
numpy__numpy
|
tools/swig/test/testFortran.py
|
{
"start": 1407,
"end": 1674
}
|
class ____(FortranTestCase):
def __init__(self, methodName="runTest"):
FortranTestCase.__init__(self, methodName)
self.typeStr = "schar"
self.typeCode = "b"
######################################################################
|
scharTestCase
|
python
|
mlflow__mlflow
|
mlflow/genai/scorers/base.py
|
{
"start": 1843,
"end": 3673
}
|
class ____:
"""
Dataclass defining the serialization schema for Scorer objects.
"""
# Core scorer fields
name: str
aggregations: list[str] | None = None
description: str | None = None
# Version metadata
mlflow_version: str = mlflow.__version__
serialization_version: int = _SERIALIZATION_VERSION
# Builtin scorer fields (for scorers from mlflow.genai.scorers.builtin_scorers)
builtin_scorer_class: str | None = None
builtin_scorer_pydantic_data: dict[str, Any] | None = None
# Decorator scorer fields (for @scorer decorated functions)
call_source: str | None = None
call_signature: str | None = None
original_func_name: str | None = None
# InstructionsJudge fields (for make_judge created judges)
instructions_judge_pydantic_data: dict[str, Any] | None = None
def __post_init__(self):
"""Validate that exactly one type of scorer fields is present."""
has_builtin_fields = self.builtin_scorer_class is not None
has_decorator_fields = self.call_source is not None
has_instructions_fields = self.instructions_judge_pydantic_data is not None
# Count how many field types are present
field_count = sum([has_builtin_fields, has_decorator_fields, has_instructions_fields])
if field_count == 0:
raise ValueError(
"SerializedScorer must have either builtin scorer fields "
"(builtin_scorer_class), decorator scorer fields (call_source), "
"or instructions judge fields (instructions_judge_pydantic_data) present"
)
if field_count > 1:
raise ValueError(
"SerializedScorer cannot have multiple types of scorer fields "
"present simultaneously"
)
|
SerializedScorer
|
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/experimental/rpc/rpc_ops.py
|
{
"start": 2293,
"end": 4513
}
|
class ____(object):
"""A Server base class for accepting RPCs for registered tf.functions.
Functions can be registered on the server and are exposed via RPCs.
"""
@staticmethod
def create(rpc_layer, address):
"""Create TF RPC server at given address.
Args:
rpc_layer: Communication layer between client and server. Only "grpc" rpc
layer is supported at the moment.
address: Address where RPC server is hosted.
Returns:
An instance of `tf.distribute.experimental.rpc.Server` class.
Raises:
A ValueError if rpc_layer other than "grpc" is used. Only GRPC
is supported at the moment.
Example usage:
>>> import portpicker
>>> @tf.function(input_signature=[
... tf.TensorSpec([], tf.int32),
... tf.TensorSpec([], tf.int32)])
... def remote_fn(a, b):
... return tf.add(a, b)
>>> port = portpicker.pick_unused_port()
>>> address = "localhost:{}".format(port)
>>> server = tf.distribute.experimental.rpc.Server.create("grpc", address)
>>> server.register("addition", remote_fn)
>>> server.start()
"""
if rpc_layer != "grpc":
raise ValueError("Only GRPC backend is supported at the moment.")
return GrpcServer(address=address)
def register(self, method_name: str,
func: Union[def_function.Function,
tf_function.ConcreteFunction]):
"""Method for registering tf.function on server.
Registered methods can be invoked remotely from clients.
Args:
method_name: Name of the tf.function. Clients use this method_name to make
RPCs.
func: A `tf.function` or ConcreteFunction to register.
"""
raise NotImplementedError("Please use create_server method to create a"
"concrete subclass of Server.")
def start(self):
"""Starts the RPC server on provided address.
Server listens for new requests from client, once it is started.
"""
raise NotImplementedError("Please use create_server method to create a"
"concrete subclass of Server.")
@tf_export("distribute.experimental.rpc.Client", v1=[])
|
Server
|
python
|
scikit-image__scikit-image
|
tests/skimage/feature/test_texture.py
|
{
"start": 9188,
"end": 12925
}
|
class ____:
def setup_method(self):
self.image = np.array(
[
[255, 6, 255, 0, 141, 0],
[48, 250, 204, 166, 223, 63],
[8, 0, 159, 50, 255, 30],
[167, 255, 63, 40, 128, 255],
[0, 255, 30, 34, 255, 24],
[146, 241, 255, 0, 189, 126],
],
dtype=np.uint8,
)
@run_in_parallel()
def test_default(self):
lbp = local_binary_pattern(self.image, 8, 1, 'default')
ref = np.array(
[
[0, 251, 0, 255, 96, 255],
[143, 0, 20, 153, 64, 56],
[238, 255, 12, 191, 0, 252],
[129, 64.0, 62, 159, 199, 0],
[255, 4, 255, 175, 0, 254],
[3, 5, 0, 255, 4, 24],
]
)
np.testing.assert_array_equal(lbp, ref)
def test_ror(self):
lbp = local_binary_pattern(self.image, 8, 1, 'ror')
ref = np.array(
[
[0, 127, 0, 255, 3, 255],
[31, 0, 5, 51, 1, 7],
[119, 255, 3, 127, 0, 63],
[3, 1, 31, 63, 31, 0],
[255, 1, 255, 95, 0, 127],
[3, 5, 0, 255, 1, 3],
]
)
np.testing.assert_array_equal(lbp, ref)
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_float_warning(self, dtype):
image = self.image.astype(dtype)
msg = "Applying `local_binary_pattern` to floating-point images"
with expected_warnings([msg]):
lbp = local_binary_pattern(image, 8, 1, 'ror')
ref = np.array(
[
[0, 127, 0, 255, 3, 255],
[31, 0, 5, 51, 1, 7],
[119, 255, 3, 127, 0, 63],
[3, 1, 31, 63, 31, 0],
[255, 1, 255, 95, 0, 127],
[3, 5, 0, 255, 1, 3],
]
)
np.testing.assert_array_equal(lbp, ref)
def test_uniform(self):
lbp = local_binary_pattern(self.image, 8, 1, 'uniform')
ref = np.array(
[
[0, 7, 0, 8, 2, 8],
[5, 0, 9, 9, 1, 3],
[9, 8, 2, 7, 0, 6],
[2, 1, 5, 6, 5, 0],
[8, 1, 8, 9, 0, 7],
[2, 9, 0, 8, 1, 2],
]
)
np.testing.assert_array_equal(lbp, ref)
def test_var(self):
# Test idea: mean of variance is estimate of overall variance.
# Fix random seed for test stability.
np.random.seed(13141516)
# Create random image with known variance.
image = np.random.rand(500, 500)
target_std = 0.3
image = image / image.std() * target_std
# Use P=4 to avoid interpolation effects
P, R = 4, 1
msg = "Applying `local_binary_pattern` to floating-point images"
with expected_warnings([msg]):
lbp = local_binary_pattern(image, P, R, 'var')
# Take central part to avoid border effect.
lbp = lbp[5:-5, 5:-5]
# The LBP variance is biased (ddof=0), correct for that.
expected = target_std**2 * (P - 1) / P
np.testing.assert_almost_equal(lbp.mean(), expected, 4)
def test_nri_uniform(self):
lbp = local_binary_pattern(self.image, 8, 1, 'nri_uniform')
ref = np.array(
[
[0, 54, 0, 57, 12, 57],
[34, 0, 58, 58, 3, 22],
[58, 57, 15, 50, 0, 47],
[10, 3, 40, 42, 35, 0],
[57, 7, 57, 58, 0, 56],
[9, 58, 0, 57, 7, 14],
]
)
np.testing.assert_array_almost_equal(lbp, ref)
|
TestLBP
|
python
|
readthedocs__readthedocs.org
|
readthedocs/builds/filters.py
|
{
"start": 533,
"end": 2939
}
|
class ____(ModelFilterSet):
"""Project build list dashboard filter."""
STATE_ACTIVE = "active"
STATE_SUCCESS = "succeeded"
STATE_FAILED = "failed"
STATE_CHOICES = (
(STATE_ACTIVE, _("Active")),
(STATE_SUCCESS, _("Build successful")),
(STATE_FAILED, _("Build failed")),
)
TYPE_NORMAL = "normal"
TYPE_EXTERNAL = "external"
TYPE_CHOICES = (
(TYPE_NORMAL, _("Normal")),
(TYPE_EXTERNAL, _("Pull/merge request")),
)
# Attribute filter fields
version__slug = FilteredModelChoiceFilter(
label=_("Version"),
empty_label=_("All versions"),
to_field_name="slug",
queryset_method="get_version_queryset",
method="get_version",
)
state = ChoiceFilter(
label=_("State"),
choices=STATE_CHOICES,
empty_label=_("Any"),
method="get_state",
)
version__type = ChoiceFilter(
label=_("Type"),
choices=TYPE_CHOICES,
empty_label=_("Any"),
method="get_version_type",
)
def __init__(self, *args, project=None, **kwargs):
self.project = project
super().__init__(*args, **kwargs)
def get_version(self, queryset, _, version):
return queryset.filter(version__slug=version.slug)
def get_version_queryset(self):
# Copied from the version listing view. We need this here as this is
# what allows the build version list to populate. Otherwise the
# ``all()`` queryset method is used.
return self.project.versions(manager=INTERNAL).public(
user=self.request.user,
)
def get_state(self, queryset, _, value):
if value == self.STATE_ACTIVE:
queryset = queryset.exclude(state__in=BUILD_FINAL_STATES)
elif value == self.STATE_SUCCESS:
queryset = queryset.filter(state=BUILD_STATE_FINISHED, success=True)
elif value == self.STATE_FAILED:
queryset = queryset.filter(
state__in=BUILD_FINAL_STATES,
success=False,
)
return queryset
def get_version_type(self, queryset, _, value):
if value == self.TYPE_NORMAL:
queryset = queryset.exclude(version__type=EXTERNAL)
elif value == self.TYPE_EXTERNAL:
queryset = queryset.filter(version__type=EXTERNAL)
return queryset
|
BuildListFilter
|
python
|
gevent__gevent
|
src/greentest/3.9/test_threading.py
|
{
"start": 2385,
"end": 30789
}
|
class ____(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if hasattr(threading, 'get_native_id'):
native_ids = set(t.native_id for t in threads) | {threading.get_native_id()}
self.assertNotIn(None, native_ids)
self.assertEqual(len(native_ids), NUMTASKS + 1)
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.currentThread().ident)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
#Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
try:
result = set_async_exc(tid, exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(-1, exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(t.id, exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=support.SHORT_TIMEOUT)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'needs os.fork()')
def test_fork_at_exit(self):
# bpo-42350: Calling os.fork() after threading._shutdown() must
# not log an error.
code = textwrap.dedent("""
import atexit
import os
import sys
from test.support import wait_process
# Import the threading module to register its "at fork" callback
import threading
def exit_handler():
pid = os.fork()
if not pid:
print("child process ok", file=sys.stderr, flush=True)
# child process
sys.exit()
else:
wait_process(pid, exitcode=0)
# exit_handler() will be called after threading._shutdown()
atexit.register(exit_handler)
""")
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err.rstrip(), b'child process ok')
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
test.support.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(11 if t.is_alive() else 10)
else:
t.join()
support.wait_process(pid, exitcode=10)
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
from test import support
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
support.wait_process(pid, exitcode=0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
from test import support
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
support.wait_process(pid, exitcode=0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
def test_main_thread_during_shutdown(self):
# bpo-31516: current_thread() should still point to the main thread
# at shutdown
code = """if 1:
import gc, threading
main_thread = threading.current_thread()
assert main_thread is threading.main_thread() # sanity check
class RefCycle:
def __init__(self):
self.cycle = self
def __del__(self):
print("GC:",
threading.current_thread() is main_thread,
threading.main_thread() is main_thread,
threading.enumerate() == [main_thread])
RefCycle()
gc.collect() # sanity check
x = RefCycle()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode()
self.assertEqual(err, b"")
self.assertEqual(data.splitlines(),
["GC: True True True"] * 2)
def test_finalization_shutdown(self):
# bpo-36402: Py_Finalize() calls threading._shutdown() which must wait
# until Python thread states of all non-daemon threads get deleted.
#
# Test similar to SubinterpThreadingTests.test_threads_join_2(), but
# test the finalization of the main interpreter.
code = """if 1:
import os
import threading
import time
import random
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_Finalize() is called.
random_sleep()
tls.x = Sleeper()
random_sleep()
threading.Thread(target=f).start()
random_sleep()
"""
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(err, b"")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=support.SHORT_TIMEOUT), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertIsNone(t._tstate_lock)
t.join()
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
@cpython_only
def test_shutdown_locks(self):
for daemon in (False, True):
with self.subTest(daemon=daemon):
event = threading.Event()
thread = threading.Thread(target=event.wait, daemon=daemon)
# Thread.start() must add lock to _shutdown_locks,
# but only for non-daemon thread
thread.start()
tstate_lock = thread._tstate_lock
if not daemon:
self.assertIn(tstate_lock, threading._shutdown_locks)
else:
self.assertNotIn(tstate_lock, threading._shutdown_locks)
# unblock the thread and join it
event.set()
thread.join()
# Thread._stop() must remove tstate_lock from _shutdown_locks.
# Daemon threads must never add it to _shutdown_locks.
self.assertNotIn(tstate_lock, threading._shutdown_locks)
def test_locals_at_exit(self):
# bpo-19466: thread locals must not be deleted before destructors
# are called
rc, out, err = assert_python_ok("-c", """if 1:
import threading
class Atexit:
def __del__(self):
print("thread_dict.atexit = %r" % thread_dict.atexit)
thread_dict = threading.local()
thread_dict.atexit = "value"
atexit = Atexit()
""")
self.assertEqual(out.rstrip(), b"thread_dict.atexit = 'value'")
def test_leak_without_join(self):
# bpo-37788: Test that a thread which is not joined explicitly
# does not leak. Test written for reference leak checks.
def noop(): pass
with support.wait_threads_exit():
threading.Thread(target=noop).start()
# Thread.join() is not called
def test_import_from_another_thread(self):
# bpo-1596321: If the threading module is first import from a thread
# different than the main thread, threading._shutdown() must handle
# this case without logging an error at Python exit.
code = textwrap.dedent('''
import _thread
import sys
event = _thread.allocate_lock()
event.acquire()
def import_threading():
import threading
event.release()
if 'threading' in sys.modules:
raise Exception('threading is already imported')
_thread.start_new_thread(import_threading, ())
# wait until the threading module is imported
event.acquire()
event.release()
if 'threading' not in sys.modules:
raise Exception('threading is not imported')
# don't wait until the thread completes
''')
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
|
ThreadTests
|
python
|
celery__celery
|
t/unit/backends/test_base.py
|
{
"start": 7330,
"end": 10442
}
|
class ____:
def setup_method(self):
self.b = BaseBackend(self.app)
@self.app.task(shared=False)
def callback(result):
pass
self.callback = callback
def test__forget(self):
with pytest.raises(NotImplementedError):
self.b._forget('SOMExx-N0Nex1stant-IDxx-')
def test_forget(self):
with pytest.raises(NotImplementedError):
self.b.forget('SOMExx-N0nex1stant-IDxx-')
def test_on_chord_part_return(self):
self.b.on_chord_part_return(None, None, None)
def test_apply_chord(self, unlock='celery.chord_unlock'):
self.app.tasks[unlock] = Mock()
header_result_args = (
uuid(),
[self.app.AsyncResult(x) for x in range(3)],
)
self.b.apply_chord(header_result_args, self.callback.s())
assert self.app.tasks[unlock].apply_async.call_count
def test_chord_unlock_queue(self, unlock='celery.chord_unlock'):
self.app.tasks[unlock] = Mock()
header_result_args = (
uuid(),
[self.app.AsyncResult(x) for x in range(3)],
)
body = self.callback.s()
self.b.apply_chord(header_result_args, body)
called_kwargs = self.app.tasks[unlock].apply_async.call_args[1]
assert called_kwargs['queue'] == 'testcelery'
routing_queue = Mock()
routing_queue.name = "routing_queue"
self.app.amqp.router.route = Mock(return_value={
"queue": routing_queue
})
self.b.apply_chord(header_result_args, body)
assert self.app.amqp.router.route.call_args[0][1] == body.name
called_kwargs = self.app.tasks[unlock].apply_async.call_args[1]
assert called_kwargs["queue"] == "routing_queue"
self.b.apply_chord(header_result_args, body.set(queue='test_queue'))
called_kwargs = self.app.tasks[unlock].apply_async.call_args[1]
assert called_kwargs['queue'] == 'test_queue'
@self.app.task(shared=False, queue='test_queue_two')
def callback_queue(result):
pass
self.b.apply_chord(header_result_args, callback_queue.s())
called_kwargs = self.app.tasks[unlock].apply_async.call_args[1]
assert called_kwargs['queue'] == 'test_queue_two'
with self.Celery() as app2:
@app2.task(name='callback_different_app', shared=False)
def callback_different_app(result):
pass
callback_different_app_signature = self.app.signature('callback_different_app')
self.b.apply_chord(header_result_args, callback_different_app_signature)
called_kwargs = self.app.tasks[unlock].apply_async.call_args[1]
assert called_kwargs['queue'] == 'routing_queue'
callback_different_app_signature.set(queue='test_queue_three')
self.b.apply_chord(header_result_args, callback_different_app_signature)
called_kwargs = self.app.tasks[unlock].apply_async.call_args[1]
assert called_kwargs['queue'] == 'test_queue_three'
|
test_BaseBackend_interface
|
python
|
PrefectHQ__prefect
|
tests/runner/test_runner.py
|
{
"start": 2992,
"end": 3099
}
|
class ____:
@flow
@classmethod
def dummy_flow_classmethod(cls):
pass
|
ClassNameClassmethod
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pyupgrade/UP004.py
|
{
"start": 992,
"end": 1036
}
|
class ____(object):
...
import builtins
|
B
|
python
|
pydantic__pydantic
|
pydantic/deprecated/config.py
|
{
"start": 2508,
"end": 2663
}
|
class ____(metaclass=_ExtraMeta):
allow: Literal['allow'] = 'allow'
ignore: Literal['ignore'] = 'ignore'
forbid: Literal['forbid'] = 'forbid'
|
Extra
|
python
|
huggingface__transformers
|
src/transformers/models/marian/modeling_marian.py
|
{
"start": 19069,
"end": 19953
}
|
class ____(PreTrainedModel):
config: MarianConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, MarianSinusoidalPositionalEmbedding):
init.copy_(module.weight, module.create_weight())
@property
def dummy_inputs(self):
pad_token = self.config.pad_token_id
input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
dummy_inputs = {
"attention_mask": input_ids.ne(pad_token),
"input_ids": input_ids,
"decoder_input_ids": input_ids,
}
return dummy_inputs
|
MarianPreTrainedModel
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1030641,
"end": 1031164
}
|
class ____(sgqlc.types.Type):
"""Autogenerated return type of UpdateIpAllowListEntry"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "ip_allow_list_entry")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
ip_allow_list_entry = sgqlc.types.Field("IpAllowListEntry", graphql_name="ipAllowListEntry")
"""The IP allow list entry that was updated."""
|
UpdateIpAllowListEntryPayload
|
python
|
getsentry__sentry
|
src/sentry/projects/services/project_key/model.py
|
{
"start": 380,
"end": 781
}
|
class ____(Enum):
store = "store"
api = "api"
def as_orm_role(self) -> Any:
from sentry.models.projectkey import ProjectKey
if self == ProjectKeyRole.store:
return ProjectKey.roles.store
elif self == ProjectKeyRole.api:
return ProjectKey.roles.api
else:
raise ValueError("Unexpected project key role enum")
|
ProjectKeyRole
|
python
|
coleifer__peewee
|
peewee.py
|
{
"start": 269444,
"end": 269806
}
|
class ____(ModelTupleCursorWrapper):
def initialize(self):
self._initialize_columns()
attributes = []
for i in range(self.ncols):
attributes.append(self.columns[i])
self.tuple_class = collections.namedtuple('Row', attributes)
self.constructor = lambda row: self.tuple_class(*row)
|
ModelNamedTupleCursorWrapper
|
python
|
numba__numba
|
numba/tests/test_dictobject.py
|
{
"start": 36339,
"end": 42573
}
|
class ____(MemoryLeakMixin, TestCase):
def test_str_key(self):
@njit
def foo():
d = Dict.empty(
key_type=types.unicode_type,
value_type=types.int32,
)
d["123"] = 123
d["321"] = 321
return d
d = foo()
self.assertEqual(d['123'], 123)
self.assertEqual(d['321'], 321)
expect = {'123': 123, '321': 321}
self.assertEqual(dict(d), expect)
# Test insert replacement
d['123'] = 231
expect['123'] = 231
self.assertEqual(d['123'], 231)
self.assertEqual(dict(d), expect)
# Test dictionary growth
nelem = 100
for i in range(nelem):
d[str(i)] = i
expect[str(i)] = i
for i in range(nelem):
self.assertEqual(d[str(i)], i)
self.assertEqual(dict(d), expect)
def test_str_val(self):
@njit
def foo():
d = Dict.empty(
key_type=types.int32,
value_type=types.unicode_type,
)
d[123] = "123"
d[321] = "321"
return d
d = foo()
self.assertEqual(d[123], '123')
self.assertEqual(d[321], '321')
expect = {123: '123', 321: '321'}
self.assertEqual(dict(d), expect)
# Test insert replacement
d[123] = "231"
expect[123] = "231"
self.assertEqual(dict(d), expect)
# Test dictionary growth
nelem = 1
for i in range(nelem):
d[i] = str(i)
expect[i] = str(i)
for i in range(nelem):
self.assertEqual(d[i], str(i))
self.assertEqual(dict(d), expect)
def test_str_key_array_value(self):
np.random.seed(123)
d = Dict.empty(
key_type=types.unicode_type,
value_type=types.float64[:],
)
expect = []
expect.append(np.random.random(10))
d['mass'] = expect[-1]
expect.append(np.random.random(20))
d['velocity'] = expect[-1]
for i in range(100):
expect.append(np.random.random(i))
d[str(i)] = expect[-1]
self.assertEqual(len(d), len(expect))
self.assertPreciseEqual(d['mass'], expect[0])
self.assertPreciseEqual(d['velocity'], expect[1])
# Ordering is kept
for got, exp in zip(d.values(), expect):
self.assertPreciseEqual(got, exp)
# Try deleting
self.assertTrue('mass' in d)
self.assertTrue('velocity' in d)
del d['mass']
self.assertFalse('mass' in d)
del d['velocity']
self.assertFalse('velocity' in d)
del expect[0:2]
for i in range(90):
k, v = d.popitem()
w = expect.pop()
self.assertPreciseEqual(v, w)
# Trigger a resize
expect.append(np.random.random(10))
d["last"] = expect[-1]
# Ordering is kept
for got, exp in zip(d.values(), expect):
self.assertPreciseEqual(got, exp)
def test_dict_of_dict_int_keyval(self):
def inner_numba_dict():
d = Dict.empty(
key_type=types.intp,
value_type=types.intp,
)
return d
d = Dict.empty(
key_type=types.intp,
value_type=types.DictType(types.intp, types.intp),
)
def usecase(d, make_inner_dict):
for i in range(100):
mid = make_inner_dict()
for j in range(i + 1):
mid[j] = j * 10000
d[i] = mid
return d
got = usecase(d, inner_numba_dict)
expect = usecase({}, dict)
self.assertIsInstance(expect, dict)
self.assertEqual(dict(got), expect)
# Delete items
for where in [12, 3, 6, 8, 10]:
del got[where]
del expect[where]
self.assertEqual(dict(got), expect)
def test_dict_of_dict_npm(self):
inner_dict_ty = types.DictType(types.intp, types.intp)
@njit
def inner_numba_dict():
d = Dict.empty(
key_type=types.intp,
value_type=types.intp,
)
return d
@njit
def foo(count):
d = Dict.empty(
key_type=types.intp,
value_type=inner_dict_ty,
)
for i in range(count):
d[i] = inner_numba_dict()
for j in range(i + 1):
d[i][j] = j
return d
d = foo(100)
ct = 0
for k, dd in d.items():
ct += 1
self.assertEqual(len(dd), k + 1)
for kk, vv in dd.items():
self.assertEqual(kk, vv)
self.assertEqual(ct, 100)
def test_delitem(self):
d = Dict.empty(types.int64, types.unicode_type)
d[1] = 'apple'
@njit
def foo(x, k):
del x[1]
foo(d, 1)
self.assertEqual(len(d), 0)
self.assertFalse(d)
def test_getitem_return_type(self):
# Dict.__getitem__ must return non-optional type.
d = Dict.empty(types.int64, types.int64[:])
d[1] = np.arange(10, dtype=np.int64)
@njit
def foo(d):
d[1] += 100
return d[1]
foo(d)
# Return type is an array, not optional
retty = foo.nopython_signatures[0].return_type
self.assertIsInstance(retty, types.Array)
self.assertNotIsInstance(retty, types.Optional)
# Value is correctly updated
self.assertPreciseEqual(d[1], np.arange(10, dtype=np.int64) + 100)
def test_storage_model_mismatch(self):
# https://github.com/numba/numba/issues/4520
# check for storage model mismatch in refcount ops generation
dct = Dict()
ref = [
("a", True, "a"),
("b", False, "b"),
("c", False, "c"),
]
# populate
for x in ref:
dct[x] = x
# test
for i, x in enumerate(ref):
self.assertEqual(dct[x], x)
|
TestDictRefctTypes
|
python
|
apache__airflow
|
helm-tests/tests/helm_tests/webserver/test_ingress_flower.py
|
{
"start": 932,
"end": 9864
}
|
class ____:
"""Tests ingress flower."""
def test_should_pass_validation_with_just_ingress_enabled_v1(self):
render_chart(
values={"flower": {"enabled": True}, "ingress": {"flower": {"enabled": True}}},
show_only=["templates/flower/flower-ingress.yaml"],
) # checks that no validation exception is raised
def test_should_pass_validation_with_just_ingress_enabled_v1beta1(self):
render_chart(
values={"flower": {"enabled": True}, "ingress": {"flower": {"enabled": True}}},
show_only=["templates/flower/flower-ingress.yaml"],
kubernetes_version="1.16.0",
) # checks that no validation exception is raised
def test_should_allow_more_than_one_annotation(self):
docs = render_chart(
values={
"ingress": {"flower": {"enabled": True, "annotations": {"aa": "bb", "cc": "dd"}}},
"flower": {"enabled": True},
},
show_only=["templates/flower/flower-ingress.yaml"],
)
assert jmespath.search("metadata.annotations", docs[0]) == {"aa": "bb", "cc": "dd"}
def test_should_set_ingress_class_name(self):
docs = render_chart(
values={
"ingress": {"enabled": True, "flower": {"ingressClassName": "foo"}},
"flower": {"enabled": True},
},
show_only=["templates/flower/flower-ingress.yaml"],
)
assert jmespath.search("spec.ingressClassName", docs[0]) == "foo"
def test_should_ingress_hosts_objs_have_priority_over_host(self):
docs = render_chart(
values={
"flower": {"enabled": True},
"ingress": {
"flower": {
"enabled": True,
"tls": {"enabled": True, "secretName": "oldsecret"},
"hosts": [
{"name": "*.a-host", "tls": {"enabled": True, "secretName": "newsecret1"}},
{"name": "b-host", "tls": {"enabled": True, "secretName": "newsecret2"}},
{"name": "c-host", "tls": {"enabled": True, "secretName": "newsecret1"}},
{"name": "d-host", "tls": {"enabled": False, "secretName": ""}},
{"name": "e-host"},
],
"host": "old-host",
},
},
},
show_only=["templates/flower/flower-ingress.yaml"],
)
assert jmespath.search("spec.rules[*].host", docs[0]) == [
"*.a-host",
"b-host",
"c-host",
"d-host",
"e-host",
]
assert jmespath.search("spec.tls[*]", docs[0]) == [
{"hosts": ["*.a-host"], "secretName": "newsecret1"},
{"hosts": ["b-host"], "secretName": "newsecret2"},
{"hosts": ["c-host"], "secretName": "newsecret1"},
]
def test_should_ingress_hosts_strs_have_priority_over_host(self):
docs = render_chart(
values={
"flower": {"enabled": True},
"ingress": {
"flower": {
"enabled": True,
"tls": {"enabled": True, "secretName": "secret"},
"hosts": ["*.a-host", "b-host", "c-host", "d-host"],
"host": "old-host",
},
},
},
show_only=["templates/flower/flower-ingress.yaml"],
)
assert jmespath.search("spec.rules[*].host", docs[0]) == ["*.a-host", "b-host", "c-host", "d-host"]
assert jmespath.search("spec.tls[*]", docs[0]) == [
{"hosts": ["*.a-host", "b-host", "c-host", "d-host"], "secretName": "secret"}
]
def test_should_ingress_deprecated_host_and_top_level_tls_still_work(self):
docs = render_chart(
values={
"flower": {"enabled": True},
"ingress": {
"flower": {
"enabled": True,
"tls": {"enabled": True, "secretName": "supersecret"},
"host": "old-host",
},
},
},
show_only=["templates/flower/flower-ingress.yaml"],
)
assert (
["old-host"]
== jmespath.search("spec.rules[*].host", docs[0])
== list(itertools.chain.from_iterable(jmespath.search("spec.tls[*].hosts", docs[0])))
)
def test_should_ingress_host_entry_not_exist(self):
docs = render_chart(
values={"flower": {"enabled": True}, "ingress": {"flower": {"enabled": True}}},
show_only=["templates/flower/flower-ingress.yaml"],
)
assert not jmespath.search("spec.rules[*].host", docs[0])
@pytest.mark.parametrize(
("global_value", "flower_value", "expected"),
[
(None, None, False),
(None, False, False),
(None, True, True),
(False, None, False),
(True, None, True),
(False, True, True), # We will deploy it if _either_ are true
(True, False, True),
],
)
def test_ingress_created(self, global_value, flower_value, expected):
values = {"flower": {"enabled": True}, "ingress": {}}
if global_value is not None:
values["ingress"]["enabled"] = global_value
if flower_value is not None:
values["ingress"]["flower"] = {"enabled": flower_value}
if values["ingress"] == {}:
del values["ingress"]
docs = render_chart(values=values, show_only=["templates/flower/flower-ingress.yaml"])
assert expected == (len(docs) == 1)
def test_ingress_not_created_flower_disabled(self):
docs = render_chart(
values={
"ingress": {
"flower": {"enabled": True},
}
},
show_only=["templates/flower/flower-ingress.yaml"],
)
assert len(docs) == 0
def test_should_add_component_specific_labels(self):
docs = render_chart(
values={
"ingress": {"enabled": True},
"flower": {
"enabled": True,
"labels": {"test_label": "test_label_value"},
},
},
show_only=["templates/flower/flower-ingress.yaml"],
)
assert "test_label" in jmespath.search("metadata.labels", docs[0])
assert jmespath.search("metadata.labels", docs[0])["test_label"] == "test_label_value"
def test_can_ingress_hosts_be_templated(self):
docs = render_chart(
values={
"testValues": {
"scalar": "aa",
"list": ["bb", "cc"],
"dict": {
"key": "dd",
},
},
"flower": {"enabled": True},
"ingress": {
"flower": {
"enabled": True,
"hosts": [
{"name": "*.{{ .Release.Namespace }}.example.com"},
{"name": "{{ .Values.testValues.scalar }}.example.com"},
{"name": "{{ index .Values.testValues.list 1 }}.example.com"},
{"name": "{{ .Values.testValues.dict.key }}.example.com"},
],
},
},
},
show_only=["templates/flower/flower-ingress.yaml"],
namespace="airflow",
)
assert jmespath.search("spec.rules[*].host", docs[0]) == [
"*.airflow.example.com",
"aa.example.com",
"cc.example.com",
"dd.example.com",
]
def test_backend_service_name(self):
docs = render_chart(
values={"ingress": {"enabled": True}, "flower": {"enabled": True}},
show_only=["templates/flower/flower-ingress.yaml"],
)
assert (
jmespath.search("spec.rules[0].http.paths[0].backend.service.name", docs[0])
== "release-name-flower"
)
def test_backend_service_name_with_fullname_override(self):
docs = render_chart(
values={
"fullnameOverride": "test-basic",
"useStandardNaming": True,
"ingress": {"enabled": True},
"flower": {"enabled": True},
},
show_only=["templates/flower/flower-ingress.yaml"],
)
assert (
jmespath.search("spec.rules[0].http.paths[0].backend.service.name", docs[0])
== "test-basic-flower"
)
|
TestIngressFlower
|
python
|
facebook__pyre-check
|
tools/upgrade/commands/codemods.py
|
{
"start": 652,
"end": 2929
}
|
class ____(Command):
def __init__(self, *, repository: Repository, only_fix_error_code: int) -> None:
super().__init__(repository)
self._only_fix_error_code: int = only_fix_error_code
@staticmethod
def from_arguments(
arguments: argparse.Namespace, repository: Repository
) -> "MissingOverrideReturnAnnotations":
return MissingOverrideReturnAnnotations(
repository=repository, only_fix_error_code=arguments.only_fix_error_code
)
@classmethod
def add_arguments(cls, parser: argparse.ArgumentParser) -> None:
super(MissingOverrideReturnAnnotations, cls).add_arguments(parser)
parser.set_defaults(command=cls.from_arguments)
parser.add_argument(
"--only-fix-error-code",
type=int,
help="Only add fixmes for errors with this specific error code.",
default=None,
)
@override
def run(self) -> None:
errors = Errors.from_stdin(self._only_fix_error_code)
for path, errors_for_path in errors.paths_to_errors.items():
LOG.info("Patching errors in `%s`.", path)
errors_for_path = sorted(
errors_for_path, key=lambda error: error["line"], reverse=True
)
path = pathlib.Path(path)
lines = path.read_text().split("\n")
for error in errors_for_path:
if error["code"] != 15:
continue
line = error["line"] - 1
match = re.match(r".*`(.*)`\.", error["description"])
if not match:
continue
annotation = match.groups()[0]
# Find last closing parenthesis in after line.
LOG.info("Looking at %d: %s", line, lines[line])
while True:
if "):" in lines[line]:
lines[line] = lines[line].replace("):", ") -> %s:" % annotation)
LOG.info("%d: %s", line, lines[line])
break
else:
line = line + 1
LOG.warning("Writing patched %s", str(path))
path.write_text("\n".join(lines))
|
MissingOverrideReturnAnnotations
|
python
|
conda__conda
|
conda/common/_os/windows.py
|
{
"start": 2263,
"end": 2506
}
|
class ____(IntEnum):
HIDE = 0
MAXIMIZE = 3
MINIMIZE = 6
RESTORE = 9
SHOW = 5
SHOWDEFAULT = 10
SHOWMAXIMIZED = 3
SHOWMINIMIZED = 2
SHOWMINNOACTIVE = 7
SHOWNA = 8
SHOWNOACTIVATE = 4
SHOWNORMAL = 1
|
SW
|
python
|
huggingface__transformers
|
src/transformers/models/ernie/modeling_ernie.py
|
{
"start": 46521,
"end": 50428
}
|
class ____(ErniePreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.ernie = ErnieModel(config)
self.cls = ErnieOnlyNSPHead(config)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
task_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], NextSentencePredictorOutput]:
r"""
task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Task type embedding is a special embedding to represent the characteristic of different tasks, such as
word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
config.task_type_vocab_size-1]
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see `input_ids` docstring). Indices should be in `[0, 1]`:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Example:
```python
>>> from transformers import AutoTokenizer, ErnieForNextSentencePrediction
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
>>> model = ErnieForNextSentencePrediction.from_pretrained("nghuyong/ernie-1.0-base-zh")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
>>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
>>> outputs = model(**encoding, labels=torch.LongTensor([1]))
>>> logits = outputs.logits
>>> assert logits[0, 0] < logits[0, 1] # next sentence was random
```
"""
if "next_sentence_label" in kwargs:
warnings.warn(
"The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
" `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("next_sentence_label")
outputs = self.ernie(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
task_type_ids=task_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
return_dict=True,
**kwargs,
)
pooled_output = outputs[1]
seq_relationship_scores = self.cls(pooled_output)
next_sentence_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
return NextSentencePredictorOutput(
loss=next_sentence_loss,
logits=seq_relationship_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring(
custom_intro="""
Ernie Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
output) e.g. for GLUE tasks.
"""
)
|
ErnieForNextSentencePrediction
|
python
|
xlwings__xlwings
|
xlwings/constants.py
|
{
"start": 48287,
"end": 48525
}
|
class ____:
xlBeginsWith = 2 # from enum XlContainsOperator
xlContains = 0 # from enum XlContainsOperator
xlDoesNotContain = 1 # from enum XlContainsOperator
xlEndsWith = 3 # from enum XlContainsOperator
|
ContainsOperator
|
python
|
huggingface__transformers
|
src/transformers/models/zamba2/modular_zamba2.py
|
{
"start": 3463,
"end": 3509
}
|
class ____(ZambaRMSNorm):
pass
|
Zamba2RMSNorm
|
python
|
scrapy__scrapy
|
tests/spiders.py
|
{
"start": 6538,
"end": 6777
}
|
class ____(SimpleSpider):
name = "asyncdef_asyncio_gen"
async def parse(self, response):
await asyncio.sleep(0.2)
yield {"foo": 42}
self.logger.info(f"Got response {response.status}")
|
AsyncDefAsyncioGenSpider
|
python
|
buildout__buildout
|
src/zc/buildout/easy_install.py
|
{
"start": 67641,
"end": 82809
}
|
class ____(zc.buildout.UserError):
"""A specified version is incompatible with a given requirement.
"""
IncompatibleVersionError = IncompatibleConstraintError # Backward compatibility
def call_pip_install(spec, dest, editable=False):
"""
Call `pip install` from a subprocess to install a
distribution specified by `spec` into `dest`.
For normal (non-editable) installs, returns all the paths inside `dest`
created by the above. For editable installs, it returns the package name.
These very different return values may seem strange, but it is because
what needs to happen afterwards is very different for the two cases.
"""
args = [sys.executable, '-m', 'pip', 'install', '--no-deps', '-t', dest]
package_index_url = index_url()
if package_index_url:
if not urllib.parse.urlsplit(package_index_url).scheme:
# pip 25+ does not accept a directory as index, which buildout
# does support.
package_index_url = Path(package_index_url)
if package_index_url.exists():
package_index_url = package_index_url.expanduser().resolve().as_uri()
else:
package_index_url = None
if package_index_url:
# We could pass the index in the '--index-url' parameter.
# But then some of our tests start failing when we pass an index
# with only a few zc.buildout distributions. Reason is that pip
# will try to find setuptools there as well, as this is needed as
# build-system for most packages. And this fails.
# So we pass the index as *extra* url.
args.extend(["--extra-index-url", package_index_url])
level = logger.getEffectiveLevel()
if level >= logging.INFO:
args.append('-q')
else:
args.append('-v')
if editable:
args.append('-e')
args.append(spec)
try:
from pip._internal.cli.cmdoptions import no_python_version_warning
HAS_WARNING_OPTION = True
except ImportError:
HAS_WARNING_OPTION = False
if HAS_WARNING_OPTION:
if not hasattr(call_pip_install, 'displayed'):
call_pip_install.displayed = True
else:
args.append('--no-python-version-warning')
env = os.environ.copy()
python_path = pip_path[:]
python_path.append(env.get('PYTHONPATH', ''))
env['PYTHONPATH'] = os.pathsep.join(python_path)
if level <= logging.DEBUG:
logger.debug('Running pip install:\n"%s"\npath=%s\n',
'" "'.join(args), pip_path)
sys.stdout.flush() # We want any pending output first
# This will quit the buildout process if there is an error.
output = get_subprocess_output(list(args), env=env)
if level <= logging.DEBUG:
if output:
logger.debug(output)
logger.debug("Pip install completed successfully.")
logger.debug("Contents of %s:", dest)
for entry in os.listdir(dest):
logger.debug("- %s", entry)
split_entries = [os.path.splitext(entry) for entry in os.listdir(dest)]
if editable:
# On setuptools 79 and earlier, the egg-link file is created.
package_name = ""
for base, ext in split_entries:
if ext == ".egg-link":
logger.debug(
"Found .egg-link file after successful pip install of %s",
spec,
)
package_name = base
break
if package_name:
# For pkg_resources style namespaces a .pth file is created,
# for example `plone.app.something-nspkg.pth`. We only need this
# if the name was found. If name was not found, the namespaces
# will be checked in a different way further on.
for base, ext in split_entries:
if ext != ".pth" or not base.endswith("-nspkg"):
continue
logger.debug(
"Found -nspkg.pth file after successful pip install of %s",
spec,
)
logger.warning(
"WARNING: Package %s at %s is using old style namespace packages. "
"You should switch to native namespaces (PEP 420).",
package_name,
spec,
)
# We don't want to analyze this file, so we can only make an
# educated guess about the namespaces.
# Assume at most two dots, that is enough info for our warning.
# `a.b` -> `a`
# `a.b.c` -> `a\na.b`
# `a.b.c.d` -> `a\na.b`
# Get all names except the last one, and then keep the first two:
names = package_name.split(".")[:-1][:2]
if len(names) > 1:
names[1] = ".".join(names)
namespaces = "\n".join(names)
Installer._namespace_packages[package_name] = namespaces
break
return package_name
# With normal (non-editable) installs, there won't be an egg-link file created.
# The same is true for editable installs with setuptools 80+.
# In both cases, we need to look for the .dist-info directory.
try:
distinfo_dir = [
base + ext for base, ext in split_entries if ext == ".dist-info"
][0]
except IndexError:
logger.error(
"No .dist-info directory after successful pip install of %s",
spec)
raise
full_distinfo_dir = os.path.join(dest, distinfo_dir)
distrib = metadata.Distribution.at(full_distinfo_dir)
# On Python 3.10 we could use `distrib.name`.
name = distrib.metadata['Name']
if not name:
logger.error(
"Could not find package name in metadata after installing %s.",
spec,
)
sys.exit(1)
if editable:
namespaces_file = os.path.join(full_distinfo_dir, "namespace_packages.txt")
if os.path.exists(namespaces_file):
logger.warning(
"WARNING: Package %s at %s is using old style namespace packages. "
"You should switch to native namespaces (PEP 420).",
name,
spec,
)
with open(namespaces_file) as myfile:
Installer._namespace_packages[name] = myfile.read()
if not editable:
# TODO: we should no longer make an egg, but we still need some of this.
return make_egg_after_pip_install(dest, distinfo_dir)
return name
def check_namespace_init_file(ns_file):
"""Look for namespace declaration in file.
This can be spelled in different ways. It can be a one-liner:
__import__('pkg_resources').declare_namespace(__name__)
Or it can be a multi-line declaration, including comments, like this:
# See http://peak.telecommunity.com/DevCenter/setuptools#namespace-packages
try:
__import__("pkg_resources").declare_namespace(__name__)
except ImportError:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
"""
logger.debug("Checking namespace __init__.py file: %s", ns_file)
with open(ns_file, 'r') as myfile:
contents = [line for line in myfile.readlines() if line.strip() and not line.strip().startswith('#')]
if len(contents) == 0:
logger.debug("Found too few lines to be a namespace declaration.")
return False
if len(contents) > 6:
logger.debug("Found too many lines to be a namespace declaration.")
return False
for combo in [
("pkg_resources", "declare_namespace"),
("pkgutil", "extend_path"),
]:
found_first = False
for line in contents:
if combo[0] in line:
found_first = True
if found_first and combo[1] in line:
logger.debug("Found namespace declaration in %s", ns_file)
return True
logger.debug("No namespace declaration found in %s", ns_file)
return False
def find_namespace_init_files(directory):
logger.debug("Searching for namespace __init__.py files in %s", directory)
found_files = []
for root, dirs, files in os.walk(directory):
if root.endswith('__pycache__'):
continue
if len(files) != 1 or '__init__.py' not in files:
continue
ns_file = os.path.join(root, '__init__.py')
logger.debug('Found possible namespace __init__.py file: %s', ns_file)
if check_namespace_init_file(ns_file):
found_files.append(ns_file)
if found_files:
logger.debug("Found namespace __init__.py files: %s", found_files)
else:
logger.debug("No namespace __init__.py files found.")
return found_files
def make_egg_after_pip_install(dest, distinfo_dir):
"""build properly named egg directory"""
logger.debug('Making egg in %s from pip installation in %s', dest, distinfo_dir)
# `pip install` does not build the namespace aware __init__.py files.
# In the new situation we are happy with that. But actually, for some
# packages, pip *does* include these files, so we need to remove them.
# For example try `pip install zope.interface==4.1.3`. You get a warning:
# "DEPRECATION: zope.interface is being installed using the legacy
# 'setup.py install' method, because it does not have a 'pyproject.toml'""
# Apparently this has the side effect that the namespace files get installed.
# zope.interface 7.0.3 does not have this problem.
for ns_file in find_namespace_init_files(dest):
os.remove(ns_file)
logger.debug("Removed namespace __init__.py file: %s", ns_file)
# Remove `bin` directory if needed
# as there is no way to avoid script installation
# when running `pip install`
entry_points_file = os.path.join(dest, distinfo_dir, 'entry_points.txt')
if os.path.isfile(entry_points_file):
with open(entry_points_file, encoding='utf-8', errors="replace") as f:
content = f.read()
if "console_scripts" in content or "gui_scripts" in content:
bin_dir = os.path.join(dest, BIN_SCRIPTS)
if os.path.exists(bin_dir):
shutil.rmtree(bin_dir)
# Get actual project name from dist-info directory.
metadata_path = posixpath.join(dest, distinfo_dir, "METADATA")
# The encoding and errors arguments can be needed on Windows.
# See https://github.com/buildout/buildout/issues/722
with open(metadata_path, encoding='utf-8', errors="replace") as fp:
value = fp.read()
metadata = email.parser.Parser().parsestr(value)
project_name = metadata.get("Name")
# Make properly named new egg dir
distro = list(pkg_resources.find_distributions(dest))[0]
if project_name:
distro.project_name = project_name
base = "{}-{}".format(
distro.egg_name(), pkg_resources.get_supported_platform()
)
egg_name = base + '.egg'
new_distinfo_dir = base + '.dist-info'
egg_dir = os.path.join(dest, egg_name)
os.mkdir(egg_dir)
# Move ".dist-info" dir into new egg dir
os.rename(
os.path.join(dest, distinfo_dir),
os.path.join(egg_dir, new_distinfo_dir)
)
top_level_file = os.path.join(egg_dir, new_distinfo_dir, 'top_level.txt')
if os.path.isfile(top_level_file):
with open(top_level_file, encoding='utf-8', errors="replace") as f:
top_levels = filter(
(lambda x: len(x) != 0),
[line.strip() for line in f.readlines()]
)
else:
top_levels = ()
# Move all top_level modules or packages
for top_level in top_levels:
# as package
top_level_dir = os.path.join(dest, top_level)
if os.path.exists(top_level_dir):
shutil.move(top_level_dir, egg_dir)
continue
# as module
top_level_py = top_level_dir + '.py'
if os.path.exists(top_level_py):
shutil.move(top_level_py, egg_dir)
top_level_pyc = top_level_dir + '.pyc'
if os.path.exists(top_level_pyc):
shutil.move(top_level_pyc, egg_dir)
continue
record_file = os.path.join(egg_dir, new_distinfo_dir, 'RECORD')
if os.path.isfile(record_file):
with open(record_file, newline='', encoding='utf-8', errors="replace") as f:
all_files = [row[0] for row in csv.reader(f)]
# There might be some c extensions left over
for entry in all_files:
if entry.endswith(('.pyc', '.pyo')):
continue
dest_entry = os.path.join(dest, entry)
# work around pip install -t bug that leaves entries in RECORD
# that starts with '../../'
if not os.path.abspath(dest_entry).startswith(dest):
continue
egg_entry = os.path.join(egg_dir, entry)
if os.path.exists(dest_entry) and not os.path.exists(egg_entry):
egg_entry_dir = os.path.dirname(egg_entry)
if not os.path.exists(egg_entry_dir):
os.makedirs(egg_entry_dir)
os.rename(dest_entry, egg_entry)
return [egg_dir]
def unpack_egg(location, dest):
# Buildout 2 no longer installs zipped eggs,
# so we always want to unpack it.
# XXX The next line seems double now.
# dest = os.path.join(dest, os.path.basename(location))
setuptools.archive_util.unpack_archive(location, dest)
def unpack_wheel(location, dest):
    """Install the wheel at *location* as an egg directory under *dest*."""
    wheel = Wheel(location)
    # The egg_name method returns a string that includes:
    # platform = None if self.platform == 'any' else get_platform()
    # get_platform is imported from distutils.util, vendorized
    # by setuptools, but this is really just: sysconfig.get_platform()
    # This is the platform where Python got compiled. This may differ
    # from the current platform, and this trips up the logic in
    # pkg_resources.compatible_platforms. We have a patch for that.
    # See the docstring of the Environment class above.
    egg_dir = os.path.join(dest, wheel.egg_name())
    wheel.install_as_egg(egg_dir)
# Dispatch table: archive filename suffix -> unpacker callable(location, dest).
# NOTE(review): the '.whl' entry is deliberately commented out; wheels are
# presumably routed through unpack_wheel() instead -- confirm at the call site.
UNPACKERS = {
    '.egg': unpack_egg,
    # '.whl': setuptools.archive_util.unpack_zipfile,
}
def _get_matching_dist_in_location(dist, location):
    """Return the distribution found at *location*, provided it is the
    only distribution there and matches *dist*; otherwise return None.
    """
    # Getting the dist from the environment causes the distribution
    # meta data to be read.  Cloning isn't good enough.  We must compare
    # dist.parsed_version, not dist.version, because one or the other
    # may be normalized (e.g., 3.3 becomes 3.3.0 when downloaded from
    # PyPI.)
    env = Environment([location])
    found = [candidate for name in env for candidate in env[name]]
    wanted = (normalize_name(dist.project_name), dist.parsed_version)
    actual = [(normalize_name(c.project_name), c.parsed_version) for c in found]
    if actual == [wanted]:
        return found.pop()
    # Falls through: implicitly returns None when the location does not
    # hold exactly the one expected distribution.
|
IncompatibleConstraintError
|
python
|
django__django
|
django/contrib/gis/db/models/functions.py
|
{
"start": 13036,
"end": 13071
}
|
class ____(FromWKB):
pass
|
FromWKT
|
python
|
pandas-dev__pandas
|
pandas/tests/series/test_subclass.py
|
{
"start": 213,
"end": 2131
}
|
class ____:
@pytest.mark.parametrize(
"idx_method, indexer, exp_data, exp_idx",
[
["loc", ["a", "b"], [1, 2], "ab"],
["iloc", [2, 3], [3, 4], "cd"],
],
)
def test_indexing_sliced(self, idx_method, indexer, exp_data, exp_idx):
s = tm.SubclassedSeries([1, 2, 3, 4], index=list("abcd"))
res = getattr(s, idx_method)[indexer]
exp = tm.SubclassedSeries(exp_data, index=list(exp_idx))
tm.assert_series_equal(res, exp)
def test_to_frame(self):
s = tm.SubclassedSeries([1, 2, 3, 4], index=list("abcd"), name="xxx")
res = s.to_frame()
exp = tm.SubclassedDataFrame({"xxx": [1, 2, 3, 4]}, index=list("abcd"))
tm.assert_frame_equal(res, exp)
def test_subclass_unstack(self):
# GH 15564
s = tm.SubclassedSeries([1, 2, 3, 4], index=[list("aabb"), list("xyxy")])
res = s.unstack()
exp = tm.SubclassedDataFrame({"x": [1, 3], "y": [2, 4]}, index=["a", "b"])
tm.assert_frame_equal(res, exp)
def test_subclass_empty_repr(self):
sub_series = tm.SubclassedSeries()
assert "SubclassedSeries" in repr(sub_series)
def test_asof(self):
N = 3
rng = pd.date_range("1/1/1990", periods=N, freq="53s")
s = tm.SubclassedSeries({"A": [np.nan, np.nan, np.nan]}, index=rng)
result = s.asof(rng[-2:])
assert isinstance(result, tm.SubclassedSeries)
def test_explode(self):
s = tm.SubclassedSeries([[1, 2, 3], "foo", [], [3, 4]])
result = s.explode()
assert isinstance(result, tm.SubclassedSeries)
def test_equals(self):
# https://github.com/pandas-dev/pandas/pull/34402
# allow subclass in both directions
s1 = pd.Series([1, 2, 3])
s2 = tm.SubclassedSeries([1, 2, 3])
assert s1.equals(s2)
assert s2.equals(s1)
|
TestSeriesSubclassing
|
python
|
apache__airflow
|
helm-tests/tests/helm_tests/airflow_core/test_api_server.py
|
{
"start": 24338,
"end": 28855
}
|
class ____:
"""Tests api-server service."""
def test_default_service(self):
docs = render_chart(
show_only=["templates/api-server/api-server-service.yaml"],
)
assert jmespath.search("metadata.name", docs[0]) == "release-name-api-server"
assert jmespath.search("metadata.annotations", docs[0]) is None
assert jmespath.search("spec.selector", docs[0]) == {
"tier": "airflow",
"component": "api-server",
"release": "release-name",
}
assert jmespath.search("spec.type", docs[0]) == "ClusterIP"
assert {"name": "api-server", "port": 8080} in jmespath.search("spec.ports", docs[0])
def test_overrides(self):
docs = render_chart(
values={
"ports": {"apiServer": 9000},
"apiServer": {
"service": {
"type": "LoadBalancer",
"loadBalancerIP": "127.0.0.1",
"annotations": {"foo": "bar"},
"loadBalancerSourceRanges": ["10.123.0.0/16"],
},
},
},
show_only=["templates/api-server/api-server-service.yaml"],
)
assert jmespath.search("metadata.annotations", docs[0]) == {"foo": "bar"}
assert jmespath.search("spec.type", docs[0]) == "LoadBalancer"
assert {"name": "api-server", "port": 9000} in jmespath.search("spec.ports", docs[0])
assert jmespath.search("spec.loadBalancerIP", docs[0]) == "127.0.0.1"
assert jmespath.search("spec.loadBalancerSourceRanges", docs[0]) == ["10.123.0.0/16"]
@pytest.mark.parametrize(
("ports", "expected_ports"),
[
([{"port": 8888}], [{"port": 8888}]), # name is optional with a single port
(
[
{
"name": "{{ .Release.Name }}",
"protocol": "UDP",
"port": "{{ .Values.ports.apiServer }}",
}
],
[{"name": "release-name", "protocol": "UDP", "port": 8080}],
),
([{"name": "only_sidecar", "port": "{{ int 9000 }}"}], [{"name": "only_sidecar", "port": 9000}]),
(
[
{"name": "api-server", "port": "{{ .Values.ports.apiServer }}"},
{"name": "sidecar", "port": 80, "targetPort": "sidecar"},
],
[
{"name": "api-server", "port": 8080},
{"name": "sidecar", "port": 80, "targetPort": "sidecar"},
],
),
],
)
def test_ports_overrides(self, ports, expected_ports):
docs = render_chart(
values={"apiServer": {"service": {"ports": ports}}},
show_only=["templates/api-server/api-server-service.yaml"],
)
assert jmespath.search("spec.ports", docs[0]) == expected_ports
def test_should_add_component_specific_labels(self):
docs = render_chart(
values={"apiServer": {"labels": {"test_label": "test_label_value"}}},
show_only=["templates/api-server/api-server-service.yaml"],
)
assert "test_label" in jmespath.search("metadata.labels", docs[0])
assert jmespath.search("metadata.labels", docs[0])["test_label"] == "test_label_value"
@pytest.mark.parametrize(
("ports", "expected_ports"),
[
(
[{"nodePort": "31000", "port": "8080"}],
[{"nodePort": 31000, "port": 8080}],
),
(
[{"port": "8080"}],
[{"port": 8080}],
),
],
)
def test_nodeport_service(self, ports, expected_ports):
docs = render_chart(
values={"apiServer": {"service": {"type": "NodePort", "ports": ports}}},
show_only=["templates/api-server/api-server-service.yaml"],
)
assert jmespath.search("spec.type", docs[0]) == "NodePort"
assert expected_ports == jmespath.search("spec.ports", docs[0])
def test_can_be_disabled(self):
"""
API server service can be disabled by configuration.
"""
docs = render_chart(
values={"apiServer": {"enabled": False}},
show_only=["templates/api-server/api-server-service.yaml"],
)
assert len(docs) == 0
|
TestAPIServerService
|
python
|
encode__starlette
|
starlette/routing.py
|
{
"start": 22382,
"end": 22721
}
|
class ____:
def __init__(self, router: Router):
self._router = router
async def __aenter__(self) -> None:
await self._router.startup()
async def __aexit__(self, *exc_info: object) -> None:
await self._router.shutdown()
def __call__(self: _T, app: object) -> _T:
return self
|
_DefaultLifespan
|
python
|
huggingface__transformers
|
tests/models/granitemoehybrid/test_modeling_granitemoehybrid.py
|
{
"start": 1594,
"end": 2642
}
|
class ____(BambaModelTester):
config_class = GraniteMoeHybridConfig
if is_torch_available():
model_class = GraniteMoeHybridModel
for_causal_lm_class = GraniteMoeHybridForCausalLM
def __init__(
self,
parent,
use_cache=False,
shared_intermediate_size=174,
layer_types=None,
):
super().__init__(parent)
self.shared_intermediate_size = shared_intermediate_size
self.layer_types = layer_types
self.use_cache = use_cache
def _update_layer_configs(self):
super()._update_layer_configs()
# GraniteMoeHybrid uses layer_types instead of attn_layer_indices
self.layer_types = ["mamba"] * self.num_hidden_layers
for idx in self.attn_layer_indices:
self.layer_types[idx] = "attention"
def get_config(self):
return super().get_config(
shared_intermediate_size=self.shared_intermediate_size,
layer_types=self.layer_types,
)
@require_torch
|
GraniteMoeHybridModelTester
|
python
|
geekcomputers__Python
|
Flappy Bird - created with tkinter/Flappy Bird.py
|
{
"start": 836,
"end": 2109
}
|
class ____:
def __init__(self):
self.image = pipe_image
self.x = screen_width
self.y = random.randint(150, screen_height - 150)
self.vel = 5
def update(self):
self.x -= self.vel
def draw(self, screen):
screen.blit(self.image, (self.x, self.y))
screen.blit(
pygame.transform.flip(self.image, False, True),
(self.x, self.y - screen_height),
)
def main():
clock = pygame.time.Clock()
bird = Bird()
pipes = [Pipe()]
score = 0
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
bird.flap()
bird.update()
for pipe in pipes:
pipe.update()
if pipe.x + pipe.image.get_width() < 0:
pipes.remove(pipe)
pipes.append(Pipe())
score += 1
screen.blit(background_image, (0, 0))
bird.draw(screen)
for pipe in pipes:
pipe.draw(screen)
pygame.display.update()
clock.tick(30)
pygame.quit()
if __name__ == "__main__":
main()
|
Pipe
|
python
|
getsentry__sentry
|
tests/sentry/integrations/slack/test_link_team.py
|
{
"start": 4283,
"end": 8124
}
|
class ____(SlackIntegrationLinkTeamTestBase):
def setUp(self) -> None:
super().setUp()
self.url = build_team_linking_url(
integration=self.integration,
slack_id=self.external_id,
channel_id=self.channel_id,
channel_name=self.channel_name,
response_url=self.response_url,
)
self.team = self.create_team(
organization=self.organization, name="Mariachi Band", members=[self.user]
)
def test_link_team(self) -> None:
"""Test that we successfully link a team to a Slack channel"""
response = self.get_success_response()
self.assertTemplateUsed(response, "sentry/integrations/slack/link-team.html")
response = self.get_success_response(data={"team": self.team.id})
self.assertTemplateUsed(response, "sentry/integrations/slack/post-linked-team.html")
external_actors = self.get_linked_teams()
assert len(external_actors) == 1
assert external_actors[0].team_id == self.team.id
assert self.mock_post.call_count == 1
text = self.mock_post.call_args.kwargs["text"]
assert (
f"The {self.team.slug} team will now receive issue alert notifications in the {external_actors[0].external_name} channel."
in text
)
with assume_test_silo_mode(SiloMode.CONTROL):
team_settings = NotificationSettingProvider.objects.filter(
team_id=self.team.id,
provider="slack",
type="alerts",
scope_type="team",
scope_identifier=self.team.id,
value="always",
)
assert len(team_settings) == 1
def test_link_team_valid_through_team_admin(self) -> None:
"""Test that we successfully link a team to a Slack channel as a valid team admin"""
self._create_user_valid_through_team_admin()
self.test_link_team()
def test_link_team_already_linked(self) -> None:
"""Test that if a team has already been linked to a Slack channel when a user tries
to link them again, we reject the attempt and reply with the ALREADY_LINKED_MESSAGE"""
self.link_team()
response = self.get_success_response(data={"team": self.team.id})
self.assertTemplateUsed(response, "sentry/integrations/slack/post-linked-team.html")
assert self.mock_post.call_count == 1
text = self.mock_post.call_args.kwargs["text"]
assert f"The {self.team.slug} team has already been linked to a Slack channel." in text
def test_error_page(self) -> None:
"""Test that we successfully render an error page when bad form data is sent."""
self.get_error_response(
data={"team": ["some", "garbage"]}, status_code=status.HTTP_400_BAD_REQUEST
)
def test_link_team_multiple_organizations(self) -> None:
# Create another organization and team for this user that is linked through `self.integration`.
organization2 = self.create_organization(owner=self.user)
team2 = self.create_team(organization=organization2, members=[self.user])
with assume_test_silo_mode(SiloMode.CONTROL):
self.create_organization_integration(
organization_id=organization2.id, integration=self.integration
)
# Team order should not matter.
for team in (self.team, team2):
response = self.get_success_response(data={"team": team.id})
self.assertTemplateUsed(response, "sentry/integrations/slack/post-linked-team.html")
external_actors = self.get_linked_teams(
organization=team.organization, team_ids=[team.id]
)
assert len(external_actors) == 1
|
SlackIntegrationLinkTeamTest
|
python
|
Textualize__textual
|
src/textual/notifications.py
|
{
"start": 1903,
"end": 3714
}
|
class ____:
"""Class for managing a collection of notifications."""
def __init__(self) -> None:
"""Initialise the notification collection."""
self._notifications: dict[str, Notification] = {}
def _reap(self) -> Self:
"""Remove any expired notifications from the notification collection."""
for notification in list(self._notifications.values()):
if notification.has_expired:
del self._notifications[notification.identity]
return self
def add(self, notification: Notification) -> Self:
"""Add the given notification to the collection of managed notifications.
Args:
notification: The notification to add.
Returns:
Self.
"""
self._reap()._notifications[notification.identity] = notification
return self
def clear(self) -> Self:
"""Clear all the notifications."""
self._notifications.clear()
return self
def __len__(self) -> int:
"""The number of notifications."""
return len(self._reap()._notifications)
def __iter__(self) -> Iterator[Notification]:
return iter(self._reap()._notifications.values())
def __contains__(self, notification: Notification) -> bool:
return notification.identity in self._notifications
def __delitem__(self, notification: Notification) -> None:
try:
del self._reap()._notifications[notification.identity]
except KeyError:
# An attempt to remove a notification we don't know about is a
# no-op. What matters here is that the notification is forgotten
# about, and it looks like a caller has tried to be
# belt-and-braces. We're fine with this.
pass
|
Notifications
|
python
|
huggingface__transformers
|
src/transformers/models/emu3/image_processing_emu3.py
|
{
"start": 1440,
"end": 2734
}
|
class ____(ImagesKwargs, total=False):
ratio: str
image_area: int
def smart_resize(
height: int, width: int, factor: int = 28, min_pixels: int = 56 * 56, max_pixels: int = 14 * 14 * 4 * 1280
):
"""Rescales the image so that the following conditions are met:
1. Both dimensions (height and width) are divisible by 'factor'.
2. The total number of pixels is within the range ['min_pixels', 'max_pixels'].
3. The aspect ratio of the image is maintained as closely as possible.
"""
if max(height, width) / min(height, width) > 200:
raise ValueError(
f"absolute aspect ratio must be smaller than 200, got {max(height, width) / min(height, width)}"
)
h_bar = round(height / factor) * factor
w_bar = round(width / factor) * factor
if h_bar * w_bar > max_pixels:
beta = math.sqrt((height * width) / max_pixels)
h_bar = max(factor, math.floor(height / beta / factor) * factor)
w_bar = max(factor, math.floor(width / beta / factor) * factor)
elif h_bar * w_bar < min_pixels:
beta = math.sqrt(min_pixels / (height * width))
h_bar = math.ceil(height * beta / factor) * factor
w_bar = math.ceil(width * beta / factor) * factor
return h_bar, w_bar
|
Emu3ImageProcessorKwargs
|
python
|
pytorch__pytorch
|
test/dynamo/cpython/3_13/test_ordered_dict.py
|
{
"start": 35580,
"end": 38695
}
|
class ____(OrderedDictTests,
CPythonOrderedDictSideEffects,
__TestCase):
module = c_coll
OrderedDict = c_coll.OrderedDict
check_sizeof = support.check_sizeof
@support.cpython_only
def test_sizeof_exact(self):
OrderedDict = self.OrderedDict
calcsize = struct.calcsize
size = support.calcobjsize
check = self.check_sizeof
basicsize = size('nQ2P' + '3PnPn2P')
keysize = calcsize('n2BI2n')
entrysize = calcsize('n2P')
p = calcsize('P')
nodesize = calcsize('Pn2P')
od = OrderedDict()
check(od, basicsize) # 8byte indices + 8*2//3 * entry table
od.x = 1
check(od, basicsize)
od.update([(i, i) for i in range(3)])
check(od, basicsize + keysize + 8*p + 8 + 5*entrysize + 3*nodesize)
od.update([(i, i) for i in range(3, 10)])
check(od, basicsize + keysize + 16*p + 16 + 10*entrysize + 10*nodesize)
check(od.keys(), size('P'))
check(od.items(), size('P'))
check(od.values(), size('P'))
itersize = size('iP2n2P')
check(iter(od), itersize)
check(iter(od.keys()), itersize)
check(iter(od.items()), itersize)
check(iter(od.values()), itersize)
def test_key_change_during_iteration(self):
OrderedDict = self.OrderedDict
od = OrderedDict.fromkeys('abcde')
self.assertEqual(list(od), list('abcde'))
with self.assertRaises(RuntimeError):
for i, k in enumerate(od):
od.move_to_end(k)
self.assertLess(i, 5)
with self.assertRaises(RuntimeError):
for k in od:
od['f'] = None
with self.assertRaises(RuntimeError):
for k in od:
del od['c']
self.assertEqual(list(od), list('bdeaf'))
def test_iterators_pickling(self):
OrderedDict = self.OrderedDict
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
od = OrderedDict(pairs)
for method_name in ('keys', 'values', 'items'):
meth = getattr(od, method_name)
expected = list(meth())[1:]
for i in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(method_name=method_name, protocol=i):
it = iter(meth())
next(it)
p = pickle.dumps(it, i)
unpickled = pickle.loads(p)
self.assertEqual(list(unpickled), expected)
self.assertEqual(list(it), expected)
@support.cpython_only
def test_weakref_list_is_not_traversed(self):
# Check that the weakref list is not traversed when collecting
# OrderedDict objects. See bpo-39778 for more information.
gc.collect()
x = self.OrderedDict()
x.cycle = x
cycle = []
cycle.append(cycle)
x_ref = weakref.ref(x)
cycle.append(x_ref)
del x, cycle, x_ref
gc.collect()
|
CPythonOrderedDictTests
|
python
|
kamyu104__LeetCode-Solutions
|
Python/number-of-ships-in-a-rectangle.py
|
{
"start": 471,
"end": 1393
}
|
class ____(object):
def countShips(self, sea, topRight, bottomLeft):
"""
:type sea: Sea
:type topRight: Point
:type bottomLeft: Point
:rtype: integer
"""
result = 0
if topRight.x >= bottomLeft.x and \
topRight.y >= bottomLeft.y and \
sea.hasShips(topRight, bottomLeft):
if (topRight.x, topRight.y) == (bottomLeft.x, bottomLeft.y):
return 1
mid_x, mid_y = (topRight.x+bottomLeft.x)//2, (topRight.y+bottomLeft.y)//2
result += self.countShips(sea, topRight, Point(mid_x+1, mid_y+1))
result += self.countShips(sea, Point(mid_x, topRight.y), Point(bottomLeft.x, mid_y+1))
result += self.countShips(sea, Point(topRight.x, mid_y), Point(mid_x+1, bottomLeft.y))
result += self.countShips(sea, Point(mid_x, mid_y), bottomLeft)
return result
|
Solution
|
python
|
django__django
|
tests/admin_utils/models.py
|
{
"start": 119,
"end": 244
}
|
class ____(models.Model):
domain = models.CharField(max_length=100)
def __str__(self):
return self.domain
|
Site
|
python
|
keras-team__keras
|
keras/src/ops/operation_test.py
|
{
"start": 1910,
"end": 2239
}
|
class ____(operation.Operation):
def __init__(self, alpha, *args, name=None):
super().__init__(name=name)
self.alpha = alpha
def call(self, x):
return self.alpha * x + self.beta
def compute_output_spec(self, x):
return keras_tensor.KerasTensor(x.shape, x.dtype)
|
OpWithArgsInConstructor
|
python
|
pyinstaller__pyinstaller
|
tests/unit/test_hookutils.py
|
{
"start": 996,
"end": 1819
}
|
class ____(object):
# Verify that removing a prefix from an empty string is OK.
def test_empty_string(self):
assert '' == hookutils.remove_prefix('', 'prefix')
# An empty prefix should pass the string through unmodified.
def test_emptystr_unmodif(self):
assert 'test' == hookutils.remove_prefix('test', '')
# If the string is the prefix, it should be empty at exit.
def test_string_prefix(self):
assert '' == hookutils.remove_prefix('test', 'test')
# Just the prefix should be removed.
def test_just_prefix(self):
assert 'ing' == hookutils.remove_prefix('testing', 'test')
# A matching string not as prefix should produce no modifications
def test_no_modific(self):
assert 'atest' == hookutils.remove_prefix('atest', 'test')
|
TestRemovePrefix
|
python
|
python-rapidjson__python-rapidjson
|
tests/test_dict_subclass.py
|
{
"start": 884,
"end": 1671
}
|
class ____(ObjectsAsKeyValuePairsDecoder):
def end_object(self, ordered_pairs):
# Adapted from https://stackoverflow.com/a/38307621
d = {}
for k, v in ordered_pairs:
if k in d:
if type(d[k]) == list:
d[k].append(v)
else:
newlist = []
newlist.append(d[k])
newlist.append(v)
d[k] = newlist
else:
d[k] = v
return d
def test_join_duplicated_keys():
jdk = JoinDuplicatedKeysDecoder()
result = jdk('{"a": 1, "b": {"b1": 1, "b2": 2}, "b": {"b1": 3, "b2": 2,"b4": 8}}')
assert result == {'a': 1, 'b': [{'b1': 1, 'b2': 2}, {'b1': 3, 'b2': 2, 'b4': 8}]}
|
JoinDuplicatedKeysDecoder
|
python
|
sphinx-doc__sphinx
|
tests/test_builders/test_build_epub.py
|
{
"start": 825,
"end": 23737
}
|
class ____:
"""Test helper for content.opf and toc.ncx"""
namespaces = {
'idpf': 'http://www.idpf.org/2007/opf',
'dc': 'http://purl.org/dc/elements/1.1/',
'ibooks': 'http://vocabulary.itunes.apple.com/rdf/ibooks/vocabulary-extensions-1.0/',
'ncx': 'http://www.daisy.org/z3986/2005/ncx/',
'xhtml': 'http://www.w3.org/1999/xhtml',
'epub': 'http://www.idpf.org/2007/ops',
}
def __init__(self, tree: ET.Element) -> None:
self.tree = tree
@classmethod
def fromstring(cls, string: str | bytes) -> Self:
tree = ET.fromstring(string) # NoQA: S314 # using known data in tests
return cls(tree)
def find(self, match: str) -> Self:
ret = self.tree.find(match, namespaces=self.namespaces)
assert ret is not None
return self.__class__(ret)
def findall(self, match: str) -> list[Self]:
ret = self.tree.findall(match, namespaces=self.namespaces)
return [self.__class__(e) for e in ret]
@property
def text(self) -> str | None:
return self.tree.text
@property
def attrib(self) -> dict[str, str]:
return self.tree.attrib
def get(self, key: str) -> str | None:
return self.tree.get(key)
def __iter__(self) -> Iterator[Self]:
for child in self.tree:
yield self.__class__(child)
@pytest.mark.sphinx('epub', testroot='basic')
def test_build_epub(app: SphinxTestApp) -> None:
app.build(force_all=True)
assert (app.outdir / 'mimetype').read_text(
encoding='utf8'
) == 'application/epub+zip'
assert (app.outdir / 'META-INF' / 'container.xml').exists()
# toc.ncx
toc = EPUBElementTree.fromstring(
(app.outdir / 'toc.ncx').read_text(encoding='utf8')
)
assert toc.find('./ncx:docTitle/ncx:text').text == 'Project name not set'
# toc.ncx / head
meta = list(toc.find('./ncx:head'))
assert meta[0].attrib == {'name': 'dtb:uid', 'content': 'unknown'}
assert meta[1].attrib == {'name': 'dtb:depth', 'content': '1'}
assert meta[2].attrib == {'name': 'dtb:totalPageCount', 'content': '0'}
assert meta[3].attrib == {'name': 'dtb:maxPageNumber', 'content': '0'}
# toc.ncx / navMap
navpoints = toc.findall('./ncx:navMap/ncx:navPoint')
assert len(navpoints) == 1
assert navpoints[0].attrib == {'id': 'navPoint1', 'playOrder': '1'}
assert navpoints[0].find('./ncx:content').attrib == {'src': 'index.xhtml'}
navlabel = navpoints[0].find('./ncx:navLabel/ncx:text')
assert navlabel.text == 'The basic Sphinx documentation for testing'
# content.opf
opf = EPUBElementTree.fromstring(
(app.outdir / 'content.opf').read_text(encoding='utf8')
)
# content.opf / metadata
metadata = opf.find('./idpf:metadata')
assert metadata.find('./dc:language').text == 'en'
assert metadata.find('./dc:title').text == 'Project name not set'
assert metadata.find('./dc:description').text == 'unknown'
assert metadata.find('./dc:creator').text == 'Author name not set'
assert metadata.find('./dc:contributor').text == 'unknown'
assert metadata.find('./dc:publisher').text == 'Author name not set'
assert metadata.find('./dc:rights').text is None
assert metadata.find("./idpf:meta[@property='ibooks:version']").text is None
assert (
metadata.find("./idpf:meta[@property='ibooks:specified-fonts']").text == 'true'
)
assert metadata.find("./idpf:meta[@property='ibooks:binding']").text == 'true'
assert (
metadata.find("./idpf:meta[@property='ibooks:scroll-axis']").text == 'vertical'
)
# content.opf / manifest
manifest = opf.find('./idpf:manifest')
items = list(manifest)
assert items[0].attrib == {
'id': 'ncx',
'href': 'toc.ncx',
'media-type': 'application/x-dtbncx+xml',
}
assert items[1].attrib == {
'id': 'nav',
'href': 'nav.xhtml',
'media-type': 'application/xhtml+xml',
'properties': 'nav',
}
assert items[2].attrib == {
'id': 'epub-0',
'href': 'genindex.xhtml',
'media-type': 'application/xhtml+xml',
}
assert items[3].attrib == {
'id': 'epub-1',
'href': 'index.xhtml',
'media-type': 'application/xhtml+xml',
}
for i, item in enumerate(items[2:]):
# items are named as epub-NN
assert item.get('id') == 'epub-%d' % i
# content.opf / spine
spine = opf.find('./idpf:spine')
itemrefs = list(spine)
assert spine.get('toc') == 'ncx'
assert spine.get('page-progression-direction') == 'ltr'
assert itemrefs[0].get('idref') == 'epub-1'
assert itemrefs[1].get('idref') == 'epub-0'
# content.opf / guide
reference = opf.find('./idpf:guide/idpf:reference')
assert reference.get('type') == 'toc'
assert reference.get('title') == 'Table of Contents'
assert reference.get('href') == 'index.xhtml'
# nav.xhtml
nav = EPUBElementTree.fromstring(
(app.outdir / 'nav.xhtml').read_text(encoding='utf8')
)
assert nav.attrib == {
'lang': 'en',
'{http://www.w3.org/XML/1998/namespace}lang': 'en',
}
assert nav.find('./xhtml:head/xhtml:title').text == 'Table of Contents'
# nav.xhtml / nav
navlist = nav.find('./xhtml:body/xhtml:nav')
tocs = navlist.findall('./xhtml:ol/xhtml:li')
assert navlist.find('./xhtml:h1').text == 'Table of Contents'
assert len(tocs) == 1
assert tocs[0].find('./xhtml:a').get('href') == 'index.xhtml'
assert (
tocs[0].find('./xhtml:a').text == 'The basic Sphinx documentation for testing'
)
@pytest.mark.sphinx(
'epub',
testroot='footnotes',
confoverrides={'epub_cover': ('_images/rimg.png', None)},
)
def test_epub_cover(app: SphinxTestApp) -> None:
app.build()
# content.opf / metadata
opf = EPUBElementTree.fromstring(
(app.outdir / 'content.opf').read_text(encoding='utf8')
)
cover_image = opf.find(
"./idpf:manifest/idpf:item[@href='%s']" % app.config.epub_cover[0]
)
cover = opf.find("./idpf:metadata/idpf:meta[@name='cover']")
assert cover
assert cover.get('content') == cover_image.get('id')
@pytest.mark.sphinx('epub', testroot='toctree')
def test_nested_toc(app: SphinxTestApp) -> None:
app.build()
# toc.ncx
toc = EPUBElementTree.fromstring((app.outdir / 'toc.ncx').read_bytes())
assert toc.find('./ncx:docTitle/ncx:text').text == 'Project name not set'
# toc.ncx / navPoint
def toc_navpoint_navinfo(
elem: EPUBElementTree,
) -> tuple[str | None, str | None, str | None, str | None]:
label = elem.find('./ncx:navLabel/ncx:text')
content = elem.find('./ncx:content')
return elem.get('id'), elem.get('playOrder'), content.get('src'), label.text
navpoints = toc.findall('./ncx:navMap/ncx:navPoint')
assert len(navpoints) == 4
assert toc_navpoint_navinfo(navpoints[0]) == (
'navPoint1',
'1',
'index.xhtml',
'Welcome to Sphinx Tests’s documentation!',
)
assert navpoints[0].findall('./ncx:navPoint') == []
# toc.ncx / nested navPoints
assert toc_navpoint_navinfo(navpoints[1]) == ('navPoint2', '2', 'foo.xhtml', 'foo')
navchildren = navpoints[1].findall('./ncx:navPoint')
assert len(navchildren) == 4
assert toc_navpoint_navinfo(navchildren[0]) == (
'navPoint3',
'2',
'foo.xhtml',
'foo',
)
assert toc_navpoint_navinfo(navchildren[1]) == (
'navPoint4',
'3',
'quux.xhtml',
'quux',
)
assert toc_navpoint_navinfo(navchildren[2]) == (
'navPoint5',
'4',
'foo.xhtml#foo-1',
'foo.1',
)
assert toc_navpoint_navinfo(navchildren[3]) == (
'navPoint8',
'6',
'foo.xhtml#foo-2',
'foo.2',
)
# nav.xhtml / nav
def nav_nav_navinfo(elem: EPUBElementTree) -> tuple[str | None, str | None]:
anchor = elem.find('./xhtml:a')
return anchor.get('href'), anchor.text
nav = EPUBElementTree.fromstring((app.outdir / 'nav.xhtml').read_bytes())
tocs = nav.findall('./xhtml:body/xhtml:nav/xhtml:ol/xhtml:li')
assert len(tocs) == 4
assert nav_nav_navinfo(tocs[0]) == (
'index.xhtml',
'Welcome to Sphinx Tests’s documentation!',
)
assert tocs[0].findall('./xhtml:ol') == []
# nav.xhtml / nested toc
assert nav_nav_navinfo(tocs[1]) == ('foo.xhtml', 'foo')
tocchildren = tocs[1].findall('./xhtml:ol/xhtml:li')
assert len(tocchildren) == 3
assert nav_nav_navinfo(tocchildren[0]) == ('quux.xhtml', 'quux')
assert nav_nav_navinfo(tocchildren[1]) == ('foo.xhtml#foo-1', 'foo.1')
assert nav_nav_navinfo(tocchildren[2]) == ('foo.xhtml#foo-2', 'foo.2')
grandchild = tocchildren[1].findall('./xhtml:ol/xhtml:li')
assert len(grandchild) == 1
assert nav_nav_navinfo(grandchild[0]) == ('foo.xhtml#foo-1-1', 'foo.1-1')
@pytest.mark.sphinx('epub', testroot='need-escaped')
def test_escaped_toc(app: SphinxTestApp) -> None:
app.build()
# toc.ncx
toc = EPUBElementTree.fromstring((app.outdir / 'toc.ncx').read_bytes())
assert toc.find('./ncx:docTitle/ncx:text').text == 'need <b>"escaped"</b> project'
# toc.ncx / navPoint
def navpoint_navinfo(
elem: EPUBElementTree,
) -> tuple[str | None, str | None, str | None, str | None]:
label = elem.find('./ncx:navLabel/ncx:text')
content = elem.find('./ncx:content')
ret = elem.get('id'), elem.get('playOrder'), content.get('src'), label.text
return ret
navpoints = toc.findall('./ncx:navMap/ncx:navPoint')
assert len(navpoints) == 4
assert navpoint_navinfo(navpoints[0]) == (
'navPoint1',
'1',
'index.xhtml',
"Welcome to Sphinx Tests's documentation!",
)
assert navpoints[0].findall('./ncx:navPoint') == []
# toc.ncx / nested navPoints
assert navpoint_navinfo(navpoints[1]) == ('navPoint2', '2', 'foo.xhtml', '<foo>')
navchildren = navpoints[1].findall('./ncx:navPoint')
assert len(navchildren) == 4
assert navpoint_navinfo(navchildren[0]) == ('navPoint3', '2', 'foo.xhtml', '<foo>')
assert navpoint_navinfo(navchildren[1]) == ('navPoint4', '3', 'quux.xhtml', 'quux')
assert navpoint_navinfo(navchildren[2]) == (
'navPoint5',
'4',
'foo.xhtml#foo-1',
'foo “1”',
)
assert navpoint_navinfo(navchildren[3]) == (
'navPoint8',
'6',
'foo.xhtml#foo-2',
'foo.2',
)
# nav.xhtml / nav
def nav_navinfo(elem: EPUBElementTree) -> tuple[str | None, str | None]:
anchor = elem.find('./xhtml:a')
return anchor.get('href'), anchor.text
nav = EPUBElementTree.fromstring((app.outdir / 'nav.xhtml').read_bytes())
tocs = nav.findall('./xhtml:body/xhtml:nav/xhtml:ol/xhtml:li')
assert len(tocs) == 4
assert nav_navinfo(tocs[0]) == (
'index.xhtml',
"Welcome to Sphinx Tests's documentation!",
)
assert tocs[0].findall('./xhtml:ol') == []
# nav.xhtml / nested toc
assert nav_navinfo(tocs[1]) == ('foo.xhtml', '<foo>')
tocchildren = tocs[1].findall('./xhtml:ol/xhtml:li')
assert len(tocchildren) == 3
assert nav_navinfo(tocchildren[0]) == ('quux.xhtml', 'quux')
assert nav_navinfo(tocchildren[1]) == ('foo.xhtml#foo-1', 'foo “1”')
assert nav_navinfo(tocchildren[2]) == ('foo.xhtml#foo-2', 'foo.2')
grandchild = tocchildren[1].findall('./xhtml:ol/xhtml:li')
assert len(grandchild) == 1
assert nav_navinfo(grandchild[0]) == ('foo.xhtml#foo-1-1', 'foo.1-1')
@pytest.mark.sphinx('epub', testroot='basic')
def test_epub_writing_mode(app: SphinxTestApp) -> None:
# horizontal (default)
app.build(force_all=True)
# horizontal / page-progression-direction
opf = EPUBElementTree.fromstring(
(app.outdir / 'content.opf').read_text(encoding='utf8')
)
assert opf.find('./idpf:spine').get('page-progression-direction') == 'ltr'
# horizontal / ibooks:scroll-axis
metadata = opf.find('./idpf:metadata')
assert (
metadata.find("./idpf:meta[@property='ibooks:scroll-axis']").text == 'vertical'
)
# horizontal / writing-mode (CSS)
css = (app.outdir / '_static' / 'epub.css').read_text(encoding='utf8')
assert 'writing-mode: horizontal-tb;' in css
# vertical
app.config.epub_writing_mode = 'vertical'
(app.outdir / 'index.xhtml').unlink() # forcely rebuild
app.build()
# vertical / page-progression-direction
opf = EPUBElementTree.fromstring(
(app.outdir / 'content.opf').read_text(encoding='utf8')
)
assert opf.find('./idpf:spine').get('page-progression-direction') == 'rtl'
# vertical / ibooks:scroll-axis
metadata = opf.find('./idpf:metadata')
assert (
metadata.find("./idpf:meta[@property='ibooks:scroll-axis']").text
== 'horizontal'
)
# vertical / writing-mode (CSS)
css = (app.outdir / '_static' / 'epub.css').read_text(encoding='utf8')
assert 'writing-mode: vertical-rl;' in css
@pytest.mark.sphinx('epub', testroot='epub-anchor-id')
def test_epub_anchor_id(app: SphinxTestApp) -> None:
app.build()
html = (app.outdir / 'index.xhtml').read_text(encoding='utf8')
assert '<p id="std-setting-STATICFILES_FINDERS">blah blah blah</p>' in html
assert (
'<span id="std-setting-STATICFILES_SECTION"></span><h1>blah blah blah</h1>'
) in html
assert (
'see <a class="reference internal" href="#std-setting-STATICFILES_FINDERS">'
) in html
@pytest.mark.sphinx('epub', testroot='html_assets')
def test_epub_assets(app: SphinxTestApp) -> None:
app.build(force_all=True)
# epub_sytlesheets (same as html_css_files)
content = (app.outdir / 'index.xhtml').read_text(encoding='utf8')
assert (
'<link rel="stylesheet" type="text/css" href="_static/css/style.css" />'
) in content
assert (
'<link media="print" rel="stylesheet" title="title" type="text/css" '
'href="https://example.com/custom.css" />'
) in content
@pytest.mark.sphinx(
'epub',
testroot='html_assets',
confoverrides={'epub_css_files': ['css/epub.css']},
)
def test_epub_css_files(app: SphinxTestApp) -> None:
app.build(force_all=True)
# epub_css_files
content = (app.outdir / 'index.xhtml').read_text(encoding='utf8')
assert (
'<link rel="stylesheet" type="text/css" href="_static/css/epub.css" />'
) in content
# files in html_css_files are not outputted
assert (
'<link rel="stylesheet" type="text/css" href="_static/css/style.css" />'
) not in content
assert (
'<link media="print" rel="stylesheet" title="title" type="text/css" '
'href="https://example.com/custom.css" />'
) not in content
@pytest.mark.sphinx('epub', testroot='roles-download')
def test_html_download_role(app: SphinxTestApp) -> None:
app.build()
assert not (app.outdir / '_downloads' / 'dummy.dat').exists()
content = (app.outdir / 'index.xhtml').read_text(encoding='utf8')
assert (
'<li><p><code class="xref download docutils literal notranslate">'
'<span class="pre">dummy.dat</span></code></p></li>'
) in content
assert (
'<li><p><code class="xref download docutils literal notranslate">'
'<span class="pre">not_found.dat</span></code></p></li>'
) in content
assert (
'<li><p><code class="xref download docutils literal notranslate">'
'<span class="pre">Sphinx</span> <span class="pre">logo</span></code>'
'<span class="link-target"> [https://www.sphinx-doc.org/en/master'
'/_static/sphinx-logo.svg]</span></p></li>'
) in content
@pytest.mark.sphinx('epub', testroot='toctree-duplicated')
def test_duplicated_toctree_entry(app: SphinxTestApp) -> None:
app.build(force_all=True)
assert 'WARNING: duplicated ToC entry found: foo.xhtml' in app.warning.getvalue()
@pytest.mark.skipif(
'DO_EPUBCHECK' not in os.environ,
reason='Skipped because DO_EPUBCHECK is not set',
)
@pytest.mark.sphinx('epub', testroot='root')
def test_run_epubcheck(app: SphinxTestApp) -> None:
app.build()
if not runnable(['java', '-version']):
pytest.skip('Unable to run Java; skipping test')
epubcheck = Path(os.environ.get('EPUBCHECK_PATH', '/usr/share/java/epubcheck.jar'))
if not epubcheck.exists():
pytest.skip('Could not find epubcheck; skipping test')
try:
subprocess.run(
['java', '-jar', epubcheck, app.outdir / 'SphinxTests.epub'], # NoQA: S607
capture_output=True,
check=True,
)
except CalledProcessError as exc:
print(exc.stdout.decode('utf-8'))
print(exc.stderr.decode('utf-8'))
msg = f'epubcheck exited with return code {exc.returncode}'
raise AssertionError(msg) from exc
def test_xml_name_pattern_check() -> None:
assert _XML_NAME_PATTERN.match('id-pub')
assert _XML_NAME_PATTERN.match('webpage')
assert not _XML_NAME_PATTERN.match('1bfda21')
@pytest.mark.usefixtures('_http_teapot')
@pytest.mark.sphinx('epub', testroot='images')
def test_copy_images(app: SphinxTestApp) -> None:
app.build()
images_dir = Path(app.outdir) / '_images'
images = {image.name for image in images_dir.rglob('*')}
images.discard('python-logo.png')
assert images == {
# 'ba30773957c3fe046897111afd65a80b81cad089.png', # epub: image from data:image/png URI in source
'img.png',
'rimg.png',
'rimg1.png',
'svgimg.svg',
'testimäge.png',
}
@pytest.mark.sphinx('epub', testroot='builder-dirhtml')
def test_epub_manifest_path_separator_normalization(app: SphinxTestApp) -> None:
"""Test that path separators are normalized to forward slashes
in EPUB manifests, even on Windows.
"""
app.build()
# Read the content.opf file
opf_path = app.outdir / 'content.opf'
assert opf_path.exists(), 'content.opf was not generated'
# Parse manifest and spine elements
# Verify that all idrefs in spine match ids in manifest
tree = ET.parse(str(opf_path)) # noqa: S314
root = tree.getroot()
# Define namespace
ns = {'opf': 'http://www.idpf.org/2007/opf'}
# Collect items from manifest
manifest_ids: set[str | None] = set()
manifest_hrefs: dict[str, str] = {}
for item in root.findall('.//opf:manifest/opf:item', ns):
item_id: str | None = item.get('id')
item_href: str | None = item.get('href')
manifest_ids.add(item_id)
if item_id is not None and item_href is not None:
manifest_hrefs[item_id] = item_href
# Check idrefs in spine
spine_idrefs = []
for itemref in root.findall('.//opf:spine/opf:itemref', ns):
idref: str | None = itemref.get('idref')
spine_idrefs.append(idref)
# Verify all spine idrefs exist in manifest
for idref in spine_idrefs:
assert idref in manifest_ids, (
f"spine idref '{idref}' does not exist in manifest"
)
# Verify hrefs do not contain backslashes
# (should be normalized to forward slashes even on Windows)
for item_id, href in manifest_hrefs.items():
assert '\\' not in href, (
f"manifest item '{item_id}' href '{href}' contains backslashes"
)
# Verify no duplicate IDs are assigned to the same href
href_to_ids: dict[str, list[str | None]] = {}
for item_id, href in manifest_hrefs.items():
# Normalize path for comparison
normalized_href = href.replace('\\', '/')
if normalized_href not in href_to_ids:
href_to_ids[normalized_href] = []
href_to_ids[normalized_href].append(item_id)
# Detect duplicate IDs
duplicates: dict[str, list[str | None]] = {
href: ids for href, ids in href_to_ids.items() if len(ids) > 1
}
assert not duplicates, f'Multiple IDs assigned to the same file: {duplicates}'
@pytest.mark.sphinx('epub', testroot='builder-dirhtml')
def test_epub_manifest_subdirectory_paths(app: SphinxTestApp) -> None:
"""Test that path separators are correctly normalized to forward slashes
even for paths containing subdirectories.
"""
app.build()
opf_path = app.outdir / 'content.opf'
assert opf_path.exists()
tree = ET.parse(str(opf_path)) # noqa: S314
root = tree.getroot()
ns: dict[str, str] = {'opf': 'http://www.idpf.org/2007/opf'}
# Check all manifest item hrefs
for item in root.findall('.//opf:manifest/opf:item', ns):
href: str | None = item.get('href')
if href is not None:
# Verify no backslashes are present
assert '\\' not in href, (
f"href '{href}' contains backslashes (should be forward slashes)"
)
# For paths with subdirectories, verify they are separated by forward slashes
if href is not None and '/' in href:
# Verify the path is correctly constructed
parts: list[str] = href.split('/')
assert all(parts), f"href '{href}' contains empty path segments"
@pytest.mark.sphinx('epub', testroot='basic')
def test_epub_spine_idref_consistency(app: SphinxTestApp) -> None:
"""Test that spine idrefs and manifest ids are consistent.
Verify that path separator normalization ensures the same file
is reliably referenced with the same ID.
"""
app.build()
opf_path = app.outdir / 'content.opf'
tree = ET.parse(str(opf_path)) # noqa: S314
root = tree.getroot()
ns: dict[str, str] = {'opf': 'http://www.idpf.org/2007/opf'}
# Create id→href mapping from manifest
id_to_href = {}
for item in root.findall('.//opf:manifest/opf:item', ns):
item_id: str | None = item.get('id')
item_href: str | None = item.get('href')
id_to_href[item_id] = item_href
# For each idref in spine, verify corresponding href exists
# and that href is unique
spine_hrefs = []
for itemref in root.findall('.//opf:spine/opf:itemref', ns):
idref: str | None = itemref.get('idref')
assert idref in id_to_href, f"manifest item not found for spine idref '{idref}'"
href = id_to_href[idref]
spine_hrefs.append(href)
# Warn if the same href is referenced multiple times
# (normally each file should appear only once in spine)
href_counts = Counter(spine_hrefs)
duplicated_hrefs: list[str | None] = [
href for href, count in href_counts.items() if count > 1
]
# Note: Some EPUBs may intentionally reference the same file multiple times,
# so this is logged as informational rather than a strict error
assert len(duplicated_hrefs) == 0
|
EPUBElementTree
|
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_selectable.py
|
{
"start": 84535,
"end": 94730
}
|
class ____(fixtures.TestBase, AssertsExecutionResults):
def test_reduce(self):
meta = MetaData()
t1 = Table(
"t1",
meta,
Column("t1id", Integer, primary_key=True),
Column("t1data", String(30)),
)
t2 = Table(
"t2",
meta,
Column("t2id", Integer, ForeignKey("t1.t1id"), primary_key=True),
Column("t2data", String(30)),
)
t3 = Table(
"t3",
meta,
Column("t3id", Integer, ForeignKey("t2.t2id"), primary_key=True),
Column("t3data", String(30)),
)
eq_(
util.column_set(
sql_util.reduce_columns(
[
t1.c.t1id,
t1.c.t1data,
t2.c.t2id,
t2.c.t2data,
t3.c.t3id,
t3.c.t3data,
]
)
),
util.column_set(
[t1.c.t1id, t1.c.t1data, t2.c.t2data, t3.c.t3data]
),
)
def test_reduce_selectable(self):
metadata = MetaData()
engineers = Table(
"engineers",
metadata,
Column("engineer_id", Integer, primary_key=True),
Column("engineer_name", String(50)),
)
managers = Table(
"managers",
metadata,
Column("manager_id", Integer, primary_key=True),
Column("manager_name", String(50)),
)
s = (
select(engineers, managers)
.where(engineers.c.engineer_name == managers.c.manager_name)
.subquery()
)
eq_(
util.column_set(sql_util.reduce_columns(list(s.c), s)),
util.column_set(
[s.c.engineer_id, s.c.engineer_name, s.c.manager_id]
),
)
def test_reduce_generation(self):
m = MetaData()
t1 = Table(
"t1",
m,
Column("x", Integer, primary_key=True),
Column("y", Integer),
)
t2 = Table(
"t2",
m,
Column("z", Integer, ForeignKey("t1.x")),
Column("q", Integer),
)
s1 = select(t1, t2)
s2 = s1.reduce_columns(only_synonyms=False)
eq_(set(s2.selected_columns), {t1.c.x, t1.c.y, t2.c.q})
s2 = s1.reduce_columns()
eq_(set(s2.selected_columns), {t1.c.x, t1.c.y, t2.c.z, t2.c.q})
def test_reduce_only_synonym_fk(self):
m = MetaData()
t1 = Table(
"t1",
m,
Column("x", Integer, primary_key=True),
Column("y", Integer),
)
t2 = Table(
"t2",
m,
Column("x", Integer, ForeignKey("t1.x")),
Column("q", Integer, ForeignKey("t1.y")),
)
s1 = select(t1, t2)
s1 = s1.reduce_columns(only_synonyms=True)
eq_(
set(s1.selected_columns),
{
s1.selected_columns.x,
s1.selected_columns.y,
s1.selected_columns.q,
},
)
def test_reduce_only_synonym_lineage(self):
m = MetaData()
t1 = Table(
"t1",
m,
Column("x", Integer, primary_key=True),
Column("y", Integer),
Column("z", Integer),
)
# test that the first appearance in the columns clause
# wins - t1 is first, t1.c.x wins
s1 = select(t1).subquery()
s2 = select(t1, s1).where(t1.c.x == s1.c.x).where(s1.c.y == t1.c.z)
eq_(
set(s2.reduce_columns().selected_columns),
{t1.c.x, t1.c.y, t1.c.z, s1.c.y, s1.c.z},
)
# reverse order, s1.c.x wins
s1 = select(t1).subquery()
s2 = select(s1, t1).where(t1.c.x == s1.c.x).where(s1.c.y == t1.c.z)
eq_(
set(s2.reduce_columns().selected_columns),
{s1.c.x, t1.c.y, t1.c.z, s1.c.y, s1.c.z},
)
def test_reduce_aliased_join(self):
metadata = MetaData()
people = Table(
"people",
metadata,
Column(
"person_id",
Integer,
normalize_sequence(
config, Sequence("person_id_seq", optional=True)
),
primary_key=True,
),
Column("name", String(50)),
Column("type", String(30)),
)
engineers = Table(
"engineers",
metadata,
Column(
"person_id",
Integer,
ForeignKey("people.person_id"),
primary_key=True,
),
Column("status", String(30)),
Column("engineer_name", String(50)),
Column("primary_language", String(50)),
)
managers = Table(
"managers",
metadata,
Column(
"person_id",
Integer,
ForeignKey("people.person_id"),
primary_key=True,
),
Column("status", String(30)),
Column("manager_name", String(50)),
)
pjoin = (
people.outerjoin(engineers)
.outerjoin(managers)
.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.alias("pjoin")
)
eq_(
util.column_set(
sql_util.reduce_columns(
[
pjoin.c.people_person_id,
pjoin.c.engineers_person_id,
pjoin.c.managers_person_id,
]
)
),
util.column_set([pjoin.c.people_person_id]),
)
def test_reduce_aliased_union(self):
metadata = MetaData()
item_table = Table(
"item",
metadata,
Column(
"id", Integer, ForeignKey("base_item.id"), primary_key=True
),
Column("dummy", Integer, default=0),
)
base_item_table = Table(
"base_item",
metadata,
Column("id", Integer, primary_key=True),
Column("child_name", String(255), default=None),
)
from sqlalchemy.orm.util import polymorphic_union
item_join = polymorphic_union(
{
"BaseItem": base_item_table.select()
.where(base_item_table.c.child_name == "BaseItem")
.subquery(),
"Item": base_item_table.join(item_table),
},
None,
"item_join",
)
eq_(
util.column_set(
sql_util.reduce_columns(
[item_join.c.id, item_join.c.dummy, item_join.c.child_name]
)
),
util.column_set(
[item_join.c.id, item_join.c.dummy, item_join.c.child_name]
),
)
def test_reduce_aliased_union_2(self):
metadata = MetaData()
page_table = Table(
"page", metadata, Column("id", Integer, primary_key=True)
)
magazine_page_table = Table(
"magazine_page",
metadata,
Column(
"page_id", Integer, ForeignKey("page.id"), primary_key=True
),
)
classified_page_table = Table(
"classified_page",
metadata,
Column(
"magazine_page_id",
Integer,
ForeignKey("magazine_page.page_id"),
primary_key=True,
),
)
# this is essentially the union formed by the ORM's
# polymorphic_union function. we define two versions with
# different ordering of selects.
#
# the first selectable has the "real" column
# classified_page.magazine_page_id
pjoin = union(
select(
page_table.c.id,
magazine_page_table.c.page_id,
classified_page_table.c.magazine_page_id,
).select_from(
page_table.join(magazine_page_table).join(
classified_page_table
)
),
select(
page_table.c.id,
magazine_page_table.c.page_id,
cast(null(), Integer).label("magazine_page_id"),
).select_from(page_table.join(magazine_page_table)),
).alias("pjoin")
eq_(
util.column_set(
sql_util.reduce_columns(
[pjoin.c.id, pjoin.c.page_id, pjoin.c.magazine_page_id]
)
),
util.column_set([pjoin.c.id]),
)
# the first selectable has a CAST, which is a placeholder for
# classified_page.magazine_page_id in the second selectable.
# reduce_columns needs to take into account all foreign keys
# derived from pjoin.c.magazine_page_id. the UNION construct
# currently makes the external column look like that of the
# first selectable only.
pjoin = union(
select(
page_table.c.id,
magazine_page_table.c.page_id,
cast(null(), Integer).label("magazine_page_id"),
).select_from(page_table.join(magazine_page_table)),
select(
page_table.c.id,
magazine_page_table.c.page_id,
classified_page_table.c.magazine_page_id,
).select_from(
page_table.join(magazine_page_table).join(
classified_page_table
)
),
).alias("pjoin")
eq_(
util.column_set(
sql_util.reduce_columns(
[pjoin.c.id, pjoin.c.page_id, pjoin.c.magazine_page_id]
)
),
util.column_set([pjoin.c.id]),
)
|
ReduceTest
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/dlp.py
|
{
"start": 83631,
"end": 87479
}
|
class ____(GoogleCloudBaseOperator):
"""
Lists InspectTemplates.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPListInspectTemplatesOperator`
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:param order_by: (Optional) Optional comma separated list of fields to order by,
followed by asc or desc postfix.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"organization_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPInspectTemplatesListLink(),)
def __init__(
self,
*,
organization_id: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
page_size: int | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.organization_id = organization_id
self.project_id = project_id
self.page_size = page_size
self.order_by = order_by
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
templates = hook.list_inspect_templates(
organization_id=self.organization_id,
project_id=self.project_id,
page_size=self.page_size,
order_by=self.order_by,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPInspectTemplatesListLink.persist(
context=context,
project_id=project_id,
)
return [InspectTemplate.to_dict(t) for t in templates]
|
CloudDLPListInspectTemplatesOperator
|
python
|
sphinx-doc__sphinx
|
tests/test_builders/test_build_linkcheck.py
|
{
"start": 1264,
"end": 2372
}
|
class ____(BaseHTTPRequestHandler):
protocol_version = 'HTTP/1.1'
def do_HEAD(self) -> None:
if self.path[1:].rstrip() in {'', 'anchor.html'}:
self.send_response(200, 'OK')
self.send_header('Content-Length', '0')
self.end_headers()
else:
self.send_response(404, 'Not Found')
self.send_header('Content-Length', '0')
self.end_headers()
def do_GET(self) -> None:
if self.path[1:].rstrip() == '':
content = b'ok\n\n'
elif self.path[1:].rstrip() == 'anchor.html':
doc = '<!DOCTYPE html><html><body><a id="found"></a></body></html>'
content = doc.encode('utf-8')
else:
content = b''
if content:
self.send_response(200, 'OK')
self.send_header('Content-Length', str(len(content)))
self.end_headers()
self.wfile.write(content)
else:
self.send_response(404, 'Not Found')
self.send_header('Content-Length', '0')
self.end_headers()
|
DefaultsHandler
|
python
|
google__jax
|
jax/_src/interpreters/partial_eval.py
|
{
"start": 110455,
"end": 116718
}
|
class ____: ...
dne_sentinel = DoesNotExist()
def infer_lambda_input_type(
axes_specs: Sequence[AbstractedAxesSpec] | None,
args: Sequence[Any]
) -> InputType:
ndims = [getattr(get_aval(x), 'ndim', 0) for x in args]
partial_specs = _canonicalize_specs(ndims, axes_specs)
specs = _complete_specs(args, partial_specs)
idxs, implicit_types = _collect_implicit(args, specs)
implicit_sig = [(ty, False) for ty in implicit_types]
explicit_sig = [(_arg_type(idxs, x, s), True) for x, s in zip(args, specs)]
input_type = (*implicit_sig, *explicit_sig)
lu._check_input_type(input_type)
return input_type
def _spec_to_dict(spec: AbstractedAxesSpec) -> dict[int, AbstractedAxisName]:
if isinstance(spec, tuple):
return {i: d for i, d in enumerate(spec) if d is not None}
else:
return spec
def _canonicalize_specs(
ndims: Sequence[int], specs: Sequence[AbstractedAxesSpec] | None
) -> list[dict[int, AbstractedAxisName]]:
if specs is None:
return [{}] * len(ndims)
else:
return [_spec_to_dict(s) for n, s in zip(ndims, specs)]
def _complete_specs(
args: Sequence[Any], partial_specs: list[dict[int, AbstractedAxisName]]
) -> list[dict[int, AbstractedAxisName]]:
# The abstracted axes specification in `partial_specs` is partial in the sense
# that there could be additional axis abstraction represented in `args` due to
# Tracers existing in the shapes of elements of `args`. The purpose of this
# function is to produce a full specification, for each argument mapping any
# abstracted axis positions to a name, introducing new names as needed for
# Tracers in axis sizes which don't already correspond to abstracted axis
# names (with one new name per unique Tracer object id).
# Identify each user-supplied name in partial_specs with a size.
sizes: dict[AbstractedAxisName, int | DynamicJaxprTracer] = {}
for x, spec in zip(args, partial_specs):
for i, name in spec.items():
d = sizes.setdefault(name, x.shape[i])
if d is not x.shape[i] and d != x.shape[i]:
raise TypeError(f"Provided size {d} for {name} does not match prior associated name for {name} : {x.shape[i]}")
# Introduce new names as needed for Tracers in shapes.
named_tracers: dict[TracerId, AbstractedAxisName] = {
id(d): name for name, d in sizes.items() if isinstance(d, Tracer)}
specs: list[dict[int, AbstractedAxisName]] = []
for x, spec in zip(args, partial_specs):
if isinstance(get_aval(x), DShapedArray):
spec = dict(spec)
for i, d in enumerate(x.shape):
if isinstance(d, Tracer):
spec[i] = named_tracers.get(id(d), TracerAsName(d))
specs.append(spec)
# Assert that `specs` is now complete in the sense that there are no Tracers
# which don't correspond to an AbstractedAxisName.
assert all(not spec or not any(isinstance(d, Tracer) and i not in spec
for i, d in enumerate(x.shape))
for x, spec in zip(args, specs))
return specs
def _collect_implicit(
args: Sequence[Any], specs: list[dict[int, AbstractedAxisName]]
) -> tuple[dict[AbstractedAxisName, DBIdx], list[AbstractValue]]:
# Given an explicit argument list and a specification of abstracted axes, we
# want to produce an InputType by identifying AbstractedAxisNames with DBIdxs
# and figuring out which AbstractedAxisNames correspond to implicit arguments.
idxs: dict[AbstractedAxisName, DBIdx] = {}
implicit_types: list[AbstractValue] = []
explicit_tracers: dict[TracerId, int] = {}
counter = it.count()
# Add implicit arguments to idxs.
for explicit_idx, (x, spec) in enumerate(zip(args, specs)):
for i, name in spec.items():
if name not in idxs and id(x.shape[i]) not in explicit_tracers:
idxs[name] = DBIdx(next(counter))
implicit_types.append(get_aval(x.shape[i]))
if isinstance(x, Tracer):
explicit_tracers.setdefault(id(x), explicit_idx) # use the first
# Now that we know the implicit args, add explicit args to idxs.
offset = len(implicit_types)
for x, spec in zip(args, specs):
for i, name in spec.items():
if id(x.shape[i]) in explicit_tracers:
idxs.setdefault(name, DBIdx(offset + explicit_tracers[id(x.shape[i])]))
return idxs, implicit_types
def _arg_type(
idxs: dict[AbstractedAxisName, DBIdx], x: Any,
spec: dict[int, AbstractedAxisName]
) -> AbstractValue:
# Produce an AbstractValue by substituting DBIdxs for AbstractedAxisNames.
aval = get_aval(x) # aval.shape could contain Tracers
if not spec: return aval
shape: list[int | DBIdx] = [idxs[spec[i]] if i in spec else d
for i, d in enumerate(aval.shape)]
assert not any(isinstance(d, Tracer) for d in shape)
return DShapedArray(tuple(shape), aval.dtype, False)
def _add_implicit_outputs(jaxpr: Jaxpr) -> tuple[Jaxpr, OutputType]:
invars = [*jaxpr.constvars, *jaxpr.invars]
expl_outvars = jaxpr.outvars
# First do a pass to collect implicit outputs, meaning variables which occur
# in explicit_outvars types but not in invars or to the left in outvars.
seen: set[Var] = set(invars)
impl_outvars = [seen.add(d) or d for x in expl_outvars if type(x) is Var and # type: ignore
(seen.add(x) or type(x.aval) is DShapedArray) # type: ignore
for d in x.aval.shape if type(d) is Var and d not in seen]
outvars = [*impl_outvars, *expl_outvars]
# Now assemble an OutputType by mapping vars in shapes to InDBIdx/OutDBIdx.
in_map : dict[Var, InDBIdx] = {v: InDBIdx(i) for i, v in enumerate( invars)}
out_map: dict[Var, OutDBIdx] = {x: OutDBIdx(i) for i, x in enumerate(outvars)
if type(x) is Var}
out_avals_ = (x.aval for x in outvars)
out_avals = [a.update(shape=tuple(in_map.get(d, out_map.get(d))
if type(d) is Var else d for d in a.shape))
if type(a) is DShapedArray else a for a in out_avals_]
kept_outs = [False] * len(impl_outvars) + [True] * len(expl_outvars)
out_type = tuple(zip(out_avals, kept_outs))
new_jaxpr = jaxpr.replace(outvars=outvars)
config.enable_checks.value and core.check_jaxpr(jaxpr)
return new_jaxpr, out_type
|
DoesNotExist
|
python
|
PrefectHQ__prefect
|
src/prefect/client/schemas/actions.py
|
{
"start": 14941,
"end": 16486
}
|
class ____(ActionBaseModel):
"""Data used by the Prefect REST API to create a task run"""
id: Optional[UUID] = Field(None, description="The ID to assign to the task run")
# TaskRunCreate states must be provided as StateCreate objects
state: Optional[StateCreate] = Field(
default=None, description="The state of the task run to create"
)
name: Optional[str] = Field(
default=None,
description="The name of the task run",
)
flow_run_id: Optional[UUID] = Field(default=None)
task_key: str = Field(
default=..., description="A unique identifier for the task being run."
)
dynamic_key: str = Field(
default=...,
description=(
"A dynamic key used to differentiate between multiple runs of the same task"
" within the same flow run."
),
)
cache_key: Optional[str] = Field(default=None)
cache_expiration: Optional[objects.DateTime] = Field(default=None)
task_version: Optional[str] = Field(default=None)
empirical_policy: objects.TaskRunPolicy = Field(
default_factory=objects.TaskRunPolicy,
)
tags: list[str] = Field(default_factory=list)
labels: KeyValueLabelsField = Field(default_factory=dict)
task_inputs: dict[
str,
list[
Union[
objects.TaskRunResult,
objects.FlowRunResult,
objects.Parameter,
objects.Constant,
]
],
] = Field(default_factory=dict)
|
TaskRunCreate
|
python
|
getsentry__sentry
|
tests/sentry/integrations/github/test_webhooks.py
|
{
"start": 6203,
"end": 13750
}
|
class ____(APITestCase):
base_url = "https://api.github.com"
def setUp(self) -> None:
self.url = "/extensions/github/webhook/"
self.secret = "b3002c3e321d4b7880360d397db2ccfd"
options.set("github-app.webhook-secret", self.secret)
@patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
def test_installation_deleted(self, get_jwt: MagicMock) -> None:
future_expires = datetime.now().replace(microsecond=0) + timedelta(minutes=5)
integration = self.create_integration(
name="octocat",
organization=self.organization,
external_id="2",
provider="github",
metadata={"access_token": "1234", "expires_at": future_expires.isoformat()},
)
integration.add_organization(self.project.organization.id, self.user)
assert integration.status == ObjectStatus.ACTIVE
repo = self.create_repo(
self.project,
provider="integrations:github",
integration_id=integration.id,
)
with patch.object(GithubRequestParser, "get_regions_from_organizations", return_value=[]):
response = self.client.post(
path=self.url,
data=INSTALLATION_DELETE_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="installation",
HTTP_X_HUB_SIGNATURE="sha1=6a660af7f5c9e5dbc98e83abdff07adf40fafdf4",
HTTP_X_HUB_SIGNATURE_256="sha256=037b8cddfa1697fecf60e1390138e11e117a04096a02a8c52c09ab808ce6555c",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 204
integration = Integration.objects.get(external_id=2)
assert integration.external_id == "2"
assert integration.name == "octocat"
assert integration.status == ObjectStatus.DISABLED
with assume_test_silo_mode(SiloMode.REGION):
repo.refresh_from_db()
assert repo.status == ObjectStatus.DISABLED
@patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
def test_installation_deleted_no_org_integration(self, get_jwt: MagicMock) -> None:
future_expires = datetime.now().replace(microsecond=0) + timedelta(minutes=5)
integration = self.create_integration(
name="octocat",
organization=self.organization,
external_id="2",
provider="github",
metadata={"access_token": "1234", "expires_at": future_expires.isoformat()},
)
integration.add_organization(self.project.organization.id, self.user)
assert integration.status == ObjectStatus.ACTIVE
# Set up condition that the OrganizationIntegration is deleted prior to the webhook event
OrganizationIntegration.objects.filter(
integration_id=integration.id,
organization_id=self.project.organization.id,
).delete()
response = self.client.post(
path=self.url,
data=INSTALLATION_DELETE_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="installation",
HTTP_X_HUB_SIGNATURE="sha1=6a660af7f5c9e5dbc98e83abdff07adf40fafdf4",
HTTP_X_HUB_SIGNATURE_256="sha256=037b8cddfa1697fecf60e1390138e11e117a04096a02a8c52c09ab808ce6555c",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 204
integration = Integration.objects.get(external_id=2)
assert integration.external_id == "2"
assert integration.name == "octocat"
assert integration.status == ObjectStatus.DISABLED
@patch(
"sentry.integrations.github.tasks.codecov_account_unlink.codecov_account_unlink.apply_async"
)
@patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@override_options(
{
"github-app.id": "123",
"github-app.webhook-secret": "b3002c3e321d4b7880360d397db2ccfd",
"hybrid_cloud.authentication.disabled_organization_shards": [],
"hybrid_cloud.authentication.disabled_user_shards": [],
}
)
def test_installation_deleted_triggers_codecov_unlink_when_app_ids_match(
self, get_jwt: MagicMock, mock_codecov_unlink: MagicMock
) -> None:
future_expires = datetime.now().replace(microsecond=0) + timedelta(minutes=5)
integration = self.create_integration(
name="octocat",
organization=self.organization,
external_id="2",
provider="github",
metadata={"access_token": "1234", "expires_at": future_expires.isoformat()},
)
integration.add_organization(self.project.organization.id, self.user)
with patch.object(GithubRequestParser, "get_regions_from_organizations", return_value=[]):
response = self.client.post(
path=self.url,
data=INSTALLATION_DELETE_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="installation",
HTTP_X_HUB_SIGNATURE="sha1=6a660af7f5c9e5dbc98e83abdff07adf40fafdf4",
HTTP_X_HUB_SIGNATURE_256="sha256=037b8cddfa1697fecf60e1390138e11e117a04096a02a8c52c09ab808ce6555c",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 204
mock_codecov_unlink.assert_called_once_with(
kwargs={
"integration_id": integration.id,
"organization_ids": [self.organization.id],
}
)
@patch(
"sentry.integrations.github.tasks.codecov_account_unlink.codecov_account_unlink.apply_async"
)
@patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@override_options(
{
"github-app.id": "different_app_id",
"github-app.webhook-secret": "b3002c3e321d4b7880360d397db2ccfd",
"hybrid_cloud.authentication.disabled_organization_shards": [],
"hybrid_cloud.authentication.disabled_user_shards": [],
}
)
def test_installation_deleted_skips_codecov_unlink_when_app_ids_dont_match(
self, get_jwt: MagicMock, mock_codecov_unlink: MagicMock
) -> None:
future_expires = datetime.now().replace(microsecond=0) + timedelta(minutes=5)
integration = self.create_integration(
name="octocat",
organization=self.organization,
external_id="2",
provider="github",
metadata={"access_token": "1234", "expires_at": future_expires.isoformat()},
)
integration.add_organization(self.project.organization.id, self.user)
with patch.object(GithubRequestParser, "get_regions_from_organizations", return_value=[]):
response = self.client.post(
path=self.url,
data=INSTALLATION_DELETE_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="installation",
HTTP_X_HUB_SIGNATURE="sha1=6a660af7f5c9e5dbc98e83abdff07adf40fafdf4",
HTTP_X_HUB_SIGNATURE_256="sha256=037b8cddfa1697fecf60e1390138e11e117a04096a02a8c52c09ab808ce6555c",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 204
mock_codecov_unlink.assert_not_called()
|
InstallationDeleteEventWebhookTest
|
python
|
lepture__authlib
|
authlib/integrations/flask_oauth2/requests.py
|
{
"start": 675,
"end": 1093
}
|
class ____(OAuth2Request):
def __init__(self, request: Request):
super().__init__(
method=request.method, uri=request.url, headers=request.headers
)
self._request = request
self.payload = FlaskOAuth2Payload(request)
@property
def args(self):
return self._request.args
@property
def form(self):
return self._request.form
|
FlaskOAuth2Request
|
python
|
PyCQA__pylint
|
doc/data/messages/a/abstract-method/good/function_raising_not_implemented_error.py
|
{
"start": 73,
"end": 139
}
|
class ____(Pet):
def make_sound(self):
print("Meeeow")
|
Cat
|
python
|
pytest-dev__pytest
|
testing/code/test_source.py
|
{
"start": 2417,
"end": 12966
}
|
class ____:
def setup_class(self) -> None:
self.source = Source(
"""\
def f(x):
assert (x ==
3 +
4)
"""
).strip()
def test_getstatement(self) -> None:
# print str(self.source)
ass = str(self.source[1:])
for i in range(1, 4):
# print "trying start in line %r" % self.source[i]
s = self.source.getstatement(i)
# x = s.deindent()
assert str(s) == ass
def test_getstatementrange_triple_quoted(self) -> None:
# print str(self.source)
source = Source(
"""hello('''
''')"""
)
s = source.getstatement(0)
assert s == source
s = source.getstatement(1)
assert s == source
def test_getstatementrange_within_constructs(self) -> None:
source = Source(
"""\
try:
try:
raise ValueError
except SomeThing:
pass
finally:
42
"""
)
assert len(source) == 7
# check all lineno's that could occur in a traceback
# assert source.getstatementrange(0) == (0, 7)
# assert source.getstatementrange(1) == (1, 5)
assert source.getstatementrange(2) == (2, 3)
assert source.getstatementrange(3) == (3, 4)
assert source.getstatementrange(4) == (4, 5)
# assert source.getstatementrange(5) == (0, 7)
assert source.getstatementrange(6) == (6, 7)
def test_getstatementrange_bug(self) -> None:
source = Source(
"""\
try:
x = (
y +
z)
except:
pass
"""
)
assert len(source) == 6
assert source.getstatementrange(2) == (1, 4)
def test_getstatementrange_bug2(self) -> None:
source = Source(
"""\
assert (
33
==
[
X(3,
b=1, c=2
),
]
)
"""
)
assert len(source) == 9
assert source.getstatementrange(5) == (0, 9)
def test_getstatementrange_ast_issue58(self) -> None:
source = Source(
"""\
def test_some():
for a in [a for a in
CAUSE_ERROR]: pass
x = 3
"""
)
assert getstatement(2, source).lines == source.lines[2:3]
assert getstatement(3, source).lines == source.lines[3:4]
def test_getstatementrange_out_of_bounds_py3(self) -> None:
source = Source("if xxx:\n from .collections import something")
r = source.getstatementrange(1)
assert r == (1, 2)
def test_getstatementrange_with_syntaxerror_issue7(self) -> None:
source = Source(":")
pytest.raises(SyntaxError, lambda: source.getstatementrange(0))
def test_getstartingblock_singleline() -> None:
class A:
def __init__(self, *args) -> None:
frame = sys._getframe(1)
self.source = Frame(frame).statement
x = A("x", "y")
values = [i for i in x.source.lines if i.strip()]
assert len(values) == 1
def test_getline_finally() -> None:
def c() -> None:
pass
with pytest.raises(TypeError) as excinfo:
teardown = None
try:
c(1) # type: ignore
finally:
if teardown:
teardown() # type: ignore[unreachable]
source = excinfo.traceback[-1].statement
assert str(source).strip() == "c(1) # type: ignore"
def test_getfuncsource_dynamic() -> None:
def f():
raise NotImplementedError()
def g():
pass # pragma: no cover
f_source = Source(f)
g_source = Source(g)
assert str(f_source).strip() == "def f():\n raise NotImplementedError()"
assert str(g_source).strip() == "def g():\n pass # pragma: no cover"
def test_getfuncsource_with_multiline_string() -> None:
def f():
c = """while True:
pass
"""
expected = '''\
def f():
c = """while True:
pass
"""
'''
assert str(Source(f)) == expected.rstrip()
def test_deindent() -> None:
from _pytest._code.source import deindent as deindent
assert deindent(["\tfoo", "\tbar"]) == ["foo", "bar"]
source = """\
def f():
def g():
pass
"""
lines = deindent(source.splitlines())
assert lines == ["def f():", " def g():", " pass"]
def test_source_of_class_at_eof_without_newline(_sys_snapshot, tmp_path: Path) -> None:
# this test fails because the implicit inspect.getsource(A) below
# does not return the "x = 1" last line.
source = Source(
"""
class A:
def method(self):
x = 1
"""
)
path = tmp_path.joinpath("a.py")
path.write_text(str(source), encoding="utf-8")
mod: Any = import_path(path, root=tmp_path, consider_namespace_packages=False)
s2 = Source(mod.A)
assert str(source).strip() == str(s2).strip()
if True:
def x():
pass
def test_source_fallback() -> None:
src = Source(x)
expected = """def x():
pass"""
assert str(src) == expected
def test_findsource_fallback() -> None:
from _pytest._code.source import findsource
src, lineno = findsource(x)
assert src is not None
assert "test_findsource_simple" in str(src)
assert src[lineno] == " def x():"
def test_findsource(monkeypatch) -> None:
from _pytest._code.source import findsource
filename = "<pytest-test_findsource>"
lines = ["if 1:\n", " def x():\n", " pass\n"]
co = compile("".join(lines), filename, "exec")
monkeypatch.setitem(linecache.cache, filename, (1, None, lines, filename))
src, lineno = findsource(co)
assert src is not None
assert "if 1:" in str(src)
d: dict[str, Any] = {}
eval(co, d)
src, lineno = findsource(d["x"])
assert src is not None
assert "if 1:" in str(src)
assert src[lineno] == " def x():"
def test_getfslineno() -> None:
def f(x) -> None:
raise NotImplementedError()
fspath, lineno = getfslineno(f)
assert isinstance(fspath, Path)
assert fspath.name == "test_source.py"
assert lineno == f.__code__.co_firstlineno - 1 # see findsource
class A:
pass
fspath, lineno = getfslineno(A)
_, A_lineno = inspect.findsource(A)
assert isinstance(fspath, Path)
assert fspath.name == "test_source.py"
assert lineno == A_lineno
assert getfslineno(3) == ("", -1)
class B:
pass
B.__name__ = B.__qualname__ = "B2"
# Since Python 3.13 this started working.
if sys.version_info >= (3, 13):
assert getfslineno(B)[1] != -1
else:
assert getfslineno(B)[1] == -1
def test_code_of_object_instance_with_call() -> None:
class A:
pass
pytest.raises(TypeError, lambda: Source(A()))
class WithCall:
def __call__(self) -> None:
pass
code = Code.from_function(WithCall())
assert "pass" in str(code.source())
class Hello:
def __call__(self) -> None:
pass
pytest.raises(TypeError, lambda: Code.from_function(Hello))
def getstatement(lineno: int, source) -> Source:
from _pytest._code.source import getstatementrange_ast
src = Source(source)
_ast, start, end = getstatementrange_ast(lineno, src)
return src[start:end]
def test_oneline() -> None:
source = getstatement(0, "raise ValueError")
assert str(source) == "raise ValueError"
def test_comment_and_no_newline_at_end() -> None:
from _pytest._code.source import getstatementrange_ast
source = Source(
[
"def test_basic_complex():",
" assert 1 == 2",
"# vim: filetype=pyopencl:fdm=marker",
]
)
_ast, _start, end = getstatementrange_ast(1, source)
assert end == 2
def test_oneline_and_comment() -> None:
source = getstatement(0, "raise ValueError\n#hello")
assert str(source) == "raise ValueError"
def test_comments() -> None:
source = '''def test():
"comment 1"
x = 1
# comment 2
# comment 3
assert False
"""
comment 4
"""
'''
for line in range(2, 6):
assert str(getstatement(line, source)) == " x = 1"
for line in range(6, 8):
assert str(getstatement(line, source)) == " assert False"
for line in range(8, 10):
assert str(getstatement(line, source)) == '"""\ncomment 4\n"""'
def test_comment_in_statement() -> None:
source = """test(foo=1,
# comment 1
bar=2)
"""
for line in range(1, 3):
assert (
str(getstatement(line, source))
== "test(foo=1,\n # comment 1\n bar=2)"
)
def test_source_with_decorator() -> None:
"""Test behavior with Source / Code().source with regard to decorators."""
@pytest.mark.foo
def deco_mark():
assert False
src = inspect.getsource(deco_mark)
assert textwrap.indent(str(Source(deco_mark)), " ") + "\n" == src
assert src.startswith(" @pytest.mark.foo")
@pytest.fixture
def deco_fixture():
assert False
src = inspect.getsource(deco_fixture._get_wrapped_function())
assert src == " @pytest.fixture\n def deco_fixture():\n assert False\n"
# Make sure the decorator is not a wrapped function
assert not str(Source(deco_fixture)).startswith("@functools.wraps(function)")
assert (
textwrap.indent(str(Source(deco_fixture._get_wrapped_function())), " ")
+ "\n"
== src
)
def test_single_line_else() -> None:
source = getstatement(1, "if False: 2\nelse: 3")
assert str(source) == "else: 3"
def test_single_line_finally() -> None:
source = getstatement(1, "try: 1\nfinally: 3")
assert str(source) == "finally: 3"
def test_issue55() -> None:
source = (
"def round_trip(dinp):\n assert 1 == dinp\n"
'def test_rt():\n round_trip("""\n""")\n'
)
s = getstatement(3, source)
assert str(s) == ' round_trip("""\n""")'
def test_multiline() -> None:
source = getstatement(
0,
"""\
raise ValueError(
23
)
x = 3
""",
)
assert str(source) == "raise ValueError(\n 23\n)"
|
TestSourceParsing
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_ops_test.py
|
{
"start": 56510,
"end": 69388
}
|
class ____(test.Benchmark):
def benchmark_sparse_matrix_mat_mul_gpu(self):
if not test_util.is_gpu_available():
return
sparsify = lambda m: array_ops.where(m > 2, m, array_ops.zeros_like(m))
# XW, X dense and W sparse
# X is shaped [{1, 8, 16}, 2000]
# W is shaped [2000, 4000]
for batch_size in [1, 8, 16]:
x_dense_shape = [batch_size, 2000]
w_dense_shape = [2000, 4000]
with ops.Graph().as_default(), ops.device("/gpu:0"):
x_mats = random_ops.random_normal(x_dense_shape, dtype=dtypes.float32)
w_mats = sparsify(
random_ops.random_normal(w_dense_shape, dtype=dtypes.float32))
nnz = array_ops.shape(array_ops.where(w_mats))[0]
ratio = math_ops.cast(nnz, dtypes.float32) / np.prod(w_dense_shape)
w_sm = dense_to_csr_sparse_matrix(w_mats)
with ops.name_scope("w_sm_var"):
w_sm_var = variable_scope.get_variable(
"sm", initializer=w_sm, use_resource=True)
w_sm_var_v = w_sm_var.read_value()
with ops.name_scope("w_var"):
w_var = variable_scope.get_variable(
"sm_dense", initializer=w_mats, use_resource=True)
w_var_v = w_var.read_value()
with ops.name_scope("b"):
x = variable_scope.get_variable(
"b", initializer=x_mats, use_resource=True)
x_v = x.read_value()
# X*W = (W'*X')'
xw_sparse = sparse_csr_matrix_ops.sparse_matrix_mat_mul(
w_sm_var_v,
x_v,
transpose_a=True,
transpose_b=True,
transpose_output=True)
xw_dense = math_ops.matmul(x_v, w_var_v)
with session.Session() as sess:
self.evaluate(
[w_var.initializer, w_sm_var.initializer, x.initializer])
nnz_value, ratio_value = self.evaluate((nnz, ratio))
name_template = (
"sparse_matrix_mat_mul_gpu_%s_W_2000x4000_batch_size_%d")
self.run_op_benchmark(
sess,
xw_sparse.op,
name=name_template % ("sparse", batch_size),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value
},
min_iters=50)
self.run_op_benchmark(
sess,
xw_dense.op,
name=name_template % ("dense", batch_size),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value
},
min_iters=50)
def benchmark_sparse_matrix_mat_vec_mul(self):
# num_rows, device, transpose.
cases = [
[2000, CPU, False],
[8000, CPU, False],
[12000, CPU, False],
[2000, CPU, True],
[8000, CPU, True],
[12000, CPU, True],
]
seed = 42
for num_rows, device, transpose in cases:
if device == GPU and not test_util.is_gpu_available():
continue
for num_threads in [1, 2, 4, 6, 8, 10]:
device_str = "cpu" if device == CPU else "gpu"
w_dense_shape = [num_rows, num_rows]
x_dense_shape = [num_rows, 1]
with ops.Graph().as_default(), ops.device(device):
random_seed.set_random_seed(seed)
x = random_ops.random_normal(x_dense_shape, dtype=dtypes.float32)
w_np = sparse.rand(
w_dense_shape[0],
w_dense_shape[1],
density=0.01,
dtype=np.float32,
random_state=np.random.RandomState(seed))
w_st = sparse_tensor.SparseTensor(
zip(w_np.row, w_np.col), w_np.data, w_np.shape)
w_st = sparse_ops.sparse_reorder(w_st)
nnz = array_ops.shape(w_st.values)[0]
ratio = math_ops.cast(nnz, dtypes.float32) / np.prod(w_np.shape)
w_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
w_st.indices, w_st.values, w_st.dense_shape)
xw_sparse_matrix = sparse_csr_matrix_ops.sparse_matrix_mat_mul(
w_sm,
x,
transpose_a=transpose,
transpose_b=False,
transpose_output=False)
xw_sparse_tensor = sparse_ops.sparse_tensor_dense_matmul(
w_st, x, adjoint_a=transpose, adjoint_b=False)
with session.Session(
config=config_pb2.ConfigProto(
intra_op_parallelism_threads=num_threads)) as sess:
nnz_value, ratio_value = sess.run((nnz, ratio))
name_template = ("mat_vec_mul_%s_%s_W_%d_transpose_%s_threads_%d")
self.run_op_benchmark(
sess,
xw_sparse_matrix.op,
name=name_template %
(device_str, "sparse_matrix", num_rows, transpose, num_threads),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value,
},
min_iters=10)
self.run_op_benchmark(
sess,
xw_sparse_tensor.op,
name=name_template %
(device_str, "sparse_tensor", num_rows, transpose, num_threads),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value,
},
min_iters=10)
def benchmark_sparse_matrix_sparse_matmul(self):
density = 0.05
# pylint: disable=g-long-lambda
sparsify = lambda m: array_ops.where(m > 1. - density, m,
array_ops.zeros_like(m))
# pylint: enable=g-long-lambda
for batch_size in [1, 16]:
for num_threads in [1, 4, 12]:
dense_shape = [batch_size, 250, 250]
for device in [CPU, GPU]:
if device == GPU and not test_util.is_gpu_available():
continue
with ops.Graph().as_default(), ops.device(device):
x_mats = sparsify(
random_ops.random_uniform(dense_shape, dtype=dtypes.float32))
y_mats = sparsify(
random_ops.random_uniform(dense_shape, dtype=dtypes.float32))
nnz = array_ops.shape(array_ops.where(x_mats))[0] + array_ops.shape(
array_ops.where(y_mats))[0]
ratio = math_ops.cast(nnz,
dtypes.float32) / (2 * np.prod(dense_shape))
x_sm = dense_to_csr_sparse_matrix(x_mats)
y_sm = dense_to_csr_sparse_matrix(y_mats)
xy_sparse = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(
x_sm, y_sm, type=dtypes.float32)
with session.Session(
config=config_pb2.ConfigProto(
intra_op_parallelism_threads=num_threads)) as sess:
nnz_value, ratio_value = self.evaluate((nnz, ratio))
name_template = (
"sparse_matrix_sparse_matmul_%s_N_%d_batch_size_%d_threads_%d"
)
device_str = "cpu" if device == CPU else "gpu"
self.run_op_benchmark(
sess,
xy_sparse.op,
name=name_template %
(device_str, dense_shape[-1], batch_size, num_threads),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value
},
min_iters=50)
def benchmark_sparse_dense_conversion(self):
sparsity = 0.05
for batch_size in [1, 16]:
for num_threads in [1, 4, 12]:
dense_shape = [batch_size, 750, 750]
for device in [CPU, GPU]:
if device == GPU and not test_util.is_gpu_available():
continue
with ops.Graph().as_default(), ops.device(device):
mats = random_ops.random_uniform(dense_shape, dtype=dtypes.float32)
mats_locs = array_ops.where(mats > 1.0 - sparsity)
sparse_matrices = sparse_csr_matrix_ops.dense_to_csr_sparse_matrix(
mats, mats_locs)
dense_matrices = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
sparse_matrices, type=dtypes.float32)
nnz = math_ops.reduce_sum(
sparse_csr_matrix_ops.sparse_matrix_nnz(sparse_matrices))
ratio = math_ops.cast(nnz, dtypes.float32) / np.prod(dense_shape)
with session.Session(
config=config_pb2.ConfigProto(
intra_op_parallelism_threads=num_threads)) as sess:
nnz_value, ratio_value = self.evaluate((nnz, ratio))
device_str = "cpu" if device == CPU else "gpu"
name_template = (
"dense_to_sparse_matrix_%s_N_%d_batch_size_%d_num_threads_%d")
self.run_op_benchmark(
sess,
sparse_matrices.op,
name=name_template %
(device_str, dense_shape[-1], batch_size, num_threads),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value,
},
min_iters=50)
name_template = (
"sparse_matrix_to_dense_%s_N_%d_batch_size_%d_num_threads_%d")
self.run_op_benchmark(
sess,
dense_matrices.op,
name=name_template %
(device_str, dense_shape[-1], batch_size, num_threads),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value,
},
min_iters=50)
def benchmark_sparse_cholesky(self):
# TODO(anudhyan): Use conversions from SparseTensor instead of to get this
# benchmark working for larger matrices. For this to work without GPU, we
# need to write CPU kernels for SparseTensor conversions.
num_rows = 500
density = 0.01
# pylint: disable=g-long-lambda
sparsify = lambda m: array_ops.where(m > 1. - density, m,
array_ops.zeros_like(m))
# pylint: enable=g-long-lambda
for batch_size in [1, 16]:
for num_threads in [1, 4, 12]:
dense_shape = [batch_size, num_rows, num_rows]
with ops.Graph().as_default(), ops.device(CPU):
# Create a "random" SPD matrix, by choosing each entry of A between
# 0 and 1 at the specified density, and computing 0.5(A + At) + n*I.
# This ensures diagonal dominance which implies positive-definiteness.
dense_matrix = sparsify(
random_ops.random_uniform(dense_shape, dtype=dtypes.float32))
spd_dense_matrix = (
0.5 *
(dense_matrix + array_ops.transpose(dense_matrix, perm=[0, 2, 1]))
+ num_rows *
linalg_ops.eye(dense_shape[-1], batch_shape=[batch_size]))
# Convert to SparseMatrix and invoke Sparse Cholesky factorization
# with AMD Ordering.
sparse_matrix = dense_to_csr_sparse_matrix(spd_dense_matrix)
ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(
sparse_matrix)
cholesky_sparse_matrix = (
sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(
sparse_matrix, ordering_amd, type=dtypes.float32))
nnz = math_ops.reduce_sum(
sparse_csr_matrix_ops.sparse_matrix_nnz(sparse_matrix))
ratio = math_ops.cast(nnz, dtypes.float32) / np.prod(dense_shape)
ordering_amd_name_template = (
"sparse_matrix_ordering_amd_cpu_N_%d_batch_size_%d_threads_%d")
sparse_cholesky_name_template = (
"sparse_matrix_sparse_cholesky_cpu_N_%d_batch_size_%d_threads_%d")
with session.Session(
config=config_pb2.ConfigProto(
intra_op_parallelism_threads=num_threads)) as sess:
nnz_value, ratio_value = self.evaluate((nnz, ratio))
self.run_op_benchmark(
sess,
ordering_amd.op,
name=ordering_amd_name_template %
(dense_shape[-1], batch_size, num_threads),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value
},
min_iters=25)
self.run_op_benchmark(
sess,
cholesky_sparse_matrix.op,
name=sparse_cholesky_name_template %
(dense_shape[-1], batch_size, num_threads),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value
},
min_iters=25)
if __name__ == "__main__":
test.main()
|
CSRSparseMatrixOpsBenchmark
|
python
|
pytorch__pytorch
|
test/distributed/test_store.py
|
{
"start": 8923,
"end": 10331
}
|
class ____(TestCase, StoreTestBase):
def setUp(self):
super().setUp()
self.file = tempfile.NamedTemporaryFile(delete=False)
def _create_store(self):
store = dist.FileStore(self.file.name, 1)
store.set_timeout(timedelta(seconds=300))
return store
def test_init_pg_and_rpc_with_same_file(self):
file = tempfile.NamedTemporaryFile(delete=False)
# Init RPC using file
rpc_backend_options = rpc.TensorPipeRpcBackendOptions()
rpc_backend_options.init_method = f"file://{file.name}"
rpc_backend_options._transports = tp_transports()
rpc.init_rpc(
"worker", rank=0, world_size=1, rpc_backend_options=rpc_backend_options
)
# Init PG using file
dist.init_process_group(
"gloo", rank=0, world_size=1, init_method=f"file://{file.name}"
)
dist.destroy_process_group()
assert os.path.exists(file.name)
rpc.shutdown()
os.remove(file.name)
def test_refcount(self):
file = tempfile.NamedTemporaryFile(delete=False)
store = dist.FileStore(file.name, 1)
store2 = dist.FileStore(file.name, 1)
del store
assert os.path.exists(file.name)
del store2
assert not os.path.exists(file.name)
@property
def num_keys_total(self):
return 6
@skip_if_win32()
|
FileStoreTest
|
python
|
Textualize__textual
|
src/textual/await_remove.py
|
{
"start": 362,
"end": 1345
}
|
class ____:
"""An awaitable that waits for nodes to be removed."""
def __init__(
self, tasks: list[Task], post_remove: CallbackType | None = None
) -> None:
self._tasks = tasks
self._post_remove = post_remove
self._caller = get_caller_file_and_line()
def __rich_repr__(self) -> rich.repr.Result:
yield "tasks", self._tasks
yield "post_remove", self._post_remove
yield "caller", self._caller, None
async def __call__(self) -> None:
await self
def __await__(self) -> Generator[None, None, None]:
current_task = asyncio.current_task()
tasks = [task for task in self._tasks if task is not current_task]
async def await_prune() -> None:
"""Wait for the prune operation to finish."""
await gather(*tasks)
if self._post_remove is not None:
await invoke(self._post_remove)
return await_prune().__await__()
|
AwaitRemove
|
python
|
milvus-io__pymilvus
|
pymilvus/exceptions.py
|
{
"start": 3729,
"end": 3842
}
|
class ____(MilvusException):
"""Raise when server version is incompatible"""
|
ServerVersionIncompatibleException
|
python
|
redis__redis-py
|
redis/asyncio/multidb/event.py
|
{
"start": 1042,
"end": 1797
}
|
class ____(AsyncEventListenerInterface):
"""
Re-subscribe the currently active pub / sub to a new active database.
"""
async def listen(self, event: AsyncActiveDatabaseChanged):
old_pubsub = event.command_executor.active_pubsub
if old_pubsub is not None:
# Re-assign old channels and patterns so they will be automatically subscribed on connection.
new_pubsub = event.new_database.client.pubsub(**event.kwargs)
new_pubsub.channels = old_pubsub.channels
new_pubsub.patterns = old_pubsub.patterns
await new_pubsub.on_connect(None)
event.command_executor.active_pubsub = new_pubsub
await old_pubsub.aclose()
|
ResubscribeOnActiveDatabaseChanged
|
python
|
apache__airflow
|
airflow-core/tests/unit/models/test_xcom.py
|
{
"start": 4014,
"end": 6907
}
|
class ____:
@conf_vars({("core", "xcom_backend"): "unit.models.test_xcom.CustomXCom"})
def test_resolve_xcom_class(self):
cls = resolve_xcom_backend()
assert issubclass(cls, CustomXCom)
@conf_vars({("core", "xcom_backend"): ""})
def test_resolve_xcom_class_fallback_to_basexcom(self):
cls = resolve_xcom_backend()
assert issubclass(cls, BaseXCom)
assert cls.serialize_value([1]) == [1]
@conf_vars({("core", "xcom_backend"): "to be removed"})
def test_resolve_xcom_class_fallback_to_basexcom_no_config(self):
from airflow.sdk.configuration import conf as sdk_conf
conf.remove_option("core", "xcom_backend")
sdk_conf.remove_option("core", "xcom_backend")
cls = resolve_xcom_backend()
assert issubclass(cls, BaseXCom)
assert cls.serialize_value([1]) == [1]
@skip_if_force_lowest_dependencies_marker
@mock.patch("airflow.sdk.execution_time.xcom.conf.getimport")
def test_set_serialize_call_current_signature(self, get_import, task_instance, mock_supervisor_comms):
"""
When XCom.serialize_value includes params logical_date, key, dag_id, task_id and run_id,
then XCom.set should pass all of them.
"""
serialize_watcher = MagicMock()
class CurrentSignatureXCom(BaseXCom):
@staticmethod
def serialize_value(
value,
key=None,
dag_id=None,
task_id=None,
run_id=None,
map_index=None,
):
serialize_watcher(
value=value,
key=key,
dag_id=dag_id,
task_id=task_id,
run_id=run_id,
map_index=map_index,
)
return json.dumps(value)
get_import.return_value = CurrentSignatureXCom
XCom = resolve_xcom_backend()
XCom.set(
key=XCom.XCOM_RETURN_KEY,
value={"my_xcom_key": "my_xcom_value"},
dag_id=task_instance.dag_id,
task_id=task_instance.task_id,
run_id=task_instance.run_id,
map_index=-1,
)
serialize_watcher.assert_called_once_with(
key=XCom.XCOM_RETURN_KEY,
value={"my_xcom_key": "my_xcom_value"},
dag_id=task_instance.dag_id,
task_id=task_instance.task_id,
run_id=task_instance.run_id,
map_index=-1,
)
@pytest.fixture
def push_simple_json_xcom(session):
def func(*, ti: TaskInstance, key: str, value):
return XComModel.set(
key=key,
value=value,
dag_id=ti.dag_id,
task_id=ti.task_id,
run_id=ti.run_id,
session=session,
)
return func
|
TestXCom
|
python
|
Netflix__metaflow
|
metaflow/plugins/env_escape/override_decorators.py
|
{
"start": 2668,
"end": 2992
}
|
class ____(object):
def __init__(self, class_path, deserializer):
self._class_path = class_path
self._deserializer = deserializer
@property
def class_path(self):
return self._class_path
@property
def deserializer(self):
return self._deserializer
|
LocalExceptionDeserializer
|
python
|
django__django
|
tests/urlpatterns_reverse/middleware.py
|
{
"start": 176,
"end": 315
}
|
class ____(MiddlewareMixin):
def process_request(self, request):
request.urlconf = urlconf_inner.__name__
|
ChangeURLconfMiddleware
|
python
|
walkccc__LeetCode
|
solutions/438. Find All Anagrams in a String/438.py
|
{
"start": 0,
"end": 430
}
|
class ____:
def findAnagrams(self, s: str, p: str) -> list[int]:
ans = []
count = collections.Counter(p)
required = len(p)
for r, c in enumerate(s):
count[c] -= 1
if count[c] >= 0:
required -= 1
if r >= len(p):
count[s[r - len(p)]] += 1
if count[s[r - len(p)]] > 0:
required += 1
if required == 0:
ans.append(r - len(p) + 1)
return ans
|
Solution
|
python
|
PrefectHQ__prefect
|
src/prefect/utilities/dockerutils.py
|
{
"start": 4240,
"end": 6532
}
|
class ____(Exception):
"""Raised when a Docker build fails"""
# Labels to apply to all images built with Prefect
IMAGE_LABELS = {
"io.prefect.version": prefect.__version__,
}
@silence_docker_warnings()
def build_image(
context: Path,
dockerfile: str = "Dockerfile",
tag: Optional[str] = None,
pull: bool = False,
platform: Optional[str] = None,
stream_progress_to: Optional[TextIO] = None,
**kwargs: Any,
) -> str:
"""Builds a Docker image, returning the image ID
Args:
context: the root directory for the Docker build context
dockerfile: the path to the Dockerfile, relative to the context
tag: the tag to give this image
pull: True to pull the base image during the build
stream_progress_to: an optional stream (like sys.stdout, or an io.TextIO) that
will collect the build output as it is reported by Docker
Returns:
The image ID
"""
if not context:
raise ValueError("context required to build an image")
if not Path(context).exists():
raise ValueError(f"Context path {context} does not exist")
kwargs = {key: kwargs[key] for key in kwargs if key not in ["decode", "labels"]}
image_id = None
with docker_client() as client:
events = client.api.build(
path=str(context),
tag=tag,
dockerfile=dockerfile,
pull=pull,
decode=True,
labels=IMAGE_LABELS,
platform=platform,
**kwargs,
)
try:
for event in events:
if "stream" in event:
if not stream_progress_to:
continue
stream_progress_to.write(event["stream"])
stream_progress_to.flush()
elif "aux" in event:
image_id = event["aux"]["ID"]
elif "error" in event:
raise BuildError(event["error"])
elif "message" in event:
raise BuildError(event["message"])
except docker.errors.APIError as e:
raise BuildError(e.explanation) from e
assert image_id, "The Docker daemon did not return an image ID"
return image_id
|
BuildError
|
python
|
apache__airflow
|
providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/sensors/spark_kubernetes.py
|
{
"start": 1223,
"end": 5371
}
|
class ____(BaseSensorOperator):
"""
Checks sparkApplication object in kubernetes cluster.
.. seealso::
For more detail about Spark Application Object have a look at the reference:
https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/blob/v1beta2-1.1.0-2.4.5/docs/api-docs.md#sparkapplication
:param application_name: spark Application resource name
:param namespace: the kubernetes namespace where the sparkApplication reside in
:param container_name: the kubernetes container name where the sparkApplication reside in
:param kubernetes_conn_id: The :ref:`kubernetes connection<howto/connection:kubernetes>`
to Kubernetes cluster.
:param attach_log: determines whether logs for driver pod should be appended to the sensor log
:param api_group: kubernetes api group of sparkApplication
:param api_version: kubernetes api version of sparkApplication
"""
template_fields: Sequence[str] = ("application_name", "namespace")
FAILURE_STATES = ("FAILED", "UNKNOWN")
SUCCESS_STATES = ("COMPLETED",)
def __init__(
self,
*,
application_name: str,
attach_log: bool = False,
namespace: str | None = None,
container_name: str = "spark-kubernetes-driver",
kubernetes_conn_id: str = "kubernetes_default",
api_group: str = "sparkoperator.k8s.io",
api_version: str = "v1beta2",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.application_name = application_name
self.attach_log = attach_log
self.namespace = namespace
self.container_name = container_name
self.kubernetes_conn_id = kubernetes_conn_id
self.api_group = api_group
self.api_version = api_version
@cached_property
def hook(self) -> KubernetesHook:
return KubernetesHook(conn_id=self.kubernetes_conn_id)
def _log_driver(self, application_state: str, response: dict) -> None:
if not self.attach_log:
return
status_info = response["status"]
if "driverInfo" not in status_info:
return
driver_info = status_info["driverInfo"]
if "podName" not in driver_info:
return
driver_pod_name = driver_info["podName"]
namespace = response["metadata"]["namespace"]
log_method = self.log.error if application_state in self.FAILURE_STATES else self.log.info
try:
log = ""
for line in self.hook.get_pod_logs(
driver_pod_name, namespace=namespace, container=self.container_name
):
log += line.decode()
log_method(log)
except client.rest.ApiException as e:
self.log.warning(
"Could not read logs for pod %s. It may have been disposed.\n"
"Make sure timeToLiveSeconds is set on your SparkApplication spec.\n"
"underlying exception: %s",
driver_pod_name,
e,
)
def poke(self, context: Context) -> bool:
self.log.info("Poking: %s", self.application_name)
response = self.hook.get_custom_object(
group=self.api_group,
version=self.api_version,
plural="sparkapplications",
name=self.application_name,
namespace=self.namespace,
)
try:
application_state = response["status"]["applicationState"]["state"]
except KeyError:
return False
if self.attach_log and application_state in self.FAILURE_STATES + self.SUCCESS_STATES:
self._log_driver(application_state, response)
if application_state in self.FAILURE_STATES:
message = f"Spark application failed with state: {application_state}"
raise AirflowException(message)
if application_state in self.SUCCESS_STATES:
self.log.info("Spark application ended successfully")
return True
self.log.info("Spark application is still in state: %s", application_state)
return False
|
SparkKubernetesSensor
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/transfers/gcs_to_local.py
|
{
"start": 1278,
"end": 5851
}
|
class ____(BaseOperator):
"""
Downloads a file from Google Cloud Storage.
If a filename is supplied, it writes the file to the specified location, alternatively one can
set the ``store_to_xcom_key`` parameter to True push the file content into xcom. When the file size
exceeds the maximum size for xcom it is recommended to write to a file.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GCSToLocalFilesystemOperator`
:param bucket: The Google Cloud Storage bucket where the object is.
Must not contain 'gs://' prefix. (templated)
:param object_name: The name of the object to download in the Google cloud
storage bucket. (templated)
:param filename: The file path, including filename, on the local file system (where the
operator is being executed) that the file should be downloaded to. (templated)
If no filename passed, the downloaded data will not be stored on the local file
system.
:param store_to_xcom_key: If this param is set, the operator will push
the contents of the downloaded file to XCom with the key set in this
parameter. If not set, the downloaded data will not be pushed to XCom. (templated)
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param file_encoding: Optional encoding used to decode file_bytes into a serializable
string that is suitable for storing to XCom. (templated).
"""
template_fields: Sequence[str] = (
"bucket",
"object_name",
"filename",
"store_to_xcom_key",
"impersonation_chain",
"file_encoding",
)
ui_color = "#f0eee4"
def __init__(
self,
*,
bucket: str,
object_name: str,
filename: str | None = None,
store_to_xcom_key: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
file_encoding: str = "utf-8",
**kwargs,
) -> None:
if filename is not None and store_to_xcom_key is not None:
raise ValueError("Either filename or store_to_xcom_key can be set")
super().__init__(**kwargs)
self.bucket = bucket
self.filename = filename
self.object_name = object_name
self.store_to_xcom_key = store_to_xcom_key
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.file_encoding = file_encoding
def execute(self, context: Context):
self.log.info("Executing download: %s, %s, %s", self.bucket, self.object_name, self.filename)
hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if self.store_to_xcom_key:
file_size = hook.get_size(bucket_name=self.bucket, object_name=self.object_name)
if file_size < MAX_XCOM_SIZE:
file_bytes = hook.download(bucket_name=self.bucket, object_name=self.object_name)
context["ti"].xcom_push(key=self.store_to_xcom_key, value=str(file_bytes, self.file_encoding))
else:
raise AirflowException("The size of the downloaded file is too large to push to XCom!")
else:
hook.download(bucket_name=self.bucket, object_name=self.object_name, filename=self.filename)
def get_openlineage_facets_on_start(self):
from airflow.providers.common.compat.openlineage.facet import Dataset
from airflow.providers.openlineage.extractors import OperatorLineage
return OperatorLineage(
inputs=[Dataset(namespace=f"gs://{self.bucket}", name=self.object_name)],
outputs=[Dataset(namespace="file", name=self.filename)] if self.filename else [],
)
|
GCSToLocalFilesystemOperator
|
python
|
django__django
|
tests/db_functions/math/test_cot.py
|
{
"start": 268,
"end": 2292
}
|
class ____(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_cot=Cot("normal")).first()
self.assertIsNone(obj.null_cot)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal("-12.9"), n2=Decimal("0.6"))
obj = DecimalModel.objects.annotate(n1_cot=Cot("n1"), n2_cot=Cot("n2")).first()
self.assertIsInstance(obj.n1_cot, Decimal)
self.assertIsInstance(obj.n2_cot, Decimal)
self.assertAlmostEqual(obj.n1_cot, Decimal(1 / math.tan(obj.n1)))
self.assertAlmostEqual(obj.n2_cot, Decimal(1 / math.tan(obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=-27.5, f2=0.33)
obj = FloatModel.objects.annotate(f1_cot=Cot("f1"), f2_cot=Cot("f2")).first()
self.assertIsInstance(obj.f1_cot, float)
self.assertIsInstance(obj.f2_cot, float)
self.assertAlmostEqual(obj.f1_cot, 1 / math.tan(obj.f1))
self.assertAlmostEqual(obj.f2_cot, 1 / math.tan(obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=-5, normal=15, big=-1)
obj = IntegerModel.objects.annotate(
small_cot=Cot("small"),
normal_cot=Cot("normal"),
big_cot=Cot("big"),
).first()
self.assertIsInstance(obj.small_cot, float)
self.assertIsInstance(obj.normal_cot, float)
self.assertIsInstance(obj.big_cot, float)
self.assertAlmostEqual(obj.small_cot, 1 / math.tan(obj.small))
self.assertAlmostEqual(obj.normal_cot, 1 / math.tan(obj.normal))
self.assertAlmostEqual(obj.big_cot, 1 / math.tan(obj.big))
def test_transform(self):
with register_lookup(DecimalField, Cot):
DecimalModel.objects.create(n1=Decimal("12.0"), n2=Decimal("0"))
DecimalModel.objects.create(n1=Decimal("1.0"), n2=Decimal("0"))
obj = DecimalModel.objects.filter(n1__cot__gt=0).get()
self.assertEqual(obj.n1, Decimal("1.0"))
|
CotTests
|
python
|
apache__airflow
|
providers/alibaba/src/airflow/providers/alibaba/cloud/operators/analyticdb_spark.py
|
{
"start": 5322,
"end": 8139
}
|
class ____(AnalyticDBSparkBaseOperator):
"""
Submits a Spark batch application to the underlying cluster; wraps the AnalyticDB Spark REST API.
:param file: path of the file containing the application to execute.
:param class_name: name of the application Java/Spark main class.
:param args: application command line arguments.
:param conf: Spark configuration properties.
:param jars: jars to be used in this application.
:param py_files: python files to be used in this application.
:param files: files to be used in this application.
:param driver_resource_spec: The resource specifications of the Spark driver.
:param executor_resource_spec: The resource specifications of each Spark executor.
:param num_executors: number of executors to launch for this application.
:param archives: archives to be used in this application.
:param name: name of this application.
:param cluster_id: The cluster ID of AnalyticDB MySQL 3.0 Data Lakehouse.
:param rg_name: The name of resource group in AnalyticDB MySQL 3.0 Data Lakehouse cluster.
"""
template_fields: Sequence[str] = ("spark_params",)
template_fields_renderers = {"spark_params": "json"}
def __init__(
self,
*,
file: str,
class_name: str | None = None,
args: Sequence[str | int | float] | None = None,
conf: dict[Any, Any] | None = None,
jars: Sequence[str] | None = None,
py_files: Sequence[str] | None = None,
files: Sequence[str] | None = None,
driver_resource_spec: str | None = None,
executor_resource_spec: str | None = None,
num_executors: int | str | None = None,
archives: Sequence[str] | None = None,
name: str | None = None,
cluster_id: str,
rg_name: str,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
spark_params = {
"file": file,
"class_name": class_name,
"args": args,
"conf": conf,
"jars": jars,
"py_files": py_files,
"files": files,
"driver_resource_spec": driver_resource_spec,
"executor_resource_spec": executor_resource_spec,
"num_executors": num_executors,
"archives": archives,
"name": name,
}
self.spark_params = spark_params
self._cluster_id = cluster_id
self._rg_name = rg_name
def execute(self, context: Context) -> Any:
submit_response = self.hook.submit_spark_app(
cluster_id=self._cluster_id, rg_name=self._rg_name, **self.spark_params
)
self.app_id = submit_response.body.data.app_id
self.monitor_application()
return self.app_id
|
AnalyticDBSparkBatchOperator
|
python
|
getsentry__sentry
|
tests/sentry/ratelimits/utils/test_enforce_rate_limit.py
|
{
"start": 1628,
"end": 1962
}
|
class ____(APITestCase):
endpoint = "unenforced-endpoint"
def test_unenforced_rate_limit(self) -> None:
"""Endpoints with enforce_rate_limit disabled shouldn't reject requests"""
with freeze_time("2000-01-01"):
self.get_success_response()
self.get_success_response()
|
UnEnforceRateLimitTest
|
python
|
apache__avro
|
lang/py/avro/ipc.py
|
{
"start": 15888,
"end": 17015
}
|
class ____:
"""Wrapper around a file-like object to write framed data."""
def __init__(self, writer):
self._writer = writer
# read-only properties
@property
def writer(self):
return self._writer
def write_framed_message(self, message):
message_length = len(message)
total_bytes_sent = 0
while message_length - total_bytes_sent > 0:
if message_length - total_bytes_sent > BUFFER_SIZE:
buffer_length = BUFFER_SIZE
else:
buffer_length = message_length - total_bytes_sent
self.write_buffer(message[total_bytes_sent : (total_bytes_sent + buffer_length)])
total_bytes_sent += buffer_length
# A message is always terminated by a zero-length buffer.
self.write_buffer_length(0)
def write_buffer(self, chunk):
buffer_length = len(chunk)
self.write_buffer_length(buffer_length)
self.writer.write(chunk)
def write_buffer_length(self, n):
self.writer.write(BIG_ENDIAN_INT_STRUCT.pack(n))
#
# Transceiver Implementations
#
|
FramedWriter
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/backends/backend_pgf.py
|
{
"start": 29488,
"end": 34774
}
|
class ____(FigureCanvasBase):
filetypes = {"pgf": "LaTeX PGF picture",
"pdf": "LaTeX compiled PGF picture",
"png": "Portable Network Graphics", }
def get_default_filetype(self):
return 'pdf'
def _print_pgf_to_fh(self, fh, *, bbox_inches_restore=None):
header_text = """%% Creator: Matplotlib, PGF backend
%%
%% To include the figure in your LaTeX document, write
%% \\input{<filename>.pgf}
%%
%% Make sure the required packages are loaded in your preamble
%% \\usepackage{pgf}
%%
%% Also ensure that all the required font packages are loaded; for instance,
%% the lmodern package is sometimes necessary when using math font.
%% \\usepackage{lmodern}
%%
%% Figures using additional raster images can only be included by \\input if
%% they are in the same directory as the main LaTeX file. For loading figures
%% from other directories you can use the `import` package
%% \\usepackage{import}
%%
%% and then include the figures with
%% \\import{<path to file>}{<filename>.pgf}
%%
"""
# append the preamble used by the backend as a comment for debugging
header_info_preamble = ["%% Matplotlib used the following preamble"]
for line in _get_preamble().splitlines():
header_info_preamble.append("%% " + line)
header_info_preamble.append("%%")
header_info_preamble = "\n".join(header_info_preamble)
# get figure size in inch
w, h = self.figure.get_figwidth(), self.figure.get_figheight()
dpi = self.figure.dpi
# create pgfpicture environment and write the pgf code
fh.write(header_text)
fh.write(header_info_preamble)
fh.write("\n")
_writeln(fh, r"\begingroup")
_writeln(fh, r"\makeatletter")
_writeln(fh, r"\begin{pgfpicture}")
_writeln(fh,
r"\pgfpathrectangle{\pgfpointorigin}{\pgfqpoint{%fin}{%fin}}"
% (w, h))
_writeln(fh, r"\pgfusepath{use as bounding box, clip}")
renderer = MixedModeRenderer(self.figure, w, h, dpi,
RendererPgf(self.figure, fh),
bbox_inches_restore=bbox_inches_restore)
self.figure.draw(renderer)
# end the pgfpicture environment
_writeln(fh, r"\end{pgfpicture}")
_writeln(fh, r"\makeatother")
_writeln(fh, r"\endgroup")
def print_pgf(self, fname_or_fh, **kwargs):
"""
Output pgf macros for drawing the figure so it can be included and
rendered in latex documents.
"""
with cbook.open_file_cm(fname_or_fh, "w", encoding="utf-8") as file:
if not cbook.file_requires_unicode(file):
file = codecs.getwriter("utf-8")(file)
self._print_pgf_to_fh(file, **kwargs)
def print_pdf(self, fname_or_fh, *, metadata=None, **kwargs):
"""Use LaTeX to compile a pgf generated figure to pdf."""
w, h = self.figure.get_size_inches()
info_dict = _create_pdf_info_dict('pgf', metadata or {})
pdfinfo = ','.join(
_metadata_to_str(k, v) for k, v in info_dict.items())
# print figure to pgf and compile it with latex
with TemporaryDirectory() as tmpdir:
tmppath = pathlib.Path(tmpdir)
self.print_pgf(tmppath / "figure.pgf", **kwargs)
(tmppath / "figure.tex").write_text(
"\n".join([
_DOCUMENTCLASS,
r"\usepackage[pdfinfo={%s}]{hyperref}" % pdfinfo,
r"\usepackage[papersize={%fin,%fin}, margin=0in]{geometry}"
% (w, h),
r"\usepackage{pgf}",
_get_preamble(),
r"\begin{document}",
r"\centering",
r"\input{figure.pgf}",
r"\end{document}",
]), encoding="utf-8")
texcommand = mpl.rcParams["pgf.texsystem"]
cbook._check_and_log_subprocess(
[texcommand, "-interaction=nonstopmode", "-halt-on-error",
"figure.tex"], _log, cwd=tmpdir)
with ((tmppath / "figure.pdf").open("rb") as orig,
cbook.open_file_cm(fname_or_fh, "wb") as dest):
shutil.copyfileobj(orig, dest) # copy file contents to target
def print_png(self, fname_or_fh, **kwargs):
"""Use LaTeX to compile a pgf figure to pdf and convert it to png."""
converter = make_pdf_to_png_converter()
with TemporaryDirectory() as tmpdir:
tmppath = pathlib.Path(tmpdir)
pdf_path = tmppath / "figure.pdf"
png_path = tmppath / "figure.png"
self.print_pdf(pdf_path, **kwargs)
converter(pdf_path, png_path, dpi=self.figure.dpi)
with (png_path.open("rb") as orig,
cbook.open_file_cm(fname_or_fh, "wb") as dest):
shutil.copyfileobj(orig, dest) # copy file contents to target
def get_renderer(self):
return RendererPgf(self.figure, None)
def draw(self):
self.figure.draw_without_rendering()
return super().draw()
FigureManagerPgf = FigureManagerBase
@_Backend.export
|
FigureCanvasPgf
|
python
|
neetcode-gh__leetcode
|
python/0269-alien-dictionary.py
|
{
"start": 0,
"end": 1044
}
|
class ____:
def alienOrder(self, words: List[str]) -> str:
adj = {char: set() for word in words for char in word}
for i in range(len(words) - 1):
w1, w2 = words[i], words[i + 1]
minLen = min(len(w1), len(w2))
if len(w1) > len(w2) and w1[:minLen] == w2[:minLen]:
return ""
for j in range(minLen):
if w1[j] != w2[j]:
print(w1[j], w2[j])
adj[w1[j]].add(w2[j])
break
visited = {} # {char: bool} False visited, True current path
res = []
def dfs(char):
if char in visited:
return visited[char]
visited[char] = True
for neighChar in adj[char]:
if dfs(neighChar):
return True
visited[char] = False
res.append(char)
for char in adj:
if dfs(char):
return ""
res.reverse()
return "".join(res)
|
Solution
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/core/property/test_include.py
|
{
"start": 1407,
"end": 4135
}
|
class ____:
def test_include_with_prefix(self) -> None:
class IncludesDelegateWithPrefix(HasProps):
z = bcpi.Include(IsDelegate, prefix="z")
o = IncludesDelegateWithPrefix()
assert o.z_x == 12
assert o.z_y == "hello"
assert not hasattr(o, 'z')
assert not hasattr(o, 'x')
assert not hasattr(o, 'y')
assert 'z' not in o.properties_with_values(include_defaults=True)
assert 'x' not in o.properties_with_values(include_defaults=True)
assert 'y' not in o.properties_with_values(include_defaults=True)
assert 'z_x' in o.properties_with_values(include_defaults=True)
assert 'z_y' in o.properties_with_values(include_defaults=True)
assert 'z_x' not in o.properties_with_values(include_defaults=False)
assert 'z_y' not in o.properties_with_values(include_defaults=False)
def test_include_without_prefix(self) -> None:
class IncludesDelegateWithoutPrefix(HasProps):
z = bcpi.Include(IsDelegate)
o = IncludesDelegateWithoutPrefix()
assert o.x == 12
assert o.y == "hello"
assert not hasattr(o, 'z')
assert 'x' in o.properties_with_values(include_defaults=True)
assert 'y' in o.properties_with_values(include_defaults=True)
assert 'x' not in o.properties_with_values(include_defaults=False)
assert 'y' not in o.properties_with_values(include_defaults=False)
def test_include_without_prefix_using_override(self) -> None:
class IncludesDelegateWithoutPrefixUsingOverride(HasProps):
z = bcpi.Include(IsDelegate)
y = Override(default="world") # override the Include changing just the default
o = IncludesDelegateWithoutPrefixUsingOverride()
assert o.x == 12
assert o.y == 'world'
assert not hasattr(o, 'z')
assert 'x' in o.properties_with_values(include_defaults=True)
assert 'y' in o.properties_with_values(include_defaults=True)
assert 'x' not in o.properties_with_values(include_defaults=False)
assert 'y' not in o.properties_with_values(include_defaults=False)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
Test___all__ = verify_all(bcpi, ALL)
|
Test_Include
|
python
|
walkccc__LeetCode
|
solutions/3082. Find the Sum of the Power of All Subsequences/3082-3.py
|
{
"start": 0,
"end": 384
}
|
class ____:
def sumOfPower(self, nums: list[int], k: int) -> int:
MOD = 1_000_000_007
# dp[i] := the number of subsequences in nums so far that sums to k
dp = [1] + [0] * k
for num in nums:
for i in range(k, -1, -1):
if i < num:
dp[i] = (dp[i] * 2) % MOD
else:
dp[i] = (dp[i] * 2 + dp[i - num]) % MOD
return dp[k]
|
Solution
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 919084,
"end": 919467
}
|
class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("actor", "branch_protection_rule")
actor = sgqlc.types.Field("PushAllowanceActor", graphql_name="actor")
branch_protection_rule = sgqlc.types.Field(
BranchProtectionRule, graphql_name="branchProtectionRule"
)
|
PushAllowance
|
python
|
python__mypy
|
mypy/plugin.py
|
{
"start": 17252,
"end": 18438
}
|
class ____(NamedTuple):
arg_types: list[list[Type]] # List of actual caller types for each formal argument
arg_kinds: list[list[ArgKind]] # Ditto for argument kinds, see nodes.ARG_* constants
# Names of formal parameters from the callee definition,
# these will be sufficient in most cases.
callee_arg_names: list[str | None]
# Names of actual arguments in the call expression. For example,
# in a situation like this:
# def func(**kwargs) -> None:
# pass
# func(kw1=1, kw2=2)
# callee_arg_names will be ['kwargs'] and arg_names will be [['kw1', 'kw2']].
arg_names: list[list[str | None]]
default_return_type: Type # Return type inferred from signature
args: list[list[Expression]] # Actual expressions for each formal argument
context: Context # Relevant location context (e.g. for error messages)
api: CheckerPluginInterface
# A context for a method signature hook that infers a better signature for a
# method. Note that argument types aren't available yet. If you need them,
# you have to use a method hook instead.
# TODO: document ProperType in the plugin changelog/update issue.
|
FunctionContext
|
python
|
python-openxml__python-docx
|
src/docx/image/image.py
|
{
"start": 387,
"end": 6381
}
|
class ____:
"""Graphical image stream such as JPEG, PNG, or GIF with properties and methods
required by ImagePart."""
def __init__(self, blob: bytes, filename: str, image_header: BaseImageHeader):
super(Image, self).__init__()
self._blob = blob
self._filename = filename
self._image_header = image_header
@classmethod
def from_blob(cls, blob: bytes) -> Image:
"""Return a new |Image| subclass instance parsed from the image binary contained
in `blob`."""
stream = io.BytesIO(blob)
return cls._from_stream(stream, blob)
@classmethod
def from_file(cls, image_descriptor: str | IO[bytes]):
"""Return a new |Image| subclass instance loaded from the image file identified
by `image_descriptor`, a path or file-like object."""
if isinstance(image_descriptor, str):
path = image_descriptor
with open(path, "rb") as f:
blob = f.read()
stream = io.BytesIO(blob)
filename = os.path.basename(path)
else:
stream = image_descriptor
stream.seek(0)
blob = stream.read()
filename = None
return cls._from_stream(stream, blob, filename)
@property
def blob(self):
"""The bytes of the image 'file'."""
return self._blob
@property
def content_type(self) -> str:
"""MIME content type for this image, e.g. ``'image/jpeg'`` for a JPEG image."""
return self._image_header.content_type
@lazyproperty
def ext(self):
"""The file extension for the image.
If an actual one is available from a load filename it is used. Otherwise a
canonical extension is assigned based on the content type. Does not contain the
leading period, e.g. 'jpg', not '.jpg'.
"""
return os.path.splitext(self._filename)[1][1:]
@property
def filename(self):
"""Original image file name, if loaded from disk, or a generic filename if
loaded from an anonymous stream."""
return self._filename
@property
def px_width(self) -> int:
"""The horizontal pixel dimension of the image."""
return self._image_header.px_width
@property
def px_height(self) -> int:
"""The vertical pixel dimension of the image."""
return self._image_header.px_height
@property
def horz_dpi(self) -> int:
"""Integer dots per inch for the width of this image.
Defaults to 72 when not present in the file, as is often the case.
"""
return self._image_header.horz_dpi
@property
def vert_dpi(self) -> int:
"""Integer dots per inch for the height of this image.
Defaults to 72 when not present in the file, as is often the case.
"""
return self._image_header.vert_dpi
@property
def width(self) -> Inches:
"""A |Length| value representing the native width of the image, calculated from
the values of `px_width` and `horz_dpi`."""
return Inches(self.px_width / self.horz_dpi)
@property
def height(self) -> Inches:
"""A |Length| value representing the native height of the image, calculated from
the values of `px_height` and `vert_dpi`."""
return Inches(self.px_height / self.vert_dpi)
def scaled_dimensions(
self, width: int | Length | None = None, height: int | Length | None = None
) -> Tuple[Length, Length]:
"""(cx, cy) pair representing scaled dimensions of this image.
The native dimensions of the image are scaled by applying the following rules to
the `width` and `height` arguments.
* If both `width` and `height` are specified, the return value is (`width`,
`height`); no scaling is performed.
* If only one is specified, it is used to compute a scaling factor that is then
applied to the unspecified dimension, preserving the aspect ratio of the image.
* If both `width` and `height` are |None|, the native dimensions are returned.
The native dimensions are calculated using the dots-per-inch (dpi) value
embedded in the image, defaulting to 72 dpi if no value is specified, as is
often the case. The returned values are both |Length| objects.
"""
if width is None and height is None:
return self.width, self.height
if width is None:
assert height is not None
scaling_factor = float(height) / float(self.height)
width = round(self.width * scaling_factor)
if height is None:
scaling_factor = float(width) / float(self.width)
height = round(self.height * scaling_factor)
return Emu(width), Emu(height)
@lazyproperty
def sha1(self):
"""SHA1 hash digest of the image blob."""
return hashlib.sha1(self._blob).hexdigest()
@classmethod
def _from_stream(
cls,
stream: IO[bytes],
blob: bytes,
filename: str | None = None,
) -> Image:
"""Return an instance of the |Image| subclass corresponding to the format of the
image in `stream`."""
image_header = _ImageHeaderFactory(stream)
if filename is None:
filename = "image.%s" % image_header.default_ext
return cls(blob, filename, image_header)
def _ImageHeaderFactory(stream: IO[bytes]):
"""A |BaseImageHeader| subclass instance that can parse headers of image in `stream`."""
from docx.image import SIGNATURES
def read_32(stream: IO[bytes]):
stream.seek(0)
return stream.read(32)
header = read_32(stream)
for cls, offset, signature_bytes in SIGNATURES:
end = offset + len(signature_bytes)
found_bytes = header[offset:end]
if found_bytes == signature_bytes:
return cls.from_stream(stream)
raise UnrecognizedImageError
|
Image
|
python
|
encode__django-rest-framework
|
tests/test_throttling.py
|
{
"start": 1630,
"end": 1777
}
|
class ____(APIView):
throttle_classes = (NonTimeThrottle,)
def get(self, request):
return Response('foo')
|
MockView_NonTimeThrottling
|
python
|
plotly__plotly.py
|
plotly/graph_objs/_mesh3d.py
|
{
"start": 215,
"end": 94824
}
|
class ____(_BaseTraceType):
_parent_path_str = ""
_path_str = "mesh3d"
_valid_props = {
"alphahull",
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"color",
"coloraxis",
"colorbar",
"colorscale",
"contour",
"customdata",
"customdatasrc",
"delaunayaxis",
"facecolor",
"facecolorsrc",
"flatshading",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatefallback",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"i",
"ids",
"idssrc",
"intensity",
"intensitymode",
"intensitysrc",
"isrc",
"j",
"jsrc",
"k",
"ksrc",
"legend",
"legendgroup",
"legendgrouptitle",
"legendrank",
"legendwidth",
"lighting",
"lightposition",
"meta",
"metasrc",
"name",
"opacity",
"reversescale",
"scene",
"showlegend",
"showscale",
"stream",
"text",
"textsrc",
"type",
"uid",
"uirevision",
"vertexcolor",
"vertexcolorsrc",
"visible",
"x",
"xcalendar",
"xhoverformat",
"xsrc",
"y",
"ycalendar",
"yhoverformat",
"ysrc",
"z",
"zcalendar",
"zhoverformat",
"zsrc",
}
@property
def alphahull(self):
"""
Determines how the mesh surface triangles are derived from the
set of vertices (points) represented by the `x`, `y` and `z`
arrays, if the `i`, `j`, `k` arrays are not supplied. For
general use of `mesh3d` it is preferred that `i`, `j`, `k` are
supplied. If "-1", Delaunay triangulation is used, which is
mainly suitable if the mesh is a single, more or less layer
surface that is perpendicular to `delaunayaxis`. In case the
`delaunayaxis` intersects the mesh surface at more than one
point it will result triangles that are very long in the
dimension of `delaunayaxis`. If ">0", the alpha-shape algorithm
is used. In this case, the positive `alphahull` value signals
the use of the alpha-shape algorithm, _and_ its value acts as
the parameter for the mesh fitting. If 0, the convex-hull
algorithm is used. It is suitable for convex bodies or if the
intention is to enclose the `x`, `y` and `z` point set into a
convex hull.
The 'alphahull' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["alphahull"]
@alphahull.setter
def alphahull(self, val):
self["alphahull"] = val
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here `intensity`) or the bounds set
in `cmin` and `cmax` Defaults to `false` when `cmin` and `cmax`
are set by the user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Value should have the
same units as `intensity` and if set, `cmin` must be set as
well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `cmin` and/or
`cmax` to be equidistant to this point. Value should have the
same units as `intensity`. Has no effect when `cauto` is
`false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Value should have the
same units as `intensity` and if set, `cmax` must be set as
well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
@property
def color(self):
"""
Sets the color of the whole mesh
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A number that will be interpreted as a color
according to mesh3d.colorscale
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.mesh3d.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Returns
-------
plotly.graph_objs.mesh3d.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
@property
def colorscale(self):
"""
Sets the colorscale. The colorscale must be an array containing
arrays mapping a normalized value to an rgb, rgba, hex, hsl,
hsv, or named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the
bounds of the colorscale in color space, use `cmin` and `cmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,Electric,
Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,RdBu,Reds,Viridis,
YlGnBu,YlOrRd.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
@property
def contour(self):
"""
The 'contour' property is an instance of Contour
that may be specified as:
- An instance of :class:`plotly.graph_objs.mesh3d.Contour`
- A dict of string/value properties that will be passed
to the Contour constructor
Returns
-------
plotly.graph_objs.mesh3d.Contour
"""
return self["contour"]
@contour.setter
def contour(self, val):
self["contour"] = val
@property
def customdata(self):
    """
    Assigns extra data to each datum. This may be useful when
    listening to hover, click and selection events. Note that
    "scatter" traces also append customdata items in the markers
    DOM elements.

    The 'customdata' property is an array that may be specified as a tuple,
    list, numpy array, or pandas Series

    Returns
    -------
    numpy.ndarray
    """
    return self["customdata"]

@customdata.setter
def customdata(self, val):
    self["customdata"] = val
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`customdata`.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
@property
def delaunayaxis(self):
"""
Sets the Delaunay axis, which is the axis that is perpendicular
to the surface of the Delaunay triangulation. It has an effect
if `i`, `j`, `k` are not provided and `alphahull` is set to
indicate Delaunay triangulation.
The 'delaunayaxis' property is an enumeration that may be specified as:
- One of the following enumeration values:
['x', 'y', 'z']
Returns
-------
Any
"""
return self["delaunayaxis"]
@delaunayaxis.setter
def delaunayaxis(self, val):
self["delaunayaxis"] = val
@property
def facecolor(self):
"""
Sets the color of each face Overrides "color" and
"vertexcolor".
The 'facecolor' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["facecolor"]
@facecolor.setter
def facecolor(self, val):
self["facecolor"] = val
@property
def facecolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`facecolor`.
The 'facecolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["facecolorsrc"]
@facecolorsrc.setter
def facecolorsrc(self, val):
self["facecolorsrc"] = val
@property
def flatshading(self):
"""
Determines whether or not normal smoothing is applied to the
meshes, creating meshes with an angular, low-poly look via flat
reflections.
The 'flatshading' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["flatshading"]
@flatshading.setter
def flatshading(self, val):
self["flatshading"] = val
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['x', 'y', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'x+y')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.mesh3d.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Returns
-------
plotly.graph_objs.mesh3d.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. Variables that can't be found will be
replaced with the specifier. For example, a template of "data:
%{x}, %{y}" will result in a value of "data: 1, %{y}" if x is 1
and y is missing. Variables with an undefined value will be
replaced with the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data described at
this link https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be specified per-
point (the ones that are `arrayOk: true`) are available.
Anything contained in tag `<extra>` is displayed in the
secondary box, for example `<extra>%{fullData.name}</extra>`.
To hide the secondary box completely, use an empty tag
`<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
@property
def hovertemplatefallback(self):
"""
Fallback string that's displayed when a variable referenced in
a template is missing. If the boolean value 'false' is passed
in, the specifier with the missing variable will be displayed.
The 'hovertemplatefallback' property accepts values of any type
Returns
-------
Any
"""
return self["hovertemplatefallback"]
@hovertemplatefallback.setter
def hovertemplatefallback(self, val):
self["hovertemplatefallback"] = val
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
@property
def hovertext(self):
    """
    Hover text for this trace; identical in meaning to `text`.

    The 'hovertext' property is a string and must be specified as:
      - A string
      - A number that will be converted to a string
      - A tuple, list, or one-dimensional numpy array of the above

    Returns
    -------
    str|numpy.ndarray
    """
    key = "hovertext"
    return self[key]

@hovertext.setter
def hovertext(self, new_value):
    self["hovertext"] = new_value
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertext`.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
@property
def i(self):
"""
A vector of vertex indices, i.e. integer values between 0 and
the length of the vertex vectors, representing the "first"
vertex of a triangle. For example, `{i[m], j[m], k[m]}`
together represent face m (triangle m) in the mesh, where `i[m]
= n` points to the triplet `{x[n], y[n], z[n]}` in the vertex
arrays. Therefore, each element in `i` represents a point in
space, which is the first vertex of a triangle.
The 'i' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["i"]
@i.setter
def i(self, val):
self["i"] = val
@property
def ids(self):
    """
    Assigns id labels to each datum. These ids provide object
    constancy of data points during animation. Should be an array
    of strings, not numbers or any other type.

    The 'ids' property is an array that may be specified as a tuple,
    list, numpy array, or pandas Series

    Returns
    -------
    numpy.ndarray
    """
    return self["ids"]

@ids.setter
def ids(self, val):
    self["ids"] = val
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ids`.
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
@property
def intensity(self):
"""
Sets the intensity values for vertices or cells as defined by
`intensitymode`. It can be used for plotting fields on meshes.
The 'intensity' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["intensity"]
@intensity.setter
def intensity(self, val):
self["intensity"] = val
@property
def intensitymode(self):
"""
Determines the source of `intensity` values.
The 'intensitymode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['vertex', 'cell']
Returns
-------
Any
"""
return self["intensitymode"]
@intensitymode.setter
def intensitymode(self, val):
self["intensitymode"] = val
@property
def intensitysrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`intensity`.
The 'intensitysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["intensitysrc"]
@intensitysrc.setter
def intensitysrc(self, val):
self["intensitysrc"] = val
@property
def isrc(self):
"""
Sets the source reference on Chart Studio Cloud for `i`.
The 'isrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["isrc"]
@isrc.setter
def isrc(self, val):
self["isrc"] = val
@property
def j(self):
"""
A vector of vertex indices, i.e. integer values between 0 and
the length of the vertex vectors, representing the "second"
vertex of a triangle. For example, `{i[m], j[m], k[m]}`
together represent face m (triangle m) in the mesh, where `j[m]
= n` points to the triplet `{x[n], y[n], z[n]}` in the vertex
arrays. Therefore, each element in `j` represents a point in
space, which is the second vertex of a triangle.
The 'j' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["j"]
@j.setter
def j(self, val):
self["j"] = val
@property
def jsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `j`.
The 'jsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["jsrc"]
@jsrc.setter
def jsrc(self, val):
self["jsrc"] = val
@property
def k(self):
"""
A vector of vertex indices, i.e. integer values between 0 and
the length of the vertex vectors, representing the "third"
vertex of a triangle. For example, `{i[m], j[m], k[m]}`
together represent face m (triangle m) in the mesh, where `k[m]
= n` points to the triplet `{x[n], y[n], z[n]}` in the vertex
arrays. Therefore, each element in `k` represents a point in
space, which is the third vertex of a triangle.
The 'k' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["k"]
@k.setter
def k(self, val):
self["k"] = val
@property
def ksrc(self):
"""
Sets the source reference on Chart Studio Cloud for `k`.
The 'ksrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ksrc"]
@ksrc.setter
def ksrc(self, val):
self["ksrc"] = val
@property
def legend(self):
"""
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2", "legend3",
etc. Settings for these legends are set in the layout, under
`layout.legend`, `layout.legend2`, etc.
The 'legend' property is an identifier of a particular
subplot, of type 'legend', that may be specified as the string 'legend'
optionally followed by an integer >= 1
(e.g. 'legend', 'legend1', 'legend2', 'legend3', etc.)
Returns
-------
str
"""
return self["legend"]
@legend.setter
def legend(self, val):
self["legend"] = val
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces and shapes part of
the same legend group hide/show at the same time when toggling
legend items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.mesh3d.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Returns
-------
plotly.graph_objs.mesh3d.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
"reversed" `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items. When
having unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and layout.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
@property
def legendwidth(self):
"""
Sets the width (in px or fraction) of the legend for this
trace.
The 'legendwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["legendwidth"]
@legendwidth.setter
def legendwidth(self, val):
self["legendwidth"] = val
@property
def lighting(self):
"""
The 'lighting' property is an instance of Lighting
that may be specified as:
- An instance of :class:`plotly.graph_objs.mesh3d.Lighting`
- A dict of string/value properties that will be passed
to the Lighting constructor
Returns
-------
plotly.graph_objs.mesh3d.Lighting
"""
return self["lighting"]
@lighting.setter
def lighting(self, val):
self["lighting"] = val
@property
def lightposition(self):
"""
The 'lightposition' property is an instance of Lightposition
that may be specified as:
- An instance of :class:`plotly.graph_objs.mesh3d.Lightposition`
- A dict of string/value properties that will be passed
to the Lightposition constructor
Returns
-------
plotly.graph_objs.mesh3d.Lightposition
"""
return self["lightposition"]
@lightposition.setter
def lightposition(self, val):
self["lightposition"] = val
@property
def meta(self):
    """
    Assigns extra meta information associated with this trace that
    can be used in various text attributes. Attributes such as
    trace `name`, graph, axis and colorbar `title.text`, annotation
    `text` `rangeselector`, `updatemenus` and `sliders` `label`
    text all support `meta`. To access the trace `meta` values in
    an attribute in the same trace, simply use `%{meta[i]}` where
    `i` is the index or key of the `meta` item in question. To
    access trace `meta` in layout attributes, use
    `%{data[n].meta[i]}` where `i` is the index or key of the
    `meta` and `n` is the trace index.

    The 'meta' property accepts values of any type

    Returns
    -------
    Any|numpy.ndarray
    """
    return self["meta"]

@meta.setter
def meta(self, val):
    self["meta"] = val
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for `meta`.
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
@property
def name(self):
    """
    The display name of this trace, shown as the legend item and in
    hover labels.

    The 'name' property is a string and must be specified as:
      - A string
      - A number that will be converted to a string

    Returns
    -------
    str
    """
    return self["name"]

@name.setter
def name(self, new_name):
    self["name"] = new_name
@property
def opacity(self):
"""
Sets the opacity of the surface. Please note that in the case
of using high `opacity` values for example a value greater than
or equal to 0.5 on two surfaces (and 0.25 with four surfaces),
an overlay of multiple transparent surfaces may not perfectly
be sorted in depth by the webgl API. This behavior may be
improved in the near future and is subject to change.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def reversescale(self):
"""
Reverses the color mapping if true. If true, `cmin` will
correspond to the last color in the array and `cmax` will
correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
@property
def scene(self):
"""
Sets a reference between this trace's 3D coordinate system and
a 3D scene. If "scene" (the default value), the (x,y,z)
coordinates refer to `layout.scene`. If "scene2", the (x,y,z)
coordinates refer to `layout.scene2`, and so on.
The 'scene' property is an identifier of a particular
subplot, of type 'scene', that may be specified as the string 'scene'
optionally followed by an integer >= 1
(e.g. 'scene', 'scene1', 'scene2', 'scene3', etc.)
Returns
-------
str
"""
return self["scene"]
@scene.setter
def scene(self, val):
self["scene"] = val
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.mesh3d.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Returns
-------
plotly.graph_objs.mesh3d.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
@property
def text(self):
"""
Sets the text elements associated with the vertices. If trace
`hoverinfo` contains a "text" flag and "hovertext" is not set,
these elements will be seen in the hover labels.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `text`.
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
@property
def uid(self):
"""
Assign an id to this trace, Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
@property
def vertexcolor(self):
"""
Sets the color of each vertex Overrides "color". While Red,
green and blue colors are in the range of 0 and 255; in the
case of having vertex color data in RGBA format, the alpha
color should be normalized to be between 0 and 1.
The 'vertexcolor' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["vertexcolor"]
@vertexcolor.setter
def vertexcolor(self, val):
self["vertexcolor"] = val
@property
def vertexcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`vertexcolor`.
The 'vertexcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["vertexcolorsrc"]
@vertexcolorsrc.setter
def vertexcolorsrc(self, val):
self["vertexcolorsrc"] = val
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def x(self):
"""
Sets the X coordinates of the vertices. The nth element of
vectors `x`, `y` and `z` jointly represent the X, Y and Z
coordinates of the nth vertex.
The 'x' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def xcalendar(self):
"""
Sets the calendar system to use with `x` date data.
The 'xcalendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['chinese', 'coptic', 'discworld', 'ethiopian',
'gregorian', 'hebrew', 'islamic', 'jalali', 'julian',
'mayan', 'nanakshahi', 'nepali', 'persian', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["xcalendar"]
@xcalendar.setter
def xcalendar(self, val):
self["xcalendar"] = val
@property
def xhoverformat(self):
    """
    Sets the hover text formatting rule for `x` using d3 formatting
    mini-languages which are very similar to those in Python. For
    numbers, see:
    https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
    dates see: https://github.com/d3/d3-time-
    format/tree/v2.2.3#locale_format. We add two items to d3's date
    formatter: "%h" for half of the year as a decimal number as
    well as "%{n}f" for fractional seconds with n digits. For
    example, *2016-10-13 09:15:23.456* with tickformat
    "%H~%M~%S.%2f" would display *09~15~23.46*. By default the
    values are formatted using `xaxis.hoverformat`.

    The 'xhoverformat' property is a string and must be specified as:
      - A string
      - A number that will be converted to a string

    Returns
    -------
    str
    """
    return self["xhoverformat"]

@xhoverformat.setter
def xhoverformat(self, val):
    self["xhoverformat"] = val
@property
def xsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `x`.
The 'xsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["xsrc"]
@xsrc.setter
def xsrc(self, val):
self["xsrc"] = val
@property
def y(self):
"""
Sets the Y coordinates of the vertices. The nth element of
vectors `x`, `y` and `z` jointly represent the X, Y and Z
coordinates of the nth vertex.
The 'y' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def ycalendar(self):
"""
Sets the calendar system to use with `y` date data.
The 'ycalendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['chinese', 'coptic', 'discworld', 'ethiopian',
'gregorian', 'hebrew', 'islamic', 'jalali', 'julian',
'mayan', 'nanakshahi', 'nepali', 'persian', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["ycalendar"]
@ycalendar.setter
def ycalendar(self, val):
self["ycalendar"] = val
@property
def yhoverformat(self):
    """
    Sets the hover text formatting rule for `y` using d3 formatting
    mini-languages which are very similar to those in Python. For
    numbers, see:
    https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
    dates see: https://github.com/d3/d3-time-
    format/tree/v2.2.3#locale_format. We add two items to d3's date
    formatter: "%h" for half of the year as a decimal number as
    well as "%{n}f" for fractional seconds with n digits. For
    example, *2016-10-13 09:15:23.456* with tickformat
    "%H~%M~%S.%2f" would display *09~15~23.46*. By default the
    values are formatted using `yaxis.hoverformat`.

    The 'yhoverformat' property is a string and must be specified as:
      - A string
      - A number that will be converted to a string

    Returns
    -------
    str
    """
    return self["yhoverformat"]

@yhoverformat.setter
def yhoverformat(self, val):
    self["yhoverformat"] = val
@property
def ysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `y`.
The 'ysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ysrc"]
@ysrc.setter
def ysrc(self, val):
self["ysrc"] = val
@property
def z(self):
"""
Sets the Z coordinates of the vertices. The nth element of
vectors `x`, `y` and `z` jointly represent the X, Y and Z
coordinates of the nth vertex.
The 'z' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
@property
def zcalendar(self):
"""
Sets the calendar system to use with `z` date data.
The 'zcalendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['chinese', 'coptic', 'discworld', 'ethiopian',
'gregorian', 'hebrew', 'islamic', 'jalali', 'julian',
'mayan', 'nanakshahi', 'nepali', 'persian', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["zcalendar"]
@zcalendar.setter
def zcalendar(self, val):
self["zcalendar"] = val
@property
def zhoverformat(self):
    """
    Sets the hover text formatting rule for `z` using d3 formatting
    mini-languages which are very similar to those in Python. For
    numbers, see:
    https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
    dates see: https://github.com/d3/d3-time-
    format/tree/v2.2.3#locale_format. We add two items to d3's date
    formatter: "%h" for half of the year as a decimal number as
    well as "%{n}f" for fractional seconds with n digits. For
    example, *2016-10-13 09:15:23.456* with tickformat
    "%H~%M~%S.%2f" would display *09~15~23.46*. By default the
    values are formatted using `zaxis.hoverformat`.

    The 'zhoverformat' property is a string and must be specified as:
      - A string
      - A number that will be converted to a string

    Returns
    -------
    str
    """
    return self["zhoverformat"]

@zhoverformat.setter
def zhoverformat(self, val):
    self["zhoverformat"] = val
@property
def zsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `z`.
The 'zsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["zsrc"]
@zsrc.setter
def zsrc(self, val):
self["zsrc"] = val
@property
def type(self):
    # Read-only trace type identifier, read straight from the underlying
    # `_props` dict. Given the `plotly.graph_objs.mesh3d.*` references
    # throughout this class, the stored value is presumably "mesh3d",
    # set by the generated constructor (not visible here) -- confirm there.
    return self._props["type"]
@property
def _prop_descriptions(self):
return """\
alphahull
Determines how the mesh surface triangles are derived
from the set of vertices (points) represented by the
`x`, `y` and `z` arrays, if the `i`, `j`, `k` arrays
are not supplied. For general use of `mesh3d` it is
preferred that `i`, `j`, `k` are supplied. If "-1",
Delaunay triangulation is used, which is mainly
suitable if the mesh is a single, more or less layer
surface that is perpendicular to `delaunayaxis`. In
case the `delaunayaxis` intersects the mesh surface at
more than one point it will result triangles that are
very long in the dimension of `delaunayaxis`. If ">0",
the alpha-shape algorithm is used. In this case, the
positive `alphahull` value signals the use of the
alpha-shape algorithm, _and_ its value acts as the
parameter for the mesh fitting. If 0, the convex-hull
algorithm is used. It is suitable for convex bodies or
if the intention is to enclose the `x`, `y` and `z`
point set into a convex hull.
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here `intensity`) or
the bounds set in `cmin` and `cmax` Defaults to `false`
when `cmin` and `cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Value should
have the same units as `intensity` and if set, `cmin`
must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`cmin` and/or `cmax` to be equidistant to this point.
Value should have the same units as `intensity`. Has no
effect when `cauto` is `false`.
cmin
Sets the lower bound of the color domain. Value should
have the same units as `intensity` and if set, `cmax`
must be set as well.
color
Sets the color of the whole mesh
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.mesh3d.ColorBar` instance
or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use `cmin` and `cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
contour
:class:`plotly.graph_objects.mesh3d.Contour` instance
or dict with compatible properties
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
delaunayaxis
Sets the Delaunay axis, which is the axis that is
perpendicular to the surface of the Delaunay
triangulation. It has an effect if `i`, `j`, `k` are
not provided and `alphahull` is set to indicate
Delaunay triangulation.
facecolor
Sets the color of each face Overrides "color" and
"vertexcolor".
facecolorsrc
Sets the source reference on Chart Studio Cloud for
`facecolor`.
flatshading
Determines whether or not normal smoothing is applied
to the meshes, creating meshes with an angular, low-
poly look via flat reflections.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.mesh3d.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
i
A vector of vertex indices, i.e. integer values between
0 and the length of the vertex vectors, representing
the "first" vertex of a triangle. For example, `{i[m],
j[m], k[m]}` together represent face m (triangle m) in
the mesh, where `i[m] = n` points to the triplet
`{x[n], y[n], z[n]}` in the vertex arrays. Therefore,
each element in `i` represents a point in space, which
is the first vertex of a triangle.
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
intensity
Sets the intensity values for vertices or cells as
defined by `intensitymode`. It can be used for plotting
fields on meshes.
intensitymode
Determines the source of `intensity` values.
intensitysrc
Sets the source reference on Chart Studio Cloud for
`intensity`.
isrc
Sets the source reference on Chart Studio Cloud for
`i`.
j
A vector of vertex indices, i.e. integer values between
0 and the length of the vertex vectors, representing
the "second" vertex of a triangle. For example, `{i[m],
j[m], k[m]}` together represent face m (triangle m) in
the mesh, where `j[m] = n` points to the triplet
`{x[n], y[n], z[n]}` in the vertex arrays. Therefore,
each element in `j` represents a point in space, which
is the second vertex of a triangle.
jsrc
Sets the source reference on Chart Studio Cloud for
`j`.
k
A vector of vertex indices, i.e. integer values between
0 and the length of the vertex vectors, representing
the "third" vertex of a triangle. For example, `{i[m],
j[m], k[m]}` together represent face m (triangle m) in
the mesh, where `k[m] = n` points to the triplet
`{x[n], y[n], z[n]}` in the vertex arrays. Therefore,
each element in `k` represents a point in space, which
is the third vertex of a triangle.
ksrc
Sets the source reference on Chart Studio Cloud for
`k`.
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.mesh3d.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
lighting
:class:`plotly.graph_objects.mesh3d.Lighting` instance
or dict with compatible properties
lightposition
:class:`plotly.graph_objects.mesh3d.Lightposition`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the surface. Please note that in
the case of using high `opacity` values for example a
value greater than or equal to 0.5 on two surfaces (and
0.25 with four surfaces), an overlay of multiple
transparent surfaces may not perfectly be sorted in
depth by the webgl API. This behavior may be improved
in the near future and is subject to change.
reversescale
Reverses the color mapping if true. If true, `cmin`
will correspond to the last color in the array and
`cmax` will correspond to the first color.
scene
Sets a reference between this trace's 3D coordinate
system and a 3D scene. If "scene" (the default value),
the (x,y,z) coordinates refer to `layout.scene`. If
"scene2", the (x,y,z) coordinates refer to
`layout.scene2`, and so on.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
stream
:class:`plotly.graph_objects.mesh3d.Stream` instance or
dict with compatible properties
text
Sets the text elements associated with the vertices. If
trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in
the hover labels.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
vertexcolor
Sets the color of each vertex Overrides "color". While
Red, green and blue colors are in the range of 0 and
255; in the case of having vertex color data in RGBA
format, the alpha color should be normalized to be
between 0 and 1.
vertexcolorsrc
Sets the source reference on Chart Studio Cloud for
`vertexcolor`.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x
Sets the X coordinates of the vertices. The nth element
of vectors `x`, `y` and `z` jointly represent the X, Y
and Z coordinates of the nth vertex.
xcalendar
Sets the calendar system to use with `x` date data.
xhoverformat
Sets the hover text formatting rulefor `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `xaxis.hoverformat`.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
y
Sets the Y coordinates of the vertices. The nth element
of vectors `x`, `y` and `z` jointly represent the X, Y
and Z coordinates of the nth vertex.
ycalendar
Sets the calendar system to use with `y` date data.
yhoverformat
Sets the hover text formatting rulefor `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `yaxis.hoverformat`.
ysrc
Sets the source reference on Chart Studio Cloud for
`y`.
z
Sets the Z coordinates of the vertices. The nth element
of vectors `x`, `y` and `z` jointly represent the X, Y
and Z coordinates of the nth vertex.
zcalendar
Sets the calendar system to use with `z` date data.
zhoverformat
Sets the hover text formatting rulefor `z` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `zaxis.hoverformat`.
zsrc
Sets the source reference on Chart Studio Cloud for
`z`.
"""
def __init__(
self,
arg=None,
alphahull=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorbar=None,
colorscale=None,
contour=None,
customdata=None,
customdatasrc=None,
delaunayaxis=None,
facecolor=None,
facecolorsrc=None,
flatshading=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatefallback=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
i=None,
ids=None,
idssrc=None,
intensity=None,
intensitymode=None,
intensitysrc=None,
isrc=None,
j=None,
jsrc=None,
k=None,
ksrc=None,
legend=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
legendwidth=None,
lighting=None,
lightposition=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
reversescale=None,
scene=None,
showlegend=None,
showscale=None,
stream=None,
text=None,
textsrc=None,
uid=None,
uirevision=None,
vertexcolor=None,
vertexcolorsrc=None,
visible=None,
x=None,
xcalendar=None,
xhoverformat=None,
xsrc=None,
y=None,
ycalendar=None,
yhoverformat=None,
ysrc=None,
z=None,
zcalendar=None,
zhoverformat=None,
zsrc=None,
**kwargs,
):
"""
Construct a new Mesh3d object
Draws sets of triangles with coordinates given by three
1-dimensional arrays in `x`, `y`, `z` and (1) a sets of `i`,
`j`, `k` indices (2) Delaunay triangulation or (3) the Alpha-
shape algorithm or (4) the Convex-hull algorithm
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Mesh3d`
alphahull
Determines how the mesh surface triangles are derived
from the set of vertices (points) represented by the
`x`, `y` and `z` arrays, if the `i`, `j`, `k` arrays
are not supplied. For general use of `mesh3d` it is
preferred that `i`, `j`, `k` are supplied. If "-1",
Delaunay triangulation is used, which is mainly
suitable if the mesh is a single, more or less layer
surface that is perpendicular to `delaunayaxis`. In
case the `delaunayaxis` intersects the mesh surface at
more than one point it will result triangles that are
very long in the dimension of `delaunayaxis`. If ">0",
the alpha-shape algorithm is used. In this case, the
positive `alphahull` value signals the use of the
alpha-shape algorithm, _and_ its value acts as the
parameter for the mesh fitting. If 0, the convex-hull
algorithm is used. It is suitable for convex bodies or
if the intention is to enclose the `x`, `y` and `z`
point set into a convex hull.
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here `intensity`) or
the bounds set in `cmin` and `cmax` Defaults to `false`
when `cmin` and `cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Value should
have the same units as `intensity` and if set, `cmin`
must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`cmin` and/or `cmax` to be equidistant to this point.
Value should have the same units as `intensity`. Has no
effect when `cauto` is `false`.
cmin
Sets the lower bound of the color domain. Value should
have the same units as `intensity` and if set, `cmax`
must be set as well.
color
Sets the color of the whole mesh
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.mesh3d.ColorBar` instance
or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use `cmin` and `cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
contour
:class:`plotly.graph_objects.mesh3d.Contour` instance
or dict with compatible properties
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
delaunayaxis
Sets the Delaunay axis, which is the axis that is
perpendicular to the surface of the Delaunay
triangulation. It has an effect if `i`, `j`, `k` are
not provided and `alphahull` is set to indicate
Delaunay triangulation.
facecolor
Sets the color of each face Overrides "color" and
"vertexcolor".
facecolorsrc
Sets the source reference on Chart Studio Cloud for
`facecolor`.
flatshading
Determines whether or not normal smoothing is applied
to the meshes, creating meshes with an angular, low-
poly look via flat reflections.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.mesh3d.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
i
A vector of vertex indices, i.e. integer values between
0 and the length of the vertex vectors, representing
the "first" vertex of a triangle. For example, `{i[m],
j[m], k[m]}` together represent face m (triangle m) in
the mesh, where `i[m] = n` points to the triplet
`{x[n], y[n], z[n]}` in the vertex arrays. Therefore,
each element in `i` represents a point in space, which
is the first vertex of a triangle.
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
intensity
Sets the intensity values for vertices or cells as
defined by `intensitymode`. It can be used for plotting
fields on meshes.
intensitymode
Determines the source of `intensity` values.
intensitysrc
Sets the source reference on Chart Studio Cloud for
`intensity`.
isrc
Sets the source reference on Chart Studio Cloud for
`i`.
j
A vector of vertex indices, i.e. integer values between
0 and the length of the vertex vectors, representing
the "second" vertex of a triangle. For example, `{i[m],
j[m], k[m]}` together represent face m (triangle m) in
the mesh, where `j[m] = n` points to the triplet
`{x[n], y[n], z[n]}` in the vertex arrays. Therefore,
each element in `j` represents a point in space, which
is the second vertex of a triangle.
jsrc
Sets the source reference on Chart Studio Cloud for
`j`.
k
A vector of vertex indices, i.e. integer values between
0 and the length of the vertex vectors, representing
the "third" vertex of a triangle. For example, `{i[m],
j[m], k[m]}` together represent face m (triangle m) in
the mesh, where `k[m] = n` points to the triplet
`{x[n], y[n], z[n]}` in the vertex arrays. Therefore,
each element in `k` represents a point in space, which
is the third vertex of a triangle.
ksrc
Sets the source reference on Chart Studio Cloud for
`k`.
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.mesh3d.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
lighting
:class:`plotly.graph_objects.mesh3d.Lighting` instance
or dict with compatible properties
lightposition
:class:`plotly.graph_objects.mesh3d.Lightposition`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the surface. Please note that in
the case of using high `opacity` values for example a
value greater than or equal to 0.5 on two surfaces (and
0.25 with four surfaces), an overlay of multiple
transparent surfaces may not perfectly be sorted in
depth by the webgl API. This behavior may be improved
in the near future and is subject to change.
reversescale
Reverses the color mapping if true. If true, `cmin`
will correspond to the last color in the array and
`cmax` will correspond to the first color.
scene
Sets a reference between this trace's 3D coordinate
system and a 3D scene. If "scene" (the default value),
the (x,y,z) coordinates refer to `layout.scene`. If
"scene2", the (x,y,z) coordinates refer to
`layout.scene2`, and so on.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
stream
:class:`plotly.graph_objects.mesh3d.Stream` instance or
dict with compatible properties
text
Sets the text elements associated with the vertices. If
trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in
the hover labels.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
vertexcolor
Sets the color of each vertex Overrides "color". While
Red, green and blue colors are in the range of 0 and
255; in the case of having vertex color data in RGBA
format, the alpha color should be normalized to be
between 0 and 1.
vertexcolorsrc
Sets the source reference on Chart Studio Cloud for
`vertexcolor`.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x
Sets the X coordinates of the vertices. The nth element
of vectors `x`, `y` and `z` jointly represent the X, Y
and Z coordinates of the nth vertex.
xcalendar
Sets the calendar system to use with `x` date data.
xhoverformat
Sets the hover text formatting rulefor `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `xaxis.hoverformat`.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
y
Sets the Y coordinates of the vertices. The nth element
of vectors `x`, `y` and `z` jointly represent the X, Y
and Z coordinates of the nth vertex.
ycalendar
Sets the calendar system to use with `y` date data.
yhoverformat
Sets the hover text formatting rulefor `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `yaxis.hoverformat`.
ysrc
Sets the source reference on Chart Studio Cloud for
`y`.
z
Sets the Z coordinates of the vertices. The nth element
of vectors `x`, `y` and `z` jointly represent the X, Y
and Z coordinates of the nth vertex.
zcalendar
Sets the calendar system to use with `z` date data.
zhoverformat
Sets the hover text formatting rulefor `z` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `zaxis.hoverformat`.
zsrc
Sets the source reference on Chart Studio Cloud for
`z`.
Returns
-------
Mesh3d
"""
super().__init__("mesh3d")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.Mesh3d
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Mesh3d`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("alphahull", arg, alphahull)
self._set_property("autocolorscale", arg, autocolorscale)
self._set_property("cauto", arg, cauto)
self._set_property("cmax", arg, cmax)
self._set_property("cmid", arg, cmid)
self._set_property("cmin", arg, cmin)
self._set_property("color", arg, color)
self._set_property("coloraxis", arg, coloraxis)
self._set_property("colorbar", arg, colorbar)
self._set_property("colorscale", arg, colorscale)
self._set_property("contour", arg, contour)
self._set_property("customdata", arg, customdata)
self._set_property("customdatasrc", arg, customdatasrc)
self._set_property("delaunayaxis", arg, delaunayaxis)
self._set_property("facecolor", arg, facecolor)
self._set_property("facecolorsrc", arg, facecolorsrc)
self._set_property("flatshading", arg, flatshading)
self._set_property("hoverinfo", arg, hoverinfo)
self._set_property("hoverinfosrc", arg, hoverinfosrc)
self._set_property("hoverlabel", arg, hoverlabel)
self._set_property("hovertemplate", arg, hovertemplate)
self._set_property("hovertemplatefallback", arg, hovertemplatefallback)
self._set_property("hovertemplatesrc", arg, hovertemplatesrc)
self._set_property("hovertext", arg, hovertext)
self._set_property("hovertextsrc", arg, hovertextsrc)
self._set_property("i", arg, i)
self._set_property("ids", arg, ids)
self._set_property("idssrc", arg, idssrc)
self._set_property("intensity", arg, intensity)
self._set_property("intensitymode", arg, intensitymode)
self._set_property("intensitysrc", arg, intensitysrc)
self._set_property("isrc", arg, isrc)
self._set_property("j", arg, j)
self._set_property("jsrc", arg, jsrc)
self._set_property("k", arg, k)
self._set_property("ksrc", arg, ksrc)
self._set_property("legend", arg, legend)
self._set_property("legendgroup", arg, legendgroup)
self._set_property("legendgrouptitle", arg, legendgrouptitle)
self._set_property("legendrank", arg, legendrank)
self._set_property("legendwidth", arg, legendwidth)
self._set_property("lighting", arg, lighting)
self._set_property("lightposition", arg, lightposition)
self._set_property("meta", arg, meta)
self._set_property("metasrc", arg, metasrc)
self._set_property("name", arg, name)
self._set_property("opacity", arg, opacity)
self._set_property("reversescale", arg, reversescale)
self._set_property("scene", arg, scene)
self._set_property("showlegend", arg, showlegend)
self._set_property("showscale", arg, showscale)
self._set_property("stream", arg, stream)
self._set_property("text", arg, text)
self._set_property("textsrc", arg, textsrc)
self._set_property("uid", arg, uid)
self._set_property("uirevision", arg, uirevision)
self._set_property("vertexcolor", arg, vertexcolor)
self._set_property("vertexcolorsrc", arg, vertexcolorsrc)
self._set_property("visible", arg, visible)
self._set_property("x", arg, x)
self._set_property("xcalendar", arg, xcalendar)
self._set_property("xhoverformat", arg, xhoverformat)
self._set_property("xsrc", arg, xsrc)
self._set_property("y", arg, y)
self._set_property("ycalendar", arg, ycalendar)
self._set_property("yhoverformat", arg, yhoverformat)
self._set_property("ysrc", arg, ysrc)
self._set_property("z", arg, z)
self._set_property("zcalendar", arg, zcalendar)
self._set_property("zhoverformat", arg, zhoverformat)
self._set_property("zsrc", arg, zsrc)
self._props["type"] = "mesh3d"
arg.pop("type", None)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Mesh3d
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/destinations.py
|
{
"start": 205,
"end": 1776
}
|
class ____(GeneratedAirbyteDestination):
@public
def __init__(
self,
name: str,
dynamodb_table_name_prefix: str,
dynamodb_region: str,
access_key_id: str,
secret_access_key: str,
dynamodb_endpoint: Optional[str] = None,
):
"""Airbyte Destination for Dynamodb.
Documentation can be found at https://docs.airbyte.com/integrations/destinations/dynamodb
Args:
name (str): The name of the destination.
dynamodb_endpoint (Optional[str]): This is your DynamoDB endpoint url.(if you are working with AWS DynamoDB, just leave empty).
dynamodb_table_name_prefix (str): The prefix to use when naming DynamoDB tables.
dynamodb_region (str): The region of the DynamoDB.
access_key_id (str): The access key id to access the DynamoDB. Airbyte requires Read and Write permissions to the DynamoDB.
secret_access_key (str): The corresponding secret to the access key id.
"""
self.dynamodb_endpoint = check.opt_str_param(dynamodb_endpoint, "dynamodb_endpoint")
self.dynamodb_table_name_prefix = check.str_param(
dynamodb_table_name_prefix, "dynamodb_table_name_prefix"
)
self.dynamodb_region = check.str_param(dynamodb_region, "dynamodb_region")
self.access_key_id = check.str_param(access_key_id, "access_key_id")
self.secret_access_key = check.str_param(secret_access_key, "secret_access_key")
super().__init__("Dynamodb", name)
|
DynamodbDestination
|
python
|
pyparsing__pyparsing
|
pyparsing/core.py
|
{
"start": 202228,
"end": 203576
}
|
class ____(ParseElementEnhance):
"""
Decorates a returned token with its starting and ending
locations in the input string.
This helper adds the following results names:
- ``locn_start`` - location where matched expression begins
- ``locn_end`` - location where matched expression ends
- ``value`` - the actual parsed results
Be careful if the input text contains ``<TAB>`` characters, you
may want to call :class:`ParserElement.parse_with_tabs`
Example:
.. testcode::
wd = Word(alphas)
for match in Located(wd).search_string("ljsdf123lksdjjf123lkkjj1222"):
print(match)
prints:
.. testoutput::
[0, ['ljsdf'], 5]
[8, ['lksdjjf'], 15]
[18, ['lkkjj'], 23]
"""
def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
start = loc
loc, tokens = self.expr._parse(instring, start, do_actions, callPreParse=False)
ret_tokens = ParseResults([start, tokens, loc])
ret_tokens["locn_start"] = start
ret_tokens["value"] = tokens
ret_tokens["locn_end"] = loc
if self.resultsName:
# must return as a list, so that the name will be attached to the complete group
return loc, [ret_tokens]
else:
return loc, ret_tokens
|
Located
|
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/random_generator_test.py
|
{
"start": 3179,
"end": 13650
}
|
class ____(test.TestCase, parameterized.TestCase):
def setUp(self):
super(GeneratorTest, self).setUp()
v2_compat.enable_v2_behavior()
def assertAllDifferent(self, tensors):
"""Checks that there are no duplicate elements anywhere among the tensors.
Args:
tensors: a list of tensors. They can have different shapes.
"""
values = [array_ops.reshape(t, shape=[-1]) for t in tensors]
values = array_ops.concat(values, axis=0)
values = self.evaluate(values)
values = values.tolist()
self.assertAllEqual(len(values), len(set(values)))
@test_util.run_v2_only
def testCreateOutsideMirroredStrat(self):
"""Tests RNG/MirrorStrategy interaction #1.
If an RNG is created outside a DS scope, all replicas will access the
same RNG object, and accesses are serialized.
"""
shape = [3, 4]
dtype = dtypes.int32
gen = rng.Generator.from_seed(1234)
strat = MirroredStrategy(devices=["cpu:0", "cpu:1"])
with strat.scope():
def f():
t1 = gen.uniform_full_int(shape=shape, dtype=dtype)
t2 = gen.uniform_full_int(shape=shape, dtype=dtype)
t = array_ops_stack.stack([t1, t2])
return t
results = strat.extended.call_for_each_replica(fn=f)
values = results.values
self.assertAllEqual(2, len(values))
self.assertAllDifferent(values)
@test_util.run_v2_only
def testMirroredStratParaAsync(self):
"""Tests RNG/MirrorStrategy interaction #2.
The user can create n independent RNGs outside strategy.scope(), where n
is the number of replicas, and give one to each replica. The replicas can
thus get different random-number streams.
"""
shape = [3, 4]
dtype = dtypes.int32
gens = rng.get_global_generator().split(count=2)
devices = ["cpu:0", "cpu:1"]
strat = MirroredStrategy(devices=devices)
# Use `PerReplica` to specify which `gen` is sent to which replica
gens = dist_values.PerReplica([[g] for g in gens])
with strat.scope():
def f(gen):
t1 = gen.uniform_full_int(shape=shape, dtype=dtype)
t2 = gen.uniform_full_int(shape=shape, dtype=dtype)
t = array_ops_stack.stack([t1, t2])
return t
results = strat.extended.call_for_each_replica(fn=f, args=gens)
local_results = strat.experimental_local_results(results)
self.assertAllEqual(2, len(local_results))
self.assertAllDifferent(local_results)
@ds_combinations.generate(
combinations.combine(
strat=all_strategies,
mode=["eager"]))
def testCrossReplica(self, strat):
"""Tests that RNG can be properly advanced in cross-replica context."""
def read_values(dv):
return [v.read_value() for v in strat.experimental_local_results(dv)]
with strat.scope():
g = rng.Generator.from_seed(1)
s1 = read_values(g.state)
g.normal([3])
g.skip(4)
s2 = read_values(g.state)
self.assertNotAllEqual(s1[0], s2[0])
self.assertEqual(len(s1), len(s2))
for i in range(1, len(s1)):
self.assertAllEqual(s1[0], s1[i])
self.assertAllEqual(s2[0], s2[i])
@ds_combinations.generate(
combinations.combine(
strat=all_strategies,
mode=["eager"],
jit_replica_fn=[False, True],
seeded=[True, False],))
def testDistStrat(self, strat, jit_replica_fn, seeded):
"""Tests RNG with distribution strategies."""
strat_name = type(strat).__name__
if "TPU" in strat_name and not jit_replica_fn:
self.skipTest(
"TPUStrategy requires the replica function (the function passed to "
"strategy.run) to be decorated with tf.function")
coord = None
if "ParameterServer" in strat_name:
coord = coordinator_lib.ClusterCoordinator(strat)
creators = {
True: functools.partial(rng.Generator.from_seed, 1234),
False: rng.Generator.from_non_deterministic_state,
}
shape = [3, 4]
dtype = dtypes.int32
creator = creators[seeded]
with strat.scope():
gen = creator()
def f():
t1 = gen.uniform_full_int(shape=shape, dtype=dtype)
t2 = gen.uniform_full_int(shape=shape, dtype=dtype)
t = array_ops_stack.stack([t1, t2])
return t
replica_fn = def_function.function(f) if jit_replica_fn else f
results = run_on_strategy(replica_fn, strat, coord)
values = strat.experimental_local_results(results)
n = get_num_local_replicas(strat, values)
self.assertAllEqual(n, len(values))
self.assertAllDifferent(values)
@ds_combinations.generate(
combinations.combine(
strat=[
strategy_combinations.parameter_server_strategy_fn(
"ParameterServer1Worker2PSCPUFixedShards",
num_workers=1, num_ps=2,
variable_partitioner=(
sharded_variable.FixedShardsPartitioner(2)))
],
mode=["eager"]))
def testShardedError(self, strat):
"""Tests error about sharding is raised."""
with strat.scope():
with self.assertRaisesRegex(
ValueError, "state is sharded, which is not allowed"):
rng.Generator.from_seed(1234)
@ds_combinations.generate(
combinations.combine(
strat=all_strategies,
mode=["eager"],
jit_replica_fn=[False, True]))
def testDistVarAsTFFunArg(self, strat, jit_replica_fn):
"""Tests that RNG with dist variables can be used as tf.function's arg."""
strat_name = type(strat).__name__
if "CentralStorage" in strat_name:
self.skipTest(
"CentralStorageStrategy wraps variable updates in merge_call which "
"can't be called inside a tf.function that doesn't cover the entire "
"replica function (the function passed to strategy.run).")
if "TPU" in strat_name and not jit_replica_fn:
self.skipTest(
"TPUStrategy requires the replica function (the function passed to "
"strategy.run) to be decorated with tf.function")
coord = None
if "ParameterServer" in strat_name:
coord = coordinator_lib.ClusterCoordinator(strat)
shape = [3, 4]
dtype = dtypes.int32
with strat.scope():
gen = rng.Generator.from_seed(1234)
@def_function.function
def f(gen): # the main focus
t1 = gen.uniform_full_int(shape=shape, dtype=dtype)
t2 = gen.uniform_full_int(shape=shape, dtype=dtype)
t = array_ops_stack.stack([t1, t2])
return t
def g():
return f(gen)
replica_fn = def_function.function(g) if jit_replica_fn else g
for _ in range(2):
results = run_on_strategy(replica_fn, strat, coord)
values = strat.experimental_local_results(results)
n = get_num_local_replicas(strat, values)
self.assertAllEqual(n, len(values))
self.assertAllDifferent(values)
@ds_combinations.generate(
combinations.combine(
strat1=strategy_combinations.all_strategies,
strat2=strategy_combinations.all_strategies,
jit_replica_fn=[False, True],
mode=["eager"]) +
combinations.combine(
strat1=strategy_combinations.multiworker_strategies + ps_strategies,
strat2=[None],
jit_replica_fn=[False, True],
mode=["eager"]))
def testDistStratRestore(self, strat1, strat2, jit_replica_fn):
"""Tests checkpointing and restoring (to possibly different #replicas)."""
if strat2 is None:
strat2 = strat1
strat1_name = type(strat1).__name__
strat2_name = type(strat2).__name__
if "Default" in strat1_name or "Default" in strat2_name:
self.skipTest(
"We don't guarantee consistency between strategy and no-strategy.")
if ("TPU" in strat1_name or "TPU" in strat2_name) and not jit_replica_fn:
self.skipTest(
"TPUStrategy requires the replica function (the function passed to "
"strategy.run) to be decorated with tf.function")
coord1 = None
if "ParameterServer" in strat1_name:
coord1 = coordinator_lib.ClusterCoordinator(strat1)
coord2 = None
if "ParameterServer" in strat2_name:
coord2 = coordinator_lib.ClusterCoordinator(strat2)
fname = os.path.join(self.get_temp_dir(), "checkpoint")
def uniform(strat, coord, g):
def f():
return g.uniform_full_int([3], dtype=dtypes.int32)
replica_fn = def_function.function(f) if jit_replica_fn else f
result = run_on_strategy(replica_fn, strat, coord)
return strat.experimental_local_results(result)
with strat1.scope():
g1 = rng.Generator.from_seed(1)
with strat2.scope():
g2 = rng.Generator.from_seed(10)
cp1 = tracking_util.Checkpoint(g=g1)
cp2 = tracking_util.Checkpoint(g=g2)
def write_restore_compare():
cp1.write(fname)
r1 = uniform(strat1, coord1, g1)
cp2.restore(fname)
r2 = uniform(strat2, coord2, g2)
# Tests that overlapping replicas are properly restored.
n1 = get_num_local_replicas(strat1)
n2 = get_num_local_replicas(strat2)
n = min(n1, n2)
self.assertAllEqual(r1[:n], r2[:n])
# Run multiple times so that cp1.write is called in various RNG states
for _ in range(2):
write_restore_compare()
@ds_combinations.generate(
combinations.combine(
strat=all_strategies,
mode=["eager"],
is_save_in_scope=[True, False]))
def testSavedModel(self, strat, is_save_in_scope):
class CustomModule(module.Module):
def __init__(self):
super(CustomModule, self).__init__()
self.g = rng.Generator.from_seed(0)
@def_function.function
def __call__(self):
return self.g.state
@def_function.function
def mutate(self):
self.g.normal([])
with strat.scope():
m = CustomModule()
m.mutate()
state_before = m()
path = os.path.join(self.get_temp_dir(), "saved_model")
if is_save_in_scope:
with strat.scope():
save.save(m, path)
else:
save.save(m, path)
with strat.scope():
m.mutate()
state_before_2 = m()
imported = load.load(path)
state_after = imported()
self.assertAllEqual(state_before, state_after)
imported.mutate()
state_after_2 = imported()
self.assertAllEqual(state_before_2, state_after_2)
if __name__ == "__main__":
with deprecation.silence():
multi_process_runner.test_main()
|
GeneratorTest
|
python
|
sympy__sympy
|
sympy/functions/special/bessel.py
|
{
"start": 37967,
"end": 41856
}
|
class ____(SphericalHankelBase):
r"""
Spherical Hankel function of the second kind.
Explanation
===========
This function is defined as
.. math:: h_\nu^(2)(z) = j_\nu(z) - i y_\nu(z),
where $j_\nu(z)$ and $y_\nu(z)$ are the spherical
Bessel function of the first and second kinds.
For integral orders $n$, $h_n^(2)$ is calculated using the formula:
.. math:: h_n^(2)(z) = j_{n} - i (-1)^{n+1} j_{-n-1}(z)
Examples
========
>>> from sympy import Symbol, hn2, hankel2, expand_func, jn, yn
>>> z = Symbol("z")
>>> nu = Symbol("nu", integer=True)
>>> print(expand_func(hn2(nu, z)))
jn(nu, z) - I*yn(nu, z)
>>> print(expand_func(hn2(0, z)))
sin(z)/z + I*cos(z)/z
>>> print(expand_func(hn2(1, z)))
I*sin(z)/z - cos(z)/z + sin(z)/z**2 + I*cos(z)/z**2
>>> hn2(nu, z).rewrite(hankel2)
sqrt(2)*sqrt(pi)*sqrt(1/z)*hankel2(nu, z)/2
>>> hn2(nu, z).rewrite(jn)
-(-1)**(nu + 1)*I*jn(-nu - 1, z) + jn(nu, z)
>>> hn2(nu, z).rewrite(yn)
(-1)**nu*yn(-nu - 1, z) - I*yn(nu, z)
See Also
========
hn1, jn, yn, hankel1, hankel2
References
==========
.. [1] https://dlmf.nist.gov/10.47
"""
_hankel_kind_sign = -S.One
@assume_integer_order
def _eval_rewrite_as_hankel2(self, nu, z, **kwargs):
return sqrt(pi/(2*z))*hankel2(nu, z)
def jn_zeros(n, k, method="sympy", dps=15):
"""
Zeros of the spherical Bessel function of the first kind.
Explanation
===========
This returns an array of zeros of $jn$ up to the $k$-th zero.
* method = "sympy": uses `mpmath.besseljzero
<https://mpmath.org/doc/current/functions/bessel.html#mpmath.besseljzero>`_
* method = "scipy": uses the
`SciPy's sph_jn <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.jn_zeros.html>`_
and
`newton <https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.newton.html>`_
to find all
roots, which is faster than computing the zeros using a general
numerical solver, but it requires SciPy and only works with low
precision floating point numbers. (The function used with
method="sympy" is a recent addition to mpmath; before that a general
solver was used.)
Examples
========
>>> from sympy import jn_zeros
>>> jn_zeros(2, 4, dps=5)
[5.7635, 9.095, 12.323, 15.515]
See Also
========
jn, yn, besselj, besselk, bessely
Parameters
==========
n : integer
order of Bessel function
k : integer
number of zeros to return
"""
from math import pi as math_pi
if method == "sympy":
from mpmath import besseljzero
from mpmath.libmp.libmpf import dps_to_prec
prec = dps_to_prec(dps)
return [Expr._from_mpmath(besseljzero(S(n + 0.5)._to_mpmath(prec),
int(l)), prec)
for l in range(1, k + 1)]
elif method == "scipy":
from scipy.optimize import newton
try:
from scipy.special import spherical_jn
f = lambda x: spherical_jn(n, x)
except ImportError:
from scipy.special import sph_jn
f = lambda x: sph_jn(n, x)[0][-1]
else:
raise NotImplementedError("Unknown method.")
def solver(f, x):
if method == "scipy":
root = newton(f, x)
else:
raise NotImplementedError("Unknown method.")
return root
# we need to approximate the position of the first root:
root = n + math_pi
# determine the first root exactly:
root = solver(f, root)
roots = [root]
for i in range(k - 1):
# estimate the position of the next root using the last root + pi:
root = solver(f, root + math_pi)
roots.append(root)
return roots
|
hn2
|
python
|
django-import-export__django-import-export
|
tests/core/tests/admin_integration/test_import_errors.py
|
{
"start": 598,
"end": 10244
}
|
class ____(AdminTestMixin, TestCase):
def test_import_action_handles_UnicodeDecodeError_as_form_error(self):
with mock.patch(
"import_export.admin.TempFolderStorage.read"
) as mock_tmp_folder_storage:
b_arr = b"\x00"
mock_tmp_folder_storage.side_effect = UnicodeDecodeError(
"codec", b_arr, 1, 2, "fail!"
)
response = self._do_import_post(self.book_import_url, "books.csv")
self.assertEqual(response.status_code, 200)
target_msg = (
"'UnicodeDecodeError' encountered while trying to read file. "
"Ensure you have chosen the correct format for the file."
)
self.assertFormError(response.context["form"], "import_file", target_msg)
def test_import_action_handles_ValueError_as_form_error(self):
with mock.patch(
"import_export.admin.TempFolderStorage.read"
) as mock_tmp_folder_storage:
mock_tmp_folder_storage.side_effect = ValueError("some unknown error")
response = self._do_import_post(self.book_import_url, "books.csv")
self.assertEqual(response.status_code, 200)
target_msg = (
"'ValueError' encountered while trying to read file. "
"Ensure you have chosen the correct format for the file."
)
self.assertFormError(response.context["form"], "import_file", target_msg)
def test_import_action_handles_NonFieldError(self):
# issue 2070
with mock.patch("django.forms.Form.clean") as mock_clean:
mock_clean.side_effect = ValidationError("some non field error")
response = self._do_import_post(self.book_import_url, "books.csv")
self.assertEqual(response.status_code, 200)
target_msg = "some non field error"
self.assertIn(target_msg, response.content.decode())
def test_import_action_handles_FieldError(self):
# issue 1722
with mock.patch(
"import_export.resources.Resource._check_import_id_fields"
) as mock_check_import_id_fields:
mock_check_import_id_fields.side_effect = FieldError("some unknown error")
response = self._do_import_post(self.book_import_url, "books.csv")
self.assertEqual(response.status_code, 200)
target_msg = "some unknown error"
self.assertIn(target_msg, response.content.decode())
@override_settings(LANGUAGE_CODE="es")
def test_import_action_handles_ValueError_as_form_error_with_translation(self):
with mock.patch(
"import_export.admin.TempFolderStorage.read"
) as mock_tmp_folder_storage:
mock_tmp_folder_storage.side_effect = ValueError("some unknown error")
response = self._do_import_post(self.book_import_url, "books.csv")
self.assertEqual(response.status_code, 200)
target_msg = (
"Se encontró 'ValueError' mientras se intentaba leer el archivo. "
"Asegúrese que seleccionó el formato correcto para el archivo."
)
self.assertFormError(response.context["form"], "import_file", target_msg)
def test_import_with_customized_form_handles_form_validation(self):
"""Test if admin import handles errors gracefully when confirm_form is
invalid for eg. if a required field (in this case 'Author') is left blank.
"""
# We use customized BookAdmin (CustomBookAdmin) with modified import
# form, which requires Author to be selected (from available authors).
# Note that url is /admin/core/ebook/import (and not: ...book/import)!
# We need a author in the db to select from in the admin import custom
# forms, first we will submit data with invalid author_id and if the
# error is handled correctly, resubmit form with correct author_id and
# check if data is imported successfully
Author.objects.create(id=11, name="Test Author")
# GET the import form
response = self._get_url_response(
self.ebook_import_url, str_in_response='form action=""'
)
self.assertTemplateUsed(response, self.admin_import_template_url)
# POST the import form
input_format = "0"
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
os.path.pardir,
"exports",
"books.csv",
)
with open(filename, "rb") as fobj:
data = {"author": 11, "format": input_format, "import_file": fobj}
self._prepend_form_prefix(data)
response = self._post_url_response(self.ebook_import_url, data)
self.assertIn("result", response.context)
self.assertFalse(response.context["result"].has_errors())
self.assertIn("confirm_form", response.context)
confirm_form = response.context["confirm_form"]
self.assertIsInstance(
confirm_form,
CustomBookAdmin(EBook, "ebook/import").get_confirm_form_class(None),
)
data = confirm_form.initial
self._prepend_form_prefix(data)
self.assertEqual(data["original_file_name"], "books.csv")
# manipulate data to make the payload invalid
data["author"] = ""
response = self._post_url_response(
self.ebook_process_import_url, data, follow=True
)
# check if error is captured gracefully
self.assertEqual(
response.context["errors"], {"author": ["This field is required."]}
)
# resubmit with valid data
data["author"] = 11
response = self._post_url_response(
self.ebook_process_import_url, data, follow=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
_(
"Import finished: {} new, {} updated, {} deleted and {} skipped {}."
).format(1, 0, 0, 0, EBook._meta.verbose_name_plural),
)
def test_confirm_import_handles_non_field_error(self):
"""Test if admin import handles errors gracefully when confirm_form is
has a non-field error. See #2070.
"""
Author.objects.create(id=11, name="Test Author")
# GET the import form
response = self._get_url_response(
self.ebook_import_url, str_in_response='form action=""'
)
self.assertTemplateUsed(response, self.admin_import_template_url)
# POST the import form
input_format = "0"
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
os.path.pardir,
"exports",
"books.csv",
)
with open(filename, "rb") as fobj:
data = {"author": 11, "format": input_format, "import_file": fobj}
self._prepend_form_prefix(data)
response = self._post_url_response(self.ebook_import_url, data)
self.assertIn("result", response.context)
self.assertFalse(response.context["result"].has_errors())
self.assertIn("confirm_form", response.context)
confirm_form = response.context["confirm_form"]
self.assertIsInstance(
confirm_form,
CustomBookAdmin(EBook, "ebook/import").get_confirm_form_class(None),
)
data = confirm_form.initial
self.assertEqual(data["original_file_name"], "books.csv")
with mock.patch("django.forms.Form.clean") as mock_clean:
mock_clean.side_effect = ValidationError("some non field error")
response = self._post_url_response(
self.ebook_process_import_url, data, follow=True
)
target_msg = "some non field error"
self.assertIn(target_msg, response.content.decode())
def test_import_action_invalid_date(self):
# test that a row with an invalid date redirects to errors page
index = self._get_input_format_index("csv")
response = self._do_import_post(
self.book_import_url, "books-invalid-date.csv", index
)
result = response.context["result"]
# there should be a single invalid row
self.assertEqual(1, len(result.invalid_rows))
self.assertEqual(
"Value could not be parsed using defined formats.",
result.invalid_rows[0].error.messages[0],
)
# no rows should be imported because we rollback on validation errors
self.assertEqual(0, Book.objects.count())
def test_import_action_error_on_save(self):
with mock.patch("core.models.Book.save") as mock_save:
mock_save.side_effect = ValueError("some unknown error")
response = self._do_import_post(self.book_import_url, "books.csv")
self.assertIn("some unknown error", response.content.decode())
def test_import_action_invalidates_data_sheet_with_no_headers_or_data(self):
# GET the import form
response = self._get_url_response(
self.book_import_url, str_in_response='form action=""'
)
self.assertTemplateUsed(response, self.admin_import_template_url)
response = self._do_import_post(
self.book_import_url, "books-no-headers.csv", input_format=0
)
self.assertEqual(response.status_code, 200)
target_msg = (
"No valid data to import. Ensure your file "
"has the correct headers or data for import."
)
self.assertFormError(response.context["form"], "import_file", target_msg)
|
ImportErrorHandlingTests
|
python
|
django__django
|
tests/test_client_regress/tests.py
|
{
"start": 32819,
"end": 33206
}
|
class ____(SimpleTestCase):
def test_urlconf_was_changed(self):
"TestCase can enforce a custom URLconf on a per-test basis"
url = reverse("arg_view", args=["somename"])
self.assertEqual(url, "/arg_view/somename/")
# This test needs to run *after* UrlconfSubstitutionTests; the zz prefix in the
# name is to ensure alphabetical ordering.
|
UrlconfSubstitutionTests
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/inheritance/test_assorted_poly.py
|
{
"start": 60978,
"end": 65535
}
|
class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"people",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("type", String(30)),
)
Table(
"users",
metadata,
Column("id", Integer, ForeignKey("people.id"), primary_key=True),
Column("supervisor_id", Integer, ForeignKey("people.id")),
)
Table(
"dudes",
metadata,
Column("id", Integer, ForeignKey("users.id"), primary_key=True),
)
@classmethod
def setup_classes(cls):
class Person(cls.Comparable):
pass
class User(Person):
pass
class Dude(User):
pass
def _roundtrip(self):
User = self.classes.User
sess = fixture_session()
u1 = User()
u2 = User()
u2.supervisor = u1
sess.add_all([u1, u2])
sess.commit()
assert u2.supervisor is u1
def _dude_roundtrip(self):
Dude, User = self.classes.Dude, self.classes.User
sess = fixture_session()
u1 = User()
d1 = Dude()
d1.supervisor = u1
sess.add_all([u1, d1])
sess.commit()
assert d1.supervisor is u1
def test_joined_to_base(self):
people, users = self.tables.people, self.tables.users
Person, User = self.classes.Person, self.classes.User
self.mapper_registry.map_imperatively(
Person,
people,
polymorphic_on=people.c.type,
polymorphic_identity="person",
)
self.mapper_registry.map_imperatively(
User,
users,
inherits=Person,
polymorphic_identity="user",
inherit_condition=(users.c.id == people.c.id),
properties={
"supervisor": relationship(
Person, primaryjoin=users.c.supervisor_id == people.c.id
)
},
)
assert User.supervisor.property.direction is MANYTOONE
self._roundtrip()
def test_joined_to_same_subclass(self):
people, users = self.tables.people, self.tables.users
Person, User = self.classes.Person, self.classes.User
self.mapper_registry.map_imperatively(
Person,
people,
polymorphic_on=people.c.type,
polymorphic_identity="person",
)
self.mapper_registry.map_imperatively(
User,
users,
inherits=Person,
polymorphic_identity="user",
inherit_condition=(users.c.id == people.c.id),
properties={
"supervisor": relationship(
User,
primaryjoin=users.c.supervisor_id == people.c.id,
remote_side=people.c.id,
foreign_keys=[users.c.supervisor_id],
)
},
)
assert User.supervisor.property.direction is MANYTOONE
self._roundtrip()
def test_joined_subclass_to_superclass(self):
people, users, dudes = (
self.tables.people,
self.tables.users,
self.tables.dudes,
)
Person, User, Dude = (
self.classes.Person,
self.classes.User,
self.classes.Dude,
)
self.mapper_registry.map_imperatively(
Person,
people,
polymorphic_on=people.c.type,
polymorphic_identity="person",
)
self.mapper_registry.map_imperatively(
User,
users,
inherits=Person,
polymorphic_identity="user",
inherit_condition=(users.c.id == people.c.id),
)
self.mapper_registry.map_imperatively(
Dude,
dudes,
inherits=User,
polymorphic_identity="dude",
inherit_condition=(dudes.c.id == users.c.id),
properties={
"supervisor": relationship(
User,
primaryjoin=users.c.supervisor_id == people.c.id,
remote_side=people.c.id,
foreign_keys=[users.c.supervisor_id],
)
},
)
assert Dude.supervisor.property.direction is MANYTOONE
self._dude_roundtrip()
|
JoinedInhAdjacencyTest
|
python
|
huggingface__transformers
|
tests/utils/test_convert_slow_tokenizer.py
|
{
"start": 245,
"end": 1740
}
|
class ____(unittest.TestCase):
def test_spm_converter_bytefallback_warning(self):
spm_model_file_without_bytefallback = get_tests_dir("fixtures/test_sentencepiece.model")
spm_model_file_with_bytefallback = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
original_tokenizer_without_bytefallback = FakeOriginalTokenizer(vocab_file=spm_model_file_without_bytefallback)
with warnings.catch_warnings(record=True) as w:
_ = SpmConverter(original_tokenizer_without_bytefallback)
# We are looking for if there is any `UserWarning` with
# `The sentencepiece tokenizer that you are converting to a fast tokenizer uses the byte fallback option which is not implemented in the fast tokenizers.`
w = [x for x in w if x.category.__name__ != "DeprecationWarning"]
self.assertEqual(len(w), 0)
original_tokenizer_with_bytefallback = FakeOriginalTokenizer(vocab_file=spm_model_file_with_bytefallback)
with warnings.catch_warnings(record=True) as w:
_ = SpmConverter(original_tokenizer_with_bytefallback)
w = [x for x in w if x.category.__name__ != "DeprecationWarning"]
self.assertEqual(len(w), 1)
self.assertIn(
"The sentencepiece tokenizer that you are converting to a fast tokenizer uses the byte fallback option"
" which is not implemented in the fast tokenizers.",
str(w[0].message),
)
|
ConvertSlowTokenizerTest
|
python
|
realpython__materials
|
solid-principles-python/shapes_lsp.py
|
{
"start": 662,
"end": 854
}
|
class ____(Shape):
def __init__(self, width, height):
self.width = width
self.height = height
def calculate_area(self):
return self.width * self.height
|
Rectangle
|
python
|
gevent__gevent
|
src/gevent/tests/known_failures.py
|
{
"start": 1209,
"end": 1598
}
|
class ____(Condition):
__slots__ = (
'value',
'__name__',
)
def __init__(self, value, name=None):
self.value = bool(value)
self.__name__ = name or str(value)
def __bool__(self):
return self.value
def __repr__(self):
return self.__name__
ALWAYS = ConstantCondition(True)
NEVER = ConstantCondition(False)
|
ConstantCondition
|
python
|
huggingface__transformers
|
src/transformers/models/lfm2_vl/processing_lfm2_vl.py
|
{
"start": 1524,
"end": 11207
}
|
class ____(ProcessorMixin):
r"""
Constructs a Lfm2Vl processor which wraps a Lfm2Tokenizer tokenizer and Lfm2VlImageProcessor into a single processor.
[`Lfm2VlProcessor`] offers all the functionalities of [`Lfm2ImageProcessor`] and [`Lfm2Tokenizer`].
Args:
image_processor (`Lfm2VlImageProcessor`):
An instance of [`Lfm2VlImageProcessor`]. The image processor is a required input.
tokenizer (`PreTrainedTokenizerBase`):
An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input.
chat_template (`str`, *optional*):
A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string.
"""
def __init__(
self,
image_processor,
tokenizer,
chat_template: Optional[str] = None,
**kwargs,
):
self.image_token = getattr(tokenizer, "image_token", "<image>")
self.image_token_id = (
tokenizer.image_token_id
if hasattr(tokenizer, "image_token_id")
else tokenizer.convert_tokens_to_ids(self.image_token)
)
self.image_start_token = getattr(tokenizer, "image_start_token", "<|image_start|>")
self.image_end_token = getattr(tokenizer, "image_end_token", "<|image_end|>")
self.image_thumbnail_token = getattr(tokenizer, "image_thumbnail_token", "<|img_thumbnail|>")
super().__init__(image_processor, tokenizer, chat_template=chat_template, **kwargs)
def __call__(
self,
images: Optional[Union[ImageInput, list[ImageInput], list[list[ImageInput]]]] = None,
text: Optional[Union[TextInput, list[TextInput]]] = None,
**kwargs: Unpack[Lfm2VlProcessorKwargs],
) -> BatchEncoding:
"""
Processes the input prompts and returns a BatchFeature.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`, *optional*):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. If is of type `list[ImageInput]`, it's assumed that this is for a single prompt i.e. of batch size 1.
text (`TextInput`, *optional*):
The sequence or batch of sequences to be encoded.
Wherever an image token, `<image>` is encountered it is expanded to a proper sequence of image tokens.
return_tensors (`Optional[str, TensorType]`, *optional*):
If set, will return tensors of a particular framework. See [`PreTrainedTokenizerFast.__call__`] for more
information.
"""
if text is None and images is None:
raise ValueError("You must provide one of `text` or `images`.")
if images is not None and text is None:
raise ValueError(
"You must provide `text` when `images` is provided. Minimal text consists of a single image token."
)
output_kwargs = self._merge_kwargs(
Lfm2VlProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and not isinstance(text[0], str):
raise TypeError("Invalid input text. Please provide a string, or a list of strings")
n_images_in_text = [sample.count(self.image_token) for sample in text]
if sum(n_images_in_text) > 0 and images is None:
raise ValueError(f"We detected {sum(n_images_in_text)} tokens in the text but no images were passed")
inputs = {}
use_image_special_tokens = output_kwargs["text_kwargs"].pop("use_image_special_tokens")
if images is not None:
images = self.image_processor.fetch_images(images)
batched_images = make_nested_list_of_images(images)
vision_inputs = self.image_processor(batched_images, **output_kwargs["images_kwargs"])
n_images_in_images = [len(sublist) for sublist in batched_images]
if n_images_in_images != n_images_in_text:
raise ValueError(
f"The number of images in the text {n_images_in_text} and images {n_images_in_images} should be the same."
)
text = self.expand_text_with_placeholders(
text,
batched_images,
image_rows=vision_inputs.pop("image_rows"),
image_cols=vision_inputs.pop("image_cols"),
image_sizes=vision_inputs.pop("image_sizes"),
use_image_special_tokens=use_image_special_tokens,
**output_kwargs["images_kwargs"],
)
inputs.update(vision_inputs)
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
inputs.update(text_inputs)
return BatchFeature(inputs, tensor_type=return_tensors)
def expand_text_with_placeholders(
self,
text: list[str],
images: list[list[ImageInput]],
image_rows: list[list[int]],
image_cols: list[list[int]],
image_sizes: list[list[int]],
use_image_special_tokens: bool,
**images_kwargs,
):
prompt_strings = []
image_data = iter(zip(*[image_rows, image_cols, image_sizes]))
for sample_text, sample_images in zip(text, images):
split_sample = sample_text.split(self.image_token)
sample_text_with_image_tokens = ""
for i, image in enumerate(sample_images):
sample_text_with_image_tokens += split_sample[i]
if use_image_special_tokens:
sample_text_with_image_tokens += self.image_start_token
rows, cols, image_size = next(image_data)
num_thumbnail_tokens, num_tokens_per_tile = self._get_image_num_tokens(image_size, **images_kwargs)
if rows > 1 or cols > 1:
for row in range(rows):
for col in range(cols):
if use_image_special_tokens:
sample_text_with_image_tokens += f"<|img_row_{row + 1}_col_{col + 1}|>"
sample_text_with_image_tokens += self.image_token * num_tokens_per_tile
if num_thumbnail_tokens > 0:
if use_image_special_tokens:
sample_text_with_image_tokens += self.image_thumbnail_token
sample_text_with_image_tokens += self.image_token * num_thumbnail_tokens
else:
sample_text_with_image_tokens += self.image_token * num_thumbnail_tokens
if use_image_special_tokens:
sample_text_with_image_tokens += self.image_end_token
sample_text_with_image_tokens += split_sample[i + 1]
prompt_strings.append(sample_text_with_image_tokens)
return prompt_strings
def _get_image_num_tokens(self, image_size: list[int], **images_kwargs) -> tuple[int, int]:
tile_size = images_kwargs.get("tile_size", self.image_processor.tile_size)
downsample_factor = images_kwargs.get("downsample_factor", self.image_processor.downsample_factor)
encoder_patch_size = images_kwargs.get("encoder_patch_size", self.image_processor.encoder_patch_size)
use_thumbnail = images_kwargs.get("use_thumbnail", self.image_processor.use_thumbnail)
thumbnail_tokens = 0
if use_thumbnail:
image_height, image_width = image_size
num_patches_height = image_height // encoder_patch_size
num_patches_width = image_width // encoder_patch_size
dwn_num_patches_height = math.ceil(num_patches_height / downsample_factor)
dwn_num_patches_width = math.ceil(num_patches_width / downsample_factor)
thumbnail_tokens = dwn_num_patches_height * dwn_num_patches_width
num_patches_tile = tile_size // encoder_patch_size
dwn_num_patches_tile = math.ceil(num_patches_tile / downsample_factor)
tile_tokens = dwn_num_patches_tile * dwn_num_patches_tile
return thumbnail_tokens, tile_tokens
def batch_decode(self, *args, **kwargs):
"""
This method forwards all its arguments to LFM2Tokeniser's [`~PreTrainedTokenizer.batch_decode`]. Please
refer to the docstring of this method for more information.
"""
batched_decode_output = self.tokenizer.batch_decode(*args, **kwargs)
return batched_decode_output
def decode(self, *args, **kwargs):
"""
This method forwards all its arguments to LFM2Tokeniser's [`~PreTrainedTokenizer.decode`]. Please refer to
the docstring of this method for more information.
"""
decode_output = self.tokenizer.decode(*args, **kwargs)
return decode_output
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
# LFM2-VL has no dedicated tokenizer class and uses the Base class with default model input names
tokenizer_input_names = [name for name in tokenizer_input_names if name != "token_type_ids"]
return list(tokenizer_input_names + image_processor_input_names)
__all__ = ["Lfm2VlProcessor"]
|
Lfm2VlProcessor
|
python
|
conda__conda
|
conda/base/context.py
|
{
"start": 80739,
"end": 89977
}
|
class ____:
def __init__(self):
self._stack = [ContextStackObject() for _ in range(3)]
self._stack_idx = 0
self._last_search_path = None
self._last_argparse_args = None
def push(self, search_path: PathsType, argparse_args: Namespace | None) -> None:
self._stack_idx += 1
old_len = len(self._stack)
if self._stack_idx >= old_len:
self._stack.extend([ContextStackObject() for _ in range(old_len)])
self._stack[self._stack_idx].set_value(search_path, argparse_args)
self.apply()
def apply(self):
if (
self._last_search_path != self._stack[self._stack_idx].search_path
or self._last_argparse_args != self._stack[self._stack_idx].argparse_args
):
# Expensive:
self._stack[self._stack_idx].apply()
self._last_search_path = self._stack[self._stack_idx].search_path
self._last_argparse_args = self._stack[self._stack_idx].argparse_args
def pop(self):
self._stack_idx -= 1
self._stack[self._stack_idx].apply()
def replace(self, search_path: PathsType, argparse_args: Namespace | None) -> None:
self._stack[self._stack_idx].set_value(search_path, argparse_args)
self._stack[self._stack_idx].apply()
context_stack = ContextStack()
def stack_context(
pushing: bool,
search_path: PathsType = SEARCH_PATH,
argparse_args: Namespace | None = None,
) -> None:
if pushing:
# Fast
context_stack.push(search_path, argparse_args)
else:
# Slow
context_stack.pop()
# Default means "The configuration when there are no condarc files present". It is
# all the settings and defaults that are built in to the code and *not* the default
# value of search_path=SEARCH_PATH. It means search_path=().
def stack_context_default(
pushing: bool,
argparse_args: Namespace | None = None,
) -> None:
return stack_context(pushing, search_path=(), argparse_args=argparse_args)
def replace_context(
pushing: bool | None = None,
search_path: Iterable[str] = SEARCH_PATH,
argparse_args: Namespace | None = None,
) -> None:
# pushing arg intentionally not used here, but kept for API compatibility
return context_stack.replace(search_path, argparse_args)
def replace_context_default(
pushing: bool | None = None,
argparse_args: Namespace | None = None,
) -> None:
# pushing arg intentionally not used here, but kept for API compatibility
return context_stack.replace(search_path=(), argparse_args=argparse_args)
# Tests that want to only declare 'I support the project-wide default for how to
# manage stacking of contexts'. Tests that are known to be careful with context
# can use `replace_context_default` which might be faster, though it should
# be a stated goal to set conda_tests_ctxt_mgmt_def_pol to replace_context_default
# and not to stack_context_default.
conda_tests_ctxt_mgmt_def_pol = replace_context_default
def env_name(prefix: PathType) -> PathType | str | None:
# counter part to `locate_prefix_by_name()` below
if not prefix:
return None
if paths_equal(prefix, context.root_prefix):
return ROOT_ENV_NAME
maybe_envs_dir, maybe_name = path_split(prefix)
for envs_dir in context.envs_dirs:
if paths_equal(envs_dir, maybe_envs_dir):
return maybe_name
return prefix
def locate_prefix_by_name(name: str, envs_dirs: PathsType | None = None) -> PathType:
"""Find the location of a prefix given a conda env name. If the location does not exist, an
error is raised.
"""
if not name:
raise ValueError("'name' cannot be empty.")
if name in RESERVED_ENV_NAMES:
return context.root_prefix
if envs_dirs is None:
envs_dirs = context.envs_dirs
for envs_dir in envs_dirs:
if not isdir(envs_dir):
continue
prefix = join(envs_dir, name)
if isdir(prefix):
return abspath(prefix)
from ..exceptions import EnvironmentNameNotFound
raise EnvironmentNameNotFound(name)
def validate_channels(channels: Iterator[str]) -> tuple[str, ...]:
"""
Validate if the given channel URLs are allowed based on the context's allowlist
and denylist configurations.
:param channels: A list of channels (either URLs or names) to validate.
:raises ChannelNotAllowed: If any URL is not in the allowlist.
:raises ChannelDenied: If any URL is in the denylist.
"""
from ..exceptions import ChannelDenied, ChannelNotAllowed
from ..models.channel import Channel
allowlist = [
url
for channel in context.allowlist_channels
for url in Channel(channel).base_urls
]
denylist = [
url
for channel in context.denylist_channels
for url in Channel(channel).base_urls
]
if allowlist or denylist:
for channel in map(Channel, channels):
for url in channel.base_urls:
if url in denylist:
raise ChannelDenied(channel)
if allowlist and url not in allowlist:
raise ChannelNotAllowed(channel)
return tuple(dict.fromkeys(channels))
@deprecated(
"25.9", "26.3", addendum="Use PrefixData.validate_name() + PrefixData.from_name()"
)
def validate_prefix_name(
prefix_name: str, ctx: Context, allow_base: bool = True
) -> PathType:
"""Run various validations to make sure prefix_name is valid"""
from ..exceptions import CondaValueError
if PREFIX_NAME_DISALLOWED_CHARS.intersection(prefix_name):
raise CondaValueError(
dals(
f"""
Invalid environment name: {prefix_name!r}
Characters not allowed: {PREFIX_NAME_DISALLOWED_CHARS}
If you are specifying a path to an environment, the `-p`
flag should be used instead.
"""
)
)
if prefix_name in RESERVED_ENV_NAMES:
if allow_base:
return ctx.root_prefix
else:
raise CondaValueError(
"Use of 'base' as environment name is not allowed here."
)
else:
from ..exceptions import EnvironmentNameNotFound
from ..gateways.disk.create import first_writable_envs_dir
try:
return locate_prefix_by_name(prefix_name)
except EnvironmentNameNotFound:
return join(first_writable_envs_dir(), prefix_name)
def determine_target_prefix(ctx: Context, args: Namespace | None = None) -> PathType:
"""Get the prefix to operate in. The prefix may not yet exist.
Args:
ctx: the context of conda
args: the argparse args from the command line
Returns: the prefix
Raises: CondaEnvironmentNotFoundError if the prefix is invalid
"""
argparse_args = args or ctx._argparse_args
try:
prefix_name = argparse_args.name
except AttributeError:
prefix_name = None
try:
prefix_path = argparse_args.prefix
except AttributeError:
prefix_path = None
if prefix_name is not None and not prefix_name.strip(): # pragma: no cover
from ..exceptions import ArgumentError
raise ArgumentError("Argument --name requires a value.")
if prefix_path is not None and not prefix_path.strip(): # pragma: no cover
from ..exceptions import ArgumentError
raise ArgumentError("Argument --prefix requires a value.")
if prefix_name is None and prefix_path is None:
return ctx.default_prefix
elif prefix_path is not None:
return expand(prefix_path)
else:
from ..core.prefix_data import PrefixData
return str(PrefixData.from_name(prefix_name).prefix_path)
@deprecated(
"25.9", "26.3", addendum="Use conda.gateways.disk.create.first_writable_envs_dir"
)
def _first_writable_envs_dir() -> PathType:
from conda.gateways.disk.create import first_writable_envs_dir
return first_writable_envs_dir()
@deprecated(
"25.9",
"26.3",
addendum="Use `conda.base.context.context.plugins.raw_data` instead.",
)
def get_plugin_config_data(
data: dict[Path, dict[str, RawParameter]],
) -> dict[Path, dict[str, RawParameter]]:
from ..plugins.config import PluginConfig
return PluginConfig(data).raw_data
@deprecated(
"25.9",
"26.3",
addendum="Use `conda.plugins.config.PluginConfig.add_plugin_setting` instead.",
)
def add_plugin_setting(
name: str,
parameter: Parameter,
aliases: tuple[str, ...] = (),
) -> None:
from ..plugins.config import PluginConfig
return PluginConfig.add_plugin_setting(name, parameter, aliases)
@deprecated(
"25.9",
"26.3",
addendum="Use `conda.plugins.config.PluginConfig.remove_all_plugin_settings` instead.",
)
def remove_all_plugin_settings() -> None:
from ..plugins.config import PluginConfig
return PluginConfig.remove_all_plugin_settings()
try:
context = Context((), None)
except ConfigurationLoadError as e: # pragma: no cover
print(repr(e), file=sys.stderr)
# Exception handler isn't loaded so use sys.exit
sys.exit(1)
|
ContextStack
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-strong-pair-xor-i.py
|
{
"start": 120,
"end": 1985
}
|
class ____(object):
def maximumStrongPairXor(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
class Trie(object):
def __init__(self, bit_length):
self.__nodes = []
self.__cnts = []
self.__new_node()
self.__bit_length = bit_length
def __new_node(self):
self.__nodes.append([-1]*2)
self.__cnts.append(0)
return len(self.__nodes)-1
def update(self, num, d):
curr = 0
for i in reversed(xrange(self.__bit_length)):
x = num>>i
if self.__nodes[curr][x&1] == -1:
self.__nodes[curr][x&1] = self.__new_node()
curr = self.__nodes[curr][x&1]
self.__cnts[curr] += d
def query(self, num):
result = curr = 0
for i in reversed(xrange(self.__bit_length)):
result <<= 1
x = num>>i
if self.__nodes[curr][1^(x&1)] != -1 and self.__cnts[self.__nodes[curr][1^(x&1)]]:
curr = self.__nodes[curr][1^(x&1)]
result |= 1
else:
curr = self.__nodes[curr][x&1]
return result
nums.sort()
trie = Trie(nums[-1].bit_length())
result = j = 0
for i, num in enumerate(nums):
trie.update(num, +1)
while not (nums[i] <= 2*nums[j]) :
trie.update(nums[j], -1)
j += 1
result = max(result, trie.query(num))
return result
# Time: O(nlogr), r = max(nums)
# Space: O(t)
# bit manipulation, greedy, trie, dp
|
Solution
|
python
|
google__jax
|
jax/experimental/jax2tf/tests/flax_models/transformer_nlp_seq.py
|
{
"start": 4609,
"end": 5790
}
|
class ____(nn.Module):
"""Transformer encoder layer.
Attributes:
config: TransformerConfig dataclass containing hyperparameters.
"""
config: TransformerConfig
@nn.compact
def __call__(self, inputs, deterministic):
"""Applies Encoder1DBlock module.
Args:
inputs: input data.
deterministic: if true dropout is applied otherwise not.
Returns:
output after transformer encoder block.
"""
config = self.config
# Attention block.
assert inputs.ndim == 3
x = nn.LayerNorm(dtype=config.dtype)(inputs)
x = nn.SelfAttention(
num_heads=config.num_heads,
dtype=config.dtype,
qkv_features=config.qkv_dim,
kernel_init=config.kernel_init,
bias_init=config.bias_init,
use_bias=False,
broadcast_dropout=False,
dropout_rate=config.attention_dropout_rate,
deterministic=deterministic)(
x)
x = nn.Dropout(rate=config.dropout_rate)(x, deterministic=deterministic)
x = x + inputs
# MLP block.
y = nn.LayerNorm(dtype=config.dtype)(x)
y = MlpBlock(config=config)(y, deterministic=deterministic)
return x + y
|
Encoder1DBlock
|
python
|
numba__numba
|
numba/tests/test_listobject.py
|
{
"start": 21503,
"end": 24373
}
|
class ____(MemoryLeakMixin, TestCase):
"""Test list delitem.
"""
def test_list_singleton_delitem_index(self):
@njit
def foo():
l = listobject.new_list(int32)
l.append(0)
del l[0]
return len(l)
self.assertEqual(foo(), 0)
def test_list_singleton_delitem_slice_defaults(self):
@njit
def foo():
l = listobject.new_list(int32)
l.append(0)
del l[:]
return len(l)
self.assertEqual(foo(), 0)
def test_list_singleton_delitem_slice_start(self):
@njit
def foo():
l = listobject.new_list(int32)
l.append(0)
del l[0:]
return len(l)
self.assertEqual(foo(), 0)
def test_list_singleton_delitem_slice_stop(self):
@njit
def foo():
l = listobject.new_list(int32)
l.append(0)
del l[:1]
return len(l)
self.assertEqual(foo(), 0)
def test_list_singleton_delitem_slice_start_stop(self):
@njit
def foo():
l = listobject.new_list(int32)
l.append(0)
del l[0:1]
return len(l)
self.assertEqual(foo(), 0)
def test_list_singleton_delitem_slice_start_step(self):
@njit
def foo():
l = listobject.new_list(int32)
l.append(0)
del l[0::1]
return len(l)
self.assertEqual(foo(), 0)
def test_list_singleton_delitem_slice_start_stop_step(self):
@njit
def foo():
l = listobject.new_list(int32)
l.append(0)
del l[0:1:1]
return len(l)
self.assertEqual(foo(), 0)
def test_list_multiple_delitem(self):
@njit
def foo():
l = listobject.new_list(int32)
for j in (10, 11, 12):
l.append(j)
del l[0]
return len(l), l[0], l[1]
self.assertEqual(foo(), (2, 11, 12))
def test_list_multiple_delitem_slice(self):
@njit
def foo():
l = listobject.new_list(int32)
for j in (10, 11, 12):
l.append(j)
del l[:]
return len(l)
self.assertEqual(foo(), 0)
def test_list_multiple_delitem_off_by_one(self):
# this was exposing a nasty off-by-one error, leaving it in to detect
# and regressions
@njit
def foo():
l = listobject.new_list(int32)
for j in range(10, 20):
l.append(j)
k = listobject.new_list(int32)
for j in range(10, 20):
k.append(j)
# should be a no-op
del l[-9:-20]
return k == l
self.assertTrue(foo())
|
TestListObjectDelitem
|
python
|
getlogbook__logbook
|
src/logbook/ticketing.py
|
{
"start": 15881,
"end": 16673
}
|
class ____(Handler, HashingHandlerMixin):
"""Baseclass for ticketing handlers. This can be used to interface
ticketing systems that do not necessarily provide an interface that
would be compatible with the :class:`BackendBase` interface.
"""
def __init__(self, hash_salt, level=NOTSET, filter=None, bubble=False):
Handler.__init__(self, level, filter, bubble)
self.hash_salt = hash_salt
def hash_record_raw(self, record):
"""Returns the unique hash of a record."""
hash = HashingHandlerMixin.hash_record_raw(self, record)
if self.hash_salt is not None:
hash_salt = self.hash_salt
hash_salt = hash_salt.encode("utf-8")
hash.update(b"\x00" + hash_salt)
return hash
|
TicketingBaseHandler
|
python
|
walkccc__LeetCode
|
solutions/1404. Number of Steps to Reduce a Number in Binary Representation to One/1404.py
|
{
"start": 0,
"end": 523
}
|
class ____:
def numSteps(self, s: str) -> int:
ans = 0
chars = list(s)
# All the trailing 0s can be popped by 1 step.
while chars[-1] == '0':
chars.pop()
ans += 1
if ''.join(chars) == '1':
return ans
# `s` is now odd, so add 1 to `s` and cost 1 step.
# All the 1s will become 0s and can be popped by 1 step.
# All the 0s will become 1s and can be popped by 2 steps (adding 1 then
# dividing by 2).
return ans + 1 + sum(1 if c == '1' else 2 for c in chars)
|
Solution
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/py_test_callback/package.py
|
{
"start": 998,
"end": 1353
}
|
class ____(BuilderWithDefaults):
phases = ("install",)
#: Callback names for install-time test
install_time_test_callbacks = ["test_callback"]
def install(self, pkg, spec, prefix):
pkg.install(spec, prefix)
run_after("install")(execute_install_time_tests)
def test_callback(self):
self.pkg.test_callback()
|
MyBuilder
|
python
|
xlwings__xlwings
|
xlwings/_win32patch.py
|
{
"start": 1803,
"end": 3353
}
|
class ____:
def __init__(self, oobj=None):
if oobj is None:
oobj = pythoncom.new(self.CLSID)
self.__dict__["_dispobj_"] = self.default_interface(oobj)
def __repr__(self):
return "<win32com.gen_py.%s.%s>" % (__doc__, self.__class__.__name__)
def __getattr__(self, attr):
d = self.__dict__["_dispobj_"]
if d is not None:
return getattr(d, attr)
raise AttributeError(attr)
def __setattr__(self, attr, value):
if attr in self.__dict__:
self.__dict__[attr] = value
return
try:
d = self.__dict__["_dispobj_"]
if d is not None:
d.__setattr__(attr, value)
return
except AttributeError:
pass
self.__dict__[attr] = value
# Special methods don't use __getattr__ etc, so explicitly delegate here.
# Some wrapped objects might not have them, but that's OK - the attribute
# error can just bubble up.
def __call__(self, *args, **kwargs):
return self.__dict__["_dispobj_"].__call__(*args, **kwargs)
def __str__(self, *args):
return self.__dict__["_dispobj_"].__str__(*args)
def __int__(self, *args):
return self.__dict__["_dispobj_"].__int__(*args)
def __iter__(self):
return self.__dict__["_dispobj_"].__iter__()
def __len__(self):
return self.__dict__["_dispobj_"].__len__()
def __nonzero__(self):
return self.__dict__["_dispobj_"].__nonzero__()
|
CoClassBaseClass
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_expr.py
|
{
"start": 87060,
"end": 89380
}
|
class ____(Elemwise):
_parameters = ["left", "right"]
@functools.cached_property
def _broadcastable(self):
deps = self.dependencies()
return (
1 in {dep.npartitions for dep in deps}
and len({dep.ndim for dep in deps}) == 2
)
def __str__(self):
return f"{self.left} {self._operator_repr} {self.right}"
def _simplify_up(self, parent, dependents):
if isinstance(parent, Projection):
changed = False
columns = determine_column_projection(self, parent, dependents)
columns = _convert_to_list(columns)
columns = [col for col in self.columns if col in columns]
if (
isinstance(self.left, Expr)
and self.left.ndim > 1
and self.left.columns != columns
):
left = self.left[columns] # TODO: filter just the correct columns
changed = True
else:
left = self.left
if (
isinstance(self.right, Expr)
and self.right.ndim > 1
and self.right.columns != columns
):
right = self.right[columns] # TODO: filter just the correct columns
changed = True
else:
right = self.right
if not changed:
return
return type(parent)(type(self)(left, right), *parent.operands[1:])
def _node_label_args(self):
return [self.left, self.right]
def _divisions(self):
if is_index_like(self._meta):
left_divisions = (
pd.Series(self.left.divisions)
if isinstance(self.left, Expr)
else self.left
)
right_divisions = (
pd.Series(self.right.divisions)
if isinstance(self.right, Expr)
else self.right
)
return tuple(self.operation(left_divisions, right_divisions))
elif self._broadcastable and len(self.dependencies()) == 2:
if self.left.ndim < self.right.ndim:
return self.right.divisions
else:
return self.left.divisions
else:
return super()._divisions()
|
Binop
|
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/cli/environments.py
|
{
"start": 1248,
"end": 21511
}
|
class ____(enum.Enum):
"""Type of provisioning to use for the controller."""
NO_DELEGATION = enum.auto()
ORIGIN = enum.auto()
DELEGATED = enum.auto()
def add_environments(
parser: argparse.ArgumentParser,
completer: CompositeActionCompletionFinder,
controller_mode: ControllerMode,
target_mode: TargetMode,
) -> None:
"""Add arguments for the environments used to run ansible-test and commands it invokes."""
no_environment = controller_mode == ControllerMode.NO_DELEGATION and target_mode == TargetMode.NO_TARGETS
parser.set_defaults(no_environment=no_environment)
if no_environment:
return
parser.set_defaults(target_mode=target_mode)
add_global_options(parser, controller_mode)
add_legacy_environment_options(parser, controller_mode, target_mode)
action_types = add_composite_environment_options(parser, completer, controller_mode, target_mode)
sections = [f'{heading}\n{content}'
for action_type, documentation_state in CompositeAction.documentation_state.items() if action_type in action_types
for heading, content in documentation_state.sections.items()]
if not get_ci_provider().supports_core_ci_auth():
sections.append('Remote provisioning options have been hidden since no Ansible Core CI API key was found.')
sections.append(get_epilog(completer))
parser.formatter_class = argparse.RawDescriptionHelpFormatter
parser.epilog = '\n\n'.join(sections)
def add_global_options(
parser: argparse.ArgumentParser,
controller_mode: ControllerMode,
):
"""Add global options for controlling the test environment that work with both the legacy and composite options."""
global_parser = t.cast(argparse.ArgumentParser, parser.add_argument_group(title='global environment arguments'))
global_parser.add_argument(
'--containers',
metavar='JSON',
help=argparse.SUPPRESS,
)
global_parser.add_argument(
'--pypi-proxy',
action='store_true',
help=argparse.SUPPRESS,
)
global_parser.add_argument(
'--pypi-endpoint',
metavar='URI',
help=argparse.SUPPRESS,
)
global_parser.add_argument(
'--requirements',
action='store_true',
default=False,
help='install command requirements',
)
global_parser.add_argument(
'--host-path',
help=argparse.SUPPRESS, # for internal use only by ansible-test
)
global_parser.add_argument(
'--metadata',
default=os.environ.get('ANSIBLE_TEST_METADATA_PATH'),
help=argparse.SUPPRESS, # for internal use only by ansible-test
)
add_global_remote(global_parser, controller_mode)
add_global_docker(global_parser, controller_mode)
add_global_debug(global_parser)
def add_composite_environment_options(
parser: argparse.ArgumentParser,
completer: CompositeActionCompletionFinder,
controller_mode: ControllerMode,
target_mode: TargetMode,
) -> list[t.Type[CompositeAction]]:
"""Add composite options for controlling the test environment."""
composite_parser = t.cast(argparse.ArgumentParser, parser.add_argument_group(
title='composite environment arguments (mutually exclusive with "environment arguments" above)'))
action_types: list[t.Type[CompositeAction]] = []
def register_action_type(action_type: t.Type[CompositeAction]) -> t.Type[CompositeAction]:
"""Register the provided composite action type and return it."""
action_types.append(action_type)
return action_type
if controller_mode == ControllerMode.NO_DELEGATION:
composite_parser.set_defaults(controller=None)
else:
register_completer(composite_parser.add_argument(
'--controller',
metavar='OPT',
action=register_action_type(DelegatedControllerAction if controller_mode == ControllerMode.DELEGATED else OriginControllerAction),
help='configuration for the controller',
), completer.completer)
if target_mode == TargetMode.NO_TARGETS:
composite_parser.set_defaults(targets=[])
elif target_mode == TargetMode.SHELL:
group = composite_parser.add_mutually_exclusive_group()
register_completer(group.add_argument(
'--target-posix',
metavar='OPT',
action=register_action_type(PosixSshTargetAction),
help='configuration for the target',
), completer.completer)
suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS
register_completer(group.add_argument(
'--target-windows',
metavar='OPT',
action=WindowsSshTargetAction if suppress else register_action_type(WindowsSshTargetAction),
help=suppress or 'configuration for the target',
), completer.completer)
register_completer(group.add_argument(
'--target-network',
metavar='OPT',
action=NetworkSshTargetAction if suppress else register_action_type(NetworkSshTargetAction),
help=suppress or 'configuration for the target',
), completer.completer)
else:
if target_mode.multiple_pythons:
target_option = '--target-python'
target_help = 'configuration for the target python interpreter(s)'
elif target_mode == TargetMode.POSIX_INTEGRATION:
target_option = '--target'
target_help = 'configuration for the target'
else:
target_option = '--target'
target_help = 'configuration for the target(s)'
target_actions = {
TargetMode.POSIX_INTEGRATION: PosixTargetAction,
TargetMode.WINDOWS_INTEGRATION: WindowsTargetAction,
TargetMode.NETWORK_INTEGRATION: NetworkTargetAction,
TargetMode.SANITY: SanityPythonTargetAction,
TargetMode.UNITS: UnitsPythonTargetAction,
}
target_action = target_actions[target_mode]
register_completer(composite_parser.add_argument(
target_option,
metavar='OPT',
action=register_action_type(target_action),
help=target_help,
), completer.completer)
return action_types
def add_legacy_environment_options(
parser: argparse.ArgumentParser,
controller_mode: ControllerMode,
target_mode: TargetMode,
):
"""Add legacy options for controlling the test environment."""
environment: argparse.ArgumentParser = parser.add_argument_group( # type: ignore[assignment] # real type private
title='environment arguments (mutually exclusive with "composite environment arguments" below)',
)
add_environments_python(environment, target_mode)
add_environments_host(environment, controller_mode, target_mode)
def add_environments_python(
environments_parser: argparse.ArgumentParser,
target_mode: TargetMode,
) -> None:
"""Add environment arguments to control the Python version(s) used."""
python_versions: tuple[str, ...]
if target_mode.has_python:
python_versions = SUPPORTED_PYTHON_VERSIONS
else:
python_versions = CONTROLLER_PYTHON_VERSIONS
environments_parser.add_argument(
'--python',
metavar='X.Y',
choices=python_versions + ('default',),
help='python version: %s' % ', '.join(python_versions),
)
environments_parser.add_argument(
'--python-interpreter',
metavar='PATH',
help='path to the python interpreter',
)
def add_environments_host(
environments_parser: argparse.ArgumentParser,
controller_mode: ControllerMode,
target_mode: TargetMode,
) -> None:
"""Add environment arguments for the given host and argument modes."""
environments_exclusive_group: argparse.ArgumentParser = environments_parser.add_mutually_exclusive_group() # type: ignore[assignment] # real type private
add_environment_local(environments_exclusive_group)
add_environment_venv(environments_exclusive_group, environments_parser)
if controller_mode == ControllerMode.DELEGATED:
add_environment_remote(environments_exclusive_group, environments_parser, target_mode)
add_environment_docker(environments_exclusive_group, environments_parser, target_mode)
if target_mode == TargetMode.WINDOWS_INTEGRATION:
add_environment_windows(environments_parser)
if target_mode == TargetMode.NETWORK_INTEGRATION:
add_environment_network(environments_parser)
def add_environment_network(
environments_parser: argparse.ArgumentParser,
) -> None:
"""Add environment arguments for running on a windows host."""
register_completer(environments_parser.add_argument(
'--platform',
metavar='PLATFORM',
action='append',
help='network platform/version',
), complete_network_platform)
register_completer(environments_parser.add_argument(
'--platform-collection',
type=key_value_type,
metavar='PLATFORM=COLLECTION',
action='append',
help='collection used to test platform',
), complete_network_platform_collection)
register_completer(environments_parser.add_argument(
'--platform-connection',
type=key_value_type,
metavar='PLATFORM=CONNECTION',
action='append',
help='connection used to test platform',
), complete_network_platform_connection)
environments_parser.add_argument(
'--inventory',
metavar='PATH',
help='path to inventory used for tests',
)
def add_environment_windows(
environments_parser: argparse.ArgumentParser,
) -> None:
"""Add environment arguments for running on a windows host."""
register_completer(environments_parser.add_argument(
'--windows',
metavar='VERSION',
action='append',
help='windows version',
), complete_windows)
environments_parser.add_argument(
'--inventory',
metavar='PATH',
help='path to inventory used for tests',
)
def add_environment_local(
exclusive_parser: argparse.ArgumentParser,
) -> None:
"""Add environment arguments for running on the local (origin) host."""
exclusive_parser.add_argument(
'--local',
action='store_true',
help='run from the local environment',
)
def add_environment_venv(
exclusive_parser: argparse.ArgumentParser,
environments_parser: argparse.ArgumentParser,
) -> None:
"""Add environment arguments for running in ansible-test managed virtual environments."""
exclusive_parser.add_argument(
'--venv',
action='store_true',
help='run from a virtual environment',
)
environments_parser.add_argument(
'--venv-system-site-packages',
action='store_true',
help='enable system site packages',
)
def add_global_docker(
parser: argparse.ArgumentParser,
controller_mode: ControllerMode,
) -> None:
"""Add global options for Docker."""
if controller_mode != ControllerMode.DELEGATED:
parser.set_defaults(
docker_network=None,
docker_terminate=None,
prime_containers=False,
dev_systemd_debug=False,
dev_probe_cgroups=None,
)
return
parser.add_argument(
'--docker-network',
metavar='NET',
help='run using the specified network',
)
parser.add_argument(
'--docker-terminate',
metavar='T',
default=TerminateMode.ALWAYS,
type=TerminateMode,
action=EnumAction,
help='terminate the container: %(choices)s (default: %(default)s)',
)
parser.add_argument(
'--prime-containers',
action='store_true',
help='download containers without running tests',
)
# Docker support isn't related to ansible-core-ci.
# However, ansible-core-ci support is a reasonable indicator that the user may need the `--dev-*` options.
suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS
parser.add_argument(
'--dev-systemd-debug',
action='store_true',
help=suppress or 'enable systemd debugging in containers',
)
parser.add_argument(
'--dev-probe-cgroups',
metavar='DIR',
nargs='?',
const='',
help=suppress or 'probe container cgroups, with optional log dir',
)
def add_global_debug(
parser: argparse.ArgumentParser,
) -> None:
"""Add global debug options."""
# These `--dev-*` options are experimental features that may change or be removed without regard for backward compatibility.
# Additionally, they're features that are not likely to be used by most users.
# To avoid confusion, they're hidden from `--help` and tab completion by default, except for ansible-core-ci users.
suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS
parser.add_argument(
'--dev-debug-on-demand',
action='store_true',
default=False,
help=suppress or 'enable remote debugging only under a debugger',
)
parser.add_argument(
'--dev-debug-cli',
action='store_true',
default=False,
help=suppress or 'enable remote debugging for the Ansible CLI',
)
parser.add_argument(
'--dev-debug-ansiballz',
action='store_true',
default=False,
help=suppress or 'enable remote debugging for AnsiballZ modules',
)
parser.add_argument(
'--dev-debug-self',
action='store_true',
default=False,
help=suppress or 'enable remote debugging for ansible-test',
)
def add_environment_docker(
exclusive_parser: argparse.ArgumentParser,
environments_parser: argparse.ArgumentParser,
target_mode: TargetMode,
) -> None:
"""Add environment arguments for running in docker containers."""
if target_mode in (TargetMode.POSIX_INTEGRATION, TargetMode.SHELL):
docker_images = sorted(filter_completion(docker_completion()))
else:
docker_images = sorted(filter_completion(docker_completion(), controller_only=True))
register_completer(exclusive_parser.add_argument(
'--docker',
metavar='IMAGE',
nargs='?',
const='default',
help='run from a docker container',
), functools.partial(complete_choices, docker_images))
environments_parser.add_argument(
'--docker-privileged',
action='store_true',
help='run docker container in privileged mode',
)
environments_parser.add_argument(
'--docker-seccomp',
metavar='SC',
choices=SECCOMP_CHOICES,
help='set seccomp confinement for the test container: %(choices)s',
)
environments_parser.add_argument(
'--docker-memory',
metavar='INT',
type=int,
help='memory limit for docker in bytes',
)
def add_global_remote(
parser: argparse.ArgumentParser,
controller_mode: ControllerMode,
) -> None:
"""Add global options for remote instances."""
if controller_mode != ControllerMode.DELEGATED:
parser.set_defaults(
remote_stage=None,
remote_endpoint=None,
remote_terminate=None,
)
return
suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS
register_completer(parser.add_argument(
'--remote-stage',
metavar='STAGE',
default='prod',
help=suppress or 'remote stage to use: prod, dev',
), complete_remote_stage)
parser.add_argument(
'--remote-endpoint',
metavar='EP',
help=suppress or 'remote provisioning endpoint to use',
)
parser.add_argument(
'--remote-terminate',
metavar='T',
default=TerminateMode.NEVER,
type=TerminateMode,
action=EnumAction,
help=suppress or 'terminate the remote instance: %(choices)s (default: %(default)s)',
)
def add_environment_remote(
exclusive_parser: argparse.ArgumentParser,
environments_parser: argparse.ArgumentParser,
target_mode: TargetMode,
) -> None:
"""Add environment arguments for running in ansible-core-ci provisioned remote virtual machines."""
if target_mode == TargetMode.POSIX_INTEGRATION:
remote_platforms = get_remote_platform_choices()
elif target_mode == TargetMode.SHELL:
remote_platforms = sorted(set(get_remote_platform_choices()) | set(get_windows_platform_choices()))
else:
remote_platforms = get_remote_platform_choices(True)
suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS
register_completer(exclusive_parser.add_argument(
'--remote',
metavar='NAME',
help=suppress or 'run from a remote instance',
), functools.partial(complete_choices, remote_platforms))
environments_parser.add_argument(
'--remote-provider',
metavar='PR',
choices=REMOTE_PROVIDERS,
help=suppress or 'remote provider to use: %(choices)s',
)
environments_parser.add_argument(
'--remote-arch',
metavar='ARCH',
choices=REMOTE_ARCHITECTURES,
help=suppress or 'remote arch to use: %(choices)s',
)
def complete_remote_stage(prefix: str, **_) -> list[str]:
"""Return a list of supported stages matching the given prefix."""
return [stage for stage in ('prod', 'dev') if stage.startswith(prefix)]
def complete_windows(prefix: str, parsed_args: argparse.Namespace, **_) -> list[str]:
"""Return a list of supported Windows versions matching the given prefix, excluding versions already parsed from the command line."""
return [i for i in get_windows_version_choices() if i.startswith(prefix) and (not parsed_args.windows or i not in parsed_args.windows)]
def complete_network_platform(prefix: str, parsed_args: argparse.Namespace, **_) -> list[str]:
"""Return a list of supported network platforms matching the given prefix, excluding platforms already parsed from the command line."""
images = sorted(filter_completion(network_completion()))
return [i for i in images if i.startswith(prefix) and (not parsed_args.platform or i not in parsed_args.platform)]
def complete_network_platform_collection(prefix: str, parsed_args: argparse.Namespace, **_) -> list[str]:
"""Return a list of supported network platforms matching the given prefix, excluding collection platforms already parsed from the command line."""
left = prefix.split('=')[0]
images = sorted(set(image.platform for image in filter_completion(network_completion()).values()))
return [i + '=' for i in images if i.startswith(left) and (not parsed_args.platform_collection or i not in [x[0] for x in parsed_args.platform_collection])]
def complete_network_platform_connection(prefix: str, parsed_args: argparse.Namespace, **_) -> list[str]:
"""Return a list of supported network platforms matching the given prefix, excluding connection platforms already parsed from the command line."""
left = prefix.split('=')[0]
images = sorted(set(image.platform for image in filter_completion(network_completion()).values()))
return [i + '=' for i in images if i.startswith(left) and (not parsed_args.platform_connection or i not in [x[0] for x in parsed_args.platform_connection])]
def get_remote_platform_choices(controller: bool = False) -> list[str]:
"""Return a list of supported remote platforms matching the given prefix."""
return sorted(filter_completion(remote_completion(), controller_only=controller))
def get_windows_platform_choices() -> list[str]:
"""Return a list of supported Windows versions matching the given prefix."""
return sorted(f'windows/{windows.version}' for windows in filter_completion(windows_completion()).values())
def get_windows_version_choices() -> list[str]:
"""Return a list of supported Windows versions."""
return sorted(windows.version for windows in filter_completion(windows_completion()).values())
|
ControllerMode
|
python
|
google__pytype
|
pytype/matcher.py
|
{
"start": 2312,
"end": 2651
}
|
class ____:
"""An expected type/actual value mismatch."""
view: _ViewType
expected: error_types.BadType
actual: cfg.Variable
@property
def actual_binding(self):
return self.view[self.actual]
@property
def error_details(self):
return self.expected.error_details
@dataclasses.dataclass(eq=True, frozen=True)
|
BadMatch
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_selectin_relations.py
|
{
"start": 91741,
"end": 94303
}
|
class ____(
fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL
):
__dialect__ = "default"
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Foo(ComparableEntity, Base):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
type = Column(String(50))
foo_id = Column(Integer, ForeignKey("foo.id"))
foo = relationship(
lambda: Foo, foreign_keys=foo_id, remote_side=id
)
__mapper_args__ = {
"polymorphic_on": type,
"polymorphic_identity": "foo",
}
class Bar(Foo):
__mapper_args__ = {"polymorphic_identity": "bar"}
@classmethod
def insert_data(cls, connection):
Foo, Bar = cls.classes("Foo", "Bar")
session = Session(connection)
target = Bar(id=1)
b1 = Bar(id=2, foo=Foo(id=3, foo=target))
session.add(b1)
session.commit()
def test_twolevel_selectin_w_polymorphic(self):
Foo, Bar = self.classes("Foo", "Bar")
for count in range(1):
r = with_polymorphic(Foo, "*", aliased=True)
attr1 = Foo.foo.of_type(r)
attr2 = r.foo
s = fixture_session()
from sqlalchemy.orm import Load
opt1 = selectinload(attr1).selectinload(attr2) # noqa
opt2 = Load(Foo).selectinload(attr1).selectinload(attr2) # noqa
q = s.query(Foo).filter(Foo.id == 2).options(opt2)
# q.all()
# return
results = self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT foo.id AS foo_id_1, foo.type AS foo_type, "
"foo.foo_id AS foo_foo_id FROM foo WHERE foo.id = :id_1",
[{"id_1": 2}],
),
CompiledSQL(
"SELECT foo_1.id, foo_1.type, foo_1.foo_id "
"FROM foo AS foo_1 "
"WHERE foo_1.id IN (__[POSTCOMPILE_primary_keys])",
{"primary_keys": [3]},
),
CompiledSQL(
"SELECT foo.id, foo.type, "
"foo.foo_id FROM foo "
"WHERE foo.id IN (__[POSTCOMPILE_primary_keys])",
{"primary_keys": [1]},
),
)
eq_(results, [Bar(id=2, foo=Foo(id=3, foo=Bar(id=1)))])
|
SelfRefInheritanceAliasedTest
|
python
|
fluentpython__example-code-2e
|
10-dp-1class-func/pytypes/classic_strategy.py
|
{
"start": 1133,
"end": 1213
}
|
class ____(typing.NamedTuple):
name: str
fidelity: int
@typelogged
|
Customer
|
python
|
getsentry__sentry
|
src/sentry/monitors/processing_errors/errors.py
|
{
"start": 1344,
"end": 1521
}
|
class ____(TypedDict):
"""
The checkin was already completed and we attempted to modify it
"""
type: Literal[ProcessingErrorType.CHECKIN_FINISHED]
|
CheckinFinished
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-sftp-bulk/source_sftp_bulk/spec.py
|
{
"start": 643,
"end": 1006
}
|
class ____(BaseModel):
class Config(OneOfOptionConfig):
title = "Authenticate via Private Key"
discriminator = "auth_type"
auth_type: Literal["private_key"] = Field("private_key", const=True)
private_key: str = Field(title="Private key", description="The Private key", multiline=True, order=4, airbyte_secret=True)
|
PrivateKeyCredentials
|
python
|
optuna__optuna
|
tests/storages_tests/journal_tests/test_journal.py
|
{
"start": 1342,
"end": 10865
}
|
class ____:
def __init__(self, storage_type: str, grace_period: int | None) -> None:
self.storage_type = storage_type
self.tempfile: IO[Any] | None = None
self.grace_period = grace_period
def __enter__(self) -> optuna.storages.journal.BaseJournalBackend:
if self.storage_type.startswith("file"):
self.tempfile = NamedTemporaryFilePool().tempfile()
lock: BaseJournalFileLock
if self.storage_type == "file_with_open_lock":
lock = optuna.storages.journal.JournalFileOpenLock(
self.tempfile.name, self.grace_period
)
elif self.storage_type == "file_with_link_lock":
lock = optuna.storages.journal.JournalFileOpenLock(
self.tempfile.name, self.grace_period
)
else:
raise Exception("Must not reach here")
return optuna.storages.journal.JournalFileBackend(self.tempfile.name, lock)
elif self.storage_type.startswith("redis"):
assert self.grace_period is None
use_cluster = self.storage_type == "redis_with_use_cluster"
journal_redis_storage = optuna.storages.journal.JournalRedisBackend(
"redis://localhost", use_cluster
)
journal_redis_storage._redis = FakeStrictRedis()
return journal_redis_storage
else:
raise RuntimeError(f"Unknown log storage type: {self.storage_type}")
def __exit__(
self, exc_type: type[BaseException], exc_val: BaseException, exc_tb: TracebackType
) -> None:
if self.tempfile:
self.tempfile.close()
@pytest.mark.parametrize("log_storage_type,grace_period", LOG_STORAGE_WITH_PARAMETER)
def test_concurrent_append_logs_for_multi_processes(
log_storage_type: str, grace_period: int | None
) -> None:
if log_storage_type.startswith("redis"):
pytest.skip("The `fakeredis` does not support multi process environments.")
num_executors = 10
num_records = 200
record = {"key": "value"}
with JournalLogStorageSupplier(log_storage_type, grace_period) as storage:
with ProcessPoolExecutor(num_executors) as pool:
pool.map(storage.append_logs, [[record] for _ in range(num_records)], timeout=20)
assert len(list(storage.read_logs(0))) == num_records
assert all(record == r for r in storage.read_logs(0))
@pytest.mark.parametrize("log_storage_type,grace_period", LOG_STORAGE_WITH_PARAMETER)
def test_concurrent_append_logs_for_multi_threads(
log_storage_type: str, grace_period: int | None
) -> None:
num_executors = 10
num_records = 200
record = {"key": "value"}
with JournalLogStorageSupplier(log_storage_type, grace_period) as storage:
with ThreadPoolExecutor(num_executors) as pool:
pool.map(storage.append_logs, [[record] for _ in range(num_records)], timeout=20)
assert len(list(storage.read_logs(0))) == num_records
assert all(record == r for r in storage.read_logs(0))
def pop_waiting_trial(file_path: str, study_name: str) -> int | None:
file_storage = optuna.storages.journal.JournalFileBackend(file_path)
storage = optuna.storages.JournalStorage(file_storage)
study = optuna.load_study(storage=storage, study_name=study_name)
return study._pop_waiting_trial_id()
def test_pop_waiting_trial_multiprocess_safe() -> None:
with NamedTemporaryFilePool() as file:
file_storage = optuna.storages.journal.JournalFileBackend(file.name)
storage = optuna.storages.JournalStorage(file_storage)
study = optuna.create_study(storage=storage)
num_enqueued = 10
for i in range(num_enqueued):
study.enqueue_trial({"i": i})
trial_id_set = set()
with ProcessPoolExecutor(10) as pool:
futures = []
for i in range(num_enqueued):
future = pool.submit(pop_waiting_trial, file.name, study.study_name)
futures.append(future)
for future in as_completed(futures):
trial_id = future.result()
if trial_id is not None:
trial_id_set.add(trial_id)
assert len(trial_id_set) == num_enqueued
@pytest.mark.parametrize("storage_mode", JOURNAL_STORAGE_SUPPORTING_SNAPSHOT)
def test_save_snapshot_per_each_trial(storage_mode: str) -> None:
def objective(trial: optuna.Trial) -> float:
return trial.suggest_float("x", 0, 10)
with StorageSupplier(storage_mode) as storage:
assert isinstance(storage, JournalStorage)
study = create_study(storage=storage)
journal_log_storage = storage._backend
assert isinstance(journal_log_storage, BaseJournalSnapshot)
assert journal_log_storage.load_snapshot() is None
with mock.patch("optuna.storages.journal._storage.SNAPSHOT_INTERVAL", 1, create=True):
study.optimize(objective, n_trials=2)
assert isinstance(journal_log_storage.load_snapshot(), bytes)
@pytest.mark.parametrize("storage_mode", JOURNAL_STORAGE_SUPPORTING_SNAPSHOT)
def test_save_snapshot_per_each_study(storage_mode: str) -> None:
with StorageSupplier(storage_mode) as storage:
assert isinstance(storage, JournalStorage)
journal_log_storage = storage._backend
assert isinstance(journal_log_storage, BaseJournalSnapshot)
assert journal_log_storage.load_snapshot() is None
with mock.patch("optuna.storages.journal._storage.SNAPSHOT_INTERVAL", 1, create=True):
for _ in range(2):
create_study(storage=storage)
assert isinstance(journal_log_storage.load_snapshot(), bytes)
@pytest.mark.parametrize("storage_mode", JOURNAL_STORAGE_SUPPORTING_SNAPSHOT)
def test_check_replay_result_restored_from_snapshot(storage_mode: str) -> None:
with StorageSupplier(storage_mode) as storage1:
with mock.patch("optuna.storages.journal._storage.SNAPSHOT_INTERVAL", 1, create=True):
for _ in range(2):
create_study(storage=storage1)
assert isinstance(storage1, JournalStorage)
storage2 = optuna.storages.JournalStorage(storage1._backend)
assert len(storage1.get_all_studies()) == len(storage2.get_all_studies())
assert storage1._replay_result.log_number_read == storage2._replay_result.log_number_read
@pytest.mark.parametrize("storage_mode", JOURNAL_STORAGE_SUPPORTING_SNAPSHOT)
def test_snapshot_given(storage_mode: str, capsys: _pytest.capture.CaptureFixture) -> None:
with StorageSupplier(storage_mode) as storage:
assert isinstance(storage, JournalStorage)
replay_result = JournalStorageReplayResult("")
# Bytes object which is a valid pickled object.
storage.restore_replay_result(pickle.dumps(replay_result))
assert replay_result.log_number_read == storage._replay_result.log_number_read
# We need to reconstruct our default handler to properly capture stderr.
optuna.logging._reset_library_root_logger()
optuna.logging.enable_default_handler()
optuna.logging.set_verbosity(optuna.logging.WARNING)
# Bytes object which cannot be unpickled is passed.
storage.restore_replay_result(b"hoge")
_, err = capsys.readouterr()
assert err
# Bytes object which can be pickled but is not `JournalStorageReplayResult`.
storage.restore_replay_result(pickle.dumps("hoge"))
_, err = capsys.readouterr()
assert err
def test_if_future_warning_occurs() -> None:
with NamedTemporaryFilePool() as file:
with pytest.warns(FutureWarning):
optuna.storages.JournalFileStorage(file.name)
with pytest.warns(FutureWarning):
optuna.storages.JournalRedisStorage("redis://localhost")
class _CustomJournalBackendInheritingDeprecatedClass(BaseJournalLogStorage):
def read_logs(self, log_number_from: int) -> list[dict[str, Any]]:
return [{"": ""}]
def append_logs(self, logs: list[dict[str, Any]]) -> None:
return
with pytest.warns(FutureWarning):
_ = _CustomJournalBackendInheritingDeprecatedClass()
@pytest.mark.parametrize(
"lock_obj", (DeprecatedJournalFileOpenLock, DeprecatedJournalFileSymlinkLock)
)
def test_future_warning_of_deprecated_file_lock_obj_paths(
tmp_path: pathlib.PurePath,
lock_obj: type[DeprecatedJournalFileOpenLock | DeprecatedJournalFileSymlinkLock],
) -> None:
with pytest.warns(FutureWarning):
lock_obj(filepath=str(tmp_path))
def test_raise_error_for_deprecated_class_import_from_journal() -> None:
# TODO(nabenabe0928): Remove this test once deprecated objects, e.g., JournalFileStorage,
# are removed.
with pytest.raises(AttributeError):
journal.JournalFileStorage # type: ignore[attr-defined]
with pytest.raises(AttributeError):
journal.JournalRedisStorage # type: ignore[attr-defined]
with pytest.raises(AttributeError):
journal.BaseJournalLogStorage # type: ignore[attr-defined]
@pytest.mark.parametrize("log_storage_type", ("file_with_open_lock", "file_with_link_lock"))
@pytest.mark.parametrize("grace_period", (0, -1))
def test_invalid_grace_period(log_storage_type: str, grace_period: int) -> None:
with pytest.raises(ValueError):
with JournalLogStorageSupplier(log_storage_type, grace_period):
pass
|
JournalLogStorageSupplier
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.