language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | redis__redis-py | redis/commands/core.py | {
"start": 98969,
"end": 114337
} | class ____(CommandsProtocol):
"""
Redis commands for List data type.
see: https://redis.io/topics/data-types#lists
"""
def blpop(
self, keys: List, timeout: Optional[Number] = 0
) -> Union[Awaitable[list], list]:
"""
LPOP a value off of the first non-empty list
named in the ``keys`` list.
If none of the lists in ``keys`` has a value to LPOP, then block
for ``timeout`` seconds, or until a value gets pushed on to one
of the lists.
If timeout is 0, then block indefinitely.
For more information, see https://redis.io/commands/blpop
"""
if timeout is None:
timeout = 0
keys = list_or_args(keys, None)
keys.append(timeout)
return self.execute_command("BLPOP", *keys)
def brpop(
self, keys: List, timeout: Optional[Number] = 0
) -> Union[Awaitable[list], list]:
"""
RPOP a value off of the first non-empty list
named in the ``keys`` list.
If none of the lists in ``keys`` has a value to RPOP, then block
for ``timeout`` seconds, or until a value gets pushed on to one
of the lists.
If timeout is 0, then block indefinitely.
For more information, see https://redis.io/commands/brpop
"""
if timeout is None:
timeout = 0
keys = list_or_args(keys, None)
keys.append(timeout)
return self.execute_command("BRPOP", *keys)
def brpoplpush(
self, src: KeyT, dst: KeyT, timeout: Optional[Number] = 0
) -> Union[Awaitable[Optional[str]], Optional[str]]:
"""
Pop a value off the tail of ``src``, push it on the head of ``dst``
and then return it.
This command blocks until a value is in ``src`` or until ``timeout``
seconds elapse, whichever is first. A ``timeout`` value of 0 blocks
forever.
For more information, see https://redis.io/commands/brpoplpush
"""
if timeout is None:
timeout = 0
return self.execute_command("BRPOPLPUSH", src, dst, timeout)
def blmpop(
self,
timeout: float,
numkeys: int,
*args: str,
direction: str,
count: Optional[int] = 1,
) -> Optional[list]:
"""
Pop ``count`` values (default 1) from first non-empty in the list
of provided key names.
When all lists are empty this command blocks the connection until another
client pushes to it or until the timeout, timeout of 0 blocks indefinitely
For more information, see https://redis.io/commands/blmpop
"""
cmd_args = [timeout, numkeys, *args, direction, "COUNT", count]
return self.execute_command("BLMPOP", *cmd_args)
def lmpop(
self,
num_keys: int,
*args: str,
direction: str,
count: Optional[int] = 1,
) -> Union[Awaitable[list], list]:
"""
Pop ``count`` values (default 1) first non-empty list key from the list
of args provided key names.
For more information, see https://redis.io/commands/lmpop
"""
cmd_args = [num_keys] + list(args) + [direction]
if count != 1:
cmd_args.extend(["COUNT", count])
return self.execute_command("LMPOP", *cmd_args)
def lindex(
self, name: KeyT, index: int
) -> Union[Awaitable[Optional[str]], Optional[str]]:
"""
Return the item from list ``name`` at position ``index``
Negative indexes are supported and will return an item at the
end of the list
For more information, see https://redis.io/commands/lindex
"""
return self.execute_command("LINDEX", name, index, keys=[name])
def linsert(
self, name: KeyT, where: str, refvalue: str, value: str
) -> Union[Awaitable[int], int]:
"""
Insert ``value`` in list ``name`` either immediately before or after
[``where``] ``refvalue``
Returns the new length of the list on success or -1 if ``refvalue``
is not in the list.
For more information, see https://redis.io/commands/linsert
"""
return self.execute_command("LINSERT", name, where, refvalue, value)
def llen(self, name: KeyT) -> Union[Awaitable[int], int]:
"""
Return the length of the list ``name``
For more information, see https://redis.io/commands/llen
"""
return self.execute_command("LLEN", name, keys=[name])
def lpop(
self,
name: KeyT,
count: Optional[int] = None,
) -> Union[Awaitable[Union[str, List, None]], Union[str, List, None]]:
"""
Removes and returns the first elements of the list ``name``.
By default, the command pops a single element from the beginning of
the list. When provided with the optional ``count`` argument, the reply
will consist of up to count elements, depending on the list's length.
For more information, see https://redis.io/commands/lpop
"""
if count is not None:
return self.execute_command("LPOP", name, count)
else:
return self.execute_command("LPOP", name)
def lpush(self, name: KeyT, *values: FieldT) -> Union[Awaitable[int], int]:
"""
Push ``values`` onto the head of the list ``name``
For more information, see https://redis.io/commands/lpush
"""
return self.execute_command("LPUSH", name, *values)
def lpushx(self, name: KeyT, *values: FieldT) -> Union[Awaitable[int], int]:
"""
Push ``value`` onto the head of the list ``name`` if ``name`` exists
For more information, see https://redis.io/commands/lpushx
"""
return self.execute_command("LPUSHX", name, *values)
def lrange(self, name: KeyT, start: int, end: int) -> Union[Awaitable[list], list]:
"""
Return a slice of the list ``name`` between
position ``start`` and ``end``
``start`` and ``end`` can be negative numbers just like
Python slicing notation
For more information, see https://redis.io/commands/lrange
"""
return self.execute_command("LRANGE", name, start, end, keys=[name])
def lrem(self, name: KeyT, count: int, value: str) -> Union[Awaitable[int], int]:
"""
Remove the first ``count`` occurrences of elements equal to ``value``
from the list stored at ``name``.
The count argument influences the operation in the following ways:
count > 0: Remove elements equal to value moving from head to tail.
count < 0: Remove elements equal to value moving from tail to head.
count = 0: Remove all elements equal to value.
For more information, see https://redis.io/commands/lrem
"""
return self.execute_command("LREM", name, count, value)
def lset(self, name: KeyT, index: int, value: str) -> Union[Awaitable[str], str]:
"""
Set element at ``index`` of list ``name`` to ``value``
For more information, see https://redis.io/commands/lset
"""
return self.execute_command("LSET", name, index, value)
def ltrim(self, name: KeyT, start: int, end: int) -> Union[Awaitable[str], str]:
"""
Trim the list ``name``, removing all values not within the slice
between ``start`` and ``end``
``start`` and ``end`` can be negative numbers just like
Python slicing notation
For more information, see https://redis.io/commands/ltrim
"""
return self.execute_command("LTRIM", name, start, end)
def rpop(
self,
name: KeyT,
count: Optional[int] = None,
) -> Union[Awaitable[Union[str, List, None]], Union[str, List, None]]:
"""
Removes and returns the last elements of the list ``name``.
By default, the command pops a single element from the end of the list.
When provided with the optional ``count`` argument, the reply will
consist of up to count elements, depending on the list's length.
For more information, see https://redis.io/commands/rpop
"""
if count is not None:
return self.execute_command("RPOP", name, count)
else:
return self.execute_command("RPOP", name)
def rpoplpush(self, src: KeyT, dst: KeyT) -> Union[Awaitable[str], str]:
"""
RPOP a value off of the ``src`` list and atomically LPUSH it
on to the ``dst`` list. Returns the value.
For more information, see https://redis.io/commands/rpoplpush
"""
return self.execute_command("RPOPLPUSH", src, dst)
def rpush(self, name: KeyT, *values: FieldT) -> Union[Awaitable[int], int]:
"""
Push ``values`` onto the tail of the list ``name``
For more information, see https://redis.io/commands/rpush
"""
return self.execute_command("RPUSH", name, *values)
def rpushx(self, name: KeyT, *values: str) -> Union[Awaitable[int], int]:
"""
Push ``value`` onto the tail of the list ``name`` if ``name`` exists
For more information, see https://redis.io/commands/rpushx
"""
return self.execute_command("RPUSHX", name, *values)
def lpos(
self,
name: KeyT,
value: str,
rank: Optional[int] = None,
count: Optional[int] = None,
maxlen: Optional[int] = None,
) -> Union[str, List, None]:
"""
Get position of ``value`` within the list ``name``
If specified, ``rank`` indicates the "rank" of the first element to
return in case there are multiple copies of ``value`` in the list.
By default, LPOS returns the position of the first occurrence of
``value`` in the list. When ``rank`` 2, LPOS returns the position of
the second ``value`` in the list. If ``rank`` is negative, LPOS
searches the list in reverse. For example, -1 would return the
position of the last occurrence of ``value`` and -2 would return the
position of the next to last occurrence of ``value``.
If specified, ``count`` indicates that LPOS should return a list of
up to ``count`` positions. A ``count`` of 2 would return a list of
up to 2 positions. A ``count`` of 0 returns a list of all positions
matching ``value``. When ``count`` is specified and but ``value``
does not exist in the list, an empty list is returned.
If specified, ``maxlen`` indicates the maximum number of list
elements to scan. A ``maxlen`` of 1000 will only return the
position(s) of items within the first 1000 entries in the list.
A ``maxlen`` of 0 (the default) will scan the entire list.
For more information, see https://redis.io/commands/lpos
"""
pieces: list[EncodableT] = [name, value]
if rank is not None:
pieces.extend(["RANK", rank])
if count is not None:
pieces.extend(["COUNT", count])
if maxlen is not None:
pieces.extend(["MAXLEN", maxlen])
return self.execute_command("LPOS", *pieces, keys=[name])
def sort(
self,
name: KeyT,
start: Optional[int] = None,
num: Optional[int] = None,
by: Optional[str] = None,
get: Optional[List[str]] = None,
desc: bool = False,
alpha: bool = False,
store: Optional[str] = None,
groups: Optional[bool] = False,
) -> Union[List, int]:
"""
Sort and return the list, set or sorted set at ``name``.
``start`` and ``num`` allow for paging through the sorted data
``by`` allows using an external key to weight and sort the items.
Use an "*" to indicate where in the key the item value is located
``get`` allows for returning items from external keys rather than the
sorted data itself. Use an "*" to indicate where in the key
the item value is located
``desc`` allows for reversing the sort
``alpha`` allows for sorting lexicographically rather than numerically
``store`` allows for storing the result of the sort into
the key ``store``
``groups`` if set to True and if ``get`` contains at least two
elements, sort will return a list of tuples, each containing the
values fetched from the arguments to ``get``.
For more information, see https://redis.io/commands/sort
"""
if (start is not None and num is None) or (num is not None and start is None):
raise DataError("``start`` and ``num`` must both be specified")
pieces: list[EncodableT] = [name]
if by is not None:
pieces.extend([b"BY", by])
if start is not None and num is not None:
pieces.extend([b"LIMIT", start, num])
if get is not None:
# If get is a string assume we want to get a single value.
# Otherwise assume it's an interable and we want to get multiple
# values. We can't just iterate blindly because strings are
# iterable.
if isinstance(get, (bytes, str)):
pieces.extend([b"GET", get])
else:
for g in get:
pieces.extend([b"GET", g])
if desc:
pieces.append(b"DESC")
if alpha:
pieces.append(b"ALPHA")
if store is not None:
pieces.extend([b"STORE", store])
if groups:
if not get or isinstance(get, (bytes, str)) or len(get) < 2:
raise DataError(
'when using "groups" the "get" argument '
"must be specified and contain at least "
"two keys"
)
options = {"groups": len(get) if groups else None}
options["keys"] = [name]
return self.execute_command("SORT", *pieces, **options)
def sort_ro(
self,
key: str,
start: Optional[int] = None,
num: Optional[int] = None,
by: Optional[str] = None,
get: Optional[List[str]] = None,
desc: bool = False,
alpha: bool = False,
) -> list:
"""
Returns the elements contained in the list, set or sorted set at key.
(read-only variant of the SORT command)
``start`` and ``num`` allow for paging through the sorted data
``by`` allows using an external key to weight and sort the items.
Use an "*" to indicate where in the key the item value is located
``get`` allows for returning items from external keys rather than the
sorted data itself. Use an "*" to indicate where in the key
the item value is located
``desc`` allows for reversing the sort
``alpha`` allows for sorting lexicographically rather than numerically
For more information, see https://redis.io/commands/sort_ro
"""
return self.sort(
key, start=start, num=num, by=by, get=get, desc=desc, alpha=alpha
)
AsyncListCommands = ListCommands
| ListCommands |
python | vyperlang__vyper | vyper/ast/nodes.py | {
"start": 33491,
"end": 34400
} | class ____(Operator):
__slots__ = ()
_description = "exponentiation"
_pretty = "**"
def _op(self, left, right):
if isinstance(left, decimal.Decimal):
raise TypeMismatch("Cannot perform exponentiation on decimal values.", self._parent)
if right < 0:
raise InvalidOperation("Cannot calculate a negative power", self._parent)
# prevent a compiler hang. we are ok with false positives at this
# stage since we are just trying to filter out inputs which can cause
# the compiler to hang. the others will get caught during constant
# folding or codegen.
# l**r > 2**256
# r * ln(l) > ln(2 ** 256)
# r > ln(2 ** 256) / ln(l)
if right > math.log(decimal.Decimal(2**257)) / math.log(decimal.Decimal(left)):
raise InvalidLiteral("Out of bounds", self)
return int(left**right)
| Pow |
python | getsentry__sentry | src/sentry/release_health/release_monitor/base.py | {
"start": 120,
"end": 262
} | class ____(TypedDict):
total_sessions: int
releases: dict[str, int]
Totals = dict[int, dict[str, EnvironmentTotals]]
| EnvironmentTotals |
python | explosion__spaCy | setup.py | {
"start": 3398,
"end": 6916
} | class ____(build_ext, build_ext_options):
def build_extensions(self):
if self.parallel is None and os.environ.get("SPACY_NUM_BUILD_JOBS") is not None:
self.parallel = int(os.environ.get("SPACY_NUM_BUILD_JOBS"))
build_ext_options.build_options(self)
build_ext.build_extensions(self)
# Include the git version in the build (adapted from NumPy)
# Copyright (c) 2005-2020, NumPy Developers.
# BSD 3-Clause license, see licenses/3rd_party_licenses.txt
def write_git_info_py(filename="spacy/git_info.py"):
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ["SYSTEMROOT", "PATH", "HOME"]:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env["LANGUAGE"] = "C"
env["LANG"] = "C"
env["LC_ALL"] = "C"
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)
return out
git_version = "Unknown"
if Path(".git").exists():
try:
out = _minimal_ext_cmd(["git", "rev-parse", "--short", "HEAD"])
git_version = out.strip().decode("ascii")
except Exception:
pass
elif Path(filename).exists():
# must be a source distribution, use existing version file
try:
a = open(filename, "r")
lines = a.readlines()
git_version = lines[-1].split('"')[1]
except Exception:
pass
finally:
a.close()
text = """# THIS FILE IS GENERATED FROM SPACY SETUP.PY
#
GIT_VERSION = "%(git_version)s"
"""
a = open(filename, "w")
try:
a.write(text % {"git_version": git_version})
finally:
a.close()
def clean(path):
for path in path.glob("**/*"):
if path.is_file() and path.suffix in (".so", ".cpp", ".html"):
print(f"Deleting {path.name}")
path.unlink()
def setup_package():
write_git_info_py()
if len(sys.argv) > 1 and sys.argv[1] == "clean":
return clean(PACKAGE_ROOT)
with (PACKAGE_ROOT / "about.py").open("r") as f:
about = {}
exec(f.read(), about)
for copy_file, target_dir in COPY_FILES.items():
if copy_file.exists():
shutil.copy(str(copy_file), str(target_dir))
print(f"Copied {copy_file} -> {target_dir}")
include_dirs = [
numpy.get_include(),
get_path("include"),
]
ext_modules = []
ext_modules.append(
Extension(
"spacy.matcher.levenshtein",
[
"spacy/matcher/levenshtein.pyx",
"spacy/matcher/polyleven.c",
],
language="c",
include_dirs=include_dirs,
)
)
for name in MOD_NAMES:
mod_path = name.replace(".", "/") + ".pyx"
ext = Extension(
name,
[mod_path],
language="c++",
include_dirs=include_dirs,
extra_compile_args=["-std=c++11"],
)
ext_modules.append(ext)
print("Cythonizing sources")
ext_modules = cythonize(ext_modules, compiler_directives=COMPILER_DIRECTIVES)
setup(
name="spacy",
packages=PACKAGES,
version=about["__version__"],
ext_modules=ext_modules,
cmdclass={"build_ext": build_ext_subclass},
package_data={"": ["*.pyx", "*.pxd", "*.pxi"]},
)
if __name__ == "__main__":
setup_package()
| build_ext_subclass |
python | numba__numba | numba/cuda/tests/cudapy/test_device_func.py | {
"start": 203,
"end": 6892
} | class ____(CUDATestCase):
def test_use_add2f(self):
@cuda.jit("float32(float32, float32)", device=True)
def add2f(a, b):
return a + b
def use_add2f(ary):
i = cuda.grid(1)
ary[i] = add2f(ary[i], ary[i])
compiled = cuda.jit("void(float32[:])")(use_add2f)
nelem = 10
ary = np.arange(nelem, dtype=np.float32)
exp = ary + ary
compiled[1, nelem](ary)
self.assertTrue(np.all(ary == exp), (ary, exp))
def test_indirect_add2f(self):
@cuda.jit("float32(float32, float32)", device=True)
def add2f(a, b):
return a + b
@cuda.jit("float32(float32, float32)", device=True)
def indirect(a, b):
return add2f(a, b)
def indirect_add2f(ary):
i = cuda.grid(1)
ary[i] = indirect(ary[i], ary[i])
compiled = cuda.jit("void(float32[:])")(indirect_add2f)
nelem = 10
ary = np.arange(nelem, dtype=np.float32)
exp = ary + ary
compiled[1, nelem](ary)
self.assertTrue(np.all(ary == exp), (ary, exp))
def _check_cpu_dispatcher(self, add):
@cuda.jit
def add_kernel(ary):
i = cuda.grid(1)
ary[i] = add(ary[i], 1)
ary = np.arange(10)
expect = ary + 1
add_kernel[1, ary.size](ary)
np.testing.assert_equal(expect, ary)
def test_cpu_dispatcher(self):
# Test correct usage
@jit
def add(a, b):
return a + b
self._check_cpu_dispatcher(add)
@skip_on_cudasim('not supported in cudasim')
def test_cpu_dispatcher_invalid(self):
# Test invalid usage
# Explicit signature disables compilation, which also disable
# compiling on CUDA.
@jit('(i4, i4)')
def add(a, b):
return a + b
# Check that the right error message is provided.
with self.assertRaises(TypingError) as raises:
self._check_cpu_dispatcher(add)
msg = "Untyped global name 'add':.*using cpu function on device"
expected = re.compile(msg)
self.assertTrue(expected.search(str(raises.exception)) is not None)
def test_cpu_dispatcher_other_module(self):
@jit
def add(a, b):
return a + b
mymod = types.ModuleType(name='mymod')
mymod.add = add
del add
@cuda.jit
def add_kernel(ary):
i = cuda.grid(1)
ary[i] = mymod.add(ary[i], 1)
ary = np.arange(10)
expect = ary + 1
add_kernel[1, ary.size](ary)
np.testing.assert_equal(expect, ary)
@skip_on_cudasim('not supported in cudasim')
def test_inspect_llvm(self):
@cuda.jit(device=True)
def foo(x, y):
return x + y
args = (int32, int32)
cres = foo.compile_device(args)
fname = cres.fndesc.mangled_name
# Verify that the function name has "foo" in it as in the python name
self.assertIn('foo', fname)
llvm = foo.inspect_llvm(args)
# Check that the compiled function name is in the LLVM.
self.assertIn(fname, llvm)
@skip_on_cudasim('not supported in cudasim')
def test_inspect_asm(self):
@cuda.jit(device=True)
def foo(x, y):
return x + y
args = (int32, int32)
cres = foo.compile_device(args)
fname = cres.fndesc.mangled_name
# Verify that the function name has "foo" in it as in the python name
self.assertIn('foo', fname)
ptx = foo.inspect_asm(args)
# Check that the compiled function name is in the PTX
self.assertIn(fname, ptx)
@skip_on_cudasim('not supported in cudasim')
def test_inspect_sass_disallowed(self):
@cuda.jit(device=True)
def foo(x, y):
return x + y
with self.assertRaises(RuntimeError) as raises:
foo.inspect_sass((int32, int32))
self.assertIn('Cannot inspect SASS of a device function',
str(raises.exception))
@skip_on_cudasim('cudasim will allow calling any function')
def test_device_func_as_kernel_disallowed(self):
@cuda.jit(device=True)
def f():
pass
with self.assertRaises(RuntimeError) as raises:
f[1, 1]()
self.assertIn('Cannot compile a device function as a kernel',
str(raises.exception))
@skip_on_cudasim('cudasim ignores casting by jit decorator signature')
def test_device_casting(self):
# Ensure that casts to the correct type are forced when calling a
# device function with a signature. This test ensures that:
#
# - We don't compile a new specialization of rgba for float32 when we
# shouldn't
# - We insert a cast when calling rgba, as opposed to failing to type.
@cuda.jit('int32(int32, int32, int32, int32)', device=True)
def rgba(r, g, b, a):
return (((r & 0xFF) << 16) |
((g & 0xFF) << 8) |
((b & 0xFF) << 0) |
((a & 0xFF) << 24))
@cuda.jit
def rgba_caller(x, channels):
x[0] = rgba(channels[0], channels[1], channels[2], channels[3])
x = cuda.device_array(1, dtype=np.int32)
channels = cuda.to_device(np.asarray([1.0, 2.0, 3.0, 4.0],
dtype=np.float32))
rgba_caller[1, 1](x, channels)
self.assertEqual(0x04010203, x[0])
def _test_declare_device(self, decl):
self.assertEqual(decl.name, 'f1')
self.assertEqual(decl.sig.args, (float32[:],))
self.assertEqual(decl.sig.return_type, int32)
@skip_on_cudasim('cudasim does not check signatures')
def test_declare_device_signature(self):
f1 = cuda.declare_device('f1', int32(float32[:]))
self._test_declare_device(f1)
@skip_on_cudasim('cudasim does not check signatures')
def test_declare_device_string(self):
f1 = cuda.declare_device('f1', 'int32(float32[:])')
self._test_declare_device(f1)
@skip_on_cudasim('cudasim does not check signatures')
def test_bad_declare_device_tuple(self):
with self.assertRaisesRegex(TypeError, 'Return type'):
cuda.declare_device('f1', (float32[:],))
@skip_on_cudasim('cudasim does not check signatures')
def test_bad_declare_device_string(self):
with self.assertRaisesRegex(TypeError, 'Return type'):
cuda.declare_device('f1', '(float32[:],)')
if __name__ == '__main__':
unittest.main()
| TestDeviceFunc |
python | tensorflow__tensorflow | tensorflow/python/trackable/resource.py | {
"start": 7784,
"end": 10112
} | class ____(CapturableResource):
"""Holds a Tensor which a tf.function can capture.
A TrackableResource is most useful for stateful Tensors that require
initialization, such as `tf.lookup.StaticHashTable`. `TrackableResource`s
are discovered by traversing the graph of object attributes, e.g. during
`tf.saved_model.save`.
A TrackableResource has three methods to override:
* `_create_resource` should create the resource tensor handle.
* `_initialize` should initialize the resource held at `self.resource_handle`.
* `_destroy_resource` is called upon a `TrackableResource`'s destruction
and should decrement the resource's ref count. For most resources, this
should be done with a call to `tf.raw_ops.DestroyResourceOp`.
Example usage:
>>> class DemoResource(tf.saved_model.experimental.TrackableResource):
... def __init__(self):
... super().__init__()
... self._initialize()
... def _create_resource(self):
... return tf.raw_ops.VarHandleOp(dtype=tf.float32, shape=[2])
... def _initialize(self):
... tf.raw_ops.AssignVariableOp(
... resource=self.resource_handle, value=tf.ones([2]))
... def _destroy_resource(self):
... tf.raw_ops.DestroyResourceOp(resource=self.resource_handle)
>>> class DemoModule(tf.Module):
... def __init__(self):
... self.resource = DemoResource()
... def increment(self, tensor):
... return tensor + tf.raw_ops.ReadVariableOp(
... resource=self.resource.resource_handle, dtype=tf.float32)
>>> demo = DemoModule()
>>> demo.increment([5, 1])
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([6., 2.], dtype=float32)>
"""
def __init__(self, device=""):
"""Initialize the `TrackableResource`.
Args:
device: A string indicating a required placement for this resource,
e.g. "CPU" if this resource must be created on a CPU device. A blank
device allows the user to place resource creation, so generally this
should be blank unless the resource only makes sense on one device.
"""
global _RESOURCE_TRACKER_STACK
for resource_tracker in _RESOURCE_TRACKER_STACK:
resource_tracker.add_resource(self)
super().__init__(device=device)
# TODO(b/124205571,b/124092991): Solve destruction of resources.
| TrackableResource |
python | openai__openai-python | src/openai/resources/audio/transcriptions.py | {
"start": 50453,
"end": 51568
} | class ____:
def __init__(self, transcriptions: AsyncTranscriptions) -> None:
self._transcriptions = transcriptions
self.create = async_to_streamed_response_wrapper(
transcriptions.create,
)
def _get_response_format_type(
response_format: AudioResponseFormat | Omit,
) -> type[Transcription | TranscriptionVerbose | TranscriptionDiarized | str]:
if isinstance(response_format, Omit) or response_format is None: # pyright: ignore[reportUnnecessaryComparison]
return Transcription
if response_format == "json":
return Transcription
elif response_format == "verbose_json":
return TranscriptionVerbose
elif response_format == "diarized_json":
return TranscriptionDiarized
elif response_format == "srt" or response_format == "text" or response_format == "vtt":
return str
elif TYPE_CHECKING: # type: ignore[unreachable]
assert_never(response_format)
else:
log.warn("Unexpected audio response format: %s", response_format)
return Transcription
| AsyncTranscriptionsWithStreamingResponse |
python | vyperlang__vyper | vyper/venom/analysis/stack_order.py | {
"start": 943,
"end": 5099
} | class ____:
function: IRFunction
liveness: LivenessAnalysis
cfg: CFGAnalysis
_from_to: dict[tuple[IRBasicBlock, IRBasicBlock], Needed]
def __init__(self, ac: IRAnalysesCache):
self._from_to = dict()
self.ac = ac
self.liveness = ac.request_analysis(LivenessAnalysis)
self.cfg = ac.request_analysis(CFGAnalysis)
def analyze_bb(self, bb: IRBasicBlock) -> Needed:
self.needed: Needed = []
self.stack: Stack = []
for inst in bb.instructions:
if inst.opcode == "assign":
self._handle_assign(inst)
elif inst.opcode == "phi":
self._handle_inst(inst)
elif inst.is_bb_terminator:
self._handle_terminator(inst)
else:
self._handle_inst(inst)
if len(inst.operands) > 0:
if not inst.is_bb_terminator:
assert self.stack[-len(inst.operands) :] == inst.operands, (
inst,
self.stack,
inst.operands,
)
self.stack = self.stack[: -len(inst.operands)]
self.stack.extend(inst.get_outputs())
for pred in self.cfg.cfg_in(bb):
self._from_to[(pred, bb)] = self.needed.copy()
return self.needed
def get_stack(self, bb: IRBasicBlock) -> Needed:
succs = self.cfg.cfg_out(bb)
for succ in succs:
self.analyze_bb(succ)
orders = [self._from_to.get((bb, succ), []) for succ in succs]
return self._merge(orders)
def from_to(self, origin: IRBasicBlock, successor: IRBasicBlock) -> Needed:
target = self._from_to.get((origin, successor), []).copy()
for var in self.liveness.input_vars_from(origin, successor):
if var not in target:
target.append(var)
return target
def _handle_assign(self, inst: IRInstruction):
assert inst.opcode == "assign"
_ = inst.output # Assert single output
index = inst.parent.instructions.index(inst)
next_inst = inst.parent.instructions[index + 1]
next_live = self.liveness.live_vars_at(next_inst)
src = inst.operands[0]
if not isinstance(src, IRVariable):
self.stack.append(src)
elif src in next_live:
self.stack.append(src)
assert src in self.stack
self._add_needed(src)
else:
if src not in self.stack:
self.stack.append(src)
self._add_needed(src)
else:
_swap(self.stack, src)
def _add_needed(self, op: IRVariable):
if op not in self.needed:
self.needed.append(op)
def _reorder(self, target_stack: Stack):
count = len(target_stack)
for index, op in enumerate(target_stack):
depth = count - index - 1
_swap(self.stack, op)
_swap_to(self.stack, depth)
if len(target_stack) != 0:
assert target_stack == self.stack[-len(target_stack) :], (target_stack, self.stack)
def _handle_inst(self, inst: IRInstruction):
ops = inst.operands
for op in ops:
if isinstance(op, IRVariable) and op not in self.stack:
self._add_needed(op)
if op not in self.stack:
self.stack.append(op)
self._reorder(ops)
def _merge(self, orders: list[Needed]) -> Needed:
if len(orders) == 0:
return []
res = orders[0]
for order in orders:
res = _max_same_prefix(res, order)
return res
def _handle_terminator(self, inst: IRInstruction):
bb = inst.parent
orders = [self._from_to.get((bb, succ), []) for succ in self.cfg.cfg_out(bb)]
ops = (op for op in inst.operands if isinstance(op, IRVariable))
for op in ops:
if op not in self.stack:
self._add_needed(op)
for op in self._merge(orders):
if op not in self.stack:
self._add_needed(op)
| StackOrderAnalysis |
python | tensorflow__tensorflow | tensorflow/tools/compatibility/ast_edits.py | {
"start": 28390,
"end": 28853
} | class ____:
"""This class defines how `AnalysisResult`s should be generated.
It specifies how to map imports and symbols to `AnalysisResult`s.
This class must provide the following fields:
* `symbols_to_detect`: maps function names to `AnalysisResult`s
* `imports_to_detect`: maps imports represented as (full module name, alias)
tuples to `AnalysisResult`s
notifications)
For an example, see `TFAPIImportAnalysisSpec`.
"""
| APIAnalysisSpec |
python | tensorflow__tensorflow | tensorflow/python/debug/lib/grpc_debug_test_server.py | {
"start": 2380,
"end": 7754
} | class ____(
grpc_debug_server.EventListenerBaseStreamHandler):
"""Implementation of EventListenerBaseStreamHandler that dumps to file."""
def __init__(self, dump_dir, event_listener_servicer):
super(EventListenerTestStreamHandler, self).__init__()
self._dump_dir = dump_dir
self._event_listener_servicer = event_listener_servicer
if self._dump_dir:
self._try_makedirs(self._dump_dir)
self._grpc_path = None
self._cached_graph_defs = []
self._cached_graph_def_device_names = []
self._cached_graph_def_wall_times = []
def on_core_metadata_event(self, event):
self._event_listener_servicer.toggle_watch()
core_metadata = json.loads(event.log_message.message)
if not self._grpc_path:
grpc_path = core_metadata["grpc_path"]
if grpc_path:
if grpc_path.startswith("/"):
grpc_path = grpc_path[1:]
if self._dump_dir:
self._dump_dir = os.path.join(self._dump_dir, grpc_path)
# Write cached graph defs to filesystem.
for graph_def, device_name, wall_time in zip(
self._cached_graph_defs,
self._cached_graph_def_device_names,
self._cached_graph_def_wall_times):
self._write_graph_def(graph_def, device_name, wall_time)
if self._dump_dir:
self._write_core_metadata_event(event)
else:
self._event_listener_servicer.core_metadata_json_strings.append(
event.log_message.message)
def on_graph_def(self, graph_def, device_name, wall_time):
"""Implementation of the tensor value-carrying Event proto callback.
Args:
graph_def: A GraphDef object.
device_name: Name of the device on which the graph was created.
wall_time: An epoch timestamp (in microseconds) for the graph.
"""
if self._dump_dir:
if self._grpc_path:
self._write_graph_def(graph_def, device_name, wall_time)
else:
self._cached_graph_defs.append(graph_def)
self._cached_graph_def_device_names.append(device_name)
self._cached_graph_def_wall_times.append(wall_time)
else:
self._event_listener_servicer.partition_graph_defs.append(graph_def)
def on_value_event(self, event):
"""Implementation of the tensor value-carrying Event proto callback.
Writes the Event proto to the file system for testing. The path written to
follows the same pattern as the file:// debug URLs of tfdbg, i.e., the
name scope of the op becomes the directory structure under the dump root
directory.
Args:
event: The Event proto carrying a tensor value.
Returns:
If the debug node belongs to the set of currently activated breakpoints,
a `EventReply` proto will be returned.
"""
if self._dump_dir:
self._write_value_event(event)
else:
value = event.summary.value[0]
tensor_value = debug_data.load_tensor_from_event(event)
self._event_listener_servicer.debug_tensor_values[value.node_name].append(
tensor_value)
items = event.summary.value[0].node_name.split(":")
node_name = items[0]
output_slot = int(items[1])
debug_op = items[2]
if ((node_name, output_slot, debug_op) in
self._event_listener_servicer.breakpoints):
return debug_service_pb2.EventReply()
def _try_makedirs(self, dir_path):
if not os.path.isdir(dir_path):
try:
os.makedirs(dir_path)
except OSError as error:
if error.errno != errno.EEXIST:
raise
def _write_core_metadata_event(self, event):
core_metadata_path = os.path.join(
self._dump_dir,
debug_data.METADATA_FILE_PREFIX + debug_data.CORE_METADATA_TAG +
"_%d" % event.wall_time)
self._try_makedirs(self._dump_dir)
with open(core_metadata_path, "wb") as f:
f.write(event.SerializeToString())
def _write_graph_def(self, graph_def, device_name, wall_time):
encoded_graph_def = graph_def.SerializeToString()
graph_hash = int(hashlib.sha1(encoded_graph_def).hexdigest(), 16)
event = event_pb2.Event(graph_def=encoded_graph_def, wall_time=wall_time)
graph_file_path = os.path.join(
self._dump_dir,
debug_data.device_name_to_device_path(device_name),
debug_data.METADATA_FILE_PREFIX + debug_data.GRAPH_FILE_TAG +
debug_data.HASH_TAG + "%d_%d" % (graph_hash, wall_time))
self._try_makedirs(os.path.dirname(graph_file_path))
with open(graph_file_path, "wb") as f:
f.write(event.SerializeToString())
def _write_value_event(self, event):
value = event.summary.value[0]
# Obtain the device name from the metadata.
summary_metadata = event.summary.value[0].metadata
if not summary_metadata.plugin_data:
raise ValueError("The value lacks plugin data.")
try:
content = json.loads(compat.as_text(summary_metadata.plugin_data.content))
except ValueError as err:
raise ValueError("Could not parse content into JSON: %r, %r" % (content,
err))
device_name = content["device"]
dump_full_path = _get_dump_file_path(
self._dump_dir, device_name, value.node_name)
self._try_makedirs(os.path.dirname(dump_full_path))
with open(dump_full_path, "wb") as f:
f.write(event.SerializeToString())
| EventListenerTestStreamHandler |
python | kamyu104__LeetCode-Solutions | Python/minimum-number-of-seconds-to-make-mountain-height-zero.py | {
"start": 1105,
"end": 1600
} | class ____(object):
def minNumberOfSeconds(self, mountainHeight, workerTimes):
"""
:type mountainHeight: int
:type workerTimes: List[int]
:rtype: int
"""
min_heap = [(0+1*t, i, 1) for i, t in enumerate(workerTimes)]
heapq.heapify(min_heap)
for _ in xrange(mountainHeight):
result, i, x = heapq.heappop(min_heap)
heapq.heappush(min_heap, (result+(x+1)*workerTimes[i], i, x+1))
return result
| Solution2 |
python | ray-project__ray | python/ray/tune/experiment/trial.py | {
"start": 4222,
"end": 6992
} | class ____:
"""Temporary trial state.
Values saved here should not be restored on resume.
"""
def __init__(self):
self.location = _Location()
self.ray_actor: Optional[ray.actor.ActorHandle] = None
self.saving_to: Optional[_FutureTrainingResult] = None
self.restoring_from: Optional[_TrainingResult] = None
self.num_restore_failures: int = 0
def __getstate__(self):
return {}
def _get_max_path_length() -> int:
if hasattr(os, "pathconf"):
return os.pathconf("/", "PC_PATH_MAX")
# Windows
return _DEFAULT_WIN_MAX_PATH_LENGTH
def _create_unique_logdir_name(root: str, relative_logdir: str) -> str:
candidate = Path(root).expanduser().joinpath(relative_logdir)
if candidate.exists():
relative_logdir_old = relative_logdir
relative_logdir += "_" + uuid.uuid4().hex[:4]
logger.info(
f"Creating a new dirname {relative_logdir} because "
f"trial dirname '{relative_logdir_old}' already exists."
)
return relative_logdir
def _noop_logger_creator(config: Dict[str, Any], logdir: str):
# Upon remote process setup, record the actor's original working dir before
# changing to the Tune logdir
os.environ.setdefault("TUNE_ORIG_WORKING_DIR", os.getcwd())
os.makedirs(logdir, exist_ok=True)
if bool(int(os.environ.get(RAY_CHDIR_TO_TRIAL_DIR, "1"))):
# Set the working dir to the trial directory in the remote process,
# for user file writes
if not ray._private.worker._mode() == ray._private.worker.LOCAL_MODE:
os.chdir(logdir)
return NoopLogger(config, logdir)
def _get_trainable_kwargs(trial: "Trial") -> Dict[str, Any]:
trial.init_local_path()
logger_creator = partial(
_noop_logger_creator, logdir=trial.storage.trial_working_directory
)
trial_config = copy.deepcopy(trial.config)
trial_config[TRIAL_INFO] = _TrialInfo(trial)
stdout_file, stderr_file = trial.log_to_file
trial_config[STDOUT_FILE] = stdout_file
trial_config[STDERR_FILE] = stderr_file
assert trial.storage.trial_dir_name
kwargs = {
"config": trial_config,
"logger_creator": logger_creator,
"storage": trial.storage,
}
return kwargs
@contextmanager
def _change_working_directory(trial):
"""Context manager changing working directory to trial logdir.
Used in local mode.
For non-local mode it is no-op.
"""
if ray._private.worker._mode() == ray._private.worker.LOCAL_MODE:
old_dir = os.getcwd()
try:
os.chdir(trial.local_path)
yield
finally:
os.chdir(old_dir)
else:
yield
@DeveloperAPI
| _TemporaryTrialState |
python | allegroai__clearml | clearml/utilities/proxy_object.py | {
"start": 3598,
"end": 11077
} | class ____(object):
def __call__(self, *args: Any, **kwargs: Any) -> "StubObject":
return self
def __getattr__(self, attr: str) -> "StubObject":
return self
def __setattr__(self, attr: str, val: Any) -> None:
pass
def verify_basic_type(
a_dict_list: Union[float, int, bool, str, list, tuple, dict],
basic_types: Optional[Tuple[Union[Any, Tuple[Any]]]] = None,
) -> bool:
basic_types = (
(
float,
int,
bool,
six.string_types,
)
if not basic_types
else tuple(b for b in basic_types if b not in (list, tuple, dict))
)
if isinstance(a_dict_list, basic_types):
return True
if isinstance(a_dict_list, (list, tuple)):
return all(verify_basic_type(v, basic_types=basic_types) for v in a_dict_list)
elif isinstance(a_dict_list, dict):
return all(verify_basic_type(k, basic_types=basic_types) for k in a_dict_list.keys()) and all(
verify_basic_type(v, basic_types=basic_types) for v in a_dict_list.values()
)
def convert_bool(s: str) -> bool:
s = s.strip().lower()
if s == "true":
return True
elif s == "false" or not s:
return False
raise ValueError("Invalid value (boolean literal expected): {}".format(s))
def cast_basic_type(value: Any, type_str: str) -> Any:
if not type_str:
# empty string with no type is treated as None
if value == "":
return None
return value
basic_types = {str(getattr(v, "__name__", v)): v for v in (float, int, str, list, tuple, dict)}
basic_types["bool"] = convert_bool
parts = type_str.split("/")
# nested = len(parts) > 1
if parts[0] in ("list", "tuple", "dict"):
# noinspection PyBroadException
try:
# lists/tuple/dicts should be json loadable
return basic_types.get(parts[0])(json.loads(value))
except Exception:
# noinspection PyBroadException
try:
# fallback to legacy basic type loading
v = "[" + value.lstrip("[(").rstrip("])") + "]"
v = yaml.load(v, Loader=yaml.SafeLoader)
return basic_types.get(parts[0])(v)
except Exception:
getLogger().warning("Could not cast `{}` to basic type. Returning it as `str`".format(value))
return value
t = basic_types.get(str(type_str).lower().strip(), False)
if t is not False:
# noinspection PyBroadException
try:
return t(value)
except Exception:
return value
return value
def get_type_from_basic_type_str(type_str: str) -> type:
# default to str
if not type_str:
return str
if str(type_str).startswith("list/"):
v_type = list
elif str(type_str).startswith("tuple/"):
v_type = tuple
elif str(type_str).startswith("dict/"):
v_type = dict
else:
v_type = next(
(t for t in (bool, int, float, str, list, tuple, dict) if t.__name__ == type_str),
str,
)
return v_type
def get_basic_type(value: Any) -> str:
basic_types = (float, int, bool, six.string_types, list, tuple, dict)
if isinstance(value, (list, tuple)) and value:
tv = type(value)
t = type(value[0])
if all(t == type(v) for v in value):
return "{}/{}".format(str(getattr(tv, "__name__", tv)), str(getattr(t, "__name__", t)))
elif isinstance(value, dict) and value:
t = type(list(value.values())[0])
if all(t == type(v) for v in value.values()):
return "dict/{}".format(str(getattr(t, "__name__", t)))
# it might be an empty list/dict/tuple
t = type(value)
if isinstance(value, basic_types):
return str(getattr(t, "__name__", t))
# we are storing it, even though we will not be able to restore it
return str(getattr(t, "__name__", t))
def flatten_dictionary(a_dict: dict, prefix: str = "", sep: str = "/") -> dict:
flat_dict = {}
basic_types = (
float,
int,
bool,
six.string_types,
)
for k, v in a_dict.items():
k = str(k)
if isinstance(v, (float, int, bool, six.string_types)):
flat_dict[prefix + k] = v
elif isinstance(v, (list, tuple)) and all([isinstance(i, basic_types) for i in v]):
flat_dict[prefix + k] = v
elif isinstance(v, dict):
nested_flat_dict = flatten_dictionary(v, prefix=prefix + k + sep, sep=sep)
if nested_flat_dict:
flat_dict.update(nested_flat_dict)
else:
flat_dict[k] = {}
else:
# this is a mixture of list and dict, or any other object,
# leave it as is, we have nothing to do with it.
flat_dict[prefix + k] = v
return flat_dict
def nested_from_flat_dictionary(a_dict: dict, flat_dict: dict, prefix: str = "", sep: str = "/") -> dict:
basic_types = (
float,
int,
bool,
six.string_types,
)
org_dict = copy(a_dict)
for k, v in org_dict.items():
k = str(k)
if isinstance(v, (float, int, bool, six.string_types)):
a_dict[k] = flat_dict.get(prefix + k, v)
elif isinstance(v, (list, tuple)) and all([isinstance(i, basic_types) for i in v]):
a_dict[k] = flat_dict.get(prefix + k, v)
elif isinstance(v, dict):
a_dict[k] = nested_from_flat_dictionary(v, flat_dict, prefix=prefix + k + sep, sep=sep) or v
else:
# this is a mixture of list and dict, or any other object,
# leave it as is, we have nothing to do with it.
a_dict[k] = flat_dict.get(prefix + k, v)
return a_dict
def naive_nested_from_flat_dictionary(flat_dict: dict, sep: str = "/") -> dict:
"""A naive conversion of a flat dictionary with '/'-separated keys signifying nesting
into a nested dictionary.
"""
return {
sub_prefix: (
bucket[0][1]
if (len(bucket) == 1 and sub_prefix == bucket[0][0])
else naive_nested_from_flat_dictionary(
{k[len(sub_prefix) + 1 :]: v for k, v in bucket if len(k) > len(sub_prefix)},
sep=sep,
)
)
for sub_prefix, bucket in (
(key, list(group))
for key, group in itertools.groupby(sorted(flat_dict.items()), key=lambda item: item[0].partition(sep)[0])
)
}
def walk_nested_dict_tuple_list(
dict_list_tuple: Union[dict, tuple, list], callback: Callable[[Any], Any]
) -> Union[dict, tuple, list, Any]:
# Do Not Change, type call will not trigger the auto resolving / download of the Lazy evaluator
nested = (dict, tuple, list)
type_dict_list_tuple = type(dict_list_tuple)
if type_dict_list_tuple not in nested:
return callback(dict_list_tuple)
if type_dict_list_tuple == dict:
ret = {}
for k, v in dict_list_tuple.items():
ret[k] = walk_nested_dict_tuple_list(v, callback=callback) if type(v) in nested else callback(v)
else:
ret = []
for v in dict_list_tuple:
ret.append(walk_nested_dict_tuple_list(v, callback=callback) if type(v) in nested else callback(v))
if type_dict_list_tuple == tuple:
ret = tuple(dict_list_tuple)
return ret
| StubObject |
python | getlogbook__logbook | src/logbook/compat.py | {
"start": 2006,
"end": 5066
} | class ____(logging.Handler):
"""A handler for the stdlib's logging system that redirects
transparently to logbook. This is used by the
:func:`redirect_logging` and :func:`redirected_logging`
functions.
If you want to customize the redirecting you can subclass it.
"""
def __init__(self):
logging.Handler.__init__(self)
def convert_level(self, level):
"""Converts a logging level into a logbook level."""
if level >= logging.CRITICAL:
return logbook.CRITICAL
if level >= logging.ERROR:
return logbook.ERROR
if level >= logging.WARNING:
return logbook.WARNING
if level >= logging.INFO:
return logbook.INFO
return logbook.DEBUG
def find_extra(self, old_record):
"""Tries to find custom data from the old logging record. The
return value is a dictionary that is merged with the log record
extra dictionaries.
"""
rv = vars(old_record).copy()
for key in (
"name",
"msg",
"args",
"levelname",
"levelno",
"pathname",
"filename",
"module",
"exc_info",
"exc_text",
"lineno",
"funcName",
"created",
"msecs",
"relativeCreated",
"thread",
"threadName",
"greenlet",
"processName",
"process",
):
rv.pop(key, None)
return rv
def find_caller(self, old_record):
"""Tries to find the caller that issued the call."""
frm = sys._getframe(2)
while frm is not None:
if (
frm.f_globals is globals()
or frm.f_globals is logbook.base.__dict__
or frm.f_globals is logging.__dict__
):
frm = frm.f_back
else:
return frm
def convert_time(self, timestamp):
"""Converts the UNIX timestamp of the old record into a
datetime object as used by logbook.
"""
return datetime_utcfromtimestamp(timestamp)
def convert_record(self, old_record):
"""Converts an old logging record into a logbook log record."""
args = old_record.args
kwargs = None
# Logging allows passing a mapping object, in which case args will be a mapping.
if isinstance(args, Mapping):
kwargs = args
args = None
record = LoggingCompatRecord(
old_record.name,
self.convert_level(old_record.levelno),
old_record.msg,
args,
kwargs,
old_record.exc_info,
self.find_extra(old_record),
self.find_caller(old_record),
)
record.time = self.convert_time(old_record.created)
return record
def emit(self, record):
logbook.dispatch_record(self.convert_record(record))
| RedirectLoggingHandler |
python | tensorflow__tensorflow | tensorflow/python/debug/cli/debugger_cli_common.py | {
"start": 4618,
"end": 16724
} | class ____:
"""Rich multi-line text.
Line-by-line text output, with font attributes (e.g., color) and annotations
(e.g., indices in a multi-dimensional tensor). Used as the text output of CLI
commands. Can be rendered on terminal environments such as curses.
This is not to be confused with Rich Text Format (RTF). This class is for text
lines only.
"""
def __init__(self, lines, font_attr_segs=None, annotations=None):
"""Constructor of RichTextLines.
Args:
lines: A list of str or a single str, representing text output to
screen. The latter case is for convenience when the text output is
single-line.
font_attr_segs: A map from 0-based row index to a list of 3-tuples.
It lists segments in each row that have special font attributes, such
as colors, that are not the default attribute. For example:
{1: [(0, 3, "red"), (4, 7, "green")], 2: [(10, 20, "yellow")]}
In each tuple, the 1st element is the start index of the segment. The
2nd element is the end index, in an "open interval" fashion. The 3rd
element is an object or a list of objects that represents the font
attribute. Colors are represented as strings as in the examples above.
annotations: A map from 0-based row index to any object for annotating
the row. A typical use example is annotating rows of the output as
indices in a multi-dimensional tensor. For example, consider the
following text representation of a 3x2x2 tensor:
[[[0, 0], [0, 0]],
[[0, 0], [0, 0]],
[[0, 0], [0, 0]]]
The annotation can indicate the indices of the first element shown in
each row, i.e.,
{0: [0, 0, 0], 1: [1, 0, 0], 2: [2, 0, 0]}
This information can make display of tensors on screen clearer and can
help the user navigate (scroll) to the desired location in a large
tensor.
Raises:
ValueError: If lines is of invalid type.
"""
if isinstance(lines, list):
self._lines = lines
elif isinstance(lines, str):
self._lines = [lines]
else:
raise ValueError("Unexpected type in lines: %s" % type(lines))
self._font_attr_segs = font_attr_segs
if not self._font_attr_segs:
self._font_attr_segs = {}
# TODO(cais): Refactor to collections.defaultdict(list) to simplify code.
self._annotations = annotations
if not self._annotations:
self._annotations = {}
# TODO(cais): Refactor to collections.defaultdict(list) to simplify code.
@property
def lines(self):
return self._lines
@property
def font_attr_segs(self):
return self._font_attr_segs
@property
def annotations(self):
return self._annotations
def num_lines(self):
return len(self._lines)
def slice(self, begin, end):
"""Slice a RichTextLines object.
The object itself is not changed. A sliced instance is returned.
Args:
begin: (int) Beginning line index (inclusive). Must be >= 0.
end: (int) Ending line index (exclusive). Must be >= 0.
Returns:
(RichTextLines) Sliced output instance of RichTextLines.
Raises:
ValueError: If begin or end is negative.
"""
if begin < 0 or end < 0:
raise ValueError("Encountered negative index.")
# Copy lines.
lines = self.lines[begin:end]
# Slice font attribute segments.
font_attr_segs = {}
for key in self.font_attr_segs:
if key >= begin and key < end:
font_attr_segs[key - begin] = self.font_attr_segs[key]
# Slice annotations.
annotations = {}
for key in self.annotations:
if not isinstance(key, int):
# Annotations can contain keys that are not line numbers.
annotations[key] = self.annotations[key]
elif key >= begin and key < end:
annotations[key - begin] = self.annotations[key]
return RichTextLines(
lines, font_attr_segs=font_attr_segs, annotations=annotations)
def extend(self, other):
"""Extend this instance of RichTextLines with another instance.
The extension takes effect on the text lines, the font attribute segments,
as well as the annotations. The line indices in the font attribute
segments and the annotations are adjusted to account for the existing
lines. If there are duplicate, non-line-index fields in the annotations,
the value from the input argument "other" will override that in this
instance.
Args:
other: (RichTextLines) The other RichTextLines instance to be appended at
the end of this instance.
"""
orig_num_lines = self.num_lines() # Record original number of lines.
# Merge the lines.
self._lines.extend(other.lines)
# Merge the font_attr_segs.
for line_index in other.font_attr_segs:
self._font_attr_segs[orig_num_lines + line_index] = (
other.font_attr_segs[line_index])
# Merge the annotations.
for key in other.annotations:
if isinstance(key, int):
self._annotations[orig_num_lines + key] = (other.annotations[key])
else:
self._annotations[key] = other.annotations[key]
def _extend_before(self, other):
"""Add another RichTextLines object to the front.
Args:
other: (RichTextLines) The other object to add to the front to this
object.
"""
other_num_lines = other.num_lines() # Record original number of lines.
# Merge the lines.
self._lines = other.lines + self._lines
# Merge the font_attr_segs.
new_font_attr_segs = {}
for line_index in self.font_attr_segs:
new_font_attr_segs[other_num_lines + line_index] = (
self.font_attr_segs[line_index])
new_font_attr_segs.update(other.font_attr_segs)
self._font_attr_segs = new_font_attr_segs
# Merge the annotations.
new_annotations = {}
for key in self._annotations:
if isinstance(key, int):
new_annotations[other_num_lines + key] = (self.annotations[key])
else:
new_annotations[key] = other.annotations[key]
new_annotations.update(other.annotations)
self._annotations = new_annotations
def append(self, line, font_attr_segs=None):
"""Append a single line of text.
Args:
line: (str) The text to be added to the end.
font_attr_segs: (list of tuples) Font attribute segments of the appended
line.
"""
self._lines.append(line)
if font_attr_segs:
self._font_attr_segs[len(self._lines) - 1] = font_attr_segs
def append_rich_line(self, rich_line):
self.append(rich_line.text, rich_line.font_attr_segs)
def prepend(self, line, font_attr_segs=None):
"""Prepend (i.e., add to the front) a single line of text.
Args:
line: (str) The text to be added to the front.
font_attr_segs: (list of tuples) Font attribute segments of the appended
line.
"""
other = RichTextLines(line)
if font_attr_segs:
other.font_attr_segs[0] = font_attr_segs
self._extend_before(other)
def write_to_file(self, file_path):
"""Write the object itself to file, in a plain format.
The font_attr_segs and annotations are ignored.
Args:
file_path: (str) path of the file to write to.
"""
with gfile.Open(file_path, "w") as f:
for line in self._lines:
f.write(line + "\n")
# TODO(cais): Add a method to allow appending to a line in RichTextLines with
# both text and font_attr_segs.
def regex_find(orig_screen_output, regex, font_attr):
"""Perform regex match in rich text lines.
Produces a new RichTextLines object with font_attr_segs containing highlighted
regex matches.
Example use cases include:
1) search for specific items in a large list of items, and
2) search for specific numerical values in a large tensor.
Args:
orig_screen_output: The original RichTextLines, in which the regex find
is to be performed.
regex: The regex used for matching.
font_attr: Font attribute used for highlighting the found result.
Returns:
A modified copy of orig_screen_output.
Raises:
ValueError: If input str regex is not a valid regular expression.
"""
new_screen_output = RichTextLines(
orig_screen_output.lines,
font_attr_segs=copy.deepcopy(orig_screen_output.font_attr_segs),
annotations=orig_screen_output.annotations)
try:
re_prog = re.compile(regex)
except re.error:
raise ValueError("Invalid regular expression: \"%s\"" % regex)
regex_match_lines = []
for i, line in enumerate(new_screen_output.lines):
find_it = re_prog.finditer(line)
match_segs = []
for match in find_it:
match_segs.append((match.start(), match.end(), font_attr))
if match_segs:
if i not in new_screen_output.font_attr_segs:
new_screen_output.font_attr_segs[i] = match_segs
else:
new_screen_output.font_attr_segs[i].extend(match_segs)
new_screen_output.font_attr_segs[i] = sorted(
new_screen_output.font_attr_segs[i], key=lambda x: x[0])
regex_match_lines.append(i)
new_screen_output.annotations[REGEX_MATCH_LINES_KEY] = regex_match_lines
return new_screen_output
def wrap_rich_text_lines(inp, cols):
"""Wrap RichTextLines according to maximum number of columns.
Produces a new RichTextLines object with the text lines, font_attr_segs and
annotations properly wrapped. This ought to be used sparingly, as in most
cases, command handlers producing RichTextLines outputs should know the
screen/panel width via the screen_info kwarg and should produce properly
length-limited lines in the output accordingly.
Args:
inp: Input RichTextLines object.
cols: Number of columns, as an int.
Returns:
1) A new instance of RichTextLines, with line lengths limited to cols.
2) A list of new (wrapped) line index. For example, if the original input
consists of three lines and only the second line is wrapped, and it's
wrapped into two lines, this return value will be: [0, 1, 3].
Raises:
ValueError: If inputs have invalid types.
"""
new_line_indices = []
if not isinstance(inp, RichTextLines):
raise ValueError("Invalid type of input screen_output")
if not isinstance(cols, int):
raise ValueError("Invalid type of input cols")
out = RichTextLines([])
row_counter = 0 # Counter for new row index
for i, line in enumerate(inp.lines):
new_line_indices.append(out.num_lines())
if i in inp.annotations:
out.annotations[row_counter] = inp.annotations[i]
if len(line) <= cols:
# No wrapping.
out.lines.append(line)
if i in inp.font_attr_segs:
out.font_attr_segs[row_counter] = inp.font_attr_segs[i]
row_counter += 1
else:
# Wrap.
wlines = [] # Wrapped lines.
osegs = []
if i in inp.font_attr_segs:
osegs = inp.font_attr_segs[i]
idx = 0
while idx < len(line):
if idx + cols > len(line):
rlim = len(line)
else:
rlim = idx + cols
wlines.append(line[idx:rlim])
for seg in osegs:
if (seg[0] < rlim) and (seg[1] >= idx):
# Calculate left bound within wrapped line.
if seg[0] >= idx:
lb = seg[0] - idx
else:
lb = 0
# Calculate right bound within wrapped line.
if seg[1] < rlim:
rb = seg[1] - idx
else:
rb = rlim - idx
if rb > lb: # Omit zero-length segments.
wseg = (lb, rb, seg[2])
if row_counter not in out.font_attr_segs:
out.font_attr_segs[row_counter] = [wseg]
else:
out.font_attr_segs[row_counter].append(wseg)
idx += cols
row_counter += 1
out.lines.extend(wlines)
# Copy over keys of annotation that are not row indices.
for key in inp.annotations:
if not isinstance(key, int):
out.annotations[key] = inp.annotations[key]
return out, new_line_indices
| RichTextLines |
python | django__django | tests/string_lookup/models.py | {
"start": 158,
"end": 411
} | class ____(models.Model):
name = models.CharField(max_length=50)
normal = models.ForeignKey(Foo, models.CASCADE, related_name="normal_foo")
fwd = models.ForeignKey("Whiz", models.CASCADE)
back = models.ForeignKey("Foo", models.CASCADE)
| Bar |
python | doocs__leetcode | solution/2900-2999/2907.Maximum Profitable Triplets With Increasing Prices I/Solution3.py | {
"start": 388,
"end": 1135
} | class ____:
def maxProfit(self, prices: List[int], profits: List[int]) -> int:
n = len(prices)
left = [0] * n
right = [0] * n
s = sorted(set(prices))
m = len(s)
tree1 = BinaryIndexedTree(m + 1)
tree2 = BinaryIndexedTree(m + 1)
for i, x in enumerate(prices):
x = bisect_left(s, x) + 1
left[i] = tree1.query(x - 1)
tree1.update(x, profits[i])
for i in range(n - 1, -1, -1):
x = m + 1 - (bisect_left(s, prices[i]) + 1)
right[i] = tree2.query(x - 1)
tree2.update(x, profits[i])
return max(
(l + x + r for l, x, r in zip(left, profits, right) if l and r), default=-1
)
| Solution |
python | openai__openai-python | src/openai/types/realtime/realtime_session_create_response.py | {
"start": 9840,
"end": 10334
} | class ____(BaseModel):
always: Optional[ToolMcpToolRequireApprovalMcpToolApprovalFilterAlways] = None
"""A filter object to specify which tools are allowed."""
never: Optional[ToolMcpToolRequireApprovalMcpToolApprovalFilterNever] = None
"""A filter object to specify which tools are allowed."""
ToolMcpToolRequireApproval: TypeAlias = Union[
ToolMcpToolRequireApprovalMcpToolApprovalFilter, Literal["always", "never"], None
]
| ToolMcpToolRequireApprovalMcpToolApprovalFilter |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/auto_shard_dataset_test.py | {
"start": 2145,
"end": 24437
} | class ____(tf_record_test_base.TFRecordTestBase,
parameterized.TestCase):
def setUp(self):
super(AutoShardDatasetTest, self).setUp()
self._num_files = 10
self._num_records = 10
self._filenames = self._createFiles()
def getAllDatasetElements(self, dataset):
actual = []
next_fn = self.getNext(dataset)
while True:
try:
actual.append(self.evaluate(next_fn()))
except errors.OutOfRangeError:
break
return actual
def assertDatasetProducesWithShuffle(self, dataset, expected, batch,
num_examples, shuffle):
if shuffle:
actual = []
next_fn = self.getNext(dataset)
for _ in range(num_examples):
elem = self.evaluate(next_fn())
if isinstance(elem, tuple):
actual.extend(elem)
else:
actual.extend(elem.tolist())
self.assertCountEqual(actual, expected)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_fn())
else:
self.assertDatasetProduces(dataset, list(chunk(expected, batch)))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(shuffle=[True, False])))
def testFlatMapReaderPipeline(self, shuffle):
dataset = dataset_ops.Dataset.list_files(
self._filenames, shuffle=shuffle)
dataset = dataset.flat_map(core_readers.TFRecordDataset)
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in (3, 8)
for r in range(0, 10)
]
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(batch_size=[1, 3, 10])))
def testDatasetOfReaderDatasetsPipeline(self, batch_size):
# This tests a scenario where a list_files main return multiple files
# due to the glob containing wildcards.
def batch(iterator, n):
l = len(iterator)
for i in range(0, l, n):
yield iterator[i:min(i + n, l)]
datasets = []
for files in batch(self._filenames, batch_size):
datasets.append(
dataset_ops.Dataset.list_files(files, shuffle=False).map(
core_readers.TFRecordDataset))
dataset = dataset_ops.Dataset.from_tensor_slices(datasets)
dataset = dataset.flat_map(lambda x: x)
# Simulate additional ops in between flat_map and interleave. This should be
# a no-op since if ShardDataset is placed right after flat_map, we will only
# have two datasets left at this point.
dataset = dataset.prefetch(1)
dataset = dataset.prefetch(1)
dataset = dataset.interleave(
lambda x: x, cycle_length=1, num_parallel_calls=1)
dataset = distribute._AutoShardDataset(dataset, 5, 0)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in (0, 5)
for r in range(0, 10)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(test_base.default_test_combinations())
def testZipReaderPipeline(self):
dataset1 = dataset_ops.Dataset.list_files(
self._filenames, shuffle=False)
dataset1 = dataset1.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset2 = dataset_ops.Dataset.list_files(
self._filenames, shuffle=False)
dataset2 = dataset2.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = dataset_ops.Dataset.zip((dataset1, dataset2))
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
(b"Record %d of file %d" % (r, f), b"Record %d of file %d" % (r, f)) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(shuffle=[True, False])))
def testConcatenateReaderPipeline(self, shuffle):
dataset1 = dataset_ops.Dataset.list_files(
self._filenames, shuffle=shuffle)
dataset1 = dataset1.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset1 = dataset1.batch(5)
dataset2 = dataset_ops.Dataset.list_files(
self._filenames, shuffle=shuffle)
dataset2 = dataset2.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset2 = dataset2.batch(5)
dataset = dataset1.concatenate(dataset2)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
expected += expected
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 8, shuffle)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(shuffle=[True, False])))
def testPipelineWithMap(self, shuffle):
dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
dataset = dataset.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = dataset.map(lambda x: string_ops.substr_v2(x, 2, 1000))
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"cord %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
@combinations.generate(test_base.default_test_combinations())
def testDirectFilenameTFRecordReaderPipeline(self):
dataset = core_readers.TFRecordDataset(self._filenames)
dataset = distribute._AutoShardDataset(dataset, 5, 0)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in (0, 5)
for r in range(0, 10)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(shuffle=[True, False])))
def testValidPipelineWithRangeDataset(self, shuffle):
dataset = dataset_ops.Dataset.range(self._num_files)
dataset = dataset.map(lambda n: string_ops.string_join( # pylint:disable=g-long-lambda
[self.get_temp_dir(),
string_ops.string_format("/tf_record.{}.txt", [n])]))
dataset = dataset.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = dataset.map(lambda x: string_ops.substr_v2(x, 2, 1000))
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"cord %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(params=[(1, 0, 10, 10), (2, 1, 20, 5),
(10, 1, 1, 10)])))
def testStandardReaderPipeline(self, params):
num_epochs, index, batch_size, parallel_reads = params
dataset = readers.make_tf_record_dataset(
file_pattern=self._filenames,
num_epochs=num_epochs,
batch_size=batch_size,
parser_fn=None,
num_parallel_reads=parallel_reads,
drop_final_batch=True,
shuffle=False)
dataset = distribute._AutoShardDataset(dataset, 2, index)
outputs = self.getNext(dataset)
self._verify_records(
outputs,
batch_size=batch_size,
file_index=[i for i in range(index, self._num_records, 2)],
num_epochs=num_epochs,
interleave_cycle_length=parallel_reads,
drop_final_batch=True,
use_parser_fn=None)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(outputs())
@combinations.generate(test_base.default_test_combinations())
def testShardInputToInterleave(self):
file1 = self._writeFile("f0", [1, 2, 3])
file2 = self._writeFile("f1", [4, 5, 6])
file3 = self._writeFile("f2", [7, 8, 9])
dataset = dataset_ops.Dataset.from_tensor_slices([file1, file2, file3])
dataset = dataset.interleave(core_readers.TFRecordDataset, cycle_length=3)
dataset = distribute._AutoShardDataset(dataset, 2, 0)
# Sharding by file will interleave files 0 and 2
expected = [str.encode(str(i)) for i in [1, 7, 2, 8, 3, 9]]
actual = self.getDatasetOutput(dataset)
self.assertEqual(actual, expected)
@combinations.generate(test_base.default_test_combinations())
def testShardInputToInterleaveWithIdentityFunction(self):
file1 = self._writeFile("f0", [1, 2, 3])
file2 = self._writeFile("f1", [4, 5, 6])
file3 = self._writeFile("f2", [7, 8, 9])
dataset = dataset_ops.Dataset.from_tensor_slices([file1, file2, file3])
dataset = dataset.map(core_readers.TFRecordDataset)
dataset = dataset.interleave(lambda x: x, cycle_length=3)
dataset = distribute._AutoShardDataset(dataset, 2, 0)
# Sharding by file will interleave files 0 and 2
expected = [str.encode(str(i)) for i in [1, 7, 2, 8, 3, 9]]
actual = self.getDatasetOutput(dataset)
self.assertEqual(actual, expected)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(shuffle=[True, False])))
def testSampleResNetPipeline(self, shuffle):
dataset = dataset_ops.Dataset.list_files(
self._filenames, shuffle=shuffle)
dataset = dataset.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(sharding_policy=[
options_lib.AutoShardPolicy.DATA,
options_lib.AutoShardPolicy.AUTO
])))
def testShardByDataBeforePrefetch(self, sharding_policy):
dataset = dataset_ops.Dataset.range(4)
dataset = dataset.apply(testing.assert_next(["Shard", "Prefetch"]))
dataset = dataset.prefetch(1)
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = sharding_policy
dataset = dataset.with_options(options)
dataset = distribute._AutoShardDataset(dataset, 2, 0)
self.assertDatasetProduces(dataset, [0, 2])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.times(combinations.combine(
sharding_policy=[options_lib.AutoShardPolicy.DATA,
options_lib.AutoShardPolicy.FILE]),
combinations.combine(shuffle=[True, False]))))
def testReplicateAndShardProduceDisjointData(self, shuffle, sharding_policy):
dataset = dataset_ops.Dataset.list_files(self._filenames,
shuffle=shuffle)
dataset = dataset.flat_map(core_readers.TFRecordDataset)
graph_def = dataset._as_serialized_graph(
strip_device_assignment=True,
external_state_policy=options_lib.ExternalStatePolicy.WARN)
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = sharding_policy
ds1 = distribute._RemoteDataset(graph_def, "/device:CPU:0",
dataset.element_spec)
ds2 = distribute._RemoteDataset(graph_def, "/device:CPU:0",
dataset.element_spec)
ds1 = ds1.with_options(options)
ds2 = ds2.with_options(options)
ds1 = distribute._AutoShardDataset(ds1, 2, 0)
ds2 = distribute._AutoShardDataset(ds2, 2, 1)
elems1 = set(self.getAllDatasetElements(ds1))
elems2 = set(self.getAllDatasetElements(ds2))
self.assertEmpty(elems1.intersection(elems2))
@combinations.generate(test_base.default_test_combinations())
def testWorkersGreaterThanNumFilesWithDataSharding(self):
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = (
options_lib.AutoShardPolicy.DATA)
dataset = core_readers._TFRecordDataset(self._filenames)
dataset = dataset.with_options(options)
dataset = distribute._AutoShardDataset(dataset, 5, 0)
# Should return "Record (0,5) of file (0 --> 9)" since we are sharding by
# individual elements, we should be able to get some data from all files.
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in (0, 5)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(test_base.default_test_combinations())
def testAutoshardPolicyOff(self):
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = (
options_lib.AutoShardPolicy.OFF)
dataset = core_readers._TFRecordDataset(self._filenames)
dataset = dataset.with_options(options)
dataset = distribute._AutoShardDataset(dataset, 5, 0)
# Should return every record in every file since autosharding is turned off.
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 10)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(test_base.default_test_combinations())
def testFileShardingWithoutReaderDatasetOp(self):
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = (
options_lib.AutoShardPolicy.FILE)
dataset = dataset_ops.Dataset.range(1024)
dataset = dataset.with_options(options)
# We are specifying that we want a file sharding policy, and this pipeline
# doesn't start with file reading, so we should error out.
with self.assertRaises(errors.NotFoundError):
dataset = distribute._AutoShardDataset(dataset, 10, 0)
self.evaluate(self.getNext(dataset)())
@combinations.generate(test_base.default_test_combinations())
def testWorkersGreaterThanNumFiles(self):
dataset = dataset_ops.Dataset.list_files(self._filenames)
dataset = dataset.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 500, 499)
self.assertDatasetProduces(dataset, [])
@combinations.generate(test_base.default_test_combinations())
def testTFRecordReaderWithDirectFileNames(self):
# Using `_TFRecordDataset` creates a raw op rather than wrapping it around
# a flat_map automatically.
dataset = core_readers._TFRecordDataset(self._filenames)
dataset = distribute._AutoShardDataset(dataset, 5, 0)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in (0, 5)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(test_base.default_test_combinations())
def testTFRecordReaderWithDirectFileNamesAndShapes(self):
# Using `_TFRecordDataset` creates a raw op rather than wrapping it around
# a flat_map automatically.
dataset = core_readers._TFRecordDataset(self._filenames)
# BatchDataset contains `output_types` and `output_shapes`
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 2, 0)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 5)
]
self.assertDatasetProduces(dataset, list(chunk(expected, 5)))
@combinations.generate(test_base.default_test_combinations())
def testShardOutOfRange(self):
dataset = dataset_ops.Dataset.range(5)
with self.assertRaises(errors.InvalidArgumentError):
dataset = distribute._AutoShardDataset(dataset, 10, 0)
self.evaluate(self.getNext(dataset)())
@combinations.generate(test_base.default_test_combinations())
def testShardOutOfRangeEmptyDataset(self):
dataset = dataset_ops.Dataset.range(0)
with self.assertRaises(errors.OutOfRangeError):
dataset = distribute._AutoShardDataset(dataset, 10, 0)
self.evaluate(self.getNext(dataset)())
@combinations.generate(test_base.default_test_combinations())
def testNoReaderPipelines(self):
dataset = dataset_ops.Dataset.range(1024)
dataset = distribute._AutoShardDataset(dataset, 2, 0)
self.assertDatasetProduces(dataset, [i for i in range(1024) if i % 2 == 0])
@combinations.generate(test_base.default_test_combinations())
def testUnknownOpInPipelineStillShardsAtTheEnd(self):
dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
dataset = dataset.flat_map(core_readers.TFRecordDataset)
dataset = dataset.apply(unique.unique())
dataset = distribute._AutoShardDataset(dataset, 5, 0)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in (0, 5)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(test_base.default_test_combinations())
def testInvalidWorkerIndex(self):
dataset = dataset_ops.Dataset.list_files(self._filenames)
dataset = dataset.flat_map(core_readers.TFRecordDataset)
dataset = dataset.batch(5)
with self.assertRaises(errors.InvalidArgumentError):
dataset = distribute._AutoShardDataset(dataset, 2, 2)
self.evaluate(self.getNext(dataset)())
@combinations.generate(test_base.default_test_combinations())
def testAssertCardinality(self):
dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
dataset = dataset.flat_map(core_readers.TFRecordDataset)
dataset = dataset.batch(5)
dataset = dataset.apply(cardinality.assert_cardinality(42))
dataset = distribute._AutoShardDataset(dataset, 5, 0)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in (0, 5)
for r in range(0, 10)
]
self.assertDatasetProduces(dataset, list(chunk(expected, 5)))
@combinations.generate(test_base.default_test_combinations())
def testMakeBatchedFeaturesDataset(self):
files = 2
records_per_file = 5
def make_record(file_index):
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
"file":
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=[file_index])),
}))
return example.SerializeToString()
filenames = []
for file_index in range(files):
filename = os.path.join(self.get_temp_dir(),
"tf_record.%d.txt" % file_index)
filenames.append(filename)
writer = python_io.TFRecordWriter(filename)
for _ in range(records_per_file):
writer.write(make_record(file_index))
writer.close()
dataset = readers.make_batched_features_dataset(
file_pattern=filenames,
batch_size=records_per_file,
features={
"file": parsing_ops.FixedLenFeature([], dtypes.int64),
},
reader=core_readers.TFRecordDataset,
num_epochs=1)
# We should shard at the file level, so that all records come from file 0.
dataset = distribute._AutoShardDataset(dataset, 2, 0)
dataset = dataset.unbatch()
output = self.getDatasetOutput(dataset)
files = [elem["file"] for elem in output]
self.assertEqual(files, [0] * records_per_file)
@combinations.generate(test_base.default_test_combinations())
def testHintShardingValidPattern(self):
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = (
options_lib.AutoShardPolicy.HINT)
dataset = dataset_ops.Dataset.range(100).shard(distribute.SHARD_HINT, 0)
dataset = dataset.with_options(options)
dataset = distribute._AutoShardDataset(dataset, 10, 0)
self.assertDatasetProduces(dataset, list(range(0, 100, 10)))
@combinations.generate(test_base.default_test_combinations())
def testHintShardingInvalidPattern(self):
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = (
options_lib.AutoShardPolicy.HINT)
dataset = dataset_ops.Dataset.range(100).shard(1, 0)
dataset = dataset.with_options(options)
dataset = distribute._AutoShardDataset(dataset, 10, 0)
self.assertDatasetProduces(dataset, list(range(100)))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
auto_shard_policy=list(
policy.name for policy in options_lib.AutoShardPolicy
)
),
)
)
def testEnumerateAutoShardPolicies(self, auto_shard_policy):
"""Verifies tf.data handles every auto-shard policy with no errors."""
policy_enum = options_lib.AutoShardPolicy[auto_shard_policy]
dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
dataset = dataset.flat_map(core_readers.TFRecordDataset)
dataset = dataset.batch(5)
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = policy_enum
dataset = dataset.with_options(options)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
self.getDatasetOutput(dataset, requires_initialization=True)
| AutoShardDatasetTest |
python | scikit-learn__scikit-learn | sklearn/compose/tests/test_column_transformer.py | {
"start": 77372,
"end": 95383
} | class ____(BaseEstimator):
def __init__(self, offset=1.0):
self.offset = offset
def fit(self, X, y=None):
pd = pytest.importorskip("pandas")
assert isinstance(X, pd.DataFrame)
return self
def transform(self, X, y=None):
pd = pytest.importorskip("pandas")
assert isinstance(X, pd.DataFrame)
return X - self.offset
def set_output(self, transform=None):
# This transformer will always output a DataFrame regardless of the
# configuration.
return self
@pytest.mark.parametrize(
"trans_1, expected_verbose_names, expected_non_verbose_names",
[
(
PandasOutTransformer(offset=2.0),
["trans_0__feat1", "trans_1__feat0"],
["feat1", "feat0"],
),
(
"drop",
["trans_0__feat1"],
["feat1"],
),
(
"passthrough",
["trans_0__feat1", "trans_1__feat0"],
["feat1", "feat0"],
),
],
)
def test_transformers_with_pandas_out_but_not_feature_names_out(
trans_1, expected_verbose_names, expected_non_verbose_names
):
"""Check that set_config(transform="pandas") is compatible with more transformers.
Specifically, if transformers returns a DataFrame, but does not define
`get_feature_names_out`.
"""
pd = pytest.importorskip("pandas")
X_df = pd.DataFrame({"feat0": [1.0, 2.0, 3.0], "feat1": [2.0, 3.0, 4.0]})
ct = ColumnTransformer(
[
("trans_0", PandasOutTransformer(offset=3.0), ["feat1"]),
("trans_1", trans_1, ["feat0"]),
]
)
X_trans_np = ct.fit_transform(X_df)
assert isinstance(X_trans_np, np.ndarray)
# `ct` does not have `get_feature_names_out` because `PandasOutTransformer` does
# not define the method.
with pytest.raises(AttributeError, match="not provide get_feature_names_out"):
ct.get_feature_names_out()
# The feature names are prefixed because verbose_feature_names_out=True is default
ct.set_output(transform="pandas")
X_trans_df0 = ct.fit_transform(X_df)
assert_array_equal(X_trans_df0.columns, expected_verbose_names)
ct.set_params(verbose_feature_names_out=False)
X_trans_df1 = ct.fit_transform(X_df)
assert_array_equal(X_trans_df1.columns, expected_non_verbose_names)
@pytest.mark.parametrize(
"empty_selection",
[[], np.array([False, False]), [False, False]],
ids=["list", "bool", "bool_int"],
)
def test_empty_selection_pandas_output(empty_selection):
"""Check that pandas output works when there is an empty selection.
Non-regression test for gh-25487
"""
pd = pytest.importorskip("pandas")
X = pd.DataFrame([[1.0, 2.2], [3.0, 1.0]], columns=["a", "b"])
ct = ColumnTransformer(
[
("categorical", "passthrough", empty_selection),
("numerical", StandardScaler(), ["a", "b"]),
],
verbose_feature_names_out=True,
)
ct.set_output(transform="pandas")
X_out = ct.fit_transform(X)
assert_array_equal(X_out.columns, ["numerical__a", "numerical__b"])
ct.set_params(verbose_feature_names_out=False)
X_out = ct.fit_transform(X)
assert_array_equal(X_out.columns, ["a", "b"])
def test_raise_error_if_index_not_aligned():
"""Check column transformer raises error if indices are not aligned.
Non-regression test for gh-26210.
"""
pd = pytest.importorskip("pandas")
X = pd.DataFrame([[1.0, 2.2], [3.0, 1.0]], columns=["a", "b"], index=[8, 3])
reset_index_transformer = FunctionTransformer(
lambda x: x.reset_index(drop=True), feature_names_out="one-to-one"
)
ct = ColumnTransformer(
[
("num1", "passthrough", ["a"]),
("num2", reset_index_transformer, ["b"]),
],
)
ct.set_output(transform="pandas")
msg = (
"Concatenating DataFrames from the transformer's output lead to"
" an inconsistent number of samples. The output may have Pandas"
" Indexes that do not match."
)
with pytest.raises(ValueError, match=msg):
ct.fit_transform(X)
def test_remainder_set_output():
"""Check that the output is set for the remainder.
Non-regression test for #26306.
"""
pd = pytest.importorskip("pandas")
df = pd.DataFrame({"a": [True, False, True], "b": [1, 2, 3]})
ct = make_column_transformer(
(VarianceThreshold(), make_column_selector(dtype_include=bool)),
remainder=VarianceThreshold(),
verbose_feature_names_out=False,
)
ct.set_output(transform="pandas")
out = ct.fit_transform(df)
pd.testing.assert_frame_equal(out, df)
ct.set_output(transform="default")
out = ct.fit_transform(df)
assert isinstance(out, np.ndarray)
def test_transform_pd_na():
"""Check behavior when a tranformer's output contains pandas.NA
It should raise an error unless the output config is set to 'pandas'.
"""
pd = pytest.importorskip("pandas")
if not hasattr(pd, "Float64Dtype"):
pytest.skip(
"The issue with pd.NA tested here does not happen in old versions that do"
" not have the extension dtypes"
)
df = pd.DataFrame({"a": [1.5, None]})
ct = make_column_transformer(("passthrough", ["a"]))
# No warning with non-extension dtypes and np.nan
with warnings.catch_warnings():
warnings.simplefilter("error")
ct.fit_transform(df)
df = df.convert_dtypes()
# Error with extension dtype and pd.NA
with pytest.raises(ValueError, match=r"set_output\(transform='pandas'\)"):
ct.fit_transform(df)
# No error when output is set to pandas
ct.set_output(transform="pandas")
ct.fit_transform(df)
ct.set_output(transform="default")
# No error when there are no pd.NA
ct.fit_transform(df.fillna(-1.0))
def test_dataframe_different_dataframe_libraries():
"""Check fitting and transforming on pandas and polars dataframes."""
pd = pytest.importorskip("pandas")
pl = pytest.importorskip("polars")
X_train_np = np.array([[0, 1], [2, 4], [4, 5]])
X_test_np = np.array([[1, 2], [1, 3], [2, 3]])
# Fit on pandas and transform on polars
X_train_pd = pd.DataFrame(X_train_np, columns=["a", "b"])
X_test_pl = pl.DataFrame(X_test_np, schema=["a", "b"])
ct = make_column_transformer((Trans(), [0, 1]))
ct.fit(X_train_pd)
out_pl_in = ct.transform(X_test_pl)
assert_array_equal(out_pl_in, X_test_np)
# Fit on polars and transform on pandas
X_train_pl = pl.DataFrame(X_train_np, schema=["a", "b"])
X_test_pd = pd.DataFrame(X_test_np, columns=["a", "b"])
ct.fit(X_train_pl)
out_pd_in = ct.transform(X_test_pd)
assert_array_equal(out_pd_in, X_test_np)
def test_column_transformer__getitem__():
"""Check __getitem__ for ColumnTransformer."""
X = np.array([[0, 1, 2], [3, 4, 5]])
ct = ColumnTransformer([("t1", Trans(), [0, 1]), ("t2", Trans(), [1, 2])])
msg = "ColumnTransformer is subscriptable after it is fitted"
with pytest.raises(TypeError, match=msg):
ct["t1"]
ct.fit(X)
assert ct["t1"] is ct.named_transformers_["t1"]
assert ct["t2"] is ct.named_transformers_["t2"]
msg = "'does_not_exist' is not a valid transformer name"
with pytest.raises(KeyError, match=msg):
ct["does_not_exist"]
@pytest.mark.parametrize("transform_output", ["default", "pandas"])
def test_column_transformer_remainder_passthrough_naming_consistency(transform_output):
"""Check that when `remainder="passthrough"`, inconsistent naming is handled
correctly by the underlying `FunctionTransformer`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28232
"""
pd = pytest.importorskip("pandas")
X = pd.DataFrame(np.random.randn(10, 4))
preprocessor = ColumnTransformer(
transformers=[("scaler", StandardScaler(), [0, 1])],
remainder="passthrough",
).set_output(transform=transform_output)
X_trans = preprocessor.fit_transform(X)
assert X_trans.shape == X.shape
expected_column_names = [
"scaler__x0",
"scaler__x1",
"remainder__x2",
"remainder__x3",
]
if hasattr(X_trans, "columns"):
assert X_trans.columns.tolist() == expected_column_names
assert preprocessor.get_feature_names_out().tolist() == expected_column_names
@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
def test_column_transformer_column_renaming(dataframe_lib):
"""Check that we properly rename columns when using `ColumnTransformer` and
selected columns are redundant between transformers.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28260
"""
lib = pytest.importorskip(dataframe_lib)
df = lib.DataFrame({"x1": [1, 2, 3], "x2": [10, 20, 30], "x3": [100, 200, 300]})
transformer = ColumnTransformer(
transformers=[
("A", "passthrough", ["x1", "x2", "x3"]),
("B", FunctionTransformer(), ["x1", "x2"]),
("C", StandardScaler(), ["x1", "x3"]),
# special case of a transformer returning 0-columns, e.g feature selector
(
"D",
FunctionTransformer(lambda x: _safe_indexing(x, [], axis=1)),
["x1", "x2", "x3"],
),
],
verbose_feature_names_out=True,
).set_output(transform=dataframe_lib)
df_trans = transformer.fit_transform(df)
assert list(df_trans.columns) == [
"A__x1",
"A__x2",
"A__x3",
"B__x1",
"B__x2",
"C__x1",
"C__x3",
]
@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
def test_column_transformer_error_with_duplicated_columns(dataframe_lib):
"""Check that we raise an error when using `ColumnTransformer` and
the columns names are duplicated between transformers."""
lib = pytest.importorskip(dataframe_lib)
df = lib.DataFrame({"x1": [1, 2, 3], "x2": [10, 20, 30], "x3": [100, 200, 300]})
transformer = ColumnTransformer(
transformers=[
("A", "passthrough", ["x1", "x2", "x3"]),
("B", FunctionTransformer(), ["x1", "x2"]),
("C", StandardScaler(), ["x1", "x3"]),
# special case of a transformer returning 0-columns, e.g feature selector
(
"D",
FunctionTransformer(lambda x: _safe_indexing(x, [], axis=1)),
["x1", "x2", "x3"],
),
],
verbose_feature_names_out=False,
).set_output(transform=dataframe_lib)
err_msg = re.escape(
"Duplicated feature names found before concatenating the outputs of the "
"transformers: ['x1', 'x2', 'x3'].\n"
"Transformer A has conflicting columns names: ['x1', 'x2', 'x3'].\n"
"Transformer B has conflicting columns names: ['x1', 'x2'].\n"
"Transformer C has conflicting columns names: ['x1', 'x3'].\n"
)
with pytest.raises(ValueError, match=err_msg):
transformer.fit_transform(df)
# TODO: remove mark once loky bug is fixed:
# https://github.com/joblib/loky/issues/458
@pytest.mark.thread_unsafe
@pytest.mark.skipif(
parse_version(joblib.__version__) < parse_version("1.3"),
reason="requires joblib >= 1.3",
)
def test_column_transformer_auto_memmap(global_random_seed):
"""Check that ColumnTransformer works in parallel with joblib's auto-memmapping.
non-regression test for issue #28781
"""
X = np.random.RandomState(global_random_seed).uniform(size=(3, 4))
scaler = StandardScaler(copy=False)
transformer = ColumnTransformer(
transformers=[("scaler", scaler, [0])],
n_jobs=2,
)
with joblib.parallel_backend("loky", max_nbytes=1):
Xt = transformer.fit_transform(X)
assert_allclose(Xt, StandardScaler().fit_transform(X[:, [0]]))
def test_column_transformer_non_default_index():
"""Check index handling when both pd.Series and pd.DataFrame slices are used in
ColumnTransformer.
Non-regression test for issue #31546.
"""
pd = pytest.importorskip("pandas")
df = pd.DataFrame(
{
"dict_col": [{"foo": 1, "bar": 2}, {"foo": 3, "baz": 1}],
"dummy_col": [1, 2],
},
index=[1, 2],
)
t = make_column_transformer(
(DictVectorizer(sparse=False), "dict_col"),
(FunctionTransformer(), ["dummy_col"]),
)
t.set_output(transform="pandas")
X = t.fit_transform(df)
assert list(X.index) == [1, 2]
# Metadata Routing Tests
# ======================
@pytest.mark.parametrize("method", ["transform", "fit_transform", "fit"])
def test_routing_passed_metadata_not_supported(method):
"""Test that the right error message is raised when metadata is passed while
not supported when `enable_metadata_routing=False`."""
X = np.array([[0, 1, 2], [2, 4, 6]]).T
y = [1, 2, 3]
trs = ColumnTransformer([("trans", Trans(), [0])]).fit(X, y)
with pytest.raises(
ValueError, match="is only supported if enable_metadata_routing=True"
):
getattr(trs, method)([[1]], sample_weight=[1], prop="a")
@pytest.mark.parametrize("method", ["transform", "fit_transform", "fit"])
@config_context(enable_metadata_routing=True)
def test_metadata_routing_for_column_transformer(method):
"""Test that metadata is routed correctly for column transformer."""
X = np.array([[0, 1, 2], [2, 4, 6]]).T
y = [1, 2, 3]
registry = _Registry()
sample_weight, metadata = [1], "a"
trs = ColumnTransformer(
[
(
"trans",
ConsumingTransformer(registry=registry)
.set_fit_request(sample_weight=True, metadata=True)
.set_transform_request(sample_weight=True, metadata=True),
[0],
)
]
)
if method == "transform":
trs.fit(X, y, sample_weight=sample_weight, metadata=metadata)
trs.transform(X, sample_weight=sample_weight, metadata=metadata)
else:
getattr(trs, method)(X, y, sample_weight=sample_weight, metadata=metadata)
assert len(registry)
for _trs in registry:
check_recorded_metadata(
obj=_trs,
method=method,
parent=method,
sample_weight=sample_weight,
metadata=metadata,
)
@config_context(enable_metadata_routing=True)
def test_metadata_routing_no_fit_transform():
"""Test metadata routing when the sub-estimator doesn't implement
``fit_transform``."""
class NoFitTransform(BaseEstimator):
def fit(self, X, y=None, sample_weight=None, metadata=None):
assert sample_weight
assert metadata
return self
def transform(self, X, sample_weight=None, metadata=None):
assert sample_weight
assert metadata
return X
X = np.array([[0, 1, 2], [2, 4, 6]]).T
y = [1, 2, 3]
sample_weight, metadata = [1], "a"
trs = ColumnTransformer(
[
(
"trans",
NoFitTransform()
.set_fit_request(sample_weight=True, metadata=True)
.set_transform_request(sample_weight=True, metadata=True),
[0],
)
]
)
trs.fit(X, y, sample_weight=sample_weight, metadata=metadata)
trs.fit_transform(X, y, sample_weight=sample_weight, metadata=metadata)
@pytest.mark.parametrize("method", ["transform", "fit_transform", "fit"])
@config_context(enable_metadata_routing=True)
def test_metadata_routing_error_for_column_transformer(method):
"""Test that the right error is raised when metadata is not requested."""
X = np.array([[0, 1, 2], [2, 4, 6]]).T
y = [1, 2, 3]
sample_weight, metadata = [1], "a"
trs = ColumnTransformer([("trans", ConsumingTransformer(), [0])])
error_message = (
"[sample_weight, metadata] are passed but are not explicitly set as requested"
f" or not requested for ConsumingTransformer.{method}"
)
with pytest.raises(ValueError, match=re.escape(error_message)):
if method == "transform":
trs.fit(X, y)
trs.transform(X, sample_weight=sample_weight, metadata=metadata)
else:
getattr(trs, method)(X, y, sample_weight=sample_weight, metadata=metadata)
@config_context(enable_metadata_routing=True)
def test_get_metadata_routing_works_without_fit():
# Regression test for https://github.com/scikit-learn/scikit-learn/issues/28186
# Make sure ct.get_metadata_routing() works w/o having called fit.
ct = ColumnTransformer([("trans", ConsumingTransformer(), [0])])
ct.get_metadata_routing()
@config_context(enable_metadata_routing=True)
def test_remainder_request_always_present():
# Test that remainder request is always present.
ct = ColumnTransformer(
[("trans", StandardScaler(), [0])],
remainder=ConsumingTransformer()
.set_fit_request(metadata=True)
.set_transform_request(metadata=True),
)
router = ct.get_metadata_routing()
assert router.consumes("fit", ["metadata"]) == set(["metadata"])
@config_context(enable_metadata_routing=True)
def test_unused_transformer_request_present():
# Test that the request of a transformer is always present even when not
# used due to no selected columns.
ct = ColumnTransformer(
[
(
"trans",
ConsumingTransformer()
.set_fit_request(metadata=True)
.set_transform_request(metadata=True),
lambda X: [],
)
]
)
router = ct.get_metadata_routing()
assert router.consumes("fit", ["metadata"]) == set(["metadata"])
# End of Metadata Routing Tests
# =============================
| PandasOutTransformer |
python | huggingface__transformers | src/transformers/models/textnet/modeling_textnet.py | {
"start": 9762,
"end": 12706
} | class ____(TextNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.textnet = TextNetModel(config)
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.flatten = nn.Flatten()
self.fc = nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
# classification head
self.classifier = nn.ModuleList([self.avg_pool, self.flatten])
# initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> ImageClassifierOutputWithNoAttention:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> import torch
>>> import requests
>>> from transformers import TextNetForImageClassification, TextNetImageProcessor
>>> from PIL import Image
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = TextNetImageProcessor.from_pretrained("czczup/textnet-base")
>>> model = TextNetForImageClassification.from_pretrained("czczup/textnet-base")
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> outputs.logits.shape
torch.Size([1, 2])
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.textnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
last_hidden_state = outputs[0]
for layer in self.classifier:
last_hidden_state = layer(last_hidden_state)
logits = self.fc(last_hidden_state)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@auto_docstring(
custom_intro="""
TextNet backbone, to be used with frameworks like DETR and MaskFormer.
"""
)
| TextNetForImageClassification |
python | celery__celery | celery/exceptions.py | {
"start": 6704,
"end": 6815
} | class ____(TaskError):
"""Found the end of a stream of data, but the data isn't complete."""
| IncompleteStream |
python | sqlalchemy__sqlalchemy | test/aaa_profiling/test_orm.py | {
"start": 13681,
"end": 15509
} | class ____(NoCache, fixtures.MappedTest):
__requires__ = ("python_profiling_backend",)
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"parent",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(20)),
)
Table(
"child",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(20)),
Column(
"parent_id", Integer, ForeignKey("parent.id"), nullable=False
),
)
@classmethod
def setup_classes(cls):
class Parent(cls.Basic):
pass
class Child(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
Child, Parent, parent, child = (
cls.classes.Child,
cls.classes.Parent,
cls.tables.parent,
cls.tables.child,
)
cls.mapper_registry.map_imperatively(
Parent,
parent,
properties={"children": relationship(Child, backref="parent")},
)
cls.mapper_registry.map_imperatively(Child, child)
# the profiling count depends on weakref callbacks being GC'ed
@testing.add_to_marker.gc_intensive
def test_expire_lots(self):
Parent, Child = self.classes.Parent, self.classes.Child
obj = [
Parent(children=[Child() for j in range(10)]) for i in range(10)
]
sess = fixture_session()
sess.add_all(obj)
sess.flush()
@profiling.function_call_count()
def go():
sess.expire_all()
go()
| SessionTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_column01.py | {
"start": 315,
"end": 2247
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_column01.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [43424000, 43434368]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
def test_unused_chart(self):
"""Test charts that were created but not inserted."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
# Unused chart.
workbook.add_chart({"type": "column"})
chart.axis_ids = [43424000, 43434368]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pydata__xarray | asv_bench/benchmarks/unstacking.py | {
"start": 615,
"end": 805
} | class ____(Unstacking):
def setup(self, *args, **kwargs):
requires_dask()
super().setup(**kwargs)
self.da_full = self.da_full.chunk({"flat_dim": 25})
| UnstackingDask |
python | marshmallow-code__marshmallow | src/marshmallow/fields.py | {
"start": 64253,
"end": 64522
} | class ____(IPInterface):
"""A IPv6 Network Interface field."""
default_error_messages = {"invalid_ip_interface": "Not a valid IPv6 interface."}
DESERIALIZATION_CLASS = ipaddress.IPv6Interface
_EnumT = typing.TypeVar("_EnumT", bound=EnumType)
| IPv6Interface |
python | scrapy__scrapy | tests/test_spidermiddleware_referer.py | {
"start": 24692,
"end": 24863
} | class ____(
MixinOriginWhenCrossOrigin, TestRefererMiddleware
):
req_meta = {"referrer_policy": POLICY_ORIGIN_WHEN_CROSS_ORIGIN}
| TestRequestMetaOriginWhenCrossOrigin |
python | huggingface__transformers | tests/models/idefics2/test_processing_idefics2.py | {
"start": 1013,
"end": 13531
} | class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = Idefics2Processor
model_id = "HuggingFaceM4/idefics2-8b"
@classmethod
def _setup_test_attributes(cls, processor):
cls.image1 = load_image(
url_to_local_path(
"https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
)
)
cls.image2 = load_image(
url_to_local_path("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
)
cls.image3 = load_image(
url_to_local_path(
"https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg"
)
)
cls.bos_token = processor.tokenizer.bos_token
cls.image_token = processor.image_token
cls.fake_image_token = processor.fake_image_token
cls.bos_token_id = processor.tokenizer.convert_tokens_to_ids(cls.bos_token)
cls.image_token_id = processor.tokenizer.convert_tokens_to_ids(cls.image_token)
cls.fake_image_token_id = processor.tokenizer.convert_tokens_to_ids(cls.fake_image_token)
cls.image_seq_len = processor.image_seq_len
@staticmethod
def prepare_processor_dict():
return {"image_seq_len": 2}
def test_process_interleaved_images_prompts_no_image_splitting(self):
processor = self.get_processor()
tokenizer = processor.tokenizer
processor.image_processor.do_image_splitting = False
# Test that a single image is processed correctly
inputs = processor(images=self.image1)
self.assertEqual(inputs["pixel_values"].shape, (1, 1, 3, 653, 980))
self.assertEqual(inputs["pixel_attention_mask"].shape, (1, 1, 653, 980))
# fmt: on
# Test a single sample with image and text
image_str = "<image>"
text_str = "In this image, we see"
text = image_str + text_str
inputs = processor(text=text, images=self.image1)
# fmt: off
tokenized_sentence = tokenizer(text_str, add_special_tokens=False)
expected_input_ids = [[self.bos_token_id] + [self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len + [self.fake_image_token_id] + tokenized_sentence["input_ids"]]
self.assertEqual(inputs["input_ids"], expected_input_ids)
self.assertEqual(inputs["attention_mask"], [[1] * len(expected_input_ids[0])])
self.assertEqual(inputs["pixel_values"].shape, (1, 1, 3, 653, 980))
self.assertEqual(inputs["pixel_attention_mask"].shape, (1, 1, 653, 980))
# fmt: on
# Test that batch is correctly processed
image_str = "<image>"
text_str_1 = "In this image, we see"
text_str_2 = "bla, bla"
text = [
image_str + text_str_1,
text_str_2 + image_str + image_str,
]
images = [[self.image1], [self.image2, self.image3]]
inputs = processor(text=text, images=images, padding=True)
# fmt: off
tokenized_sentence_1 = tokenizer(text_str_1, add_special_tokens=False)
tokenized_sentence_2 = tokenizer(text_str_2, add_special_tokens=False)
expected_input_ids_1 = [self.bos_token_id] + [self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len + [self.fake_image_token_id] + tokenized_sentence_1["input_ids"]
expected_input_ids_2 = [self.bos_token_id] + tokenized_sentence_2["input_ids"] + [self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len + [self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len + [self.fake_image_token_id]
# Pad the first input to match the second input
pad_len = len(expected_input_ids_2) - len(expected_input_ids_1)
padded_expected_input_ids_1 = [0] * pad_len + expected_input_ids_1
self.assertEqual(
inputs["input_ids"], [padded_expected_input_ids_1, expected_input_ids_2]
)
self.assertEqual(
inputs["attention_mask"],
[[0] * pad_len + [1] * len(expected_input_ids_1), [1] * len(expected_input_ids_2)]
)
self.assertEqual(inputs['pixel_values'].shape, (2, 2, 3, 767, 980))
self.assertEqual(inputs['pixel_attention_mask'].shape, (2, 2, 767, 980))
# fmt: on
def test_process_interleaved_images_prompts_image_splitting(self):
processor = self.get_processor()
tokenizer = processor.tokenizer
processor.image_processor.do_image_splitting = True
# Test that a single image is processed correctly
inputs = processor(images=self.image1)
self.assertEqual(inputs["pixel_values"].shape, (1, 5, 3, 653, 980))
self.assertEqual(inputs["pixel_attention_mask"].shape, (1, 5, 653, 980))
# fmt: on
# Test a single sample with image and text
image_str = "<image>"
text_str = "In this image, we see"
text = image_str + text_str
inputs = processor(text=text, images=self.image1)
# fmt: off
tokenized_sentence = tokenizer(text_str, add_special_tokens=False)
expected_input_ids = [[self.bos_token_id] + ([self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len) * 5 + [self.fake_image_token_id] + tokenized_sentence["input_ids"]]
self.assertEqual(inputs["input_ids"], expected_input_ids)
self.assertEqual(inputs["attention_mask"], [[1] * len(expected_input_ids[0])])
self.assertEqual(inputs["pixel_values"].shape, (1, 5, 3, 653, 980))
self.assertEqual(inputs["pixel_attention_mask"].shape, (1, 5, 653, 980))
# fmt: on
# Test that batch is correctly processed
image_str = "<image>"
text_str_1 = "In this image, we see"
text_str_2 = "bla, bla"
text = [
image_str + text_str_1,
text_str_2 + image_str + image_str,
]
images = [[self.image1], [self.image2, self.image3]]
inputs = processor(text=text, images=images, padding=True)
# fmt: off
tokenized_sentence_1 = tokenizer(text_str_1, add_special_tokens=False)
tokenized_sentence_2 = tokenizer(text_str_2, add_special_tokens=False)
expected_input_ids_1 = [self.bos_token_id] + ([self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len) * 5 + [self.fake_image_token_id] + tokenized_sentence_1["input_ids"]
expected_input_ids_2 = [self.bos_token_id] + tokenized_sentence_2["input_ids"] + ([self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len) * 5 + ([self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len) * 5 + [self.fake_image_token_id]
# Pad the first input to match the second input
pad_len = len(expected_input_ids_2) - len(expected_input_ids_1)
padded_expected_input_ids_1 = [0] * pad_len + expected_input_ids_1
self.assertEqual(
inputs["input_ids"], [padded_expected_input_ids_1, expected_input_ids_2]
)
self.assertEqual(
inputs["attention_mask"],
[[0] * pad_len + [1] * len(expected_input_ids_1), [1] * len(expected_input_ids_2)]
)
self.assertEqual(inputs['pixel_values'].shape, (2, 10, 3, 767, 980))
self.assertEqual(inputs['pixel_attention_mask'].shape, (2, 10, 767, 980))
# fmt: on
def test_add_special_tokens_processor(self):
processor = self.get_processor()
tokenizer = processor.tokenizer
image_str = "<image>"
text_str = "In this image, we see"
text = text_str + image_str
n_image_repeat = 5 if processor.image_processor.do_image_splitting else 1
# fmt: off
inputs = processor(text=text, images=self.image1, add_special_tokens=False)
tokenized_sentence = tokenizer(text_str, add_special_tokens=False)
expected_input_ids = [tokenized_sentence["input_ids"] + ([self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len) * n_image_repeat + [self.fake_image_token_id]]
self.assertEqual(inputs["input_ids"], expected_input_ids)
inputs = processor(text=text, images=self.image1)
expected_input_ids = [[self.bos_token_id] + tokenized_sentence["input_ids"] + ([self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len) * n_image_repeat + [self.fake_image_token_id]]
self.assertEqual(inputs["input_ids"], expected_input_ids)
# fmt: on
def test_non_nested_images_with_batched_text(self):
processor = self.get_processor()
processor.image_processor.do_image_splitting = False
image_str = "<image>"
text_str_1 = "In this image, we see"
text_str_2 = "bla, bla"
text = [
image_str + text_str_1,
text_str_2 + image_str + image_str,
]
images = [self.image1, self.image2, self.image3]
inputs = processor(text=text, images=images, padding=True)
self.assertEqual(inputs["pixel_values"].shape, (2, 2, 3, 767, 980))
self.assertEqual(inputs["pixel_attention_mask"].shape, (2, 2, 767, 980))
def test_process_interleaved_images_prompts_image_error(self):
processor = self.get_processor()
text = [
"This is a test sentence.",
"In this other sentence we try some good things",
]
images = [[self.image1], [self.image2]]
with self.assertRaises(ValueError):
processor(text=text, images=images, padding=True)
images = [[self.image1], []]
with self.assertRaises(ValueError):
processor(text=text, images=images, padding=True)
text = [
"This is a test sentence.<image>",
"In this other sentence we try some good things<image>",
]
images = [[self.image1], [self.image2, self.image3]]
with self.assertRaises(ValueError):
processor(text=text, images=images, padding=True)
images = [[], [self.image2]]
with self.assertRaises(ValueError):
processor(text=text, images=images, padding=True)
images = [self.image1, self.image2, self.image3]
with self.assertRaises(ValueError):
processor(text=text, images=images, padding=True)
images = [self.image1]
with self.assertRaises(ValueError):
processor(text=text, images=images, padding=True)
text = [
"This is a test sentence.",
"In this other sentence we try some good things<image>",
]
images = [[self.image1], []]
with self.assertRaises(ValueError):
processor(text=text, images=images, padding=True)
images = [self.image1, self.image2]
with self.assertRaises(ValueError):
processor(text=text, images=images, padding=True)
def test_apply_chat_template(self):
# Message contains content which a mix of lists with images and image urls and string
messages = [
{
"role": "user",
"content": [
{"type": "text", "text": "What do these images show?"},
{"type": "image"},
{"type": "image"},
"What do these images show?",
],
},
{
"role": "assistant",
"content": [
{
"type": "text",
"text": "The first image shows the statue of Liberty in New York. The second image picture depicts Idefix, the dog of Obelix in Asterix and Obelix.",
}
],
},
{"role": "user", "content": [{"type": "text", "text": "And who is that?"}]},
]
processor = self.get_processor()
# Make short sequence length to test that the fake tokens are added correctly
rendered = processor.apply_chat_template(messages, add_generation_prompt=True)
expected_rendered = (
"User: What do these images show?<image><image><end_of_utterance>\n"
"Assistant: The first image shows the statue of Liberty in New York. The second image picture depicts Idefix, the dog of Obelix in Asterix and Obelix.<end_of_utterance>\n"
"User: And who is that?<end_of_utterance>\n"
"Assistant:"
)
self.assertEqual(rendered, expected_rendered)
| Idefics2ProcessorTest |
python | google__pytype | pytype/rewrite/load_abstract_test.py | {
"start": 2478,
"end": 3430
} | class ____(test_utils.ContextfulTestBase):
def assertPythonConstant(self, val, expected):
self.assertIsInstance(val, abstract.PythonConstant)
self.assertEqual(val.constant, expected)
def test_basic(self):
const = (1, 2, 3)
t = self.ctx.abstract_loader.build_tuple(const)
self.assertIsInstance(t, abstract.Tuple)
self.assertIsInstance(t.constant, tuple)
self.assertPythonConstant(t.constant[0].values[0], 1)
def test_nested(self):
const = (1, (2, 3, 4), 5)
t = self.ctx.abstract_loader.build_tuple(const)
self.assertIsInstance(t, abstract.Tuple)
self.assertIsInstance(t.constant, tuple)
self.assertIsInstance(t.constant[0], variables.Variable)
inner = t.constant[1].values[0]
self.assertIsInstance(inner, abstract.Tuple)
self.assertIsInstance(inner.constant, tuple)
self.assertPythonConstant(inner.constant[0].values[0], 2)
if __name__ == '__main__':
unittest.main()
| LoadTupleTest |
python | getsentry__sentry | tests/sentry/incidents/endpoints/test_organization_alert_rule_details.py | {
"start": 63640,
"end": 85138
} | class ____(AlertRuleDetailsBase):
method = "put"
def mock_conversations_info(self, channel):
return patch(
"slack_sdk.web.client.WebClient.conversations_info",
return_value=SlackResponse(
client=None,
http_verb="POST",
api_url="https://slack.com/api/conversations.info",
req_args={"channel": channel},
data={"ok": True, "channel": channel},
headers={},
status_code=200,
),
)
def mock_users_info(self, user):
return patch(
"slack_sdk.web.client.WebClient.users_info",
return_value=SlackResponse(
client=None,
http_verb="POST",
api_url="https://slack.com/api/users.info",
req_args={"user": user},
data={"ok": True, "user": user},
headers={},
status_code=200,
),
)
def _organization_alert_rule_api_call(
self,
channelName: str | None = None,
channelID: str | None = None,
) -> Response:
"""
Call the project alert rule API but do some Slack integration set up before doing so
"""
# Set up the Slack integration
self.integration = self.create_slack_integration(
self.organization,
external_id="TXXXXXXX1",
user=self.user,
)
# Prep steps for the API call
test_params = self.valid_params.copy()
test_params["triggers"] = [
{
"label": "critical",
"alertThreshold": 200,
"actions": [
{"type": "slack", "targetType": "specific", "integration": self.integration.id}
],
},
]
if channelName:
test_params["triggers"][0]["actions"][0]["targetIdentifier"] = channelName
if channelID:
# The trigger code would accept channelId to be a string and that is why I don't cast it to an int
test_params["triggers"][0]["actions"][0]["inputChannelId"] = channelID
with self.feature("organizations:incidents"):
resp = self.get_response(self.organization.slug, self.alert_rule.id, **test_params)
return resp
@patch(
"sentry.integrations.slack.utils.channel.get_channel_id_with_timeout",
return_value=SlackChannelIdData("#", None, True),
)
@patch.object(find_channel_id_for_alert_rule, "apply_async")
@patch("sentry.integrations.slack.utils.rule_status.uuid4")
def test_kicks_off_slack_async_job(
self, mock_uuid4, mock_find_channel_id_for_alert_rule, mock_get_channel_id
):
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
mock_uuid4.return_value = self.get_mock_uuid()
self.integration, _ = self.create_provider_integration_for(
self.organization,
self.user,
provider="slack",
name="Team A",
external_id="TXXXXXXX1",
metadata={"access_token": "xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx"},
)
test_params = self.valid_params.copy()
test_params["triggers"] = [
{
"label": "critical",
"alertThreshold": 200,
"actions": [
{
"type": "slack",
"targetIdentifier": "my-channel",
"targetType": "specific",
"integration": self.integration.id,
}
],
},
]
with self.feature("organizations:incidents"):
resp = self.get_response(self.organization.slug, self.alert_rule.id, **test_params)
# A task with this uuid has been scheduled because there's a Slack channel async search
assert resp.data["uuid"] == "abc123"
kwargs = {
"organization_id": self.organization.id,
"uuid": "abc123",
"alert_rule_id": self.alert_rule.id,
"data": test_params,
"user_id": self.user.id,
}
mock_find_channel_id_for_alert_rule.assert_called_once_with(kwargs=kwargs)
def test_create_slack_alert_with_name_and_channel_id_sdk(self) -> None:
"""
The user specifies the Slack channel and channel ID (which match).
"""
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
channelName = "my-channel"
# Specifying an inputChannelID will cause the validate_channel_id logic to be triggered
channelID = "C12345678"
channel = {"name": channelName}
with self.mock_conversations_info(channel):
with (
assume_test_silo_mode(SiloMode.REGION),
override_settings(SILO_MODE=SiloMode.REGION),
):
resp = self._organization_alert_rule_api_call(
channelName=channelName, channelID=channelID
)
stored_action = resp.data["triggers"][0]["actions"][0]
assert stored_action["inputChannelId"] == str(channelID)
assert stored_action["targetIdentifier"] == channelName
def test_create_slack_alert_with_mismatch_name_and_channel_id_sdk(self) -> None:
"""
The user specifies the Slack channel and channel ID but they do not match.
"""
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
otherChannel = "some-other-channel"
channelName = "my-channel"
# Specifying an inputChannelID will cause the validate_channel_id logic to be triggered
channelID = "C12345678"
channel = {"name": otherChannel}
with self.mock_conversations_info(channel):
with (
assume_test_silo_mode(SiloMode.REGION),
override_settings(SILO_MODE=SiloMode.REGION),
):
resp = self._organization_alert_rule_api_call(
channelName=channelName, channelID=channelID
)
assert resp.status_code == 400
assert resp.data == {
"nonFieldErrors": [
ErrorDetail(
string="Slack channel name from ID does not match input channel name.",
code="invalid",
)
]
}
def test_create_slack_alert_with_mismatch_name_and_user_id_sdk(self) -> None:
"""
The user specifies the Slack user and user ID but they do not match.
"""
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
otherUserId = "U12345678"
otherUser = {
"id": otherUserId,
"name": "kim.possible",
"profile": {
"display_name": "Kim Possible 🕵️♀️",
"display_name_normalized": "Kim Possible",
},
}
inputName = "Ron Stoppable"
with self.mock_users_info(user=otherUser):
resp = self._organization_alert_rule_api_call(
channelName=inputName, channelID=otherUserId
)
assert resp.status_code == 400
assert resp.data == {
"nonFieldErrors": [
ErrorDetail(
string="Slack username from ID does not match input username.",
code="invalid",
)
]
}
def test_create_slack_alert_with_missing_name_from_sdk(self) -> None:
"""
The user specifies the Slack user and user ID but the response doesn't have a name.
"""
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
otherUserId = "U12345678"
otherUser = {"id": otherUserId}
inputName = "Ron Stoppable"
with self.mock_users_info(user=otherUser):
resp = self._organization_alert_rule_api_call(
channelName=inputName, channelID=otherUserId
)
assert resp.status_code == 400
assert resp.data == {
"nonFieldErrors": [
ErrorDetail(
string="Did not receive user name from API results",
code="invalid",
)
]
}
# An incorrect channelID will raise an SlackApiError in the Slack client
@responses.activate
def test_create_slack_alert_with_non_existent_channel_id(self) -> None:
"""
The user specifies a bad Slack channel ID.
"""
with patch(
"slack_sdk.web.client.WebClient.conversations_info",
side_effect=SlackApiError(
"error",
SlackResponse(
client=None,
http_verb="POST",
api_url="https://slack.com/api/conversations.info",
req_args={"channel": "my-channel"},
data={"ok": False, "error": "channel_not_found"},
headers={},
status_code=400,
),
),
):
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
channelName = "my-channel"
# Specifying an inputChannelID will cause the validate_channel_id logic to be triggered
channelID = "C12345678"
resp = self._organization_alert_rule_api_call(
channelName=channelName, channelID=channelID
)
assert resp.status_code == 400
assert resp.data == {
"nonFieldErrors": [
ErrorDetail(string="Channel not found. Invalid ID provided.", code="invalid")
]
}
@responses.activate
def test_create_slack_alert_with_non_existent_user_id(self) -> None:
"""
The user specifies a bad Slack user ID.
"""
with patch(
"slack_sdk.web.client.WebClient.users_info",
side_effect=SlackApiError(
"error",
SlackResponse(
client=None,
http_verb="POST",
api_url="https://slack.com/api/users.info",
req_args={"user": "waldo"},
data={"ok": False, "error": "user_not_found"},
headers={},
status_code=400,
),
),
):
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
resp = self._organization_alert_rule_api_call(
channelName="waldo", channelID="U12345678"
)
assert resp.status_code == 400
assert resp.data == {
"nonFieldErrors": [
ErrorDetail(string="User not found. Invalid ID provided.", code="invalid")
]
}
@responses.activate
def test_create_slack_alert_with_non_visible_user(self) -> None:
"""
The user specifies a hidden Slack user ID.
"""
with patch(
"slack_sdk.web.client.WebClient.users_info",
side_effect=SlackApiError(
"error",
SlackResponse(
client=None,
http_verb="POST",
api_url="https://slack.com/api/users.info",
req_args={"user": "waldo"},
data={"ok": False, "error": "user_not_visible"},
headers={},
status_code=400,
),
),
):
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
resp = self._organization_alert_rule_api_call(
channelName="waldo", channelID="U12345678"
)
assert resp.status_code == 400
assert resp.data == {
"nonFieldErrors": [
ErrorDetail(
string="User not visible, you may need to modify your Slack settings.",
code="invalid",
)
]
}
@responses.activate
def test_create_slack_alert_with_bad_user_response(self) -> None:
"""
Catch-all for less common Slack API errors.
"""
with patch(
"slack_sdk.web.client.WebClient.users_info",
side_effect=SlackApiError(
"error",
SlackResponse(
client=None,
http_verb="POST",
api_url="https://slack.com/api/users.info",
req_args={"user": "waldo"},
data={"ok": False, "error": "user_not_found"},
headers={},
status_code=400,
),
),
):
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
resp = self._organization_alert_rule_api_call(
channelName="waldo", channelID="U12345678"
)
assert resp.status_code == 400
assert resp.data == {
"nonFieldErrors": [
ErrorDetail(string="User not found. Invalid ID provided.", code="invalid")
]
}
@patch.object(find_channel_id_for_alert_rule, "apply_async")
@patch("sentry.integrations.slack.utils.rule_status.uuid4")
@responses.activate
def test_create_slack_alert_with_empty_channel_id(
self, mock_uuid4, mock_find_channel_id_for_alert_rule
):
"""
The user selects the channel ID field and the UI will send the empty string to the
endpoint, thus, a channel name search will be performed
"""
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
mock_uuid4.return_value = self.get_mock_uuid()
channelName = "my-channel"
# Because channel ID is None it will be converted to an async request for the channel name
resp = self._organization_alert_rule_api_call(channelName=channelName, channelID=None)
# A task with this uuid has been scheduled because there's a Slack channel async search
assert resp.status_code == 202
assert resp.data == {"uuid": "abc123"}
@patch(
"sentry.integrations.slack.utils.channel.get_channel_id_with_timeout",
side_effect=[
SlackChannelIdData("#", "10", False),
SlackChannelIdData("#", "10", False),
SlackChannelIdData("#", "20", False),
],
)
@patch("sentry.integrations.slack.utils.rule_status.uuid4")
def test_async_lookup_outside_transaction(
self, mock_uuid4: MagicMock, mock_get_channel_id: MagicMock
) -> None:
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
mock_uuid4.return_value = self.get_mock_uuid()
with assume_test_silo_mode(SiloMode.CONTROL):
self.integration = self.create_provider_integration(
provider="slack",
name="Team A",
external_id="TXXXXXXX1",
metadata={"access_token": "xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx"},
)
self.integration.add_organization(self.organization, self.user)
test_params = self.valid_params.copy()
test_params["triggers"] = [
{
"label": "critical",
"alertThreshold": 200,
"actions": [
{
"type": "slack",
"targetIdentifier": "my-channel",
"targetType": "specific",
"integration": self.integration.id,
},
],
},
]
with self.feature("organizations:incidents"), self.tasks():
resp = self.get_response(self.organization.slug, self.alert_rule.id, **test_params)
# A task with this uuid has been scheduled because there's a Slack channel async search
assert resp.data["uuid"] == "abc123"
assert mock_get_channel_id.call_count == 1
# Using get deliberately as there should only be one. Test should fail otherwise.
trigger = AlertRuleTrigger.objects.get(alert_rule_id=self.alert_rule.id)
action = AlertRuleTriggerAction.objects.get(alert_rule_trigger=trigger)
assert action.target_identifier == "10"
assert action.target_display == "my-channel"
# Now two actions with slack:
test_params = self.valid_params.copy()
test_params["triggers"] = [
{
"label": "critical",
"alertThreshold": 200,
"actions": [
{
"type": "slack",
"targetIdentifier": "my-channel",
"targetType": "specific",
"integration": self.integration.id,
},
{
"type": "slack",
"targetIdentifier": "another-channel",
"targetType": "specific",
"integration": self.integration.id,
},
{
"type": "slack",
"targetIdentifier": "another-channel",
"targetType": "specific",
"integration": self.integration.id,
},
],
},
{
"label": "warning",
"alertThreshold": 200,
"actions": [
{
"type": "slack",
"targetIdentifier": "my-channel", # same channel, but only one lookup made per channel
"targetType": "specific",
"integration": self.integration.id,
},
],
},
]
with self.feature("organizations:incidents"), self.tasks():
resp = self.get_response(self.organization.slug, self.alert_rule.id, **test_params)
assert resp.data["uuid"] == "abc123"
assert (
mock_get_channel_id.call_count == 3
) # just made 2 calls, plus the call from the single action test
# Using get deliberately as there should only be one. Test should fail otherwise.
triggers = AlertRuleTrigger.objects.filter(alert_rule_id=self.alert_rule.id)
actions = AlertRuleTriggerAction.objects.filter(alert_rule_trigger__in=triggers).order_by(
"id"
)
# The 3 critical trigger actions:
assert actions[0].target_identifier == "10"
assert actions[0].target_display == "my-channel"
assert actions[1].target_identifier == "20"
assert actions[1].target_display == "another-channel"
assert actions[2].target_identifier == "20"
assert actions[2].target_display == "another-channel"
# This is the warning trigger action:
assert actions[3].target_identifier == "10"
assert actions[3].target_display == "my-channel"
# Now an invalid action (we want to early out with a good validationerror and not schedule the task):
name = "MyInvalidActionRule"
test_params["name"] = name
test_params["triggers"] = [
{
"label": "critical",
"alertThreshold": 75,
"actions": [
{
"type": "element",
"targetIdentifier": "my-channel",
"targetType": "arbitrary",
"integrationId": self.integration.id,
},
],
},
]
with self.feature("organizations:incidents"), self.tasks():
resp = self.get_response(self.organization.slug, self.alert_rule.id, **test_params)
assert resp.status_code == 400
assert (
mock_get_channel_id.call_count == 3
) # Did not increment from the last assertion because we early out on the validation error
| AlertRuleDetailsSlackPutEndpointTest |
python | pandas-dev__pandas | pandas/tests/io/test_pickle.py | {
"start": 11211,
"end": 12598
} | class ____:
@pytest.mark.parametrize("protocol", [-1, 0, 1, 2])
def test_read(self, protocol, get_random_path, temp_file):
path = temp_file
df = DataFrame(
1.1 * np.arange(120).reshape((30, 4)),
columns=Index(list("ABCD"), dtype=object),
index=Index([f"i-{i}" for i in range(30)], dtype=object),
)
df.to_pickle(path, protocol=protocol)
df2 = pd.read_pickle(path)
tm.assert_frame_equal(df, df2)
def test_pickle_buffer_roundtrip(temp_file):
path = temp_file
df = DataFrame(
1.1 * np.arange(120).reshape((30, 4)),
columns=Index(list("ABCD"), dtype=object),
index=Index([f"i-{i}" for i in range(30)], dtype=object),
)
with open(path, "wb") as fh:
df.to_pickle(fh)
with open(path, "rb") as fh:
result = pd.read_pickle(fh)
tm.assert_frame_equal(df, result)
def test_pickle_fsspec_roundtrip(temp_file):
pytest.importorskip("fsspec")
# Using temp_file for context, but fsspec uses memory URL
mockurl = "memory://mockfile"
df = DataFrame(
1.1 * np.arange(120).reshape((30, 4)),
columns=Index(list("ABCD"), dtype=object),
index=Index([f"i-{i}" for i in range(30)], dtype=object),
)
df.to_pickle(mockurl)
result = pd.read_pickle(mockurl)
tm.assert_frame_equal(df, result)
| TestProtocol |
python | ray-project__ray | rllib/examples/envs/classes/multi_agent/footsies/game/proto/footsies_service_pb2_grpc.py | {
"start": 5865,
"end": 10474
} | class ____(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def StartGame(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/FootsiesGameService/StartGame",
footsies__service__pb2.Empty.SerializeToString,
footsies__service__pb2.Empty.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def ResetGame(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/FootsiesGameService/ResetGame",
footsies__service__pb2.Empty.SerializeToString,
footsies__service__pb2.Empty.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def StepNFrames(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/FootsiesGameService/StepNFrames",
footsies__service__pb2.StepInput.SerializeToString,
footsies__service__pb2.GameState.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def GetState(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/FootsiesGameService/GetState",
footsies__service__pb2.Empty.SerializeToString,
footsies__service__pb2.GameState.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def GetEncodedState(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/FootsiesGameService/GetEncodedState",
footsies__service__pb2.Empty.SerializeToString,
footsies__service__pb2.EncodedGameState.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def IsReady(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/FootsiesGameService/IsReady",
footsies__service__pb2.Empty.SerializeToString,
footsies__service__pb2.BoolValue.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
| FootsiesGameService |
python | viewflow__viewflow | tests/views/test_views__list.py | {
"start": 1154,
"end": 1699
} | class ____(ListModelView):
model = User
columns = (
"first_name",
"last_name",
"email",
"get_full_name",
"role",
)
ordering = "pk"
def role(self, obj):
return "Admin" if obj.is_superuser else "User"
urlpatterns = [
path(
"user/",
ListModelView.as_view(
model=User,
columns=("first_name", "last_name", "email", "is_staff"),
ordering="pk",
),
),
path("advanced_user/", UserListView.as_view()),
]
| UserListView |
python | getsentry__sentry | src/sentry/api/endpoints/organization_missing_org_members.py | {
"start": 5224,
"end": 7174
} | class ____(OrganizationEndpoint):
owner = ApiOwner.ECOSYSTEM
publish_status = {
"GET": ApiPublishStatus.EXPERIMENTAL,
}
permission_classes = (MissingMembersPermission,)
def get(self, request: Request, organization: Organization) -> Response:
# ensure the organization has an integration with the commit feature
integrations = integration_service.get_integrations(
organization_id=organization.id, status=ObjectStatus.ACTIVE
)
def provider_reducer(dict, integration):
if not integration.has_feature(feature=IntegrationFeatures.COMMITS):
return dict
if dict.get(integration.provider):
dict[integration.provider].append(integration.id)
else:
dict[integration.provider] = [integration.id]
return dict
integration_provider_to_ids: dict[str, Sequence[int]] = reduce(
provider_reducer, integrations, defaultdict(list)
)
shared_domain = _get_shared_email_domain(organization)
missing_org_members = []
for integration_provider, integration_ids in integration_provider_to_ids.items():
# TODO(cathy): allow other integration providers
if integration_provider != IntegrationProviderSlug.GITHUB.value:
continue
queryset = _get_missing_organization_members(
organization, integration_provider, integration_ids, shared_domain
)
missing_members_for_integration = {
"integration": integration_provider,
"users": serialize(queryset, request.user, serializer=MissingOrgMemberSerializer()),
}
missing_org_members.append(missing_members_for_integration)
return Response(
missing_org_members,
status=status.HTTP_200_OK,
)
| OrganizationMissingMembersEndpoint |
python | kamyu104__LeetCode-Solutions | Python/check-if-a-parentheses-string-can-be-valid.py | {
"start": 29,
"end": 599
} | class ____(object):
def canBeValid(self, s, locked):
"""
:type s: str
:type locked: str
:rtype: bool
"""
if len(s)%2:
return False
for direction, c in ((lambda x:x, '('), (reversed, ')')):
cnt = bal = 0
for i in direction(xrange(len(s))):
if locked[i] == '0':
cnt += 1
else:
bal += 1 if s[i] == c else -1
if cnt+bal < 0:
return False
return True
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constrainedTypeVar14.py | {
"start": 283,
"end": 519
} | class ____(Generic[T]):
def __init__(self, thing: T) -> None:
self.thing = thing
f2 = F[A2](A2())
reveal_type(F[A2], expected_text="type[F[A]]")
reveal_type(f2, expected_text="F[A]")
reveal_type(f2.thing, expected_text="A")
| F |
python | huggingface__transformers | src/transformers/models/auto/auto_factory.py | {
"start": 20223,
"end": 26977
} | class ____(_BaseAutoModelClass):
# Base class for auto backbone models.
_model_mapping = None
@classmethod
def _load_timm_backbone_from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
requires_backends(cls, ["vision", "timm"])
from ...models.timm_backbone import TimmBackboneConfig
config = kwargs.pop("config", TimmBackboneConfig())
if kwargs.get("out_features") is not None:
raise ValueError("Cannot specify `out_features` for timm backbones")
if kwargs.get("output_loading_info", False):
raise ValueError("Cannot specify `output_loading_info=True` when loading from timm")
num_channels = kwargs.pop("num_channels", config.num_channels)
features_only = kwargs.pop("features_only", config.features_only)
use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
out_indices = kwargs.pop("out_indices", config.out_indices)
config = TimmBackboneConfig(
backbone=pretrained_model_name_or_path,
num_channels=num_channels,
features_only=features_only,
use_pretrained_backbone=use_pretrained_backbone,
out_indices=out_indices,
)
return super().from_config(config, **kwargs)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
use_timm_backbone = kwargs.pop("use_timm_backbone", False)
if use_timm_backbone:
return cls._load_timm_backbone_from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
def insert_head_doc(docstring, head_doc: str = ""):
if len(head_doc) > 0:
return docstring.replace(
"one of the model classes of the library ",
f"one of the model classes of the library (with a {head_doc} head) ",
)
return docstring.replace(
"one of the model classes of the library ", "one of the base model classes of the library "
)
def auto_class_update(cls, checkpoint_for_example: str = "google-bert/bert-base-cased", head_doc: str = ""):
# Create a new class with the right name from the base class
model_mapping = cls._model_mapping
name = cls.__name__
class_docstring = insert_head_doc(CLASS_DOCSTRING, head_doc=head_doc)
cls.__doc__ = class_docstring.replace("BaseAutoModelClass", name)
# Now we need to copy and re-register `from_config` and `from_pretrained` as class methods otherwise we can't
# have a specific docstrings for them.
from_config = copy_func(_BaseAutoModelClass.from_config)
from_config_docstring = insert_head_doc(FROM_CONFIG_DOCSTRING, head_doc=head_doc)
from_config_docstring = from_config_docstring.replace("BaseAutoModelClass", name)
from_config_docstring = from_config_docstring.replace("checkpoint_placeholder", checkpoint_for_example)
from_config.__doc__ = from_config_docstring
from_config = replace_list_option_in_docstrings(model_mapping._model_mapping, use_model_types=False)(from_config)
cls.from_config = classmethod(from_config)
from_pretrained_docstring = FROM_PRETRAINED_TORCH_DOCSTRING
from_pretrained = copy_func(_BaseAutoModelClass.from_pretrained)
from_pretrained_docstring = insert_head_doc(from_pretrained_docstring, head_doc=head_doc)
from_pretrained_docstring = from_pretrained_docstring.replace("BaseAutoModelClass", name)
from_pretrained_docstring = from_pretrained_docstring.replace("checkpoint_placeholder", checkpoint_for_example)
shortcut = checkpoint_for_example.split("/")[-1].split("-")[0]
from_pretrained_docstring = from_pretrained_docstring.replace("shortcut_placeholder", shortcut)
from_pretrained.__doc__ = from_pretrained_docstring
from_pretrained = replace_list_option_in_docstrings(model_mapping._model_mapping)(from_pretrained)
cls.from_pretrained = classmethod(from_pretrained)
return cls
def get_values(model_mapping):
result = []
for model in model_mapping.values():
if isinstance(model, (list, tuple)):
result += list(model)
else:
result.append(model)
return result
def getattribute_from_module(module, attr):
if attr is None:
return None
if isinstance(attr, tuple):
return tuple(getattribute_from_module(module, a) for a in attr)
if hasattr(module, attr):
return getattr(module, attr)
# Some of the mappings have entries model_type -> object of another model type. In that case we try to grab the
# object at the top level.
transformers_module = importlib.import_module("transformers")
if module != transformers_module:
try:
return getattribute_from_module(transformers_module, attr)
except ValueError:
raise ValueError(f"Could not find {attr} neither in {module} nor in {transformers_module}!")
else:
raise ValueError(f"Could not find {attr} in {transformers_module}!")
def add_generation_mixin_to_remote_model(model_class):
"""
Adds `GenerationMixin` to the inheritance of `model_class`, if `model_class` is a PyTorch model.
This function is used for backwards compatibility purposes: in v4.45, we've started a deprecation cycle to make
`PreTrainedModel` stop inheriting from `GenerationMixin`. Without this function, older models dynamically loaded
from the Hub may not have the `generate` method after we remove the inheritance.
"""
# 1. If it is not a PT model (i.e. doesn't inherit Module), do nothing
if "torch.nn.modules.module.Module" not in str(model_class.__mro__):
return model_class
# 2. If it already **directly** inherits from GenerationMixin, do nothing
if "GenerationMixin" in str(model_class.__bases__):
return model_class
# 3. Prior to v4.45, we could detect whether a model was `generate`-compatible if it had its own `generate` and/or
# `prepare_inputs_for_generation` method.
has_custom_generate_in_class = hasattr(model_class, "generate") and "GenerationMixin" not in str(
getattr(model_class, "generate")
)
has_custom_prepare_inputs = hasattr(model_class, "prepare_inputs_for_generation") and "GenerationMixin" not in str(
getattr(model_class, "prepare_inputs_for_generation")
)
if has_custom_generate_in_class or has_custom_prepare_inputs:
model_class_with_generation_mixin = type(
model_class.__name__, (model_class, GenerationMixin), {**model_class.__dict__}
)
return model_class_with_generation_mixin
return model_class
| _BaseAutoBackboneClass |
python | huggingface__transformers | tests/models/doge/test_modeling_doge.py | {
"start": 13658,
"end": 15174
} | class ____(unittest.TestCase):
# This variable is used to determine which CUDA device are we using for our runners (A10 or T4)
# Depending on the hardware we get different logits / generations
cuda_compute_capability_major_version = None
@classmethod
def setUpClass(cls):
if is_torch_available() and torch.cuda.is_available():
# 8 is for A100 / A10 and 7 for T4
cls.cuda_compute_capability_major_version = torch.cuda.get_device_capability()[0]
@slow
@require_read_token
def test_Doge_20M_hard(self):
"""
An integration test for Doge-20M. It tests against a long output to ensure the subtle numerical differences
"""
EXPECTED_TEXT = "Here's everything I know about dogs. Dogs is the best animal in the world. It is a very popular and popular dog in the United States. It is a very popular"
tokenizer = AutoTokenizer.from_pretrained("SmallDoge/Doge-20M")
model = DogeForCausalLM.from_pretrained("SmallDoge/Doge-20M", device_map="auto", dtype=torch.bfloat16)
input_text = ["Here's everything I know about dogs. Dogs is the best animal in the"]
set_seed(0)
model_inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
generated_ids = model.generate(**model_inputs, max_new_tokens=20, do_sample=False)
generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(generated_text, EXPECTED_TEXT)
| DogeIntegrationTest |
python | crytic__slither | slither/slithir/variables/variable.py | {
"start": 55,
"end": 327
} | class ____(Variable):
def __init__(self) -> None:
super().__init__()
self._index = 0
@property
def ssa_name(self) -> str:
assert self.name
return self.name
def __str__(self) -> str:
return self.ssa_name
| SlithIRVariable |
python | getsentry__sentry | src/sentry/interfaces/contexts.py | {
"start": 1108,
"end": 5814
} | class ____:
context_to_tag_mapping: ClassVar[dict[str, str]] = {}
"""
This indicates which fields should be promoted into tags during event
normalization. (See EventManager)
The key for each entry is used as the name of the tag suffixed by the
"alias" of the context (this is the key of the context in the contexts
object, it is NOT the `type` of the context, though they are often the
same).
The value is a format string spec that uses python string.Formatter to
interpolate any value from the context object.
There is one special case:
- When the key of the mapping is an empty string the tag name will simply be
the alias.
For example if you have a context named "myContext" with the data:
```json
"myContext": {
"some_value": "hello world",
"subkey": "whatever",
"type": "myContext"
}
```
and you have a context_to_tag_mapping that looks like
```python
context_to_tag_mapping = {"": "{some_value}", "subkey": "{subkey}"}
```
Then normalization will result in two tags being promoted:
- myContext: "hello world"
- myContext.subkey: "whatever"
"""
type: str
"""This should match the `type` key in context object"""
def __init__(self, alias, data):
self.alias = alias
ctx_data = {}
for key, value in data.items():
# we use a simple check here, rather than ' in set()' to avoid
# issues with maps/lists.
# Even if the value is an empty string,
# we still want to display the info the UI
if value is not None:
ctx_data[force_str(key)] = value
# Numbers exceeding 15 place values will be converted to strings to avoid rendering issues
if isinstance(value, (int, float, list, dict)):
ctx_data[force_str(key)] = self.change_type(value)
self.data = ctx_data
def to_json(self):
rv = dict(self.data)
rv["type"] = self.type
return prune_empty_keys(rv)
@classmethod
def values_for_data(cls, data):
rv = []
for context in (data.get("contexts") or {}).values():
if context and context.get("type") == cls.type:
rv.append(context)
return rv
@classmethod
def primary_value_for_data(cls, data):
val = get_path(data, "contexts", cls.type)
if val and val.get("type") == cls.type:
return val
rv = cls.values_for_data(data)
if len(rv) == 1:
return rv[0]
def iter_tags(self):
if self.context_to_tag_mapping:
for field, f_string in self.context_to_tag_mapping.items():
try:
value = format_index_expr(f_string, self.data)
except KeyError:
continue
if value:
if not field:
yield (self.alias, value)
else:
yield (f"{self.alias}.{field}", value)
def change_type(self, value: int | float | list | dict) -> Any:
if isinstance(value, (float, int)) and len(str_value := force_str(value)) > 15:
return str_value
if isinstance(value, list):
return [self.change_type(el) for el in value]
elif isinstance(value, dict):
return {key: self.change_type(el) for key, el in value.items()}
else:
return value
# NOTE:
# If you are adding a new context to tag mapping which creates a tag out of an interpolation
# of multiple context fields, you will most likely have to add the same mapping creation in Relay,
# which should be added directly to the context payload itself, and you should reflect this here.
#
# Current examples of this include the `os`, `runtime` and `browser` fields of their respective context.
#
# Example:
# Suppose you have a new context named "my_context" which has fields:
# - "field_1"
# - "field_2"
#
# And you want to create a tag named "field_3" which is equal to "{field_1}-{field_2}".
#
# If you do this here, on demand metrics will stop working because if a user filters by "field_3" and
# we generate a metrics extraction specification for it, Relay won't know what "field_3" means, it will
# only know "field_1" and "field_2" from the context.
#
# To solve this, you should materialize "field_3" during event normalization in Relay and directly express
# the mapping in Sentry as "field_3" is equal to "field_3" (which was added by Relay during normalization).
# TODO(dcramer): contexts need to document/describe expected (optional) fields
@contexttype
| ContextType |
python | kamyu104__LeetCode-Solutions | Python/consecutive-characters.py | {
"start": 423,
"end": 541
} | class ____(object):
def maxPower(self, s):
return max(len(list(v)) for _, v in itertools.groupby(s))
| Solution2 |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 496074,
"end": 499541
} | class ____(sgqlc.types.Interface):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"any_pinnable_items",
"email",
"id",
"item_showcase",
"location",
"login",
"name",
"pinnable_items",
"pinned_items",
"pinned_items_remaining",
"viewer_can_change_pinned_items",
"website_url",
)
any_pinnable_items = sgqlc.types.Field(
sgqlc.types.non_null(Boolean),
graphql_name="anyPinnableItems",
args=sgqlc.types.ArgDict(
(
(
"type",
sgqlc.types.Arg(
PinnableItemType, graphql_name="type", default=None
),
),
)
),
)
email = sgqlc.types.Field(String, graphql_name="email")
id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="id")
item_showcase = sgqlc.types.Field(
sgqlc.types.non_null(ProfileItemShowcase), graphql_name="itemShowcase"
)
location = sgqlc.types.Field(String, graphql_name="location")
login = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="login")
name = sgqlc.types.Field(String, graphql_name="name")
pinnable_items = sgqlc.types.Field(
sgqlc.types.non_null(PinnableItemConnection),
graphql_name="pinnableItems",
args=sgqlc.types.ArgDict(
(
(
"types",
sgqlc.types.Arg(
sgqlc.types.list_of(sgqlc.types.non_null(PinnableItemType)),
graphql_name="types",
default=None,
),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
pinned_items = sgqlc.types.Field(
sgqlc.types.non_null(PinnableItemConnection),
graphql_name="pinnedItems",
args=sgqlc.types.ArgDict(
(
(
"types",
sgqlc.types.Arg(
sgqlc.types.list_of(sgqlc.types.non_null(PinnableItemType)),
graphql_name="types",
default=None,
),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
pinned_items_remaining = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="pinnedItemsRemaining"
)
viewer_can_change_pinned_items = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="viewerCanChangePinnedItems"
)
website_url = sgqlc.types.Field(URI, graphql_name="websiteUrl")
| ProfileOwner |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 25763,
"end": 27458
} | class ____(sgqlc.types.Enum):
"""The state of a Git signature.
Enumeration Choices:
* `BAD_CERT`: The signing certificate or its chain could not be
verified
* `BAD_EMAIL`: Invalid email used for signing
* `EXPIRED_KEY`: Signing key expired
* `GPGVERIFY_ERROR`: Internal error - the GPG verification service
misbehaved
* `GPGVERIFY_UNAVAILABLE`: Internal error - the GPG verification
service is unavailable at the moment
* `INVALID`: Invalid signature
* `MALFORMED_SIG`: Malformed signature
* `NOT_SIGNING_KEY`: The usage flags for the key that signed this
don't allow signing
* `NO_USER`: Email used for signing not known to GitHub
* `OCSP_ERROR`: Valid signature, though certificate revocation
check failed
* `OCSP_PENDING`: Valid signature, pending certificate revocation
checking
* `OCSP_REVOKED`: One or more certificates in chain has been
revoked
* `UNKNOWN_KEY`: Key used for signing not known to GitHub
* `UNKNOWN_SIG_TYPE`: Unknown signature type
* `UNSIGNED`: Unsigned
* `UNVERIFIED_EMAIL`: Email used for signing unverified on GitHub
* `VALID`: Valid signature and verified by GitHub
"""
__schema__ = github_schema
__choices__ = (
"BAD_CERT",
"BAD_EMAIL",
"EXPIRED_KEY",
"GPGVERIFY_ERROR",
"GPGVERIFY_UNAVAILABLE",
"INVALID",
"MALFORMED_SIG",
"NOT_SIGNING_KEY",
"NO_USER",
"OCSP_ERROR",
"OCSP_PENDING",
"OCSP_REVOKED",
"UNKNOWN_KEY",
"UNKNOWN_SIG_TYPE",
"UNSIGNED",
"UNVERIFIED_EMAIL",
"VALID",
)
| GitSignatureState |
python | getsentry__sentry | tests/sentry/snuba/test_subscriptions.py | {
"start": 17989,
"end": 19340
} | class ____(TestCase):
def test(self) -> None:
with self.tasks():
snuba_query = create_snuba_query(
SnubaQuery.Type.ERROR,
Dataset.Events,
"level:error",
"count()",
timedelta(minutes=10),
timedelta(minutes=1),
None,
)
subscription = create_snuba_subscription(self.project, "something", snuba_query)
snuba_query = create_snuba_query(
SnubaQuery.Type.ERROR,
Dataset.Events,
"level:error",
"count()",
timedelta(minutes=10),
timedelta(minutes=1),
None,
)
other_subscription = create_snuba_subscription(
self.create_project(organization=self.organization), "something", snuba_query
)
subscription_ids = [subscription.id, other_subscription.id]
bulk_delete_snuba_subscriptions([subscription, other_subscription])
assert (
QuerySubscription.objects.filter(
id__in=subscription_ids,
status=QuerySubscription.Status.DELETING.value,
subscription_id__isnull=False,
).count()
== 2
)
| BulkDeleteSnubaSubscriptionTest |
python | doocs__leetcode | solution/3400-3499/3479.Fruits Into Baskets III/Solution.py | {
"start": 0,
"end": 1092
} | class ____:
__slots__ = ["nums", "tr"]
def __init__(self, nums):
self.nums = nums
n = len(nums)
self.tr = [0] * (n << 2)
self.build(1, 1, n)
def build(self, u, l, r):
if l == r:
self.tr[u] = self.nums[l - 1]
return
mid = (l + r) >> 1
self.build(u << 1, l, mid)
self.build(u << 1 | 1, mid + 1, r)
self.pushup(u)
def modify(self, u, l, r, i, v):
if l == r:
self.tr[u] = v
return
mid = (l + r) >> 1
if i <= mid:
self.modify(u << 1, l, mid, i, v)
else:
self.modify(u << 1 | 1, mid + 1, r, i, v)
self.pushup(u)
def query(self, u, l, r, v):
if self.tr[u] < v:
return -1
if l == r:
return l
mid = (l + r) >> 1
if self.tr[u << 1] >= v:
return self.query(u << 1, l, mid, v)
return self.query(u << 1 | 1, mid + 1, r, v)
def pushup(self, u):
self.tr[u] = max(self.tr[u << 1], self.tr[u << 1 | 1])
| SegmentTree |
python | bokeh__bokeh | src/bokeh/models/annotations/arrows.py | {
"start": 4093,
"end": 6082
} | class ____(DataAnnotation):
''' Render arrows as an annotation.
See :ref:`ug_basic_annotations_arrows` for information on plotting arrows.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
x_start = NumberSpec(default=field("x_start"), help="""
The x-coordinates to locate the start of the arrows.
""")
y_start = NumberSpec(default=field("y_start"), help="""
The y-coordinates to locate the start of the arrows.
""")
start_units = Enum(CoordinateUnits, default='data', help="""
The unit type for the start_x and start_y attributes. Interpreted as "data
space" units by default.
""")
start = Nullable(Instance(ArrowHead), help="""
Instance of ``ArrowHead``.
""")
x_end = NumberSpec(default=field("x_end"), help="""
The x-coordinates to locate the end of the arrows.
""")
y_end = NumberSpec(default=field("y_end"), help="""
The y-coordinates to locate the end of the arrows.
""")
end_units = Enum(CoordinateUnits, default='data', help="""
The unit type for the end_x and end_y attributes. Interpreted as "data
space" units by default.
""")
end = Nullable(Instance(ArrowHead), default=InstanceDefault(OpenHead), help="""
Instance of ``ArrowHead``.
""")
body_props = Include(LineProps, help="""
The {prop} values for the arrow body.
""")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| Arrow |
python | sqlalchemy__sqlalchemy | test/dialect/mssql/test_reflection.py | {
"start": 1328,
"end": 36800
} | class ____(fixtures.TestBase, ComparesTables, AssertsCompiledSQL):
__only_on__ = "mssql"
__backend__ = True
def test_basic_reflection(self, metadata, connection):
meta = metadata
users = Table(
"engine_users",
meta,
Column("user_id", types.INT, primary_key=True),
Column("user_name", types.VARCHAR(20), nullable=False),
Column("test1", types.CHAR(5), nullable=False),
Column("test2", types.Float(5), nullable=False),
Column("test2.5", types.Float(), nullable=False),
Column("test3", types.Text()),
Column("test4", types.Numeric, nullable=False),
Column("test4.5", types.Numeric(10, 2), nullable=False),
Column("test5", types.DateTime),
Column(
"parent_user_id",
types.Integer,
ForeignKey("engine_users.user_id"),
),
Column("test6", types.DateTime, nullable=False),
Column("test7", types.Text()),
Column("test8", types.LargeBinary()),
Column("test_passivedefault2", types.Integer, server_default="5"),
Column("test9", types.BINARY(100)),
Column("test_numeric", types.Numeric()),
)
addresses = Table(
"engine_email_addresses",
meta,
Column("address_id", types.Integer, primary_key=True),
Column(
"remote_user_id", types.Integer, ForeignKey(users.c.user_id)
),
Column("email_address", types.String(20)),
)
meta.create_all(connection)
meta2 = MetaData()
reflected_users = Table(
"engine_users", meta2, autoload_with=connection
)
reflected_addresses = Table(
"engine_email_addresses",
meta2,
autoload_with=connection,
)
self.assert_tables_equal(users, reflected_users)
self.assert_tables_equal(addresses, reflected_addresses)
@testing.combinations(
(mssql.XML, "XML"),
(mssql.IMAGE, "IMAGE"),
(mssql.MONEY, "MONEY"),
(mssql.NUMERIC(10, 2), "NUMERIC(10, 2)"),
(mssql.FLOAT, "FLOAT(53)"),
(mssql.REAL, "REAL"),
# FLOAT(5) comes back as REAL
(mssql.FLOAT(5), "REAL"),
argnames="type_obj,ddl",
)
def test_assorted_types(self, metadata, connection, type_obj, ddl):
table = Table("type_test", metadata, Column("col1", type_obj))
table.create(connection)
m2 = MetaData()
table2 = Table("type_test", m2, autoload_with=connection)
self.assert_compile(
schema.CreateTable(table2),
"CREATE TABLE type_test (col1 %s NULL)" % ddl,
)
def test_identity(self, metadata, connection):
table = Table(
"identity_test",
metadata,
Column(
"col1",
Integer,
mssql_identity_start=2,
mssql_identity_increment=3,
primary_key=True,
),
)
with testing.expect_deprecated(
"The dialect options 'mssql_identity_start' and"
):
table.create(connection)
meta2 = MetaData()
table2 = Table("identity_test", meta2, autoload_with=connection)
eq_(table2.c["col1"].dialect_options["mssql"]["identity_start"], None)
eq_(
table2.c["col1"].dialect_options["mssql"]["identity_increment"],
None,
)
eq_(table2.c["col1"].identity.start, 2)
eq_(table2.c["col1"].identity.increment, 3)
def test_skip_types(self, connection):
connection.exec_driver_sql(
"create table foo (id integer primary key, data xml)"
)
with mock.patch.object(
connection.dialect, "ischema_names", {"int": mssql.INTEGER}
):
with testing.expect_warnings(
"Did not recognize type 'xml' of column 'data'"
):
eq_(
inspect(connection).get_columns("foo"),
[
{
"name": "id",
"type": testing.eq_type_affinity(sqltypes.INTEGER),
"nullable": False,
"default": None,
"autoincrement": False,
"comment": None,
},
{
"name": "data",
"type": testing.eq_type_affinity(
sqltypes.NullType
),
"nullable": True,
"default": None,
"autoincrement": False,
"comment": None,
},
],
)
def test_cross_schema_fk_pk_name_overlaps(self, metadata, connection):
# test for issue #4228
Table(
"subject",
metadata,
Column("id", Integer),
PrimaryKeyConstraint("id", name="subj_pk"),
schema=testing.config.test_schema,
)
Table(
"referrer",
metadata,
Column("id", Integer, primary_key=True),
Column(
"sid",
ForeignKey(
"%s.subject.id" % testing.config.test_schema,
name="fk_subject",
),
),
schema=testing.config.test_schema,
)
Table(
"subject",
metadata,
Column("id", Integer),
PrimaryKeyConstraint("id", name="subj_pk"),
schema=testing.config.test_schema_2,
)
metadata.create_all(connection)
insp = inspect(connection)
eq_(
insp.get_foreign_keys("referrer", testing.config.test_schema),
[
{
"name": "fk_subject",
"constrained_columns": ["sid"],
"referred_schema": "test_schema",
"referred_table": "subject",
"referred_columns": ["id"],
"options": {},
}
],
)
def test_table_name_that_is_greater_than_16_chars(
self, metadata, connection
):
Table(
"ABCDEFGHIJKLMNOPQRSTUVWXYZ",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer),
Index("foo_idx", "foo"),
)
metadata.create_all(connection)
t = Table(
"ABCDEFGHIJKLMNOPQRSTUVWXYZ", MetaData(), autoload_with=connection
)
eq_(t.name, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
@testing.combinations(
("local_temp", "#tmp", True),
("global_temp", "##tmp", True),
("nonexistent", "#no_es_bueno", False),
id_="iaa",
argnames="table_name, exists",
)
def test_temporary_table(self, metadata, connection, table_name, exists):
if exists:
tt = Table(
table_name,
metadata,
Column("id", Integer, primary_key=True),
Column("txt", mssql.NVARCHAR(50)),
Column("dt2", mssql.DATETIME2),
)
tt.create(connection)
connection.execute(
tt.insert(),
[
{
"id": 1,
"txt": "foo",
"dt2": datetime.datetime(2020, 1, 1, 1, 1, 1),
},
{
"id": 2,
"txt": "bar",
"dt2": datetime.datetime(2020, 2, 2, 2, 2, 2),
},
],
)
if not exists:
with expect_raises(exc.NoSuchTableError):
Table(
table_name,
metadata,
autoload_with=connection,
)
else:
tmp_t = Table(table_name, metadata, autoload_with=connection)
result = connection.execute(
tmp_t.select().where(tmp_t.c.id == 2)
).fetchall()
eq_(
result,
[(2, "bar", datetime.datetime(2020, 2, 2, 2, 2, 2))],
)
@testing.combinations(
("local_temp", "#tmp", True),
("global_temp", "##tmp", True),
("nonexistent", "#no_es_bueno", False),
id_="iaa",
argnames="table_name, exists",
)
def test_has_table_temporary(
self, metadata, connection, table_name, exists
):
if exists:
tt = Table(
table_name,
metadata,
Column("id", Integer),
)
tt.create(connection)
found_it = testing.db.dialect.has_table(connection, table_name)
eq_(found_it, exists)
def test_has_table_temp_not_present_but_another_session(self):
"""test #6910"""
with testing.db.connect() as c1, testing.db.connect() as c2:
try:
with c1.begin():
c1.exec_driver_sql(
"create table #myveryveryuniquetemptablename (a int)"
)
assert not c2.dialect.has_table(
c2, "#myveryveryuniquetemptablename"
)
finally:
with c1.begin():
c1.exec_driver_sql(
"drop table #myveryveryuniquetemptablename"
)
    def test_has_table_temp_temp_present_both_sessions(self):
        """test #7168, continues from #6910

        When both sessions create a same-named local temp table, each
        session's has_table() must see its own copy.
        """
        with testing.db.connect() as c1, testing.db.connect() as c2:
            try:
                with c1.begin():
                    c1.exec_driver_sql(
                        "create table #myveryveryuniquetemptablename (a int)"
                    )
                with c2.begin():
                    c2.exec_driver_sql(
                        "create table #myveryveryuniquetemptablename (a int)"
                    )
                    assert c2.dialect.has_table(
                        c2, "#myveryveryuniquetemptablename"
                    )
                    c2.rollback()
            finally:
                # each session drops its own copy of the temp table
                with c1.begin():
                    c1.exec_driver_sql(
                        "drop table #myveryveryuniquetemptablename"
                    )
                with c2.begin():
                    c2.exec_driver_sql(
                        "drop table #myveryveryuniquetemptablename"
                    )
    @testing.fixture
    def temp_db_alt_collation_fixture(
        self, connection_no_trans, testing_engine
    ):
        """Yield an Engine bound to a scratch database created with a
        non-default collation (Danish_Norwegian_CI_AS); the database is
        dropped again after the test.

        DDL runs on an AUTOCOMMIT connection because CREATE/DROP DATABASE
        cannot run inside a transaction on SQL Server.
        """
        temp_db_name = "%s_different_collation" % (
            provision.FOLLOWER_IDENT or "default"
        )
        cnxn = connection_no_trans.execution_options(
            isolation_level="AUTOCOMMIT"
        )
        cnxn.exec_driver_sql(f"DROP DATABASE IF EXISTS {temp_db_name}")
        cnxn.exec_driver_sql(
            f"CREATE DATABASE {temp_db_name} COLLATE Danish_Norwegian_CI_AS"
        )
        # NullPool so no connection lingers and blocks the final DROP
        eng = testing_engine(
            url=testing.db.url.set(database=temp_db_name),
            options=dict(poolclass=NullPool),
        )
        yield eng
        cnxn.exec_driver_sql(f"DROP DATABASE IF EXISTS {temp_db_name}")
    def test_global_temp_different_collation(
        self, temp_db_alt_collation_fixture
    ):
        """test #8035

        Reflection of a global temp table (##) must work when the current
        database uses a different collation than tempdb.
        """
        # randomized name so concurrent runs don't collide on the
        # server-global ## namespace
        tname = f"##foo{random.randint(1, 1000000)}"
        with temp_db_alt_collation_fixture.connect() as conn:
            conn.exec_driver_sql(f"CREATE TABLE {tname} (id int primary key)")
            conn.commit()
            eq_(
                inspect(conn).get_columns(tname),
                [
                    {
                        "name": "id",
                        "type": testing.eq_type_affinity(sqltypes.INTEGER),
                        "nullable": False,
                        "default": None,
                        "autoincrement": False,
                        "comment": None,
                    }
                ],
            )
            # full Table reflection must also succeed without raising
            Table(tname, MetaData(), autoload_with=conn)
    @testing.combinations(
        ("test_schema"),
        ("[test_schema]"),
        argnames="schema_value",
    )
    @testing.variation(
        "reflection_operation", ["has_table", "reflect_table", "get_columns"]
    )
    def test_has_table_with_single_token_schema(
        self, metadata, connection, schema_value, reflection_operation
    ):
        """test for #9133

        Reflection operations accept a one-part schema name both plain
        ("test_schema") and bracket-quoted ("[test_schema]").
        """
        tt = Table(
            "test", metadata, Column("id", Integer), schema=schema_value
        )
        tt.create(connection)
        if reflection_operation.has_table:
            is_true(inspect(connection).has_table("test", schema=schema_value))
        elif reflection_operation.reflect_table:
            m2 = MetaData()
            Table("test", m2, autoload_with=connection, schema=schema_value)
        elif reflection_operation.get_columns:
            is_true(
                inspect(connection).get_columns("test", schema=schema_value)
            )
        else:
            reflection_operation.fail()
    def test_db_qualified_items(self, metadata, connection):
        """Reflection with a database-qualified two-part schema name
        ("dbname.owner"): get_foreign_keys() reports referred_schema fully
        qualified, and has_table()/Table reflection accept the same name.
        """
        Table("foo", metadata, Column("id", Integer, primary_key=True))
        Table(
            "bar",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("foo_id", Integer, ForeignKey("foo.id", name="fkfoo")),
        )
        metadata.create_all(connection)
        # build "dbname.owner" from the live connection's context
        dbname = connection.exec_driver_sql("select db_name()").scalar()
        owner = connection.exec_driver_sql("SELECT user_name()").scalar()
        referred_schema = "%(dbname)s.%(owner)s" % {
            "dbname": dbname,
            "owner": owner,
        }
        inspector = inspect(connection)
        bar_via_db = inspector.get_foreign_keys("bar", schema=referred_schema)
        eq_(
            bar_via_db,
            [
                {
                    "referred_table": "foo",
                    "referred_columns": ["id"],
                    "referred_schema": referred_schema,
                    "name": "fkfoo",
                    "constrained_columns": ["foo_id"],
                    "options": {},
                }
            ],
        )
        assert inspect(connection).has_table("bar", schema=referred_schema)
        m2 = MetaData()
        Table(
            "bar",
            m2,
            schema=referred_schema,
            autoload_with=connection,
        )
        # the FK target "foo" is pulled in under the same qualified schema
        eq_(m2.tables["%s.foo" % referred_schema].schema, referred_schema)
    def test_fk_on_unique_index(self, metadata, connection):
        """test for issue #7160

        A composite FK whose target columns are backed by a UNIQUE INDEX
        (rather than a PK/unique constraint) must reflect with its full
        referred/constrained column lists.
        """
        Table(
            "uidx_parent",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("uidx_col1", Integer, nullable=False),
            Column("uidx_col2", Integer, nullable=False),
            Index(
                "UIDX_composite",
                "uidx_col1",
                "uidx_col2",
                unique=True,
            ),
        )
        Table(
            "uidx_child",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("parent_uidx_col1", Integer, nullable=False),
            Column("parent_uidx_col2", Integer, nullable=False),
            ForeignKeyConstraint(
                ["parent_uidx_col1", "parent_uidx_col2"],
                ["uidx_parent.uidx_col1", "uidx_parent.uidx_col2"],
                name="FK_uidx_parent",
            ),
        )
        metadata.create_all(connection)
        inspector = inspect(connection)
        fk_info = inspector.get_foreign_keys("uidx_child")
        eq_(
            fk_info,
            [
                {
                    "referred_table": "uidx_parent",
                    "referred_columns": ["uidx_col1", "uidx_col2"],
                    "referred_schema": None,
                    "name": "FK_uidx_parent",
                    "constrained_columns": [
                        "parent_uidx_col1",
                        "parent_uidx_col2",
                    ],
                    "options": {},
                }
            ],
        )
    def test_fk_with_same_column_name_as_pk_idx(self, metadata, connection):
        """test #12907"""
        # Create table A with primary key AId and a unique index IX_A_AId
        Table(
            "a",
            metadata,
            Column("aid", Integer, nullable=False),
            Column("name", types.String(50)),
            PrimaryKeyConstraint("aid", name="PK_A"),
        ).create(connection)
        # IMPORTANT - create unique index on a *first* before creating
        # FK on B, this affects how the FK is generated in SQL server
        connection.exec_driver_sql("CREATE UNIQUE INDEX IX_A_AId ON a (aid)")
        # Create table B with foreign key column AId referencing A(AId)
        # and an index with the same name IX_A_AId
        Table(
            "b",
            metadata,
            Column("id", Integer, Identity(), primary_key=True),
            Column("aid", Integer),
            ForeignKeyConstraint(["aid"], ["a.aid"], name="FK_B_A"),
        ).create(connection)
        connection.exec_driver_sql("CREATE INDEX IX_A_AId ON B(aid)")
        m2 = MetaData()
        table_b = Table("b", m2, autoload_with=connection)
        # despite the duplicated index name, exactly one FK reflects,
        # mapping b.aid -> a.aid
        fks = list(table_b.foreign_keys)
        eq_(len(fks), 1)
        eq_(fks[0].parent.name, "aid")
        eq_(fks[0].column.table.name, "a")
        eq_(fks[0].column.name, "aid")
def test_indexes_cols(self, metadata, connection):
t1 = Table("t", metadata, Column("x", Integer), Column("y", Integer))
Index("foo", t1.c.x, t1.c.y)
metadata.create_all(connection)
m2 = MetaData()
t2 = Table("t", m2, autoload_with=connection)
eq_(set(list(t2.indexes)[0].columns), {t2.c["x"], t2.c.y})
def test_indexes_cols_with_commas(self, metadata, connection):
t1 = Table(
"t",
metadata,
Column("x, col", Integer, key="x"),
Column("y", Integer),
)
Index("foo", t1.c.x, t1.c.y)
metadata.create_all(connection)
m2 = MetaData()
t2 = Table("t", m2, autoload_with=connection)
eq_(set(list(t2.indexes)[0].columns), {t2.c["x, col"], t2.c.y})
def test_indexes_cols_with_spaces(self, metadata, connection):
t1 = Table(
"t",
metadata,
Column("x col", Integer, key="x"),
Column("y", Integer),
)
Index("foo", t1.c.x, t1.c.y)
metadata.create_all(connection)
m2 = MetaData()
t2 = Table("t", m2, autoload_with=connection)
eq_(set(list(t2.indexes)[0].columns), {t2.c["x col"], t2.c.y})
def test_indexes_with_filtered(self, metadata, connection):
t1 = Table(
"t",
metadata,
Column("id", Integer, primary_key=True),
Column("x", types.String(20)),
Column("y", types.Integer),
)
Index("idx_x", t1.c.x, mssql_where=t1.c.x == "test")
Index("idx_y", t1.c.y, mssql_where=t1.c.y >= 5)
metadata.create_all(connection)
ind = testing.db.dialect.get_indexes(connection, "t", None)
filtered_indexes = []
for ix in ind:
if "dialect_options" in ix:
filtered_indexes.append(ix["dialect_options"]["mssql_where"])
eq_(sorted(filtered_indexes), ["([x]='test')", "([y]>=(5))"])
t2 = Table("t", MetaData(), autoload_with=connection)
idx = list(sorted(t2.indexes, key=lambda idx: idx.name))[0]
self.assert_compile(
CreateIndex(idx),
"CREATE NONCLUSTERED INDEX idx_x ON t (x) WHERE ([x]='test')",
)
    def test_index_reflection_clustered(self, metadata, connection):
        """
        when the result of get_indexes() is used to build an index it should
        include the CLUSTERED keyword when appropriate
        """
        t1 = Table(
            "t",
            metadata,
            Column("id", Integer),
            Column("x", types.String(20)),
            Column("y", types.Integer),
        )
        Index("idx_x", t1.c.x, mssql_clustered=True)
        Index("idx_y", t1.c.y)
        metadata.create_all(connection)
        ind = testing.db.dialect.get_indexes(connection, "t", None)
        # only idx_x should report mssql_clustered=True
        clustered_index = ""
        for ix in ind:
            if ix["dialect_options"]["mssql_clustered"]:
                clustered_index = ix["name"]
        eq_(clustered_index, "idx_x")
        t2 = Table("t", MetaData(), autoload_with=connection)
        # sorted by name, idx_x comes first
        idx = list(sorted(t2.indexes, key=lambda idx: idx.name))[0]
        self.assert_compile(
            CreateIndex(idx), "CREATE CLUSTERED INDEX idx_x ON t (x)"
        )
def test_index_reflection_filtered_and_clustered(
self, metadata, connection
):
"""
table with one filtered index and one clustered index so each index
will have different dialect_options keys
"""
t1 = Table(
"t",
metadata,
Column("id", Integer),
Column("x", types.String(20)),
Column("y", types.Integer),
)
Index("idx_x", t1.c.x, mssql_clustered=True)
Index("idx_y", t1.c.y, mssql_where=t1.c.y >= 5)
metadata.create_all(connection)
ind = testing.db.dialect.get_indexes(connection, "t", None)
clustered_index = ""
for ix in ind:
if ix["dialect_options"]["mssql_clustered"]:
clustered_index = ix["name"]
is_false("mssql_columnstore" in ix["dialect_options"])
eq_(clustered_index, "idx_x")
filtered_indexes = []
for ix in ind:
if "dialect_options" in ix:
if "mssql_where" in ix["dialect_options"]:
filtered_indexes.append(
ix["dialect_options"]["mssql_where"]
)
eq_(sorted(filtered_indexes), ["([y]>=(5))"])
t2 = Table("t", MetaData(), autoload_with=connection)
clustered_idx = list(
sorted(t2.indexes, key=lambda clustered_idx: clustered_idx.name)
)[0]
filtered_idx = list(
sorted(t2.indexes, key=lambda filtered_idx: filtered_idx.name)
)[1]
self.assert_compile(
CreateIndex(clustered_idx), "CREATE CLUSTERED INDEX idx_x ON t (x)"
)
self.assert_compile(
CreateIndex(filtered_idx),
"CREATE NONCLUSTERED INDEX idx_y ON t (y) WHERE ([y]>=(5))",
)
def test_index_reflection_nonclustered(self, metadata, connection):
"""
one index created by specifying mssql_clustered=False
one created without specifying mssql_clustered property so it will
use default of NONCLUSTERED.
When reflected back mssql_clustered=False should be included in both
"""
t1 = Table(
"t",
metadata,
Column("id", Integer),
Column("x", types.String(20)),
Column("y", types.Integer),
)
Index("idx_x", t1.c.x, mssql_clustered=False)
Index("idx_y", t1.c.y)
metadata.create_all(connection)
ind = testing.db.dialect.get_indexes(connection, "t", None)
for ix in ind:
assert ix["dialect_options"]["mssql_clustered"] == False
is_false("mssql_columnstore" in ix["dialect_options"])
t2 = Table("t", MetaData(), autoload_with=connection)
idx = list(sorted(t2.indexes, key=lambda idx: idx.name))[0]
self.assert_compile(
CreateIndex(idx), "CREATE NONCLUSTERED INDEX idx_x ON t (x)"
)
    def test_index_column_order_clustered(self, metadata, connection):
        """test for #12894

        Columns of a clustered index must reflect in index-definition
        order (y, id, x), not in table or primary-key order.
        """
        test_table = Table(
            "t",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("x", Integer),
            Column("y", Integer),
            # nonclustered PK so the clustered index below is allowed
            PrimaryKeyConstraint("id", mssql_clustered=False),
        )
        Index(
            "idx_x",
            test_table.c.y,
            test_table.c.id,
            test_table.c.x,
            mssql_clustered=True,
        )
        metadata.create_all(connection)
        indexes = testing.db.dialect.get_indexes(connection, "t", None)
        eq_(indexes[0]["column_names"], ["y", "id", "x"])
    @testing.only_if("mssql>=12")
    def test_index_reflection_colstore_clustered(self, metadata, connection):
        """A clustered columnstore index reflects with empty column_names
        and include list, and compiles back without a column list."""
        t1 = Table(
            "t",
            metadata,
            Column("id", Integer),
            Column("x", types.String(20)),
            Column("y", types.Integer),
            Index("idx_x", mssql_clustered=True, mssql_columnstore=True),
        )
        Index("idx_y", t1.c.y)
        metadata.create_all(connection)
        ind = testing.db.dialect.get_indexes(connection, "t", None)
        for ix in ind:
            if ix["name"] == "idx_x":
                is_true(ix["dialect_options"]["mssql_clustered"])
                is_true(ix["dialect_options"]["mssql_columnstore"])
                eq_(ix["dialect_options"]["mssql_include"], [])
                # clustered columnstore covers the whole table: no columns
                eq_(ix["column_names"], [])
            else:
                is_false(ix["dialect_options"]["mssql_clustered"])
                is_false("mssql_columnstore" in ix["dialect_options"])
        t2 = Table("t", MetaData(), autoload_with=connection)
        # sorted by name, idx_x comes first
        idx = list(sorted(t2.indexes, key=lambda idx: idx.name))[0]
        self.assert_compile(
            CreateIndex(idx), "CREATE CLUSTERED COLUMNSTORE INDEX idx_x ON t"
        )
    @testing.only_if("mssql>=11")
    def test_index_reflection_colstore_nonclustered(
        self, metadata, connection
    ):
        """A nonclustered columnstore index keeps its column list when
        reflected and when compiled back to CREATE INDEX."""
        t1 = Table(
            "t",
            metadata,
            Column("id", Integer),
            Column("x", types.String(20)),
            Column("y", types.Integer),
        )
        Index("idx_x", t1.c.x, mssql_clustered=False, mssql_columnstore=True)
        Index("idx_y", t1.c.y)
        metadata.create_all(connection)
        ind = testing.db.dialect.get_indexes(connection, "t", None)
        for ix in ind:
            is_false(ix["dialect_options"]["mssql_clustered"])
            if ix["name"] == "idx_x":
                is_true(ix["dialect_options"]["mssql_columnstore"])
                eq_(ix["dialect_options"]["mssql_include"], [])
                eq_(ix["column_names"], ["x"])
            else:
                # plain index: no columnstore flag at all
                is_false("mssql_columnstore" in ix["dialect_options"])
        t2 = Table("t", MetaData(), autoload_with=connection)
        # sorted by name, idx_x comes first
        idx = list(sorted(t2.indexes, key=lambda idx: idx.name))[0]
        self.assert_compile(
            CreateIndex(idx),
            "CREATE NONCLUSTERED COLUMNSTORE INDEX idx_x ON t (x)",
        )
    @testing.only_if("mssql>=11")
    def test_index_reflection_colstore_nonclustered_none(
        self, metadata, connection
    ):
        """mssql_columnstore=True with no explicit mssql_clustered setting
        defaults to NONCLUSTERED; reflection reports mssql_clustered=False.
        """
        t1 = Table(
            "t",
            metadata,
            Column("id", Integer),
            Column("x", types.String(20)),
            Column("y", types.Integer),
        )
        Index("idx_x", t1.c.x, mssql_columnstore=True)
        Index("idx_y", t1.c.y)
        metadata.create_all(connection)
        ind = testing.db.dialect.get_indexes(connection, "t", None)
        for ix in ind:
            is_false(ix["dialect_options"]["mssql_clustered"])
            if ix["name"] == "idx_x":
                is_true(ix["dialect_options"]["mssql_columnstore"])
                eq_(ix["dialect_options"]["mssql_include"], [])
                eq_(ix["column_names"], ["x"])
            else:
                # plain index: no columnstore flag at all
                is_false("mssql_columnstore" in ix["dialect_options"])
        t2 = Table("t", MetaData(), autoload_with=connection)
        # sorted by name, idx_x comes first
        idx = list(sorted(t2.indexes, key=lambda idx: idx.name))[0]
        self.assert_compile(
            CreateIndex(idx),
            "CREATE NONCLUSTERED COLUMNSTORE INDEX idx_x ON t (x)",
        )
    @testing.only_if("mssql>=11")
    def test_index_reflection_colstore_nonclustered_multicol(
        self, metadata, connection
    ):
        """A multi-column nonclustered columnstore index preserves its
        column order (x, id) through reflection and compilation."""
        t1 = Table(
            "t",
            metadata,
            Column("id", Integer),
            Column("x", types.String(20)),
            Column("y", types.Integer),
        )
        Index(
            "idx_xid",
            t1.c.x,
            t1.c.id,
            mssql_clustered=False,
            mssql_columnstore=True,
        )
        Index("idx_y", t1.c.y)
        metadata.create_all(connection)
        ind = testing.db.dialect.get_indexes(connection, "t", None)
        for ix in ind:
            is_false(ix["dialect_options"]["mssql_clustered"])
            if ix["name"] == "idx_xid":
                is_true(ix["dialect_options"]["mssql_columnstore"])
                eq_(ix["dialect_options"]["mssql_include"], [])
                eq_(ix["column_names"], ["x", "id"])
            else:
                # plain index: no columnstore flag at all
                is_false("mssql_columnstore" in ix["dialect_options"])
        t2 = Table("t", MetaData(), autoload_with=connection)
        # sorted by name, idx_xid comes first
        idx = list(sorted(t2.indexes, key=lambda idx: idx.name))[0]
        self.assert_compile(
            CreateIndex(idx),
            "CREATE NONCLUSTERED COLUMNSTORE INDEX idx_xid ON t (x, id)",
        )
def test_primary_key_reflection_clustered(self, metadata, connection):
"""
A primary key will be clustered by default if no other clustered index
exists.
When reflected back, mssql_clustered=True should be present.
"""
t1 = Table(
"t",
metadata,
Column("id", Integer),
Column("x", types.String(20)),
Column("y", types.Integer),
)
PrimaryKeyConstraint(t1.c.id, name="pk_t")
metadata.create_all(connection)
pk_reflect = testing.db.dialect.get_pk_constraint(
connection, "t", None
)
assert pk_reflect["dialect_options"]["mssql_clustered"] == True
def test_primary_key_reflection_nonclustered(self, metadata, connection):
"""
Nonclustered primary key should include mssql_clustered=False
when reflected back
"""
t1 = Table(
"t",
metadata,
Column("id", Integer),
Column("x", types.String(20)),
Column("y", types.Integer),
)
PrimaryKeyConstraint(t1.c.id, name="pk_t", mssql_clustered=False)
metadata.create_all(connection)
pk_reflect = testing.db.dialect.get_pk_constraint(
connection, "t", None
)
assert pk_reflect["dialect_options"]["mssql_clustered"] == False
    def test_max_ident_in_varchar_not_present(self, metadata, connection):
        """test [ticket:3504].
        Here we are testing not just that the "max" token comes back
        as None, but also that these types accept "max" as the value
        of "length" on construction, which isn't a directly documented
        pattern however is likely in common use.
        """
        Table(
            "t",
            metadata,
            Column("t1", types.String),
            Column("t2", types.Text("max")),
            Column("t3", types.Text("max")),
            Column("t4", types.LargeBinary("max")),
            Column("t5", types.VARBINARY("max")),
        )
        metadata.create_all(connection)
        for col in inspect(connection).get_columns("t"):
            # reflected length is None, but the compiled DDL still
            # renders the "max" token
            is_(col["type"].length, None)
            in_("max", str(col["type"].compile(dialect=connection.dialect)))
    @testing.fixture
    def comment_table(self, metadata):
        """Create "tbl_with_comments" carrying non-ASCII table and column
        comments, for the comment reflection tests below."""
        Table(
            "tbl_with_comments",
            metadata,
            Column(
                "id",
                types.Integer,
                primary_key=True,
                comment="pk comment 🔑",
            ),
            Column("no_comment", types.Integer),
            Column(
                "has_comment",
                types.String(20),
                comment="has the comment § méil 📧",
            ),
            comment="table comment çòé 🐍",
        )
        metadata.create_all(testing.db)
    def test_comments(self, connection, comment_table):
        """Table and column comments, including non-ASCII text, reflect
        intact; a column without a comment reports None."""
        insp = inspect(connection)
        eq_(
            insp.get_table_comment("tbl_with_comments"),
            {"text": "table comment çòé 🐍"},
        )
        cols = {
            col["name"]: col["comment"]
            for col in insp.get_columns("tbl_with_comments")
        }
        eq_(
            cols,
            {
                "id": "pk comment 🔑",
                "no_comment": None,
                "has_comment": "has the comment § méil 📧",
            },
        )
    def test_comments_not_supported(self, testing_engine, comment_table):
        """With supports_comments=False, get_table_comment() raises
        NotImplementedError while column comments still reflect."""
        eng = testing_engine(options={"supports_comments": False})
        insp = inspect(eng)
        with expect_raises_message(
            NotImplementedError,
            "Can't get table comments on current SQL Server version in use",
        ):
            insp.get_table_comment("tbl_with_comments")
        # currently, column comments still reflect normally since we
        # aren't using an fn/sp for that
        cols = {
            col["name"]: col["comment"]
            for col in insp.get_columns("tbl_with_comments")
        }
        eq_(
            cols,
            {
                "id": "pk comment 🔑",
                "no_comment": None,
                "has_comment": "has the comment § méil 📧",
            },
        )
    def test_comments_with_dropped_column(self, metadata, connection):
        """test issue #12654

        After a column is dropped, the remaining columns must still map
        to their own comments (comments must not shift to the wrong
        column).
        """
        Table(
            "tbl_with_comments",
            metadata,
            Column(
                "id", types.Integer, primary_key=True, comment="pk comment"
            ),
            Column("foobar", Integer, comment="comment_foobar"),
            Column("foo", Integer, comment="comment_foo"),
            Column(
                "bar",
                Integer,
                comment="comment_bar",
            ),
        )
        metadata.create_all(connection)
        insp = inspect(connection)
        eq_(
            {
                c["name"]: c["comment"]
                for c in insp.get_columns("tbl_with_comments")
            },
            {
                "id": "pk comment",
                "foobar": "comment_foobar",
                "foo": "comment_foo",
                "bar": "comment_bar",
            },
        )
        # drop the middle column and verify the mapping stays correct
        connection.exec_driver_sql(
            "ALTER TABLE [tbl_with_comments] DROP COLUMN [foobar]"
        )
        insp = inspect(connection)
        eq_(
            {
                c["name"]: c["comment"]
                for c in insp.get_columns("tbl_with_comments")
            },
            {
                "id": "pk comment",
                "foo": "comment_foo",
                "bar": "comment_bar",
            },
        )
| ReflectionTest |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/definitions_tests/module_loader_tests/asset_package_with_cacheable/__init__.py | {
"start": 268,
"end": 1139
} | class ____(CacheableAssetsDefinition):
def compute_cacheable_data(self):
return [
AssetsDefinitionCacheableData(
keys_by_input_name={}, keys_by_output_name={"result": dg.AssetKey(self.unique_id)}
)
]
def build_definitions(self, data):
@dg.op
def my_op():
return 1
return [
AssetsDefinition.from_op(
my_op,
keys_by_input_name=cd.keys_by_input_name,
keys_by_output_name=cd.keys_by_output_name,
)
for cd in data
]
x = MyCacheableAssetsDefinition("foo")
def make_list_of_cacheable_assets():
return [MyCacheableAssetsDefinition("abc"), MyCacheableAssetsDefinition("def")]
list_of_assets_and_source_assets = [
*make_list_of_cacheable_assets(),
]
| MyCacheableAssetsDefinition |
python | pyqtgraph__pyqtgraph | pyqtgraph/parametertree/parameterTypes/basetypes.py | {
"start": 9120,
"end": 9380
} | class ____(QtCore.QObject):
def __init__(self, qobj, callback):
QtCore.QObject.__init__(self)
self.callback = callback
qobj.installEventFilter(self)
def eventFilter(self, obj, ev):
return self.callback(obj, ev)
| EventProxy |
python | ZoranPandovski__al-go-rithms | games/Python/Blackjack.py | {
"start": 1820,
"end": 6313
} | class ____:
def __init__(self):
self.total = 100 # This can be set to a default value or supplied by a user input
self.bet = 0
def win_bet(self):
self.total += self.bet
def lose_bet(self):
self.total -= self.bet
# In[6]:
def take_bet(chips):
while True:
try:
chips.bet = int(input('How many chips would you like to bet? '))
except ValueError:
print('Sorry, a bet must be an integer!')
else:
if chips.bet > chips.total:
print("Sorry, your bet can't exceed",chips.total)
else:
break
# In[9]:
def hit(deck,hand):
hand.add_card(deck.deal())
hand.adjust_for_ace()
# In[10]:
def show_some(player,dealer):
print("\nDealer's Hand:")
print(" <card hidden>")
print('',dealer.cards[1])
print("\nPlayer's Hand:", *player.cards, sep='\n ')
def show_all(player,dealer):
print("\nDealer's Hand:", *dealer.cards, sep='\n ')
print("Dealer's Hand =",dealer.value)
print("\nPlayer's Hand:", *player.cards, sep='\n ')
print("Player's Hand =",player.value)
# In[11]:
def hit_or_stand(deck,hand):
global playing # to control an upcoming while loop
while True:
x = input("Would you like to Hit or Stand? Enter 'h' or 's' ")
if x[0].lower() == 'h':
hit(deck,hand) # hit() function defined above
elif x[0].lower() == 's':
print("Player stands. Dealer is playing.")
playing = False
else:
print("Sorry, please try again.")
continue
break
# In[12]:
def player_busts(player,dealer,chips):
print("Player busts!")
chips.lose_bet()
def player_wins(player,dealer,chips):
print("Player wins!")
chips.win_bet()
def dealer_busts(player,dealer,chips):
print("Dealer busts!")
chips.win_bet()
def dealer_wins(player,dealer,chips):
print("Dealer wins!")
chips.lose_bet()
def push(player,dealer):
print("Dealer and Player tie! It's a push.")
# In[ ]:
while True:
# Print an opening statement
print('Welcome to BlackJack! Get as close to 21 as you can without going over!\n Dealer hits until she reaches 17. Aces count as 1 or 11.')
# Create & shuffle the deck, deal two cards to each player
deck = Deck()
deck.shuffle()
player_hand = Hand()
player_hand.add_card(deck.deal())
player_hand.add_card(deck.deal())
dealer_hand = Hand()
dealer_hand.add_card(deck.deal())
dealer_hand.add_card(deck.deal())
# Set up the Player's chips
player_chips = Chips() # remember the default value is 100
# Prompt the Player for their bet
take_bet(player_chips)
# Show cards (but keep one dealer card hidden)
show_some(player_hand,dealer_hand)
while playing: # recall this variable from our hit_or_stand function
# Prompt for Player to Hit or Stand
hit_or_stand(deck,player_hand)
# Show cards (but keep one dealer card hidden)
show_some(player_hand,dealer_hand)
# If player's hand exceeds 21, run player_busts() and break out of loop
if player_hand.value > 21:
player_busts(player_hand,dealer_hand,player_chips)
break
# If Player hasn't busted, play Dealer's hand until Dealer reaches 17
if player_hand.value <= 21:
while dealer_hand.value < 17:
hit(deck,dealer_hand)
# Show all cards
show_all(player_hand,dealer_hand)
# Run different winning scenarios
if dealer_hand.value > 21:
dealer_busts(player_hand,dealer_hand,player_chips)
elif dealer_hand.value > player_hand.value:
dealer_wins(player_hand,dealer_hand,player_chips)
elif dealer_hand.value < player_hand.value:
player_wins(player_hand,dealer_hand,player_chips)
else:
push(player_hand,dealer_hand)
# Inform Player of their chips total
print("\nPlayer's winnings stand at",player_chips.total)
# Ask to play again
new_game = input("Would you like to play another hand? Enter 'y' or 'n' ")
if new_game[0].lower()=='y':
playing=True
continue
else:
print("Thanks for playing!")
break
# In[ ]:
| Chips |
python | kamyu104__LeetCode-Solutions | Python/minimum-time-to-break-locks-i.py | {
"start": 2165,
"end": 2829
} | class ____(object):
def findMinimumTime(self, strength, K):
"""
:type strength: List[int]
:type K: int
:rtype: int
"""
def ceil_divide(a, b):
return (a+b-1)//b
def popcount(x):
return bin(x).count('1')
dp = [float('inf')]*(1<<len(strength))
dp[0] = 0
for mask in xrange(1, len(dp)):
x = 1+(popcount(mask)-1)*K
for i in xrange(len(strength)):
if not (mask&(1<<i)):
continue
dp[mask] = min(dp[mask], dp[mask^(1<<i)]+ceil_divide(strength[i], x))
return dp[-1]
| Solution2 |
python | huggingface__transformers | src/transformers/models/arcee/modeling_arcee.py | {
"start": 2758,
"end": 3481
} | class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
ArceeRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
| ArceeRMSNorm |
python | huggingface__transformers | src/transformers/models/tapas/modeling_tapas.py | {
"start": 59186,
"end": 98368
} | class ____(IndexMap):
"""The product of two indices."""
def __init__(self, outer_index, inner_index):
"""
Combines indices i and j into pairs (i, j). The result is an index where each segment (i, j) is the
intersection of segments i and j. For example if the inputs represent table cells indexed by respectively rows
and columns the output will be a table indexed by (row, column) pairs, i.e. by cell. The implementation
combines indices {0, .., n - 1} and {0, .., m - 1} into {0, .., nm - 1}. The output has *num_segments* equal to
*outer_index.num_segments* * *inner_index.num_segments*
Args:
outer_index (`IndexMap`):
IndexMap.
inner_index (`IndexMap`):
IndexMap, must have the same shape as *outer_index*.
"""
if outer_index.batch_dims != inner_index.batch_dims:
raise ValueError("outer_index.batch_dims and inner_index.batch_dims must be the same.")
super().__init__(
indices=(inner_index.indices + outer_index.indices * inner_index.num_segments),
num_segments=inner_index.num_segments * outer_index.num_segments,
batch_dims=inner_index.batch_dims,
)
self.outer_index = outer_index
self.inner_index = inner_index
def project_outer(self, index):
"""Projects an index with the same index set onto the outer components."""
indices = torch.div(index.indices, self.inner_index.num_segments, rounding_mode="floor").type(torch.long)
return IndexMap(indices=indices, num_segments=self.outer_index.num_segments, batch_dims=index.batch_dims)
def project_inner(self, index):
"""Projects an index with the same index set onto the inner components."""
return IndexMap(
indices=torch.fmod(index.indices, self.inner_index.num_segments)
.type(torch.float)
.floor()
.type(torch.long),
num_segments=self.inner_index.num_segments,
batch_dims=index.batch_dims,
)
def gather(values, index, name="segmented_gather"):
"""
Gathers from *values* using the index map. For each element in the domain of the index map this operation looks up
a value for that index in *values*. Two elements from the same segment always get assigned the same value.
Args:
values (`torch.Tensor` of shape (B1, ..., Bn, num_segments, V1, ...)):
Tensor with segment values.
index (`IndexMap` of shape (B1, ..., Bn, I1, ..., Ik)):
IndexMap.
name (`str`, *optional*, defaults to 'segmented_gather'):
Name for the operation. Currently not used
Returns:
`tuple(torch.Tensor)`: Tensor of shape (B1, ..., Bn, I1, ..., Ik, V1, ...) with the gathered values.
"""
indices = index.indices
# first, check whether the indices of the index represent scalar values (i.e. not vectorized)
if len(values.shape[index.batch_dims :]) < 2:
return torch.gather(
values,
index.batch_dims,
indices.view(
values.size()[0], -1
), # torch.gather expects index to have the same number of dimensions as values
).view(indices.size())
else:
# this means we have a vectorized version
# we have to adjust the index
indices = indices.unsqueeze(-1).expand(values.shape)
return torch.gather(values, index.batch_dims, indices)
def flatten(index, name="segmented_flatten"):
"""
Flattens a batched index map (which is typically of shape batch_size, seq_length) to a 1d index map. This operation
relabels the segments to keep batch elements distinct. The k-th batch element will have indices shifted by
*num_segments* * (k - 1). The result is a tensor with *num_segments* multiplied by the number of elements in the
batch.
Args:
index (`IndexMap`):
IndexMap to flatten.
name (`str`, *optional*, defaults to 'segmented_flatten'):
Name for the operation. Currently not used
Returns:
(`IndexMap`): The flattened IndexMap.
"""
# first, get batch_size as scalar tensor
batch_size = torch.prod(torch.tensor(list(index.batch_shape())))
# next, create offset as 1-D tensor of length batch_size,
# and multiply element-wise by num segments (to offset different elements in the batch) e.g. if batch size is 2: [0, 64]
offset = torch.arange(start=0, end=batch_size, device=index.num_segments.device) * index.num_segments
offset = offset.view(index.batch_shape())
for _ in range(index.batch_dims, len(index.indices.size())): # typically range(1,2)
offset = offset.unsqueeze(-1)
indices = offset + index.indices
return IndexMap(indices=indices.view(-1), num_segments=index.num_segments * batch_size, batch_dims=0)
def range_index_map(batch_shape, num_segments, name="range_index_map"):
"""
Constructs an index map equal to range(num_segments).
Args:
batch_shape (`torch.Size`):
Batch shape
num_segments (`int`):
Number of segments
name (`str`, *optional*, defaults to 'range_index_map'):
Name for the operation. Currently not used
Returns:
(`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments).
"""
device = num_segments.device if torch.is_tensor(num_segments) else "cpu"
batch_shape = torch.as_tensor(
batch_shape, dtype=torch.long, device=device
) # create a rank 1 tensor vector containing batch_shape (e.g. [2])
assert len(batch_shape.size()) == 1
num_segments = torch.as_tensor(
num_segments, device=device
) # create a rank 0 tensor (scalar) containing num_segments (e.g. 64)
assert len(num_segments.size()) == 0
indices = torch.arange(
start=0, end=num_segments, device=num_segments.device
) # create a rank 1 vector with num_segments elements
new_tensor = torch.cat(
[torch.ones_like(batch_shape, dtype=torch.long, device=num_segments.device), num_segments.unsqueeze(dim=0)],
dim=0,
)
# new_tensor is just a vector of [1 64] for example (assuming only 1 batch dimension)
new_shape = [int(x) for x in new_tensor.tolist()]
indices = indices.view(new_shape)
multiples = torch.cat([batch_shape, torch.as_tensor([1], device=device)], dim=0)
indices = indices.repeat(multiples.tolist())
# equivalent (in Numpy:)
# indices = torch.as_tensor(np.tile(indices.numpy(), multiples.tolist()))
return IndexMap(indices=indices, num_segments=num_segments, batch_dims=list(batch_shape.size())[0])
def _segment_reduce(values, index, segment_reduce_fn, name):
"""
Applies a segment reduction segment-wise.
Args:
values (`torch.Tensor`):
Tensor with segment values.
index (`IndexMap`):
IndexMap.
segment_reduce_fn (`str`):
Name for the reduce operation. One of "sum", "mean", "max" or "min".
name (`str`):
Name for the operation. Currently not used
Returns:
(`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments).
"""
# Flatten the batch dimensions, as segments ops (scatter) do not support batching.
# However if `values` has extra dimensions to the right keep them
# unflattened. Segmented ops support vector-valued operations.
flat_index = flatten(index)
vector_shape = values.size()[len(index.indices.size()) :] # torch.Size object
flattened_shape = torch.cat(
[torch.as_tensor([-1], dtype=torch.long), torch.as_tensor(vector_shape, dtype=torch.long)], dim=0
)
# changed "view" by "reshape" in the following line
flat_values = values.reshape(flattened_shape.tolist())
out = torch.zeros(int(flat_index.num_segments), dtype=torch.float, device=flat_values.device)
segment_means = out.scatter_reduce(
dim=0, index=flat_index.indices.long(), src=flat_values.float(), reduce=segment_reduce_fn, include_self=False
)
device = index.num_segments.device
# Unflatten the values.
new_shape = torch.cat(
[
torch.as_tensor(index.batch_shape(), dtype=torch.long, device=device),
torch.as_tensor([index.num_segments], dtype=torch.long, device=device),
torch.as_tensor(vector_shape, dtype=torch.long, device=device),
],
dim=0,
)
output_values = segment_means.clone().view(new_shape.tolist()).to(values.dtype)
output_index = range_index_map(index.batch_shape(), index.num_segments)
return output_values, output_index
def reduce_sum(values, index, name="segmented_reduce_sum"):
    """
    Sums a tensor over its segments.

    Outputs 0 for empty segments.

    This operations computes the sum over segments, with support for:

        - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices.
        - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be a sum of
          vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation.

    Args:
        values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]):
            Tensor containing the values of which the sum must be taken segment-wise.
        index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].):
            Index defining the segments.
        name (`str`, *optional*, defaults to 'segmented_reduce_sum'):
            Name for the operation. Currently not used

    Returns:
        output_values (`torch.Tensor` of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the
        output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments].
    """
    return _segment_reduce(values, index, "sum", name)
def reduce_mean(values, index, name="segmented_reduce_mean"):
    """
    Averages a tensor over its segments.

    Outputs 0 for empty segments.

    This operations computes the mean over segments, with support for:

        - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices.
        - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be a mean of
          vectors rather than scalars.

    Only the middle dimensions [I1, ..., Ik] are reduced by the operation.

    Args:
        values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]):
            Tensor containing the values of which the mean must be taken segment-wise.
        index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].):
            Index defining the segments.
        name (`str`, *optional*, defaults to 'segmented_reduce_mean'):
            Name for the operation. Currently not used

    Returns:
        output_values (`torch.Tensor` of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the
        output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments].
    """
    return _segment_reduce(values, index, "mean", name)
def reduce_max(values, index, name="segmented_reduce_max"):
    """
    Computes the maximum over segments.

    This operation computes the maximum over segments, with support for:

        - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices.
        - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be an element-wise
          maximum of vectors rather than scalars.

    Only the middle dimensions [I1, ..., Ik] are reduced by the operation.

    Args:
        values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]):
            Tensor containing the values of which the max must be taken segment-wise.
        index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].):
            Index defining the segments.
        name (`str`, *optional*, defaults to 'segmented_reduce_max'):
            Name for the operation. Currently not used

    Returns:
        output_values (`torch.Tensor` of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the
        output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments].
    """
    # "amax" is the scatter_reduce name for an element-wise maximum reduction.
    return _segment_reduce(values, index, "amax", name)
def reduce_min(values, index, name="segmented_reduce_min"):
    """
    Computes the minimum over segments.

    This operations computes the minimum over segments, with support for:

        - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices.
        - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be an element-wise
          minimum of vectors rather than scalars.

    Only the middle dimensions [I1, ..., Ik] are reduced by the operation.

    Args:
        values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]):
            Tensor containing the values of which the min must be taken segment-wise.
        index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].):
            Index defining the segments.
        name (`str`, *optional*, defaults to 'segmented_reduce_min'):
            Name for the operation. Currently not used

    Returns:
        output_values (`torch.Tensor` of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the
        output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments].
    """
    # "amin" is the scatter_reduce name for an element-wise minimum reduction.
    return _segment_reduce(values, index, "amin", name)
# End of everything related to segmented tensors
def compute_column_logits(
    sequence_output, column_output_weights, column_output_bias, cell_index, cell_mask, allow_empty_column_selection
):
    """
    Computes the column logits.

    Args:
        sequence_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model.
        column_output_weights (`torch.FloatTensor` of shape `(hidden_size)`):
            Weights of the linear layer for column selection.
        column_output_bias (`torch.FloatTensor` of shape `()`):
            Bias of the linear layer for column selection.
        cell_index (`ProductIndexMap`):
            Index that groups tokens into cells.
        cell_mask (`torch.FloatTensor` of shape `(batch_size, max_num_rows * max_num_cols)`):
            Mask for cells that exist in the table (i.e. that are not padding).
        allow_empty_column_selection (`bool`):
            Whether to allow not to select any column

    Returns:
        column_logits (`torch.FloatTensor`of shape `(batch_size, max_num_cols)`): Tensor containing the column logits
        for every example in the batch.
    """
    # First, compute the token logits (batch_size, seq_len) - without temperature
    token_logits = torch.einsum("bsj,j->bs", sequence_output, column_output_weights) + column_output_bias

    # Next, average the logits per cell (batch_size, max_num_cols*max_num_rows)
    cell_logits, cell_logits_index = reduce_mean(token_logits, cell_index)

    # Finally, average the logits per column (batch_size, max_num_cols):
    # sum the (masked) cell logits per column and divide by the number of real cells.
    column_index = cell_index.project_inner(cell_logits_index)
    column_logits, out_index = reduce_sum(cell_logits * cell_mask, column_index)
    cell_count, _ = reduce_sum(cell_mask, column_index)
    # Epsilon guards against division by zero for columns without real cells.
    column_logits /= cell_count + EPSILON_ZERO_DIVISION

    # Mask columns that do not appear in the example.
    # cell_count < 0.5 means "no real cells"; column id 0 is the special
    # "select nothing" column and is deliberately excluded from this mask.
    is_padding = torch.logical_and(cell_count < 0.5, ~torch.eq(out_index.indices, 0))
    column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * torch.as_tensor(
        is_padding, dtype=torch.float32, device=is_padding.device
    )

    # Optionally also push the "select nothing" column (id 0) down to ~log(0).
    if not allow_empty_column_selection:
        column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * torch.as_tensor(
            torch.eq(out_index.indices, 0), dtype=torch.float32, device=out_index.indices.device
        )

    return column_logits
def _single_column_cell_selection_loss(token_logits, column_logits, labels, cell_index, col_index, cell_mask):
    """
    Computes the loss for cell selection constrained to a single column. The loss is a hierarchical log-likelihood. The
    model first predicts a column and then selects cells within that column (conditioned on the column). Cells outside
    the selected column are never selected.

    Args:
        token_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Tensor containing the logits per token.
        column_logits (`torch.FloatTensor` of shape `(batch_size, max_num_cols)`):
            Tensor containing the logits per column.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Labels per token.
        cell_index (`ProductIndexMap`):
            Index that groups tokens into cells.
        col_index (`IndexMap`):
            Index that groups tokens into columns.
        cell_mask (`torch.FloatTensor` of shape `(batch_size, max_num_rows * max_num_cols)`):
            Mask for cells that exist in the table (i.e. that are not padding).

    Returns:
        selection_loss_per_example (`torch.FloatTensor` of shape `(batch_size,)`): Loss for each example. logits
        (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): New logits which are only allowed to select
        cells in a single column. Logits outside of the most likely column according to *column_logits* will be set to
        a very low value (such that the probabilities are 0).
    """
    # Part 1: column loss

    # First find the column we should select. We use the column with maximum number of selected cells.
    labels_per_column, _ = reduce_sum(torch.as_tensor(labels, dtype=torch.float32, device=labels.device), col_index)
    # shape of labels_per_column is (batch_size, max_num_cols). It contains the number of label ids for every column, for every example
    column_label = torch.argmax(labels_per_column, dim=-1)  # shape (batch_size,)
    # Check if there are no selected cells in the column. In that case the model
    # should predict the special column id 0, which means "select nothing".
    no_cell_selected = torch.eq(
        torch.max(labels_per_column, dim=-1)[0], 0
    )  # no_cell_selected is of shape (batch_size,) and equals True
    # if an example of the batch has no cells selected (i.e. if there are no labels set to 1 for that example)
    column_label = torch.where(
        no_cell_selected.view(column_label.size()), torch.zeros_like(column_label), column_label
    )

    # Column loss is the negative log-likelihood of the target column.
    column_dist = torch.distributions.Categorical(logits=column_logits)  # shape (batch_size, max_num_cols)
    column_loss_per_example = -column_dist.log_prob(column_label)

    # Part 2: cell loss

    # Reduce the labels and logits to per-cell from per-token.
    # logits_per_cell: shape (batch_size, max_num_rows*max_num_cols) i.e. (batch_size, 64*32)
    logits_per_cell, _ = reduce_mean(token_logits, cell_index)
    # labels_per_cell: shape (batch_size, 64*32), indicating whether each cell should be selected (1) or not (0)
    labels_per_cell, labels_index = reduce_max(
        torch.as_tensor(labels, dtype=torch.long, device=labels.device), cell_index
    )

    # Mask for the selected column.
    # column_id_for_cells: shape (batch_size, 64*32), indicating to which column each cell belongs
    column_id_for_cells = cell_index.project_inner(labels_index).indices
    # column_mask: shape (batch_size, 64*32), equal to 1 if cell belongs to column to be selected
    column_mask = torch.as_tensor(
        torch.eq(column_id_for_cells, torch.unsqueeze(column_label, dim=-1)),
        dtype=torch.float32,
        device=cell_mask.device,
    )

    # Compute the log-likelihood for cells, but only for the selected column.
    cell_dist = torch.distributions.Bernoulli(logits=logits_per_cell)  # shape (batch_size, 64*32)
    cell_log_prob = cell_dist.log_prob(labels_per_cell.type(torch.float32))  # shape(batch_size, 64*32)

    cell_loss = -torch.sum(cell_log_prob * column_mask * cell_mask, dim=1)
    # We need to normalize the loss by the number of cells in the column.
    cell_loss /= torch.sum(column_mask * cell_mask, dim=1) + EPSILON_ZERO_DIVISION

    # Cell loss only contributes for examples that actually select cells.
    selection_loss_per_example = column_loss_per_example
    selection_loss_per_example += torch.where(
        no_cell_selected.view(selection_loss_per_example.size()),
        torch.zeros_like(selection_loss_per_example),
        cell_loss,
    )

    # Set the probs outside the selected column (selected by the *model*)
    # to 0. This ensures backwards compatibility with models that select
    # cells from multiple columns.
    selected_column_id = torch.as_tensor(
        torch.argmax(column_logits, dim=-1), dtype=torch.long, device=column_logits.device
    )  # shape (batch_size,)

    # selected_column_mask: shape (batch_size, 64*32), equal to 1 if cell belongs to column selected by the model
    selected_column_mask = torch.as_tensor(
        torch.eq(column_id_for_cells, torch.unsqueeze(selected_column_id, dim=-1)),
        dtype=torch.float32,
        device=selected_column_id.device,
    )
    # Never select cells with the special column id 0.
    selected_column_mask = torch.where(
        torch.eq(column_id_for_cells, 0).view(selected_column_mask.size()),
        torch.zeros_like(selected_column_mask),
        selected_column_mask,
    )
    # Push logits of cells outside the model-selected column to ~log(0),
    # then scatter the per-cell logits back to per-token logits.
    new_logits_per_cell = logits_per_cell + CLOSE_ENOUGH_TO_LOG_ZERO * (1.0 - cell_mask * selected_column_mask)
    logits = gather(new_logits_per_cell, cell_index)

    return selection_loss_per_example, logits
def compute_token_logits(sequence_output, temperature, output_weights, output_bias):
    """
    Computes logits per token.

    Args:
        sequence_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model.
        temperature (`float`):
            Temperature for the Bernoulli distribution.
        output_weights (`torch.FloatTensor` of shape `(hidden_size,)`):
            Weights of the linear layer for cell selection.
        output_bias (`torch.FloatTensor` of shape `()`):
            Bias of the linear layer for cell selection

    Returns:
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Logits per token.
    """
    # Project every hidden state onto the selection weights, add the bias,
    # then soften/sharpen the scores with the temperature.
    token_scores = torch.einsum("bsj,j->bs", sequence_output, output_weights)
    return (token_scores + output_bias) / temperature
def _calculate_aggregate_mask(answer, pooled_output, cell_selection_preference, labels, aggregation_classifier):
"""
Finds examples where the model should select cells with no aggregation.
Returns a mask that determines for which examples should the model select answers directly from the table, without
any aggregation function. If the answer is a piece of text the case is unambiguous as aggregation functions only
apply to numbers. If the answer is a number but does not appear in the table then we must use some aggregation
case. The ambiguous case is when the answer is a number that also appears in the table. In this case we use the
aggregation function probabilities predicted by the model to decide whether to select or aggregate. The threshold
for this is a hyperparameter *cell_selection_preference*
Args:
answer (`torch.FloatTensor` of shape `(batch_size, )`):
Answer for every example in the batch. Nan if there is no scalar answer.
pooled_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
Output of the pooler (BertPooler) on top of the encoder layer.
cell_selection_preference (`float`):
Preference for cell selection in ambiguous cases.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Labels per token. aggregation_classifier (`torch.nn.Linear`): Aggregation head
Returns:
aggregate_mask (`torch.FloatTensor` of shape `(batch_size,)`): A mask set to 1 for examples that should use
aggregation functions.
"""
# torch.FloatTensor(batch_size,)
aggregate_mask_init = torch.logical_not(torch.isnan(answer)).type(torch.FloatTensor).to(answer.device)
logits_aggregation = aggregation_classifier(pooled_output)
dist_aggregation = torch.distributions.categorical.Categorical(logits=logits_aggregation)
# Index 0 corresponds to "no aggregation".
aggregation_ops_total_mass = torch.sum(dist_aggregation.probs[:, 1:], dim=1)
# Cell selection examples according to current model.
is_pred_cell_selection = aggregation_ops_total_mass <= cell_selection_preference
# Examples with non-empty cell selection supervision.
is_cell_supervision_available = torch.sum(labels, dim=1) > 0
aggregate_mask = torch.where(
torch.logical_and(is_pred_cell_selection, is_cell_supervision_available).view(aggregate_mask_init.size()),
torch.zeros_like(aggregate_mask_init, dtype=torch.float32),
aggregate_mask_init,
)
aggregate_mask = aggregate_mask.detach()
return aggregate_mask
def _calculate_aggregation_loss_known(
logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels
):
"""
Calculates aggregation loss when its type is known during training.
In the weakly supervised setting, the only known information is that for cell selection examples, "no aggregation"
should be predicted. For other examples (those that require aggregation), no loss is accumulated. In the setting
where aggregation type is always known, standard cross entropy loss is accumulated for all examples
Args:
logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):
Logits per aggregation operation.
aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`):
A mask set to 1 for examples that should use aggregation functions.
aggregation_labels (`torch.LongTensor` of shape `(batch_size, )`):
Aggregation function id for every example in the batch.
use_answer_as_supervision (`bool`, *optional*):
Whether to use the answer as the only supervision for aggregation examples.
num_aggregation_labels (`int`, *optional*, defaults to 0):
The number of aggregation operators to predict.
Returns:
aggregation_loss_known (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss (when its type is known
during training) per example.
"""
if use_answer_as_supervision:
# Prepare "no aggregation" targets for cell selection examples.
target_aggregation = torch.zeros_like(aggregate_mask, dtype=torch.long)
else:
# Use aggregation supervision as the target.
target_aggregation = aggregation_labels
one_hot_labels = nn.functional.one_hot(target_aggregation, num_classes=num_aggregation_labels).type(torch.float32)
log_probs = nn.functional.log_softmax(logits_aggregation, dim=-1)
# torch.FloatTensor[batch_size]
per_example_aggregation_intermediate = -torch.sum(one_hot_labels * log_probs, dim=-1)
if use_answer_as_supervision:
# Accumulate loss only for examples requiring cell selection
# (no aggregation).
return per_example_aggregation_intermediate * (1 - aggregate_mask)
else:
return per_example_aggregation_intermediate
def _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask):
"""
Calculates aggregation loss in the case of answer supervision.
Args:
logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):
Logits per aggregation operation.
aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`):
A mask set to 1 for examples that should use aggregation functions
Returns:
aggregation_loss_unknown (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss (in case of answer
supervision) per example.
"""
dist_aggregation = torch.distributions.categorical.Categorical(logits=logits_aggregation)
# Index 0 corresponds to "no aggregation".
aggregation_ops_total_mass = torch.sum(dist_aggregation.probs[:, 1:], dim=1)
# Predict some aggregation in case of an answer that needs aggregation.
# This increases the probability of all aggregation functions, in a way
# similar to MML, but without considering whether the function gives the
# correct answer.
return -torch.log(aggregation_ops_total_mass) * aggregate_mask
def _calculate_aggregation_loss(
    logits_aggregation,
    aggregate_mask,
    aggregation_labels,
    use_answer_as_supervision,
    num_aggregation_labels,
    aggregation_loss_weight,
):
    """
    Calculates the aggregation loss per example.

    Combines the "known" aggregation loss with, in the weakly supervised setting,
    the answer-supervised ("unknown") aggregation loss, then scales the total by
    ``aggregation_loss_weight``.

    Args:
        logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):
            Logits per aggregation operation.
        aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`):
            A mask set to 1 for examples that should use aggregation functions.
        aggregation_labels (`torch.LongTensor` of shape `(batch_size, )`):
            Aggregation function id for every example in the batch.
        use_answer_as_supervision (`bool`, *optional*):
            Whether to use the answer as the only supervision for aggregation examples.
        num_aggregation_labels (`int`, *optional*, defaults to 0):
            The number of aggregation operators to predict.
        aggregation_loss_weight (`float`, *optional*, defaults to 1.0):
            Importance weight for the aggregation loss.

    Returns:
        aggregation_loss (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss per example.
    """
    loss = _calculate_aggregation_loss_known(
        logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels
    )

    if use_answer_as_supervision:
        # Add aggregation loss for numeric answers that need aggregation.
        loss = loss + _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask)

    return aggregation_loss_weight * loss
def _calculate_expected_result(
    dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config
):
    """
    Calculates the expected result given cell and aggregation probabilities.

    Args:
        dist_per_cell (`torch.distributions.Bernoulli`):
            Cell selection distribution for each cell.
        numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
            Numeric values of every token. Nan for tokens which are not numeric values.
        numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
            Scale of the numeric values of every token.
        input_mask_float (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
            Mask for the table, without question tokens and table headers.
        logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):
            Logits per aggregation operation.
        config ([`TapasConfig`]):
            Model configuration class with all the hyperparameters of the model

    Returns:
        expected_result (`torch.FloatTensor` of shape `(batch_size,)`): The expected result per example.
    """
    if config.use_gumbel_for_cells:
        # Differentiable (relaxed) sampling of cell selections during training.
        gumbel_dist = torch.distributions.RelaxedBernoulli(
            # The token logits where already divided by the temperature and used for
            # computing cell selection errors so we need to multiply it again here
            temperature=config.temperature,
            logits=dist_per_cell.logits * config.temperature,
        )
        scaled_probability_per_cell = gumbel_dist.sample()
    else:
        scaled_probability_per_cell = dist_per_cell.probs

    # <float32>[batch_size, seq_length]
    # Divide by the numeric value scale (tokens of a multi-token number share its
    # value) and zero out everything outside the table body.
    scaled_probability_per_cell = (scaled_probability_per_cell / numeric_values_scale) * input_mask_float
    count_result = torch.sum(scaled_probability_per_cell, dim=1)
    numeric_values_masked = torch.where(
        torch.isnan(numeric_values), torch.zeros_like(numeric_values), numeric_values
    )  # Mask non-numeric table values to zero.
    sum_result = torch.sum(scaled_probability_per_cell * numeric_values_masked, dim=1)
    avg_approximation = config.average_approximation_function
    if avg_approximation == AverageApproximationFunction.RATIO:
        # Simple ratio of expected sum over expected count.
        average_result = sum_result / (count_result + EPSILON_ZERO_DIVISION)
    elif avg_approximation == AverageApproximationFunction.FIRST_ORDER:
        # The sum of all probabilities except that correspond to other cells
        # Ex here stands for expectation, more explicitly the expectation of the sum of N-1 Bernoulli random variables plus
        # the constant 1, which is computed as adding all N expected values and subtracting the extra one. It corresponds to X_c
        # in Appendix D of the original TAPAS paper which is trying to approximate the average of a random set.
        ex = torch.sum(scaled_probability_per_cell, dim=1, keepdim=True) - scaled_probability_per_cell + 1
        average_result = torch.sum(numeric_values_masked * scaled_probability_per_cell / ex, dim=1)
    elif avg_approximation == AverageApproximationFunction.SECOND_ORDER:
        # The sum of all probabilities except that correspond to other cells
        ex = torch.sum(scaled_probability_per_cell, dim=1, keepdim=True) - scaled_probability_per_cell + 1
        pointwise_var = scaled_probability_per_cell * (1 - scaled_probability_per_cell)
        var = torch.sum(pointwise_var, dim=1, keepdim=True) - pointwise_var
        # Second-order correction term (see Appendix D of the TAPAS paper).
        multiplier = (var / torch.square(ex) + 1) / ex
        average_result = torch.sum(numeric_values_masked * scaled_probability_per_cell * multiplier, dim=1)
    else:
        raise ValueError(f"Invalid average_approximation_function: {config.average_approximation_function}")

    if config.use_gumbel_for_aggregation:
        # Relaxed (differentiable) sample over the aggregation ops, excluding
        # index 0 ("no aggregation").
        gumbel_dist = torch.distributions.RelaxedOneHotCategorical(
            config.aggregation_temperature, logits=logits_aggregation[:, 1:]
        )
        # <float32>[batch_size, num_aggregation_labels - 1]
        aggregation_op_only_probs = gumbel_dist.sample()
    else:
        # <float32>[batch_size, num_aggregation_labels - 1]
        aggregation_op_only_probs = nn.functional.softmax(
            logits_aggregation[:, 1:] / config.aggregation_temperature, dim=-1
        )

    # Weight the SUM / AVERAGE / COUNT estimates by the op probabilities.
    all_results = torch.cat(
        [
            torch.unsqueeze(sum_result, dim=1),
            torch.unsqueeze(average_result, dim=1),
            torch.unsqueeze(count_result, dim=1),
        ],
        dim=1,
    )

    expected_result = torch.sum(all_results * aggregation_op_only_probs, dim=1)
    return expected_result
# Hand-rolled Huber loss: kept because `nn.functional.huber_loss` only gained a
# custom `delta` parameter in later PyTorch releases — TODO confirm minimum
# supported torch version before replacing.
def huber_loss(input, target, delta: float = 1.0):
    """
    Element-wise Huber loss with a configurable ``delta`` threshold.

    Quadratic (0.5 * e**2) for absolute errors below ``delta``, linear
    (delta * e - 0.5 * delta**2) at or above it.
    """
    errors = torch.abs(input - target)  # shape (batch_size,)
    quadratic_branch = 0.5 * errors**2
    linear_branch = errors * delta - (0.5 * delta**2)
    return torch.where(errors < delta, quadratic_branch, linear_branch)
def _calculate_regression_loss(
    answer,
    aggregate_mask,
    dist_per_cell,
    numeric_values,
    numeric_values_scale,
    input_mask_float,
    logits_aggregation,
    config,
):
    """
    Calculates the regression loss per example.

    Args:
        answer (`torch.FloatTensor` of shape `(batch_size,)`):
            Answer for every example in the batch. Nan if there is no scalar answer.
        aggregate_mask (`torch.FloatTensor` of shape `(batch_size,)`):
            A mask set to 1 for examples that should use aggregation functions.
        dist_per_cell (`torch.distributions.Bernoulli`):
            Cell selection distribution for each cell.
        numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
            Numeric values of every token. Nan for tokens which are not numeric values.
        numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
            Scale of the numeric values of every token.
        input_mask_float (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
            Mask for the table, without question tokens and table headers.
        logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):
            Logits per aggregation operation.
        config ([`TapasConfig`]):
            Model configuration class with all the parameters of the model

    Returns:
        per_example_answer_loss_scaled (`torch.FloatTensor` of shape `(batch_size,)`): Scales answer loss for each
        example in the batch. large_answer_loss_mask (`torch.FloatTensor` of shape `(batch_size,)`): A mask which is 1
        for examples for which their answer loss is larger than the answer_loss_cutoff.
    """
    # float32 (batch_size,)
    expected_result = _calculate_expected_result(
        dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config
    )

    # float32 (batch_size,) - replace NaN (no scalar answer) by 0 so the Huber
    # loss stays finite; such examples are zeroed by aggregate_mask anyway.
    answer_masked = torch.where(torch.isnan(answer), torch.zeros_like(answer), answer)

    if config.use_normalized_answer_loss:
        # Normalize both sides by the larger magnitude (detached, so the
        # normalizer does not receive gradients).
        normalizer = (torch.max(torch.abs(expected_result), torch.abs(answer_masked)) + EPSILON_ZERO_DIVISION).detach()

        normalized_answer_masked = answer_masked / normalizer
        normalized_expected_result = expected_result / normalizer
        per_example_answer_loss = huber_loss(
            normalized_expected_result * aggregate_mask, normalized_answer_masked * aggregate_mask
        )
    else:
        per_example_answer_loss = huber_loss(
            expected_result * aggregate_mask, answer_masked * aggregate_mask, delta=config.huber_loss_delta
        )

    if config.answer_loss_cutoff is None:
        large_answer_loss_mask = torch.ones_like(per_example_answer_loss, dtype=torch.float32)
    else:
        # 0 where the answer loss exceeds the cutoff (those examples are
        # excluded from the total loss by the caller), 1 otherwise.
        large_answer_loss_mask = torch.where(
            per_example_answer_loss > config.answer_loss_cutoff,
            torch.zeros_like(per_example_answer_loss, dtype=torch.float32),
            torch.ones_like(per_example_answer_loss, dtype=torch.float32),
        )
    per_example_answer_loss_scaled = config.answer_loss_importance * (per_example_answer_loss * aggregate_mask)

    return per_example_answer_loss_scaled, large_answer_loss_mask
# Names re-exported as the public API of this module.
__all__ = [
    "TapasForMaskedLM",
    "TapasForQuestionAnswering",
    "TapasForSequenceClassification",
    "TapasModel",
    "TapasPreTrainedModel",
]
| ProductIndexMap |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_sqs.py | {
"start": 1408,
"end": 6032
} | class ____:
@pytest.fixture(autouse=True)
def _setup_test_cases(self):
self.default_op_kwargs = {
"task_id": "test_task",
"message_content": "hello",
"aws_conn_id": None,
"region_name": REGION_NAME,
}
self.sqs_client = SqsHook(aws_conn_id=None, region_name=REGION_NAME).conn
def test_init(self):
self.default_op_kwargs.pop("aws_conn_id", None)
self.default_op_kwargs.pop("region_name", None)
op = SqsPublishOperator(sqs_queue=QUEUE_NAME, **self.default_op_kwargs)
assert op.hook.aws_conn_id == "aws_default"
assert op.hook._region_name is None
assert op.hook._verify is None
assert op.hook._config is None
op = SqsPublishOperator(
sqs_queue=FIFO_QUEUE_NAME,
**self.default_op_kwargs,
aws_conn_id=None,
region_name=REGION_NAME,
verify="/spam/egg.pem",
botocore_config={"read_timeout": 42},
)
assert op.hook.aws_conn_id is None
assert op.hook._region_name == REGION_NAME
assert op.hook._verify == "/spam/egg.pem"
assert op.hook._config is not None
assert op.hook._config.read_timeout == 42
@mock_aws
def test_execute_success(self, mocked_context):
self.sqs_client.create_queue(QueueName=QUEUE_NAME)
# Send SQS Message
op = SqsPublishOperator(**self.default_op_kwargs, sqs_queue=QUEUE_NAME)
result = op.execute(mocked_context)
assert "MD5OfMessageBody" in result
assert "MessageId" in result
# Validate message through moto
message = self.sqs_client.receive_message(QueueUrl=QUEUE_URL)
assert len(message["Messages"]) == 1
assert message["Messages"][0]["MessageId"] == result["MessageId"]
assert message["Messages"][0]["Body"] == "hello"
@mock_aws
def test_execute_failure_fifo_queue(self, mocked_context):
self.sqs_client.create_queue(QueueName=FIFO_QUEUE_NAME, Attributes={"FifoQueue": "true"})
op = SqsPublishOperator(**self.default_op_kwargs, sqs_queue=FIFO_QUEUE_NAME)
error_message = (
r"An error occurred \(MissingParameter\) when calling the SendMessage operation: "
r"The request must contain the parameter MessageGroupId."
)
with pytest.raises(ClientError, match=error_message):
op.execute(mocked_context)
@mock_aws
def test_deduplication_failure(self, mocked_context):
self.sqs_client.create_queue(
QueueName=FIFO_QUEUE_NAME, Attributes={"FifoQueue": "true", "ContentBasedDeduplication": "false"}
)
op = SqsPublishOperator(**self.default_op_kwargs, sqs_queue=FIFO_QUEUE_NAME, message_group_id="abc")
error_message = (
r"An error occurred \(InvalidParameterValue\) when calling the SendMessage operation: "
r"The queue should either have ContentBasedDeduplication enabled or MessageDeduplicationId provided explicitly"
)
with pytest.raises(ClientError, match=error_message):
op.execute(mocked_context)
@mock_aws
def test_execute_success_fifo_queue(self, mocked_context):
self.sqs_client.create_queue(
QueueName=FIFO_QUEUE_NAME, Attributes={"FifoQueue": "true", "ContentBasedDeduplication": "true"}
)
# Send SQS Message into the FIFO Queue
op = SqsPublishOperator(
**self.default_op_kwargs,
sqs_queue=FIFO_QUEUE_NAME,
message_group_id="abc",
message_deduplication_id="abc",
)
result = op.execute(mocked_context)
assert "MD5OfMessageBody" in result
assert "MessageId" in result
# Validate message through moto
message = self.sqs_client.receive_message(
QueueUrl=FIFO_QUEUE_URL, AttributeNames=["MessageGroupId", "MessageDeduplicationId"]
)
assert len(message["Messages"]) == 1
assert message["Messages"][0]["MessageId"] == result["MessageId"]
assert message["Messages"][0]["Body"] == "hello"
assert message["Messages"][0]["Attributes"]["MessageGroupId"] == "abc"
assert message["Messages"][0]["Attributes"]["MessageDeduplicationId"] == "abc"
def test_template_fields(self):
operator = SqsPublishOperator(
**self.default_op_kwargs,
sqs_queue=FIFO_QUEUE_NAME,
message_group_id="abc",
message_deduplication_id="abc",
)
validate_template_fields(operator)
| TestSqsPublishOperator |
python | pypa__pip | src/pip/_vendor/rich/console.py | {
"start": 2758,
"end": 2965
} | class ____(NamedTuple):
"""Size of the terminal."""
width: int
"""The width of the console in 'cells'."""
height: int
"""The height of the console in lines."""
@dataclass
| ConsoleDimensions |
python | dagster-io__dagster | python_modules/dagster/dagster/_config/field_utils.py | {
"start": 9028,
"end": 16812
} | class ____(_ConfigHasFields):
"""Define a config field requiring the user to select one option.
Selectors are used when you want to be able to present several different options in config but
allow only one to be selected. For example, a single input might be read in from either a csv
file or a parquet file, but not both at once.
Note that in some other type systems this might be called an 'input union'.
Functionally, a selector is like a :py:class:`Dict`, except that only one key from the dict can
be specified in valid config.
Args:
fields (Dict[str, Field]): The fields from which the user must select.
**Examples:**
.. code-block:: python
@op(
config_schema=Field(
Selector(
{
'haw': {'whom': Field(String, default_value='honua', is_required=False)},
'cn': {'whom': Field(String, default_value='世界', is_required=False)},
'en': {'whom': Field(String, default_value='world', is_required=False)},
}
),
is_required=False,
default_value={'en': {'whom': 'world'}},
)
)
def hello_world_with_default(context):
if 'haw' in context.op_config:
return 'Aloha {whom}!'.format(whom=context.op_config['haw']['whom'])
if 'cn' in context.op_config:
return '你好, {whom}!'.format(whom=context.op_config['cn']['whom'])
if 'en' in context.op_config:
return 'Hello, {whom}!'.format(whom=context.op_config['en']['whom'])
"""
def __new__(cls, fields, description=None):
return _memoize_inst_in_field_cache(
cls,
Selector,
_define_selector_key(expand_fields_dict(fields), description),
)
def __init__(self, fields, description=None):
# if we hit in field cache avoid double init
if self._initialized:
return
fields = expand_fields_dict(fields)
super().__init__(
key=_define_selector_key(fields, description),
kind=ConfigTypeKind.SELECTOR,
fields=fields,
description=description,
)
self._initialized = True
# Config syntax expansion code below
def is_potential_field(potential_field: object) -> bool:
from dagster._config.field import Field, resolve_to_config_type
return isinstance(potential_field, (Field, dict, list)) or bool(
resolve_to_config_type(potential_field)
)
def convert_fields_to_dict_type(fields: Mapping[str, object]):
return _convert_fields_to_dict_type(fields, fields, [])
def _convert_fields_to_dict_type(
original_root: object, fields: Mapping[str, object], stack: list[str]
) -> Shape:
return Shape(_expand_fields_dict(original_root, fields, stack))
def expand_fields_dict(fields: Mapping[str, object]) -> Mapping[str, "Field"]:
return _expand_fields_dict(fields, fields, [])
def _expand_fields_dict(
original_root: object, fields: Mapping[str, object], stack: list[str]
) -> Mapping[str, "Field"]:
check.mapping_param(fields, "fields")
return {
name: _convert_potential_field(original_root, value, stack + [name])
for name, value in fields.items()
}
def expand_list(original_root: object, the_list: Sequence[object], stack: list[str]) -> Array:
if len(the_list) != 1:
raise DagsterInvalidConfigDefinitionError(
original_root, the_list, stack, "List must be of length 1"
)
inner_type = _convert_potential_type(original_root, the_list[0], stack)
if not inner_type:
raise DagsterInvalidConfigDefinitionError(
original_root,
the_list,
stack,
f"List have a single item and contain a valid type i.e. [int]. Got item {the_list[0]!r}",
)
return Array(inner_type)
def expand_map(original_root: object, the_dict: Mapping[object, object], stack: list[str]) -> Map:
if len(the_dict) != 1:
raise DagsterInvalidConfigDefinitionError(
original_root, the_dict, stack, "Map dict must be of length 1"
)
key = next(iter(the_dict.keys()))
key_type = _convert_potential_type(original_root, key, stack)
if not key_type or not key_type.kind == ConfigTypeKind.SCALAR: # type: ignore
raise DagsterInvalidConfigDefinitionError(
original_root,
the_dict,
stack,
f"Map dict must have a scalar type as its only key. Got key {key!r}",
)
inner_type = _convert_potential_type(original_root, the_dict[key], stack)
if not inner_type:
raise DagsterInvalidConfigDefinitionError(
original_root,
the_dict,
stack,
f"Map must have a single value and contain a valid type i.e. {{str: int}}. Got item {the_dict[key]!r}",
)
return Map(key_type, inner_type)
def convert_potential_field(potential_field: object) -> "Field":
return _convert_potential_field(potential_field, potential_field, [])
def _convert_potential_type(original_root: object, potential_type, stack: list[str]):
from dagster._config.field import resolve_to_config_type
if isinstance(potential_type, Mapping):
# A dictionary, containing a single key which is a type (int, str, etc) and not a string is interpreted as a Map
if len(potential_type) == 1:
key = next(iter(potential_type.keys()))
if not isinstance(key, str) and _convert_potential_type(original_root, key, stack):
return expand_map(original_root, potential_type, stack)
# Otherwise, the dictionary is interpreted as a Shape
return Shape(_expand_fields_dict(original_root, potential_type, stack))
if isinstance(potential_type, list):
return expand_list(original_root, potential_type, stack)
return resolve_to_config_type(potential_type)
def _convert_potential_field(
original_root: object, potential_field: object, stack: list[str]
) -> "Field":
from dagster._config.field import Field
if potential_field is None:
raise DagsterInvalidConfigDefinitionError(
original_root, potential_field, stack, reason="Fields cannot be None"
)
if not is_potential_field(potential_field):
raise DagsterInvalidConfigDefinitionError(original_root, potential_field, stack)
if isinstance(potential_field, Field):
return potential_field
return Field(_convert_potential_type(original_root, potential_field, stack))
def config_dictionary_from_values(
values: Mapping[str, Any], config_field: "Field"
) -> dict[str, Any]:
"""Converts a set of config values into a dictionary representation,
in particular converting EnvVar objects into Dagster config inputs
and processing data structures such as dicts, lists, and structured Config classes.
"""
assert ConfigTypeKind.is_shape(config_field.config_type.kind)
from dagster._config.pythonic_config import _config_value_to_dict_representation
return check.is_dict(_config_value_to_dict_representation(None, values))
def _create_direct_access_exception(cls: type, env_var_name: str) -> Exception:
return RuntimeError(
f'Attempted to directly retrieve environment variable {cls.__name__}("{env_var_name}").'
f" {cls.__name__} defers resolution of the environment variable value until run time, and"
" should only be used as input to Dagster config or resources.\n\nTo access the"
f" environment variable value, call `get_value` on the {cls.__name__}, or use os.getenv"
" directly."
)
| Selector |
python | tensorflow__tensorflow | tensorflow/python/ops/distributions/multinomial.py | {
"start": 2142,
"end": 11734
} | class ____(distribution.Distribution):
"""Multinomial distribution.
This Multinomial distribution is parameterized by `probs`, a (batch of)
length-`K` `prob` (probability) vectors (`K > 1`) such that
`tf.reduce_sum(probs, -1) = 1`, and a `total_count` number of trials, i.e.,
the number of trials per draw from the Multinomial. It is defined over a
(batch of) length-`K` vector `counts` such that
`tf.reduce_sum(counts, -1) = total_count`. The Multinomial is identically the
Binomial distribution when `K = 2`.
#### Mathematical Details
The Multinomial is a distribution over `K`-class counts, i.e., a length-`K`
vector of non-negative integer `counts = n = [n_0, ..., n_{K-1}]`.
The probability mass function (pmf) is,
```none
pmf(n; pi, N) = prod_j (pi_j)**n_j / Z
Z = (prod_j n_j!) / N!
```
where:
* `probs = pi = [pi_0, ..., pi_{K-1}]`, `pi_j > 0`, `sum_j pi_j = 1`,
* `total_count = N`, `N` a positive integer,
* `Z` is the normalization constant, and,
* `N!` denotes `N` factorial.
Distribution parameters are automatically broadcast in all functions; see
examples for details.
#### Pitfalls
The number of classes, `K`, must not exceed:
- the largest integer representable by `self.dtype`, i.e.,
`2**(mantissa_bits+1)` (IEE754),
- the maximum `Tensor` index, i.e., `2**31-1`.
In other words,
```python
K <= min(2**31-1, {
tf.float16: 2**11,
tf.float32: 2**24,
tf.float64: 2**53 }[param.dtype])
```
Note: This condition is validated only when `self.validate_args = True`.
#### Examples
Create a 3-class distribution, with the 3rd class is most likely to be drawn,
using logits.
```python
logits = [-50., -43, 0]
dist = Multinomial(total_count=4., logits=logits)
```
Create a 3-class distribution, with the 3rd class is most likely to be drawn.
```python
p = [.2, .3, .5]
dist = Multinomial(total_count=4., probs=p)
```
The distribution functions can be evaluated on counts.
```python
# counts same shape as p.
counts = [1., 0, 3]
dist.prob(counts) # Shape []
# p will be broadcast to [[.2, .3, .5], [.2, .3, .5]] to match counts.
counts = [[1., 2, 1], [2, 2, 0]]
dist.prob(counts) # Shape [2]
# p will be broadcast to shape [5, 7, 3] to match counts.
counts = [[...]] # Shape [5, 7, 3]
dist.prob(counts) # Shape [5, 7]
```
Create a 2-batch of 3-class distributions.
```python
p = [[.1, .2, .7], [.3, .3, .4]] # Shape [2, 3]
dist = Multinomial(total_count=[4., 5], probs=p)
counts = [[2., 1, 1], [3, 1, 1]]
dist.prob(counts) # Shape [2]
dist.sample(5) # Shape [5, 2, 3]
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
total_count,
logits=None,
probs=None,
validate_args=False,
allow_nan_stats=True,
name="Multinomial"):
"""Initialize a batch of Multinomial distributions.
Args:
total_count: Non-negative floating point tensor with shape broadcastable
to `[N1,..., Nm]` with `m >= 0`. Defines this as a batch of
`N1 x ... x Nm` different Multinomial distributions. Its components
should be equal to integer values.
logits: Floating point tensor representing unnormalized log-probabilities
of a positive event with shape broadcastable to
`[N1,..., Nm, K]` `m >= 0`, and the same dtype as `total_count`. Defines
this as a batch of `N1 x ... x Nm` different `K` class Multinomial
distributions. Only one of `logits` or `probs` should be passed in.
probs: Positive floating point tensor with shape broadcastable to
`[N1,..., Nm, K]` `m >= 0` and same dtype as `total_count`. Defines
this as a batch of `N1 x ... x Nm` different `K` class Multinomial
distributions. `probs`'s components in the last portion of its shape
should sum to `1`. Only one of `logits` or `probs` should be passed in.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[total_count, logits, probs]) as name:
self._total_count = ops.convert_to_tensor(total_count, name="total_count")
if validate_args:
self._total_count = (
distribution_util.embed_check_nonnegative_integer_form(
self._total_count))
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits=logits,
probs=probs,
multidimensional=True,
validate_args=validate_args,
name=name)
self._mean_val = self._total_count[..., array_ops.newaxis] * self._probs
super(Multinomial, self).__init__(
dtype=self._probs.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._total_count,
self._logits,
self._probs],
name=name)
@property
def total_count(self):
"""Number of trials used to construct a sample."""
return self._total_count
@property
def logits(self):
"""Vector of coordinatewise logits."""
return self._logits
@property
def probs(self):
"""Probability of drawing a `1` in that coordinate."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.shape(self._mean_val)[:-1]
def _batch_shape(self):
return self._mean_val.get_shape().with_rank_at_least(1)[:-1]
def _event_shape_tensor(self):
return array_ops.shape(self._mean_val)[-1:]
def _event_shape(self):
return self._mean_val.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
k = self.event_shape_tensor()[0]
# broadcast the total_count and logits to same shape
n_draws = array_ops.ones_like(
self.logits[..., 0], dtype=n_draws.dtype) * n_draws
logits = array_ops.ones_like(
n_draws[..., array_ops.newaxis], dtype=self.logits.dtype) * self.logits
# flatten the total_count and logits
flat_logits = array_ops.reshape(logits, [-1, k]) # [B1B2...Bm, k]
flat_ndraws = n * array_ops.reshape(n_draws, [-1]) # [B1B2...Bm]
# computes each total_count and logits situation by map_fn
def _sample_single(args):
logits, n_draw = args[0], args[1] # [K], []
x = random_ops.multinomial(logits[array_ops.newaxis, ...], n_draw,
seed) # [1, n*n_draw]
x = array_ops.reshape(x, shape=[n, -1]) # [n, n_draw]
x = math_ops.reduce_sum(array_ops.one_hot(x, depth=k), axis=-2) # [n, k]
return x
x = map_fn.map_fn(
_sample_single, [flat_logits, flat_ndraws],
dtype=self.dtype) # [B1B2...Bm, n, k]
# reshape the results to proper shape
x = array_ops.transpose(x, perm=[1, 0, 2])
final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
x = array_ops.reshape(x, final_shape) # [n, B1, B2,..., Bm, k]
return x
@distribution_util.AppendDocstring(_multinomial_sample_note)
def _log_prob(self, counts):
return self._log_unnormalized_prob(counts) - self._log_normalization(counts)
def _log_unnormalized_prob(self, counts):
counts = self._maybe_assert_valid_sample(counts)
return math_ops.reduce_sum(counts * nn_ops.log_softmax(self.logits), -1)
def _log_normalization(self, counts):
counts = self._maybe_assert_valid_sample(counts)
return -distribution_util.log_combinations(self.total_count, counts)
def _mean(self):
return array_ops.identity(self._mean_val)
def _covariance(self):
p = self.probs * array_ops.ones_like(
self.total_count)[..., array_ops.newaxis]
# pylint: disable=invalid-unary-operand-type
return array_ops.matrix_set_diag(
-math_ops.matmul(
self._mean_val[..., array_ops.newaxis],
p[..., array_ops.newaxis, :]), # outer product
self._variance())
def _variance(self):
p = self.probs * array_ops.ones_like(
self.total_count)[..., array_ops.newaxis]
return self._mean_val - self._mean_val * p
def _maybe_assert_valid_sample(self, counts):
"""Check counts for proper shape, values, then return tensor version."""
if not self.validate_args:
return counts
counts = distribution_util.embed_check_nonnegative_integer_form(counts)
return control_flow_ops.with_dependencies([
check_ops.assert_equal(
self.total_count, math_ops.reduce_sum(counts, -1),
message="counts must sum to `self.total_count`"),
], counts)
| Multinomial |
python | ray-project__ray | python/ray/serve/exceptions.py | {
"start": 279,
"end": 917
} | class ____(RayServeException):
"""Raised when max_queued_requests is exceeded on a DeploymentHandle."""
def __init__(self, num_queued_requests: int, max_queued_requests: int):
super().__init__(num_queued_requests, max_queued_requests)
self._message = (
f"Request dropped due to backpressure "
f"(num_queued_requests={num_queued_requests}, "
f"max_queued_requests={max_queued_requests})."
)
def __str__(self) -> str:
return self._message
@property
def message(self) -> str:
return self._message
@PublicAPI(stability="alpha")
| BackPressureError |
python | rapidsai__cudf | python/cudf/cudf/core/tokenize_vocabulary.py | {
"start": 209,
"end": 1296
} | class ____:
"""
A vocabulary object used to tokenize input text.
Parameters
----------
vocabulary : str
Strings column of vocabulary terms
"""
def __init__(self, vocabulary: Series) -> None:
self.vocabulary = plc.nvtext.tokenize.TokenizeVocabulary(
vocabulary._column.plc_column
)
def tokenize(
self, text: Series, delimiter: str = "", default_id: int = -1
) -> Series:
"""
Parameters
----------
text : cudf string series
The strings to be tokenized.
delimiter : str
Delimiter to identify tokens. Default is whitespace.
default_id : int
Value to use for tokens not found in the vocabulary.
Default is -1.
Returns
-------
Tokenized strings
"""
if delimiter is None:
delimiter = ""
result = text._column.tokenize_with_vocabulary(
self.vocabulary, delimiter, default_id
)
return Series._from_column(result)
| TokenizeVocabulary |
python | aio-libs__aiohttp | aiohttp/helpers.py | {
"start": 19983,
"end": 20167
} | class ____(ContextManager["BaseTimerContext"]):
__slots__ = ()
def assert_timeout(self) -> None:
"""Raise TimeoutError if timeout has been exceeded."""
| BaseTimerContext |
python | pandas-dev__pandas | pandas/tests/frame/indexing/test_setitem.py | {
"start": 38247,
"end": 38926
} | class ____:
def test_setitem_callable(self):
# GH#12533
df = DataFrame({"A": [1, 2, 3, 4], "B": [5, 6, 7, 8]})
df[lambda x: "A"] = [11, 12, 13, 14]
exp = DataFrame({"A": [11, 12, 13, 14], "B": [5, 6, 7, 8]})
tm.assert_frame_equal(df, exp)
def test_setitem_other_callable(self):
# GH#13299
def inc(x):
return x + 1
# Set dtype object straight away to avoid upcast when setting inc below
df = DataFrame([[-1, 1], [1, -1]], dtype=object)
df[df > 0] = inc
expected = DataFrame([[-1, inc], [inc, -1]])
tm.assert_frame_equal(df, expected)
| TestDataFrameSetItemCallable |
python | apache__airflow | providers/alibaba/tests/unit/alibaba/cloud/hooks/test_maxcompute.py | {
"start": 1143,
"end": 4214
} | class ____:
def setup_method(self):
with mock.patch(
MAXCOMPUTE_HOOK_MODULE.format("get_connection"),
) as mock_get_connection:
mock_conn = mock.MagicMock()
mock_conn.extra_dejson = {
"access_key_id": "mock_access_key_id",
"access_key_secret": "mock_access_key_secret",
"project": MOCK_MAXCOMPUTE_PROJECT,
"endpoint": MOCK_MAXCOMPUTE_ENDPOINT,
}
mock_get_connection.return_value = mock_conn
self.hook = MaxComputeHook(maxcompute_conn_id=MOCK_MAXCOMPUTE_CONN_ID)
@mock.patch(MAXCOMPUTE_HOOK_MODULE.format("get_client"))
def test_run_sql(self, mock_get_client):
mock_instance = mock.MagicMock()
mock_client = mock.MagicMock()
mock_client.run_sql.return_value = mock_instance
mock_get_client.return_value = mock_client
sql = "SELECT 1"
priority = 1
running_cluster = "mock_running_cluster"
hints = {"hint_key": "hint_value"}
aliases = {"alias_key": "alias_value"}
default_schema = "mock_default_schema"
quota_name = "mock_quota_name"
instance = self.hook.run_sql(
sql=sql,
priority=priority,
running_cluster=running_cluster,
hints=hints,
aliases=aliases,
default_schema=default_schema,
quota_name=quota_name,
)
assert instance == mock_instance
assert mock_client.run_sql.asssert_called_once_with(
sql=sql,
priority=priority,
running_cluster=running_cluster,
hints=hints,
aliases=aliases,
default_schema=default_schema,
quota_name=quota_name,
)
@mock.patch(MAXCOMPUTE_HOOK_MODULE.format("get_client"))
def test_get_instance(self, mock_get_client):
mock_client = mock.MagicMock()
mock_client.exist_instance.return_value = True
mock_instance = mock.MagicMock()
mock_client.get_instance.return_value = mock_instance
mock_get_client.return_value = mock_client
instance_id = "mock_instance_id"
instance = self.hook.get_instance(
instance_id=instance_id,
project=MOCK_MAXCOMPUTE_PROJECT,
endpoint=MOCK_MAXCOMPUTE_ENDPOINT,
)
mock_client.get_instance.assert_called_once_with(
id_=instance_id,
project=MOCK_MAXCOMPUTE_PROJECT,
)
assert instance == mock_instance
@mock.patch(MAXCOMPUTE_HOOK_MODULE.format("get_client"))
def test_stop_instance_success(self, mock_get_client):
mock_client = mock.MagicMock()
mock_get_client.return_value = mock_client
instance_id = "mock_instance_id"
self.hook.stop_instance(
instance_id=instance_id,
project=MOCK_MAXCOMPUTE_PROJECT,
endpoint=MOCK_MAXCOMPUTE_ENDPOINT,
)
mock_client.stop_instance.assert_called_once()
| TestMaxComputeHook |
python | pytorch__pytorch | torch/ao/nn/quantized/dynamic/modules/conv.py | {
"start": 12658,
"end": 15534
} | class ____(nnq.ConvTranspose2d):
r"""A dynamically quantized transposed convolution module with floating point tensors as inputs and outputs.
For details on input arguments, parameters, and implementation see
:class:`~torch.nn.ConvTranspose2d`.
For special notes, please, see :class:`~torch.ao.nn.quantized.dynamic.Conv2d`
Attributes:
weight (Tensor): packed tensor derived from the learnable weight
parameter.
scale (Tensor): scalar for the output scale
zero_point (Tensor): scalar for the output zero point
See :class:`~torch.nn.ConvTranspose2d` for other attributes.
Examples::
>>> # xdoctest: +SKIP
>>> # With square kernels and equal stride
>>> m = nnq.ConvTranspose2d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nnq.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
>>> output = m(input)
>>> # exact output size can be also specified as an argument
>>> downsample = nnq.Conv2d(16, 16, 3, stride=2, padding=1)
>>> upsample = nnq.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
>>> h = downsample(input)
>>> h.size()
torch.Size([1, 16, 6, 6])
>>> output = upsample(h, output_size=input.size())
>>> output.size()
torch.Size([1, 16, 12, 12])
"""
_FLOAT_MODULE: ClassVar[type[nn.ConvTranspose2d]] = nn.ConvTranspose2d
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
padding_mode="zeros",
device=None,
dtype=None,
):
warnings.warn(
f"The current implementation of the {self._get_name()} module has poor numerical accuracy and its use is not recommended", # noqa: B950
stacklevel=2,
)
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
output_padding,
groups,
bias,
dilation,
padding_mode,
**factory_kwargs,
)
def _get_name(self):
return "DynamicQuantizedConvTranspose2d"
def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor:
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 4:
raise ValueError("Input shape must be `(N, C, H, W)`!")
return ops.quantized.conv_transpose2d_dynamic(
input, self._packed_params, reduce_range
)
| ConvTranspose2d |
python | apache__airflow | providers/fab/tests/unit/fab/db_manager/test_fab_db_manager.py | {
"start": 1118,
"end": 3444
} | class ____:
@conf_vars(
{("database", "external_db_managers"): "airflow.providers.fab.auth_manager.models.db.FABDBManager"}
)
def test_db_manager_uses_config(self):
from airflow.providers.fab.auth_manager.models.db import FABDBManager
run_db_manager = RunDBManager()
assert run_db_manager._managers == [FABDBManager]
@conf_vars(
{("database", "external_db_managers"): "airflow.providers.fab.auth_manager.models.db.FABDBManager"}
)
def test_defining_table_same_name_as_airflow_table_name_raises(self):
from sqlalchemy import Column, Integer, String
run_db_manager = RunDBManager()
metadata = run_db_manager._managers[0].metadata
# Add dag_run table to metadata
mytable = Table(
"dag_run", metadata, Column("id", Integer, primary_key=True), Column("name", String(50))
)
metadata._add_table("dag_run", None, mytable)
with pytest.raises(AirflowException, match="Table 'dag_run' already exists in the Airflow metadata"):
run_db_manager.validate()
metadata._remove_table("dag_run", None)
@mock.patch.object(RunDBManager, "upgradedb")
@mock.patch.object(RunDBManager, "initdb")
def test_init_db_calls_rundbmanager(self, mock_initdb, mock_upgrade_db, session):
initdb(session=session)
mock_initdb.assert_called()
mock_initdb.assert_called_once_with(session)
@conf_vars(
{("database", "external_db_managers"): "airflow.providers.fab.auth_manager.models.db.FABDBManager"}
)
@mock.patch("airflow.providers.fab.auth_manager.models.db.FABDBManager")
def test_rundbmanager_calls_dbmanager_methods(self, mock_fabdb_manager, session):
mock_fabdb_manager.supports_table_dropping = True
fabdb_manager = mock_fabdb_manager.return_value
ext_db = RunDBManager()
# initdb
ext_db.initdb(session=session)
fabdb_manager.initdb.assert_called_once()
# upgradedb
ext_db.upgradedb(session=session)
fabdb_manager.upgradedb.assert_called_once()
# drop_tables
connection = mock.MagicMock()
ext_db.drop_tables(session, connection)
mock_fabdb_manager.return_value.drop_tables.assert_called_once_with(connection)
| TestRunDBManagerWithFab |
python | fastai__fastai | fastai/callback/hook.py | {
"start": 1935,
"end": 4195
} | class ____():
"Create several hooks on the modules in `ms` with `hook_func`."
def __init__(self, ms, hook_func, is_forward=True, detach=True, cpu=False):
self.hooks = [Hook(m, hook_func, is_forward, detach, cpu) for m in ms]
def __getitem__(self,i): return self.hooks[i]
def __len__(self): return len(self.hooks)
def __iter__(self): return iter(self.hooks)
@property
def stored(self): return L(o.stored for o in self)
def remove(self):
"Remove the hooks from the model."
for h in self.hooks: h.remove()
def __enter__(self, *args): return self
def __exit__ (self, *args): self.remove()
_docs = dict(stored = "The states saved in each hook.",
__enter__="Register the hooks",
__exit__="Remove the hooks")
# %% ../../nbs/15_callback.hook.ipynb 39
def hook_outputs(modules, detach=True, cpu=False, grad=False):
"Return `Hooks` that store activations of all `modules` in `self.stored`"
return Hooks(modules, _hook_inner, detach=detach, cpu=cpu, is_forward=not grad)
# %% ../../nbs/15_callback.hook.ipynb 43
def dummy_eval(m, size=(64,64)):
"Evaluate `m` on a dummy input of a certain `size`"
ch_in = in_channels(m)
x = one_param(m).new(1, ch_in, *size).requires_grad_(False).uniform_(-1.,1.)
with torch.no_grad(): return m.eval()(x)
# %% ../../nbs/15_callback.hook.ipynb 44
def model_sizes(m, size=(64,64)):
"Pass a dummy input through the model `m` to get the various sizes of activations."
with hook_outputs(m) as hooks:
_ = dummy_eval(m, size=size)
return [o.stored.shape for o in hooks]
# %% ../../nbs/15_callback.hook.ipynb 46
def num_features_model(m):
"Return the number of output features for `m`."
sz,ch_in = 32,in_channels(m)
while True:
#Trying for a few sizes in case the model requires a big input size.
try:
return model_sizes(m, (sz,sz))[-1][1]
except Exception as e:
sz *= 2
if sz > 2048: raise e
# %% ../../nbs/15_callback.hook.ipynb 50
def has_params(m):
"Check if `m` has at least one parameter"
return len(list(m.parameters())) > 0
# %% ../../nbs/15_callback.hook.ipynb 52
@funcs_kwargs
| Hooks |
python | run-llama__llama_index | llama-index-integrations/memory/llama-index-memory-mem0/llama_index/memory/mem0/base.py | {
"start": 1458,
"end": 2089
} | class ____(BaseModel):
user_id: Optional[str] = None
agent_id: Optional[str] = None
run_id: Optional[str] = None
@model_validator(mode="after")
def check_at_least_one_assigned(cls, values):
if not any(
getattr(values, field) for field in ["user_id", "agent_id", "run_id"]
):
raise ValueError(
"At least one of 'user_id', 'agent_id', or 'run_id' must be assigned."
)
return values
def get_context(self) -> Dict[str, Optional[str]]:
return {key: value for key, value in self.__dict__.items() if value is not None}
| Mem0Context |
python | bokeh__bokeh | tests/unit/bokeh/colors/test_color__colors.py | {
"start": 2964,
"end": 5693
} | class ____:
def test_init(self) -> None:
c = bcc.HSL(10, 0.2, 0.3)
assert c
assert c.a == 1.0
assert c.h == 10
assert c.s == 0.2
assert c.l == 0.3
c = bcc.HSL(10, 0.2, 0.3, 0.3)
assert c
assert c.a == 0.3
assert c.h == 10
assert c.s == 0.2
assert c.l == 0.3
def test_repr(self) -> None:
c = bcc.HSL(10, 0.2, 0.3)
assert repr(c) == c.to_css()
c = bcc.HSL(10, 0.2, 0.3, 0.3)
assert repr(c) == c.to_css()
def test_copy(self) -> None:
c = bcc.HSL(10, 0.2, 0.3)
c2 = c.copy()
assert c2 is not c
assert c2.a == c.a
assert c2.h == c.h
assert c2.s == c.s
assert c2.l == c.l
def test_from_hsl(self) -> None:
c = bcc.HSL(10, 0.2, 0.3)
c2 = bcc.HSL.from_hsl(c)
assert c2 is not c
assert c2.a == c.a
assert c2.h == c.h
assert c2.s == c.s
assert c2.l == c.l
c = bcc.HSL(10, 0.2, 0.3, 0.1)
c2 = bcc.HSL.from_hsl(c)
assert c2 is not c
assert c2.a == c.a
assert c2.h == c.h
assert c2.s == c.s
assert c2.l == c.l
def test_from_rgb(self) -> None:
c = bcc.RGB(255, 100, 0)
c2 = bcc.HSL.from_rgb(c)
assert c2 is not c
assert c2.a == 1
assert c2.h == 24
assert c2.s == 1.0
assert c2.l == 0.5
c = bcc.RGB(255, 100, 0, 0.1)
c2 = bcc.HSL.from_rgb(c)
assert c2 is not c
assert c2.a == 0.1
assert c2.h == 24
assert c2.s == 1.0
assert c2.l == 0.5
def test_to_css(self) -> None:
c = bcc.HSL(10, 0.2, 0.3)
assert c.to_css() == "hsl(10, 20.0%, 30.0%)"
c = bcc.HSL(10, 0.2, 0.3, 0.3)
assert c.to_css() == "hsla(10, 20.0%, 30.0%, 0.3)"
def test_to_hsl(self) -> None:
c = bcc.HSL(10, 0.2, 0.3)
c2 = c.to_hsl()
assert c2 is not c
assert c2.a == c.a
assert c2.h == c.h
assert c2.s == c.s
assert c2.l == c.l
c = bcc.HSL(10, 0.2, 0.3, 0.1)
c2 = c.to_hsl()
assert c2 is not c
assert c2.a == c.a
assert c2.h == c.h
assert c2.s == c.s
assert c2.l == c.l
def test_to_rgb(self) -> None:
c = bcc.HSL(10, 0.2, 0.3)
c2 = c.to_rgb()
assert c2 is not c
assert c2.a == 1.0
assert c2.r == 92
assert c2.g == 66
assert c2.b == 61
c = bcc.HSL(10, 0.2, 0.3, 0.1)
c2 = c.to_rgb()
assert c2 is not c
assert c.a == 0.1
assert c2.r == 92
assert c2.g == 66
assert c2.b == 61
| Test_HSL |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 148601,
"end": 153183
} | class ____(IRNode, CodegenSymbol):
# Name is sometimes None; e.g., ForceInPlace, where there isn't
# a meaningful name
name: Optional[str]
layout: OutputSpec
# Multi-output buffers will define 'outputs: List[Buffer]'. Confusingly,
# MultiOutput does NOT define this!
def __post_init__(self) -> None:
super().__post_init__()
self._post_init_setattr("origin_node", None)
def make_indexer(self) -> Callable[[Sequence[Expr]], Expr]:
return self.get_layout().make_indexer()
def get_name(self) -> str:
assert self.name, self
return self.name
def get_example(self) -> Union[torch.Tensor, torch.SymInt]:
if isinstance(self.layout, Layout):
return self.layout.get_example()
raise NotImplementedError(type(self.layout).__name__)
def get_device(self) -> Optional[torch.device]:
return self.get_output_spec().get_device()
def get_defining_op(self) -> Optional[Operation]:
return None
@property
def dtype(self) -> torch.dtype:
return self.get_layout().dtype
def get_size(self) -> Sequence[Expr]:
return [*self.get_layout().size]
def get_stride(self) -> list[Expr]:
return [*self.get_layout().stride]
def get_offset(self) -> Expr:
return self.get_layout().offset
def get_layout(self) -> Layout:
if isinstance(self.layout, Layout):
return self.layout
raise NotImplementedError(type(self.layout).__name__)
def get_output_spec(self) -> OutputSpec:
return self.layout
def get_storage_numel(self) -> int:
return self.get_numel()
def get_is_pinned(self) -> bool:
return self.get_layout().is_pinned
def freeze_layout(self) -> None:
if isinstance(self.layout, Layout) and not isinstance(
self.layout, NonOwningLayout
):
self.layout = self.layout.as_fixed()
def freeze_layout_with_stride_order(
self, order: Sequence[int], allow_padding: bool = False
) -> None:
assert isinstance(self.layout, FlexibleLayout), type(self.layout)
self.layout = self.layout.as_stride_order(order, allow_padding=allow_padding)
def freeze_layout_with_fill_order(self, order: Sequence[int]) -> None:
assert isinstance(self.layout, FlexibleLayout), type(self.layout)
self.layout = self.layout.as_fill_order(order)
def freeze_layout_with_same_order(self, stride: Sequence[int]) -> None:
assert isinstance(self.layout, FlexibleLayout), type(self.layout)
self.layout = self.layout.as_same_order(stride)
def freeze_layout_with_exact_strides(
self, exact_strides: Sequence[int], allow_padding: bool = False
) -> None:
assert isinstance(self.layout, FlexibleLayout), type(self.layout)
self.layout = self.layout.as_exact_strides(
exact_strides, allow_padding=allow_padding
)
def is_zero_elements(self) -> bool:
return V.graph.sizevars.statically_known_true(sympy.Eq(self.get_numel(), 0))
def make_loader(self) -> Callable[[Sequence[Expr]], OpsValue]:
# Loading from a zero-element buffer is a no-op
if self.is_zero_elements():
return partial(nop_loader_fn, dtype=self.get_dtype())
def loader(index: Sequence[Expr]) -> OpsValue:
indexer = self.make_indexer()
return ops.load(self.name or "unnamed", indexer(index))
return loader
def codegen_reference(self, writer: Optional[IndentedBuffer] = None) -> str:
return self.get_name()
def decide_layout(self) -> None:
pass
def get_inputs_that_alias_output(self) -> Sequence[str]:
if isinstance(self.layout, NonOwningLayout):
return [self.layout.view.get_name()]
return ()
def get_mutation_names(self) -> Sequence[str]:
if isinstance(self.layout, MutationLayoutSHOULDREMOVE):
return [self.layout.target.get_name()]
return ()
def get_read_names(self) -> OrderedSet[str]:
return OrderedSet([self.get_name()])
@cache_on_self_and_args("Buffer")
def get_free_symbol_uses(
self, unbacked_only: bool = False
) -> OrderedSet[sympy.Symbol]:
return OrderedSet()
def get_unbacked_symbol_defs(self) -> OrderedSet[sympy.Symbol]:
return OrderedSet()
def realize(self) -> Optional[str]:
pass
def should_allocate(self) -> bool:
# Returns False by default.
return False
@ir_dataclass(frozen=False)
| Buffer |
python | Lightning-AI__lightning | src/lightning/pytorch/loops/fetchers.py | {
"start": 958,
"end": 2807
} | class ____(Iterator):
def __init__(self) -> None:
self._combined_loader: Optional[CombinedLoader] = None
self.iterator: Optional[Iterator] = None
self.fetched: int = 0
self.done: bool = False
self.length: Optional[int] = None
self._start_profiler = _profile_nothing
self._stop_profiler = _profile_nothing
@property
def combined_loader(self) -> CombinedLoader:
if self._combined_loader is None:
raise MisconfigurationException(
f"`{self.__class__.__name__}` should have been `setup` with a `CombinedLoader`."
)
return self._combined_loader
def setup(self, combined_loader: CombinedLoader) -> None:
self._combined_loader = combined_loader
@override
def __iter__(self) -> "_DataFetcher":
self.iterator = iter(self.combined_loader)
self.reset()
return self
@override
def __next__(self) -> _ITERATOR_RETURN:
assert self.iterator is not None
self._start_profiler()
try:
batch = next(self.iterator)
except StopIteration:
self.done = True
raise
finally:
self._stop_profiler()
self.fetched += 1
if self.length is not None:
self.done = self.fetched >= self.length
return batch
def reset(self) -> None:
self.fetched = 0
# teardown calls `reset()`, and if it happens early, `combined_loader` can still be None
if self._combined_loader is not None:
self.length = sized_len(self.combined_loader)
self.done = self.length == 0
def teardown(self) -> None:
self.reset()
if self._combined_loader is not None:
self._combined_loader.reset()
self.iterator = None
| _DataFetcher |
python | sympy__sympy | sympy/physics/quantum/operator.py | {
"start": 6774,
"end": 7309
} | class ____(Operator):
"""A unitary operator that satisfies U*Dagger(U) == 1.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
operator. For time-dependent operators, this will include the time.
Examples
========
>>> from sympy.physics.quantum import Dagger, UnitaryOperator
>>> U = UnitaryOperator('U')
>>> U*Dagger(U)
1
"""
is_unitary = True
def _eval_adjoint(self):
return self._eval_inverse()
| UnitaryOperator |
python | rapidsai__cudf | python/cudf/cudf/core/resample.py | {
"start": 3342,
"end": 3409
} | class ____(_Resampler, DataFrameGroupBy):
pass
| DataFrameResampler |
python | altair-viz__altair | altair/vegalite/v6/display.py | {
"start": 5399,
"end": 6087
} | class ____(Displayable):
"""An IPython/Jupyter display class for rendering VegaLite 6."""
renderers = renderers
schema_path = (__name__, "schema/vega-lite-schema.json")
def vegalite(spec: dict, validate: bool = True) -> None:
"""
Render and optionally validate a VegaLite 6 spec.
This will use the currently enabled renderer to render the spec.
Parameters
----------
spec: dict
A fully compliant VegaLite 6 spec, with the data portion fully processed.
validate: bool
Should the spec be validated against the VegaLite 6 schema?
"""
from IPython.display import display
display(VegaLite(spec, validate=validate))
| VegaLite |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/fixtures/orm.py | {
"start": 4138,
"end": 5205
} | class ____(MappedTest):
run_setup_classes = "once"
run_setup_mappers = "once"
@classmethod
def _setup_once_tables(cls):
pass
@classmethod
def _with_register_classes(cls, fn):
cls_registry = cls.classes
class _DeclBase(DeclarativeBase):
__table_cls__ = schema.Table
metadata = cls._tables_metadata
type_annotation_map = {
str: sa.String().with_variant(
sa.String(50), "mysql", "mariadb", "oracle"
)
}
def __init_subclass__(cls, **kw) -> None:
assert cls_registry is not None
cls_registry[cls.__name__] = cls
super().__init_subclass__(**kw)
cls.DeclarativeBasic = _DeclBase
# sets up cls.Basic which is helpful for things like composite
# classes
super()._with_register_classes(fn)
if cls._tables_metadata.tables and cls.run_create_tables:
cls._tables_metadata.create_all(config.db)
| DeclarativeMappedTest |
python | walkccc__LeetCode | solutions/1542. Find Longest Awesome Substring/1542.py | {
"start": 0,
"end": 437
} | class ____:
def longestAwesome(self, s: str) -> int:
ans = 0
prefix = 0 # the binary prefix
prefixToIndex = [len(s)] * 1024
prefixToIndex[0] = -1
for i, c in enumerate(s):
prefix ^= 1 << int(c)
ans = max(ans, i - prefixToIndex[prefix])
for j in range(10):
ans = max(ans, i - prefixToIndex[prefix ^ 1 << j])
prefixToIndex[prefix] = min(prefixToIndex[prefix], i)
return ans
| Solution |
python | python__mypy | mypy/nodes.py | {
"start": 66222,
"end": 66580
} | class ____(Expression):
"""Float literal"""
__slots__ = ("value",)
__match_args__ = ("value",)
value: float # 0.0 by default
def __init__(self, value: float) -> None:
super().__init__()
self.value = value
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_float_expr(self)
| FloatExpr |
python | kamyu104__LeetCode-Solutions | Python/difference-between-maximum-and-minimum-price-sum.py | {
"start": 1536,
"end": 2421
} | class ____(object):
def maxOutput(self, n, edges, price):
"""
:type n: int
:type edges: List[List[int]]
:type price: List[int]
:rtype: int
"""
def dfs(u, p):
dp = [price[u], 0] # [max_path_sum, max_path_sum_without_last_node]
for v in adj[u]:
if v == p:
continue
new_dp = dfs(v, u)
result[0] = max(result[0], dp[0]+new_dp[1], dp[1]+new_dp[0])
dp[0] = max(dp[0], new_dp[0]+price[u])
dp[1] = max(dp[1], new_dp[1]+price[u])
return dp
result = [0]
adj = [[] for _ in xrange(n)]
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
dfs(0, -1)
return result[0]
# Time: O(n)
# Space: O(n)
# iterative dfs, tree dp
| Solution2 |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/sensors/dms.py | {
"start": 1224,
"end": 3642
} | class ____(AwsBaseSensor[DmsHook]):
"""
Contains general sensor behavior for DMS task.
Subclasses should set ``target_statuses`` and ``termination_statuses`` fields.
:param replication_task_arn: AWS DMS replication task ARN
:param target_statuses: the target statuses, sensor waits until
the task reaches any of these states
:param termination_statuses: the termination statuses, sensor fails when
the task reaches any of these states
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
aws_hook_class = DmsHook
template_fields: Sequence[str] = aws_template_fields("replication_task_arn")
def __init__(
self,
replication_task_arn: str,
target_statuses: Iterable[str] | None = None,
termination_statuses: Iterable[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.replication_task_arn = replication_task_arn
self.target_statuses: Iterable[str] = target_statuses or []
self.termination_statuses: Iterable[str] = termination_statuses or []
def poke(self, context: Context):
if not (status := self.hook.get_task_status(self.replication_task_arn)):
raise AirflowException(
f"Failed to read task status, task with ARN {self.replication_task_arn} not found"
)
self.log.info("DMS Replication task (%s) has status: %s", self.replication_task_arn, status)
if status in self.target_statuses:
return True
if status in self.termination_statuses:
raise AirflowException(f"Unexpected status: {status}")
return False
| DmsTaskBaseSensor |
python | huggingface__transformers | src/transformers/models/layoutlmv2/modeling_layoutlmv2.py | {
"start": 4249,
"end": 8237
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.fast_qkv = config.fast_qkv
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.has_relative_attention_bias = config.has_relative_attention_bias
self.has_spatial_attention_bias = config.has_spatial_attention_bias
if config.fast_qkv:
self.qkv_linear = nn.Linear(config.hidden_size, 3 * self.all_head_size, bias=False)
self.q_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
self.v_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
else:
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def compute_qkv(self, hidden_states):
if self.fast_qkv:
qkv = self.qkv_linear(hidden_states)
q, k, v = torch.chunk(qkv, 3, dim=-1)
if q.ndimension() == self.q_bias.ndimension():
q = q + self.q_bias
v = v + self.v_bias
else:
_sz = (1,) * (q.ndimension() - 1) + (-1,)
q = q + self.q_bias.view(*_sz)
v = v + self.v_bias.view(*_sz)
else:
q = self.query(hidden_states)
k = self.key(hidden_states)
v = self.value(hidden_states)
return q, k, v
def forward(
self,
hidden_states,
attention_mask=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None,
):
batch_size, seq_length, _ = hidden_states.shape
query, key, value = self.compute_qkv(hidden_states)
# (B, L, H*D) -> (B, H, L, D)
query_layer = query.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
key_layer = key.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = value.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
query_layer = query_layer / math.sqrt(self.attention_head_size)
# [BSZ, NAT, L, L]
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.has_relative_attention_bias:
attention_scores += rel_pos
if self.has_spatial_attention_bias:
attention_scores += rel_2d_pos
attention_scores = attention_scores.float().masked_fill_(
attention_mask.to(torch.bool), torch.finfo(attention_scores.dtype).min
)
attention_probs = nn.functional.softmax(attention_scores, dim=-1, dtype=torch.float32).type_as(value_layer)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
| LayoutLMv2SelfAttention |
python | getsentry__sentry | src/sentry/snuba/uptime_results.py | {
"start": 497,
"end": 1763
} | class ____(rpc_dataset_common.RPCBase):
DEFINITIONS = UPTIME_RESULT_DEFINITIONS
@classmethod
@sentry_sdk.trace
def run_table_query(
cls,
*,
params: SnubaParams,
query_string: str,
selected_columns: list[str],
orderby: list[str] | None,
offset: int,
limit: int,
referrer: str,
config: SearchResolverConfig,
sampling_mode: SAMPLING_MODES | None = None,
equations: list[str] | None = None,
search_resolver: SearchResolver | None = None,
page_token: PageToken | None = None,
debug: bool = False,
additional_queries: AdditionalQueries | None = None,
) -> EAPResponse:
return cls._run_table_query(
rpc_dataset_common.TableQuery(
query_string=query_string,
selected_columns=selected_columns,
equations=equations,
orderby=orderby,
offset=offset,
limit=limit,
referrer=referrer,
sampling_mode=sampling_mode,
resolver=search_resolver or cls.get_resolver(params, config),
page_token=page_token,
),
debug,
)
| UptimeResults |
python | langchain-ai__langchain | libs/core/tests/unit_tests/_api/test_beta_decorator.py | {
"start": 10944,
"end": 11728
} | class ____(BaseModel):
@beta()
def beta_method(self) -> str:
"""Original doc."""
return "This is a beta method."
def test_beta_method_pydantic() -> None:
"""Test beta method."""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
obj = MyModel()
assert obj.beta_method() == "This is a beta method."
assert len(warning_list) == 1
warning = warning_list[0].message
assert str(warning) == (
"The method `MyModel.beta_method` is in beta. It is actively being "
"worked on, so "
"the API may change."
)
doc = obj.beta_method.__doc__
assert isinstance(doc, str)
assert doc.startswith(".. beta::")
| MyModel |
python | sympy__sympy | sympy/plotting/pygletplot/plot_modes.py | {
"start": 3738,
"end": 4388
} | class ____(PlotSurface):
i_vars, d_vars = 'th', 'r'
intervals = [[0, 2*pi, 40], [-1, 1, 20]]
aliases = ['cylindrical', 'polar']
is_default = False
def _get_sympy_evaluator(self):
fr = self.d_vars[0]
t = self.u_interval.v
h = self.v_interval.v
def e(_t, _h):
_r = float(fr.subs(t, _t).subs(h, _h))
return (_r*p_cos(_t), _r*p_sin(_t), _h)
return e
def _get_lambda_evaluator(self):
fr = self.d_vars[0]
t = self.u_interval.v
h = self.v_interval.v
fx, fy = fr*cos(t), fr*sin(t)
return lambdify([t, h], [fx, fy, h])
| Cylindrical |
python | tensorflow__tensorflow | tensorflow/dtensor/python/tests/mesh_util_test.py | {
"start": 10709,
"end": 15836
} | class ____(test_util.DTensorBaseTest):
"""Tests for mesh_util that require accelerator initialization."""
def setUp(self):
super().setUp()
device_type = config.preferred_device_type()
accelerator_util.initialize_accelerator_system(device_type)
def tearDown(self):
super().tearDown()
context._reset_context() # pylint: disable=protected-access
def test_is_initialized(self):
self.assertTrue(accelerator_util.is_initialized())
def test_initialize_accelerator_system(self):
accelerator_util.shutdown_accelerator_system()
device_type = accelerator_util.initialize_accelerator_system('CPU')
self.assertEqual(device_type, 'CPU')
# Default uses preferred_device_type.
accelerator_util.shutdown_accelerator_system()
device_type = accelerator_util.initialize_accelerator_system()
self.assertEqual(device_type, config.preferred_device_type())
@mock.patch.dict(os.environ, {'DTENSOR_GPU_USE_NCCL_COMMUNICATION': '1'})
def test_initialize_error_vgpu_with_nccl(self):
self.skipForDeviceType(['CPU', 'TPU'], reason='Test is intended for GPUs')
accelerator_util.shutdown_accelerator_system()
num_physical_devices = config.num_local_devices('GPU')
test_util.reset_logical_devices('GPU', 2 * num_physical_devices)
with self.assertRaisesRegex(ValueError,
'DTENSOR_GPU_USE_NCCL_COMMUNICATION'):
accelerator_util.initialize_accelerator_system('GPU')
@mock.patch.dict(os.environ, {'DTENSOR_GPU_USE_NCCL_COMMUNICATION': '1'})
def test_initialize_with_nccl(self):
self.skipForDeviceType(['CPU', 'TPU'], reason='Test is intended for GPUs')
accelerator_util.shutdown_accelerator_system()
accelerator_util.initialize_accelerator_system('GPU')
num_devices = len(test_util.list_local_logical_devices('GPU'))
mesh = mesh_util.create_mesh([('dim', num_devices)], device_type='GPU')
# The following shall run, but there is no clear way to check if it uses
# Collectives backed by NCCL.
mesh_util.barrier(mesh)
def test_initialize_after_tensorflow(self):
accelerator_util.shutdown_accelerator_system()
context.ensure_initialized()
with self.assertRaisesRegex(ValueError,
'TensorFlow has already been initialized'):
accelerator_util.initialize_accelerator_system('CPU')
def test_initialize_after_tensorflow_with_reset(self):
accelerator_util.shutdown_accelerator_system()
test_util.reset_logical_devices('CPU', 32)
context.ensure_initialized()
with self.assertLogs(level='WARNING') as log:
accelerator_util.initialize_accelerator_system(
'CPU', experimental_reset_context=True
)
self.assertIn('experimental_reset_context', log[0][0].message)
# Preserves the original logical device setting.
self.assertLen(test_util.list_local_logical_devices('CPU'), 32)
@parameterized.parameters(
dict(
device_type='CPU', skip_for=[]
), # We can create CPU meshes on TPU and GPU platforms!
dict(device_type='GPU', skip_for=['CPU', 'TPU']),
dict(device_type='TPU', skip_for=['CPU', 'GPU']),
)
def test_initialize_with_manual_logical_cpu_devices(
self, device_type: str, skip_for: list[str]
):
self.skipForDeviceType(
skip_for,
reason=f'Test is not intended for {skip_for}',
)
accelerator_util.shutdown_accelerator_system()
test_util.reset_logical_devices('CPU', 1)
accelerator_util.initialize_accelerator_system(
device_type, num_logical_cpu_devices=32
)
self.assertLen(test_util.list_local_logical_devices('CPU'), 32)
def test_shutdown_accelerator_system(self):
self.assertTrue(accelerator_util.is_initialized())
accelerator_util.shutdown_accelerator_system()
self.assertFalse(accelerator_util.is_initialized())
with self.assertRaisesRegex(ValueError, 'not initialized'):
accelerator_util.shutdown_accelerator_system()
def test_distributed_tpu_mesh_creation(self):
self.skipForDeviceType(['CPU', 'GPU'], reason='Test is intended for TPUs')
self.skipForDeviceType(['TPU'],
reason='Test requires exactly 8 cores',
unless_device_count_equals_to=8)
num_devices = len(test_util.list_local_logical_devices('TPU'))
mesh = mesh_util.create_distributed_mesh(
mesh_name='distributed_1d_mesh',
mesh_dims=[('x', num_devices)],
device_type='TPU')
self.assertEqual(mesh.num_local_devices(), 8)
self.assertEqual(mesh.size, 8)
def test_mesh_barrier(self):
device_type = config.preferred_device_type()
num_devices = len(test_util.list_local_logical_devices(device_type))
mesh = mesh_util.create_mesh([('dim', num_devices)],
device_type=device_type)
# FIXME(b/235416015): To really test this we'll need a new eager async
# API. The following shall run, but the barrier semantics is not tested.
mesh_util.barrier(mesh, 'Name')
mesh_util.barrier(mesh)
if __name__ == '__main__':
test.main()
| InitializedMeshUtilTest |
python | pikepdf__pikepdf | src/pikepdf/form.py | {
"start": 14447,
"end": 15822
} | class ____:
"""Represents a single radio button in a radio button group."""
_group: RadioButtonGroup
_annot_dict: Dictionary
def __init__(self, group: RadioButtonGroup, annot_dict: Dictionary, index: int):
"""Create a new option for a radio button group."""
self._group = group
self._annot_dict = annot_dict
self._index = index
@property
def states(self) -> Sequence[Name]:
"""List the possible states for this radio button.
Typically this will be /Off plus one additional arbitrary value representing the
on state.
"""
return (Name(key) for key in self._field.obj.AP.N.keys())
@property
def on_value(self) -> Name:
"""The underlying value associated with this button's "on" state."""
for name in self._annot_dict.AP.N.keys():
if name != Name.Off:
return Name(name)
def select(self):
"""Mark this as the selected option."""
self._group.value = self.on_value
@property
def checked(self) -> bool:
"""Is this is the currently selected option?"""
return self.on_value == self._group.value
@checked.setter
def checked(self, value: bool):
if value:
self._group.value = self.on_value
else:
self._group.value = Name.Off
| RadioButtonOption |
python | pytorch__pytorch | torch/utils/data/datapipes/dataframe/dataframes.py | {
"start": 895,
"end": 940
} | class ____:
disabled = False
| CaptureControl |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/integration/filters.py | {
"start": 7114,
"end": 9121
} | class ____[TRemoteConfig: RemoteConfig](TargetFilter[TRemoteConfig]):
"""Target filter for remote Ansible Core CI managed hosts."""
def filter_profiles[THostProfile: HostProfile](self, profiles: list[THostProfile], target: IntegrationTarget) -> list[THostProfile]:
"""Filter the list of profiles, returning only those which are not skipped for the given target."""
profiles = super().filter_profiles(profiles, target)
skipped_profiles = [profile for profile in profiles if any(skip in target.skips for skip in get_remote_skip_aliases(profile.config))]
if skipped_profiles:
configs: list[TRemoteConfig] = [profile.config for profile in skipped_profiles]
display.warning(f'Excluding skipped hosts from inventory: {", ".join(config.name for config in configs)}')
profiles = [profile for profile in profiles if profile not in skipped_profiles]
return profiles
def filter_targets(self, targets: list[IntegrationTarget], exclude: set[str]) -> None:
"""Filter the list of targets, adding any which this host profile cannot support to the provided exclude list."""
super().filter_targets(targets, exclude)
if len(self.configs) > 1:
host_skips = {host.name: get_remote_skip_aliases(host) for host in self.configs}
# Skip only targets which skip all hosts.
# Targets that skip only some hosts will be handled during inventory generation.
skipped = [target.name for target in targets if all(any(skip in target.skips for skip in skips) for skips in host_skips.values())]
if skipped:
exclude.update(skipped)
display.warning(f'Excluding tests which do not support {", ".join(host_skips.keys())}: {", ".join(skipped)}')
else:
skips = get_remote_skip_aliases(self.config)
for skip, reason in skips.items():
self.skip(skip, reason, targets, exclude)
| RemoteTargetFilter |
python | scipy__scipy | scipy/special/tests/test_sph_harm.py | {
"start": 104,
"end": 1780
} | class ____:
@pytest.mark.slow
def test_p(self):
m_max = 20
n_max = 10
theta = np.linspace(0, np.pi)
phi = np.linspace(0, 2*np.pi)
theta, phi = np.meshgrid(theta, phi)
y, y_jac, y_hess = sc.sph_harm_y_all(n_max, m_max, theta, phi, diff_n=2)
p, p_jac, p_hess = sc.sph_legendre_p_all(n_max, m_max, theta, diff_n=2)
m = np.concatenate([np.arange(m_max + 1), np.arange(-m_max, 0)])
m = np.expand_dims(m, axis=(0,)+tuple(range(2,theta.ndim+2)))
assert_allclose(y, p * np.exp(1j * m * phi))
assert_allclose(y_jac[..., 0], p_jac * np.exp(1j * m * phi))
assert_allclose(y_jac[..., 1], 1j * m * p * np.exp(1j * m * phi))
assert_allclose(y_hess[..., 0, 0], p_hess * np.exp(1j * m * phi))
assert_allclose(y_hess[..., 0, 1], 1j * m * p_jac * np.exp(1j * m * phi))
assert_allclose(y_hess[..., 1, 0], y_hess[..., 0, 1])
assert_allclose(y_hess[..., 1, 1], -m * m * p * np.exp(1j * m * phi))
@pytest.mark.parametrize("n_max", [7, 10, 50])
@pytest.mark.parametrize("m_max", [1, 4, 5, 9, 14])
def test_all(self, n_max, m_max):
theta = np.linspace(0, np.pi)
phi = np.linspace(0, 2 * np.pi)
n = np.arange(n_max + 1)
n = np.expand_dims(n, axis=tuple(range(1,theta.ndim+2)))
m = np.concatenate([np.arange(m_max + 1), np.arange(-m_max, 0)])
m = np.expand_dims(m, axis=(0,)+tuple(range(2,theta.ndim+2)))
y_actual = sc.sph_harm_y_all(n_max, m_max, theta, phi)
y_desired = sc.sph_harm_y(n, m, theta, phi)
np.testing.assert_allclose(y_actual, y_desired, rtol=1e-05)
| TestSphHarm |
python | pyca__cryptography | tests/hazmat/primitives/decrepit/test_algorithms.py | {
"start": 7393,
"end": 7953
} | class ____:
def test_key_size(self):
cipher = IDEA(b"\x00" * 16)
assert cipher.key_size == 128
def test_invalid_key_size(self):
with pytest.raises(ValueError):
IDEA(b"\x00" * 17)
def test_invalid_key_type(self):
with pytest.raises(TypeError, match="key must be bytes"):
IDEA("0" * 16) # type: ignore[arg-type]
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
IDEA(b"\x00" * 16), modes.ECB()
),
skip_message="Does not support IDEA ECB",
)
| TestIDEA |
python | streamlit__streamlit | lib/tests/streamlit/elements/layouts_test.py | {
"start": 14358,
"end": 23636
} | class ____(DeltaGeneratorTestCase):
def test_border_parameter(self):
"""Test that it can be called with border parameter"""
st.container(border=True)
container_block = self.get_delta_from_queue()
assert container_block.add_block.flex_container.border
def test_allow_empty_with_border(self):
"""Test that it allows empty when the container has a border."""
st.container(border=True)
container_block = self.get_delta_from_queue()
assert container_block.add_block.allow_empty
def test_disallow_empty_without_border_or_height(self):
"""Test that it disallows empty when no border or height is set."""
st.container()
container_block = self.get_delta_from_queue()
assert not container_block.add_block.allow_empty
def test_without_parameters(self):
"""Test that it can be called without any parameters."""
st.container()
container_block = self.get_delta_from_queue()
assert not container_block.add_block.flex_container.border
assert not container_block.add_block.allow_empty
assert container_block.add_block.id == ""
def test_setting_key(self):
"""Test that the key can be set and that it is included in the
generated element ID."""
st.container(key="container_key")
container_block = self.get_delta_from_queue()
assert "container_key" in container_block.add_block.id
def test_height_parameter(self):
"""Test that it can be called with height parameter"""
st.container(height=100)
container_block = self.get_delta_from_queue()
assert container_block.add_block.height_config.pixel_height == 100
# Should allow empty and have a border as default:
assert container_block.add_block.flex_container.border
assert container_block.add_block.allow_empty
def test_width_config(self):
"""Test that width configuration works correctly"""
st.container(width=200)
container_block = self.get_delta_from_queue()
assert container_block.add_block.width_config.pixel_width == 200
st.container(width="stretch")
container_block = self.get_delta_from_queue()
assert container_block.add_block.width_config.use_stretch
st.container(width="content")
container_block = self.get_delta_from_queue()
assert container_block.add_block.width_config.use_content
@parameterized.expand(
[
(None,),
("invalid",),
(-100,),
(0,),
]
)
def test_invalid_width(self, invalid_width):
"""Test that invalid width values raise an error"""
with pytest.raises(StreamlitAPIException):
st.container(width=invalid_width)
def test_height_config(self):
"""Test that height configuration works correctly"""
st.container(height=200)
container_block = self.get_delta_from_queue()
assert container_block.add_block.height_config.pixel_height == 200
st.container(height="stretch")
container_block = self.get_delta_from_queue()
assert container_block.add_block.height_config.use_stretch
st.container(height="content")
container_block = self.get_delta_from_queue()
assert container_block.add_block.height_config.use_content
@parameterized.expand(
[
(None,),
("invalid",),
(-100,),
(0,),
]
)
def test_invalid_height(self, invalid_height):
"""Test that invalid height values raise an error"""
with pytest.raises(StreamlitAPIException):
st.container(height=invalid_height)
@parameterized.expand(
[
(False, BlockProto.FlexContainer.Direction.VERTICAL),
(True, BlockProto.FlexContainer.Direction.HORIZONTAL),
],
)
def test_container_direction(
self, direction: bool, expected_direction: int
) -> None:
"""Test that st.container sets the correct direction."""
st.container(horizontal=direction)
container_block = self.get_delta_from_queue()
assert container_block.add_block.flex_container.direction == expected_direction
@parameterized.expand(
[
("left", BlockProto.FlexContainer.Justify.JUSTIFY_START),
("center", BlockProto.FlexContainer.Justify.JUSTIFY_CENTER),
("right", BlockProto.FlexContainer.Justify.JUSTIFY_END),
("distribute", BlockProto.FlexContainer.Justify.SPACE_BETWEEN),
]
)
def test_container_horizontal_alignment(
self, horizontal_alignment: str, expected_justify: int
) -> None:
"""Test that st.container sets the correct horizontal alignment (justify)."""
st.container(horizontal=True, horizontal_alignment=horizontal_alignment)
container_block = self.get_delta_from_queue()
assert container_block.add_block.flex_container.justify == expected_justify
@parameterized.expand(
[
("top", BlockProto.FlexContainer.Align.ALIGN_START),
("center", BlockProto.FlexContainer.Align.ALIGN_CENTER),
("bottom", BlockProto.FlexContainer.Align.ALIGN_END),
("distribute", BlockProto.FlexContainer.Align.ALIGN_UNDEFINED),
],
)
def test_container_vertical_alignment(
self, vertical_alignment: str, expected_align: int
) -> None:
"""Test that st.container sets the correct vertical alignment (align)."""
st.container(horizontal=True, vertical_alignment=vertical_alignment)
container_block = self.get_delta_from_queue()
assert container_block.add_block.flex_container.align == expected_align
@parameterized.expand(
[
("top", BlockProto.FlexContainer.Justify.JUSTIFY_START),
("center", BlockProto.FlexContainer.Justify.JUSTIFY_CENTER),
("bottom", BlockProto.FlexContainer.Justify.JUSTIFY_END),
("distribute", BlockProto.FlexContainer.Justify.SPACE_BETWEEN),
]
)
def test_container_vertical_direction_vertical_alignment(
self, vertical_alignment: str, expected_justify: int
) -> None:
"""Test that st.container with direction='vertical' sets the correct justify value for vertical_alignment."""
st.container(horizontal=False, vertical_alignment=vertical_alignment)
container_block = self.get_delta_from_queue()
assert container_block.add_block.flex_container.justify == expected_justify
@parameterized.expand(
[
("left", BlockProto.FlexContainer.Align.ALIGN_START),
("center", BlockProto.FlexContainer.Align.ALIGN_CENTER),
("right", BlockProto.FlexContainer.Align.ALIGN_END),
("distribute", BlockProto.FlexContainer.Align.ALIGN_UNDEFINED),
]
)
def test_container_vertical_direction_horizontal_alignment(
self, horizontal_alignment: str, expected_align: int
) -> None:
"""Test that st.container with direction='vertical' sets the correct align value for horizontal_alignment."""
st.container(horizontal=False, horizontal_alignment=horizontal_alignment)
container_block = self.get_delta_from_queue()
assert container_block.add_block.flex_container.align == expected_align
@parameterized.expand(
[
(True, True),
(False, False),
],
)
def test_container_wrap(self, direction: bool, wrap: bool) -> None:
"""Test that st.container sets the wrap property correctly."""
st.container(horizontal=direction)
container_block = self.get_delta_from_queue()
assert container_block.add_block.flex_container.wrap == wrap
@parameterized.expand(
[
("small", GapSize.SMALL),
("medium", GapSize.MEDIUM),
("large", GapSize.LARGE),
(None, GapSize.NONE),
],
)
def test_container_gap(self, gap, expected_gap) -> None:
"""Test that st.container sets the gap property correctly."""
st.container(gap=gap)
container_block = self.get_delta_from_queue()
assert (
container_block.add_block.flex_container.gap_config.gap_size == expected_gap
)
@parameterized.expand(
[
"invalid",
None,
],
)
def test_container_invalid_horizontal_alignment(self, horizontal_alignment) -> None:
"""Test that st.container raises on invalid horizontal_alignment."""
import streamlit as st
with pytest.raises(StreamlitInvalidHorizontalAlignmentError):
st.container(horizontal=True, horizontal_alignment=horizontal_alignment)
@parameterized.expand(
[
"invalid",
None,
],
)
def test_container_invalid_vertical_alignment(self, vertical_alignment) -> None:
"""Test that st.container raises on invalid vertical_alignment."""
import streamlit as st
with pytest.raises(StreamlitInvalidVerticalAlignmentError):
st.container(horizontal=True, vertical_alignment=vertical_alignment)
| ContainerTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-iterable/source_iterable/slice_generators.py | {
"start": 2454,
"end": 6482
} | class ____(SliceGenerator):
"""
Generate slices from start_date up to current date. Every next slice could
have different range based on was the previous slice processed successfully
and how much time it took.
The alghorithm is following:
1. First slice have INITIAL_RANGE_DAYS (30 days) length.
2. When slice is processed by stream this class expect "adjust_range"
method to be called with parameter how much time it took to process
previous request
3. Knowing previous slice range we can calculate days per minute processing
speed. Dividing this speed by REQUEST_PER_MINUTE_LIMIT (4) we can calculate
next slice range. Next range cannot be greater than MAX_RANGE_DAYS (180 days)
If processing of previous slice havent been completed "reduce_range" method
should be called. It would reset next range start date to previous slice
and reduce next slice range by RANGE_REDUCE_FACTOR (2 times)
In case if range havent been adjusted before getting next slice (it could
happend if there were no records for given date range), next slice would
have MAX_RANGE_DAYS (180) length.
"""
REQUEST_PER_MINUTE_LIMIT = 4
INITIAL_RANGE_DAYS: int = 30
DEFAULT_RANGE_DAYS: int = 90
MAX_RANGE_DAYS: int = 180
RANGE_REDUCE_FACTOR = 2
# This variable play important roles: stores length of previos range before
# next adjusting next slice lenght and provide length of next slice after
# adjusting
_current_range: int = INITIAL_RANGE_DAYS
# Save previous start date in case if slice processing fail and we need to
# go back to previous range.
_prev_start_date: DateTime = None
# In case if adjust_range method havent been called (no records for slice)
# next range would have MAX_RANGE_DAYS length
# Default is True so for first slice it would length would be INITIAL_RANGE_DAYS (30 days)
_range_adjusted = True
def adjust_range(self, previous_request_time: Period):
"""
Calculate next slice length in days based on previous slice length and
processing time.
"""
minutes_spent = previous_request_time.total_minutes()
if minutes_spent == 0:
self._current_range = self.DEFAULT_RANGE_DAYS
else:
days_per_minute = self._current_range / minutes_spent
next_range = math.floor(days_per_minute / self.REQUEST_PER_MINUTE_LIMIT)
self._current_range = min(next_range or self.DEFAULT_RANGE_DAYS, self.MAX_RANGE_DAYS)
self._range_adjusted = True
def reduce_range(self) -> StreamSlice:
"""
This method is supposed to be called when slice processing failed.
Reset next slice start date to previous one and reduce slice range by
RANGE_REDUCE_FACTOR (2 times).
Returns updated slice to try again.
"""
self._current_range = int(max(self._current_range / self.RANGE_REDUCE_FACTOR, self.INITIAL_RANGE_DAYS))
start_date = self._prev_start_date
end_date = min(self._end_date, start_date + (pendulum.Duration(days=self._current_range)))
self._start_date = end_date
return StreamSlice(start_date=start_date, end_date=end_date)
def __next__(self) -> StreamSlice:
"""
Generates next slice based on prevouis slice processing result. All the
next slice range calculations should be done after calling adjust_range
and reduce_range methods.
"""
if self._start_date >= self._end_date:
raise StopIteration()
if not self._range_adjusted:
self._current_range = self.MAX_RANGE_DAYS
next_start_date = min(self._end_date, self._start_date + pendulum.Duration(days=self._current_range))
slice = StreamSlice(start_date=self._start_date, end_date=next_start_date)
self._prev_start_date = self._start_date
self._start_date = next_start_date
self._range_adjusted = False
return slice
| AdjustableSliceGenerator |
python | huggingface__transformers | tests/models/superglue/test_image_processing_superglue.py | {
"start": 1539,
"end": 5029
} | class ____:
def __init__(
self,
parent,
batch_size=6,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_grayscale=True,
):
size = size if size is not None else {"height": 480, "width": 640}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_grayscale = do_grayscale
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_grayscale": self.do_grayscale,
}
def expected_output_image_shape(self, images):
return 2, self.num_channels, self.size["height"], self.size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False, pairs=True, batch_size=None):
batch_size = batch_size if batch_size is not None else self.batch_size
image_inputs = prepare_image_inputs(
batch_size=batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
if pairs:
image_inputs = [image_inputs[i : i + 2] for i in range(0, len(image_inputs), 2)]
return image_inputs
def prepare_keypoint_matching_output(self, pixel_values):
max_number_keypoints = 50
batch_size = len(pixel_values)
mask = torch.zeros((batch_size, 2, max_number_keypoints), dtype=torch.int)
keypoints = torch.zeros((batch_size, 2, max_number_keypoints, 2))
matches = torch.full((batch_size, 2, max_number_keypoints), -1, dtype=torch.int)
scores = torch.zeros((batch_size, 2, max_number_keypoints))
for i in range(batch_size):
random_number_keypoints0 = np.random.randint(10, max_number_keypoints)
random_number_keypoints1 = np.random.randint(10, max_number_keypoints)
random_number_matches = np.random.randint(5, min(random_number_keypoints0, random_number_keypoints1))
mask[i, 0, :random_number_keypoints0] = 1
mask[i, 1, :random_number_keypoints1] = 1
keypoints[i, 0, :random_number_keypoints0] = torch.rand((random_number_keypoints0, 2))
keypoints[i, 1, :random_number_keypoints1] = torch.rand((random_number_keypoints1, 2))
random_matches_indices0 = torch.randperm(random_number_keypoints1, dtype=torch.int)[:random_number_matches]
random_matches_indices1 = torch.randperm(random_number_keypoints0, dtype=torch.int)[:random_number_matches]
matches[i, 0, random_matches_indices1] = random_matches_indices0
matches[i, 1, random_matches_indices0] = random_matches_indices1
scores[i, 0, random_matches_indices1] = torch.rand((random_number_matches,))
scores[i, 1, random_matches_indices0] = torch.rand((random_number_matches,))
return SuperGlueKeypointMatchingOutput(mask=mask, keypoints=keypoints, matches=matches, matching_scores=scores)
@require_torch
@require_vision
| SuperGlueImageProcessingTester |
python | dagster-io__dagster | python_modules/libraries/dagster-powerbi/dagster_powerbi/translator.py | {
"start": 5036,
"end": 5632
} | class ____(NamespacedMetadataSet):
web_url: Optional[UrlMetadataValue] = None
id: Optional[str] = None
name: Optional[str] = None
@classmethod
def namespace(cls) -> str:
return "dagster-powerbi"
def _build_table_metadata(table: dict[str, Any]) -> TableMetadataSet:
return TableMetadataSet(
table_name=table["name"],
column_schema=TableSchema(
columns=[
TableColumn(name=column["name"].lower(), type=column.get("dataType"))
for column in table["columns"]
]
),
)
| PowerBIMetadataSet |
python | PrefectHQ__prefect | src/prefect/client/schemas/objects.py | {
"start": 58332,
"end": 58728
} | class ____(PrefectBaseModel):
"""
Worker metadata.
We depend on the structure of `integrations`, but otherwise, worker classes
should support flexible metadata.
"""
integrations: list[Integration] = Field(
default=..., description="Prefect integrations installed in the worker."
)
model_config: ClassVar[ConfigDict] = ConfigDict(extra="allow")
| WorkerMetadata |
python | huggingface__transformers | src/transformers/models/wav2vec2/modeling_wav2vec2.py | {
"start": 29240,
"end": 32283
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.pos_conv_embed = Wav2Vec2PositionalConvEmbedding(config)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layers = nn.ModuleList(
[Wav2Vec2EncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
# make sure padded tokens output 0
expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
hidden_states[~expand_attention_mask] = 0
attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=hidden_states,
attention_mask=attention_mask,
)
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.dropout(hidden_states)
synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)
for layer in self.layers:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
dropout_probability = torch.rand([])
skip_the_layer = self.training and dropout_probability < self.config.layerdrop
if not skip_the_layer or synced_gpus:
# under fsdp or deepspeed zero3 all gpus must run in sync
# XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication
layer_outputs = layer(
hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
)
hidden_states = layer_outputs[0]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
| Wav2Vec2EncoderStableLayerNorm |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI012.py | {
"start": 878,
"end": 909
} | class ____:
eyes: int = 2
| Dog |
python | PrefectHQ__prefect | tests/test_task_engine.py | {
"start": 20040,
"end": 27730
} | class ____:
def test_basic(self):
@task
def foo():
return 42
result = run_task_sync(foo)
assert result == 42
def test_with_params(self):
@task
def bar(x: int, y: Optional[str] = None):
return x, y
parameters = get_call_parameters(bar.fn, (42,), dict(y="nate"))
result = run_task_sync(bar, parameters=parameters)
assert result == (42, "nate")
def test_with_args(self):
@task
def f(*args):
return args
args = (42, "nate")
result = f(*args)
assert result == args
def test_with_kwargs(self):
@task
def f(**kwargs):
return kwargs
kwargs = dict(x=42, y="nate")
result = f(**kwargs)
assert result == kwargs
def test_with_args_kwargs(self):
@task
def f(*args, x, **kwargs):
return args, x, kwargs
result = f(1, 2, x=5, y=6, z=7)
assert result == ((1, 2), 5, dict(y=6, z=7))
async def test_task_run_name(self, prefect_client, events_pipeline):
@task(task_run_name="name is {x}")
def foo(x):
return TaskRunContext.get().task_run.id
result = run_task_sync(foo, parameters=dict(x="blue"))
await events_pipeline.process_events()
run = await prefect_client.read_task_run(result)
assert run.name == "name is blue"
def test_get_run_logger(self, caplog):
caplog.set_level(logging.CRITICAL)
@task(task_run_name="test-run")
def my_log_task():
get_run_logger().critical("hey yall")
result = run_task_sync(my_log_task)
assert result is None
record = next((r for r in caplog.records if r.message == "hey yall"), None)
assert record is not None, "Couldn't find expected log record"
assert record.task_name == "my_log_task"
assert record.task_run_name == "test-run"
assert UUID(record.task_run_id)
assert record.message == "hey yall"
assert record.levelname == "CRITICAL"
def test_flow_run_id_is_set(self, prefect_client):
flow_run_id = None
@task
def foo():
return TaskRunContext.get().task_run.flow_run_id
@flow
def workflow():
nonlocal flow_run_id
flow_run_id = get_run_context().flow_run.id
return run_task_sync(foo)
assert workflow() == flow_run_id
async def test_task_ends_in_completed(self, prefect_client, events_pipeline):
@task
def foo():
return TaskRunContext.get().task_run.id
result = run_task_sync(foo)
await events_pipeline.process_events()
run = await prefect_client.read_task_run(result)
assert run.state_type == StateType.COMPLETED
async def test_task_ends_in_failed(self, prefect_client, events_pipeline):
ID = None
@task
def foo():
nonlocal ID
ID = TaskRunContext.get().task_run.id
raise ValueError("xyz")
with pytest.raises(ValueError, match="xyz"):
run_task_sync(foo)
await events_pipeline.process_events()
run = await prefect_client.read_task_run(ID)
assert run.state_type == StateType.FAILED
async def test_task_ends_in_failed_after_retrying(
self, prefect_client, events_pipeline
):
ID = None
@task(retries=1)
def foo():
nonlocal ID
if ID is None:
ID = TaskRunContext.get().task_run.id
raise ValueError("xyz")
else:
return ID
result = run_task_sync(foo)
await events_pipeline.process_events()
run = await prefect_client.read_task_run(result)
assert run.state_type == StateType.COMPLETED
async def test_task_tracks_nested_parent_as_dependency(
self, prefect_client, events_pipeline
):
@task
def inner():
return TaskRunContext.get().task_run.id
@task
def outer():
id1 = inner()
return (id1, TaskRunContext.get().task_run.id)
a, b = run_task_sync(outer)
assert a != b
await events_pipeline.process_events()
# assertions on outer
outer_run = await prefect_client.read_task_run(b)
assert outer_run.task_inputs == {}
# assertions on inner
inner_run = await prefect_client.read_task_run(a)
assert "__parents__" in inner_run.task_inputs
assert inner_run.task_inputs["__parents__"][0].id == b
async def test_task_runs_respect_result_persistence(
self, prefect_client, events_pipeline
):
@task(persist_result=False)
def no_persist():
ctx = TaskRunContext.get()
assert ctx
return ctx.task_run.id
@task(persist_result=True)
def persist():
ctx = TaskRunContext.get()
assert ctx
return ctx.task_run.id
# assert no persistence
run_id = run_task_sync(no_persist)
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(run_id)
api_state = task_run.state
with pytest.raises(MissingResult):
await api_state.result()
# assert persistence
run_id = run_task_sync(persist)
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(run_id)
api_state = task_run.state
assert await api_state.result() == run_id
async def test_task_runs_respect_cache_key(self):
@task(cache_key_fn=lambda *args, **kwargs: "key", persist_result=True)
def first():
return 42
@task(cache_key_fn=lambda *args, **kwargs: "key", persist_result=True)
def second():
return 500
one = run_task_sync(first)
two = run_task_sync(second)
assert one == 42
assert two == 42
async def test_task_run_states(
self,
prefect_client,
events_pipeline,
):
@task
def foo():
return TaskRunContext.get().task_run.id
task_run_id = run_task_sync(foo)
await events_pipeline.process_events()
states = await prefect_client.read_task_run_states(task_run_id)
state_names = [state.name for state in states]
assert state_names == [
"Pending",
"Running",
"Completed",
]
async def test_task_run_states_with_equal_timestamps(
self,
prefect_client,
events_pipeline,
):
@task
def foo():
return TaskRunContext.get().task_run.id
original_set_state = SyncTaskRunEngine.set_state
def alter_new_state_timestamp(engine, state, force=False):
"""Give the new state the same timestamp as the current state."""
state.timestamp = engine.task_run.state.timestamp
return original_set_state(engine, state, force=force)
with patch.object(SyncTaskRunEngine, "set_state", alter_new_state_timestamp):
task_run_id = run_task_sync(foo)
await events_pipeline.process_events()
states = await prefect_client.read_task_run_states(task_run_id)
state_names = [state.name for state in states]
assert state_names == [
"Pending",
"Running",
"Completed",
]
assert states[0].timestamp < states[1].timestamp < states[2].timestamp
| TestTaskRunsSync |
python | PyCQA__flake8 | src/flake8/statistics.py | {
"start": 191,
"end": 2215
} | class ____:
"""Manager of aggregated statistics for a run of Flake8."""
def __init__(self) -> None:
"""Initialize the underlying dictionary for our statistics."""
self._store: dict[Key, Statistic] = {}
def error_codes(self) -> list[str]:
"""Return all unique error codes stored.
:returns:
Sorted list of error codes.
"""
return sorted({key.code for key in self._store})
def record(self, error: Violation) -> None:
"""Add the fact that the error was seen in the file.
:param error:
The Violation instance containing the information about the
violation.
"""
key = Key.create_from(error)
if key not in self._store:
self._store[key] = Statistic.create_from(error)
self._store[key].increment()
def statistics_for(
self, prefix: str, filename: str | None = None,
) -> Generator[Statistic]:
"""Generate statistics for the prefix and filename.
If you have a :class:`Statistics` object that has recorded errors,
you can generate the statistics for a prefix (e.g., ``E``, ``E1``,
``W50``, ``W503``) with the optional filter of a filename as well.
.. code-block:: python
>>> stats = Statistics()
>>> stats.statistics_for('E12',
filename='src/flake8/statistics.py')
<generator ...>
>>> stats.statistics_for('W')
<generator ...>
:param prefix:
The error class or specific error code to find statistics for.
:param filename:
(Optional) The filename to further filter results by.
:returns:
Generator of instances of :class:`Statistic`
"""
matching_errors = sorted(
key for key in self._store if key.matches(prefix, filename)
)
for error_code in matching_errors:
yield self._store[error_code]
| Statistics |
python | python-openxml__python-docx | src/docx/oxml/shape.py | {
"start": 6914,
"end": 7055
} | class ____(BaseOxmlElement):
"""``<a:fillRect>`` element, specifying picture should fill containing rectangle
shape."""
| CT_RelativeRect |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/asset/__init__.py | {
"start": 21304,
"end": 21561
} | class ____(attrs.AttrsInstance):
"""Representation of asset event to be triggered by an asset alias."""
source_alias_name: str
dest_asset_key: AssetUniqueKey
dest_asset_extra: dict[str, JsonValue]
extra: dict[str, JsonValue]
| AssetAliasEvent |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 97685,
"end": 97857
} | class ____:
xlPriorityHigh = -4127 # from enum XlPriority
xlPriorityLow = -4134 # from enum XlPriority
xlPriorityNormal = -4143 # from enum XlPriority
| Priority |
python | donnemartin__interactive-coding-challenges | sorting_searching/radix_sort/test_radix_sort.py | {
"start": 18,
"end": 543
} | class ____(unittest.TestCase):
def test_sort(self):
radix_sort = RadixSort()
self.assertRaises(TypeError, radix_sort.sort, None)
self.assertEqual(radix_sort.sort([]), [])
array = [128, 256, 164, 8, 2, 148, 212, 242, 244]
expected = [2, 8, 128, 148, 164, 212, 242, 244, 256]
self.assertEqual(radix_sort.sort(array), expected)
print('Success: test_sort')
def main():
test = TestRadixSort()
test.test_sort()
if __name__ == '__main__':
main()
| TestRadixSort |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.