language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
google__pytype
|
pytype/tools/traces/traces_test.py
|
{
"start": 11137,
"end": 12139
}
|
class ____(MatchAstTestCase):
def test_basic(self):
matches = self._get_traces("lambda x: x.upper()", ast.Lambda)
sym = "<lambda>"
self.assertTracesEqual(
matches, [((1, 0), "MAKE_FUNCTION", sym, ("Callable[[Any], Any]",))])
def test_function_locals(self):
matches = self._get_traces("""
def f():
return lambda x: x.upper()
""", ast.Lambda)
sym = "f.<locals>.<lambda>"
self.assertTracesEqual(
matches, [((2, 9), "MAKE_FUNCTION", sym, ("Callable[[Any], Any]",))])
def test_multiple_functions(self):
matches = self._get_traces("""
def f():
return (w for w in range(3)), lambda x: x.upper(), lambda y, z: (y, z)
""", ast.Lambda)
sym = "f.<locals>.<lambda>"
self.assertTracesEqual(
matches, [
((2, 32), "MAKE_FUNCTION", sym, ("Callable[[Any], Any]",)),
((2, 53), "MAKE_FUNCTION", sym, ("Callable[[Any, Any], Any]",))])
if __name__ == "__main__":
unittest.main()
|
MatchLambdaTest
|
python
|
ansible__ansible
|
lib/ansible/playbook/base.py
|
{
"start": 29575,
"end": 33809
}
|
class ____(FieldAttributeBase):
name = NonInheritableFieldAttribute(isa='string', default='', always_post_validate=True)
# connection/transport
connection = ConnectionFieldAttribute(isa='string', default=context.cliargs_deferred_get('connection'))
port = FieldAttribute(isa='int')
remote_user = FieldAttribute(isa='string', default=context.cliargs_deferred_get('remote_user'))
# variables
vars = NonInheritableFieldAttribute(isa='dict', priority=100, static=True, default=dict)
# module default params
module_defaults = FieldAttribute(isa='list', extend=True, prepend=True)
# flags and misc. settings
environment = FieldAttribute(isa='list', extend=True, prepend=True)
no_log = FieldAttribute(isa='bool', default=C.DEFAULT_NO_LOG)
run_once = FieldAttribute(isa='bool')
ignore_errors = FieldAttribute(isa='bool')
ignore_unreachable = FieldAttribute(isa='bool')
check_mode = FieldAttribute(isa='bool', default=context.cliargs_deferred_get('check'))
diff = FieldAttribute(isa='bool', default=context.cliargs_deferred_get('diff'))
any_errors_fatal = FieldAttribute(isa='bool', default=C.ANY_ERRORS_FATAL)
throttle = FieldAttribute(isa='int', default=0)
timeout = FieldAttribute(isa='int', default=C.TASK_TIMEOUT)
# explicitly invoke a debugger on tasks
debugger = FieldAttribute(isa='string')
# Privilege escalation
become = FieldAttribute(isa='bool', default=context.cliargs_deferred_get('become'))
become_method = FieldAttribute(isa='string', default=context.cliargs_deferred_get('become_method'))
become_user = FieldAttribute(isa='string', default=context.cliargs_deferred_get('become_user'))
become_flags = FieldAttribute(isa='string', default=context.cliargs_deferred_get('become_flags'))
become_exe = FieldAttribute(isa='string', default=context.cliargs_deferred_get('become_exe'))
# used to hold sudo/su stuff
DEPRECATED_ATTRIBUTES = [] # type: list[str]
def update_result_no_log(self, templar: TemplateEngine, result: dict[str, t.Any]) -> None:
"""Set the post-validated no_log value for the result, falling back to a default on validation/templating failure with a warning."""
if self.finalized:
no_log = self.no_log
else:
try:
no_log = self.post_validate_attribute('no_log', templar=templar)
except Exception as ex:
display.error_as_warning('Invalid no_log value for task, output will be masked.', exception=ex)
no_log = True
result_no_log = result.get('_ansible_no_log', False)
if not isinstance(result_no_log, bool):
display.warning(f'Invalid _ansible_no_log value of type {type(result_no_log).__name__!r} in task result, output will be masked.')
no_log = True
no_log = no_log or result_no_log
result.update(_ansible_no_log=no_log)
def get_path(self) -> str:
""" return the absolute path of the playbook object and its line number """
origin = self._origin
if not origin:
try:
origin = self._parent._play._origin
except AttributeError:
pass
if origin and origin.path:
path = f"{origin.path}:{origin.line_num or 1}"
else:
path = ""
return path
def get_dep_chain(self):
if hasattr(self, '_parent') and self._parent:
return self._parent.get_dep_chain()
else:
return None
def get_search_path(self):
"""
Return the list of paths you should search for files, in order.
This follows role/playbook dependency chain.
"""
path_stack = []
dep_chain = self.get_dep_chain()
# inside role: add the dependency chain from current to dependent
if dep_chain:
path_stack.extend(reversed([x._role_path for x in dep_chain if hasattr(x, '_role_path')]))
# add path of task itself, unless it is already in the list
task_dir = os.path.dirname(self.get_path())
if task_dir not in path_stack:
path_stack.append(task_dir)
return path_stack
|
Base
|
python
|
getsentry__sentry
|
tests/sentry/api/endpoints/test_api_applications.py
|
{
"start": 209,
"end": 875
}
|
class ____(APITestCase):
def test_simple(self) -> None:
app1 = ApiApplication.objects.create(owner=self.user, name="a")
app2 = ApiApplication.objects.create(owner=self.user, name="b")
ApiApplication.objects.create(owner=self.create_user("foo@example.com"))
self.login_as(self.user)
url = reverse("sentry-api-0-api-applications")
response = self.client.get(url)
assert response.status_code == 200, response.content
assert len(response.data) == 2
assert response.data[0]["id"] == app1.client_id
assert response.data[1]["id"] == app2.client_id
@control_silo_test
|
ApiApplicationsListTest
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/metadata_service/lib/metadata_service/models/generated/ConnectorMetadataDefinitionV0.py
|
{
"start": 6090,
"end": 6787
}
|
class ____(BaseModel):
class Config:
extra = Extra.forbid
commit_sha: Optional[str] = Field(
None,
description="The git commit sha of the last commit that modified this file.",
)
commit_timestamp: Optional[datetime] = Field(
None,
description="The git commit timestamp of the last commit that modified this file.",
)
commit_author: Optional[str] = Field(
None,
description="The git commit author of the last commit that modified this file.",
)
commit_author_email: Optional[str] = Field(
None,
description="The git commit author email of the last commit that modified this file.",
)
|
GitInfo
|
python
|
spack__spack
|
lib/spack/spack/cmd/stage.py
|
{
"start": 441,
"end": 3646
}
|
class ____:
"""
Encapsulation of reasons to skip staging
"""
def __init__(self, exclusions, skip_installed):
"""
:param exclusions: A list of specs to skip if satisfied.
:param skip_installed: A boolean indicating whether to skip already installed specs.
"""
self.exclusions = exclusions
self.skip_installed = skip_installed
def __call__(self, spec):
"""filter action, true means spec should be filtered"""
if spec.external:
return True
if self.skip_installed and spec.installed:
return True
if any(spec.satisfies(exclude) for exclude in self.exclusions):
return True
return False
def setup_parser(subparser: argparse.ArgumentParser) -> None:
arguments.add_common_arguments(subparser, ["no_checksum", "specs"])
subparser.add_argument(
"-p", "--path", dest="path", help="path to stage package, does not add to spack tree"
)
subparser.add_argument(
"-e",
"--exclude",
action="append",
default=[],
help="exclude packages that satisfy the specified specs",
)
subparser.add_argument(
"-s", "--skip-installed", action="store_true", help="dont restage already installed specs"
)
arguments.add_concretizer_args(subparser)
def stage(parser, args):
if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line")
exclusion_specs = spack.cmd.parse_specs(args.exclude, concretize=False)
filter = StageFilter(exclusion_specs, args.skip_installed)
if not args.specs:
env = ev.active_environment()
if not env:
tty.die("`spack stage` requires a spec or an active environment")
return _stage_env(env, filter)
specs = spack.cmd.parse_specs(args.specs, concretize=False)
# We temporarily modify the working directory when setting up a stage, so we need to
# convert this to an absolute path here in order for it to remain valid later.
custom_path = os.path.abspath(args.path) if args.path else None
# prevent multiple specs from extracting in the same folder
if len(specs) > 1 and custom_path:
tty.die("`--path` requires a single spec, but multiple were provided")
specs = spack.cmd.matching_specs_from_env(specs)
for spec in specs:
spec = spack.cmd.matching_spec_from_env(spec)
if filter(spec):
continue
pkg = spec.package
if custom_path:
pkg.path = custom_path
_stage(pkg)
def _stage_env(env: ev.Environment, filter):
tty.msg(f"Staging specs from environment {env.name}")
for spec in spack.traverse.traverse_nodes(env.concrete_roots()):
if filter(spec):
continue
_stage(spec.package)
def _stage(pkg: spack.package_base.PackageBase):
# Use context manager to ensure we don't restage while an installation is in progress
# keep = True ensures that the stage is not removed after exiting the context manager
pkg.stage.keep = True
with pkg.stage:
pkg.do_stage()
tty.msg(f"Staged {pkg.name} in {pkg.stage.path}")
|
StageFilter
|
python
|
pallets__click
|
src/click/exceptions.py
|
{
"start": 9042,
"end": 9561
}
|
class ____(ClickException):
"""Raised if a file cannot be opened."""
def __init__(self, filename: str, hint: str | None = None) -> None:
if hint is None:
hint = _("unknown error")
super().__init__(hint)
self.ui_filename: str = format_filename(filename)
self.filename = filename
def format_message(self) -> str:
return _("Could not open file {filename!r}: {message}").format(
filename=self.ui_filename, message=self.message
)
|
FileError
|
python
|
django__django
|
tests/gis_tests/geos_tests/test_mutable_list.py
|
{
"start": 1306,
"end": 17053
}
|
class ____(SimpleTestCase):
"""
Tests base class ListMixin by comparing a list clone which is
a ListMixin subclass with a real Python list.
"""
limit = 3
listType = UserListA
def lists_of_len(self, length=None):
if length is None:
length = self.limit
pl = list(range(length))
return pl, self.listType(pl)
def limits_plus(self, b):
return range(-self.limit - b, self.limit + b)
def step_range(self):
return [*range(-1 - self.limit, 0), *range(1, 1 + self.limit)]
def test01_getslice(self):
"Slice retrieval"
pl, ul = self.lists_of_len()
for i in self.limits_plus(1):
with self.subTest(i=i):
self.assertEqual(pl[i:], ul[i:], "slice [%d:]" % (i))
self.assertEqual(pl[:i], ul[:i], "slice [:%d]" % (i))
for j in self.limits_plus(1):
self.assertEqual(pl[i:j], ul[i:j], "slice [%d:%d]" % (i, j))
for k in self.step_range():
self.assertEqual(
pl[i:j:k], ul[i:j:k], "slice [%d:%d:%d]" % (i, j, k)
)
for k in self.step_range():
self.assertEqual(pl[i::k], ul[i::k], "slice [%d::%d]" % (i, k))
self.assertEqual(pl[:i:k], ul[:i:k], "slice [:%d:%d]" % (i, k))
for k in self.step_range():
with self.subTest(k=k):
self.assertEqual(pl[::k], ul[::k], "slice [::%d]" % (k))
def test02_setslice(self):
"Slice assignment"
def setfcn(x, i, j, k, L):
x[i:j:k] = range(L)
pl, ul = self.lists_of_len()
for slen in range(self.limit + 1):
ssl = nextRange(slen)
with self.subTest(slen=slen):
ul[:] = ssl
pl[:] = ssl
self.assertEqual(pl, ul[:], "set slice [:]")
for i in self.limits_plus(1):
ssl = nextRange(slen)
ul[i:] = ssl
pl[i:] = ssl
self.assertEqual(pl, ul[:], "set slice [%d:]" % (i))
ssl = nextRange(slen)
ul[:i] = ssl
pl[:i] = ssl
self.assertEqual(pl, ul[:], "set slice [:%d]" % (i))
for j in self.limits_plus(1):
ssl = nextRange(slen)
ul[i:j] = ssl
pl[i:j] = ssl
self.assertEqual(pl, ul[:], "set slice [%d:%d]" % (i, j))
for k in self.step_range():
ssl = nextRange(len(ul[i:j:k]))
ul[i:j:k] = ssl
pl[i:j:k] = ssl
self.assertEqual(
pl, ul[:], "set slice [%d:%d:%d]" % (i, j, k)
)
sliceLen = len(ul[i:j:k])
msg = (
f"attempt to assign sequence of size {sliceLen + 1} "
f"to extended slice of size {sliceLen}"
)
with self.assertRaisesMessage(ValueError, msg):
setfcn(ul, i, j, k, sliceLen + 1)
if sliceLen > 2:
msg = (
f"attempt to assign sequence of size {sliceLen - 1}"
f" to extended slice of size {sliceLen}"
)
with self.assertRaisesMessage(ValueError, msg):
setfcn(ul, i, j, k, sliceLen - 1)
for k in self.step_range():
ssl = nextRange(len(ul[i::k]))
ul[i::k] = ssl
pl[i::k] = ssl
self.assertEqual(pl, ul[:], "set slice [%d::%d]" % (i, k))
ssl = nextRange(len(ul[:i:k]))
ul[:i:k] = ssl
pl[:i:k] = ssl
self.assertEqual(pl, ul[:], "set slice [:%d:%d]" % (i, k))
for k in self.step_range():
ssl = nextRange(len(ul[::k]))
ul[::k] = ssl
pl[::k] = ssl
self.assertEqual(pl, ul[:], "set slice [::%d]" % (k))
def test03_delslice(self):
"Delete slice"
for Len in range(self.limit):
pl, ul = self.lists_of_len(Len)
with self.subTest(Len=Len):
del pl[:]
del ul[:]
self.assertEqual(pl[:], ul[:], "del slice [:]")
for i in range(-Len - 1, Len + 1):
pl, ul = self.lists_of_len(Len)
del pl[i:]
del ul[i:]
self.assertEqual(pl[:], ul[:], "del slice [%d:]" % (i))
pl, ul = self.lists_of_len(Len)
del pl[:i]
del ul[:i]
self.assertEqual(pl[:], ul[:], "del slice [:%d]" % (i))
for j in range(-Len - 1, Len + 1):
pl, ul = self.lists_of_len(Len)
del pl[i:j]
del ul[i:j]
self.assertEqual(pl[:], ul[:], "del slice [%d:%d]" % (i, j))
for k in [*range(-Len - 1, 0), *range(1, Len)]:
pl, ul = self.lists_of_len(Len)
del pl[i:j:k]
del ul[i:j:k]
self.assertEqual(
pl[:], ul[:], "del slice [%d:%d:%d]" % (i, j, k)
)
for k in [*range(-Len - 1, 0), *range(1, Len)]:
pl, ul = self.lists_of_len(Len)
del pl[:i:k]
del ul[:i:k]
self.assertEqual(pl[:], ul[:], "del slice [:%d:%d]" % (i, k))
pl, ul = self.lists_of_len(Len)
del pl[i::k]
del ul[i::k]
self.assertEqual(pl[:], ul[:], "del slice [%d::%d]" % (i, k))
for k in [*range(-Len - 1, 0), *range(1, Len)]:
pl, ul = self.lists_of_len(Len)
del pl[::k]
del ul[::k]
self.assertEqual(pl[:], ul[:], "del slice [::%d]" % (k))
def test04_get_set_del_single(self):
"Get/set/delete single item"
pl, ul = self.lists_of_len()
for i in self.limits_plus(0):
with self.subTest(i=i):
self.assertEqual(pl[i], ul[i], "get single item [%d]" % i)
for i in self.limits_plus(0):
pl, ul = self.lists_of_len()
pl[i] = 100
ul[i] = 100
with self.subTest(i=i):
self.assertEqual(pl[:], ul[:], "set single item [%d]" % i)
for i in self.limits_plus(0):
pl, ul = self.lists_of_len()
del pl[i]
del ul[i]
with self.subTest(i=i):
self.assertEqual(pl[:], ul[:], "del single item [%d]" % i)
def test05_out_of_range_exceptions(self):
"Out of range exceptions"
def setfcn(x, i):
x[i] = 20
def getfcn(x, i):
return x[i]
def delfcn(x, i):
del x[i]
pl, ul = self.lists_of_len()
for i in (-1 - self.limit, self.limit):
msg = f"invalid index: {i}"
with self.subTest(i=i):
with self.assertRaisesMessage(IndexError, msg):
setfcn(ul, i)
with self.assertRaisesMessage(IndexError, msg):
getfcn(ul, i)
with self.assertRaisesMessage(IndexError, msg):
delfcn(ul, i)
def test06_list_methods(self):
"List methods"
pl, ul = self.lists_of_len()
pl.append(40)
ul.append(40)
self.assertEqual(pl[:], ul[:], "append")
pl.extend(range(50, 55))
ul.extend(range(50, 55))
self.assertEqual(pl[:], ul[:], "extend")
pl.reverse()
ul.reverse()
self.assertEqual(pl[:], ul[:], "reverse")
for i in self.limits_plus(1):
pl, ul = self.lists_of_len()
pl.insert(i, 50)
ul.insert(i, 50)
with self.subTest(i=i):
self.assertEqual(pl[:], ul[:], "insert at %d" % i)
for i in self.limits_plus(0):
pl, ul = self.lists_of_len()
with self.subTest(i=i):
self.assertEqual(pl.pop(i), ul.pop(i), "popped value at %d" % i)
self.assertEqual(pl[:], ul[:], "after pop at %d" % i)
pl, ul = self.lists_of_len()
self.assertEqual(pl.pop(), ul.pop(i), "popped value")
self.assertEqual(pl[:], ul[:], "after pop")
pl, ul = self.lists_of_len()
def popfcn(x, i):
x.pop(i)
with self.assertRaisesMessage(IndexError, "invalid index: 3"):
popfcn(ul, self.limit)
with self.assertRaisesMessage(IndexError, "invalid index: -4"):
popfcn(ul, -1 - self.limit)
pl, ul = self.lists_of_len()
for val in range(self.limit):
with self.subTest(val=val):
self.assertEqual(pl.index(val), ul.index(val), "index of %d" % val)
for val in self.limits_plus(2):
with self.subTest(val=val):
self.assertEqual(pl.count(val), ul.count(val), "count %d" % val)
for val in range(self.limit):
pl, ul = self.lists_of_len()
pl.remove(val)
ul.remove(val)
with self.subTest(val=val):
self.assertEqual(pl[:], ul[:], "after remove val %d" % val)
def indexfcn(x, v):
return x.index(v)
def removefcn(x, v):
return x.remove(v)
msg = "40 not found in object"
with self.assertRaisesMessage(ValueError, msg):
indexfcn(ul, 40)
with self.assertRaisesMessage(ValueError, msg):
removefcn(ul, 40)
def test07_allowed_types(self):
"Type-restricted list"
pl, ul = self.lists_of_len()
ul._allowed = int
ul[1] = 50
ul[:2] = [60, 70, 80]
def setfcn(x, i, v):
x[i] = v
msg = "Invalid type encountered in the arguments."
with self.assertRaisesMessage(TypeError, msg):
setfcn(ul, 2, "hello")
with self.assertRaisesMessage(TypeError, msg):
setfcn(ul, slice(0, 3, 2), ("hello", "goodbye"))
def test08_min_length(self):
"Length limits"
pl, ul = self.lists_of_len(5)
ul._minlength = 3
def delfcn(x, i):
del x[:i]
def setfcn(x, i):
x[:i] = []
msg = "Must have at least 3 items"
for i in range(len(ul) - ul._minlength + 1, len(ul)):
with self.subTest(i=i):
with self.assertRaisesMessage(ValueError, msg):
delfcn(ul, i)
with self.assertRaisesMessage(ValueError, msg):
setfcn(ul, i)
del ul[: len(ul) - ul._minlength]
ul._maxlength = 4
for i in range(0, ul._maxlength - len(ul)):
with self.subTest(i=i):
ul.append(i)
with self.assertRaisesMessage(ValueError, "Cannot have more than 4 items"):
ul.append(10)
def test09_iterable_check(self):
"Error on assigning non-iterable to slice"
pl, ul = self.lists_of_len(self.limit + 1)
def setfcn(x, i, v):
x[i] = v
with self.assertRaisesMessage(
TypeError, "can only assign an iterable to a slice"
):
setfcn(ul, slice(0, 3, 2), 2)
def test10_checkindex(self):
"Index check"
pl, ul = self.lists_of_len()
for i in self.limits_plus(0):
with self.subTest(i=i):
if i < 0:
self.assertEqual(
ul._checkindex(i), i + self.limit, "_checkindex(neg index)"
)
else:
self.assertEqual(ul._checkindex(i), i, "_checkindex(pos index)")
for i in (-self.limit - 1, self.limit):
with (
self.subTest(i=i),
self.assertRaisesMessage(IndexError, f"invalid index: {i}"),
):
ul._checkindex(i)
def test_11_sorting(self):
"Sorting"
pl, ul = self.lists_of_len()
pl.insert(0, pl.pop())
ul.insert(0, ul.pop())
pl.sort()
ul.sort()
self.assertEqual(pl[:], ul[:], "sort")
mid = pl[len(pl) // 2]
pl.sort(key=lambda x: (mid - x) ** 2)
ul.sort(key=lambda x: (mid - x) ** 2)
self.assertEqual(pl[:], ul[:], "sort w/ key")
pl.insert(0, pl.pop())
ul.insert(0, ul.pop())
pl.sort(reverse=True)
ul.sort(reverse=True)
self.assertEqual(pl[:], ul[:], "sort w/ reverse")
mid = pl[len(pl) // 2]
pl.sort(key=lambda x: (mid - x) ** 2)
ul.sort(key=lambda x: (mid - x) ** 2)
self.assertEqual(pl[:], ul[:], "sort w/ key")
def test_12_arithmetic(self):
"Arithmetic"
pl, ul = self.lists_of_len()
al = list(range(10, 14))
self.assertEqual(list(pl + al), list(ul + al), "add")
self.assertEqual(type(ul), type(ul + al), "type of add result")
self.assertEqual(list(al + pl), list(al + ul), "radd")
self.assertEqual(type(al), type(al + ul), "type of radd result")
objid = id(ul)
pl += al
ul += al
self.assertEqual(pl[:], ul[:], "in-place add")
self.assertEqual(objid, id(ul), "in-place add id")
for n in (-1, 0, 1, 3):
pl, ul = self.lists_of_len()
self.assertEqual(list(pl * n), list(ul * n), "mul by %d" % n)
self.assertEqual(type(ul), type(ul * n), "type of mul by %d result" % n)
self.assertEqual(list(n * pl), list(n * ul), "rmul by %d" % n)
self.assertEqual(type(ul), type(n * ul), "type of rmul by %d result" % n)
objid = id(ul)
pl *= n
ul *= n
self.assertEqual(pl[:], ul[:], "in-place mul by %d" % n)
self.assertEqual(objid, id(ul), "in-place mul by %d id" % n)
pl, ul = self.lists_of_len()
self.assertEqual(pl, ul, "cmp for equal")
self.assertNotEqual(ul, pl + [2], "cmp for not equal")
self.assertGreaterEqual(pl, ul, "cmp for gte self")
self.assertLessEqual(pl, ul, "cmp for lte self")
self.assertGreaterEqual(ul, pl, "cmp for self gte")
self.assertLessEqual(ul, pl, "cmp for self lte")
self.assertGreater(pl + [5], ul, "cmp")
self.assertGreaterEqual(pl + [5], ul, "cmp")
self.assertLess(pl, ul + [2], "cmp")
self.assertLessEqual(pl, ul + [2], "cmp")
self.assertGreater(ul + [5], pl, "cmp")
self.assertGreaterEqual(ul + [5], pl, "cmp")
self.assertLess(ul, pl + [2], "cmp")
self.assertLessEqual(ul, pl + [2], "cmp")
pl[1] = 20
self.assertGreater(pl, ul, "cmp for gt self")
self.assertLess(ul, pl, "cmp for self lt")
pl[1] = -20
self.assertLess(pl, ul, "cmp for lt self")
self.assertGreater(ul, pl, "cmp for gt self")
|
ListMixinTest
|
python
|
mwaskom__seaborn
|
seaborn/_stats/aggregation.py
|
{
"start": 413,
"end": 1252
}
|
class ____(Stat):
"""
Aggregate data along the value axis using given method.
Parameters
----------
func : str or callable
Name of a :class:`pandas.Series` method or a vector -> scalar function.
See Also
--------
objects.Est : Aggregation with error bars.
Examples
--------
.. include:: ../docstrings/objects.Agg.rst
"""
func: str | Callable[[Vector], float] = "mean"
group_by_orient: ClassVar[bool] = True
def __call__(
self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],
) -> DataFrame:
var = {"x": "y", "y": "x"}.get(orient)
res = (
groupby
.agg(data, {var: self.func})
.dropna(subset=[var])
.reset_index(drop=True)
)
return res
@dataclass
|
Agg
|
python
|
doocs__leetcode
|
solution/3100-3199/3165.Maximum Sum of Subsequence With Non-adjacent Elements/Solution.py
|
{
"start": 63,
"end": 263
}
|
class ____:
__slots__ = "l", "r", "s00", "s01", "s10", "s11"
def __init__(self, l: int, r: int):
self.l = l
self.r = r
self.s00 = self.s01 = self.s10 = self.s11 = 0
|
Node
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/util/langhelpers.py
|
{
"start": 48928,
"end": 50664
}
|
class ____(int):
"""A constant symbol.
>>> symbol("foo") is symbol("foo")
True
>>> symbol("foo")
<symbol 'foo>
A slight refinement of the MAGICCOOKIE=object() pattern. The primary
advantage of symbol() is its repr(). They are also singletons.
Repeated calls of symbol('name') will all return the same instance.
"""
name: str
symbols: Dict[str, symbol] = {}
_lock = threading.Lock()
def __new__(
cls,
name: str,
doc: Optional[str] = None,
canonical: Optional[int] = None,
) -> symbol:
with cls._lock:
sym = cls.symbols.get(name)
if sym is None:
assert isinstance(name, str)
if canonical is None:
canonical = hash(name)
sym = int.__new__(symbol, canonical)
sym.name = name
if doc:
sym.__doc__ = doc
# NOTE: we should ultimately get rid of this global thing,
# however, currently it is to support pickling. The best
# change would be when we are on py3.11 at a minimum, we
# switch to stdlib enum.IntFlag.
cls.symbols[name] = sym
else:
if canonical and canonical != sym:
raise TypeError(
f"Can't replace canonical symbol for {name!r} "
f"with new int value {canonical}"
)
return sym
def __reduce__(self):
return symbol, (self.name, "x", int(self))
def __str__(self):
return repr(self)
def __repr__(self):
return f"symbol({self.name!r})"
|
symbol
|
python
|
ray-project__ray
|
python/ray/_private/ray_logging/constants.py
|
{
"start": 783,
"end": 1317
}
|
class ____(str, Enum):
# Core context
JOB_ID = "job_id"
WORKER_ID = "worker_id"
NODE_ID = "node_id"
ACTOR_ID = "actor_id"
TASK_ID = "task_id"
ACTOR_NAME = "actor_name"
TASK_NAME = "task_name"
TASK_FUNCTION_NAME = "task_func_name"
# Logger built-in context
ASCTIME = "asctime"
LEVELNAME = "levelname"
MESSAGE = "message"
FILENAME = "filename"
LINENO = "lineno"
EXC_TEXT = "exc_text"
PROCESS = "process"
# Ray logging context
TIMESTAMP_NS = "timestamp_ns"
|
LogKey
|
python
|
PyCQA__pylint
|
tests/functional/i/invalid/invalid_getnewargs/invalid_getnewargs_returned.py
|
{
"start": 1201,
"end": 1308
}
|
class ____:
""" Uninferable return value """
__getnewargs__ = lambda self: Missing
|
AmbigousGetNewArgs
|
python
|
huggingface__transformers
|
src/transformers/models/esm/modeling_esm.py
|
{
"start": 19666,
"end": 20847
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([EsmLayer(config) for _ in range(config.num_hidden_layers)])
self.emb_layer_norm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.gradient_checkpointing = False
@can_return_tuple
def forward(
self,
hidden_states,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
**kwargs: Unpack[TransformersKwargs],
):
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
**kwargs,
)
if self.emb_layer_norm_after:
hidden_states = self.emb_layer_norm_after(hidden_states)
return BaseModelOutputWithCrossAttentions(last_hidden_state=hidden_states)
# Copied from transformers.models.bert.modeling_bert.BertPooler
|
EsmEncoder
|
python
|
PrefectHQ__prefect
|
src/prefect/server/database/orm_models.py
|
{
"start": 28142,
"end": 31910
}
|
class ____(Base):
"""SQLAlchemy model of a deployment."""
name: Mapped[str]
version: Mapped[Optional[str]]
description: Mapped[Optional[str]] = mapped_column(sa.Text())
work_queue_name: Mapped[Optional[str]] = mapped_column(index=True)
infra_overrides: Mapped[dict[str, Any]] = mapped_column(
JSON, server_default="{}", default=dict
)
path: Mapped[Optional[str]]
entrypoint: Mapped[Optional[str]]
last_polled: Mapped[Optional[DateTime]]
status: Mapped[DeploymentStatus] = mapped_column(
sa.Enum(DeploymentStatus, name="deployment_status"),
default=DeploymentStatus.NOT_READY,
server_default="NOT_READY",
)
@declared_attr
def job_variables(self) -> Mapped[dict[str, Any]]:
return synonym("infra_overrides")
flow_id: Mapped[uuid.UUID] = mapped_column(
sa.ForeignKey("flow.id", ondelete="CASCADE"), index=True
)
work_queue_id: Mapped[Optional[uuid.UUID]] = mapped_column(
sa.ForeignKey("work_queue.id", ondelete="SET NULL"), index=True
)
paused: Mapped[bool] = mapped_column(server_default="0", default=False, index=True)
schedules: Mapped[list["DeploymentSchedule"]] = relationship(
lazy="selectin", order_by=lambda: DeploymentSchedule.updated.desc()
)
# deprecated in favor of `concurrency_limit_id` FK
_concurrency_limit: Mapped[Optional[int]] = mapped_column(name="concurrency_limit")
concurrency_limit_id: Mapped[Optional[uuid.UUID]] = mapped_column(
sa.ForeignKey("concurrency_limit_v2.id", ondelete="SET NULL"),
)
global_concurrency_limit: Mapped[Optional["ConcurrencyLimitV2"]] = relationship(
lazy="selectin",
)
concurrency_options: Mapped[Optional[schemas.core.ConcurrencyOptions]] = (
mapped_column(
Pydantic(schemas.core.ConcurrencyOptions),
server_default=None,
nullable=True,
default=None,
)
)
tags: Mapped[list[str]] = mapped_column(JSON, server_default="[]", default=list)
labels: Mapped[Optional[schemas.core.KeyValueLabels]] = mapped_column(JSON)
parameters: Mapped[dict[str, Any]] = mapped_column(
JSON, server_default="{}", default=dict
)
pull_steps: Mapped[Optional[list[dict[str, Any]]]] = mapped_column(
JSON, default=list
)
parameter_openapi_schema: Mapped[Optional[dict[str, Any]]] = mapped_column(
JSON, default=dict
)
enforce_parameter_schema: Mapped[bool] = mapped_column(
default=True, server_default="0"
)
created_by: Mapped[Optional[schemas.core.CreatedBy]] = mapped_column(
Pydantic(schemas.core.CreatedBy)
)
updated_by: Mapped[Optional[schemas.core.UpdatedBy]] = mapped_column(
Pydantic(schemas.core.UpdatedBy)
)
infrastructure_document_id: Mapped[Optional[uuid.UUID]] = mapped_column(
sa.ForeignKey("block_document.id", ondelete="CASCADE"), index=False
)
storage_document_id: Mapped[Optional[uuid.UUID]] = mapped_column(
sa.ForeignKey("block_document.id", ondelete="CASCADE"),
index=False,
)
flow: Mapped["Flow"] = relationship(
"Flow", back_populates="deployments", lazy="raise"
)
work_queue: Mapped[Optional["WorkQueue"]] = relationship(
lazy="selectin", foreign_keys=[work_queue_id]
)
__table_args__: Any = (
sa.Index(
"uq_deployment__flow_id_name",
"flow_id",
"name",
unique=True,
),
sa.Index(
"ix_deployment__created",
"created",
),
sa.Index("trgm_ix_deployment_name", "name", postgresql_using="gin").ddl_if(
dialect="postgresql"
),
)
|
Deployment
|
python
|
numpy__numpy
|
numpy/random/tests/test_generator_mt19937.py
|
{
"start": 1571,
"end": 2793
}
|
class ____:
def test_scalar(self):
s = Generator(MT19937(0))
assert_equal(s.integers(1000), 479)
s = Generator(MT19937(4294967295))
assert_equal(s.integers(1000), 324)
def test_array(self):
s = Generator(MT19937(range(10)))
assert_equal(s.integers(1000), 465)
s = Generator(MT19937(np.arange(10)))
assert_equal(s.integers(1000), 465)
s = Generator(MT19937([0]))
assert_equal(s.integers(1000), 479)
s = Generator(MT19937([4294967295]))
assert_equal(s.integers(1000), 324)
def test_seedsequence(self):
s = MT19937(SeedSequence(0))
assert_equal(s.random_raw(1), 2058676884)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, MT19937, -0.5)
assert_raises(ValueError, MT19937, -1)
def test_invalid_array(self):
# seed must be an unsigned integer
assert_raises(TypeError, MT19937, [-0.5])
assert_raises(ValueError, MT19937, [-1])
assert_raises(ValueError, MT19937, [1, -2, 4294967296])
def test_noninstantized_bitgen(self):
assert_raises(ValueError, Generator, MT19937)
|
TestSeed
|
python
|
apache__airflow
|
providers/microsoft/azure/tests/unit/microsoft/azure/hooks/test_cosmos.py
|
{
"start": 1452,
"end": 12687
}
|
class ____:
# Set up an environment to test with
@pytest.fixture(autouse=True)
def setup_test_cases(self, create_mock_connection):
# set up some test variables
self.test_end_point = "https://test_endpoint:443"
self.test_master_key = "magic_test_key"
self.test_database_name = "test_database_name"
self.test_collection_name = "test_collection_name"
self.test_database_default = "test_database_default"
self.test_collection_default = "test_collection_default"
self.test_partition_key = "/test_partition_key"
create_mock_connection(
Connection(
conn_id="azure_cosmos_test_key_id",
conn_type="azure_cosmos",
login=self.test_end_point,
password=self.test_master_key,
extra={
"database_name": self.test_database_default,
"collection_name": self.test_collection_default,
"partition_key": self.test_partition_key,
},
)
)
@pytest.mark.parametrize(
"mocked_connection",
[
Connection(
conn_id="azure_cosmos_test_default_credential",
conn_type="azure_cosmos",
login="https://test_endpoint:443",
extra={
"resource_group_name": "resource-group-name",
"subscription_id": "subscription_id",
"managed_identity_client_id": "test_client_id",
"workload_identity_tenant_id": "test_tenant_id",
},
)
],
indirect=True,
)
@mock.patch(f"{MODULE}.get_sync_default_azure_credential")
@mock.patch(f"{MODULE}.CosmosDBManagementClient")
@mock.patch(f"{MODULE}.CosmosClient")
def test_get_conn(self, mock_cosmos, mock_cosmos_db, mock_default_azure_credential, mocked_connection):
mock_cosmos_db.return_value.database_accounts.list_keys.return_value.primary_master_key = "master-key"
hook = AzureCosmosDBHook(azure_cosmos_conn_id="azure_cosmos_test_default_credential")
hook.get_conn()
mock_default_azure_credential.assert_called()
args = mock_default_azure_credential.call_args
assert args.kwargs["managed_identity_client_id"] == "test_client_id"
assert args.kwargs["workload_identity_tenant_id"] == "test_tenant_id"
@mock.patch(f"{MODULE}.CosmosClient", autospec=True)
def test_client(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id="azure_cosmos_test_key_id")
assert isinstance(hook.get_conn(), CosmosClient)
@mock.patch(f"{MODULE}.CosmosClient")
def test_create_database(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id="azure_cosmos_test_key_id")
hook.create_database(self.test_database_name)
expected_calls = [mock.call().create_database("test_database_name")]
mock_cosmos.assert_any_call(self.test_end_point, {"masterKey": self.test_master_key})
mock_cosmos.assert_has_calls(expected_calls)
@mock.patch(f"{MODULE}.CosmosClient")
def test_create_database_exception(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id="azure_cosmos_test_key_id")
with pytest.raises(AirflowException):
hook.create_database(None)
@mock.patch(f"{MODULE}.CosmosClient")
def test_create_container_exception(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id="azure_cosmos_test_key_id")
with pytest.raises(AirflowException):
hook.create_collection(None)
@mock.patch(f"{MODULE}.CosmosClient")
def test_create_container(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id="azure_cosmos_test_key_id")
hook.create_collection(self.test_collection_name, self.test_database_name, partition_key="/id")
expected_calls = [
mock.call()
.get_database_client("test_database_name")
.create_container("test_collection_name", partition_key=PartitionKey(path="/id"))
]
mock_cosmos.assert_any_call(self.test_end_point, {"masterKey": self.test_master_key})
mock_cosmos.assert_has_calls(expected_calls)
@mock.patch(f"{MODULE}.CosmosClient")
def test_create_container_default(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id="azure_cosmos_test_key_id")
hook.create_collection(self.test_collection_name)
expected_calls = [
mock.call()
.get_database_client("test_database_name")
.create_container(
"test_collection_name", partition_key=PartitionKey(path=self.test_partition_key)
)
]
mock_cosmos.assert_any_call(self.test_end_point, {"masterKey": self.test_master_key})
mock_cosmos.assert_has_calls(expected_calls)
@mock.patch(f"{MODULE}.CosmosClient")
def test_upsert_document_default(self, mock_cosmos):
test_id = str(uuid.uuid4())
(
mock_cosmos.return_value.get_database_client.return_value.get_container_client.return_value.upsert_item.return_value
) = {"id": test_id}
hook = AzureCosmosDBHook(azure_cosmos_conn_id="azure_cosmos_test_key_id")
returned_item = hook.upsert_document({"id": test_id})
expected_calls = [
mock.call()
.get_database_client("test_database_name")
.get_container_client("test_collection_name")
.upsert_item({"id": test_id})
]
mock_cosmos.assert_any_call(self.test_end_point, {"masterKey": self.test_master_key})
mock_cosmos.assert_has_calls(expected_calls)
logging.getLogger().info(returned_item)
assert returned_item["id"] == test_id
@mock.patch(f"{MODULE}.CosmosClient")
def test_upsert_document(self, mock_cosmos):
test_id = str(uuid.uuid4())
(
mock_cosmos.return_value.get_database_client.return_value.get_container_client.return_value.upsert_item.return_value
) = {"id": test_id}
hook = AzureCosmosDBHook(azure_cosmos_conn_id="azure_cosmos_test_key_id")
returned_item = hook.upsert_document(
{"data1": "somedata"},
database_name=self.test_database_name,
collection_name=self.test_collection_name,
document_id=test_id,
)
expected_calls = [
mock.call()
.get_database_client("test_database_name")
.get_container_client("test_collection_name")
.upsert_item({"data1": "somedata", "id": test_id})
]
mock_cosmos.assert_any_call(self.test_end_point, {"masterKey": self.test_master_key})
mock_cosmos.assert_has_calls(expected_calls)
logging.getLogger().info(returned_item)
assert returned_item["id"] == test_id
@mock.patch(f"{MODULE}.CosmosClient")
def test_insert_documents(self, mock_cosmos):
test_id1 = str(uuid.uuid4())
test_id2 = str(uuid.uuid4())
test_id3 = str(uuid.uuid4())
documents = [
{"id": test_id1, "data": "data1"},
{"id": test_id2, "data": "data2"},
{"id": test_id3, "data": "data3"},
]
hook = AzureCosmosDBHook(azure_cosmos_conn_id="azure_cosmos_test_key_id")
returned_item = hook.insert_documents(documents)
expected_calls = [
mock.call()
.get_database_client("test_database_name")
.get_container_client("test_collection_name")
.create_item({"data": "data1", "id": test_id1}),
mock.call()
.get_database_client("test_database_name")
.get_container_client("test_collection_name")
.create_item({"data": "data2", "id": test_id2}),
mock.call()
.get_database_client("test_database_name")
.get_container_client("test_collection_name")
.create_item({"data": "data3", "id": test_id3}),
]
logging.getLogger().info(returned_item)
mock_cosmos.assert_any_call(self.test_end_point, {"masterKey": self.test_master_key})
mock_cosmos.assert_has_calls(expected_calls, any_order=True)
@mock.patch(f"{MODULE}.CosmosClient")
def test_delete_database(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id="azure_cosmos_test_key_id")
hook.delete_database(self.test_database_name)
expected_calls = [mock.call().delete_database("test_database_name")]
mock_cosmos.assert_any_call(self.test_end_point, {"masterKey": self.test_master_key})
mock_cosmos.assert_has_calls(expected_calls)
@mock.patch(f"{MODULE}.CosmosClient")
def test_delete_database_exception(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id="azure_cosmos_test_key_id")
with pytest.raises(AirflowException):
hook.delete_database(None)
@mock.patch("azure.cosmos.cosmos_client.CosmosClient")
def test_delete_container_exception(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id="azure_cosmos_test_key_id")
with pytest.raises(AirflowException):
hook.delete_collection(None)
@mock.patch(f"{MODULE}.CosmosClient")
def test_delete_container(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id="azure_cosmos_test_key_id")
hook.delete_collection(self.test_collection_name, self.test_database_name)
expected_calls = [
mock.call().get_database_client("test_database_name").delete_container("test_collection_name")
]
mock_cosmos.assert_any_call(self.test_end_point, {"masterKey": self.test_master_key})
mock_cosmos.assert_has_calls(expected_calls)
@mock.patch(f"{MODULE}.CosmosClient")
def test_delete_container_default(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id="azure_cosmos_test_key_id")
hook.delete_collection(self.test_collection_name)
expected_calls = [
mock.call().get_database_client("test_database_name").delete_container("test_collection_name")
]
mock_cosmos.assert_any_call(self.test_end_point, {"masterKey": self.test_master_key})
mock_cosmos.assert_has_calls(expected_calls)
@mock.patch(f"{MODULE}.CosmosClient")
def test_connection_success(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id="azure_cosmos_test_key_id")
hook.get_conn().list_databases.return_value = {"id": self.test_database_name}
status, msg = hook.test_connection()
assert status is True
assert msg == "Successfully connected to Azure Cosmos."
@mock.patch(f"{MODULE}.CosmosClient")
def test_connection_failure(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id="azure_cosmos_test_key_id")
hook.get_conn().list_databases = PropertyMock(side_effect=Exception("Authentication failed."))
status, msg = hook.test_connection()
assert status is False
assert msg == "Authentication failed."
|
TestAzureCosmosDbHook
|
python
|
PrefectHQ__prefect
|
src/prefect/types/_concurrency.py
|
{
"start": 105,
"end": 354
}
|
class ____(BaseModel):
"""Model for validating concurrency lease holder information."""
model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")
type: Literal["flow_run", "task_run", "deployment"]
id: UUID
|
ConcurrencyLeaseHolder
|
python
|
getsentry__sentry
|
src/sentry/grouping/strategies/base.py
|
{
"start": 7907,
"end": 11533
}
|
class ____(Generic[ConcreteInterface]):
"""Base class for all strategies."""
def __init__(
self,
id: str,
name: str,
interface: str,
score: int | None,
func: StrategyFunc[ConcreteInterface],
):
self.id = id
self.strategy_class = id.split(":", 1)[0]
self.name = name
self.interface_name = interface
self.score = score
self.func = func
self.variant_processor_func: VariantProcessor | None = None
def __repr__(self) -> str:
return f"<{self.__class__.__name__} id={self.id!r}>"
def _invoke(
self, func: Callable[..., ComponentsByVariant], *args: Any, **kwargs: Any
) -> ComponentsByVariant:
# We forcefully override strategy here. This lets a strategy
# function always access its metadata and directly forward it to
# subcomponents.
kwargs["strategy"] = self
return func(*args, **kwargs)
def __call__(self, *args: Any, **kwargs: Any) -> ComponentsByVariant:
return self._invoke(self.func, *args, **kwargs)
def variant_processor(self, func: VariantProcessor) -> VariantProcessor:
"""Registers a variant reducer function that can be used to postprocess
all variants created from this strategy.
"""
self.variant_processor_func = func
return func
def get_grouping_components(
self, event: Event, context: GroupingContext
) -> ComponentsByVariant:
"""
Return a dictionary, keyed by variant name, of components produced by this strategy.
"""
interface = event.interfaces.get(self.interface_name)
if interface is None:
return {}
with context:
components_by_variant = self(interface, event=event, context=context)
final_components_by_variant = {}
priority_contributing_variants_by_hash = {}
non_priority_contributing_variants = []
for variant_name, component in components_by_variant.items():
is_priority = variant_name.startswith("!")
variant_name = variant_name.lstrip("!")
if component.contributes:
# Track priority and non-priority contributing hashes separately, so the latter can
# be deduped against the former
if is_priority:
priority_contributing_variants_by_hash[component.get_hash()] = variant_name
else:
non_priority_contributing_variants.append(variant_name)
final_components_by_variant[variant_name] = component
# Mark any non-priority duplicates of priority hashes as non-contributing
for non_priority_variant_name in non_priority_contributing_variants:
non_priority_component = final_components_by_variant[non_priority_variant_name]
hash_value = non_priority_component.get_hash()
matching_hash_variant_name = priority_contributing_variants_by_hash.get(hash_value)
if matching_hash_variant_name is not None:
non_priority_component.update(
contributes=False,
hint="ignored because hash matches %s variant" % matching_hash_variant_name,
)
if self.variant_processor_func is not None:
final_components_by_variant = self._invoke(
self.variant_processor_func,
final_components_by_variant,
event=event,
context=context,
)
return final_components_by_variant
|
Strategy
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/operators/test_ssm.py
|
{
"start": 4030,
"end": 10200
}
|
class ____:
@pytest.fixture
def mock_hook(self) -> Generator[mock.MagicMock, None, None]:
with mock.patch.object(SsmGetCommandInvocationOperator, "hook") as _hook:
yield _hook
def setup_method(self):
self.command_id = "test-command-id-123"
self.instance_id = "i-1234567890abcdef0"
self.operator = SsmGetCommandInvocationOperator(
task_id="test_get_command_invocation",
command_id=self.command_id,
instance_id=self.instance_id,
)
def test_execute_with_specific_instance(self, mock_hook):
# Mock response for specific instance
mock_invocation_details = {
"Status": "Success",
"ResponseCode": 0,
"StandardOutputContent": "Hello World",
"StandardErrorContent": "",
"ExecutionStartDateTime": "2023-01-01T12:00:00Z",
"ExecutionEndDateTime": "2023-01-01T12:00:05Z",
"DocumentName": "AWS-RunShellScript",
"Comment": "Test command",
}
mock_hook.get_command_invocation.return_value = mock_invocation_details
result = self.operator.execute({})
# Verify hook was called correctly
mock_hook.get_command_invocation.assert_called_once_with(self.command_id, self.instance_id)
# Verify returned data structure - should use standardized format with invocations array
expected_result = {
"command_id": self.command_id,
"invocations": [
{
"instance_id": self.instance_id,
"status": "Success",
"response_code": 0,
"standard_output": "Hello World",
"standard_error": "",
"execution_start_time": "2023-01-01T12:00:00Z",
"execution_end_time": "2023-01-01T12:00:05Z",
"document_name": "AWS-RunShellScript",
"comment": "Test command",
}
],
}
assert result == expected_result
def test_execute_all_instances(self, mock_hook):
# Setup operator without instance_id to get all instances
operator = SsmGetCommandInvocationOperator(
task_id="test_get_all_invocations",
command_id=self.command_id,
)
# Mock list_command_invocations response
mock_invocations = [
{"InstanceId": "i-111"},
{"InstanceId": "i-222"},
]
mock_hook.list_command_invocations.return_value = {"CommandInvocations": mock_invocations}
# Mock get_command_invocation responses
mock_invocation_details_1 = {
"Status": "Success",
"ResponseCode": 0,
"StandardOutputContent": "Output 1",
"StandardErrorContent": "",
"ExecutionStartDateTime": "2023-01-01T12:00:00Z",
"ExecutionEndDateTime": "2023-01-01T12:00:05Z",
"DocumentName": "AWS-RunShellScript",
"Comment": "",
}
mock_invocation_details_2 = {
"Status": "Failed",
"ResponseCode": 1,
"StandardOutputContent": "",
"StandardErrorContent": "Error occurred",
"ExecutionStartDateTime": "2023-01-01T12:00:00Z",
"ExecutionEndDateTime": "2023-01-01T12:00:10Z",
"DocumentName": "AWS-RunShellScript",
"Comment": "",
}
mock_hook.get_command_invocation.side_effect = [
mock_invocation_details_1,
mock_invocation_details_2,
]
result = operator.execute({})
# Verify hook calls
mock_hook.list_command_invocations.assert_called_once_with(self.command_id)
assert mock_hook.get_command_invocation.call_count == 2
mock_hook.get_command_invocation.assert_any_call(self.command_id, "i-111")
mock_hook.get_command_invocation.assert_any_call(self.command_id, "i-222")
# Verify returned data structure
expected_result = {
"command_id": self.command_id,
"invocations": [
{
"instance_id": "i-111",
"status": "Success",
"response_code": 0,
"standard_output": "Output 1",
"standard_error": "",
"execution_start_time": "2023-01-01T12:00:00Z",
"execution_end_time": "2023-01-01T12:00:05Z",
"document_name": "AWS-RunShellScript",
"comment": "",
},
{
"instance_id": "i-222",
"status": "Failed",
"response_code": 1,
"standard_output": "",
"standard_error": "Error occurred",
"execution_start_time": "2023-01-01T12:00:00Z",
"execution_end_time": "2023-01-01T12:00:10Z",
"document_name": "AWS-RunShellScript",
"comment": "",
},
],
}
assert result == expected_result
def test_execute_all_instances_with_error(self, mock_hook):
# Setup operator without instance_id
operator = SsmGetCommandInvocationOperator(
task_id="test_get_all_with_error",
command_id=self.command_id,
)
# Mock list_command_invocations response
mock_invocations = [{"InstanceId": "i-111"}]
mock_hook.list_command_invocations.return_value = {"CommandInvocations": mock_invocations}
# Mock get_command_invocation to raise an exception
mock_hook.get_command_invocation.side_effect = Exception("API Error")
result = operator.execute({})
# Verify error handling
expected_result = {
"command_id": self.command_id,
"invocations": [{"instance_id": "i-111", "error": "API Error"}],
}
assert result == expected_result
def test_template_fields(self):
validate_template_fields(self.operator)
|
TestSsmGetCommandInvocationOperator
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/classes/config.py
|
{
"start": 56468,
"end": 56652
}
|
class ____(_ConfigBase):
preset: StopwordsPreset
additions: Optional[List[str]]
removals: Optional[List[str]]
StopwordsConfig = _StopwordsConfig
@dataclass
|
_StopwordsConfig
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/classes9.py
|
{
"start": 591,
"end": 697
}
|
class ____(B1):
pass
# This should generate an error because B0.M is not
# type compatible with B1.M.
|
E1
|
python
|
conda__conda
|
conda/common/configuration.py
|
{
"start": 9098,
"end": 13661
}
|
class ____(RawParameter):
# this class should encapsulate all direct use of ruamel.yaml in this module
def __init__(self, source, key, raw_value, key_comment):
self._key_comment = key_comment
super().__init__(source, key, raw_value)
if isinstance(self._raw_value, CommentedSeq):
value_comments = self._get_yaml_list_comments(self._raw_value)
self._value_flags = tuple(
ParameterFlag.from_string(s) for s in value_comments
)
children_values = []
for i in range(len(self._raw_value)):
children_values.append(
YamlRawParameter(
self.source, self.key, self._raw_value[i], value_comments[i]
)
)
self._value = tuple(children_values)
elif isinstance(self._raw_value, CommentedMap):
value_comments = self._get_yaml_map_comments(self._raw_value)
self._value_flags = {
k: ParameterFlag.from_string(v)
for k, v in value_comments.items()
if v is not None
}
children_values = {}
for k, v in self._raw_value.items():
children_values[k] = YamlRawParameter(
self.source, self.key, v, value_comments[k]
)
self._value = frozendict(children_values)
elif isinstance(self._raw_value, primitive_types):
self._value_flags = None
self._value = self._raw_value
else:
print(type(self._raw_value), self._raw_value, file=sys.stderr)
raise ThisShouldNeverHappenError() # pragma: no cover
def value(self, parameter_obj):
return self._value
def keyflag(self):
return ParameterFlag.from_string(self._key_comment)
def valueflags(self, parameter_obj):
return self._value_flags
@staticmethod
def _get_yaml_key_comment(commented_dict, key):
try:
return commented_dict.ca.items[key][2].value.strip()
except (AttributeError, KeyError):
return None
@classmethod
def _get_yaml_list_comments(cls, value):
# value is a ruamel.yaml CommentedSeq, len(value) is the number of lines in the sequence,
# value.ca is the comment object for the sequence and the comments themselves are stored as
# a sparse dict
list_comments = []
for i in range(len(value)):
try:
list_comments.append(cls._get_yaml_list_comment_item(value.ca.items[i]))
except (AttributeError, IndexError, KeyError, TypeError):
list_comments.append(None)
return tuple(list_comments)
@staticmethod
def _get_yaml_list_comment_item(item):
# take the pre_item comment if available
# if not, take the first post_item comment if available
if item[0]:
return item[0].value.strip() or None
else:
return item[1][0].value.strip() or None
@staticmethod
def _get_yaml_map_comments(value):
map_comments = {}
for key in value:
try:
map_comments[key] = value.ca.items[key][2].value.strip() or None
except (AttributeError, KeyError):
map_comments[key] = None
return map_comments
@classmethod
def make_raw_parameters(cls, source, from_map):
if from_map:
return {
key: cls(
source, key, from_map[key], cls._get_yaml_key_comment(from_map, key)
)
for key in from_map
}
return EMPTY_MAP
@classmethod
def make_raw_parameters_from_file(cls, filepath):
with open(filepath) as fh:
try:
yaml_obj = yaml_round_trip_load(fh)
except ScannerError as err:
mark = err.problem_mark
raise ConfigurationLoadError(
filepath,
" reason: invalid yaml at line %(line)s, column %(column)s",
line=mark.line,
column=mark.column,
)
except ReaderError as err:
raise ConfigurationLoadError(
filepath,
" reason: invalid yaml at position %(position)s",
position=err.position,
)
return cls.make_raw_parameters(filepath, yaml_obj) or EMPTY_MAP
|
YamlRawParameter
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_priority_level_configuration_condition.py
|
{
"start": 383,
"end": 7641
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'last_transition_time': 'datetime',
'message': 'str',
'reason': 'str',
'status': 'str',
'type': 'str'
}
attribute_map = {
'last_transition_time': 'lastTransitionTime',
'message': 'message',
'reason': 'reason',
'status': 'status',
'type': 'type'
}
def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
"""V1PriorityLevelConfigurationCondition - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._last_transition_time = None
self._message = None
self._reason = None
self._status = None
self._type = None
self.discriminator = None
if last_transition_time is not None:
self.last_transition_time = last_transition_time
if message is not None:
self.message = message
if reason is not None:
self.reason = reason
if status is not None:
self.status = status
if type is not None:
self.type = type
@property
def last_transition_time(self):
"""Gets the last_transition_time of this V1PriorityLevelConfigurationCondition. # noqa: E501
`lastTransitionTime` is the last time the condition transitioned from one status to another. # noqa: E501
:return: The last_transition_time of this V1PriorityLevelConfigurationCondition. # noqa: E501
:rtype: datetime
"""
return self._last_transition_time
@last_transition_time.setter
def last_transition_time(self, last_transition_time):
"""Sets the last_transition_time of this V1PriorityLevelConfigurationCondition.
`lastTransitionTime` is the last time the condition transitioned from one status to another. # noqa: E501
:param last_transition_time: The last_transition_time of this V1PriorityLevelConfigurationCondition. # noqa: E501
:type: datetime
"""
self._last_transition_time = last_transition_time
@property
def message(self):
"""Gets the message of this V1PriorityLevelConfigurationCondition. # noqa: E501
`message` is a human-readable message indicating details about last transition. # noqa: E501
:return: The message of this V1PriorityLevelConfigurationCondition. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V1PriorityLevelConfigurationCondition.
`message` is a human-readable message indicating details about last transition. # noqa: E501
:param message: The message of this V1PriorityLevelConfigurationCondition. # noqa: E501
:type: str
"""
self._message = message
@property
def reason(self):
"""Gets the reason of this V1PriorityLevelConfigurationCondition. # noqa: E501
`reason` is a unique, one-word, CamelCase reason for the condition's last transition. # noqa: E501
:return: The reason of this V1PriorityLevelConfigurationCondition. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this V1PriorityLevelConfigurationCondition.
`reason` is a unique, one-word, CamelCase reason for the condition's last transition. # noqa: E501
:param reason: The reason of this V1PriorityLevelConfigurationCondition. # noqa: E501
:type: str
"""
self._reason = reason
@property
def status(self):
"""Gets the status of this V1PriorityLevelConfigurationCondition. # noqa: E501
`status` is the status of the condition. Can be True, False, Unknown. Required. # noqa: E501
:return: The status of this V1PriorityLevelConfigurationCondition. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1PriorityLevelConfigurationCondition.
`status` is the status of the condition. Can be True, False, Unknown. Required. # noqa: E501
:param status: The status of this V1PriorityLevelConfigurationCondition. # noqa: E501
:type: str
"""
self._status = status
@property
def type(self):
"""Gets the type of this V1PriorityLevelConfigurationCondition. # noqa: E501
`type` is the type of the condition. Required. # noqa: E501
:return: The type of this V1PriorityLevelConfigurationCondition. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1PriorityLevelConfigurationCondition.
`type` is the type of the condition. Required. # noqa: E501
:param type: The type of this V1PriorityLevelConfigurationCondition. # noqa: E501
:type: str
"""
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PriorityLevelConfigurationCondition):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PriorityLevelConfigurationCondition):
return True
return self.to_dict() != other.to_dict()
|
V1PriorityLevelConfigurationCondition
|
python
|
numba__numba
|
numba/tests/test_parfors.py
|
{
"start": 30159,
"end": 31818
}
|
class ____(TestParforsBase):
""" Miscellaneous 'classical' numerical tests """
def test_pi(self):
def test_impl(n):
x = 2 * np.random.ranf(n) - 1
y = 2 * np.random.ranf(n) - 1
return 4 * np.sum(x**2 + y**2 < 1) / n
self.check(test_impl, 100000, decimal=1)
self.assertEqual(countParfors(test_impl, (types.int64, )), 1)
self.assertEqual(countArrays(test_impl, (types.intp,)), 0)
def test_blackscholes(self):
# blackscholes takes 5 1D float array args
args = (numba.float64[:], ) * 5
self.assertEqual(countParfors(blackscholes_impl, args), 1)
@needs_blas
def test_logistic_regression(self):
args = (numba.float64[:], numba.float64[:,:], numba.float64[:],
numba.int64)
self.assertEqual(countParfors(lr_impl, args), 2)
self.assertEqual(countArrayAllocs(lr_impl, args), 1)
def test_kmeans(self):
np.random.seed(0)
N = 1024
D = 10
centers = 3
A = np.random.ranf((N, D))
init_centroids = np.random.ranf((centers, D))
self.check(example_kmeans_test, A, centers, 3, init_centroids,
decimal=1)
# TODO: count parfors after k-means fusion is working
# requires recursive parfor counting
arg_typs = (types.Array(types.float64, 2, 'C'), types.intp, types.intp,
types.Array(types.float64, 2, 'C'))
self.assertEqual(
countNonParforArrayAccesses(example_kmeans_test, arg_typs), 0)
@skip_parfors_unsupported
|
TestParforNumericalMisc
|
python
|
readthedocs__readthedocs.org
|
readthedocs/core/views/hooks.py
|
{
"start": 580,
"end": 7190
}
|
class ____:
"""
Version information.
If type is None, it means that the version can be either a branch or a tag.
"""
name: str
type: Literal["branch", "tag", None]
log = structlog.get_logger(__name__)
def _build_version(project, version):
"""
Where we actually trigger builds for a project and version.
All webhook logic should route here to call ``trigger_build``.
"""
if not project.has_valid_webhook:
project.has_valid_webhook = True
project.save()
# Previously we were building the latest version (inactive or active)
# when building the default version,
# some users may have relied on this to update the version list #4450
if version.active:
log.info(
"Triggering build.",
project_slug=project.slug,
version_slug=version.slug,
)
trigger_build(project=project, version=version)
return True
log.info("Not building.", version_slug=version.slug)
return False
def build_versions_from_names(project, versions_info: list[VersionInfo]):
"""
Build the branches or tags from the project.
:param project: Project instance
:returns: A tuple with the versions that were built and the versions that were not built.
"""
to_build = set()
not_building = set()
for version_info in versions_info:
for version in project.versions_from_name(version_info.name, version_info.type):
log.debug(
"Processing.",
project_slug=project.slug,
version_slug=version.slug,
)
if version.slug in to_build:
continue
version_built = _build_version(project, version)
if version_built:
to_build.add(version.slug)
else:
not_building.add(version.slug)
return to_build, not_building
def trigger_sync_versions(project):
"""
Sync the versions of a repo using its latest version.
This doesn't register a new build,
but clones the repo and syncs the versions.
Due that `sync_repository_task` is bound to a version,
we always pass the default version.
:returns: The version slug that was used to trigger the clone.
:rtype: str or ``None`` if failed
"""
if not Project.objects.is_active(project):
log.warning(
"Sync not triggered because project is not active.",
project_slug=project.slug,
)
return None
try:
version = project.get_latest_version()
if not version:
log.info("Unable to sync versions, project doesn't have a valid latest version.")
return None
if project.has_feature(Feature.SKIP_SYNC_VERSIONS):
log.info("Skipping sync versions for project.", project_slug=project.slug)
return None
_, build_api_key = BuildAPIKey.objects.create_key(project=project)
log.debug(
"Triggering sync repository.",
project_slug=version.project.slug,
version_slug=version.slug,
)
options = {}
# Use custom queue if defined, as some repositories need to
# be synced from a specific queue (like IP restricted ones).
if project.build_queue:
options["queue"] = project.build_queue
sync_repository_task.apply_async(
args=[version.pk],
kwargs={"build_api_key": build_api_key},
**options,
)
return version.slug
except Exception:
log.exception("Unknown sync versions exception")
return None
def get_or_create_external_version(project, version_data):
"""
Get or create version using the ``commit`` as identifier, and PR id as ``verbose_name``.
if external version does not exist create an external version
:param project: Project instance
:param version_data: A :py:class:`readthedocs.api.v2.views.integrations.ExternalVersionData`
instance.
:returns: External version.
:rtype: Version
"""
external_version, created = project.versions.get_or_create(
verbose_name=version_data.id,
type=EXTERNAL,
defaults={
"identifier": version_data.commit,
"active": True,
"state": EXTERNAL_VERSION_STATE_OPEN,
},
)
if created:
log.info(
"External version created.",
project_slug=project.slug,
version_slug=external_version.slug,
)
else:
# Identifier will change if there is a new commit to the Pull/Merge Request.
external_version.identifier = version_data.commit
# If the PR was previously closed it was marked as closed
external_version.state = EXTERNAL_VERSION_STATE_OPEN
external_version.active = True
external_version.save()
log.info(
"External version updated.",
project_slug=project.slug,
version_slug=external_version.slug,
)
return external_version
def close_external_version(project, version_data):
"""
Close external versions using `identifier` and `verbose_name`.
We mark the version's state as `closed` so another celery task will remove
it after some days. If external version does not exist then returns `None`.
:param project: Project instance
:param version_data: A :py:class:`readthedocs.api.v2.views.integrations.ExternalVersionData`
instance.
:rtype: str
"""
external_version = (
project.versions(manager=EXTERNAL).filter(verbose_name=version_data.id).first()
)
if external_version:
external_version.state = EXTERNAL_VERSION_STATE_CLOSED
external_version.save()
log.info(
"External version marked as closed.",
project_slug=project.slug,
version_slug=external_version.slug,
)
return external_version.verbose_name
return None
def build_external_version(project, version):
"""
Where we actually trigger builds for external versions.
All pull/merge request webhook logic should route here to call ``trigger_build``.
"""
if not project.has_valid_webhook:
project.has_valid_webhook = True
project.save()
# Build External version
log.info(
"Building external version",
project_slug=project.slug,
version_slug=version.slug,
)
trigger_build(project=project, version=version, commit=version.identifier)
return version.verbose_name
|
VersionInfo
|
python
|
numpy__numpy
|
numpy/random/tests/test_randomstate.py
|
{
"start": 5893,
"end": 8115
}
|
class ____:
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, random.multinomial, 1, p,
float(1))
def test_invalid_prob(self):
assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])
def test_invalid_n(self):
assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
def test_p_non_contiguous(self):
p = np.arange(15.)
p /= np.sum(p[1::3])
pvals = p[1::3]
rng = random.RandomState(1432985819)
non_contig = rng.multinomial(100, pvals=pvals)
rng = random.RandomState(1432985819)
contig = rng.multinomial(100, pvals=np.ascontiguousarray(pvals))
assert_array_equal(non_contig, contig)
def test_multinomial_pvals_float32(self):
x = np.array([9.9e-01, 9.9e-01, 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09,
1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09], dtype=np.float32)
pvals = x / x.sum()
match = r"[\w\s]*pvals array is cast to 64-bit floating"
with pytest.raises(ValueError, match=match):
random.multinomial(1, pvals)
def test_multinomial_n_float(self):
# Non-index integer types should gracefully truncate floats
random.multinomial(100.5, [0.2, 0.8])
|
TestMultinomial
|
python
|
doocs__leetcode
|
solution/1900-1999/1998.GCD Sort of an Array/Solution.py
|
{
"start": 0,
"end": 685
}
|
class ____:
def gcdSort(self, nums: List[int]) -> bool:
n = 10**5 + 10
p = list(range(n))
f = defaultdict(list)
mx = max(nums)
for i in range(2, mx + 1):
if f[i]:
continue
for j in range(i, mx + 1, i):
f[j].append(i)
def find(x):
if p[x] != x:
p[x] = find(p[x])
return p[x]
for i in nums:
for j in f[i]:
p[find(i)] = find(j)
s = sorted(nums)
for i, num in enumerate(nums):
if s[i] != num and find(num) != find(s[i]):
return False
return True
|
Solution
|
python
|
spack__spack
|
lib/spack/spack/modules/tcl.py
|
{
"start": 2100,
"end": 2370
}
|
class ____(BaseContext):
"""Context class for tcl module files."""
@tengine.context_property
def prerequisites(self):
"""List of modules that needs to be loaded automatically."""
return self._create_module_list_of("specs_to_prereq")
|
TclContext
|
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/parametertree/parameterTypes/text.py
|
{
"start": 115,
"end": 530
}
|
class ____(WidgetParameterItem):
"""ParameterItem displaying a QTextEdit widget."""
def makeWidget(self):
self.hideWidget = False
self.asSubItem = True
self.textBox = w = QtWidgets.QTextEdit()
w.sizeHint = lambda: QtCore.QSize(300, 100)
w.value = w.toPlainText
w.setValue = w.setPlainText
w.sigChanged = w.textChanged
return w
|
TextParameterItem
|
python
|
huggingface__transformers
|
src/transformers/models/sam3_tracker_video/modular_sam3_tracker_video.py
|
{
"start": 18776,
"end": 18862
}
|
class ____(Sam2VideoMemoryAttentionLayer):
pass
|
Sam3TrackerVideoMemoryAttentionLayer
|
python
|
pytest-dev__pytest
|
src/_pytest/assertion/rewrite.py
|
{
"start": 21894,
"end": 48206
}
|
class ____(ast.NodeVisitor):
"""Assertion rewriting implementation.
The main entrypoint is to call .run() with an ast.Module instance,
this will then find all the assert statements and rewrite them to
provide intermediate values and a detailed assertion error. See
http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html
for an overview of how this works.
The entry point here is .run() which will iterate over all the
statements in an ast.Module and for each ast.Assert statement it
finds call .visit() with it. Then .visit_Assert() takes over and
is responsible for creating new ast statements to replace the
original assert statement: it rewrites the test of an assertion
to provide intermediate values and replace it with an if statement
which raises an assertion error with a detailed explanation in
case the expression is false and calls pytest_assertion_pass hook
if expression is true.
For this .visit_Assert() uses the visitor pattern to visit all the
AST nodes of the ast.Assert.test field, each visit call returning
an AST node and the corresponding explanation string. During this
state is kept in several instance attributes:
:statements: All the AST statements which will replace the assert
statement.
:variables: This is populated by .variable() with each variable
used by the statements so that they can all be set to None at
the end of the statements.
:variable_counter: Counter to create new unique variables needed
by statements. Variables are created using .variable() and
have the form of "@py_assert0".
:expl_stmts: The AST statements which will be executed to get
data from the assertion. This is the code which will construct
the detailed assertion message that is used in the AssertionError
or for the pytest_assertion_pass hook.
:explanation_specifiers: A dict filled by .explanation_param()
with %-formatting placeholders and their corresponding
expressions to use in the building of an assertion message.
This is used by .pop_format_context() to build a message.
:stack: A stack of the explanation_specifiers dicts maintained by
.push_format_context() and .pop_format_context() which allows
to build another %-formatted string while already building one.
:scope: A tuple containing the current scope used for variables_overwrite.
:variables_overwrite: A dict filled with references to variables
that change value within an assert. This happens when a variable is
reassigned with the walrus operator
This state, except the variables_overwrite, is reset on every new assert
statement visited and used by the other visitors.
"""
def __init__(
self, module_path: str | None, config: Config | None, source: bytes
) -> None:
super().__init__()
self.module_path = module_path
self.config = config
if config is not None:
self.enable_assertion_pass_hook = config.getini(
"enable_assertion_pass_hook"
)
else:
self.enable_assertion_pass_hook = False
self.source = source
self.scope: tuple[ast.AST, ...] = ()
self.variables_overwrite: defaultdict[tuple[ast.AST, ...], dict[str, str]] = (
defaultdict(dict)
)
def run(self, mod: ast.Module) -> None:
"""Find all assert statements in *mod* and rewrite them."""
if not mod.body:
# Nothing to do.
return
# We'll insert some special imports at the top of the module, but after any
# docstrings and __future__ imports, so first figure out where that is.
doc = getattr(mod, "docstring", None)
expect_docstring = doc is None
if doc is not None and self.is_rewrite_disabled(doc):
return
pos = 0
for item in mod.body:
match item:
case ast.Expr(value=ast.Constant(value=str() as doc)) if (
expect_docstring
):
if self.is_rewrite_disabled(doc):
return
expect_docstring = False
case ast.ImportFrom(level=0, module="__future__"):
pass
case _:
break
pos += 1
# Special case: for a decorated function, set the lineno to that of the
# first decorator, not the `def`. Issue #4984.
if isinstance(item, ast.FunctionDef) and item.decorator_list:
lineno = item.decorator_list[0].lineno
else:
lineno = item.lineno
# Now actually insert the special imports.
aliases = [
ast.alias("builtins", "@py_builtins", lineno=lineno, col_offset=0),
ast.alias(
"_pytest.assertion.rewrite",
"@pytest_ar",
lineno=lineno,
col_offset=0,
),
]
imports = [
ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases
]
mod.body[pos:pos] = imports
# Collect asserts.
self.scope = (mod,)
nodes: list[ast.AST | Sentinel] = [mod]
while nodes:
node = nodes.pop()
if isinstance(node, ast.FunctionDef | ast.AsyncFunctionDef | ast.ClassDef):
self.scope = tuple((*self.scope, node))
nodes.append(_SCOPE_END_MARKER)
if node == _SCOPE_END_MARKER:
self.scope = self.scope[:-1]
continue
assert isinstance(node, ast.AST)
for name, field in ast.iter_fields(node):
if isinstance(field, list):
new: list[ast.AST] = []
for i, child in enumerate(field):
if isinstance(child, ast.Assert):
# Transform assert.
new.extend(self.visit(child))
else:
new.append(child)
if isinstance(child, ast.AST):
nodes.append(child)
setattr(node, name, new)
elif (
isinstance(field, ast.AST)
# Don't recurse into expressions as they can't contain
# asserts.
and not isinstance(field, ast.expr)
):
nodes.append(field)
@staticmethod
def is_rewrite_disabled(docstring: str) -> bool:
return "PYTEST_DONT_REWRITE" in docstring
def variable(self) -> str:
"""Get a new variable."""
# Use a character invalid in python identifiers to avoid clashing.
name = "@py_assert" + str(next(self.variable_counter))
self.variables.append(name)
return name
def assign(self, expr: ast.expr) -> ast.Name:
"""Give *expr* a name."""
name = self.variable()
self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
return ast.copy_location(ast.Name(name, ast.Load()), expr)
def display(self, expr: ast.expr) -> ast.expr:
"""Call saferepr on the expression."""
return self.helper("_saferepr", expr)
def helper(self, name: str, *args: ast.expr) -> ast.expr:
"""Call a helper in this module."""
py_name = ast.Name("@pytest_ar", ast.Load())
attr = ast.Attribute(py_name, name, ast.Load())
return ast.Call(attr, list(args), [])
def builtin(self, name: str) -> ast.Attribute:
"""Return the builtin called *name*."""
builtin_name = ast.Name("@py_builtins", ast.Load())
return ast.Attribute(builtin_name, name, ast.Load())
def explanation_param(self, expr: ast.expr) -> str:
"""Return a new named %-formatting placeholder for expr.
This creates a %-formatting placeholder for expr in the
current formatting context, e.g. ``%(py0)s``. The placeholder
and expr are placed in the current format context so that it
can be used on the next call to .pop_format_context().
"""
specifier = "py" + str(next(self.variable_counter))
self.explanation_specifiers[specifier] = expr
return "%(" + specifier + ")s"
def push_format_context(self) -> None:
"""Create a new formatting context.
The format context is used for when an explanation wants to
have a variable value formatted in the assertion message. In
this case the value required can be added using
.explanation_param(). Finally .pop_format_context() is used
to format a string of %-formatted values as added by
.explanation_param().
"""
self.explanation_specifiers: dict[str, ast.expr] = {}
self.stack.append(self.explanation_specifiers)
def pop_format_context(self, expl_expr: ast.expr) -> ast.Name:
"""Format the %-formatted string with current format context.
The expl_expr should be an str ast.expr instance constructed from
the %-placeholders created by .explanation_param(). This will
add the required code to format said string to .expl_stmts and
return the ast.Name instance of the formatted string.
"""
current = self.stack.pop()
if self.stack:
self.explanation_specifiers = self.stack[-1]
keys: list[ast.expr | None] = [ast.Constant(key) for key in current.keys()]
format_dict = ast.Dict(keys, list(current.values()))
form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
name = "@py_format" + str(next(self.variable_counter))
if self.enable_assertion_pass_hook:
self.format_variables.append(name)
self.expl_stmts.append(ast.Assign([ast.Name(name, ast.Store())], form))
return ast.Name(name, ast.Load())
def generic_visit(self, node: ast.AST) -> tuple[ast.Name, str]:
"""Handle expressions we don't have custom code for."""
assert isinstance(node, ast.expr)
res = self.assign(node)
return res, self.explanation_param(self.display(res))
def visit_Assert(self, assert_: ast.Assert) -> list[ast.stmt]:
"""Return the AST statements to replace the ast.Assert instance.
This rewrites the test of an assertion to provide
intermediate values and replace it with an if statement which
raises an assertion error with a detailed explanation in case
the expression is false.
"""
if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1:
import warnings
from _pytest.warning_types import PytestAssertRewriteWarning
# TODO: This assert should not be needed.
assert self.module_path is not None
warnings.warn_explicit(
PytestAssertRewriteWarning(
"assertion is always true, perhaps remove parentheses?"
),
category=None,
filename=self.module_path,
lineno=assert_.lineno,
)
self.statements: list[ast.stmt] = []
self.variables: list[str] = []
self.variable_counter = itertools.count()
if self.enable_assertion_pass_hook:
self.format_variables: list[str] = []
self.stack: list[dict[str, ast.expr]] = []
self.expl_stmts: list[ast.stmt] = []
self.push_format_context()
# Rewrite assert into a bunch of statements.
top_condition, explanation = self.visit(assert_.test)
negation = ast.UnaryOp(ast.Not(), top_condition)
if self.enable_assertion_pass_hook: # Experimental pytest_assertion_pass hook
msg = self.pop_format_context(ast.Constant(explanation))
# Failed
if assert_.msg:
assertmsg = self.helper("_format_assertmsg", assert_.msg)
gluestr = "\n>assert "
else:
assertmsg = ast.Constant("")
gluestr = "assert "
err_explanation = ast.BinOp(ast.Constant(gluestr), ast.Add(), msg)
err_msg = ast.BinOp(assertmsg, ast.Add(), err_explanation)
err_name = ast.Name("AssertionError", ast.Load())
fmt = self.helper("_format_explanation", err_msg)
exc = ast.Call(err_name, [fmt], [])
raise_ = ast.Raise(exc, None)
statements_fail = []
statements_fail.extend(self.expl_stmts)
statements_fail.append(raise_)
# Passed
fmt_pass = self.helper("_format_explanation", msg)
orig = _get_assertion_exprs(self.source)[assert_.lineno]
hook_call_pass = ast.Expr(
self.helper(
"_call_assertion_pass",
ast.Constant(assert_.lineno),
ast.Constant(orig),
fmt_pass,
)
)
# If any hooks implement assert_pass hook
hook_impl_test = ast.If(
self.helper("_check_if_assertion_pass_impl"),
[*self.expl_stmts, hook_call_pass],
[],
)
statements_pass: list[ast.stmt] = [hook_impl_test]
# Test for assertion condition
main_test = ast.If(negation, statements_fail, statements_pass)
self.statements.append(main_test)
if self.format_variables:
variables: list[ast.expr] = [
ast.Name(name, ast.Store()) for name in self.format_variables
]
clear_format = ast.Assign(variables, ast.Constant(None))
self.statements.append(clear_format)
else: # Original assertion rewriting
# Create failure message.
body = self.expl_stmts
self.statements.append(ast.If(negation, body, []))
if assert_.msg:
assertmsg = self.helper("_format_assertmsg", assert_.msg)
explanation = "\n>assert " + explanation
else:
assertmsg = ast.Constant("")
explanation = "assert " + explanation
template = ast.BinOp(assertmsg, ast.Add(), ast.Constant(explanation))
msg = self.pop_format_context(template)
fmt = self.helper("_format_explanation", msg)
err_name = ast.Name("AssertionError", ast.Load())
exc = ast.Call(err_name, [fmt], [])
raise_ = ast.Raise(exc, None)
body.append(raise_)
# Clear temporary variables by setting them to None.
if self.variables:
variables = [ast.Name(name, ast.Store()) for name in self.variables]
clear = ast.Assign(variables, ast.Constant(None))
self.statements.append(clear)
# Fix locations (line numbers/column offsets).
for stmt in self.statements:
for node in traverse_node(stmt):
if getattr(node, "lineno", None) is None:
# apply the assertion location to all generated ast nodes without source location
# and preserve the location of existing nodes or generated nodes with an correct location.
ast.copy_location(node, assert_)
return self.statements
def visit_NamedExpr(self, name: ast.NamedExpr) -> tuple[ast.NamedExpr, str]:
# This method handles the 'walrus operator' repr of the target
# name if it's a local variable or _should_repr_global_name()
# thinks it's acceptable.
locs = ast.Call(self.builtin("locals"), [], [])
target_id = name.target.id
inlocs = ast.Compare(ast.Constant(target_id), [ast.In()], [locs])
dorepr = self.helper("_should_repr_global_name", name)
test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
expr = ast.IfExp(test, self.display(name), ast.Constant(target_id))
return name, self.explanation_param(expr)
def visit_Name(self, name: ast.Name) -> tuple[ast.Name, str]:
# Display the repr of the name if it's a local variable or
# _should_repr_global_name() thinks it's acceptable.
locs = ast.Call(self.builtin("locals"), [], [])
inlocs = ast.Compare(ast.Constant(name.id), [ast.In()], [locs])
dorepr = self.helper("_should_repr_global_name", name)
test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
expr = ast.IfExp(test, self.display(name), ast.Constant(name.id))
return name, self.explanation_param(expr)
def visit_BoolOp(self, boolop: ast.BoolOp) -> tuple[ast.Name, str]:
res_var = self.variable()
expl_list = self.assign(ast.List([], ast.Load()))
app = ast.Attribute(expl_list, "append", ast.Load())
is_or = int(isinstance(boolop.op, ast.Or))
body = save = self.statements
fail_save = self.expl_stmts
levels = len(boolop.values) - 1
self.push_format_context()
# Process each operand, short-circuiting if needed.
for i, v in enumerate(boolop.values):
if i:
fail_inner: list[ast.stmt] = []
# cond is set in a prior loop iteration below
self.expl_stmts.append(ast.If(cond, fail_inner, [])) # noqa: F821
self.expl_stmts = fail_inner
match v:
# Check if the left operand is an ast.NamedExpr and the value has already been visited
case ast.Compare(
left=ast.NamedExpr(target=ast.Name(id=target_id))
) if target_id in [
e.id for e in boolop.values[:i] if hasattr(e, "id")
]:
pytest_temp = self.variable()
self.variables_overwrite[self.scope][target_id] = v.left # type:ignore[assignment]
# mypy's false positive, we're checking that the 'target' attribute exists.
v.left.target.id = pytest_temp # type:ignore[attr-defined]
self.push_format_context()
res, expl = self.visit(v)
body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
expl_format = self.pop_format_context(ast.Constant(expl))
call = ast.Call(app, [expl_format], [])
self.expl_stmts.append(ast.Expr(call))
if i < levels:
cond: ast.expr = res
if is_or:
cond = ast.UnaryOp(ast.Not(), cond)
inner: list[ast.stmt] = []
self.statements.append(ast.If(cond, inner, []))
self.statements = body = inner
self.statements = save
self.expl_stmts = fail_save
expl_template = self.helper("_format_boolop", expl_list, ast.Constant(is_or))
expl = self.pop_format_context(expl_template)
return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
def visit_UnaryOp(self, unary: ast.UnaryOp) -> tuple[ast.Name, str]:
pattern = UNARY_MAP[unary.op.__class__]
operand_res, operand_expl = self.visit(unary.operand)
res = self.assign(ast.copy_location(ast.UnaryOp(unary.op, operand_res), unary))
return res, pattern % (operand_expl,)
def visit_BinOp(self, binop: ast.BinOp) -> tuple[ast.Name, str]:
symbol = BINOP_MAP[binop.op.__class__]
left_expr, left_expl = self.visit(binop.left)
right_expr, right_expl = self.visit(binop.right)
explanation = f"({left_expl} {symbol} {right_expl})"
res = self.assign(
ast.copy_location(ast.BinOp(left_expr, binop.op, right_expr), binop)
)
return res, explanation
def visit_Call(self, call: ast.Call) -> tuple[ast.Name, str]:
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
for arg in call.args:
if isinstance(arg, ast.Name) and arg.id in self.variables_overwrite.get(
self.scope, {}
):
arg = self.variables_overwrite[self.scope][arg.id] # type:ignore[assignment]
res, expl = self.visit(arg)
arg_expls.append(expl)
new_args.append(res)
for keyword in call.keywords:
match keyword.value:
case ast.Name(id=id) if id in self.variables_overwrite.get(
self.scope, {}
):
keyword.value = self.variables_overwrite[self.scope][id] # type:ignore[assignment]
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
if keyword.arg:
arg_expls.append(keyword.arg + "=" + expl)
else: # **args have `arg` keywords with an .arg of None
arg_expls.append("**" + expl)
expl = "{}({})".format(func_expl, ", ".join(arg_expls))
new_call = ast.copy_location(ast.Call(new_func, new_args, new_kwargs), call)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = f"{res_expl}\n{{{res_expl} = {expl}\n}}"
return res, outer_expl
def visit_Starred(self, starred: ast.Starred) -> tuple[ast.Starred, str]:
# A Starred node can appear in a function call.
res, expl = self.visit(starred.value)
new_starred = ast.Starred(res, starred.ctx)
return new_starred, "*" + expl
def visit_Attribute(self, attr: ast.Attribute) -> tuple[ast.Name, str]:
if not isinstance(attr.ctx, ast.Load):
return self.generic_visit(attr)
value, value_expl = self.visit(attr.value)
res = self.assign(
ast.copy_location(ast.Attribute(value, attr.attr, ast.Load()), attr)
)
res_expl = self.explanation_param(self.display(res))
pat = "%s\n{%s = %s.%s\n}"
expl = pat % (res_expl, res_expl, value_expl, attr.attr)
return res, expl
def visit_Compare(self, comp: ast.Compare) -> tuple[ast.expr, str]:
self.push_format_context()
# We first check if we have overwritten a variable in the previous assert
match comp.left:
case ast.Name(id=name_id) if name_id in self.variables_overwrite.get(
self.scope, {}
):
comp.left = self.variables_overwrite[self.scope][name_id] # type: ignore[assignment]
case ast.NamedExpr(target=ast.Name(id=target_id)):
self.variables_overwrite[self.scope][target_id] = comp.left # type: ignore[assignment]
left_res, left_expl = self.visit(comp.left)
if isinstance(comp.left, ast.Compare | ast.BoolOp):
left_expl = f"({left_expl})"
res_variables = [self.variable() for i in range(len(comp.ops))]
load_names: list[ast.expr] = [ast.Name(v, ast.Load()) for v in res_variables]
store_names = [ast.Name(v, ast.Store()) for v in res_variables]
it = zip(range(len(comp.ops)), comp.ops, comp.comparators, strict=True)
expls: list[ast.expr] = []
syms: list[ast.expr] = []
results = [left_res]
for i, op, next_operand in it:
match (next_operand, left_res):
case (
ast.NamedExpr(target=ast.Name(id=target_id)),
ast.Name(id=name_id),
) if target_id == name_id:
next_operand.target.id = self.variable()
self.variables_overwrite[self.scope][name_id] = next_operand # type: ignore[assignment]
next_res, next_expl = self.visit(next_operand)
if isinstance(next_operand, ast.Compare | ast.BoolOp):
next_expl = f"({next_expl})"
results.append(next_res)
sym = BINOP_MAP[op.__class__]
syms.append(ast.Constant(sym))
expl = f"{left_expl} {sym} {next_expl}"
expls.append(ast.Constant(expl))
res_expr = ast.copy_location(ast.Compare(left_res, [op], [next_res]), comp)
self.statements.append(ast.Assign([store_names[i]], res_expr))
left_res, left_expl = next_res, next_expl
# Use pytest.assertion.util._reprcompare if that's available.
expl_call = self.helper(
"_call_reprcompare",
ast.Tuple(syms, ast.Load()),
ast.Tuple(load_names, ast.Load()),
ast.Tuple(expls, ast.Load()),
ast.Tuple(results, ast.Load()),
)
if len(comp.ops) > 1:
res: ast.expr = ast.BoolOp(ast.And(), load_names)
else:
res = load_names[0]
return res, self.explanation_param(self.pop_format_context(expl_call))
def try_makedirs(cache_dir: Path) -> bool:
"""Attempt to create the given directory and sub-directories exist.
Returns True if successful or if it already exists.
"""
try:
os.makedirs(cache_dir, exist_ok=True)
except (FileNotFoundError, NotADirectoryError, FileExistsError):
# One of the path components was not a directory:
# - we're in a zip file
# - it is a file
return False
except PermissionError:
return False
except OSError as e:
# as of now, EROFS doesn't have an equivalent OSError-subclass
#
# squashfuse_ll returns ENOSYS "OSError: [Errno 38] Function not
# implemented" for a read-only error
if e.errno in {errno.EROFS, errno.ENOSYS}:
return False
raise
return True
def get_cache_dir(file_path: Path) -> Path:
"""Return the cache directory to write .pyc files for the given .py file path."""
if sys.pycache_prefix:
# given:
# prefix = '/tmp/pycs'
# path = '/home/user/proj/test_app.py'
# we want:
# '/tmp/pycs/home/user/proj'
return Path(sys.pycache_prefix) / Path(*file_path.parts[1:-1])
else:
# classic pycache directory
return file_path.parent / "__pycache__"
|
AssertionRewriter
|
python
|
modin-project__modin
|
modin/tests/pandas/test_io.py
|
{
"start": 8097,
"end": 47384
}
|
class ____:
# delimiter tests
@pytest.mark.parametrize("sep", ["_", ",", "."])
@pytest.mark.parametrize("decimal", [".", "_"])
@pytest.mark.parametrize("thousands", [None, ",", "_", " "])
def test_read_csv_seps(self, make_csv_file, sep, decimal, thousands):
unique_filename = make_csv_file(
delimiter=sep,
thousands_separator=thousands,
decimal_separator=decimal,
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=unique_filename,
sep=sep,
decimal=decimal,
thousands=thousands,
)
@pytest.mark.parametrize("sep", [None, "_"])
@pytest.mark.parametrize("delimiter", [".", "_"])
def test_read_csv_seps_except(self, make_csv_file, sep, delimiter):
unique_filename = make_csv_file(delimiter=delimiter)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=unique_filename,
delimiter=delimiter,
sep=sep,
expected_exception=ValueError(
"Specified a sep and a delimiter; you can only specify one."
),
)
@pytest.mark.parametrize(
"dtype_backend", [lib.no_default, "numpy_nullable", "pyarrow"]
)
def test_read_csv_dtype_backend(self, make_csv_file, dtype_backend):
unique_filename = make_csv_file()
def comparator(df1, df2):
df_equals(df1, df2)
df_equals(df1.dtypes, df2.dtypes)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=unique_filename,
dtype_backend=dtype_backend,
comparator=comparator,
)
# Column and Index Locations and Names tests
@pytest.mark.parametrize("header", ["infer", None, 0])
@pytest.mark.parametrize("index_col", [None, "col1"])
@pytest.mark.parametrize(
"names", [lib.no_default, ["col1"], ["c1", "c2", "c3", "c4", "c5", "c6"]]
)
@pytest.mark.parametrize(
"usecols", [None, ["col1"], ["col1", "col2", "col6"], [0, 1, 5]]
)
@pytest.mark.parametrize("skip_blank_lines", [True, False])
def test_read_csv_col_handling(
self,
header,
index_col,
names,
usecols,
skip_blank_lines,
):
if names is lib.no_default:
pytest.skip("some parameters combiantions fails: issue #2312")
if header in ["infer", None] and names is not lib.no_default:
pytest.skip(
"Heterogeneous data in a column is not cast to a common type: issue #3346"
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_blank_lines"],
header=header,
index_col=index_col,
names=names,
usecols=usecols,
skip_blank_lines=skip_blank_lines,
# FIXME: https://github.com/modin-project/modin/issues/7035
expected_exception=False,
)
@pytest.mark.parametrize("usecols", [lambda col_name: col_name in ["a", "b", "e"]])
def test_from_csv_with_callable_usecols(self, usecols):
fname = "modin/tests/pandas/data/test_usecols.csv"
pandas_df = pandas.read_csv(fname, usecols=usecols)
modin_df = pd.read_csv(fname, usecols=usecols)
df_equals(modin_df, pandas_df)
# General Parsing Configuration
@pytest.mark.parametrize("dtype", [None, True])
@pytest.mark.parametrize("engine", [None, "python", "c"])
@pytest.mark.parametrize(
"converters",
[
None,
{
"col1": lambda x: np.int64(x) * 10,
"col2": pandas.to_datetime,
"col4": lambda x: x.replace(":", ";"),
},
],
)
@pytest.mark.parametrize("skipfooter", [0, 10])
def test_read_csv_parsing_1(
self,
dtype,
engine,
converters,
skipfooter,
):
if dtype:
dtype = {
col: "object"
for col in pandas.read_csv(
pytest.csvs_names["test_read_csv_regular"], nrows=1
).columns
}
expected_exception = None
if engine == "c" and skipfooter != 0:
expected_exception = ValueError(
"the 'c' engine does not support skipfooter"
)
eval_io(
fn_name="read_csv",
expected_exception=expected_exception,
check_kwargs_callable=not callable(converters),
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
dtype=dtype,
engine=engine,
converters=converters,
skipfooter=skipfooter,
)
@pytest.mark.parametrize("header", ["infer", None, 0])
@pytest.mark.parametrize(
"skiprows",
[
2,
lambda x: x % 2,
lambda x: x > 25,
lambda x: x > 128,
np.arange(10, 50),
np.arange(10, 50, 2),
],
)
@pytest.mark.parametrize("nrows", [35, None])
@pytest.mark.parametrize(
"names",
[
[f"c{col_number}" for col_number in range(4)],
[f"c{col_number}" for col_number in range(6)],
None,
],
)
@pytest.mark.parametrize("encoding", ["latin1", "windows-1251", None])
def test_read_csv_parsing_2(
self,
make_csv_file,
request,
header,
skiprows,
nrows,
names,
encoding,
):
if encoding:
unique_filename = make_csv_file(encoding=encoding)
else:
unique_filename = pytest.csvs_names["test_read_csv_regular"]
kwargs = {
"filepath_or_buffer": unique_filename,
"header": header,
"skiprows": skiprows,
"nrows": nrows,
"names": names,
"encoding": encoding,
}
if Engine.get() != "Python":
df = pandas.read_csv(**dict(kwargs, nrows=1))
# in that case first partition will contain str
if df[df.columns[0]][df.index[0]] in ["c1", "col1", "c3", "col3"]:
pytest.xfail("read_csv incorrect output with float data - issue #2634")
eval_io(
fn_name="read_csv",
expected_exception=None,
check_kwargs_callable=not callable(skiprows),
# read_csv kwargs
**kwargs,
)
@pytest.mark.parametrize("true_values", [["Yes"], ["Yes", "true"], None])
@pytest.mark.parametrize("false_values", [["No"], ["No", "false"], None])
@pytest.mark.parametrize("skipfooter", [0, 10])
@pytest.mark.parametrize("nrows", [35, None])
def test_read_csv_parsing_3(
self,
true_values,
false_values,
skipfooter,
nrows,
):
# TODO: Check #2446 as it was closed
xfail_case = (false_values or true_values) and Engine.get() != "Python"
if xfail_case:
pytest.xfail("modin and pandas dataframes differs - issue #2446")
expected_exception = None
if skipfooter != 0 and nrows is not None:
expected_exception = ValueError("'skipfooter' not supported with 'nrows'")
eval_io(
fn_name="read_csv",
expected_exception=expected_exception,
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_yes_no"],
true_values=true_values,
false_values=false_values,
skipfooter=skipfooter,
nrows=nrows,
)
def test_read_csv_skipinitialspace(self):
with ensure_clean(".csv") as unique_filename:
str_initial_spaces = (
"col1,col2,col3,col4\n"
+ "five, six, seven, eight\n"
+ " five, six, seven, eight\n"
+ "five, six, seven, eight\n"
)
eval_io_from_str(str_initial_spaces, unique_filename, skipinitialspace=True)
# NA and Missing Data Handling tests
@pytest.mark.parametrize("na_values", ["custom_nan", "73"])
@pytest.mark.parametrize("keep_default_na", [True, False])
@pytest.mark.parametrize("na_filter", [True, False])
@pytest.mark.parametrize("verbose", [True, False])
@pytest.mark.parametrize("skip_blank_lines", [True, False])
def test_read_csv_nans_handling(
self,
na_values,
keep_default_na,
na_filter,
verbose,
skip_blank_lines,
):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_nans"],
na_values=na_values,
keep_default_na=keep_default_na,
na_filter=na_filter,
verbose=verbose,
skip_blank_lines=skip_blank_lines,
)
# Datetime Handling tests
@pytest.mark.parametrize(
"parse_dates", [True, False, ["col2"], ["col2", "col4"], [1, 3]]
)
@pytest.mark.parametrize("infer_datetime_format", [True, False])
@pytest.mark.parametrize("keep_date_col", [True, False])
@pytest.mark.parametrize(
"date_parser",
[lib.no_default, lambda x: pandas.to_datetime(x, format="%Y-%m-%d")],
ids=["default", "format-Ymd"],
)
@pytest.mark.parametrize("dayfirst", [True, False])
@pytest.mark.parametrize("cache_dates", [True, False])
def test_read_csv_datetime(
self,
parse_dates,
infer_datetime_format,
keep_date_col,
date_parser,
dayfirst,
cache_dates,
request,
):
expected_exception = None
if "format-Ymd" in request.node.callspec.id and (
"parse_dates3" in request.node.callspec.id
or "parse_dates4" in request.node.callspec.id
):
msg = (
'time data "00:00:00" doesn\'t match format "%Y-%m-%d", at position 0. You might want to try:\n'
+ " - passing `format` if your strings have a consistent format;\n"
+ " - passing `format='ISO8601'` if your strings are all ISO8601 "
+ "but not necessarily in exactly the same format;\n"
+ " - passing `format='mixed'`, and the format will be inferred "
+ "for each element individually. You might want to use `dayfirst` "
+ "alongside this."
)
expected_exception = ValueError(msg)
eval_io(
fn_name="read_csv",
check_kwargs_callable=not callable(date_parser),
expected_exception=expected_exception,
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
parse_dates=parse_dates,
infer_datetime_format=infer_datetime_format,
keep_date_col=keep_date_col,
date_parser=date_parser,
dayfirst=dayfirst,
cache_dates=cache_dates,
)
@pytest.mark.parametrize("date", ["2023-01-01 00:00:01.000000000", "2023"])
@pytest.mark.parametrize("dtype", [None, "str", {"id": "int64"}])
@pytest.mark.parametrize("parse_dates", [None, [], ["date"], [1]])
def test_read_csv_dtype_parse_dates(self, date, dtype, parse_dates):
with ensure_clean(".csv") as filename:
with open(filename, "w") as file:
file.write(f"id,date\n1,{date}")
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=filename,
dtype=dtype,
parse_dates=parse_dates,
)
# Iteration tests
@pytest.mark.parametrize("iterator", [True, False])
def test_read_csv_iteration(self, iterator):
filename = pytest.csvs_names["test_read_csv_regular"]
# Tests __next__ and correctness of reader as an iterator
# Use larger chunksize to read through file quicker
rdf_reader = pd.read_csv(filename, chunksize=500, iterator=iterator)
pd_reader = pandas.read_csv(filename, chunksize=500, iterator=iterator)
for modin_df, pd_df in zip(rdf_reader, pd_reader):
df_equals(modin_df, pd_df)
# Tests that get_chunk works correctly
rdf_reader = pd.read_csv(filename, chunksize=1, iterator=iterator)
pd_reader = pandas.read_csv(filename, chunksize=1, iterator=iterator)
modin_df = rdf_reader.get_chunk(1)
pd_df = pd_reader.get_chunk(1)
df_equals(modin_df, pd_df)
# Tests that read works correctly
rdf_reader = pd.read_csv(filename, chunksize=1, iterator=iterator)
pd_reader = pandas.read_csv(filename, chunksize=1, iterator=iterator)
modin_df = rdf_reader.read()
pd_df = pd_reader.read()
df_equals(modin_df, pd_df)
# Tests #6553
if iterator:
rdf_reader = pd.read_csv(filename, iterator=iterator)
pd_reader = pandas.read_csv(filename, iterator=iterator)
modin_df = rdf_reader.read()
pd_df = pd_reader.read()
df_equals(modin_df, pd_df)
@pytest.mark.parametrize("pathlike", [False, True])
def test_read_csv_encoding_976(self, pathlike):
file_name = "modin/tests/pandas/data/issue_976.csv"
if pathlike:
file_name = Path(file_name)
names = [str(i) for i in range(11)]
kwargs = {
"sep": ";",
"names": names,
"encoding": "windows-1251",
}
df1 = pd.read_csv(file_name, **kwargs)
df2 = pandas.read_csv(file_name, **kwargs)
# these columns contain data of various types in partitions
# see #1931 for details;
df1 = df1.drop(["4", "5"], axis=1)
df2 = df2.drop(["4", "5"], axis=1)
df_equals(df1, df2)
# Quoting, Compression parameters tests
@pytest.mark.parametrize("compression", ["infer", "gzip", "bz2", "xz", "zip"])
@pytest.mark.parametrize("encoding", [None, "latin8", "utf16"])
@pytest.mark.parametrize("engine", [None, "python", "c", "pyarrow"])
def test_read_csv_compression(self, make_csv_file, compression, encoding, engine):
unique_filename = make_csv_file(encoding=encoding, compression=compression)
expected_exception = None
if encoding == "utf16" and compression in ("bz2", "xz"):
expected_exception = UnicodeError("UTF-16 stream does not start with BOM")
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=unique_filename,
compression=compression,
encoding=encoding,
engine=engine,
expected_exception=expected_exception,
)
@pytest.mark.parametrize(
"encoding",
[
None,
"ISO-8859-1",
"latin1",
"iso-8859-1",
"cp1252",
"utf8",
pytest.param(
"unicode_escape",
marks=pytest.mark.skipif(
condition=sys.version_info < (3, 9),
reason="https://bugs.python.org/issue45461",
),
),
"raw_unicode_escape",
"utf_16_le",
"utf_16_be",
"utf32",
"utf_32_le",
"utf_32_be",
"utf-8-sig",
],
)
def test_read_csv_encoding(self, make_csv_file, encoding):
unique_filename = make_csv_file(encoding=encoding)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=unique_filename,
encoding=encoding,
)
@pytest.mark.parametrize("thousands", [None, ",", "_", " "])
@pytest.mark.parametrize("decimal", [".", "_"])
@pytest.mark.parametrize("lineterminator", [None, "x", "\n"])
@pytest.mark.parametrize("escapechar", [None, "d", "x"])
@pytest.mark.parametrize("dialect", ["test_csv_dialect", "use_dialect_name", None])
def test_read_csv_file_format(
self,
make_csv_file,
thousands,
decimal,
lineterminator,
escapechar,
dialect,
):
if dialect:
test_csv_dialect_params = {
"delimiter": "_",
"doublequote": False,
"escapechar": "\\",
"quotechar": "d",
"quoting": csv.QUOTE_ALL,
}
csv.register_dialect(dialect, **test_csv_dialect_params)
if dialect != "use_dialect_name":
# otherwise try with dialect name instead of `_csv.Dialect` object
dialect = csv.get_dialect(dialect)
unique_filename = make_csv_file(**test_csv_dialect_params)
else:
unique_filename = make_csv_file(
thousands_separator=thousands,
decimal_separator=decimal,
escapechar=escapechar,
lineterminator=lineterminator,
)
expected_exception = None
if dialect is None:
# FIXME: https://github.com/modin-project/modin/issues/7035
expected_exception = False
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=unique_filename,
thousands=thousands,
decimal=decimal,
lineterminator=lineterminator,
escapechar=escapechar,
dialect=dialect,
expected_exception=expected_exception,
)
@pytest.mark.parametrize(
"quoting",
[csv.QUOTE_ALL, csv.QUOTE_MINIMAL, csv.QUOTE_NONNUMERIC, csv.QUOTE_NONE],
)
@pytest.mark.parametrize("quotechar", ['"', "_", "d"])
@pytest.mark.parametrize("doublequote", [True, False])
@pytest.mark.parametrize("comment", [None, "#", "x"])
def test_read_csv_quoting(
self,
make_csv_file,
quoting,
quotechar,
doublequote,
comment,
):
# in these cases escapechar should be set, otherwise error occures
# _csv.Error: need to escape, but no escapechar set"
use_escapechar = (
not doublequote and quotechar != '"' and quoting != csv.QUOTE_NONE
)
escapechar = "\\" if use_escapechar else None
unique_filename = make_csv_file(
quoting=quoting,
quotechar=quotechar,
doublequote=doublequote,
escapechar=escapechar,
comment_col_char=comment,
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=unique_filename,
quoting=quoting,
quotechar=quotechar,
doublequote=doublequote,
escapechar=escapechar,
comment=comment,
)
# Error Handling parameters tests
@pytest.mark.skip(reason="https://github.com/modin-project/modin/issues/6239")
@pytest.mark.parametrize("on_bad_lines", ["error", "warn", "skip", None])
def test_read_csv_error_handling(self, on_bad_lines):
# in that case exceptions are raised both by Modin and pandas
# and tests pass
raise_exception_case = on_bad_lines is not None
# TODO: Check #2500 as it was closed
if not raise_exception_case and Engine.get() not in ["Python"]:
pytest.xfail("read_csv doesn't raise `bad lines` exceptions - issue #2500")
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_bad_lines"],
on_bad_lines=on_bad_lines,
)
@pytest.mark.parametrize("float_precision", [None, "high", "legacy", "round_trip"])
def test_python_engine_float_precision_except(self, float_precision):
expected_exception = None
if float_precision is not None:
expected_exception = ValueError(
"The 'float_precision' option is not supported with the 'python' engine"
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
engine="python",
float_precision=float_precision,
expected_exception=expected_exception,
)
@pytest.mark.parametrize("low_memory", [False, True])
def test_python_engine_low_memory_except(self, low_memory):
expected_exception = None
if not low_memory:
expected_exception = ValueError(
"The 'low_memory' option is not supported with the 'python' engine"
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
engine="python",
low_memory=low_memory,
expected_exception=expected_exception,
)
@pytest.mark.parametrize("delim_whitespace", [True, False])
def test_delim_whitespace(self, delim_whitespace, tmp_path):
str_delim_whitespaces = "col1 col2 col3 col4\n5 6 7 8\n9 10 11 12\n"
unique_filename = get_unique_filename(data_dir=tmp_path)
eval_io_from_str(
str_delim_whitespaces,
unique_filename,
delim_whitespace=delim_whitespace,
)
# Internal parameters tests
@pytest.mark.parametrize("engine", ["c"])
@pytest.mark.parametrize("delimiter", [",", " "])
@pytest.mark.parametrize("low_memory", [True, False])
@pytest.mark.parametrize("memory_map", [True, False])
@pytest.mark.parametrize("float_precision", [None, "high", "round_trip"])
def test_read_csv_internal(
self,
make_csv_file,
engine,
delimiter,
low_memory,
memory_map,
float_precision,
):
unique_filename = make_csv_file(delimiter=delimiter)
eval_io(
filepath_or_buffer=unique_filename,
fn_name="read_csv",
engine=engine,
delimiter=delimiter,
low_memory=low_memory,
memory_map=memory_map,
float_precision=float_precision,
)
# Issue related, specific or corner cases
@pytest.mark.parametrize("nrows", [2, None])
def test_read_csv_bad_quotes(self, nrows):
csv_bad_quotes = (
'1, 2, 3, 4\none, two, three, four\nfive, "six", seven, "eight\n'
)
with ensure_clean(".csv") as unique_filename:
eval_io_from_str(csv_bad_quotes, unique_filename, nrows=nrows)
def test_read_csv_categories(self):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/tests/pandas/data/test_categories.csv",
names=["one", "two"],
dtype={"one": "int64", "two": "category"},
)
@pytest.mark.parametrize("encoding", [None, "utf-8"])
@pytest.mark.parametrize("encoding_errors", ["strict", "ignore"])
@pytest.mark.parametrize(
"parse_dates",
[pytest.param(value, id=id) for id, value in parse_dates_values_by_id.items()],
)
@pytest.mark.parametrize("index_col", [None, 0, 5])
@pytest.mark.parametrize("header", ["infer", 0])
@pytest.mark.parametrize(
"names",
[
None,
[
"timestamp",
"year",
"month",
"date",
"symbol",
"high",
"low",
"open",
"close",
"spread",
"volume",
],
],
)
@pytest.mark.exclude_in_sanity
def test_read_csv_parse_dates(
self,
names,
header,
index_col,
parse_dates,
encoding,
encoding_errors,
request,
):
if names is not None and header == "infer":
pytest.xfail(
"read_csv with Ray engine works incorrectly with date data and names parameter provided - issue #2509"
)
expected_exception = None
if "nonexistent_int_column" in request.node.callspec.id:
expected_exception = IndexError("list index out of range")
elif "nonexistent_string_column" in request.node.callspec.id:
expected_exception = ValueError(
"Missing column provided to 'parse_dates': 'z'"
)
eval_io(
fn_name="read_csv",
expected_exception=expected_exception,
# read_csv kwargs
filepath_or_buffer=time_parsing_csv_path,
names=names,
header=header,
index_col=index_col,
parse_dates=parse_dates,
encoding=encoding,
encoding_errors=encoding_errors,
)
@pytest.mark.parametrize(
"storage_options",
[{"anon": False}, {"anon": True}, {"key": "123", "secret": "123"}, None],
)
@pytest.mark.xfail(
reason="S3 file gone missing, see https://github.com/modin-project/modin/issues/4875"
)
def test_read_csv_s3(self, storage_options):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="s3://noaa-ghcn-pds/csv/1788.csv",
storage_options=storage_options,
)
@pytest.mark.xfail(
reason="S3 file gone missing, see https://github.com/modin-project/modin/issues/7571"
)
def test_read_csv_s3_issue4658(self):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="s3://dask-data/nyc-taxi/2015/yellow_tripdata_2015-01.csv",
nrows=10,
storage_options={"anon": True},
)
@pytest.mark.parametrize("names", [list("XYZ"), None])
@pytest.mark.parametrize("skiprows", [1, 2, 3, 4, None])
def test_read_csv_skiprows_names(self, names, skiprows):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/tests/pandas/data/issue_2239.csv",
names=names,
skiprows=skiprows,
)
def _has_pandas_fallback_reason(self):
# The Python engine does not use custom IO dispatchers, so specialized error messages
# won't appear
return Engine.get() != "Python"
def test_read_csv_default_to_pandas(self):
if self._has_pandas_fallback_reason():
warning_suffix = "buffers"
else:
warning_suffix = ""
with warns_that_defaulting_to_pandas_if(
not current_execution_is_native(), suffix=warning_suffix
):
# This tests that we default to pandas on a buffer
with open(pytest.csvs_names["test_read_csv_regular"], "r") as _f:
pd.read_csv(StringIO(_f.read()))
def test_read_csv_url(self):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="https://raw.githubusercontent.com/modin-project/modin/main/modin/tests/pandas/data/blah.csv",
)
@pytest.mark.parametrize("nrows", [21, 5, None])
@pytest.mark.parametrize("skiprows", [4, 1, 500, None])
def test_read_csv_newlines_in_quotes(self, nrows, skiprows):
expected_exception = None
if skiprows == 500:
expected_exception = pandas.errors.EmptyDataError(
"No columns to parse from file"
)
eval_io(
fn_name="read_csv",
expected_exception=expected_exception,
# read_csv kwargs
filepath_or_buffer="modin/tests/pandas/data/newlines.csv",
nrows=nrows,
skiprows=skiprows,
cast_to_str=True,
)
@pytest.mark.parametrize("skiprows", [None, 0, [], [1, 2], np.arange(0, 2)])
def test_read_csv_skiprows_with_usecols(self, skiprows):
usecols = {"float_data": "float64"}
expected_exception = None
if isinstance(skiprows, np.ndarray):
expected_exception = ValueError(
"Usecols do not match columns, columns expected but not found: ['float_data']"
)
eval_io(
fn_name="read_csv",
expected_exception=expected_exception,
# read_csv kwargs
filepath_or_buffer="modin/tests/pandas/data/issue_4543.csv",
skiprows=skiprows,
usecols=usecols.keys(),
dtype=usecols,
)
def test_read_csv_sep_none(self):
eval_io(
fn_name="read_csv",
modin_warning=ParserWarning,
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
sep=None,
)
def test_read_csv_incorrect_data(self):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/tests/pandas/data/test_categories.json",
)
@pytest.mark.parametrize(
"kwargs",
[
{"names": [5, 1, 3, 4, 2, 6]},
{"names": [0]},
{"names": None, "usecols": [1, 0, 2]},
{"names": [3, 1, 2, 5], "usecols": [4, 1, 3, 2]},
],
)
def test_read_csv_names_neq_num_cols(self, kwargs):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/tests/pandas/data/issue_2074.csv",
**kwargs,
)
def test_read_csv_wrong_path(self):
expected_exception = FileNotFoundError(2, "No such file or directory")
eval_io(
fn_name="read_csv",
expected_exception=expected_exception,
# read_csv kwargs
filepath_or_buffer="/some/wrong/path.csv",
)
@pytest.mark.parametrize("extension", [None, "csv", "csv.gz"])
@pytest.mark.parametrize("sep", [" "])
@pytest.mark.parametrize("header", [False, True, "sfx-"])
@pytest.mark.parametrize("mode", ["w", "wb+"])
@pytest.mark.parametrize("idx_name", [None, "Index"])
@pytest.mark.parametrize("index", [True, False, "New index"])
@pytest.mark.parametrize("index_label", [None, False, "New index"])
@pytest.mark.parametrize("columns", [None, ["col1", "col3", "col5"]])
@pytest.mark.exclude_in_sanity
@pytest.mark.skipif(
condition=Engine.get() == "Unidist" and os.name == "nt",
reason="https://github.com/modin-project/modin/issues/6846",
)
def test_to_csv(
self,
tmp_path,
extension,
sep,
header,
mode,
idx_name,
index,
index_label,
columns,
):
pandas_df = generate_dataframe(idx_name=idx_name)
modin_df = pd.DataFrame(pandas_df)
if isinstance(header, str):
if columns is None:
header = [f"{header}{c}" for c in modin_df.columns]
else:
header = [f"{header}{c}" for c in columns]
eval_to_csv_file(
tmp_path,
modin_obj=modin_df,
pandas_obj=pandas_df,
extension=extension,
sep=sep,
header=header,
mode=mode,
index=index,
index_label=index_label,
columns=columns,
)
@pytest.mark.skipif(
condition=Engine.get() == "Unidist" and os.name == "nt",
reason="https://github.com/modin-project/modin/issues/6846",
)
def test_dataframe_to_csv(self, tmp_path):
pandas_df = pandas.read_csv(pytest.csvs_names["test_read_csv_regular"])
modin_df = pd.DataFrame(pandas_df)
eval_to_csv_file(
tmp_path,
modin_obj=modin_df,
pandas_obj=pandas_df,
extension="csv",
)
@pytest.mark.skipif(
condition=Engine.get() == "Unidist" and os.name == "nt",
reason="https://github.com/modin-project/modin/issues/6846",
)
def test_series_to_csv(self, tmp_path):
pandas_s = pandas.read_csv(
pytest.csvs_names["test_read_csv_regular"], usecols=["col1"]
).squeeze()
modin_s = pd.Series(pandas_s)
eval_to_csv_file(
tmp_path,
modin_obj=modin_s,
pandas_obj=pandas_s,
extension="csv",
)
def test_read_csv_within_decorator(self):
@dummy_decorator()
def wrapped_read_csv(file, method):
if method == "pandas":
return pandas.read_csv(file)
if method == "modin":
return pd.read_csv(file)
pandas_df = wrapped_read_csv(
pytest.csvs_names["test_read_csv_regular"], method="pandas"
)
modin_df = wrapped_read_csv(
pytest.csvs_names["test_read_csv_regular"], method="modin"
)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize(
"read_mode",
[
"r",
"rb",
],
)
@pytest.mark.parametrize("buffer_start_pos", [0, 10])
@pytest.mark.parametrize("set_async_read_mode", [False, True], indirect=True)
def test_read_csv_file_handle(
self, read_mode, make_csv_file, buffer_start_pos, set_async_read_mode
):
unique_filename = make_csv_file()
with open(unique_filename, mode=read_mode) as buffer:
buffer.seek(buffer_start_pos)
pandas_df = pandas.read_csv(buffer)
buffer.seek(buffer_start_pos)
modin_df = pd.read_csv(buffer)
df_equals(modin_df, pandas_df)
@pytest.mark.skipif(
current_execution_is_native(),
reason="no partitions",
)
def test_unnamed_index(self):
def get_internal_df(df):
partition = read_df._query_compiler._modin_frame._partitions[0][0]
return partition.to_pandas()
path = "modin/tests/pandas/data/issue_3119.csv"
read_df = pd.read_csv(path, index_col=0)
assert get_internal_df(read_df).index.name is None
read_df = pd.read_csv(path, index_col=[0, 1])
for name1, name2 in zip(get_internal_df(read_df).index.names, [None, "a"]):
assert name1 == name2
def test_read_csv_empty_frame(self):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
usecols=["col1"],
index_col="col1",
)
@pytest.mark.parametrize(
"skiprows",
[
[x for x in range(10)],
[x + 5 for x in range(15)],
[x for x in range(10) if x % 2 == 0],
[x + 5 for x in range(15) if x % 2 == 0],
lambda x: x % 2,
lambda x: x > 20,
lambda x: x < 20,
lambda x: True,
lambda x: x in [10, 20],
lambda x: x << 10,
],
)
@pytest.mark.parametrize("header", ["infer", None, 0, 1, 150])
def test_read_csv_skiprows_corner_cases(self, skiprows, header):
eval_io(
fn_name="read_csv",
check_kwargs_callable=not callable(skiprows),
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
skiprows=skiprows,
header=header,
dtype="str", # to avoid issues with heterogeneous data
# FIXME: https://github.com/modin-project/modin/issues/7035
expected_exception=False,
)
def test_to_csv_with_index(self, tmp_path):
cols = 100
arows = 20000
keyrange = 100
values = np.vstack(
[
np.random.choice(keyrange, size=(arows)),
np.random.normal(size=(cols, arows)),
]
).transpose()
modin_df = pd.DataFrame(
values,
columns=["key"] + ["avalue" + str(i) for i in range(1, 1 + cols)],
).set_index("key")
pandas_df = pandas.DataFrame(
values,
columns=["key"] + ["avalue" + str(i) for i in range(1, 1 + cols)],
).set_index("key")
eval_to_csv_file(tmp_path, modin_df, pandas_df, "csv")
@pytest.mark.parametrize("set_async_read_mode", [False, True], indirect=True)
def test_read_csv_issue_5150(self, set_async_read_mode):
with ensure_clean(".csv") as unique_filename:
pandas_df = pandas.DataFrame(np.random.randint(0, 100, size=(2**6, 2**6)))
pandas_df.to_csv(unique_filename, index=False)
expected_pandas_df = pandas.read_csv(unique_filename, index_col=False)
modin_df = pd.read_csv(unique_filename, index_col=False)
actual_pandas_df = modin_df._to_pandas()
if AsyncReadMode.get():
# If read operations are asynchronous, then the dataframes
# check should be inside `ensure_clean` context
# because the file may be deleted before actual reading starts
df_equals(expected_pandas_df, actual_pandas_df)
if not AsyncReadMode.get():
df_equals(expected_pandas_df, actual_pandas_df)
@pytest.mark.parametrize("usecols", [None, [0, 1, 2, 3, 4]])
def test_read_csv_1930(self, usecols):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/tests/pandas/data/issue_1930.csv",
names=["c1", "c2", "c3", "c4", "c5"],
usecols=usecols,
)
def _check_relative_io(fn_name, unique_filename, path_arg, storage_default=()):
# Windows can be funny at where it searches for ~; besides, Python >= 3.8 no longer honors %HOME%
dirname, basename = os.path.split(unique_filename)
pinned_home = {envvar: dirname for envvar in ("HOME", "USERPROFILE", "HOMEPATH")}
should_default = Engine.get() == "Python" or StorageFormat.get() in storage_default
with mock.patch.dict(os.environ, pinned_home):
with warns_that_defaulting_to_pandas_if(should_default):
eval_io(
fn_name=fn_name,
**{path_arg: f"~/{basename}"},
)
# check that when read without $HOME patched we have equivalent results
eval_general(
f"~/{basename}",
unique_filename,
lambda fname: getattr(pandas, fn_name)(**{path_arg: fname}),
)
# Leave this test apart from the test classes, which skip the default to pandas
# warning check. We want to make sure we are NOT defaulting to pandas for a
# path relative to user home.
# TODO(https://github.com/modin-project/modin/issues/3655): Get rid of this
# commment once we turn all default to pandas messages into errors.
def test_read_csv_relative_to_user_home(make_csv_file):
unique_filename = make_csv_file()
_check_relative_io("read_csv", unique_filename, "filepath_or_buffer")
@pytest.mark.filterwarnings(default_to_pandas_ignore_string)
|
TestCsv
|
python
|
walkccc__LeetCode
|
solutions/2022. Convert 1D Array Into 2D Array/2022.py
|
{
"start": 0,
"end": 298
}
|
class ____:
def construct2DArray(self, original: list[int],
m: int, n: int) -> list[list[int]]:
if len(original) != m * n:
return []
ans = [[0] * n for _ in range(m)]
for i, num in enumerate(original):
ans[i // n][i % n] = num
return ans
|
Solution
|
python
|
astropy__astropy
|
astropy/units/tests/test_structured.py
|
{
"start": 9031,
"end": 9656
}
|
class ____(StructuredTestBaseWithUnits):
def test_copy(self):
su_copy = copy.copy(self.pv_t_unit)
assert su_copy is not self.pv_t_unit
assert su_copy == self.pv_t_unit
assert su_copy._units is self.pv_t_unit._units
def test_deepcopy(self):
su_copy = copy.deepcopy(self.pv_t_unit)
assert su_copy is not self.pv_t_unit
assert su_copy == self.pv_t_unit
assert su_copy._units is not self.pv_t_unit._units
def test_pickle(self, pickle_protocol): # noqa: F811
check_pickling_recovery(self.pv_t_unit, pickle_protocol)
|
TestStructuredUnitsCopyPickle
|
python
|
django__django
|
tests/files/tests.py
|
{
"start": 9505,
"end": 9810
}
|
class ____(unittest.TestCase):
def test_open_resets_file_to_start_and_returns_context_manager(self):
uf = InMemoryUploadedFile(StringIO("1"), "", "test", "text/plain", 1, "utf8")
uf.read()
with uf.open() as f:
self.assertEqual(f.read(), "1")
|
InMemoryUploadedFileTests
|
python
|
doocs__leetcode
|
solution/3000-3099/3068.Find the Maximum Sum of Node Values/Solution.py
|
{
"start": 0,
"end": 243
}
|
class ____:
def maximumValueSum(self, nums: List[int], k: int, edges: List[List[int]]) -> int:
f0, f1 = 0, -inf
for x in nums:
f0, f1 = max(f0 + x, f1 + (x ^ k)), max(f1 + x, f0 + (x ^ k))
return f0
|
Solution
|
python
|
Lightning-AI__lightning
|
tests/tests_pytorch/trainer/test_dataloaders.py
|
{
"start": 29862,
"end": 31319
}
|
class ____(Callback):
def __init__(self, expected_seeds=(0, 0, 0)):
self.expected_seed = expected_seeds
def on_train_start(self, trainer, pl_module):
train_sampler = trainer.train_dataloader.sampler
assert isinstance(train_sampler, DistributedSampler)
assert train_sampler.shuffle
assert train_sampler.seed == self.expected_seed[0]
def on_validation_start(self, trainer, pl_module):
val_sampler = trainer.val_dataloaders.sampler
assert isinstance(val_sampler, DistributedSampler)
assert not val_sampler.shuffle
assert val_sampler.seed == self.expected_seed[1]
def on_test_start(self, trainer, pl_module):
test_sampler = trainer.test_dataloaders.sampler
assert isinstance(test_sampler, DistributedSampler)
assert not test_sampler.shuffle
assert test_sampler.seed == self.expected_seed[2]
@RunIf(min_cuda_gpus=2, skip_windows=True)
def test_dataloader_distributed_sampler(tmp_path):
"""Test DistributedSampler and it's arguments for DDP backend."""
seed_everything(123)
model = BoringModel()
trainer = Trainer(
accelerator="gpu",
devices=[0, 1],
num_nodes=1,
strategy="ddp_spawn",
default_root_dir=tmp_path,
max_steps=1,
callbacks=[DistribSamplerCallback(expected_seeds=(123, 123, 123))],
)
trainer.fit(model)
trainer.test(model)
|
DistribSamplerCallback
|
python
|
huggingface__transformers
|
tests/tensor_parallel/test_tensor_parallel.py
|
{
"start": 4132,
"end": 14520
}
|
class ____(TestCasePlus):
def test_tp_plan_property_setter_getter(self):
"""Test that tp_plan property can be set and retrieved correctly."""
model_id = "JackFram/llama-68m"
model = AutoModelForCausalLM.from_pretrained(model_id, dtype="auto")
# Test setting empty plan
model.tp_plan = {}
self.assertEqual(model.tp_plan, {})
# Test setting a valid plan
valid_plan = {"model.layers.*.self_attn.q_proj": "colwise"}
model.tp_plan = valid_plan
self.assertEqual(model.tp_plan, valid_plan)
# Test updating the plan
model.tp_plan.update({"model.layers.*.self_attn.k_proj": "colwise"})
expected_plan = {"model.layers.*.self_attn.q_proj": "colwise", "model.layers.*.self_attn.k_proj": "colwise"}
self.assertEqual(model.tp_plan, expected_plan)
# Test overriding existing entry
model.tp_plan.update({"model.layers.*.self_attn.q_proj": "colwise_rep"})
expected_plan = {
"model.layers.*.self_attn.q_proj": "colwise_rep",
"model.layers.*.self_attn.k_proj": "colwise",
}
self.assertEqual(model.tp_plan, expected_plan)
def test_tp_plan_validation_invalid_style(self):
"""Test that invalid parallel styles are rejected."""
model_id = "JackFram/llama-68m"
model = AutoModelForCausalLM.from_pretrained(model_id, dtype="auto")
# Test invalid parallel style
with self.assertRaises(ValueError) as context:
model.tp_plan = {"layers.*.self_attn.q_proj": "invalid_style"}
self.assertIn("Unsupported tensor parallel style 'invalid_style'", str(context.exception))
self.assertIn("Supported styles are", str(context.exception))
def test_tp_plan_validation_nonexistent_layer_warning(self):
"""Test that warnings are issued for non-existent layer patterns."""
model_id = "JackFram/llama-68m"
model = AutoModelForCausalLM.from_pretrained(model_id, dtype="auto")
# Test warning for non-existent layer pattern
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
model.tp_plan = {"nonexistent.*.layer": "colwise"}
# Check that a warning was issued
self.assertTrue(len(w) > 0)
warning_message = str(w[0].message)
self.assertIn("Layer pattern 'nonexistent.*.layer' does not match any parameters", warning_message)
def test_tp_plan_valid_layer_patterns(self):
"""Test that valid layer patterns are accepted without warnings."""
model_id = "JackFram/llama-68m"
model = AutoModelForCausalLM.from_pretrained(model_id, dtype="auto")
# Test valid layer patterns that should match the model structure
valid_plans = [
{"model.layers.*.self_attn.q_proj": "colwise"},
{"model.layers.*.self_attn.k_proj": "rowwise"},
{"model.layers.*.mlp.gate_proj": "colwise_rep"},
]
for plan in valid_plans:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
model.tp_plan = plan
# Filter out any warnings that are not about layer patterns
layer_warnings = [
warning
for warning in w
if "Layer pattern" in str(warning.message)
and "does not match any parameters" in str(warning.message)
]
# Should not have layer pattern warnings for valid patterns
self.assertEqual(
len(layer_warnings),
0,
f"Unexpected warning for valid pattern {plan}: {[str(w.message) for w in layer_warnings]}",
)
# Verify the final plan was set correctly
self.assertEqual(model.tp_plan, valid_plans[-1])
def test_tp_plan_none_handling(self):
"""Test that None values are handled correctly."""
model_id = "JackFram/llama-68m"
model = AutoModelForCausalLM.from_pretrained(model_id, dtype="auto")
# Test setting None
model.tp_plan = None
self.assertEqual(model.tp_plan, {})
# Test setting a plan after None
model.tp_plan = {"model.layers.*.self_attn.q_proj": "colwise"}
self.assertEqual(model.tp_plan, {"model.layers.*.self_attn.q_proj": "colwise"})
# ====== TEST FUNCTIONS ======
def _test_model_dense_forward_impl(rank, mode):
"""Implementation for comparing TP and non-TP model outputs."""
model_id = "JackFram/llama-68m"
# Ensure same random seed for reproducibility
torch.manual_seed(0)
# Load tokenizer and prepare inputs - same for both models
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
prompt = "Can I help"
inputs = tokenizer(prompt, return_tensors="pt")
# Load TP model first to determine device
model_tp = AutoModelForCausalLM.from_pretrained(model_id, dtype="auto", tp_plan="auto")
dist.barrier()
if mode == "eval":
model_tp.eval()
else:
model_tp.train()
# Load non-TP model and move to same device as TP model
device = model_tp.device
model = AutoModelForCausalLM.from_pretrained(model_id, dtype="auto")
model = model.to(device)
if mode == "eval":
model.eval()
else:
model.train()
# Prepare inputs on the same device
input_ids = inputs.input_ids.to(device)
# Run forward pass on both models
with torch.no_grad():
# Non-TP model output
outputs = model(input_ids)
logits = outputs.logits
# TP model output
outputs_tp = model_tp(input_ids)
logits_tp = outputs_tp.logits
# Compare outputs - they should match
assert torch.allclose(logits, logits_tp, atol=1e-5, rtol=1e-5), (
f"TP and non-TP model outputs differ. Max diff: {(logits - logits_tp).abs().max().item()} | Min diff: {(logits - logits_tp).abs().min().item()}"
)
dist.barrier()
def _test_model_dense_backward_pass_impl(rank):
"""Implementation for comparing TP and non-TP model backward passes."""
model_id = "JackFram/llama-68m"
torch.manual_seed(0)
model_tp = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.float32, tp_plan="auto")
dist.barrier()
model_tp.train()
device = model_tp.device
model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.float32)
model = model.to(device)
model.train()
batch_size, seq_length = 2, 10
torch.manual_seed(42) # Different seed for inputs to ensure they're deterministic
input_ids = torch.randint(0, model.config.vocab_size, (batch_size, seq_length), device=device)
labels = torch.randint(0, model.config.vocab_size, (batch_size, seq_length), device=device)
outputs = model(input_ids, labels=labels)
loss = outputs.loss
loss.backward()
outputs_tp = model_tp(input_ids, labels=labels)
loss_tp = outputs_tp.loss
loss_tp.backward()
assert torch.allclose(loss, loss_tp, atol=1e-5, rtol=1e-5), (
f"TP and non-TP model losses differ. Non-TP loss: {loss.item()}, TP loss: {loss_tp.item()}, Diff: {(loss - loss_tp).abs().item()}"
)
# Compare gradients for matching parameters
# Note: TP model may have sharded parameters (DTensors), so we slice the reference gradient to match
for (name, param), (name_tp, param_tp) in zip(model.named_parameters(), model_tp.named_parameters()):
if param.grad is not None and param_tp.grad is not None:
grad = param.grad
grad_tp = param_tp.grad
if isinstance(param_tp.data, dist.tensor.DTensor):
placement = param_tp.data.placements[0]
if hasattr(placement, "dim") and placement.dim is not None:
grad_shard = get_tensor_shard(grad, grad, param_tp.data.device_mesh, rank, placement.dim)
else:
grad_shard = grad
else:
grad_shard = grad
grad_tp_local = grad_tp.to_local() if isinstance(grad_tp, dist.tensor.DTensor) else grad_tp
assert torch.allclose(grad_shard.cpu(), grad_tp_local.cpu(), atol=1e-5, rtol=1e-5), (
f"Gradients differ for parameter {name}. Max diff: {(grad_shard.cpu() - grad_tp_local.cpu()).abs().max().item()} | Min diff: {(grad_shard.cpu() - grad_tp_local.cpu()).abs().min().item()}"
)
dist.barrier()
def _test_model_dense_forward_compile_impl(rank, mode):
"""Implementation for comparing TP and non-TP model outputs with torch.compile."""
model_id = "JackFram/llama-68m"
torch.manual_seed(0)
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
prompt = "Can I help"
inputs = tokenizer(prompt, return_tensors="pt")
model_tp = AutoModelForCausalLM.from_pretrained(model_id, dtype="auto", tp_plan="auto")
dist.barrier()
if mode == "eval":
model_tp.eval()
else:
model_tp.train()
device = model_tp.device
model = AutoModelForCausalLM.from_pretrained(model_id, dtype="auto")
model = model.to(device)
if mode == "eval":
model.eval()
else:
model.train()
# Compile both models
model.forward = torch.compile(model.forward)
model_tp.forward = torch.compile(model_tp.forward)
input_ids = inputs.input_ids.to(device)
with torch.no_grad():
outputs = model(input_ids)
logits = outputs.logits
outputs_tp = model_tp(input_ids)
logits_tp = outputs_tp.logits
assert torch.allclose(logits, logits_tp, atol=1e-5, rtol=1e-5), (
f"TP and non-TP model outputs differ. Max diff: {(logits - logits_tp).abs().max().item()} | Min diff: {(logits - logits_tp).abs().min().item()}"
)
dist.barrier()
def _test_model_dense_save_impl(rank, tmp_dir):
"""Implementation of test_model_save for distributed execution."""
model_id = "JackFram/llama-68m"
if dist.is_initialized():
kwargs = {"tp_plan": "auto"}
result_dir = f"{tmp_dir}/tp"
else:
kwargs = {}
result_dir = f"{tmp_dir}/nontp"
model = AutoModelForCausalLM.from_pretrained(model_id, **kwargs)
model.save_pretrained(result_dir)
|
TestTensorParallelProperties
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 399746,
"end": 402031
}
|
class ____(sgqlc.types.Interface):
"""Represents a GitHub Enterprise Importer (GEI) migration."""
__schema__ = github_schema
__field_names__ = (
"continue_on_error",
"created_at",
"database_id",
"failure_reason",
"id",
"migration_log_url",
"migration_source",
"repository_name",
"source_url",
"state",
"warnings_count",
)
continue_on_error = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="continueOnError")
"""The migration flag to continue on error."""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
database_id = sgqlc.types.Field(String, graphql_name="databaseId")
"""Identifies the primary key from the database."""
failure_reason = sgqlc.types.Field(String, graphql_name="failureReason")
"""The reason the migration failed."""
id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="id")
migration_log_url = sgqlc.types.Field(URI, graphql_name="migrationLogUrl")
"""The URL for the migration log (expires 1 day after migration
completes).
"""
migration_source = sgqlc.types.Field(sgqlc.types.non_null("MigrationSource"), graphql_name="migrationSource")
"""The migration source."""
repository_name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="repositoryName")
"""The target repository name."""
source_url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="sourceUrl")
"""The migration source URL, for example `https://github.com` or
`https://monalisa.ghe.com`.
"""
state = sgqlc.types.Field(sgqlc.types.non_null(MigrationState), graphql_name="state")
"""The migration state."""
warnings_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="warningsCount")
"""The number of warnings encountered for this migration. To review
the warnings, check the [Migration
Log](https://docs.github.com/en/migrations/using-github-
enterprise-importer/completing-your-migration-with-github-
enterprise-importer/accessing-your-migration-logs-for-github-
enterprise-importer).
"""
|
Migration
|
python
|
streamlit__streamlit
|
e2e_playwright/st_magic.py
|
{
"start": 2372,
"end": 2551
}
|
class ____:
"""MyClass: this help block should be printed."""
def __init__(self):
"""Should not be printed."""
MyClass
my_instance = MyClass()
my_instance
|
MyClass
|
python
|
pytorch__pytorch
|
test/distributed/checkpoint/e2e/test_fsdp_ep.py
|
{
"start": 672,
"end": 823
}
|
class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x):
raise NotImplementedError
|
Dummymodel
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/orm/instrumentation.py
|
{
"start": 18004,
"end": 19213
}
|
class ____:
"""Provide serialization of a :class:`.ClassManager`.
The :class:`.InstanceState` uses ``__init__()`` on serialize
and ``__call__()`` on deserialize.
"""
def __init__(self, state: state.InstanceState[Any], d: Dict[str, Any]):
self.class_ = state.class_
manager = state.manager
manager.dispatch.pickle(state, d)
def __call__(self, state, inst, state_dict):
state.manager = manager = opt_manager_of_class(self.class_)
if manager is None:
raise exc.UnmappedInstanceError(
inst,
"Cannot deserialize object of type %r - "
"no mapper() has "
"been configured for this class within the current "
"Python process!" % self.class_,
)
elif manager.is_mapped and not manager.mapper.configured:
manager.mapper._check_configure()
# setup _sa_instance_state ahead of time so that
# unpickle events can access the object normally.
# see [ticket:2362]
if inst is not None:
manager.setup_instance(inst, state)
manager.dispatch.unpickle(state, state_dict)
|
_SerializeManager
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/models.py
|
{
"start": 108998,
"end": 110128
}
|
class ____(Request):
"""
Convert company models to public
:param ids: Ids of the models to convert
:type ids: Sequence[str]
"""
_service = "models"
_action = "make_public"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"ids": {
"description": "Ids of the models to convert",
"items": {"type": "string"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
super(MakePublicRequest, self).__init__(**kwargs)
self.ids = ids
@schema_property("ids")
def ids(self) -> Optional[List[str]]:
return self._property_ids
@ids.setter
def ids(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_ids = None
return
self.assert_isinstance(value, "ids", (list, tuple))
self.assert_isinstance(value, "ids", six.string_types, is_array=True)
self._property_ids = value
|
MakePublicRequest
|
python
|
huggingface__transformers
|
src/transformers/models/musicgen_melody/modeling_musicgen_melody.py
|
{
"start": 13434,
"end": 16634
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config: MusicgenMelodyDecoderConfig, layer_idx=None):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = MusicgenMelodyAttention(
embed_dim=self.embed_dim,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
bias=False,
is_causal=True,
config=config,
layer_idx=layer_idx,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim, bias=False)
self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim, bias=False)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
cache_position: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
past_key_values=past_key_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
return hidden_states, self_attn_weights
@auto_docstring
# Copied from transformers.models.musicgen.modeling_musicgen.MusicgenPreTrainedModel with Musicgen->MusicgenMelody
|
MusicgenMelodyDecoderLayer
|
python
|
pypa__setuptools
|
setuptools/_vendor/typeguard/_exceptions.py
|
{
"start": 57,
"end": 218
}
|
class ____(UserWarning):
"""
A warning that is emitted when a type hint in string form could not be resolved to
an actual type.
"""
|
TypeHintWarning
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/autoVariance1.py
|
{
"start": 1223,
"end": 1440
}
|
class ____[T]:
x: T
vo4_1: ShouldBeCovariant4[float] = ShouldBeCovariant4[int](1)
# This should generate an error based on variance.
vo4_2: ShouldBeCovariant4[int] = ShouldBeCovariant4[float](1)
|
ShouldBeCovariant4
|
python
|
pytorch__pytorch
|
torch/distributed/flight_recorder/components/types.py
|
{
"start": 3218,
"end": 3277
}
|
class ____(NamedTuple):
id: int
frames: str
|
Traceback
|
python
|
astropy__astropy
|
astropy/units/tests/test_structured.py
|
{
"start": 531,
"end": 1228
}
|
class ____:
@classmethod
def setup_class(cls):
cls.pv_dtype = np.dtype([("p", "f8"), ("v", "f8")])
cls.pv_t_dtype = np.dtype([("pv", cls.pv_dtype), ("t", "f8")])
cls.p_unit = u.km
cls.v_unit = u.km / u.s
cls.t_unit = u.s
cls.pv_dtype = np.dtype([("p", "f8"), ("v", "f8")])
cls.pv_t_dtype = np.dtype([("pv", cls.pv_dtype), ("t", "f8")])
cls.pv = np.array([(1.0, 0.25), (2.0, 0.5), (3.0, 0.75)], cls.pv_dtype)
cls.pv_t = np.array(
[
((4.0, 2.5), 0.0),
((5.0, 5.0), 1.0),
((6.0, 7.5), 2.0),
],
cls.pv_t_dtype,
)
|
StructuredTestBase
|
python
|
keras-team__keras
|
keras/src/optimizers/sgd_test.py
|
{
"start": 165,
"end": 3734
}
|
class ____(testing.TestCase):
def test_config(self):
optimizer = SGD(
learning_rate=0.5,
momentum=0.06,
nesterov=True,
weight_decay=0.004,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = SGD(learning_rate=0.5)
self.assertEqual(len(optimizer.variables), 2)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.build([vars])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(vars, [0.5, -1.0, -0.5, 3.0], rtol=1e-4, atol=1e-4)
self.assertEqual(len(optimizer.variables), 2)
self.assertEqual(optimizer.variables[0], 1)
self.assertEqual(optimizer.variables[1], 0.5)
def test_invalid_momentum(self):
with self.assertRaisesRegex(
ValueError, "`momentum` must be a float between \\[0, 1\\]."
):
SGD(momentum=-1.0)
with self.assertRaisesRegex(
ValueError, "`momentum` must be a float between \\[0, 1\\]."
):
SGD(momentum=2.0)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = SGD(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = SGD(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = SGD(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = SGD(nesterov=True)
x = backend.Variable(np.ones([10]))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.9999, 0.9999, 0.9999, 0.9999, 0.9999, 0.9999, 0.9999, 0.9999,
0.9999, 0.9999], [0.9989, 0.9979, 0.9969, 0.9959, 0.9949, 0.9939,
0.9929, 0.9919, 0.9909, 0.9899], [0.9979, 0.9959, 0.9939, 0.9919,
0.9899, 0.9879, 0.9859, 0.9839, 0.9819, 0.9799], [0.9969, 0.9939,
0.9909, 0.9879, 0.9849, 0.9819, 0.9789, 0.9759, 0.9729, 0.9699],
[0.9959, 0.9919, 0.9879, 0.9839, 0.9799, 0.9759, 0.9719, 0.9679,
0.9639, 0.9599]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = SGD(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = SGD(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
|
SGDTest
|
python
|
django__django
|
tests/db_functions/math/test_pi.py
|
{
"start": 123,
"end": 381
}
|
class ____(TestCase):
def test(self):
FloatModel.objects.create(f1=2.5, f2=15.9)
obj = FloatModel.objects.annotate(pi=Pi()).first()
self.assertIsInstance(obj.pi, float)
self.assertAlmostEqual(obj.pi, math.pi, places=5)
|
PiTests
|
python
|
scipy__scipy
|
scipy/fftpack/tests/test_basic.py
|
{
"start": 7483,
"end": 7612
}
|
class ____(_TestIFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
|
TestSingleIFFT
|
python
|
walkccc__LeetCode
|
solutions/1838. Frequency of the Most Frequent Element/1838.py
|
{
"start": 0,
"end": 309
}
|
class ____:
def maxFrequency(self, nums: list[int], k: int) -> int:
ans = 0
summ = 0
nums.sort()
l = 0
for r, num in enumerate(nums):
summ += num
while summ + k < num * (r - l + 1):
summ -= nums[l]
l += 1
ans = max(ans, r - l + 1)
return ans
|
Solution
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/callbacks.py
|
{
"start": 65320,
"end": 70859
}
|
class ____(Callback):
"""Stop training when a monitored metric has stopped improving.
Assuming the goal of a training is to minimize the loss. With this, the
metric to be monitored would be `'loss'`, and mode would be `'min'`. A
`model.fit()` training loop will check at end of every epoch whether
the loss is no longer decreasing, considering the `min_delta` and
`patience` if applicable. Once it's found no longer decreasing,
`model.stop_training` is marked True and the training terminates.
The quantity to be monitored needs to be available in `logs` dict.
To make it so, pass the loss or metrics at `model.compile()`.
Args:
monitor: Quantity to be monitored.
min_delta: Minimum change in the monitored quantity
to qualify as an improvement, i.e. an absolute
change of less than min_delta, will count as no
improvement.
patience: Number of epochs with no improvement
after which training will be stopped.
verbose: verbosity mode.
mode: One of `{"auto", "min", "max"}`. In `min` mode,
training will stop when the quantity
monitored has stopped decreasing; in `"max"`
mode it will stop when the quantity
monitored has stopped increasing; in `"auto"`
mode, the direction is automatically inferred
from the name of the monitored quantity.
baseline: Baseline value for the monitored quantity.
Training will stop if the model doesn't show improvement over the
baseline.
restore_best_weights: Whether to restore model weights from
the epoch with the best value of the monitored quantity.
If False, the model weights obtained at the last step of
training are used. An epoch will be restored regardless
of the performance relative to the `baseline`. If no epoch
improves on `baseline`, training will run for `patience`
epochs and restore weights from the best epoch in that set.
Example:
>>> callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)
>>> # This callback will stop the training when there is no improvement in
>>> # the loss for three consecutive epochs.
>>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
>>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
>>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
... epochs=10, batch_size=1, callbacks=[callback],
... verbose=0)
>>> len(history.history['loss']) # Only 4 epochs are run.
4
"""
def __init__(self,
monitor='val_loss',
min_delta=0,
patience=0,
verbose=0,
mode='auto',
baseline=None,
restore_best_weights=False):
super(EarlyStopping, self).__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.baseline = baseline
self.min_delta = abs(min_delta)
self.wait = 0
self.stopped_epoch = 0
self.restore_best_weights = restore_best_weights
self.best_weights = None
if mode not in ['auto', 'min', 'max']:
logging.warning('EarlyStopping mode %s is unknown, '
'fallback to auto mode.', mode)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
elif mode == 'max':
self.monitor_op = np.greater
else:
if 'acc' in self.monitor:
self.monitor_op = np.greater
else:
self.monitor_op = np.less
if self.monitor_op == np.greater:
self.min_delta *= 1
else:
self.min_delta *= -1
def on_train_begin(self, logs=None):
# Allow instances to be re-used
self.wait = 0
self.stopped_epoch = 0
self.best = np.inf if self.monitor_op == np.less else -np.inf
self.best_weights = None
def on_epoch_end(self, epoch, logs=None):
current = self.get_monitor_value(logs)
if current is None:
return
if self.restore_best_weights and self.best_weights is None:
# Restore the weights after first epoch if no progress is ever made.
self.best_weights = self.model.get_weights()
self.wait += 1
if self._is_improvement(current, self.best):
self.best = current
if self.restore_best_weights:
self.best_weights = self.model.get_weights()
# Only restart wait if we beat both the baseline and our previous best.
if self.baseline is None or self._is_improvement(current, self.baseline):
self.wait = 0
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.model.stop_training = True
if self.restore_best_weights and self.best_weights is not None:
if self.verbose > 0:
print('Restoring model weights from the end of the best epoch.')
self.model.set_weights(self.best_weights)
def on_train_end(self, logs=None):
if self.stopped_epoch > 0 and self.verbose > 0:
print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))
def get_monitor_value(self, logs):
logs = logs or {}
monitor_value = logs.get(self.monitor)
if monitor_value is None:
logging.warning('Early stopping conditioned on metric `%s` '
'which is not available. Available metrics are: %s',
self.monitor, ','.join(list(logs.keys())))
return monitor_value
def _is_improvement(self, monitor_value, reference_value):
return self.monitor_op(monitor_value - self.min_delta, reference_value)
|
EarlyStopping
|
python
|
ansible__ansible
|
test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/doc_fragments/frag.py
|
{
"start": 37,
"end": 261
}
|
class ____(object):
DOCUMENTATION = r"""
options:
normal_doc_frag:
description:
- an option
"""
OTHER_DOCUMENTATION = r"""
options:
other_doc_frag:
description:
- another option
"""
|
ModuleDocFragment
|
python
|
PyCQA__pylint
|
tests/functional/i/invalid/invalid_name/invalid_name_issue_3405.py
|
{
"start": 151,
"end": 326
}
|
class ____:
"""ClassVar attribute should be matched against class-attribute-rgx, not attr-rgx"""
# class-attribute-rgx='^y$'
x: ClassVar[int] = 0 # [invalid-name]
|
Foo
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/orm/events.py
|
{
"start": 59408,
"end": 59961
}
|
class ____(_EventsHold[_ET]):
all_holds = weakref.WeakKeyDictionary()
def resolve(
self, class_: Union[Type[_T], _InternalEntityType[_T]]
) -> Optional[Mapper[_T]]:
return _mapper_or_none(class_)
# this fails on pyright if you use Any. Fails on mypy if you use _ET
class HoldMapperEvents(_EventsHold.HoldEvents[_ET], MapperEvents): # type: ignore[valid-type,misc] # noqa: E501
pass
dispatch = event.dispatcher(HoldMapperEvents)
_sessionevents_lifecycle_event_names: Set[str] = set()
|
_MapperEventsHold
|
python
|
django-extensions__django-extensions
|
tests/management/test_email_notifications.py
|
{
"start": 217,
"end": 3176
}
|
class ____(TestCase):
"""Tests for EmailNotificationCommand class."""
@override_settings(ADMINS=[])
@patch("sys.stdout", new_callable=StringIO)
def test_should_print_that_no_email_recipients_available(self, m_stdout):
with self.assertRaises(Exception):
call_command(
"test_email_notification_command", "--email-exception", verbosity=2
)
self.assertIn("No email recipients available", m_stdout.getvalue())
self.assertListEqual(mail.outbox, [])
@override_settings(
ADMINS=["foo@bar.com", "bar@foo.com"], DEFAULT_FROM_EMAIL="webmaster@foo.bar"
)
def test_should_send_email_with_command_name_and_full_traceback_if_command_fail(
self,
):
expected_lines = """Reporting execution of command: 'test_email_notification_command'
Traceback:
raise Exception()"""
with self.assertRaises(Exception):
call_command(
"test_email_notification_command", "--email-exception", verbosity=2
)
self.assertIsNot(mail.outbox, [])
self.assertEqual(
mail.outbox[0].subject, "Django extensions email notification."
)
self.assertEqual(mail.outbox[0].from_email, "webmaster@foo.bar")
self.assertListEqual(mail.outbox[0].to, ["foo@bar.com", "bar@foo.com"])
for expected_line in expected_lines.splitlines():
self.assertIn(expected_line, mail.outbox[0].body)
@patch("sys.stdout", new_callable=StringIO)
def test_should_not_notify_if_notification_level_is_not_set(self, m_stdout):
call_command("runscript", "sample_script", "--email-notifications", verbosity=2)
self.assertIn("Exiting, not in 'notify always' mode", m_stdout.getvalue())
self.assertListEqual(mail.outbox, [])
@override_settings(
ADMINS=["foo@bar.com"],
DEFAULT_FROM_EMAIL="webmaster@foo.bar",
EMAIL_NOTIFICATIONS={
"tests.testapp.scripts.sample_script": {
"subject": "my_script subject",
"body": "my_script body",
"from_email": "from_email@example.com",
"recipients": ("recipient0@example.com",),
"no_admins": False,
"no_traceback": False,
"notification_level": 1,
"fail_silently": False,
}
},
)
def test_should_notify_if_notification_level_is_greater_than_0(self):
call_command("runscript", "sample_script", "--email-notifications", verbosity=2)
self.assertIsNot(mail.outbox, [])
self.assertEqual(mail.outbox[0].subject, "my_script subject")
self.assertEqual(mail.outbox[0].body, "my_script body")
self.assertEqual(mail.outbox[0].from_email, "from_email@example.com")
self.assertListEqual(
mail.outbox[0].to, ["recipient0@example.com", "foo@bar.com"]
)
|
EmailNotificationCommandTests
|
python
|
Netflix__metaflow
|
metaflow/sidecar/sidecar_messages.py
|
{
"start": 561,
"end": 1071
}
|
class ____(object):
def __init__(self, msg_type, payload):
self.msg_type = msg_type
self.payload = payload
def serialize(self):
msg = {
"msg_type": self.msg_type,
"payload": self.payload,
}
return json.dumps(msg) + "\n"
@staticmethod
def deserialize(json_msg):
try:
return Message(**json.loads(json_msg))
except json.decoder.JSONDecodeError:
return Message(MessageTypes.INVALID, None)
|
Message
|
python
|
EpistasisLab__tpot
|
tpot/search_spaces/nodes/genetic_feature_selection.py
|
{
"start": 1789,
"end": 2806
}
|
class ____(SelectorMixin, BaseEstimator):
"""Select predefined feature subsets."""
def __init__(self, mask, set_output_transform=None):
self.mask = mask
self.set_output_transform = set_output_transform
if set_output_transform is not None:
self.set_output(transform=set_output_transform)
def fit(self, X, y=None):
self.n_features_in_ = X.shape[1]
if isinstance(X, pd.DataFrame):
self.feature_names_in_ = X.columns
# self.set_output(transform="pandas")
self.is_fitted_ = True #so sklearn knows it's fitted
return self
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.allow_nan = True
tags.target_tags.required = False # formally requires_y
return tags
def _get_support_mask(self):
return np.array(self.mask)
def get_feature_names_out(self, input_features=None):
return self.feature_names_in_[self.get_support()]
|
MaskSelector
|
python
|
google__pytype
|
pytype/pyi/visitor.py
|
{
"start": 172,
"end": 1295
}
|
class ____(ast_visitor.BaseVisitor):
"""Base visitor for all ast visitors.
- Reraises ParseError with position information.
- Handles literal constants
- Has an optional Definitions member
"""
def __init__(self, *, filename=None, src_code=None, visit_decorators=False):
super().__init__(astlib, visit_decorators=visit_decorators)
self.filename = filename # used for error messages
self.src_code = src_code # used for error messages
def enter(self, node):
try:
return super().enter(node)
except Exception as e: # pylint: disable=broad-except
raise _ParseError.from_exc(e).at(node, self.filename, self.src_code)
def visit(self, node):
try:
return super().visit(node)
except Exception as e: # pylint: disable=broad-except
raise _ParseError.from_exc(e).at(node, self.filename, self.src_code)
def leave(self, node):
try:
return super().leave(node)
except Exception as e: # pylint: disable=broad-except
raise _ParseError.from_exc(e).at(node, self.filename, self.src_code)
def generic_visit(self, node):
return node
|
BaseVisitor
|
python
|
pytorch__pytorch
|
test/dynamo/cpython/3_13/test_iter.py
|
{
"start": 3343,
"end": 3430
}
|
class ____:
def __getitem__(self, i):
return i
__iter__ = None
|
NoIterClass
|
python
|
tornadoweb__tornado
|
tornado/tcpclient.py
|
{
"start": 6934,
"end": 12135
}
|
class ____:
"""A non-blocking TCP connection factory.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
def __init__(self, resolver: Optional[Resolver] = None) -> None:
if resolver is not None:
self.resolver = resolver
self._own_resolver = False
else:
self.resolver = Resolver()
self._own_resolver = True
def close(self) -> None:
if self._own_resolver:
self.resolver.close()
async def connect(
self,
host: str,
port: int,
af: socket.AddressFamily = socket.AF_UNSPEC,
ssl_options: Optional[Union[Dict[str, Any], ssl.SSLContext]] = None,
max_buffer_size: Optional[int] = None,
source_ip: Optional[str] = None,
source_port: Optional[int] = None,
timeout: Optional[Union[float, datetime.timedelta]] = None,
) -> IOStream:
"""Connect to the given host and port.
Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
``ssl_options`` is not None).
Using the ``source_ip`` kwarg, one can specify the source
IP address to use when establishing the connection.
In case the user needs to resolve and
use a specific interface, it has to be handled outside
of Tornado as this depends very much on the platform.
Raises `TimeoutError` if the input future does not complete before
``timeout``, which may be specified in any form allowed by
`.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
relative to `.IOLoop.time`)
Similarly, when the user requires a certain source port, it can
be specified using the ``source_port`` arg.
.. versionchanged:: 4.5
Added the ``source_ip`` and ``source_port`` arguments.
.. versionchanged:: 5.0
Added the ``timeout`` argument.
"""
if timeout is not None:
if isinstance(timeout, numbers.Real):
timeout = IOLoop.current().time() + timeout
elif isinstance(timeout, datetime.timedelta):
timeout = IOLoop.current().time() + timeout.total_seconds()
else:
raise TypeError("Unsupported timeout %r" % timeout)
if timeout is not None:
addrinfo = await gen.with_timeout(
timeout, self.resolver.resolve(host, port, af)
)
else:
addrinfo = await self.resolver.resolve(host, port, af)
connector = _Connector(
addrinfo,
functools.partial(
self._create_stream,
max_buffer_size,
source_ip=source_ip,
source_port=source_port,
),
)
af, addr, stream = await connector.start(connect_timeout=timeout)
# TODO: For better performance we could cache the (af, addr)
# information here and re-use it on subsequent connections to
# the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
if ssl_options is not None:
if timeout is not None:
stream = await gen.with_timeout(
timeout,
stream.start_tls(
False, ssl_options=ssl_options, server_hostname=host
),
)
else:
stream = await stream.start_tls(
False, ssl_options=ssl_options, server_hostname=host
)
return stream
def _create_stream(
self,
max_buffer_size: Optional[int],
af: socket.AddressFamily,
addr: Tuple,
source_ip: Optional[str] = None,
source_port: Optional[int] = None,
) -> Tuple[IOStream, "Future[IOStream]"]:
# Always connect in plaintext; we'll convert to ssl if necessary
# after one connection has completed.
source_port_bind = source_port if isinstance(source_port, int) else 0
source_ip_bind = source_ip
if source_port_bind and not source_ip:
# User required a specific port, but did not specify
# a certain source IP, will bind to the default loopback.
source_ip_bind = "::1" if af == socket.AF_INET6 else "127.0.0.1"
# Trying to use the same address family as the requested af socket:
# - 127.0.0.1 for IPv4
# - ::1 for IPv6
socket_obj = socket.socket(af)
if source_port_bind or source_ip_bind:
# If the user requires binding also to a specific IP/port.
try:
socket_obj.bind((source_ip_bind, source_port_bind))
except OSError:
socket_obj.close()
# Fail loudly if unable to use the IP/port.
raise
try:
stream = IOStream(socket_obj, max_buffer_size=max_buffer_size)
except OSError as e:
fu = Future() # type: Future[IOStream]
fu.set_exception(e)
return stream, fu
else:
return stream, stream.connect(addr)
|
TCPClient
|
python
|
pytorch__pytorch
|
test/distributed/tensor/test_dtensor_export.py
|
{
"start": 1964,
"end": 2296
}
|
class ____(torch.nn.Module):
def __init__(self, device):
super().__init__()
self.mlp_0 = MLPModule(device)
self.mlp_1 = MLPModule(device)
def forward(self, input):
if input.shape[0] > 4:
return self.mlp_0(input.sin())
return self.mlp_1(input.cos())
|
SimpleModelDynamicShapes
|
python
|
sphinx-doc__sphinx
|
sphinx/search/da.py
|
{
"start": 191,
"end": 589
}
|
class ____(SearchLanguage):
lang = 'da'
language_name = 'Danish'
js_stemmer_rawcode = 'danish-stemmer.js'
stopwords = DANISH_STOPWORDS
def __init__(self, options: dict[str, str]) -> None:
super().__init__(options)
self.stemmer = snowballstemmer.stemmer('danish')
def stem(self, word: str) -> str:
return self.stemmer.stemWord(word.lower())
|
SearchDanish
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pyupgrade/UP046_0.py
|
{
"start": 1364,
"end": 1404
}
|
class ____(A[S], Generic[S]):
var: S
|
B
|
python
|
readthedocs__readthedocs.org
|
readthedocs/subscriptions/notifications.py
|
{
"start": 1732,
"end": 2965
}
|
class ____(SubscriptionNotificationMixin, EmailNotification):
"""
Subscription has ended a month ago.
Notify the user that the organization will be disabled soon. After this
notification is sent, we are safe to disable the organization since the
customer was notified twice.
"""
name = "organization_disabled"
subject = "Your Read the Docs organization will be disabled soon"
days_after_end = DISABLE_AFTER_DAYS
@classmethod
def for_organizations(cls):
organizations = Organization.objects.disable_soon(
days=cls.days_after_end,
exact=True,
)
return organizations
MESSAGE_ORGANIZATION_DISABLED = "organization:disabled"
messages = [
Message(
id=MESSAGE_ORGANIZATION_DISABLED,
header=_("Your organization has been disabled"),
body=_(
textwrap.dedent(
"""
The organization "{{instance.slug}}" is currently disabled. You need to <a href="{% url 'subscription_detail' instance.slug %}">renew your subscription</a> to keep using Read the Docs
"""
).strip(),
),
type=INFO,
),
]
registry.add(messages)
|
OrganizationDisabledNotification
|
python
|
pyca__cryptography
|
tests/hazmat/primitives/test_hash_vectors.py
|
{
"start": 526,
"end": 864
}
|
class ____:
test_sha1 = generate_hash_test(
load_hash_vectors,
os.path.join("hashes", "SHA1"),
["SHA1LongMsg.rsp", "SHA1ShortMsg.rsp"],
hashes.SHA1(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.hash_supported(hashes.SHA224()),
skip_message="Does not support SHA224",
)
|
TestSHA1
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/scheduler/instigation.py
|
{
"start": 1739,
"end": 2153
}
|
class ____(Enum):
# User has taken some manual action to change the status of the run instigator
RUNNING = "RUNNING"
STOPPED = "STOPPED"
# The run instigator status is controlled by its default setting in code
DECLARED_IN_CODE = "DECLARED_IN_CODE"
# DEPRECATED: use InstigatorStatus.DECLARED_IN_CODE
AUTOMATICALLY_RUNNING = "AUTOMATICALLY_RUNNING"
@whitelist_for_serdes
|
InstigatorStatus
|
python
|
redis__redis-py
|
tests/test_multidb/test_pipeline.py
|
{
"start": 10559,
"end": 20106
}
|
class ____:
@pytest.mark.parametrize(
"mock_multi_db_config,mock_db, mock_db1, mock_db2",
[
(
{},
{"weight": 0.2, "circuit": {"state": CBState.CLOSED}},
{"weight": 0.7, "circuit": {"state": CBState.CLOSED}},
{"weight": 0.5, "circuit": {"state": CBState.CLOSED}},
),
],
indirect=True,
)
def test_executes_transaction_against_correct_db(
self, mock_multi_db_config, mock_db, mock_db1, mock_db2, mock_hc
):
databases = create_weighted_list(mock_db, mock_db1, mock_db2)
with (
patch.object(mock_multi_db_config, "databases", return_value=databases),
patch.object(
mock_multi_db_config, "default_health_checks", return_value=[mock_hc]
),
):
mock_db1.client.transaction.return_value = ["OK1", "value1"]
mock_hc.check_health.return_value = True
client = MultiDBClient(mock_multi_db_config)
try:
assert (
mock_multi_db_config.failover_strategy.set_databases.call_count == 1
)
def callback(pipe: Pipeline):
pipe.set("key1", "value1")
pipe.get("key1")
assert client.transaction(callback) == ["OK1", "value1"]
assert len(mock_hc.check_health.call_args_list) >= 9
finally:
client.close()
@pytest.mark.parametrize(
"mock_multi_db_config,mock_db, mock_db1, mock_db2",
[
(
{},
{"weight": 0.2, "circuit": {"state": CBState.CLOSED}},
{"weight": 0.5, "circuit": {"state": CBState.CLOSED}},
{"weight": 0.7, "circuit": {"state": CBState.OPEN}},
),
],
indirect=True,
)
def test_execute_transaction_against_correct_db_and_closed_circuit(
self, mock_multi_db_config, mock_db, mock_db1, mock_db2, mock_hc
):
databases = create_weighted_list(mock_db, mock_db1, mock_db2)
with (
patch.object(mock_multi_db_config, "databases", return_value=databases),
patch.object(
mock_multi_db_config, "default_health_checks", return_value=[mock_hc]
),
):
mock_db1.client.transaction.return_value = ["OK1", "value1"]
def mock_check_health(database):
if database == mock_db2:
return False
else:
return True
mock_hc.check_health.side_effect = mock_check_health
client = MultiDBClient(mock_multi_db_config)
try:
assert (
mock_multi_db_config.failover_strategy.set_databases.call_count == 1
)
def callback(pipe: Pipeline):
pipe.set("key1", "value1")
pipe.get("key1")
assert client.transaction(callback) == ["OK1", "value1"]
assert len(mock_hc.check_health.call_args_list) >= 7
assert mock_db.circuit.state == CBState.CLOSED
assert mock_db1.circuit.state == CBState.CLOSED
assert mock_db2.circuit.state == CBState.OPEN
finally:
client.close()
@pytest.mark.parametrize(
"mock_multi_db_config,mock_db, mock_db1, mock_db2",
[
(
{"health_check_probes": 1},
{"weight": 0.2, "circuit": {"state": CBState.CLOSED}},
{"weight": 0.7, "circuit": {"state": CBState.CLOSED}},
{"weight": 0.5, "circuit": {"state": CBState.CLOSED}},
),
],
indirect=True,
)
def test_execute_transaction_against_correct_db_on_background_health_check_determine_active_db_unhealthy(
self, mock_multi_db_config, mock_db, mock_db1, mock_db2, mock_hc
):
cb = PBCircuitBreakerAdapter(pybreaker.CircuitBreaker(reset_timeout=5))
cb.database = mock_db
mock_db.circuit = cb
cb1 = PBCircuitBreakerAdapter(pybreaker.CircuitBreaker(reset_timeout=5))
cb1.database = mock_db1
mock_db1.circuit = cb1
cb2 = PBCircuitBreakerAdapter(pybreaker.CircuitBreaker(reset_timeout=5))
cb2.database = mock_db2
mock_db2.circuit = cb2
databases = create_weighted_list(mock_db, mock_db1, mock_db2)
# Track health check runs across all databases
health_check_run = 0
# Create events for each failover scenario
db1_became_unhealthy = threading.Event()
db2_became_unhealthy = threading.Event()
db_became_unhealthy = threading.Event()
counter_lock = threading.Lock()
def mock_check_health(database):
nonlocal health_check_run
# Increment run counter for each health check call
with counter_lock:
health_check_run += 1
current_run = health_check_run
# Run 1 (health_check_run 1-3): All databases healthy
if current_run <= 3:
return True
# Run 2 (health_check_run 4-6): mock_db1 unhealthy, others healthy
elif current_run <= 6:
if database == mock_db1:
if current_run == 6:
db1_became_unhealthy.set()
return False
# Signal that db1 has become unhealthy after all 3 checks
if current_run == 6:
db1_became_unhealthy.set()
return True
# Run 3 (health_check_run 7-9): mock_db1 and mock_db2 unhealthy, mock_db healthy
elif current_run <= 9:
if database == mock_db1 or database == mock_db2:
if current_run == 9:
db2_became_unhealthy.set()
return False
# Signal that db2 has become unhealthy after all 3 checks
if current_run == 9:
db2_became_unhealthy.set()
return True
# Run 4 (health_check_run 10-12): mock_db unhealthy, others healthy
else:
if database == mock_db:
if current_run >= 12:
db_became_unhealthy.set()
return False
# Signal that db has become unhealthy after all 3 checks
if current_run >= 12:
db_became_unhealthy.set()
return True
mock_hc.check_health.side_effect = mock_check_health
with (
patch.object(mock_multi_db_config, "databases", return_value=databases),
patch.object(
mock_multi_db_config,
"default_health_checks",
return_value=[mock_hc],
),
):
mock_db.client.transaction.return_value = ["OK", "value"]
mock_db1.client.transaction.return_value = ["OK1", "value"]
mock_db2.client.transaction.return_value = ["OK2", "value"]
mock_multi_db_config.health_check_interval = 0.1
mock_multi_db_config.failover_strategy = WeightBasedFailoverStrategy()
client = MultiDBClient(mock_multi_db_config)
try:
def callback(pipe: Pipeline):
pipe.set("key1", "value1")
pipe.get("key1")
# Run 1: All databases healthy - should use mock_db1 (highest weight 0.7)
assert client.transaction(callback) == ["OK1", "value"]
# Wait for mock_db1 to become unhealthy
assert db1_became_unhealthy.wait(timeout=1.0), (
"Timeout waiting for mock_db1 to become unhealthy"
)
wait_for_condition(
lambda: cb1.state == CBState.OPEN,
timeout=0.2,
error_message="Timeout waiting for cb1 to open",
)
# Run 2: mock_db1 unhealthy - should failover to mock_db2 (weight 0.5)
assert client.transaction(callback) == ["OK2", "value"]
# Wait for mock_db2 to become unhealthy
assert db2_became_unhealthy.wait(timeout=1.0), (
"Timeout waiting for mock_db2 to become unhealthy"
)
wait_for_condition(
lambda: cb2.state == CBState.OPEN,
timeout=0.2,
error_message="Timeout waiting for cb2 to open",
)
# Run 3: mock_db1 and mock_db2 unhealthy - should use mock_db (weight 0.2)
assert client.transaction(callback) == ["OK", "value"]
# Wait for mock_db to become unhealthy
assert db_became_unhealthy.wait(timeout=1.0), (
"Timeout waiting for mock_db to become unhealthy"
)
wait_for_condition(
lambda: cb.state == CBState.OPEN,
timeout=0.2,
error_message="Timeout waiting for cb to open",
)
# Run 4: mock_db unhealthy, others healthy - should use mock_db1 (highest weight)
assert client.transaction(callback) == ["OK1", "value"]
finally:
client.close()
|
TestTransaction
|
python
|
mitmproxy__pdoc
|
test/testdata/misc.py
|
{
"start": 5325,
"end": 5407
}
|
class ____(type):
def __call__(cls, *args, **kwargs):
pass
|
Issue352bMeta
|
python
|
numba__numba
|
numba/tests/test_ir_inlining.py
|
{
"start": 38911,
"end": 44716
}
|
class ____(TestCase):
def test_issue4691(self):
def output_factory(array, dtype):
pass
@overload(output_factory, inline='always')
def ol_output_factory(array, dtype):
if isinstance(array, types.npytypes.Array):
def impl(array, dtype):
shape = array.shape[3:]
return np.zeros(shape, dtype=dtype)
return impl
@njit(nogil=True)
def fn(array):
out = output_factory(array, array.dtype)
return out
@njit(nogil=True)
def fn2(array):
return np.zeros(array.shape[3:], dtype=array.dtype)
fn(np.ones((10, 20, 30, 40, 50)))
fn2(np.ones((10, 20, 30, 40, 50)))
def test_issue4693(self):
@njit(inline='always')
def inlining(array):
if array.ndim != 1:
raise ValueError("Invalid number of dimensions")
return array
@njit
def fn(array):
return inlining(array)
fn(np.zeros(10))
def test_issue5476(self):
# Actual issue has the ValueError passed as an arg to `inlining` so is
# a constant inference error
@njit(inline='always')
def inlining():
msg = 'Something happened'
raise ValueError(msg)
@njit
def fn():
return inlining()
with self.assertRaises(ValueError) as raises:
fn()
self.assertIn("Something happened", str(raises.exception))
def test_issue5792(self):
# Issue is that overloads cache their IR and closure inliner was
# manipulating the cached IR in a way that broke repeated inlines.
class Dummy:
def __init__(self, data):
self.data = data
def div(self, other):
return data / other.data
class DummyType(types.Type):
def __init__(self, data):
self.data = data
super().__init__(name=f'Dummy({self.data})')
@register_model(DummyType)
class DummyTypeModel(models.StructModel):
def __init__(self, dmm, fe_type):
members = [
('data', fe_type.data),
]
super().__init__(dmm, fe_type, members)
make_attribute_wrapper(DummyType, 'data', '_data')
@intrinsic
def init_dummy(typingctx, data):
def codegen(context, builder, sig, args):
typ = sig.return_type
data, = args
dummy = cgutils.create_struct_proxy(typ)(context, builder)
dummy.data = data
if context.enable_nrt:
context.nrt.incref(builder, sig.args[0], data)
return dummy._getvalue()
ret_typ = DummyType(data)
sig = signature(ret_typ, data)
return sig, codegen
@overload(Dummy, inline='always')
def dummy_overload(data):
def ctor(data):
return init_dummy(data)
return ctor
@overload_method(DummyType, 'div', inline='always')
def div_overload(self, other):
def impl(self, other):
return self._data / other._data
return impl
@njit
def test_impl(data, other_data):
dummy = Dummy(data) # ctor inlined once
other = Dummy(other_data) # ctor inlined again
return dummy.div(other)
data = 1.
other_data = 2.
res = test_impl(data, other_data)
self.assertEqual(res, data / other_data)
def test_issue5824(self):
""" Similar to the above test_issue5792, checks mutation of the inlinee
IR is local only"""
class CustomCompiler(CompilerBase):
def define_pipelines(self):
pm = DefaultPassBuilder.define_nopython_pipeline(self.state)
# Run the inliner twice!
pm.add_pass_after(InlineOverloads, InlineOverloads)
pm.finalize()
return [pm]
def bar(x):
...
@overload(bar, inline='always')
def ol_bar(x):
if isinstance(x, types.Integer):
def impl(x):
return x + 1.3
return impl
@njit(pipeline_class=CustomCompiler)
def foo(z):
return bar(z), bar(z)
self.assertEqual(foo(10), (11.3, 11.3))
@skip_parfors_unsupported
def test_issue7380(self):
# This checks that inlining a function containing a loop into another
# loop where the induction variable in both loops is the same doesn't
# end up with a name collision. Parfors can detect this so it is used.
# See: https://github.com/numba/numba/issues/7380
# Check Numba inlined function passes
@njit(inline="always")
def bar(x):
for i in range(x.size):
x[i] += 1
@njit(parallel=True)
def foo(a):
for i in prange(a.shape[0]):
bar(a[i])
a = np.ones((10, 10))
foo(a) # run
# check mutation of data is correct
self.assertPreciseEqual(a, 2 * np.ones_like(a))
# Check manually inlined equivalent function fails
@njit(parallel=True)
def foo_bad(a):
for i in prange(a.shape[0]):
x = a[i]
for i in range(x.size):
x[i] += 1
with self.assertRaises(errors.UnsupportedRewriteError) as e:
foo_bad(a)
self.assertIn("Overwrite of parallel loop index", str(e.exception))
if __name__ == '__main__':
unittest.main()
|
TestInlineMiscIssues
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/models/test_plots.py
|
{
"start": 5036,
"end": 11311
}
|
class ____:
def test_missing_renderers(self) -> None:
p = figure()
p.renderers = []
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
issues = check_integrity([p])
process_validation_issues(issues)
assert mock_logger.warning.call_count == 1
assert mock_logger.warning.call_args[0][0].startswith("W-1000 (MISSING_RENDERERS): Plot has no renderers")
def test_missing_scale(self) -> None:
p = figure()
with pytest.raises(ValueError):
p.x_scale = None
with pytest.raises(ValueError):
p.y_scale = None
def test_missing_range(self) -> None:
p = figure()
with pytest.raises(ValueError):
p.x_range = None
with pytest.raises(ValueError):
p.y_range = None
def test_bad_extra_range_name(self) -> None:
p = figure()
p.xaxis.x_range_name="junk"
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
issues = check_integrity([p])
process_validation_issues(issues)
assert mock_logger.error.call_count == 1
assert mock_logger.error.call_args[0][0].startswith(
"E-1020 (BAD_EXTRA_RANGE_NAME): An extra range name is configured with a name that does not correspond to any range: x_range_name='junk' [LinearAxis",
)
p = figure()
p.extra_x_ranges['foo'] = Range1d()
p.grid.x_range_name="junk"
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
issues = check_integrity([p])
process_validation_issues(issues)
assert mock_logger.error.call_count == 1
assert mock_logger.error.call_args[0][0].startswith(
"E-1020 (BAD_EXTRA_RANGE_NAME): An extra range name is configured with a name that does not correspond to any range: x_range_name='junk' [Grid",
)
assert mock_logger.error.call_args[0][0].count("Grid") == 2
def test_bad_extra_range_only_immediate_refs(self) -> None:
# test whether adding a figure (*and* it's extra ranges)
# to another's references doesn't create a false positive
p, dep = figure(), figure()
dep.extra_x_ranges['foo'] = Range1d()
dep.grid.x_range_name="foo"
p.grid[0].js_on_change("dimension", CustomJS(code = "", args = {"toto": dep.grid[0]}))
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
issues = check_integrity([p])
process_validation_issues(issues)
assert mock_logger.error.call_count == 0
def test_min_preferred_max_width__issue13716(self):
p = figure(min_width=100, width=200, max_width=300)
p.circle([1, 2, 3], [1, 2, 3])
issues = check_integrity([p])
assert issues == ValidationIssues()
p = figure(min_width=100, max_width=300)
p.circle([1, 2, 3], [1, 2, 3])
issues = check_integrity([p])
assert issues == ValidationIssues()
p = figure(min_width=100, max_width=300, sizing_mode="stretch_width")
p.circle([1, 2, 3], [1, 2, 3])
issues = check_integrity([p])
assert issues == ValidationIssues()
p = figure(min_width=100, max_width=300, sizing_mode="fixed")
p.circle([1, 2, 3], [1, 2, 3])
issues = check_integrity([p])
assert issues == ValidationIssues(error=[
ValidationIssue(code=1022, name="MIN_PREFERRED_MAX_WIDTH", text="Expected min_width <= width <= max_width"),
])
def test_min_preferred_max_height__issue13716(self):
p = figure(min_height=100, height=200, max_height=300)
p.circle([1, 2, 3], [1, 2, 3])
issues = check_integrity([p])
assert issues == ValidationIssues()
p = figure(min_height=100, max_height=300)
p.circle([1, 2, 3], [1, 2, 3])
issues = check_integrity([p])
assert issues == ValidationIssues()
p = figure(min_height=100, max_height=300, sizing_mode="stretch_height")
p.circle([1, 2, 3], [1, 2, 3])
issues = check_integrity([p])
assert issues == ValidationIssues()
p = figure(min_height=100, max_height=300, sizing_mode="fixed")
p.circle([1, 2, 3], [1, 2, 3])
issues = check_integrity([p])
assert issues == ValidationIssues(error=[
ValidationIssue(code=1023, name="MIN_PREFERRED_MAX_HEIGHT", text="Expected min_height <= height <= max_height"),
])
def test_plot_add_layout_raises_error_if_not_render() -> None:
plot = figure()
with pytest.raises(ValueError):
plot.add_layout(Range1d())
def test_plot_add_layout_adds_label_to_plot_renderers() -> None:
plot = figure()
label = Label()
plot.add_layout(label)
assert label in plot.center
def test_plot_add_layout_adds_axis_to_renderers_and_side_renderers() -> None:
plot = figure()
axis = LinearAxis()
plot.add_layout(axis, 'left')
assert axis in plot.left
def test_plot_add_layout_moves_an_existing_renderer() -> None:
plot = figure()
axis = LinearAxis()
plot.add_layout(axis, 'left')
assert axis in plot.left
assert axis not in plot.right
assert axis not in plot.above
assert axis not in plot.below
assert axis not in plot.center
plot.add_layout(axis, 'above')
assert axis not in plot.left
assert axis not in plot.right
assert axis in plot.above
assert axis not in plot.below
assert axis not in plot.center
def test_plot_add_layout_moves_an_existing_renderer_added_manually() -> None:
plot = figure()
axis = LinearAxis()
grid = Grid()
plot.left = [axis, grid, axis]
assert grid in plot.left
assert axis in plot.left
assert axis not in plot.right
assert axis not in plot.above
assert axis not in plot.below
assert axis not in plot.center
plot.add_layout(axis, 'above')
assert grid in plot.left
assert axis not in plot.left
assert axis not in plot.right
assert axis in plot.above
assert axis not in plot.below
assert axis not in plot.center
def test_sizing_mode_property_is_fixed_by_default() -> None:
plot = figure()
assert plot.sizing_mode is None
|
TestPlotValidation
|
python
|
pyca__cryptography
|
tests/hazmat/primitives/test_aes.py
|
{
"start": 4149,
"end": 5103
}
|
class ____:
test_cbc = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "AES", "CBC"),
[
"CBCGFSbox128.rsp",
"CBCGFSbox192.rsp",
"CBCGFSbox256.rsp",
"CBCKeySbox128.rsp",
"CBCKeySbox192.rsp",
"CBCKeySbox256.rsp",
"CBCVarKey128.rsp",
"CBCVarKey192.rsp",
"CBCVarKey256.rsp",
"CBCVarTxt128.rsp",
"CBCVarTxt192.rsp",
"CBCVarTxt256.rsp",
"CBCMMT128.rsp",
"CBCMMT192.rsp",
"CBCMMT256.rsp",
],
lambda key, **kwargs: algorithms.AES(binascii.unhexlify(key)),
lambda iv, **kwargs: modes.CBC(binascii.unhexlify(iv)),
)
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
algorithms.AES(b"\x00" * 16), modes.ECB()
),
skip_message="Does not support AES ECB",
)
|
TestAESModeCBC
|
python
|
great-expectations__great_expectations
|
tests/integration/test_utils/data_source_config/big_query.py
|
{
"start": 1541,
"end": 2570
}
|
class ____(SQLBatchTestSetup[BigQueryDatasourceTestConfig]):
@property
@override
def connection_string(self) -> str:
return self.big_query_connection_config.connection_string
@property
@override
def use_schema(self) -> bool:
# BigQuery calls its schemas "datasets". Their docs show that the sql way of defining a
# dataset is to create a schema: https://cloud.google.com/bigquery/docs/datasets#sql
return True
@override
def make_asset(self) -> TableAsset:
return self.context.data_sources.add_bigquery(
name=self._random_resource_name(), connection_string=self.connection_string
).add_table_asset(
name=self._random_resource_name(),
table_name=self.table_name,
schema_name=self.schema,
)
@cached_property
def big_query_connection_config(self) -> BigQueryConnectionConfig:
return BigQueryConnectionConfig() # type: ignore[call-arg] # retrieves env vars
|
BigQueryBatchTestSetup
|
python
|
PrefectHQ__prefect
|
tests/server/schemas/test_core.py
|
{
"start": 9925,
"end": 12172
}
|
class ____:
async def test_validates_metadata_sizes(self):
artifact = schemas.core.Artifact(
metadata_={"a very long key": "x" * 5000, "a very short key": "o" * 10}
)
assert len(artifact.metadata_["a very short key"]) == 10
assert len(artifact.metadata_["a very long key"]) < 5000
assert len(artifact.metadata_["a very long key"]) == 503 # max length + "..."
async def test_from_result_populates_type_key_and_description(self):
# TODO: results received from the API should conform to a schema
result = dict(
some_string="abcdefghijklmnopqrstuvwxyz",
artifact_key="the secret pa55word",
artifact_type="a test result",
artifact_description="the most remarkable word",
)
artifact = schemas.core.Artifact.from_result(result)
assert artifact.key == "the secret pa55word"
assert artifact.data["some_string"] == "abcdefghijklmnopqrstuvwxyz"
assert artifact.type == "a test result"
assert artifact.description == "the most remarkable word"
async def test_from_result_compatible_with_older_result_payloads(self):
result = dict(
some_string="abcdefghijklmnopqrstuvwxyz",
)
artifact = schemas.core.Artifact.from_result(result)
assert artifact.data["some_string"] == "abcdefghijklmnopqrstuvwxyz"
assert artifact.type is None
assert artifact.metadata_ is None
@pytest.mark.parametrize("result", [1, "test", {"foo": "bar"}])
async def test_from_result_compatible_with_arbitrary_json(self, result):
artifact = schemas.core.Artifact.from_result(result)
assert artifact.data == result
assert artifact.type is None
assert artifact.description is None
assert artifact.metadata_ is None
async def test_from_result_can_contain_arbitrary_fields(self):
result = dict(
first_field="chickens",
second_field="cows",
third_field="horses",
)
artifact = schemas.core.Artifact.from_result(result)
assert artifact.data == result
assert artifact.type is None
assert artifact.metadata_ is None
|
TestArtifacts
|
python
|
bottlepy__bottle
|
bottle.py
|
{
"start": 85172,
"end": 86386
}
|
class ____(MultiDict):
""" This :class:`MultiDict` subclass is used to store request form data.
Additionally to the normal dict-like item access methods, this container
also supports attribute-like access to its values. Missing attributes
default to an empty string.
.. versionchanged:: 0.14
All keys and values are now decoded as utf8 by default, item and
attribute access will return the same string.
"""
def decode(self, encoding=None):
""" (deprecated) Starting with 0.13 all keys and values are already
correctly decoded. """
copy = FormsDict()
for key, value in self.allitems():
copy[key] = value
return copy
def getunicode(self, name, default=None, encoding=None):
""" (deprecated) Return the value as a unicode string, or the default. """
return self.get(name, default)
def __getattr__(self, name, default=str()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.get(name, default=default)
|
FormsDict
|
python
|
ray-project__ray
|
python/ray/_private/runtime_env/packaging.py
|
{
"start": 1753,
"end": 34757
}
|
class ____:
"""Asyncio version used to prevent blocking event loop."""
def __init__(self, lock_file: str):
self.file = FileLock(lock_file)
async def __aenter__(self):
while True:
try:
self.file.acquire(timeout=0)
return
except TimeoutError:
await asyncio.sleep(0.1)
async def __aexit__(self, exc_type, exc, tb):
self.file.release()
def _xor_bytes(left: bytes, right: bytes) -> bytes:
if left and right:
return bytes(a ^ b for (a, b) in zip(left, right))
return left or right
def _dir_travel(
path: Path,
excludes: List[Callable],
handler: Callable,
include_gitignore: bool,
logger: Optional[logging.Logger] = default_logger,
):
"""Travels the path recursively, calling the handler on each subpath.
Respects excludes, which will be called to check if this path is skipped.
"""
new_excludes = get_excludes_from_ignore_files(
path, include_gitignore=include_gitignore, logger=logger
)
excludes.extend(new_excludes)
skip = any(e(path) for e in excludes)
if not skip:
try:
handler(path)
except Exception as e:
logger.error(f"Issue with path: {path}")
raise e
if path.is_dir():
for sub_path in path.iterdir():
_dir_travel(
sub_path,
excludes,
handler,
include_gitignore=include_gitignore,
logger=logger,
)
for _ in range(len(new_excludes)):
excludes.pop()
def _hash_file_content_or_directory_name(
filepath: Path,
relative_path: Path,
logger: Optional[logging.Logger] = default_logger,
) -> bytes:
"""Helper function to create hash of a single file or directory.
This function hashes the path of the file or directory,
and if it's a file, then it hashes its content too.
"""
BUF_SIZE = 4096 * 1024
sha1 = hashlib.sha1()
sha1.update(str(filepath.relative_to(relative_path)).encode())
if not filepath.is_dir():
try:
f = filepath.open("rb")
except Exception as e:
logger.debug(
f"Skipping contents of file {filepath} when calculating package hash "
f"because the file couldn't be opened: {e}"
)
else:
try:
data = f.read(BUF_SIZE)
while len(data) != 0:
sha1.update(data)
data = f.read(BUF_SIZE)
finally:
f.close()
return sha1.digest()
def _hash_file(
filepath: Path,
relative_path: Path,
logger: Optional[logging.Logger] = default_logger,
) -> bytes:
"""Helper function to create hash of a single file.
It hashes the path of the file and its content to create a hash value.
"""
file_hash = _hash_file_content_or_directory_name(
filepath, relative_path, logger=logger
)
return _xor_bytes(file_hash, b"0" * 8)
def _hash_directory(
root: Path,
relative_path: Path,
excludes: Optional[Callable],
include_gitignore: bool,
logger: Optional[logging.Logger] = default_logger,
) -> bytes:
"""Helper function to create hash of a directory.
It'll go through all the files in the directory and xor
hash(file_name, file_content) to create a hash value.
"""
hash_val = b"0" * 8
def handler(path: Path):
file_hash = _hash_file_content_or_directory_name(
path, relative_path, logger=logger
)
nonlocal hash_val
hash_val = _xor_bytes(hash_val, file_hash)
excludes = [] if excludes is None else [excludes]
_dir_travel(
root, excludes, handler, include_gitignore=include_gitignore, logger=logger
)
return hash_val
def parse_path(pkg_path: str) -> None:
"""Parse the path to check it is well-formed and exists."""
path = Path(pkg_path)
try:
path.resolve(strict=True)
except OSError:
raise ValueError(f"{path} is not a valid path.")
def parse_uri(pkg_uri: str) -> Tuple[Protocol, str]:
"""
Parse package uri into protocol and package name based on its format.
Note that the output of this function is not for handling actual IO, it's
only for setting up local directory folders by using package name as path.
>>> parse_uri("https://test.com/file.zip")
(<Protocol.HTTPS: 'https'>, 'https_test_com_file.zip')
>>> parse_uri("https://test.com/file.whl")
(<Protocol.HTTPS: 'https'>, 'file.whl')
"""
if is_path(pkg_uri):
raise ValueError(f"Expected URI but received path {pkg_uri}")
uri = urlparse(pkg_uri)
try:
protocol = Protocol(uri.scheme)
except ValueError as e:
raise ValueError(
f'Invalid protocol for runtime_env URI "{pkg_uri}". '
f"Supported protocols: {Protocol._member_names_}. Original error: {e}"
)
if protocol in Protocol.remote_protocols():
if uri.path.endswith(".whl"):
# Don't modify the .whl filename. See
# https://peps.python.org/pep-0427/#file-name-convention
# for more information.
package_name = uri.path.split("/")[-1]
else:
package_name = f"{protocol.value}_{uri.netloc}{uri.path}"
disallowed_chars = ["/", ":", "@", "+", " ", "(", ")"]
for disallowed_char in disallowed_chars:
package_name = package_name.replace(disallowed_char, "_")
# Remove all periods except the last, which is part of the
# file extension
package_name = package_name.replace(".", "_", package_name.count(".") - 1)
else:
package_name = uri.netloc
return (protocol, package_name)
def is_zip_uri(uri: str) -> bool:
try:
protocol, path = parse_uri(uri)
except ValueError:
return False
return Path(path).suffix == ".zip"
def is_whl_uri(uri: str) -> bool:
try:
_, path = parse_uri(uri)
except ValueError:
return False
return Path(path).suffix == ".whl"
def is_jar_uri(uri: str) -> bool:
try:
_, path = parse_uri(uri)
except ValueError:
return False
return Path(path).suffix == ".jar"
def _get_excludes(path: Path, excludes: List[str]) -> Callable:
path = path.absolute()
pathspec = PathSpec.from_lines("gitwildmatch", excludes)
def match(p: Path):
path_str = str(p.absolute().relative_to(path))
return pathspec.match_file(path_str)
return match
def _get_ignore_file(path: Path, ignore_file: str) -> Optional[Callable]:
"""Returns a function that returns True if the path should be excluded.
Returns None if there is no ignore_file in the path.
Args:
path: The path to the directory to check for an ignore file.
ignore_file: The name of the ignore file.
Returns:
A function that returns True if the path should be excluded.
"""
path = path.absolute()
ignore_file = path / ignore_file
if ignore_file.is_file():
with ignore_file.open("r") as f:
pathspec = PathSpec.from_lines("gitwildmatch", f.readlines())
def match(p: Path):
path_str = str(p.absolute().relative_to(path))
return pathspec.match_file(path_str)
return match
else:
return None
def get_excludes_from_ignore_files(
path: Path,
include_gitignore: bool,
logger: Optional[logging.Logger] = default_logger,
) -> List[Callable]:
"""Get exclusion functions from .gitignore and .rayignore files in the current path.
Args:
path: The path to check for ignore files.
include_gitignore: Whether to respect .gitignore files.
logger: Logger to use.
Returns:
List[Callable]: List of exclusion functions. Each function takes a Path
and returns True if the path should be excluded based on the ignore
patterns in the respective ignore file.
"""
ignore_files = []
to_ignore: List[Optional[Callable]] = []
if include_gitignore:
g = _get_ignore_file(path, ignore_file=".gitignore")
if g is not None:
to_ignore.append(g)
ignore_files.append(path / ".gitignore")
r = _get_ignore_file(path, ignore_file=".rayignore")
if r is not None:
to_ignore.append(r)
ignore_files.append(path / ".rayignore")
if ignore_files:
logger.info(f"Ignoring upload to cluster for these files: {ignore_files}")
return to_ignore
def pin_runtime_env_uri(uri: str, *, expiration_s: Optional[int] = None) -> None:
"""Pin a reference to a runtime_env URI in the GCS on a timeout.
This is used to avoid premature eviction in edge conditions for job
reference counting. See https://github.com/ray-project/ray/pull/24719.
Packages are uploaded to GCS in order to be downloaded by a runtime env plugin
(e.g. working_dir, py_modules) after the job starts.
This function adds a temporary reference to the package in the GCS to prevent
it from being deleted before the job starts. (See #23423 for the bug where
this happened.)
If this reference didn't have an expiration, then if the script exited
(e.g. via Ctrl-C) before the job started, the reference would never be
removed, so the package would never be deleted.
"""
if expiration_s is None:
expiration_s = int(
os.environ.get(
RAY_RUNTIME_ENV_URI_PIN_EXPIRATION_S_ENV_VAR,
RAY_RUNTIME_ENV_URI_PIN_EXPIRATION_S_DEFAULT,
)
)
elif not isinstance(expiration_s, int):
raise ValueError(f"expiration_s must be an int, got {type(expiration_s)}.")
if expiration_s < 0:
raise ValueError(f"expiration_s must be >= 0, got {expiration_s}.")
elif expiration_s > 0:
_pin_runtime_env_uri(uri, expiration_s=expiration_s)
def _store_package_in_gcs(
    pkg_uri: str,
    data: bytes,
    logger: Optional[logging.Logger] = default_logger,
) -> int:
    """Store serialized package bytes in the Global Control Store (GCS).

    Args:
        pkg_uri: The GCS key to store the data in.
        data: The serialized package's bytes to store in the GCS.
        logger: The logger used by this function.

    Returns:
        int: Size of the stored data in bytes.

    Raises:
        RuntimeError: If the upload to the GCS fails.
        ValueError: If the data's size exceeds GCS_STORAGE_MAX_SIZE.
    """
    file_size = len(data)
    size_str = _mib_string(file_size)
    if file_size >= GCS_STORAGE_MAX_SIZE:
        raise ValueError(
            f"Package size ({size_str}) exceeds the maximum size of "
            f"{_mib_string(GCS_STORAGE_MAX_SIZE)}. You can exclude large "
            "files using the 'excludes' option to the runtime_env or provide "
            "a remote URI of a zip file using protocols such as 's3://', "
            "'https://' and so on, refer to "
            "https://docs.ray.io/en/latest/ray-core/handling-dependencies.html#api-reference."  # noqa
        )

    logger.info(f"Pushing file package '{pkg_uri}' ({size_str}) to Ray cluster...")
    try:
        # Test hook: simulate a failed upload when the env var is set.
        if os.environ.get(RAY_RUNTIME_ENV_FAIL_UPLOAD_FOR_TESTING_ENV_VAR):
            raise RuntimeError(
                "Simulating failure to upload package for testing purposes."
            )
        _internal_kv_put(pkg_uri, data)
    except Exception as e:
        raise RuntimeError(
            "Failed to store package in the GCS.\n"
            f"  - GCS URI: {pkg_uri}\n"
            f"  - Package data ({size_str}): {data[:15]}...\n"
        ) from e
    logger.info(f"Successfully pushed file package '{pkg_uri}'.")
    return file_size
def _get_local_path(base_directory: str, pkg_uri: str) -> str:
    """Return the local filesystem path under ``base_directory`` for ``pkg_uri``."""
    _protocol, pkg_name = parse_uri(pkg_uri)
    return os.path.join(base_directory, pkg_name)
def _zip_files(
    path_str: str,
    excludes: List[str],
    output_path: str,
    include_gitignore: bool,
    include_parent_dir: bool = False,
    logger: Optional[logging.Logger] = default_logger,
) -> None:
    """Zip the target file or directory and write the archive to output_path.

    Args:
        path_str: The file or directory to zip.
        excludes: The directories or files to be excluded.
        output_path: The output path for the zip file.
        include_gitignore: Whether to respect .gitignore files.
        include_parent_dir: If true, includes the top-level directory as a
            directory inside the zip file.
        logger: Logger used to warn about very large files.
    """
    archive_path = Path(output_path).absolute()
    source_path = Path(path_str).absolute()
    # For a single file, archive entries are made relative to its parent dir.
    root_dir = source_path.parent if source_path.is_file() else source_path

    with ZipFile(archive_path, "w", strict_timestamps=False) as zip_handler:

        def write_entry(path: Path):
            # Only files and empty directories become explicit archive
            # entries; non-empty directories are implied by their contents.
            is_empty_dir = path.is_dir() and next(path.iterdir(), None) is None
            if not (is_empty_dir or path.is_file()):
                return
            file_size = path.stat().st_size
            if file_size >= FILE_SIZE_WARNING:
                logger.warning(
                    f"File {path} is very large "
                    f"({_mib_string(file_size)}). Consider adding this "
                    "file to the 'excludes' list to skip uploading it: "
                    "`ray.init(..., "
                    f"runtime_env={{'excludes': ['{path}']}})`"
                )
            arcname = path.relative_to(root_dir)
            if include_parent_dir:
                arcname = root_dir.name / arcname
            zip_handler.write(path, arcname)

        _dir_travel(
            source_path,
            [_get_excludes(source_path, excludes)],
            write_entry,
            include_gitignore=include_gitignore,
            logger=logger,
        )
def package_exists(pkg_uri: str) -> bool:
    """Check whether the package with the given URI exists.

    Args:
        pkg_uri: The URI of the package.

    Returns:
        True if the package exists, False otherwise.

    Raises:
        NotImplementedError: If the URI's protocol is not supported.
    """
    protocol, _pkg_name = parse_uri(pkg_uri)
    if protocol != Protocol.GCS:
        raise NotImplementedError(f"Protocol {protocol} is not supported")
    return _internal_kv_exists(pkg_uri)
def get_uri_for_package(package: Path) -> str:
    """Get a content-addressable URI from a package's contents."""
    if package.suffix == ".whl":
        # Wheel file names already include the Python package name, version
        # and tags, so the file name itself is effectively content-addressed.
        return f"{Protocol.GCS.value}://{package.name}"
    digest = hashlib.sha1(package.read_bytes()).hexdigest()
    return f"{Protocol.GCS.value}://{RAY_PKG_PREFIX + digest}.zip"
def get_uri_for_file(file: str) -> str:
    """Get a content-addressable URI from a file's content.

    The package name is _ray_pkg_<HASH_VAL>.zip, where HASH_VAL is the hash
    of the file's contents.
    For example: _ray_pkg_029f88d5ecc55e1e4d64fc6e388fd103.zip

    Examples:
        >>> get_uri_for_file("/my_file.py")  # doctest: +SKIP
        _ray_pkg_af2734982a741.zip

    Args:
        file: Path to the file.

    Returns:
        URI (str)

    Raises:
        ValueError: If the file doesn't exist.
    """
    filepath = Path(file).absolute()
    if not (filepath.exists() and filepath.is_file()):
        raise ValueError(f"File {filepath} must be an existing file")

    hash_val = _hash_file(filepath, filepath.parent)
    return f"{Protocol.GCS.value}://{RAY_PKG_PREFIX + hash_val.hex()}.zip"
def get_uri_for_directory(
    directory: str,
    include_gitignore: bool,
    excludes: Optional[List[str]] = None,
) -> str:
    """Get a content-addressable URI from a directory's contents.

    All files in the directory are hashed to derive the package hash. The
    package name is _ray_pkg_<HASH_VAL>.zip.
    For example: _ray_pkg_029f88d5ecc55e1e4d64fc6e388fd103.zip

    Examples:
        >>> get_uri_for_directory("/my_directory")  # doctest: +SKIP
        _ray_pkg_af2734982a741.zip

    Args:
        directory: The directory.
        include_gitignore: Whether to respect .gitignore files.
        excludes: The dirs or files that should be excluded.

    Returns:
        URI (str)

    Raises:
        ValueError: If the directory doesn't exist.
    """
    dir_path = Path(directory).absolute()
    if not (dir_path.exists() and dir_path.is_dir()):
        raise ValueError(f"directory {dir_path} must be an existing directory")

    hash_val = _hash_directory(
        dir_path,
        dir_path,
        _get_excludes(dir_path, [] if excludes is None else excludes),
        include_gitignore=include_gitignore,
    )
    return f"{Protocol.GCS.value}://{RAY_PKG_PREFIX + hash_val.hex()}.zip"
def upload_package_to_gcs(pkg_uri: str, pkg_bytes: bytes) -> None:
    """Upload a local package to GCS.

    Args:
        pkg_uri: The URI of the package, e.g. gcs://my_package.zip
        pkg_bytes: The data to be uploaded.

    Raises:
        RuntimeError: If the upload fails.
        ValueError: If the pkg_uri is a remote path or if the data's
            size exceeds GCS_STORAGE_MAX_SIZE.
        NotImplementedError: If the protocol of the URI is not supported.
    """
    protocol, _pkg_name = parse_uri(pkg_uri)
    # Remote URIs are downloaded directly by workers; nothing should ever be
    # uploaded under a remote protocol.
    if protocol in Protocol.remote_protocols():
        raise ValueError(
            "upload_package_to_gcs should not be called with a remote path."
        )
    if protocol != Protocol.GCS:
        raise NotImplementedError(f"Protocol {protocol} is not supported")
    _store_package_in_gcs(pkg_uri, pkg_bytes)
def create_package(
    module_path: str,
    target_path: Path,
    include_gitignore: bool,
    include_parent_dir: bool = False,
    excludes: Optional[List[str]] = None,
    logger: Optional[logging.Logger] = default_logger,
):
    """Create a zip package at ``target_path`` from the local ``module_path``.

    This is a no-op if ``target_path`` already exists.

    Args:
        module_path: The file or directory to package.
        target_path: Destination path for the zip file.
        include_gitignore: Whether to respect .gitignore files.
        include_parent_dir: If true, includes the top-level directory as a
            directory inside the zip file.
        excludes: The directories or files to be excluded.
        logger: Logger for status messages.
    """
    excludes = excludes if excludes is not None else []
    logger = logger if logger is not None else default_logger

    if target_path.exists():
        # An identically-named package has already been created.
        return
    logger.info(f"Creating a file package for local module '{module_path}'.")
    _zip_files(
        module_path,
        excludes,
        str(target_path),
        include_gitignore=include_gitignore,
        include_parent_dir=include_parent_dir,
        logger=logger,
    )
def upload_package_if_needed(
    pkg_uri: str,
    base_directory: str,
    module_path: str,
    include_gitignore: bool,
    include_parent_dir: bool = False,
    excludes: Optional[List[str]] = None,
    logger: Optional[logging.Logger] = default_logger,
) -> bool:
    """Upload the contents of the directory under the given URI.

    This will first create a temporary zip file under the passed
    base_directory.

    If the package already exists in storage, this is a no-op.

    Args:
        pkg_uri: URI of the package to upload.
        base_directory: Directory where package files are stored.
        module_path: The module to be uploaded, either a single .py file or
            a directory.
        include_gitignore: Whether to respect .gitignore files.
        include_parent_dir: If true, includes the top-level directory as a
            directory inside the zip file.
        excludes: List specifying files to exclude.
        logger: Logger for status messages.

    Returns:
        True if the package was uploaded, False if it already existed.

    Raises:
        RuntimeError: If the upload fails.
        ValueError: If the pkg_uri is a remote path or if the data's
            size exceeds GCS_STORAGE_MAX_SIZE.
        NotImplementedError: If the protocol of the URI is not supported.
    """
    if excludes is None:
        excludes = []
    if logger is None:
        logger = default_logger

    # Temporarily pin the URI so the GCS doesn't evict it mid-upload.
    pin_runtime_env_uri(pkg_uri)

    if package_exists(pkg_uri):
        return False

    package_file = Path(_get_local_path(base_directory, pkg_uri))
    # Make the temporary zip file name unique so that it doesn't conflict with
    # concurrent upload_package_if_needed calls with the same pkg_uri.
    # See https://github.com/ray-project/ray/issues/47471.
    package_file = package_file.with_name(
        f"{time.time_ns()}_{os.getpid()}_{package_file.name}"
    )
    create_package(
        module_path,
        package_file,
        include_gitignore=include_gitignore,
        include_parent_dir=include_parent_dir,
        excludes=excludes,
    )

    try:
        package_file_bytes = package_file.read_bytes()
    finally:
        # Always remove the local temporary zip file -- even if reading it
        # fails -- to avoid accumulating temporary zip files.
        if package_file.exists():
            package_file.unlink()

    upload_package_to_gcs(pkg_uri, package_file_bytes)
    return True
def get_local_dir_from_uri(uri: str, base_directory: str) -> Path:
    """Return the local directory corresponding to this URI."""
    # Strip the file extension (e.g. ".zip") from the local package path.
    return Path(_get_local_path(base_directory, uri)).with_suffix("")
@DeveloperAPI
async def download_and_unpack_package(
    pkg_uri: str,
    base_directory: str,
    gcs_client: Optional[GcsClient] = None,
    logger: Optional[logging.Logger] = default_logger,
    overwrite: bool = False,
) -> str:
    """Download the package corresponding to this URI and unpack it if zipped.

    Will be written to a file or directory named {base_directory}/{uri}.
    Returns the path to this file or directory.

    Args:
        pkg_uri: URI of the package to download.
        base_directory: Directory to use as the parent directory of the target
            directory for the unpacked files.
        gcs_client: Client to use for downloading from the GCS.
        logger: The logger to use.
        overwrite: If True, overwrite the existing package.

    Returns:
        Path to the local directory containing the unpacked package files.

    Raises:
        IOError: If the download fails.
        ImportError: If smart_open is not installed and a remote URI is used.
        NotImplementedError: If the protocol of the URI is not supported.
        ValueError: If the GCS client is not provided when downloading from GCS,
            or if package URI is invalid.
    """
    pkg_file = Path(_get_local_path(base_directory, pkg_uri))
    if pkg_file.suffix == "":
        raise ValueError(
            f"Invalid package URI: {pkg_uri}."
            "URI must have a file extension and the URI must be valid."
        )
    # Serialize concurrent downloads of the same package on this node.
    async with _AsyncFileLock(str(pkg_file) + ".lock"):
        if logger is None:
            logger = default_logger

        logger.debug(f"Fetching package for URI: {pkg_uri}")

        local_dir = get_local_dir_from_uri(pkg_uri, base_directory)
        assert local_dir != pkg_file, "Invalid pkg_file!"

        download_package: bool = True
        if local_dir.exists() and not overwrite:
            # Already downloaded and unpacked previously; reuse it.
            download_package = False
            assert local_dir.is_dir(), f"{local_dir} is not a directory"
        elif local_dir.exists():
            # overwrite=True: discard the previously unpacked contents.
            logger.info(f"Removing {local_dir} with pkg_file {pkg_file}")
            shutil.rmtree(local_dir)

        if download_package:
            protocol, _ = parse_uri(pkg_uri)
            logger.info(
                f"Downloading package from {pkg_uri} to {pkg_file} "
                f"with protocol {protocol}"
            )
            if protocol == Protocol.GCS:
                if gcs_client is None:
                    raise ValueError(
                        "GCS client must be provided to download from GCS."
                    )

                # Download package from the GCS.
                code = await gcs_client.async_internal_kv_get(
                    pkg_uri.encode(), namespace=None, timeout=None
                )
                # Test hook: simulate a failed download when the env var is set.
                if os.environ.get(RAY_RUNTIME_ENV_FAIL_DOWNLOAD_FOR_TESTING_ENV_VAR):
                    code = None
                if code is None:
                    raise IOError(
                        f"Failed to download runtime_env file package {pkg_uri} "
                        "from the GCS to the Ray worker node. The package may "
                        "have prematurely been deleted from the GCS due to a "
                        "long upload time or a problem with Ray. Try setting the "
                        "environment variable "
                        f"{RAY_RUNTIME_ENV_URI_PIN_EXPIRATION_S_ENV_VAR} "
                        " to a value larger than the upload time in seconds "
                        "(the default is "
                        f"{RAY_RUNTIME_ENV_URI_PIN_EXPIRATION_S_DEFAULT}). "
                        "If this fails, try re-running "
                        "after making any change to a file in the file package."
                    )
                code = code or b""
                pkg_file.write_bytes(code)

                if is_zip_uri(pkg_uri):
                    unzip_package(
                        package_path=pkg_file,
                        target_dir=local_dir,
                        remove_top_level_directory=False,
                        unlink_zip=True,
                        logger=logger,
                    )
                else:
                    # Non-zip packages (e.g. wheels) are used as plain files.
                    return str(pkg_file)
            elif protocol in Protocol.remote_protocols():
                protocol.download_remote_uri(source_uri=pkg_uri, dest_file=pkg_file)

                if pkg_file.suffix in [".zip", ".jar"]:
                    unzip_package(
                        package_path=pkg_file,
                        target_dir=local_dir,
                        remove_top_level_directory=True,
                        unlink_zip=True,
                        logger=logger,
                    )
                elif pkg_file.suffix == ".whl":
                    return str(pkg_file)
                else:
                    # NOTE: The original passed two arguments here (a stray
                    # trailing comma), which made the exception message a
                    # tuple instead of a single concatenated string.
                    raise NotImplementedError(
                        f"Package format {pkg_file.suffix} is "
                        "not supported for remote protocols"
                    )
            else:
                raise NotImplementedError(f"Protocol {protocol} is not supported")

        return str(local_dir)
def get_top_level_dir_from_compressed_package(package_path: str):
    """Return the single top-level directory of a zip archive, if any.

    If the compressed package at package_path contains a single top-level
    directory, returns the name of that directory. Otherwise, returns None.
    A second top-level directory named __MACOSX (added by macOS's zip
    command) is ignored.

    Args:
        package_path: Path to the zip archive to inspect.

    Returns:
        The top-level directory name, or None if the archive has top-level
        files or multiple top-level directories.
    """

    def is_top_level_file(file_name):
        return "/" not in file_name

    def base_dir_name(file_name):
        return file_name.split("/")[0]

    top_level_directory = None
    # Use a context manager so the archive handle is always closed
    # (the original implementation leaked the open ZipFile).
    with ZipFile(package_path, "r") as package_zip:
        for file_name in package_zip.namelist():
            if top_level_directory is None:
                # Cache the top_level_directory name when checking
                # the first (non-__MACOSX) entry in the archive.
                if is_top_level_file(file_name):
                    return None
                dir_name = base_dir_name(file_name)
                if dir_name == MAC_OS_ZIP_HIDDEN_DIR_NAME:
                    continue
                top_level_directory = dir_name
            else:
                # Confirm that all other entries belong to the same
                # top_level_directory (or the ignored __MACOSX dir).
                if is_top_level_file(file_name) or base_dir_name(file_name) not in [
                    top_level_directory,
                    MAC_OS_ZIP_HIDDEN_DIR_NAME,
                ]:
                    return None
    return top_level_directory
def remove_dir_from_filepaths(base_dir: str, rdir: str):
    """Flatten ``rdir`` by moving its contents up into ``base_dir``.

    Args:
        base_dir: String path of the directory containing rdir.
        rdir: String path of the directory, relative to base_dir, whose
            contents should be moved into base_dir (its parent directory).

    Assumes base_dir's contents and rdir's contents have no name conflicts.
    """
    # Stage rdir in a temporary directory first so its children can be moved
    # into base_dir without colliding with rdir itself.
    with TemporaryDirectory() as tmp_dir:
        staged_rdir = os.path.join(tmp_dir, rdir)
        # shutil.move() (rather than os.rename()) handles the case where
        # rdir and tmp_dir live on different file systems.
        shutil.move(os.path.join(base_dir, rdir), staged_rdir)
        for child in os.listdir(staged_rdir):
            shutil.move(
                os.path.join(staged_rdir, child), os.path.join(base_dir, child)
            )
def unzip_package(
    package_path: str,
    target_dir: str,
    remove_top_level_directory: bool,
    unlink_zip: bool,
    logger: Optional[logging.Logger] = default_logger,
) -> None:
    """Unzip the compressed package at package_path into target_dir.

    If remove_top_level_directory is True and the archive's top level is a
    single directory (possibly accompanied by a hidden __MACOSX directory
    produced by macOS's zip command), that directory is stripped and its
    contents are stored directly in target_dir. Otherwise -- when
    remove_top_level_directory is False, or the top level holds multiple
    files or directories (not counting __MACOSX) -- the archive contents
    are stored in target_dir as-is.

    Args:
        package_path: String path of the compressed package to unzip.
        target_dir: String path of the directory to store the unzipped contents.
        remove_top_level_directory: Whether to remove the top-level directory
            from the zip contents.
        unlink_zip: Whether to unlink the zip file stored at package_path.
        logger: Optional logger to use for logging.
    """
    try:
        os.mkdir(target_dir)
    except FileExistsError:
        logger.info(f"Directory at {target_dir} already exists")

    logger.debug(f"Unpacking {package_path} to {target_dir}")

    with ZipFile(str(package_path), "r") as archive:
        archive.extractall(target_dir)

    top_level_directory = (
        get_top_level_dir_from_compressed_package(package_path)
        if remove_top_level_directory
        else None
    )
    if top_level_directory is not None:
        # Drop the __MACOSX metadata directory (if any) before flattening.
        macos_dir = os.path.join(target_dir, MAC_OS_ZIP_HIDDEN_DIR_NAME)
        if os.path.isdir(macos_dir):
            shutil.rmtree(macos_dir)
        remove_dir_from_filepaths(target_dir, top_level_directory)

    if unlink_zip:
        Path(package_path).unlink()
def delete_package(pkg_uri: str, base_directory: str) -> bool:
    """Deletes a specific URI from the local filesystem.

    Args:
        pkg_uri: URI to delete.
        base_directory: Directory in which local packages are stored.

    Returns:
        True if the URI was successfully deleted, else False.
    """
    # NOTE: The original annotated the return type as Tuple[bool, int], but
    # the function has always returned a plain bool (matching the docstring).
    deleted = False
    path = Path(_get_local_path(base_directory, pkg_uri))
    with FileLock(str(path) + ".lock"):
        # Delete the unpacked directory/file, not the ".zip"-suffixed path.
        path = path.with_suffix("")
        if path.exists():
            if path.is_dir() and not path.is_symlink():
                shutil.rmtree(str(path))
            else:
                path.unlink()
            deleted = True
    return deleted
async def install_wheel_package(
    wheel_uri: str,
    target_dir: str,
    logger: Optional[logging.Logger] = default_logger,
) -> None:
    """Install packages in the wheel URI, and then delete the local wheel file.

    Args:
        wheel_uri: Local path of the .whl file to install.
        target_dir: Directory to install the wheel's packages into (passed
            to pip's ``--target``).
        logger: Logger for the pip command's output.

    Raises:
        RuntimeError: If the pip install command exits with a nonzero code.
    """
    pip_install_cmd = [
        "pip",
        "install",
        wheel_uri,
        f"--target={target_dir}",
    ]
    logger.info("Running py_modules wheel install command: %s", str(pip_install_cmd))
    try:
        # TODO(architkulkarni): Use `await check_output_cmd` or similar.
        exit_code, output = exec_cmd_stream_to_logger(pip_install_cmd, logger)
    finally:
        # The local wheel file is no longer needed regardless of outcome.
        wheel_uri_path = Path(wheel_uri)
        if wheel_uri_path.exists():
            if wheel_uri_path.is_dir():
                shutil.rmtree(wheel_uri)
            else:
                Path(wheel_uri).unlink()

    if exit_code != 0:
        if Path(target_dir).exists():
            # Remove the partially-populated target dir on failure.
            shutil.rmtree(target_dir)
        # Fixed: the original message was missing the space before "to",
        # producing e.g. "wheel /tmp/x.whlto /dir".
        raise RuntimeError(
            f"Failed to install py_modules wheel {wheel_uri} "
            f"to {target_dir}:\n{output}"
        )
|
_AsyncFileLock
|
python
|
jazzband__django-model-utils
|
tests/models.py
|
{
"start": 1728,
"end": 2009
}
|
class ____(InheritanceManagerTestParent):
non_related_field_using_descriptor_2 = models.FileField(upload_to="test")
normal_field_2 = models.TextField()
objects: ClassVar[InheritanceManager[InheritanceManagerTestParent]] = InheritanceManager()
|
InheritanceManagerTestChild1
|
python
|
PyCQA__pylint
|
doc/data/messages/i/implicit-flag-alias/bad.py
|
{
"start": 27,
"end": 127
}
|
class ____(IntFlag):
READ = 1
WRITE = 2
EXECUTE = 3 # [implicit-flag-alias]
|
FilePermissions
|
python
|
aio-libs__aiohttp
|
aiohttp/web_urldispatcher.py
|
{
"start": 30647,
"end": 31181
}
|
class ____(Sized, Iterable[AbstractRoute], Container[AbstractRoute]):
def __init__(self, resources: list[AbstractResource]):
self._routes: list[AbstractRoute] = []
for resource in resources:
for route in resource:
self._routes.append(route)
def __len__(self) -> int:
return len(self._routes)
def __iter__(self) -> Iterator[AbstractRoute]:
yield from self._routes
def __contains__(self, route: object) -> bool:
return route in self._routes
|
RoutesView
|
python
|
chroma-core__chroma
|
chromadb/utils/embedding_functions/huggingface_sparse_embedding_function.py
|
{
"start": 480,
"end": 7003
}
|
class ____(SparseEmbeddingFunction[Documents]):
# Since we do dynamic imports we have to type this as Any
models: Dict[str, Any] = {}
def __init__(
self,
model_name: str,
device: str,
task: Optional[TaskType] = "document",
query_config: Optional[HuggingFaceSparseEmbeddingFunctionQueryConfig] = None,
**kwargs: Any,
):
"""Initialize SparseEncoderEmbeddingFunction.
Args:
model_name (str, optional): Identifier of the Huggingface SparseEncoder model
Some common models: prithivida/Splade_PP_en_v1, naver/splade-cocondenser-ensembledistil, naver/splade-v3
device (str, optional): Device used for computation
**kwargs: Additional arguments to pass to the Splade model.
"""
try:
from sentence_transformers import SparseEncoder
except ImportError:
raise ValueError(
"The sentence_transformers python package is not installed. Please install it with `pip install sentence_transformers`"
)
self.model_name = model_name
self.device = device
self.task = task
self.query_config = query_config
for key, value in kwargs.items():
if not isinstance(value, (str, int, float, bool, list, dict, tuple)):
raise ValueError(f"Keyword argument {key} is not a primitive type")
self.kwargs = kwargs
if model_name not in self.models:
self.models[model_name] = SparseEncoder(
model_name_or_path=model_name, device=device, **kwargs
)
self._model = self.models[model_name]
def __call__(self, input: Documents) -> SparseVectors:
"""Generate embeddings for the given documents.
Args:
input: Documents to generate embeddings for.
Returns:
Embeddings for the documents.
"""
try:
from sentence_transformers import SparseEncoder
except ImportError:
raise ValueError(
"The sentence_transformers python package is not installed. Please install it with `pip install sentence_transformers`"
)
model = cast(SparseEncoder, self._model)
if self.task == "document":
embeddings = model.encode_document(
list(input),
)
elif self.task == "query":
embeddings = model.encode_query(
list(input),
)
else:
raise ValueError(f"Invalid task: {self.task}")
sparse_vectors: SparseVectors = []
for vec in embeddings:
# Convert sparse tensor to dense array if needed
if hasattr(vec, "to_dense"):
vec_dense = vec.to_dense().numpy()
else:
vec_dense = vec.numpy() if hasattr(vec, "numpy") else np.array(vec)
nz = np.where(vec_dense != 0)[0]
sparse_vectors.append(
normalize_sparse_vector(
indices=nz.tolist(), values=vec_dense[nz].tolist()
)
)
return sparse_vectors
def embed_query(self, input: Documents) -> SparseVectors:
try:
from sentence_transformers import SparseEncoder
except ImportError:
raise ValueError(
"The sentence_transformers python package is not installed. Please install it with `pip install sentence_transformers`"
)
model = cast(SparseEncoder, self._model)
if self.query_config is not None:
if self.query_config.get("task") == "document":
embeddings = model.encode_document(
list(input),
)
elif self.query_config.get("task") == "query":
embeddings = model.encode_query(
list(input),
)
else:
raise ValueError(f"Invalid task: {self.query_config.get('task')}")
sparse_vectors: SparseVectors = []
for vec in embeddings:
# Convert sparse tensor to dense array if needed
if hasattr(vec, "to_dense"):
vec_dense = vec.to_dense().numpy()
else:
vec_dense = vec.numpy() if hasattr(vec, "numpy") else np.array(vec)
nz = np.where(vec_dense != 0)[0]
sparse_vectors.append(
normalize_sparse_vector(
indices=nz.tolist(), values=vec_dense[nz].tolist()
)
)
return sparse_vectors
else:
return self.__call__(input)
@staticmethod
def name() -> str:
return "huggingface_sparse"
@staticmethod
def build_from_config(
config: Dict[str, Any]
) -> "SparseEmbeddingFunction[Documents]":
model_name = config.get("model_name")
device = config.get("device")
task = config.get("task")
query_config = config.get("query_config")
kwargs = config.get("kwargs", {})
if model_name is None or device is None:
assert False, "This code should not be reached"
return HuggingFaceSparseEmbeddingFunction(
model_name=model_name,
device=device,
task=task,
query_config=query_config,
**kwargs,
)
def get_config(self) -> Dict[str, Any]:
return {
"model_name": self.model_name,
"device": self.device,
"task": self.task,
"query_config": self.query_config,
"kwargs": self.kwargs,
}
def validate_config_update(
self, old_config: Dict[str, Any], new_config: Dict[str, Any]
) -> None:
# model_name is also used as the identifier for model path if stored locally.
# Users should be able to change the path if needed, so we should not validate that.
# e.g. moving file path from /v1/my-model.bin to /v2/my-model.bin
return
@staticmethod
def validate_config(config: Dict[str, Any]) -> None:
"""
Validate the configuration using the JSON schema.
Args:
config: Configuration to validate
Raises:
ValidationError: If the configuration does not match the schema
"""
validate_config_schema(config, "huggingface_sparse")
|
HuggingFaceSparseEmbeddingFunction
|
python
|
numpy__numpy
|
numpy/lib/tests/test_index_tricks.py
|
{
"start": 14373,
"end": 14581
}
|
class ____:
def test_basic(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(list(ndenumerate(a)),
[((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)])
|
TestNdenumerate
|
python
|
scrapy__scrapy
|
tests/mockserver/http_resources.py
|
{
"start": 8190,
"end": 8525
}
|
class ____(resource.Resource):
def render(self, request):
from twisted.internet import reactor
def response():
for i in range(1024):
request.write(b"x" * 1024)
request.finish()
reactor.callLater(0, response)
return server.NOT_DONE_YET
|
LargeChunkedFileResource
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/testing/assertsql.py
|
{
"start": 14179,
"end": 14614
}
|
class ____:
def __init__(self, context, clauseelement, multiparams, params):
self.context = context
self.clauseelement = clauseelement
if multiparams:
self.parameters = multiparams
elif params:
self.parameters = [params]
else:
self.parameters = []
self.statements = []
def __repr__(self):
return str(self.statements)
|
SQLExecuteObserved
|
python
|
getsentry__sentry
|
src/sentry/api/endpoints/organization_trace_item_stats.py
|
{
"start": 1053,
"end": 2259
}
|
class ____(OrganizationEventsV2EndpointBase):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.DATA_BROWSING
def get(self, request: Request, organization: Organization) -> Response:
try:
snuba_params = self.get_snuba_params(request, organization)
except NoProjects:
return Response({"data": []})
serializer = OrganizationTraceItemsStatsSerializer(data=request.GET)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
serialized = serializer.validated_data
resolver_config = SearchResolverConfig()
resolver = SearchResolver(
params=snuba_params, config=resolver_config, definitions=SPAN_DEFINITIONS
)
stats_results = Spans.run_stats_query(
params=snuba_params,
stats_types=serialized.get("statsType"),
query_string=serialized.get("query", ""),
referrer=Referrer.API_SPANS_FREQUENCY_STATS_RPC.value,
config=resolver_config,
search_resolver=resolver,
)
return Response({"data": stats_results})
|
OrganizationTraceItemsStatsEndpoint
|
python
|
pandas-dev__pandas
|
pandas/tests/arrays/datetimes/test_reductions.py
|
{
"start": 208,
"end": 5525
}
|
class ____:
@pytest.fixture
def arr1d(self, tz_naive_fixture):
"""Fixture returning DatetimeArray with parametrized timezones"""
tz = tz_naive_fixture
dtype = DatetimeTZDtype(tz=tz) if tz is not None else np.dtype("M8[ns]")
arr = DatetimeArray._from_sequence(
[
"2000-01-03",
"2000-01-03",
"NaT",
"2000-01-02",
"2000-01-05",
"2000-01-04",
],
dtype=dtype,
)
return arr
def test_min_max(self, arr1d, unit):
arr = arr1d
arr = arr.as_unit(unit)
tz = arr.tz
result = arr.min()
expected = pd.Timestamp("2000-01-02", tz=tz).as_unit(unit)
assert result == expected
assert result.unit == expected.unit
result = arr.max()
expected = pd.Timestamp("2000-01-05", tz=tz).as_unit(unit)
assert result == expected
assert result.unit == expected.unit
result = arr.min(skipna=False)
assert result is NaT
result = arr.max(skipna=False)
assert result is NaT
@pytest.mark.parametrize("tz", [None, "US/Central"])
def test_min_max_empty(self, skipna, tz):
dtype = DatetimeTZDtype(tz=tz) if tz is not None else np.dtype("M8[ns]")
arr = DatetimeArray._from_sequence([], dtype=dtype)
result = arr.min(skipna=skipna)
assert result is NaT
result = arr.max(skipna=skipna)
assert result is NaT
@pytest.mark.parametrize("tz", [None, "US/Central"])
def test_median_empty(self, skipna, tz):
dtype = DatetimeTZDtype(tz=tz) if tz is not None else np.dtype("M8[ns]")
arr = DatetimeArray._from_sequence([], dtype=dtype)
result = arr.median(skipna=skipna)
assert result is NaT
arr = arr.reshape(0, 3)
result = arr.median(axis=0, skipna=skipna)
expected = type(arr)._from_sequence([NaT, NaT, NaT], dtype=arr.dtype)
tm.assert_equal(result, expected)
result = arr.median(axis=1, skipna=skipna)
expected = type(arr)._from_sequence([], dtype=arr.dtype)
tm.assert_equal(result, expected)
def test_median(self, arr1d):
arr = arr1d
result = arr.median()
assert result == arr[0]
result = arr.median(skipna=False)
assert result is NaT
result = arr.dropna().median(skipna=False)
assert result == arr[0]
result = arr.median(axis=0)
assert result == arr[0]
def test_median_axis(self, arr1d):
arr = arr1d
assert arr.median(axis=0) == arr.median()
assert arr.median(axis=0, skipna=False) is NaT
msg = r"abs\(axis\) must be less than ndim"
with pytest.raises(ValueError, match=msg):
arr.median(axis=1)
@pytest.mark.filterwarnings("ignore:All-NaN slice encountered:RuntimeWarning")
def test_median_2d(self, arr1d):
arr = arr1d.reshape(1, -1)
# axis = None
assert arr.median() == arr1d.median()
assert arr.median(skipna=False) is NaT
# axis = 0
result = arr.median(axis=0)
expected = arr1d
tm.assert_equal(result, expected)
# Since column 3 is all-NaT, we get NaT there with or without skipna
result = arr.median(axis=0, skipna=False)
expected = arr1d
tm.assert_equal(result, expected)
# axis = 1
result = arr.median(axis=1)
expected = type(arr)._from_sequence([arr1d.median()], dtype=arr.dtype)
tm.assert_equal(result, expected)
result = arr.median(axis=1, skipna=False)
expected = type(arr)._from_sequence([NaT], dtype=arr.dtype)
tm.assert_equal(result, expected)
def test_mean(self, arr1d):
arr = arr1d
# manually verified result
expected = arr[0] + 0.4 * pd.Timedelta(days=1)
result = arr.mean()
assert result == expected
result = arr.mean(skipna=False)
assert result is NaT
result = arr.dropna().mean(skipna=False)
assert result == expected
result = arr.mean(axis=0)
assert result == expected
def test_mean_2d(self):
dti = pd.date_range("2016-01-01", periods=6, tz="US/Pacific", unit="ns")
dta = dti._data.reshape(3, 2)
result = dta.mean(axis=0)
expected = dta[1]
tm.assert_datetime_array_equal(result, expected)
result = dta.mean(axis=1)
expected = dta[:, 0] + pd.Timedelta(hours=12)
tm.assert_datetime_array_equal(result, expected)
result = dta.mean(axis=None)
expected = dti.mean()
assert result == expected
def test_mean_empty(self, arr1d, skipna):
arr = arr1d[:0]
assert arr.mean(skipna=skipna) is NaT
arr2d = arr.reshape(0, 3)
result = arr2d.mean(axis=0, skipna=skipna)
expected = DatetimeArray._from_sequence([NaT, NaT, NaT], dtype=arr.dtype)
tm.assert_datetime_array_equal(result, expected)
result = arr2d.mean(axis=1, skipna=skipna)
expected = arr # i.e. 1D, empty
tm.assert_datetime_array_equal(result, expected)
result = arr2d.mean(axis=None, skipna=skipna)
assert result is NaT
|
TestReductions
|
python
|
neetcode-gh__leetcode
|
python/0201-bitwise-and-of-numbers-range.py
|
{
"start": 66,
"end": 533
}
|
class ____:
def rangeBitwiseAnd(self, left: int, right: int) -> int:
res = 0
for i in range(32):
bit = (left >> i) & 1
if not bit:
continue
remain = left % (1 << (i + 1))
diff = (1 << (i + 1)) - remain
if right - left < diff:
res = res | (1 << i)
return res
# find the longest matching prefix of set bits between left and right
|
Solution
|
python
|
openai__openai-python
|
src/openai/types/beta/realtime/conversation_item_truncated_event.py
|
{
"start": 208,
"end": 704
}
|
class ____(BaseModel):
audio_end_ms: int
"""The duration up to which the audio was truncated, in milliseconds."""
content_index: int
"""The index of the content part that was truncated."""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the assistant message item that was truncated."""
type: Literal["conversation.item.truncated"]
"""The event type, must be `conversation.item.truncated`."""
|
ConversationItemTruncatedEvent
|
python
|
optuna__optuna
|
optuna/samplers/_grid.py
|
{
"start": 701,
"end": 11674
}
|
class ____(BaseSampler):
"""Sampler using grid search.
With :class:`~optuna.samplers.GridSampler`, the trials suggest all combinations of parameters
in the given search space during the study.
Example:
.. testcode::
import optuna
def objective(trial):
x = trial.suggest_float("x", -100, 100)
y = trial.suggest_int("y", -100, 100)
return x**2 + y**2
search_space = {"x": [-50, 0, 50], "y": [-99, 0, 99]}
study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))
study.optimize(objective)
Note:
This sampler with :ref:`ask_and_tell` raises :exc:`RuntimeError` just after evaluating
the final grid. This is because :class:`~optuna.samplers.GridSampler` automatically
stops the optimization if all combinations in the passed ``search_space`` have already
been evaluated, internally invoking the :func:`~optuna.study.Study.stop` method.
As a workaround, we need to handle the error manually as in
https://github.com/optuna/optuna/issues/4121#issuecomment-1305289910.
Note:
:class:`~optuna.samplers.GridSampler` does not take care of a parameter's quantization
specified by discrete suggest methods but just samples one of values specified in the
search space. E.g., in the following code snippet, either of ``-0.5`` or ``0.5`` is
sampled as ``x`` instead of an integer point.
.. testcode::
import optuna
def objective(trial):
# The following suggest method specifies integer points between -5 and 5.
x = trial.suggest_float("x", -5, 5, step=1)
return x**2
# Non-int points are specified in the grid.
search_space = {"x": [-0.5, 0.5]}
study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))
study.optimize(objective, n_trials=2)
Note:
A parameter configuration in the grid is not considered finished until its trial is
finished. Therefore, during distributed optimization where trials run concurrently,
different workers will occasionally suggest the same parameter configuration.
The total number of actual trials may therefore exceed the size of the grid.
Note:
All parameters must be specified when using :class:`~optuna.samplers.GridSampler` with
:meth:`~optuna.study.Study.enqueue_trial`.
Args:
search_space:
A dictionary whose key and value are a parameter name and the corresponding candidates
of values, respectively.
seed:
A seed to fix the order of trials as the grid is randomly shuffled. This shuffle is
beneficial when the number of grids is larger than ``n_trials`` in
:meth:`~optuna.Study.optimize` to suppress suggesting similar grids. Please note
that fixing ``seed`` for each process is strongly recommended in distributed
optimization to avoid duplicated suggestions.
"""
def __init__(
self, search_space: Mapping[str, Sequence[GridValueType]], seed: int | None = None
) -> None:
for param_name, param_values in search_space.items():
for value in param_values:
self._check_value(param_name, value)
self._search_space = {}
for param_name, param_values in sorted(search_space.items()):
self._search_space[param_name] = list(param_values)
self._all_grids = list(itertools.product(*self._search_space.values()))
self._param_names = sorted(search_space.keys())
self._n_min_trials = len(self._all_grids)
self._rng = LazyRandomState(seed or 0)
self._rng.rng.shuffle(self._all_grids) # type: ignore[arg-type]
def reseed_rng(self) -> None:
self._rng.rng.seed()
def before_trial(self, study: Study, trial: FrozenTrial) -> None:
# Instead of returning param values, GridSampler puts the target grid id as a system attr,
# and the values are returned from `sample_independent`. This is because the distribution
# object is hard to get at the beginning of trial, while we need the access to the object
# to validate the sampled value.
# When the trial is created by RetryFailedTrialCallback or enqueue_trial, we should not
# assign a new grid_id.
if "grid_id" in trial.system_attrs or "fixed_params" in trial.system_attrs:
return
if 0 <= trial.number and trial.number < self._n_min_trials:
study._storage.set_trial_system_attr(
trial._trial_id, "search_space", self._search_space
)
study._storage.set_trial_system_attr(trial._trial_id, "grid_id", trial.number)
return
target_grids = self._get_unvisited_grid_ids(study)
if len(target_grids) == 0:
# This case may occur with distributed optimization or trial queue. If there is no
# target grid, `GridSampler` evaluates a visited, duplicated point with the current
# trial. After that, the optimization stops.
_logger.warning(
"`GridSampler` is re-evaluating a configuration because the grid has been "
"exhausted. This may happen due to a timing issue during distributed optimization "
"or when re-running optimizations on already finished studies."
)
# One of all grids is randomly picked up in this case.
target_grids = list(range(len(self._all_grids)))
# In distributed optimization, multiple workers may simultaneously pick up the same grid.
# To make the conflict less frequent, the grid is chosen randomly.
grid_id = int(self._rng.rng.choice(target_grids))
study._storage.set_trial_system_attr(trial._trial_id, "search_space", self._search_space)
study._storage.set_trial_system_attr(trial._trial_id, "grid_id", grid_id)
def infer_relative_search_space(
self, study: Study, trial: FrozenTrial
) -> dict[str, BaseDistribution]:
return {}
def sample_relative(
self, study: Study, trial: FrozenTrial, search_space: dict[str, BaseDistribution]
) -> dict[str, Any]:
return {}
def sample_independent(
self,
study: Study,
trial: FrozenTrial,
param_name: str,
param_distribution: BaseDistribution,
) -> Any:
if "grid_id" not in trial.system_attrs:
message = "All parameters must be specified when using GridSampler with enqueue_trial."
raise ValueError(message)
if param_name not in self._search_space:
message = "The parameter name, {}, is not found in the given grid.".format(param_name)
raise ValueError(message)
grid_id = trial.system_attrs["grid_id"]
param_value = self._all_grids[grid_id][self._param_names.index(param_name)]
contains = param_distribution._contains(param_distribution.to_internal_repr(param_value))
if not contains:
optuna_warn(
f"The value `{param_value}` is out of range of the parameter `{param_name}`. "
f"The value will be used but the actual distribution is: `{param_distribution}`."
)
return param_value
def after_trial(
self,
study: Study,
trial: FrozenTrial,
state: TrialState,
values: Sequence[float] | None,
) -> None:
target_grids = self._get_unvisited_grid_ids(study)
if len(target_grids) == 0:
study.stop()
elif len(target_grids) == 1:
grid_id = study._storage.get_trial_system_attrs(trial._trial_id)["grid_id"]
if grid_id == target_grids[0]:
study.stop()
@staticmethod
def _check_value(param_name: str, param_value: Any) -> None:
if param_value is None or isinstance(param_value, (str, int, float, bool)):
return
message = (
"{} contains a value with the type of {}, which is not supported by "
"`GridSampler`. Please make sure a value is `str`, `int`, `float`, `bool`"
" or `None` for persistent storage.".format(param_name, type(param_value))
)
optuna_warn(message)
def _get_unvisited_grid_ids(self, study: Study) -> list[int]:
# List up unvisited grids based on already finished ones.
visited_grids = []
running_grids = []
# We directly query the storage to get trials here instead of `study.get_trials`,
# since some pruners such as `HyperbandPruner` use the study transformed
# to filter trials. See https://github.com/optuna/optuna/issues/2327 for details.
trials = study._storage.get_all_trials(study._study_id, deepcopy=False)
for t in trials:
if "grid_id" in t.system_attrs and self._same_search_space(
t.system_attrs["search_space"]
):
if t.state.is_finished():
visited_grids.append(t.system_attrs["grid_id"])
elif t.state == TrialState.RUNNING:
running_grids.append(t.system_attrs["grid_id"])
unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids) - set(running_grids)
# If evaluations for all grids have been started, return grids that have not yet finished
# because all grids should be evaluated before stopping the optimization.
if len(unvisited_grids) == 0:
unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids)
return list(unvisited_grids)
@staticmethod
def _grid_value_equal(value1: GridValueType, value2: GridValueType) -> bool:
value1_is_nan = isinstance(value1, Real) and np.isnan(float(value1))
value2_is_nan = isinstance(value2, Real) and np.isnan(float(value2))
return (value1 == value2) or (value1_is_nan and value2_is_nan)
def _same_search_space(self, search_space: Mapping[str, Sequence[GridValueType]]) -> bool:
if set(search_space.keys()) != set(self._search_space.keys()):
return False
for param_name in search_space.keys():
if len(search_space[param_name]) != len(self._search_space[param_name]):
return False
for i, param_value in enumerate(search_space[param_name]):
if not self._grid_value_equal(param_value, self._search_space[param_name][i]):
return False
return True
def is_exhausted(self, study: Study) -> bool:
"""
Return True if all the possible params are evaluated, otherwise return False.
"""
return len(self._get_unvisited_grid_ids(study)) == 0
|
GridSampler
|
python
|
pytorch__pytorch
|
torch/_inductor/runtime/caching/interfaces.py
|
{
"start": 956,
"end": 5291
}
|
class ____(Enum):
REPLAY = "replay"
RECORD_INSERTED = "record_inserted"
RECORD_NOT_INSERTED = "record_not_inserted"
RECORD_NOT_INSERTED_REPLAY = "record_not_inserted_replay"
HIT = "hit"
MISS = "miss"
INSERTED = "inserted"
NOT_INSERTED = "not_inserted"
def _intf_callback(
origin: _IntfCallbackOrigin,
action: _IntfCallbackAction,
dur: float,
fn: Callable[P, R],
params: Params,
*args: Any,
) -> None:
if origin == _IntfCallbackOrigin.RECORD:
result: R = args[0]
if action == _IntfCallbackAction.REPLAY:
logger.log(
DEBUG,
"[RECORD] for fn %s with params %r cached, "
"returned result %r in %f seconds.",
fn.__name__,
params,
result,
dur,
)
elif action == _IntfCallbackAction.RECORD_INSERTED:
fn_dur: float = args[1]
logger.log(
DEBUG,
"[RECORD] for fn %s with params %r not cached, "
"calculated and cached result %r in %f seconds "
"of which %f seconds was spent on the function call.",
fn.__name__,
params,
result,
dur,
fn_dur,
)
elif action == _IntfCallbackAction.RECORD_NOT_INSERTED:
fn_dur = args[1]
logger.log(
DEBUG,
"[RECORD] for fn %s with params %r not cached, "
"calculated result %r but was not able to "
"insert it into the cache as a matching "
"entry already exists; returned calculated result in %f seconds "
"of which %f seconds was spent on the function call.",
fn.__name__,
params,
result,
dur,
fn_dur,
)
elif action == _IntfCallbackAction.RECORD_NOT_INSERTED_REPLAY:
fn_dur = args[1]
cached_result: R = args[2]
logger.log(
DEBUG,
"[RECORD] for fn %s with params %r not cached, "
"calculated result %r but was not able to "
"insert it into the synchronization cache as a matching "
"entry already exists; returned cached result %r in %f seconds "
"of which %f seconds was spent on the function call.",
fn.__name__,
params,
result,
cached_result,
dur,
fn_dur,
)
else:
raise NotImplementedError
elif origin == _IntfCallbackOrigin.GET:
if action == _IntfCallbackAction.HIT:
result = args[0]
logger.log(
DEBUG,
"[GET] for fn %s with params %r cached, "
"returned result %r in %f seconds.",
fn.__name__,
params,
result,
dur,
)
elif action == _IntfCallbackAction.MISS:
logger.log(
DEBUG,
"[GET] for fn %s with params %r not cached, "
"returned nothing in %f seconds.",
fn.__name__,
params,
dur,
)
else:
raise NotImplementedError
elif origin == _IntfCallbackOrigin.INSERT:
result = args[0]
if action == _IntfCallbackAction.INSERTED:
logger.log(
DEBUG,
"[INSERT] for fn %s with params %r and "
"result %r inserted in %f seconds.",
fn.__name__,
params,
result,
dur,
)
elif action == _IntfCallbackAction.NOT_INSERTED:
logger.log(
DEBUG,
"[INSERT] for fn %s with params %r and "
"result %r not inserted in %f seconds as there is "
"already has a matching entry.",
fn.__name__,
params,
result,
dur,
)
else:
raise NotImplementedError
else:
raise NotImplementedError
|
_IntfCallbackAction
|
python
|
PrefectHQ__prefect
|
src/prefect/client/schemas/filters.py
|
{
"start": 27803,
"end": 28232
}
|
class ____(PrefectBaseModel, OperatorMixin):
id: Optional[WorkPoolFilterId] = Field(
default=None, description="Filter criteria for `WorkPool.id`"
)
name: Optional[WorkPoolFilterName] = Field(
default=None, description="Filter criteria for `WorkPool.name`"
)
type: Optional[WorkPoolFilterType] = Field(
default=None, description="Filter criteria for `WorkPool.type`"
)
|
WorkPoolFilter
|
python
|
astropy__astropy
|
astropy/utils/data.py
|
{
"start": 2576,
"end": 4089
}
|
class ____(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.utils.data`.
"""
dataurl = _config.ConfigItem(
"http://data.astropy.org/", "Primary URL for astropy remote data site."
)
dataurl_mirror = _config.ConfigItem(
"http://www.astropy.org/astropy-data/",
"Mirror URL for astropy remote data site.",
)
default_http_user_agent = _config.ConfigItem(
"astropy",
"Default User-Agent for HTTP request headers. This can be overwritten "
"for a particular call via http_headers option, where available. "
"This only provides the default value when not set by https_headers.",
)
remote_timeout = _config.ConfigItem(
10.0,
"Time to wait for remote data queries (in seconds).",
aliases=["astropy.coordinates.name_resolve.name_resolve_timeout"],
)
allow_internet = _config.ConfigItem(
True, "If False, prevents any attempt to download from Internet."
)
compute_hash_block_size = _config.ConfigItem(
2**16, # 64K
"Block size for computing file hashes.",
)
download_block_size = _config.ConfigItem(
2**16, # 64K
"Number of bytes of remote data to download per step.",
)
delete_temporary_downloads_at_exit = _config.ConfigItem(
True,
"If True, temporary download files created when the cache is "
"inaccessible will be deleted at the end of the python session.",
)
conf = Conf()
|
Conf
|
python
|
sympy__sympy
|
sympy/codegen/fnodes.py
|
{
"start": 18363,
"end": 18470
}
|
class ____(FFunction):
""" Fortran sign intrinsic for double precision arguments. """
nargs = 2
|
dsign
|
python
|
neetcode-gh__leetcode
|
python/0622-design-circular-queue.py
|
{
"start": 0,
"end": 96
}
|
class ____:
def __init__(self, val: int):
self.val = val
self.next = None
|
Node
|
python
|
bokeh__bokeh
|
src/bokeh/models/expressions.py
|
{
"start": 5691,
"end": 6451
}
|
class ____(Expression):
''' An expression for generating arrays by summing different columns from
a ``ColumnDataSource``.
This expression is useful for implementing stacked bar charts at a low
level.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
fields = Seq(String, default=[], help="""
A sequence of fields from a ``ColumnDataSource`` to sum (elementwise). For
example:
.. code-block:: python
Stack(fields=['sales', 'marketing'])
Will compute an array of values (in the browser) by adding the elements
of the ``'sales'`` and ``'marketing'`` columns of a data source.
""")
@abstract
|
Stack
|
python
|
doocs__leetcode
|
solution/1100-1199/1163.Last Substring in Lexicographical Order/Solution.py
|
{
"start": 0,
"end": 408
}
|
class ____:
def lastSubstring(self, s: str) -> str:
i, j, k = 0, 1, 0
while j + k < len(s):
if s[i + k] == s[j + k]:
k += 1
elif s[i + k] < s[j + k]:
i += k + 1
k = 0
if i >= j:
j = i + 1
else:
j += k + 1
k = 0
return s[i:]
|
Solution
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/cover/test_threading.py
|
{
"start": 2464,
"end": 2618
}
|
class ____:
@given(st.integers())
@pytest.mark.parametrize("x", range(2))
def test_a(self, x, i):
pass
|
TestNoDifferingExecutorsHealthCheck
|
python
|
mkdocs__mkdocs
|
mkdocs/tests/utils/templates_tests.py
|
{
"start": 137,
"end": 1801
}
|
class ____(unittest.TestCase):
def test_script_tag(self):
cfg_yaml = dedent(
'''
extra_javascript:
- some_plain_javascript.js
- implicitly_as_module.mjs
- path: explicitly_as_module.mjs
type: module
- path: deferred_plain.js
defer: true
- path: scripts/async_module.mjs
type: module
async: true
- path: 'aaaaaa/"my script".mjs'
type: module
async: true
defer: true
- path: plain.mjs
'''
)
config = load_config(**yaml.safe_load(cfg_yaml))
config.extra_javascript.append('plain_string.mjs')
self.assertEqual(
[
str(templates.script_tag_filter({'page': None, 'base_url': 'here'}, item))
for item in config.extra_javascript
],
[
'<script src="here/some_plain_javascript.js"></script>',
'<script src="here/implicitly_as_module.mjs" type="module"></script>',
'<script src="here/explicitly_as_module.mjs" type="module"></script>',
'<script src="here/deferred_plain.js" defer></script>',
'<script src="here/scripts/async_module.mjs" type="module" async></script>',
'<script src="here/aaaaaa/"my script".mjs" type="module" defer async></script>',
'<script src="here/plain.mjs"></script>',
'<script src="here/plain_string.mjs"></script>',
],
)
|
UtilsTemplatesTests
|
python
|
spack__spack
|
lib/spack/spack/spec.py
|
{
"start": 24319,
"end": 25002
}
|
class ____(lang.DeprecatedProperty):
def __init__(self):
super().__init__(name="compiler")
def factory(self, instance, owner):
if instance.original_spec_format() < 5:
compiler = instance.annotations.compiler_node_attribute
assert compiler is not None, "a compiler spec is expected"
return CompilerSpec(compiler)
for language in ("c", "cxx", "fortran"):
deps = instance.dependencies(virtuals=language)
if deps:
return CompilerSpec(deps[0])
raise AttributeError(f"{instance} has no C, C++, or Fortran compiler")
@lang.lazy_lexicographic_ordering
|
DeprecatedCompilerSpec
|
python
|
numpy__numpy
|
numpy/_core/tests/test_scalar_methods.py
|
{
"start": 5118,
"end": 7268
}
|
class ____:
@pytest.mark.parametrize("cls", [
np.number,
np.integer,
np.inexact,
np.unsignedinteger,
np.signedinteger,
np.floating,
])
def test_abc(self, cls: type[np.number]) -> None:
alias = cls[Any]
assert isinstance(alias, types.GenericAlias)
assert alias.__origin__ is cls
def test_abc_complexfloating(self) -> None:
alias = np.complexfloating[Any, Any]
assert isinstance(alias, types.GenericAlias)
assert alias.__origin__ is np.complexfloating
@pytest.mark.parametrize("arg_len", range(4))
def test_abc_complexfloating_subscript_tuple(self, arg_len: int) -> None:
arg_tup = (Any,) * arg_len
if arg_len in (1, 2):
assert np.complexfloating[arg_tup]
else:
match = f"Too {'few' if arg_len == 0 else 'many'} arguments"
with pytest.raises(TypeError, match=match):
np.complexfloating[arg_tup]
@pytest.mark.parametrize("cls", [np.generic, np.flexible, np.character])
def test_abc_non_numeric(self, cls: type[np.generic]) -> None:
with pytest.raises(TypeError):
cls[Any]
@pytest.mark.parametrize("code", np.typecodes["All"])
def test_concrete(self, code: str) -> None:
cls = np.dtype(code).type
if cls in {np.bool, np.datetime64}:
# these are intentionally subscriptable
assert cls[Any]
else:
with pytest.raises(TypeError):
cls[Any]
@pytest.mark.parametrize("arg_len", range(4))
def test_subscript_tuple(self, arg_len: int) -> None:
arg_tup = (Any,) * arg_len
if arg_len == 1:
assert np.number[arg_tup]
else:
with pytest.raises(TypeError):
np.number[arg_tup]
def test_subscript_scalar(self) -> None:
assert np.number[Any]
@pytest.mark.parametrize("subscript", [Literal[True], Literal[False]])
def test_subscript_bool(self, subscript: Literal[True, False]) -> None:
assert isinstance(np.bool[subscript], types.GenericAlias)
|
TestClassGetItem
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.