language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | google__pytype | pytype/tools/analyze_project/pytype_runner_test.py | {
"start": 1501,
"end": 2080
} | class ____(unittest.TestCase):
"""Test resolved_file_to_module."""
def test_basic(self):
resolved_file = Local('foo/bar.py', 'bar.py', 'bar')
self.assertEqual(
pytype_runner.resolved_file_to_module(resolved_file),
Module('foo/', 'bar.py', 'bar', 'Local'),
)
def test_preserve_init(self):
resolved_file = Local('foo/bar/__init__.py', 'bar/__init__.py', 'bar')
self.assertEqual(
pytype_runner.resolved_file_to_module(resolved_file),
Module('foo/', 'bar/__init__.py', 'bar.__init__', 'Local'),
)
| TestResolvedFileToModule |
python | numba__numba | versioneer.py | {
"start": 22130,
"end": 65126
} | class ____(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
popen_kwargs = {}
if sys.platform == "win32":
# This hides the console window if pythonw.exe is used
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
popen_kwargs["startupinfo"] = startupinfo
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen([command] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None), **popen_kwargs)
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, process.returncode
return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r'\d', r)}
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r'\d', r):
continue
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# GIT_DIR can interfere with correct operation of Versioneer.
# It may be intended to be passed to the Versioneer-versioned project,
# but that should not change where we get our version from.
env = os.environ.copy()
env.pop("GIT_DIR", None)
runner = functools.partial(runner, env=env)
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=not verbose)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(GITS, [
"describe", "--tags", "--dirty", "--always", "--long",
"--match", f"{tag_prefix}[[:digit:]]*"
], cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root)
pieces["distance"] = len(out.split()) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def pep440_split_post(ver):
"""Split pep440 version string at the post-release segment.
Returns the release segments before the post-release and the
post-release version number (or -1 if no post-release segment is present).
"""
vc = str.split(ver, ".post")
return vc[0], int(vc[1] or 0) if len(vc) == 2 else None
def render_pep440_pre(pieces):
"""TAG[.postN.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
if pieces["distance"]:
# update the post release segment
tag_version, post_version = pep440_split_post(pieces["closest-tag"])
rendered = tag_version
if post_version is not None:
rendered += ".post%%d.dev%%d" %% (post_version + 1, pieces["distance"])
else:
rendered += ".post0.dev%%d" %% (pieces["distance"])
else:
# no commits, use the tag as the version
rendered = pieces["closest-tag"]
else:
# exception #1
rendered = "0.post0.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_post_branch(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .
The ".dev0" means not master branch.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-branch":
rendered = render_pep440_branch(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-post-branch":
rendered = render_pep440_post_branch(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for _ in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r'\d', r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r'\d', r):
continue
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# GIT_DIR can interfere with correct operation of Versioneer.
# It may be intended to be passed to the Versioneer-versioned project,
# but that should not change where we get our version from.
env = os.environ.copy()
env.pop("GIT_DIR", None)
runner = functools.partial(runner, env=env)
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=not verbose)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(GITS, [
"describe", "--tags", "--dirty", "--always", "--long",
"--match", f"{tag_prefix}[[:digit:]]*"
], cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root)
pieces["distance"] = len(out.split()) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def do_vcs_install(versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [versionfile_source]
if ipy:
files.append(ipy)
if "VERSIONEER_PEP518" not in globals():
try:
my_path = __file__
if my_path.endswith((".pyc", ".pyo")):
my_path = os.path.splitext(my_path)[0] + ".py"
versioneer_file = os.path.relpath(my_path)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
with open(".gitattributes", "r") as fobj:
for line in fobj:
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
break
except OSError:
pass
if not present:
with open(".gitattributes", "a+") as fobj:
fobj.write(f"{versionfile_source} export-subst\n")
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.28) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except OSError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def pep440_split_post(ver):
"""Split pep440 version string at the post-release segment.
Returns the release segments before the post-release and the
post-release version number (or -1 if no post-release segment is present).
"""
vc = str.split(ver, ".post")
return vc[0], int(vc[1] or 0) if len(vc) == 2 else None
def render_pep440_pre(pieces):
"""TAG[.postN.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
if pieces["distance"]:
# update the post release segment
tag_version, post_version = pep440_split_post(pieces["closest-tag"])
rendered = tag_version
if post_version is not None:
rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"])
else:
rendered += ".post0.dev%d" % (pieces["distance"])
else:
# no commits, use the tag as the version
rendered = pieces["closest-tag"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # Exception #1: no tag reachable from this commit.
        out = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            out += ".dev0"
        return out + "+g%s" % pieces["short"]
    out = tag
    if pieces["distance"] or pieces["dirty"]:
        out += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            out += ".dev0"
        out += plus_or_dot(pieces) + "g%s" % pieces["short"]
    return out
def render_pep440_post_branch(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .

    The ".dev0" means not master branch.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    dirty = pieces["dirty"]
    if tag:
        out = tag
        if pieces["distance"] or dirty:
            out += ".post%d" % pieces["distance"]
            if pieces["branch"] != "master":
                out += ".dev0"
            out += plus_or_dot(pieces) + "g%s" % pieces["short"]
            if dirty:
                out += ".dirty"
        return out
    # Exception #1: untagged history.
    out = "0.post%d" % pieces["distance"]
    if pieces["branch"] != "master":
        out += ".dev0"
    out += "+g%s" % pieces["short"]
    if dirty:
        out += ".dirty"
    return out
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag and not (pieces["distance"] or pieces["dirty"]):
        # Exactly on a clean tag: the tag alone is the version.
        return tag
    # Either untagged (exception #1, base "0") or commits/dirt past the tag.
    suffix = ".post%d" % pieces["distance"]
    if pieces["dirty"]:
        suffix += ".dev0"
    return (tag or "0") + suffix
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = tag
        if pieces["distance"]:
            out = "%s-%d-g%s" % (out, pieces["distance"], pieces["short"])
    else:
        # Exception #1: bare short hash, no 'g' prefix.
        out = pieces["short"]
    if pieces["dirty"]:
        out += "-dirty"
    return out
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        # Always append distance and hash, even at distance 0.
        out = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # Exception #1: bare short hash, no 'g' prefix.
        out = pieces["short"]
    if pieces["dirty"]:
        out += "-dirty"
    return out
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # An error during extraction: surface it instead of a version.
        return {
            "version": "unknown",
            "full-revisionid": pieces.get("long"),
            "dirty": None,
            "error": pieces["error"],
            "date": None,
        }

    if not style or style == "default":
        style = "pep440"  # the default

    # Dispatch table instead of an if/elif ladder; built after the error
    # path so the early return above never touches it.
    renderers = {
        "pep440": render_pep440,
        "pep440-branch": render_pep440_branch,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-post-branch": render_pep440_post_branch,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)

    return {
        "version": rendered,
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }
| NotThisMethod |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/asm.py | {
"start": 458,
"end": 703
} | class ____(c_preproc.c_parser):
def filter_comments(self, node):
code = node.read()
code = c_preproc.re_nl.sub('', code)
code = c_preproc.re_cpp.sub(c_preproc.repl, code)
return re_lines.findall(code)
| asm_parser |
python | PrefectHQ__prefect | src/prefect/filesystems.py | {
"start": 12099,
"end": 18280
} | class ____(WritableFileSystem, WritableDeploymentStorage):
"""
Store data as a file on a remote file system.
Supports any remote file system supported by `fsspec`. The file system is specified
using a protocol. For example, "s3://my-bucket/my-folder/" will use S3.
Example:
Load stored remote file system config:
```python
from prefect.filesystems import RemoteFileSystem
remote_file_system_block = RemoteFileSystem.load("BLOCK_NAME")
```
"""
_block_type_name = "Remote File System"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/e86b41bc0f9c99ba9489abeee83433b43d5c9365-48x48.png"
_documentation_url = (
"https://docs.prefect.io/latest/develop/results#specifying-a-default-filesystem"
)
basepath: str = Field(
default=...,
description="Default path for this block to write to.",
examples=["s3://my-bucket/my-folder/"],
)
settings: Dict[str, Any] = Field(
default_factory=dict,
description="Additional settings to pass through to fsspec.",
)
# Cache for the configured fsspec file system used for access
_filesystem: fsspec.AbstractFileSystem = None
@field_validator("basepath")
def check_basepath(cls, value: str) -> str:
return validate_basepath(value)
def _resolve_path(self, path: str) -> str:
base_scheme, base_netloc, base_urlpath, _, _ = urllib.parse.urlsplit(
self.basepath
)
scheme, netloc, urlpath, _, _ = urllib.parse.urlsplit(path)
# Confirm that absolute paths are valid
if scheme:
if scheme != base_scheme:
raise ValueError(
f"Path {path!r} with scheme {scheme!r} must use the same scheme as"
f" the base path {base_scheme!r}."
)
if netloc:
if (netloc != base_netloc) or not urlpath.startswith(base_urlpath):
raise ValueError(
f"Path {path!r} is outside of the base path {self.basepath!r}."
)
return f"{self.basepath.rstrip('/')}/{urlpath.lstrip('/')}"
@sync_compatible
async def get_directory(
self, from_path: Optional[str] = None, local_path: Optional[str] = None
) -> None:
"""
Downloads a directory from a given remote path to a local directory.
Defaults to downloading the entire contents of the block's basepath to the current working directory.
"""
if from_path is None:
from_path = str(self.basepath)
else:
from_path = self._resolve_path(from_path)
if local_path is None:
local_path = Path(".").absolute()
# validate that from_path has a trailing slash for proper fsspec behavior across versions
if not from_path.endswith("/"):
from_path += "/"
return self.filesystem.get(from_path, local_path, recursive=True)
@sync_compatible
async def put_directory(
self,
local_path: Optional[str] = None,
to_path: Optional[str] = None,
ignore_file: Optional[str] = None,
overwrite: bool = True,
) -> int:
"""
Uploads a directory from a given local path to a remote directory.
Defaults to uploading the entire contents of the current working directory to the block's basepath.
"""
if to_path is None:
to_path = str(self.basepath)
else:
to_path = self._resolve_path(to_path)
if local_path is None:
local_path = "."
included_files = None
if ignore_file:
with open(ignore_file) as f:
ignore_patterns = f.readlines()
included_files = filter_files(
local_path, ignore_patterns, include_dirs=True
)
counter = 0
for f in Path(local_path).rglob("*"):
relative_path = f.relative_to(local_path)
if included_files and str(relative_path) not in included_files:
continue
if to_path.endswith("/"):
fpath = to_path + relative_path.as_posix()
else:
fpath = to_path + "/" + relative_path.as_posix()
if f.is_dir():
pass
else:
f = f.as_posix()
if overwrite:
self.filesystem.put_file(f, fpath, overwrite=True)
else:
self.filesystem.put_file(f, fpath)
counter += 1
return counter
@sync_compatible
async def read_path(self, path: str) -> bytes:
path = self._resolve_path(path)
with self.filesystem.open(path, "rb") as file:
content = await run_sync_in_worker_thread(file.read)
return content
@sync_compatible
async def write_path(self, path: str, content: bytes) -> str:
path = self._resolve_path(path)
dirpath = path[: path.rindex("/")]
if self.basepath.startswith("smb://"):
parsed = urllib.parse.urlparse(dirpath)
dirpath = parsed.path
self.filesystem.makedirs(dirpath, exist_ok=True)
with self.filesystem.open(path, "wb") as file:
await run_sync_in_worker_thread(file.write, content)
return path
@property
def filesystem(self) -> fsspec.AbstractFileSystem:
if not self._filesystem:
scheme, _, _, _, _ = urllib.parse.urlsplit(self.basepath)
try:
self._filesystem = fsspec.filesystem(scheme, **self.settings)
except ImportError as exc:
# The path is a remote file system that uses a lib that is not installed
raise RuntimeError(
f"File system created with scheme {scheme!r} from base path "
f"{self.basepath!r} could not be created. "
"You are likely missing a Python module required to use the given "
"storage protocol."
) from exc
return self._filesystem
| RemoteFileSystem |
python | getsentry__sentry | tests/acceptance/test_oauth_authorize.py | {
"start": 117,
"end": 566
} | class ____(AcceptanceTestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user("foo@example.com", is_superuser=True)
self.login_as(self.user)
def test_simple(self) -> None:
self.browser.get("/debug/oauth/authorize/")
self.browser.wait_until_not(".loading")
self.browser.get("/debug/oauth/authorize/error/")
self.browser.wait_until_not(".loading")
| OAuthAuthorizeTest |
python | google__jax | jax/experimental/jax2tf/examples/keras_reuse_main_test.py | {
"start": 945,
"end": 1702
} | class ____(tf_test_util.JaxToTfTestCase):
def setUp(self):
super().setUp()
FLAGS.model_path = os.path.join(absltest.get_default_test_tmpdir(),
"saved_models")
FLAGS.num_epochs = 1
FLAGS.test_savedmodel = True
FLAGS.mock_data = True
FLAGS.show_images = False
FLAGS.serving_batch_size = 1
@parameterized.named_parameters(
dict(testcase_name=f"_{model}", model=model)
for model in ["mnist_pure_jax", "mnist_flax"])
@jtu.ignore_warning(message="the imp module is deprecated")
def test_keras_reuse(self, model="mnist_pure_jax"):
FLAGS.model = model
keras_reuse_main.main(None)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| KerasReuseMainTest |
python | doocs__leetcode | solution/1200-1299/1208.Get Equal Substrings Within Budget/Solution3.py | {
"start": 0,
"end": 307
} | class ____:
def equalSubstring(self, s: str, t: str, maxCost: int) -> int:
cost = l = 0
for a, b in zip(s, t):
cost += abs(ord(a) - ord(b))
if cost > maxCost:
cost -= abs(ord(s[l]) - ord(t[l]))
l += 1
return len(s) - l
| Solution |
python | conda__conda | conda/base/constants.py | {
"start": 5028,
"end": 5181
} | class ____(Enum):
disabled = "disabled"
warn = "warn"
enabled = "enabled"
def __str__(self) -> str:
return self.value
| SafetyChecks |
python | pypa__pip | src/pip/_vendor/urllib3/exceptions.py | {
"start": 964,
"end": 1071
} | class ____(HTTPError):
"""Raised when SSL certificate fails in an HTTPS connection."""
pass
| SSLError |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/lexers/pygments.py | {
"start": 4517,
"end": 11928
} | class ____(Lexer):
"""
Lexer that calls a pygments lexer.
Example::
from pygments.lexers.html import HtmlLexer
lexer = PygmentsLexer(HtmlLexer)
Note: Don't forget to also load a Pygments compatible style. E.g.::
from prompt_toolkit.styles.from_pygments import style_from_pygments_cls
from pygments.styles import get_style_by_name
style = style_from_pygments_cls(get_style_by_name('monokai'))
:param pygments_lexer_cls: A `Lexer` from Pygments.
:param sync_from_start: Start lexing at the start of the document. This
will always give the best results, but it will be slow for bigger
documents. (When the last part of the document is display, then the
whole document will be lexed by Pygments on every key stroke.) It is
recommended to disable this for inputs that are expected to be more
than 1,000 lines.
:param syntax_sync: `SyntaxSync` object.
"""
# Minimum amount of lines to go backwards when starting the parser.
# This is important when the lines are retrieved in reverse order, or when
# scrolling upwards. (Due to the complexity of calculating the vertical
# scroll offset in the `Window` class, lines are not always retrieved in
# order.)
MIN_LINES_BACKWARDS = 50
# When a parser was started this amount of lines back, read the parser
# until we get the current line. Otherwise, start a new parser.
# (This should probably be bigger than MIN_LINES_BACKWARDS.)
REUSE_GENERATOR_MAX_DISTANCE = 100
def __init__(
self,
pygments_lexer_cls: type[PygmentsLexerCls],
sync_from_start: FilterOrBool = True,
syntax_sync: SyntaxSync | None = None,
) -> None:
self.pygments_lexer_cls = pygments_lexer_cls
self.sync_from_start = to_filter(sync_from_start)
# Instantiate the Pygments lexer.
self.pygments_lexer = pygments_lexer_cls(
stripnl=False, stripall=False, ensurenl=False
)
# Create syntax sync instance.
self.syntax_sync = syntax_sync or RegexSync.from_pygments_lexer_cls(
pygments_lexer_cls
)
@classmethod
def from_filename(
cls, filename: str, sync_from_start: FilterOrBool = True
) -> Lexer:
"""
Create a `Lexer` from a filename.
"""
# Inline imports: the Pygments dependency is optional!
from pygments.lexers import get_lexer_for_filename
from pygments.util import ClassNotFound
try:
pygments_lexer = get_lexer_for_filename(filename)
except ClassNotFound:
return SimpleLexer()
else:
return cls(pygments_lexer.__class__, sync_from_start=sync_from_start)
def lex_document(self, document: Document) -> Callable[[int], StyleAndTextTuples]:
"""
Create a lexer function that takes a line number and returns the list
of (style_str, text) tuples as the Pygments lexer returns for that line.
"""
LineGenerator = Generator[Tuple[int, StyleAndTextTuples], None, None]
# Cache of already lexed lines.
cache: dict[int, StyleAndTextTuples] = {}
# Pygments generators that are currently lexing.
# Map lexer generator to the line number.
line_generators: dict[LineGenerator, int] = {}
def get_syntax_sync() -> SyntaxSync:
"The Syntax synchronization object that we currently use."
if self.sync_from_start():
return SyncFromStart()
else:
return self.syntax_sync
def find_closest_generator(i: int) -> LineGenerator | None:
"Return a generator close to line 'i', or None if none was found."
for generator, lineno in line_generators.items():
if lineno < i and i - lineno < self.REUSE_GENERATOR_MAX_DISTANCE:
return generator
return None
def create_line_generator(start_lineno: int, column: int = 0) -> LineGenerator:
"""
Create a generator that yields the lexed lines.
Each iteration it yields a (line_number, [(style_str, text), ...]) tuple.
"""
def get_text_fragments() -> Iterable[tuple[str, str]]:
text = "\n".join(document.lines[start_lineno:])[column:]
# We call `get_text_fragments_unprocessed`, because `get_tokens` will
# still replace \r\n and \r by \n. (We don't want that,
# Pygments should return exactly the same amount of text, as we
# have given as input.)
for _, t, v in self.pygments_lexer.get_tokens_unprocessed(text):
# Turn Pygments `Token` object into prompt_toolkit style
# strings.
yield _token_cache[t], v
yield from enumerate(split_lines(list(get_text_fragments())), start_lineno)
def get_generator(i: int) -> LineGenerator:
"""
Find an already started generator that is close, or create a new one.
"""
# Find closest line generator.
generator = find_closest_generator(i)
if generator:
return generator
# No generator found. Determine starting point for the syntax
# synchronization first.
# Go at least x lines back. (Make scrolling upwards more
# efficient.)
i = max(0, i - self.MIN_LINES_BACKWARDS)
if i == 0:
row = 0
column = 0
else:
row, column = get_syntax_sync().get_sync_start_position(document, i)
# Find generator close to this point, or otherwise create a new one.
generator = find_closest_generator(i)
if generator:
return generator
else:
generator = create_line_generator(row, column)
# If the column is not 0, ignore the first line. (Which is
# incomplete. This happens when the synchronization algorithm tells
# us to start parsing in the middle of a line.)
if column:
next(generator)
row += 1
line_generators[generator] = row
return generator
def get_line(i: int) -> StyleAndTextTuples:
"Return the tokens for a given line number."
try:
return cache[i]
except KeyError:
generator = get_generator(i)
# Exhaust the generator, until we find the requested line.
for num, line in generator:
cache[num] = line
if num == i:
line_generators[generator] = i
# Remove the next item from the cache.
# (It could happen that it's already there, because of
# another generator that started filling these lines,
# but we want to synchronize these lines with the
# current lexer's state.)
if num + 1 in cache:
del cache[num + 1]
return cache[num]
return []
return get_line
| PygmentsLexer |
python | apache__airflow | providers/fab/src/airflow/providers/fab/www/api_connexion/exceptions.py | {
"start": 5252,
"end": 5783
} | class ____(ProblemException):
"""Returns a response body and status code for HTTP 500 exception."""
def __init__(
self,
title: str = "Internal Server Error",
detail: str | None = None,
headers: dict | None = None,
**kwargs: Any,
) -> None:
super().__init__(
status=HTTPStatus.INTERNAL_SERVER_ERROR,
type=EXCEPTIONS_LINK_MAP[500],
title=title,
detail=detail,
headers=headers,
**kwargs,
)
| Unknown |
python | Netflix__metaflow | test/unit/inheritance/flows/comprehensive_diamond_flow.py | {
"start": 461,
"end": 1796
} | class ____(BaseC):
"""
Comprehensive diamond inheritance flow.
Verifies:
- MRO correctly resolves diamond pattern
- Parameters from all branches accessible (param_a, param_b, param_c, final_param)
- Configs from all branches accessible (config_a, config_b, config_c)
- Steps from BaseA execute correctly
- Computations use values from all branches
"""
final_param = Parameter("final_param", help="Final parameter", default="complete")
@step
def end(self):
"""End step storing all verification artifacts"""
# Store all parameters
self.result_param_a = self.param_a
self.result_param_b = self.param_b
self.result_param_c = self.param_c
self.result_final_param = self.final_param
# Store all configs
self.result_config_a = dict(self.config_a)
self.result_config_b = dict(self.config_b)
self.result_config_c = dict(self.config_c)
# Store computed value
self.result_final = self.processed
print(f"Final result: {self.result_final}")
print(f"Final param: {self.result_final_param}")
print(f"Pathspec: {current.pathspec}")
print("ComprehensiveDiamondFlow completed successfully")
if __name__ == "__main__":
ComprehensiveDiamondFlow()
| ComprehensiveDiamondFlow |
python | spack__spack | lib/spack/spack/operating_systems/_operating_system.py | {
"start": 199,
"end": 1434
} | class ____:
"""Base class for all the Operating Systems.
On a multiple architecture machine, the architecture spec field can be set to
build a package against any target and operating system that is present on the
platform. On Cray platforms or any other architecture that has different front
and back end environments, the operating system will determine the method of
compiler detection.
There are two different types of compiler detection:
1. Through the $PATH env variable (front-end detection)
2. Through the module system. (back-end detection)
Depending on which operating system is specified, the compiler will be detected
using one of those methods.
For platforms such as linux and darwin, the operating system is autodetected.
"""
def __init__(self, name, version):
self.name = name.replace("-", "_")
self.version = str(version).replace("-", "_")
def __str__(self):
return "%s%s" % (self.name, self.version)
def __repr__(self):
return self.__str__()
def _cmp_iter(self):
yield self.name
yield self.version
def to_dict(self):
return {"name": self.name, "version": self.version}
| OperatingSystem |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/ddl.py | {
"start": 28423,
"end": 29196
} | class ____(TableDropDDL):
"""'DROP VIEW' construct.
.. versionadded:: 2.1 the :class:`.DropView` construct became public
and was renamed from ``_DropView``.
"""
__visit_name__ = "drop_view"
materialized: bool
"""Boolean flag indicating if this is a materialized view."""
def __init__(
self,
element: Table,
*,
if_exists: bool = False,
materialized: bool = False,
) -> None:
super().__init__(element, if_exists=if_exists)
self.materialized = materialized
def to_metadata(self, metadata: MetaData, table: Table) -> Self:
new = self.__class__.__new__(self.__class__)
new.__dict__.update(self.__dict__)
new.element = table
return new
| DropView |
python | Farama-Foundation__Gymnasium | gymnasium/wrappers/stateful_reward.py | {
"start": 412,
"end": 5121
} | class ____(
gym.Wrapper[ObsType, ActType, ObsType, ActType], gym.utils.RecordConstructorArgs
):
r"""Normalizes immediate rewards such that their exponential moving average has an approximately fixed variance.
The property `_update_running_mean` allows to freeze/continue the running mean calculation of the reward
statistics. If `True` (default), the `RunningMeanStd` will get updated every time `self.normalize()` is called.
If False, the calculated statistics are used but not updated anymore; this may be used during evaluation.
A vector version of the wrapper exists :class:`gymnasium.wrappers.vector.NormalizeReward`.
Note:
In v0.27, NormalizeReward was updated as the forward discounted reward estimate was incorrectly computed in Gym v0.25+.
For more detail, read [#3154](https://github.com/openai/gym/pull/3152).
Note:
The scaling depends on past trajectories and rewards will not be scaled correctly if the wrapper was newly
instantiated or the policy was changed recently.
Example without the normalize reward wrapper:
>>> import numpy as np
>>> import gymnasium as gym
>>> env = gym.make("MountainCarContinuous-v0")
>>> _ = env.reset(seed=123)
>>> _ = env.action_space.seed(123)
>>> episode_rewards = []
>>> terminated, truncated = False, False
>>> while not (terminated or truncated):
... observation, reward, terminated, truncated, info = env.step(env.action_space.sample())
... episode_rewards.append(reward)
...
>>> env.close()
>>> np.var(episode_rewards)
np.float64(0.0008876301247721108)
Example with the normalize reward wrapper:
>>> import numpy as np
>>> import gymnasium as gym
>>> env = gym.make("MountainCarContinuous-v0")
>>> env = NormalizeReward(env, gamma=0.99, epsilon=1e-8)
>>> _ = env.reset(seed=123)
>>> _ = env.action_space.seed(123)
>>> episode_rewards = []
>>> terminated, truncated = False, False
>>> while not (terminated or truncated):
... observation, reward, terminated, truncated, info = env.step(env.action_space.sample())
... episode_rewards.append(reward)
...
>>> env.close()
>>> np.var(episode_rewards)
np.float64(0.010162116476634746)
Change logs:
* v0.21.0 - Initially added
* v1.0.0 - Add `update_running_mean` attribute to allow disabling of updating the running mean / standard
"""
def __init__(
self,
env: gym.Env[ObsType, ActType],
gamma: float = 0.99,
epsilon: float = 1e-8,
):
"""This wrapper will normalize immediate rewards s.t. their exponential moving average has an approximately fixed variance.
Args:
env (env): The environment to apply the wrapper
epsilon (float): A stability parameter
gamma (float): The discount factor that is used in the exponential moving average.
"""
gym.utils.RecordConstructorArgs.__init__(self, gamma=gamma, epsilon=epsilon)
gym.Wrapper.__init__(self, env)
self.return_rms = RunningMeanStd(shape=())
self.discounted_reward = np.array([0.0])
self.gamma = gamma
self.epsilon = epsilon
self._update_running_mean = True
@property
def update_running_mean(self) -> bool:
"""Property to freeze/continue the running mean calculation of the reward statistics."""
return self._update_running_mean
@update_running_mean.setter
def update_running_mean(self, setting: bool):
"""Sets the property to freeze/continue the running mean calculation of the reward statistics."""
self._update_running_mean = setting
def step(
self, action: ActType
) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:
"""Steps through the environment, normalizing the reward returned."""
obs, reward, terminated, truncated, info = super().step(action)
# Using the `discounted_reward` rather than `reward` makes no sense but for backward compatibility, it is being kept
self.discounted_reward = self.discounted_reward * self.gamma * (
1 - terminated
) + float(reward)
if self._update_running_mean:
self.return_rms.update(self.discounted_reward)
# We don't (reward - self.return_rms.mean) see https://github.com/openai/baselines/issues/538
normalized_reward = reward / np.sqrt(self.return_rms.var + self.epsilon)
return obs, normalized_reward, terminated, truncated, info
| NormalizeReward |
python | django__django | django/db/models/functions/datetime.py | {
"start": 1010,
"end": 4323
} | class ____(TimezoneMixin, Transform):
lookup_name = None
output_field = IntegerField()
def __init__(self, expression, lookup_name=None, tzinfo=None, **extra):
if self.lookup_name is None:
self.lookup_name = lookup_name
if self.lookup_name is None:
raise ValueError("lookup_name must be provided")
self.tzinfo = tzinfo
super().__init__(expression, **extra)
def as_sql(self, compiler, connection):
sql, params = compiler.compile(self.lhs)
lhs_output_field = self.lhs.output_field
if isinstance(lhs_output_field, DateTimeField):
tzname = self.get_tzname()
sql, params = connection.ops.datetime_extract_sql(
self.lookup_name, sql, tuple(params), tzname
)
elif self.tzinfo is not None:
raise ValueError("tzinfo can only be used with DateTimeField.")
elif isinstance(lhs_output_field, DateField):
sql, params = connection.ops.date_extract_sql(
self.lookup_name, sql, tuple(params)
)
elif isinstance(lhs_output_field, TimeField):
sql, params = connection.ops.time_extract_sql(
self.lookup_name, sql, tuple(params)
)
elif isinstance(lhs_output_field, DurationField):
if not connection.features.has_native_duration_field:
raise ValueError(
"Extract requires native DurationField database support."
)
sql, params = connection.ops.time_extract_sql(
self.lookup_name, sql, tuple(params)
)
else:
# resolve_expression has already validated the output_field so this
# assert should never be hit.
assert False, "Tried to Extract from an invalid type."
return sql, params
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
copy = super().resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
field = getattr(copy.lhs, "output_field", None)
if field is None:
return copy
if not isinstance(field, (DateField, DateTimeField, TimeField, DurationField)):
raise ValueError(
"Extract input expression must be DateField, DateTimeField, "
"TimeField, or DurationField."
)
# Passing dates to functions expecting datetimes is most likely a
# mistake.
if type(field) is DateField and copy.lookup_name in (
"hour",
"minute",
"second",
):
raise ValueError(
"Cannot extract time component '%s' from DateField '%s'."
% (copy.lookup_name, field.name)
)
if isinstance(field, DurationField) and copy.lookup_name in (
"year",
"iso_year",
"month",
"week",
"week_day",
"iso_week_day",
"quarter",
):
raise ValueError(
"Cannot extract component '%s' from DurationField '%s'."
% (copy.lookup_name, field.name)
)
return copy
| Extract |
python | dagster-io__dagster | python_modules/libraries/dagster-dbt/dagster_dbt/core/dbt_cli_invocation.py | {
"start": 1655,
"end": 2059
} | class ____(NamedTuple):
"""Relation metadata queried from a database."""
name: str
columns: list[BaseColumn]
def _get_relation_from_adapter(adapter: BaseAdapter, relation_key: RelationKey) -> BaseRelation:
return adapter.Relation.create(
database=relation_key.database,
schema=relation_key.schema,
identifier=relation_key.identifier,
)
@dataclass
| RelationData |
python | django__django | tests/admin_views/tests.py | {
"start": 220243,
"end": 235199
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="super@example.com"
)
cls.collector = Collector.objects.create(pk=1, name="John Fowles")
def setUp(self):
self.post_data = {
"name": "Test Name",
"widget_set-TOTAL_FORMS": "3",
"widget_set-INITIAL_FORMS": "0",
"widget_set-MAX_NUM_FORMS": "0",
"widget_set-0-id": "",
"widget_set-0-owner": "1",
"widget_set-0-name": "",
"widget_set-1-id": "",
"widget_set-1-owner": "1",
"widget_set-1-name": "",
"widget_set-2-id": "",
"widget_set-2-owner": "1",
"widget_set-2-name": "",
"doohickey_set-TOTAL_FORMS": "3",
"doohickey_set-INITIAL_FORMS": "0",
"doohickey_set-MAX_NUM_FORMS": "0",
"doohickey_set-0-owner": "1",
"doohickey_set-0-code": "",
"doohickey_set-0-name": "",
"doohickey_set-1-owner": "1",
"doohickey_set-1-code": "",
"doohickey_set-1-name": "",
"doohickey_set-2-owner": "1",
"doohickey_set-2-code": "",
"doohickey_set-2-name": "",
"grommet_set-TOTAL_FORMS": "3",
"grommet_set-INITIAL_FORMS": "0",
"grommet_set-MAX_NUM_FORMS": "0",
"grommet_set-0-code": "",
"grommet_set-0-owner": "1",
"grommet_set-0-name": "",
"grommet_set-1-code": "",
"grommet_set-1-owner": "1",
"grommet_set-1-name": "",
"grommet_set-2-code": "",
"grommet_set-2-owner": "1",
"grommet_set-2-name": "",
"whatsit_set-TOTAL_FORMS": "3",
"whatsit_set-INITIAL_FORMS": "0",
"whatsit_set-MAX_NUM_FORMS": "0",
"whatsit_set-0-owner": "1",
"whatsit_set-0-index": "",
"whatsit_set-0-name": "",
"whatsit_set-1-owner": "1",
"whatsit_set-1-index": "",
"whatsit_set-1-name": "",
"whatsit_set-2-owner": "1",
"whatsit_set-2-index": "",
"whatsit_set-2-name": "",
"fancydoodad_set-TOTAL_FORMS": "3",
"fancydoodad_set-INITIAL_FORMS": "0",
"fancydoodad_set-MAX_NUM_FORMS": "0",
"fancydoodad_set-0-doodad_ptr": "",
"fancydoodad_set-0-owner": "1",
"fancydoodad_set-0-name": "",
"fancydoodad_set-0-expensive": "on",
"fancydoodad_set-1-doodad_ptr": "",
"fancydoodad_set-1-owner": "1",
"fancydoodad_set-1-name": "",
"fancydoodad_set-1-expensive": "on",
"fancydoodad_set-2-doodad_ptr": "",
"fancydoodad_set-2-owner": "1",
"fancydoodad_set-2-name": "",
"fancydoodad_set-2-expensive": "on",
"category_set-TOTAL_FORMS": "3",
"category_set-INITIAL_FORMS": "0",
"category_set-MAX_NUM_FORMS": "0",
"category_set-0-order": "",
"category_set-0-id": "",
"category_set-0-collector": "1",
"category_set-1-order": "",
"category_set-1-id": "",
"category_set-1-collector": "1",
"category_set-2-order": "",
"category_set-2-id": "",
"category_set-2-collector": "1",
}
self.client.force_login(self.superuser)
def test_simple_inline(self):
"A simple model can be saved as inlines"
# First add a new inline
self.post_data["widget_set-0-name"] = "Widget 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1")
widget_id = Widget.objects.all()[0].id
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="widget_set-0-id"')
# No file or image fields, no enctype on the forms
self.assertIs(response.context["has_file_field"], False)
self.assertNotContains(response, MULTIPART_ENCTYPE)
# Now resave that inline
self.post_data["widget_set-INITIAL_FORMS"] = "1"
self.post_data["widget_set-0-id"] = str(widget_id)
self.post_data["widget_set-0-name"] = "Widget 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1")
# Now modify that inline
self.post_data["widget_set-INITIAL_FORMS"] = "1"
self.post_data["widget_set-0-id"] = str(widget_id)
self.post_data["widget_set-0-name"] = "Widget 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1 Updated")
def test_explicit_autofield_inline(self):
"""
A model with an explicit autofield primary key can be saved as inlines.
"""
# First add a new inline
self.post_data["grommet_set-0-name"] = "Grommet 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1")
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="grommet_set-0-code"')
# Now resave that inline
self.post_data["grommet_set-INITIAL_FORMS"] = "1"
self.post_data["grommet_set-0-code"] = str(Grommet.objects.all()[0].code)
self.post_data["grommet_set-0-name"] = "Grommet 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1")
# Now modify that inline
self.post_data["grommet_set-INITIAL_FORMS"] = "1"
self.post_data["grommet_set-0-code"] = str(Grommet.objects.all()[0].code)
self.post_data["grommet_set-0-name"] = "Grommet 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1 Updated")
def test_char_pk_inline(self):
"""
A model with a character PK can be saved as inlines. Regression for
#10992
"""
# First add a new inline
self.post_data["doohickey_set-0-code"] = "DH1"
self.post_data["doohickey_set-0-name"] = "Doohickey 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1")
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="doohickey_set-0-code"')
# Now resave that inline
self.post_data["doohickey_set-INITIAL_FORMS"] = "1"
self.post_data["doohickey_set-0-code"] = "DH1"
self.post_data["doohickey_set-0-name"] = "Doohickey 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1")
# Now modify that inline
self.post_data["doohickey_set-INITIAL_FORMS"] = "1"
self.post_data["doohickey_set-0-code"] = "DH1"
self.post_data["doohickey_set-0-name"] = "Doohickey 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1 Updated")
def test_integer_pk_inline(self):
"""
A model with an integer PK can be saved as inlines. Regression for
#10992
"""
# First add a new inline
self.post_data["whatsit_set-0-index"] = "42"
self.post_data["whatsit_set-0-name"] = "Whatsit 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1")
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="whatsit_set-0-index"')
# Now resave that inline
self.post_data["whatsit_set-INITIAL_FORMS"] = "1"
self.post_data["whatsit_set-0-index"] = "42"
self.post_data["whatsit_set-0-name"] = "Whatsit 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1")
# Now modify that inline
self.post_data["whatsit_set-INITIAL_FORMS"] = "1"
self.post_data["whatsit_set-0-index"] = "42"
self.post_data["whatsit_set-0-name"] = "Whatsit 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1 Updated")
def test_inherited_inline(self):
"An inherited model can be saved as inlines. Regression for #11042"
# First add a new inline
self.post_data["fancydoodad_set-0-name"] = "Fancy Doodad 1"
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1")
doodad_pk = FancyDoodad.objects.all()[0].pk
# The PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="fancydoodad_set-0-doodad_ptr"')
# Now resave that inline
self.post_data["fancydoodad_set-INITIAL_FORMS"] = "1"
self.post_data["fancydoodad_set-0-doodad_ptr"] = str(doodad_pk)
self.post_data["fancydoodad_set-0-name"] = "Fancy Doodad 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1")
# Now modify that inline
self.post_data["fancydoodad_set-INITIAL_FORMS"] = "1"
self.post_data["fancydoodad_set-0-doodad_ptr"] = str(doodad_pk)
self.post_data["fancydoodad_set-0-name"] = "Fancy Doodad 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1 Updated")
def test_ordered_inline(self):
"""
An inline with an editable ordering fields is updated correctly.
"""
# Create some objects with an initial ordering
Category.objects.create(id=1, order=1, collector=self.collector)
Category.objects.create(id=2, order=2, collector=self.collector)
Category.objects.create(id=3, order=0, collector=self.collector)
Category.objects.create(id=4, order=0, collector=self.collector)
# NB: The order values must be changed so that the items are reordered.
self.post_data.update(
{
"name": "Frederick Clegg",
"category_set-TOTAL_FORMS": "7",
"category_set-INITIAL_FORMS": "4",
"category_set-MAX_NUM_FORMS": "0",
"category_set-0-order": "14",
"category_set-0-id": "1",
"category_set-0-collector": "1",
"category_set-1-order": "13",
"category_set-1-id": "2",
"category_set-1-collector": "1",
"category_set-2-order": "1",
"category_set-2-id": "3",
"category_set-2-collector": "1",
"category_set-3-order": "0",
"category_set-3-id": "4",
"category_set-3-collector": "1",
"category_set-4-order": "",
"category_set-4-id": "",
"category_set-4-collector": "1",
"category_set-5-order": "",
"category_set-5-id": "",
"category_set-5-collector": "1",
"category_set-6-order": "",
"category_set-6-id": "",
"category_set-6-collector": "1",
}
)
collector_url = reverse(
"admin:admin_views_collector_change", args=(self.collector.pk,)
)
response = self.client.post(collector_url, self.post_data)
# Successful post will redirect
self.assertEqual(response.status_code, 302)
# The order values have been applied to the right objects
self.assertEqual(self.collector.category_set.count(), 4)
self.assertEqual(Category.objects.get(id=1).order, 14)
self.assertEqual(Category.objects.get(id=2).order, 13)
self.assertEqual(Category.objects.get(id=3).order, 1)
self.assertEqual(Category.objects.get(id=4).order, 0)
@override_settings(ROOT_URLCONF="admin_views.urls")
| AdminInlineTests |
python | python-openxml__python-docx | tests/oxml/unitdata/section.py | {
"start": 344,
"end": 483
} | class ____(BaseBuilder):
__tag__ = "w:pgSz"
__nspfxs__ = ("w",)
__attrs__ = ("w:w", "w:h", "w:orient", "w:code")
| CT_PageSzBuilder |
python | getsentry__sentry | src/sentry/feedback/endpoints/project_user_reports.py | {
"start": 1069,
"end": 1230
} | class ____(serializers.ModelSerializer):
class Meta:
model = UserReport
fields = ("name", "email", "comments", "event_id")
| UserReportSerializer |
python | sqlalchemy__sqlalchemy | test/orm/test_deprecations.py | {
"start": 48885,
"end": 49411
} | class ____(fixtures.MappedTest):
def test_config_errors(self):
sm = sessionmaker()
def go():
s = sm()
s._is_asyncio = True
return s
Session = scoped_session(go)
with expect_deprecated(
"Using `scoped_session` with asyncio is deprecated and "
"will raise an error in a future version. "
"Please use `async_scoped_session` instead."
):
Session()
Session.remove()
| DeprecationScopedSessionTest |
python | jina-ai__jina | tests/integration/inspect_deployments_flow/test_inspect_deployments_flow.py | {
"start": 123,
"end": 437
} | class ____(Executor):
tag = 1
@requests(on=['/index'])
def craft(self, docs, *args, **kwargs):
tmp_dir = os.environ.get('TEST_EVAL_FLOW_TMPDIR')
with open(f'{tmp_dir}/{self.tag}.txt', 'a', encoding='utf-8') as fp:
fp.write(f'{docs[0].id}')
return None
| DummyEvaluator1 |
python | getsentry__sentry | src/sentry/sentry_metrics/configuration.py | {
"start": 668,
"end": 1221
} | class ____(Enum):
RELEASE_HEALTH = "release-health"
PERFORMANCE = "performance"
# Rate limiter namespaces, the postgres (PG)
# values are the same as UseCaseKey to keep
# backwards compatibility
RELEASE_HEALTH_PG_NAMESPACE = "releasehealth"
PERFORMANCE_PG_NAMESPACE = "performance"
RELEASE_HEALTH_SCHEMA_VALIDATION_RULES_OPTION_NAME = (
"sentry-metrics.indexer.release-health.schema-validation-rules"
)
GENERIC_METRICS_SCHEMA_VALIDATION_RULES_OPTION_NAME = (
"sentry-metrics.indexer.generic-metrics.schema-validation-rules"
)
| UseCaseKey |
python | django-guardian__django-guardian | guardian/testapp/tests/test_admin.py | {
"start": 13146,
"end": 19732
} | class ____(TestCase):
def _get_gma(self, attrs=None, name=None, model=None):
"""
Returns ``GuardedModelAdmin`` instance.
"""
attrs = attrs or {}
name = str(name or "GMA")
model = model or User
GMA = type(name, (GuardedModelAdmin,), attrs)
gma = GMA(model, admin.site)
return gma
def test_obj_perms_manage_template_attr(self):
attrs = {"obj_perms_manage_template": "foobar.html"}
gma = self._get_gma(attrs=attrs)
self.assertTrue(gma.get_obj_perms_manage_template(), "foobar.html")
def test_obj_perms_manage_user_template_attr(self):
attrs = {"obj_perms_manage_user_template": "foobar.html"}
gma = self._get_gma(attrs=attrs)
self.assertTrue(gma.get_obj_perms_manage_user_template(), "foobar.html")
def test_obj_perms_manage_user_form_attr(self):
attrs = {"obj_perms_manage_user_form": forms.Form}
gma = self._get_gma(attrs=attrs)
self.assertTrue(issubclass(gma.get_obj_perms_manage_user_form(None), forms.Form))
def test_obj_perms_user_select_form_attr(self):
attrs = {"obj_perms_user_select_form": forms.Form}
gma = self._get_gma(attrs=attrs)
self.assertTrue(issubclass(gma.get_obj_perms_user_select_form(None), forms.Form))
def test_obj_perms_manage_group_template_attr(self):
attrs = {"obj_perms_manage_group_template": "foobar.html"}
gma = self._get_gma(attrs=attrs)
self.assertTrue(gma.get_obj_perms_manage_group_template(), "foobar.html")
def test_obj_perms_manage_group_form_attr(self):
attrs = {"obj_perms_manage_group_form": forms.Form}
gma = self._get_gma(attrs=attrs)
self.assertTrue(issubclass(gma.get_obj_perms_manage_group_form(None), forms.Form))
def test_obj_perms_group_select_form_attr(self):
attrs = {"obj_perms_group_select_form": forms.Form}
gma = self._get_gma(attrs=attrs)
self.assertTrue(issubclass(gma.get_obj_perms_group_select_form(None), forms.Form))
def test_user_can_acces_owned_objects_only(self):
attrs = {
"user_can_access_owned_objects_only": True,
"user_owned_objects_field": "user",
}
gma = self._get_gma(attrs=attrs, model=LogEntry)
joe = User.objects.create_user("joe", "joe@example.com", "joe")
jane = User.objects.create_user("jane", "jane@example.com", "jane")
ctype = ContentType.objects.get_for_model(User)
joe_entry = LogEntry.objects.create(
user=joe, content_type=ctype, object_id=joe.pk, action_flag=1, change_message="foo"
)
LogEntry.objects.create(user=jane, content_type=ctype, object_id=jane.pk, action_flag=1, change_message="bar")
request = HttpRequest()
request.user = joe
qs = gma.get_queryset(request)
self.assertEqual([e.pk for e in qs], [joe_entry.pk])
def test_user_can_acces_owned_objects_only_unless_superuser(self):
attrs = {
"user_can_access_owned_objects_only": True,
"user_owned_objects_field": "user",
}
gma = self._get_gma(attrs=attrs, model=LogEntry)
joe = User.objects.create_superuser("joe", "joe@example.com", "joe")
jane = User.objects.create_user("jane", "jane@example.com", "jane")
ctype = ContentType.objects.get_for_model(User)
joe_entry = LogEntry.objects.create(
user=joe, content_type=ctype, object_id=joe.pk, action_flag=1, change_message="foo"
)
jane_entry = LogEntry.objects.create(
user=jane, content_type=ctype, object_id=jane.pk, action_flag=1, change_message="bar"
)
request = HttpRequest()
request.user = joe
qs = gma.get_queryset(request)
self.assertEqual(sorted(e.pk for e in qs), sorted([joe_entry.pk, jane_entry.pk]))
def test_user_can_access_owned_by_group_objects_only(self):
attrs = {
"user_can_access_owned_by_group_objects_only": True,
"group_owned_objects_field": "group",
}
gma = self._get_gma(attrs=attrs, model=LogEntry)
joe = User.objects.create_user("joe", "joe@example.com", "joe")
joe_group = Group.objects.create(name="joe-group")
joe.groups.add(joe_group)
jane = User.objects.create_user("jane", "jane@example.com", "jane")
jane_group = Group.objects.create(name="jane-group")
jane.groups.add(jane_group)
ctype = ContentType.objects.get_for_model(User)
LogEntry.objects.create(user=joe, content_type=ctype, object_id=joe.pk, action_flag=1, change_message="foo")
LogEntry.objects.create(user=jane, content_type=ctype, object_id=jane.pk, action_flag=1, change_message="bar")
joe_entry_group = LogEntry.objects.create(
user=jane, content_type=ctype, object_id=joe.pk, action_flag=1, change_message="foo", group=joe_group
)
request = HttpRequest()
request.user = joe
qs = gma.get_queryset(request)
self.assertEqual([e.pk for e in qs], [joe_entry_group.pk])
def test_user_can_access_owned_by_group_objects_only_unless_superuser(self):
attrs = {
"user_can_access_owned_by_group_objects_only": True,
"group_owned_objects_field": "group",
}
gma = self._get_gma(attrs=attrs, model=LogEntry)
joe = User.objects.create_superuser("joe", "joe@example.com", "joe")
joe_group = Group.objects.create(name="joe-group")
joe.groups.add(joe_group)
jane = User.objects.create_user("jane", "jane@example.com", "jane")
jane_group = Group.objects.create(name="jane-group")
jane.groups.add(jane_group)
ctype = ContentType.objects.get_for_model(User)
LogEntry.objects.create(user=joe, content_type=ctype, object_id=joe.pk, action_flag=1, change_message="foo")
LogEntry.objects.create(user=jane, content_type=ctype, object_id=jane.pk, action_flag=1, change_message="bar")
LogEntry.objects.create(
user=jane, content_type=ctype, object_id=joe.pk, action_flag=1, change_message="foo", group=joe_group
)
LogEntry.objects.create(
user=joe, content_type=ctype, object_id=joe.pk, action_flag=1, change_message="foo", group=jane_group
)
request = HttpRequest()
request.user = joe
qs = gma.get_queryset(request)
self.assertEqual(sorted(e.pk for e in qs), sorted(LogEntry.objects.values_list("pk", flat=True)))
| GuardedModelAdminTests |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pycodestyle/E30.py | {
"start": 10218,
"end": 10435
} | class ____:
def f():
x = 1
def g():
return 1
return 2
def f():
class Baz:
x = 1
def g():
return 1
return 2
# end
| Bar |
python | lepture__authlib | authlib/oauth2/rfc9207/parameter.py | {
"start": 173,
"end": 1695
} | class ____:
def __call__(self, authorization_server):
if isinstance(authorization_server, BaseGrant):
deprecate(
"IssueParameter should be used as an authorization server extension with 'authorization_server.register_extension(IssueParameter())'.",
version="1.8",
)
authorization_server.register_hook(
"after_authorization_response",
self.add_issuer_parameter,
)
else:
authorization_server.register_hook(
"after_create_authorization_response",
self.add_issuer_parameter,
)
def add_issuer_parameter(self, authorization_server, response):
if self.get_issuer() and response.location:
# RFC9207 §2
# In authorization responses to the client, including error responses,
# an authorization server supporting this specification MUST indicate
# its identity by including the iss parameter in the response.
new_location = add_params_to_uri(
response.location, {"iss": self.get_issuer()}
)
response.location = new_location
def get_issuer(self) -> Optional[str]:
"""Return the issuer URL.
Developers MAY implement this method if they want to support :rfc:`RFC9207 <9207>`::
def get_issuer(self) -> str:
return "https://auth.example.org"
"""
return None
| IssuerParameter |
python | tensorflow__tensorflow | tensorflow/python/distribute/coordinator/cluster_coordinator.py | {
"start": 7251,
"end": 11564
} | class ____(object):
"""Hold a function to be scheduled and its arguments."""
def __init__(self, function, cancellation_mgr, args=None, kwargs=None):
if not callable(function):
raise ValueError("Function passed to `ClusterCoordinator.schedule` must "
"be a callable object.")
self._args = args or ()
self._kwargs = kwargs or {}
_disallow_remote_value_as_input(self._args)
_disallow_remote_value_as_input(self._kwargs)
if isinstance(function, def_function.Function):
replica_args = _select_worker_slice(0, self._args)
replica_kwargs = _select_worker_slice(0, self._kwargs)
# Note: no need to handle function registration failure since this kind of
# failure will not raise exceptions as designed in the runtime. The
# coordinator has to rely on subsequent operations that raise to catch
# function registration failure.
# Record the function tracing overhead. Note that we pass in the tracing
# count of the def_function.Function as a state tracker, so that metrics
# will only record the time for actual function tracing (i.e., excluding
# function cache lookups).
with metric_utils.monitored_timer(
"function_tracing", state_tracker=function._get_tracing_count): # pylint: disable=protected-access
self._concrete_function = function.get_concrete_function(
*nest.map_structure(_maybe_as_type_spec, replica_args),
**nest.map_structure(_maybe_as_type_spec, replica_kwargs))
elif isinstance(function, tf_function.ConcreteFunction):
self._concrete_function = function
if hasattr(self, "_concrete_function"):
# If we have a concrete function, we get to retrieve the output type spec
# via the structured_output.
self._output_type_spec = func_graph.convert_structure_to_signature(
self._concrete_function.structured_outputs)
self._function = cancellation_mgr.get_cancelable_function(
self._concrete_function)
else:
# Otherwise (i.e. what is passed in is a regular python function), we have
# no such information.
self._output_type_spec = None
self._function = function
self._output_remote_value_ref = None
def build_output_remote_value(self):
if self._output_remote_value_ref is None:
ret = RemoteValueImpl(None, self._output_type_spec)
self._output_remote_value_ref = weakref.ref(ret)
return ret
else:
raise ValueError(
"The output of the Closure cannot be built more than once.")
def maybe_call_with_output_remote_value(self, method):
if self._output_remote_value_ref is None:
return None
output_remote_value = self._output_remote_value_ref()
if output_remote_value is not None:
return method(output_remote_value)
return None
def mark_cancelled(self):
e = errors.CancelledError(
None, None, "The corresponding function is "
"cancelled. Please reschedule the function.")
self.maybe_call_with_output_remote_value(lambda r: r._set_error(e)) # pylint: disable=protected-access
def execute_on(self, worker):
"""Executes the closure on the given worker.
Args:
worker: a `Worker` object.
"""
replica_args = _select_worker_slice(worker.worker_index, self._args)
replica_kwargs = _select_worker_slice(worker.worker_index, self._kwargs)
e = (
_get_error_from_remote_values(replica_args) or
_get_error_from_remote_values(replica_kwargs))
if e:
if not isinstance(e, ClosureInputError):
e = ClosureInputError(e)
raise e
with ops.device(worker.device_name):
with context.executor_scope(worker.executor):
with coordinator_context.with_dispatch_context(worker):
with metric_utils.monitored_timer("closure_execution"):
output_values = self._function(
*nest.map_structure(coordinator_context.maybe_get_remote_value,
replica_args),
**nest.map_structure(coordinator_context.maybe_get_remote_value,
replica_kwargs))
self.maybe_call_with_output_remote_value(
lambda r: r._set_values(output_values)) # pylint: disable=protected-access
| Closure |
python | bokeh__bokeh | src/bokeh/models/plots.py | {
"start": 35491,
"end": 37054
} | class ____(_list_attr_splat):
def __setattr__(self, attr, value):
if not len(self):
from ..util.warnings import warn
warn(_LEGEND_EMPTY_WARNING % attr)
return super().__setattr__(attr, value)
def _select_helper(args, kwargs):
""" Allow flexible selector syntax.
Returns:
dict
"""
if len(args) > 1:
raise TypeError("select accepts at most ONE positional argument.")
if len(args) > 0 and len(kwargs) > 0:
raise TypeError("select accepts EITHER a positional argument, OR keyword arguments (not both).")
if len(args) == 0 and len(kwargs) == 0:
raise TypeError("select requires EITHER a positional argument, OR keyword arguments.")
if args:
arg = args[0]
if isinstance(arg, dict):
selector = arg
elif isinstance(arg, str):
selector = dict(name=arg)
elif isinstance(arg, type) and issubclass(arg, Model):
selector = {"type": arg}
else:
raise TypeError("selector must be a dictionary, string or plot object.")
elif 'selector' in kwargs:
if len(kwargs) == 1:
selector = kwargs['selector']
else:
raise TypeError("when passing 'selector' keyword arg, not other keyword args may be present")
else:
selector = kwargs
return selector
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| _legend_attr_splat |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/recompose_on_mount.py | {
"start": 539,
"end": 768
} | class ____(Screen):
def compose(self) -> ComposeResult:
"""Create child widgets for the app."""
yield Header()
yield Static(" Profile ", id="title")
yield Profile()
yield Footer()
| Landing |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 6489,
"end": 6781
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear1 = torch.nn.Linear(10, 10)
self.count = 3
def forward(self, x):
for _ in range(self.count):
x = torch.sigmoid(self.linear1(x))
return x
| ConstLoop |
python | davidhalter__jedi | jedi/inference/compiled/mixed.py | {
"start": 3497,
"end": 3640
} | class ____(CompiledContext, TreeContextMixin):
@property
def compiled_value(self):
return self._value.compiled_value
| MixedContext |
python | lepture__authlib | authlib/jose/errors.py | {
"start": 232,
"end": 314
} | class ____(JoseError):
error = "unsupported_algorithm"
| UnsupportedAlgorithmError |
python | pypa__virtualenv | src/virtualenv/create/via_global_ref/builtin/pypy/pypy3.py | {
"start": 481,
"end": 2010
} | class ____(PyPy3, PosixSupports):
"""PyPy 3 on POSIX."""
@classmethod
def _shared_libs(cls, python_dir):
# glob for libpypy3-c.so, libpypy3-c.dylib, libpypy3.9-c.so ...
return python_dir.glob("libpypy3*.*")
def to_lib(self, src):
return self.dest / "lib" / src.name
@classmethod
def sources(cls, interpreter):
yield from super().sources(interpreter)
# PyPy >= 3.8 supports a standard prefix installation, where older
# versions always used a portable/development style installation.
# If this is a standard prefix installation, skip the below:
if interpreter.system_prefix == "/usr":
return
# Also copy/symlink anything under prefix/lib, which, for "portable"
# PyPy builds, includes the tk,tcl runtime and a number of shared
# objects. In distro-specific builds or on conda this should be empty
# (on PyPy3.8+ it will, like on CPython, hold the stdlib).
host_lib = Path(interpreter.system_prefix) / "lib"
stdlib = Path(interpreter.system_stdlib)
if host_lib.exists() and host_lib.is_dir():
for path in host_lib.iterdir():
if stdlib == path:
# For PyPy3.8+ the stdlib lives in lib/pypy3.8
# We need to avoid creating a symlink to it since that
# will defeat the purpose of a virtualenv
continue
yield PathRefToDest(path, dest=cls.to_lib)
| PyPy3Posix |
python | jazzband__tablib | src/tablib/exceptions.py | {
"start": 402,
"end": 540
} | class ____(TablibException, AttributeError):
"""Header parameter must be given when appending a column to this Dataset."""
| HeadersNeeded |
python | pydantic__pydantic | tests/typechecking/decorators.py | {
"start": 3833,
"end": 4932
} | class ____(BaseModel):
"""Same tests should apply to `mode='plain'`."""
@field_validator('foo', mode='before')
def no_classmethod(self, value: Any) -> Any:
"""TODO this shouldn't be valid, the decorator should only work on classmethods.
We might want to do the same type checking as wrap model validators.
"""
@field_validator('foo', mode='before')
@classmethod
def valid_classmethod(cls, value: Any) -> Any: ...
@field_validator('foo', mode='before') # type: ignore[type-var] # pyright: ignore[reportArgumentType]
@classmethod
def invalid_with_info(cls, value: Any, info: int) -> Any: ...
@field_validator('foo', mode='before', json_schema_input_type=int) # `json_schema_input_type` allowed here.
@classmethod
def valid_with_info_default(cls, value: Any, info: ValidationInfo) -> Any: ...
@field_validator('foo', mode='before', json_schema_input_type=int) # `json_schema_input_type` allowed here.
@classmethod
def valid_with_info(cls, value: Any, info: ValidationInfo[int]) -> Any: ...
| BeforeFieldValidator |
python | vyperlang__vyper | vyper/semantics/analysis/base.py | {
"start": 3253,
"end": 4127
} | class ____(AnalysisResult):
module_t: "ModuleT"
alias: str
# import_node: vy_ast._ImportStmt # maybe could be useful
ownership: ModuleOwnership = ModuleOwnership.NO_OWNERSHIP
ownership_decl: Optional[vy_ast.VyperNode] = None
@property
def module_node(self):
return self.module_t._module
# duck type, conform to interface of VarInfo and ExprInfo
@property
def typ(self):
return self.module_t
def set_ownership(self, module_ownership: ModuleOwnership, node: Optional[vy_ast.VyperNode]):
if self.ownership != ModuleOwnership.NO_OWNERSHIP:
raise StructureException(
f"ownership already set to `{self.ownership}`", node, self.ownership_decl
)
self.ownership = module_ownership
def __hash__(self):
return hash(id(self.module_t))
@dataclass
| ModuleInfo |
python | scrapy__scrapy | tests/test_exporters.py | {
"start": 7531,
"end": 7676
} | class ____(TestPickleItemExporter):
item_class = MyDataClass
custom_field_item_class = CustomFieldDataclass
| TestPickleItemExporterDataclass |
python | tensorflow__tensorflow | tensorflow/python/distribute/shared_variable_creator_test.py | {
"start": 1630,
"end": 2454
} | class ____(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testSharedVariable(self):
shared_variable_store = {}
num_devices = 3
creator_fns = []
for i in range(num_devices):
creator_fn = shared_variable_creator.make_fn(shared_variable_store, i)
creator_fns.append(creator_fn)
with variable_scope.variable_creator_scope(creator_fns[0]):
v0 = variable_v1.VariableV1(1.0, name="foo")
with variable_scope.variable_creator_scope(creator_fns[1]):
v1 = variable_v1.VariableV1(1.0, name="foo")
with variable_scope.variable_creator_scope(creator_fns[2]):
v2 = variable_v1.VariableV1(1.0, name="foo")
# v1 and v2 should be same as v0
self.assertIs(v1, v0)
self.assertIs(v2, v0)
if __name__ == "__main__":
test.main()
| SharedVariableCreatorTest |
python | psf__requests | tests/test_requests.py | {
"start": 94623,
"end": 106562
} | class ____:
@pytest.mark.parametrize(
"url,expected",
(
("http://google.com", "http://google.com/"),
("http://ジェーピーニック.jp", "http://xn--hckqz9bzb1cyrb.jp/"),
("http://xn--n3h.net/", "http://xn--n3h.net/"),
("http://ジェーピーニック.jp".encode(), "http://xn--hckqz9bzb1cyrb.jp/"),
("http://straße.de/straße", "http://xn--strae-oqa.de/stra%C3%9Fe"),
(
"http://straße.de/straße".encode(),
"http://xn--strae-oqa.de/stra%C3%9Fe",
),
(
"http://Königsgäßchen.de/straße",
"http://xn--knigsgchen-b4a3dun.de/stra%C3%9Fe",
),
(
"http://Königsgäßchen.de/straße".encode(),
"http://xn--knigsgchen-b4a3dun.de/stra%C3%9Fe",
),
(b"http://xn--n3h.net/", "http://xn--n3h.net/"),
(
b"http://[1200:0000:ab00:1234:0000:2552:7777:1313]:12345/",
"http://[1200:0000:ab00:1234:0000:2552:7777:1313]:12345/",
),
(
"http://[1200:0000:ab00:1234:0000:2552:7777:1313]:12345/",
"http://[1200:0000:ab00:1234:0000:2552:7777:1313]:12345/",
),
),
)
def test_preparing_url(self, url, expected):
def normalize_percent_encode(x):
# Helper function that normalizes equivalent
# percent-encoded bytes before comparisons
for c in re.findall(r"%[a-fA-F0-9]{2}", x):
x = x.replace(c, c.upper())
return x
r = requests.Request("GET", url=url)
p = r.prepare()
assert normalize_percent_encode(p.url) == expected
@pytest.mark.parametrize(
"url",
(
b"http://*.google.com",
b"http://*",
"http://*.google.com",
"http://*",
"http://☃.net/",
),
)
def test_preparing_bad_url(self, url):
r = requests.Request("GET", url=url)
with pytest.raises(requests.exceptions.InvalidURL):
r.prepare()
@pytest.mark.parametrize("url, exception", (("http://:1", InvalidURL),))
def test_redirecting_to_bad_url(self, httpbin, url, exception):
with pytest.raises(exception):
requests.get(httpbin("redirect-to"), params={"url": url})
@pytest.mark.parametrize(
"input, expected",
(
(
b"http+unix://%2Fvar%2Frun%2Fsocket/path%7E",
"http+unix://%2Fvar%2Frun%2Fsocket/path~",
),
(
"http+unix://%2Fvar%2Frun%2Fsocket/path%7E",
"http+unix://%2Fvar%2Frun%2Fsocket/path~",
),
(
b"mailto:user@example.org",
"mailto:user@example.org",
),
(
"mailto:user@example.org",
"mailto:user@example.org",
),
(
b"data:SSDimaUgUHl0aG9uIQ==",
"data:SSDimaUgUHl0aG9uIQ==",
),
),
)
def test_url_mutation(self, input, expected):
"""
This test validates that we correctly exclude some URLs from
preparation, and that we handle others. Specifically, it tests that
any URL whose scheme doesn't begin with "http" is left alone, and
those whose scheme *does* begin with "http" are mutated.
"""
r = requests.Request("GET", url=input)
p = r.prepare()
assert p.url == expected
@pytest.mark.parametrize(
"input, params, expected",
(
(
b"http+unix://%2Fvar%2Frun%2Fsocket/path",
{"key": "value"},
"http+unix://%2Fvar%2Frun%2Fsocket/path?key=value",
),
(
"http+unix://%2Fvar%2Frun%2Fsocket/path",
{"key": "value"},
"http+unix://%2Fvar%2Frun%2Fsocket/path?key=value",
),
(
b"mailto:user@example.org",
{"key": "value"},
"mailto:user@example.org",
),
(
"mailto:user@example.org",
{"key": "value"},
"mailto:user@example.org",
),
),
)
def test_parameters_for_nonstandard_schemes(self, input, params, expected):
"""
Setting parameters for nonstandard schemes is allowed if those schemes
begin with "http", and is forbidden otherwise.
"""
r = requests.Request("GET", url=input, params=params)
p = r.prepare()
assert p.url == expected
def test_post_json_nan(self, httpbin):
data = {"foo": float("nan")}
with pytest.raises(requests.exceptions.InvalidJSONError):
requests.post(httpbin("post"), json=data)
def test_json_decode_compatibility(self, httpbin):
r = requests.get(httpbin("bytes/20"))
with pytest.raises(requests.exceptions.JSONDecodeError) as excinfo:
r.json()
assert isinstance(excinfo.value, RequestException)
assert isinstance(excinfo.value, JSONDecodeError)
assert r.text not in str(excinfo.value)
def test_json_decode_persists_doc_attr(self, httpbin):
r = requests.get(httpbin("bytes/20"))
with pytest.raises(requests.exceptions.JSONDecodeError) as excinfo:
r.json()
assert excinfo.value.doc == r.text
def test_status_code_425(self):
r1 = requests.codes.get("TOO_EARLY")
r2 = requests.codes.get("too_early")
r3 = requests.codes.get("UNORDERED")
r4 = requests.codes.get("unordered")
r5 = requests.codes.get("UNORDERED_COLLECTION")
r6 = requests.codes.get("unordered_collection")
assert r1 == 425
assert r2 == 425
assert r3 == 425
assert r4 == 425
assert r5 == 425
assert r6 == 425
def test_different_connection_pool_for_tls_settings_verify_True(self):
def response_handler(sock):
consume_socket_content(sock, timeout=0.5)
sock.send(
b"HTTP/1.1 200 OK\r\n"
b"Content-Length: 18\r\n\r\n"
b'\xff\xfe{\x00"\x00K0"\x00=\x00"\x00\xab0"\x00\r\n'
)
s = requests.Session()
close_server = threading.Event()
server = TLSServer(
handler=response_handler,
wait_to_close_event=close_server,
requests_to_handle=3,
cert_chain="tests/certs/expired/server/server.pem",
keyfile="tests/certs/expired/server/server.key",
)
with server as (host, port):
url = f"https://{host}:{port}"
r1 = s.get(url, verify=False)
assert r1.status_code == 200
# Cannot verify self-signed certificate
with pytest.raises(requests.exceptions.SSLError):
s.get(url)
close_server.set()
assert 2 == len(s.adapters["https://"].poolmanager.pools)
def test_different_connection_pool_for_tls_settings_verify_bundle_expired_cert(
self,
):
def response_handler(sock):
consume_socket_content(sock, timeout=0.5)
sock.send(
b"HTTP/1.1 200 OK\r\n"
b"Content-Length: 18\r\n\r\n"
b'\xff\xfe{\x00"\x00K0"\x00=\x00"\x00\xab0"\x00\r\n'
)
s = requests.Session()
close_server = threading.Event()
server = TLSServer(
handler=response_handler,
wait_to_close_event=close_server,
requests_to_handle=3,
cert_chain="tests/certs/expired/server/server.pem",
keyfile="tests/certs/expired/server/server.key",
)
with server as (host, port):
url = f"https://{host}:{port}"
r1 = s.get(url, verify=False)
assert r1.status_code == 200
# Has right trust bundle, but certificate expired
with pytest.raises(requests.exceptions.SSLError):
s.get(url, verify="tests/certs/expired/ca/ca.crt")
close_server.set()
assert 2 == len(s.adapters["https://"].poolmanager.pools)
def test_different_connection_pool_for_tls_settings_verify_bundle_unexpired_cert(
self,
):
def response_handler(sock):
consume_socket_content(sock, timeout=0.5)
sock.send(
b"HTTP/1.1 200 OK\r\n"
b"Content-Length: 18\r\n\r\n"
b'\xff\xfe{\x00"\x00K0"\x00=\x00"\x00\xab0"\x00\r\n'
)
s = requests.Session()
close_server = threading.Event()
server = TLSServer(
handler=response_handler,
wait_to_close_event=close_server,
requests_to_handle=3,
cert_chain="tests/certs/valid/server/server.pem",
keyfile="tests/certs/valid/server/server.key",
)
with server as (host, port):
url = f"https://{host}:{port}"
r1 = s.get(url, verify=False)
assert r1.status_code == 200
r2 = s.get(url, verify="tests/certs/valid/ca/ca.crt")
assert r2.status_code == 200
close_server.set()
assert 2 == len(s.adapters["https://"].poolmanager.pools)
def test_different_connection_pool_for_mtls_settings(self):
client_cert = None
def response_handler(sock):
nonlocal client_cert
client_cert = sock.getpeercert()
consume_socket_content(sock, timeout=0.5)
sock.send(
b"HTTP/1.1 200 OK\r\n"
b"Content-Length: 18\r\n\r\n"
b'\xff\xfe{\x00"\x00K0"\x00=\x00"\x00\xab0"\x00\r\n'
)
s = requests.Session()
close_server = threading.Event()
server = TLSServer(
handler=response_handler,
wait_to_close_event=close_server,
requests_to_handle=2,
cert_chain="tests/certs/expired/server/server.pem",
keyfile="tests/certs/expired/server/server.key",
mutual_tls=True,
cacert="tests/certs/expired/ca/ca.crt",
)
cert = (
"tests/certs/mtls/client/client.pem",
"tests/certs/mtls/client/client.key",
)
with server as (host, port):
url = f"https://{host}:{port}"
r1 = s.get(url, verify=False, cert=cert)
assert r1.status_code == 200
with pytest.raises(requests.exceptions.SSLError):
s.get(url, cert=cert)
close_server.set()
assert client_cert is not None
def test_content_length_for_bytes_data(httpbin):
data = "This is a string containing multi-byte UTF-8 ☃️"
encoded_data = data.encode("utf-8")
length = str(len(encoded_data))
req = requests.Request("POST", httpbin("post"), data=encoded_data)
p = req.prepare()
assert p.headers["Content-Length"] == length
@pytest.mark.skipif(
is_urllib3_1,
reason="urllib3 2.x encodes all strings to utf-8, urllib3 1.x uses latin-1",
)
def test_content_length_for_string_data_counts_bytes(httpbin):
data = "This is a string containing multi-byte UTF-8 ☃️"
length = str(len(data.encode("utf-8")))
req = requests.Request("POST", httpbin("post"), data=data)
p = req.prepare()
assert p.headers["Content-Length"] == length
def test_json_decode_errors_are_serializable_deserializable():
json_decode_error = requests.exceptions.JSONDecodeError(
"Extra data",
'{"responseCode":["706"],"data":null}{"responseCode":["706"],"data":null}',
36,
)
deserialized_error = pickle.loads(pickle.dumps(json_decode_error))
assert repr(json_decode_error) == repr(deserialized_error)
| TestPreparingURLs |
python | python-poetry__poetry | src/poetry/utils/helpers.py | {
"start": 3185,
"end": 4514
} | class ____(Exception):
"""Raised when server unexpectedly supports byte ranges."""
def download_file(
url: str,
dest: Path,
*,
session: Authenticator | Session | None = None,
chunk_size: int = 1024,
raise_accepts_ranges: bool = False,
max_retries: int = 0,
) -> None:
from poetry.puzzle.provider import Indicator
downloader = Downloader(url, dest, session, max_retries=max_retries)
if raise_accepts_ranges and downloader.accepts_ranges:
raise HTTPRangeRequestSupportedError(f"URL {url} supports range requests.")
set_indicator = False
with Indicator.context() as update_context:
update_context(f"Downloading {url}")
total_size = downloader.total_size
if total_size > 0:
fetched_size = 0
last_percent = 0
# if less than 1MB, we simply show that we're downloading
# but skip the updating
set_indicator = total_size > 1024 * 1024
for fetched_size in downloader.download_with_progress(chunk_size):
if set_indicator:
percent = (fetched_size * 100) // total_size
if percent > last_percent:
last_percent = percent
update_context(f"Downloading {url} {percent:3}%")
| HTTPRangeRequestSupportedError |
python | lepture__authlib | authlib/jose/rfc7517/key_set.py | {
"start": 49,
"end": 1606
} | class ____:
"""This class represents a JSON Web Key Set."""
def __init__(self, keys):
self.keys = keys
def as_dict(self, is_private=False, **params):
"""Represent this key as a dict of the JSON Web Key Set."""
return {"keys": [k.as_dict(is_private, **params) for k in self.keys]}
def as_json(self, is_private=False, **params):
"""Represent this key set as a JSON string."""
obj = self.as_dict(is_private, **params)
return json_dumps(obj)
def find_by_kid(self, kid, **params):
"""Find the key matches the given kid value.
:param kid: A string of kid
:return: Key instance
:raise: ValueError
"""
# Proposed fix, feel free to do something else but the idea is that we take the only key
# of the set if no kid is specified
if kid is None and len(self.keys) == 1:
return self.keys[0]
keys = [key for key in self.keys if key.kid == kid]
if params:
keys = list(_filter_keys_by_params(keys, **params))
if keys:
return keys[0]
raise ValueError("Key not found")
def _filter_keys_by_params(keys, **params):
_use = params.get("use")
_alg = params.get("alg")
for key in keys:
designed_use = key.tokens.get("use")
if designed_use and _use and designed_use != _use:
continue
designed_alg = key.tokens.get("alg")
if designed_alg and _alg and designed_alg != _alg:
continue
yield key
| KeySet |
python | pallets__jinja | tests/test_api.py | {
"start": 8986,
"end": 15601
} | class ____:
def test_stopiteration_is_undefined(self):
def test():
raise StopIteration()
t = Template("A{{ test() }}B")
assert t.render(test=test) == "AB"
t = Template("A{{ test().missingattribute }}B")
pytest.raises(UndefinedError, t.render, test=test)
def test_undefined_and_special_attributes(self):
with pytest.raises(AttributeError):
Undefined("Foo").__dict__ # noqa B018
def test_undefined_attribute_error(self):
# Django's LazyObject turns the __class__ attribute into a
# property that resolves the wrapped function. If that wrapped
# function raises an AttributeError, printing the repr of the
# object in the undefined message would cause a RecursionError.
class Error:
@property # type: ignore
def __class__(self):
raise AttributeError()
u = Undefined(obj=Error(), name="hello")
with pytest.raises(UndefinedError):
getattr(u, "recursion", None)
def test_logging_undefined(self):
_messages = []
class DebugLogger:
def warning(self, msg, *args):
_messages.append("W:" + msg % args)
def error(self, msg, *args):
_messages.append("E:" + msg % args)
logging_undefined = make_logging_undefined(DebugLogger())
env = Environment(undefined=logging_undefined)
assert env.from_string("{{ missing }}").render() == ""
pytest.raises(UndefinedError, env.from_string("{{ missing.attribute }}").render)
assert env.from_string("{{ missing|list }}").render() == "[]"
assert env.from_string("{{ missing is not defined }}").render() == "True"
assert env.from_string("{{ foo.missing }}").render(foo=42) == ""
assert env.from_string("{{ not missing }}").render() == "True"
assert _messages == [
"W:Template variable warning: 'missing' is undefined",
"E:Template variable error: 'missing' is undefined",
"W:Template variable warning: 'missing' is undefined",
"W:Template variable warning: 'int object' has no attribute 'missing'",
"W:Template variable warning: 'missing' is undefined",
]
def test_default_undefined(self):
env = Environment(undefined=Undefined)
assert env.from_string("{{ missing }}").render() == ""
pytest.raises(UndefinedError, env.from_string("{{ missing.attribute }}").render)
assert env.from_string("{{ missing|list }}").render() == "[]"
assert env.from_string("{{ missing is not defined }}").render() == "True"
assert env.from_string("{{ foo.missing }}").render(foo=42) == ""
assert env.from_string("{{ not missing }}").render() == "True"
pytest.raises(UndefinedError, env.from_string("{{ missing - 1}}").render)
assert env.from_string("{{ 'foo' in missing }}").render() == "False"
und1 = Undefined(name="x")
und2 = Undefined(name="y")
assert und1 == und2
assert und1 != 42
assert hash(und1) == hash(und2) == hash(Undefined())
def test_chainable_undefined(self):
env = Environment(undefined=ChainableUndefined)
# The following tests are copied from test_default_undefined
assert env.from_string("{{ missing }}").render() == ""
assert env.from_string("{{ missing|list }}").render() == "[]"
assert env.from_string("{{ missing is not defined }}").render() == "True"
assert env.from_string("{{ foo.missing }}").render(foo=42) == ""
assert env.from_string("{{ not missing }}").render() == "True"
pytest.raises(UndefinedError, env.from_string("{{ missing - 1}}").render)
# The following tests ensure subclass functionality works as expected
assert env.from_string('{{ missing.bar["baz"] }}').render() == ""
assert env.from_string('{{ foo.bar["baz"]._undefined_name }}').render() == "foo"
assert (
env.from_string('{{ foo.bar["baz"]._undefined_name }}').render(foo=42)
== "bar"
)
assert (
env.from_string('{{ foo.bar["baz"]._undefined_name }}').render(
foo={"bar": 42}
)
== "baz"
)
def test_debug_undefined(self):
env = Environment(undefined=DebugUndefined)
assert env.from_string("{{ missing }}").render() == "{{ missing }}"
pytest.raises(UndefinedError, env.from_string("{{ missing.attribute }}").render)
assert env.from_string("{{ missing|list }}").render() == "[]"
assert env.from_string("{{ missing is not defined }}").render() == "True"
assert (
env.from_string("{{ foo.missing }}").render(foo=42)
== "{{ no such element: int object['missing'] }}"
)
assert env.from_string("{{ not missing }}").render() == "True"
undefined_hint = "this is testing undefined hint of DebugUndefined"
assert (
str(DebugUndefined(hint=undefined_hint))
== f"{{{{ undefined value printed: {undefined_hint} }}}}"
)
def test_strict_undefined(self):
env = Environment(undefined=StrictUndefined)
pytest.raises(UndefinedError, env.from_string("{{ missing }}").render)
pytest.raises(UndefinedError, env.from_string("{{ missing.attribute }}").render)
pytest.raises(UndefinedError, env.from_string("{{ missing|list }}").render)
pytest.raises(UndefinedError, env.from_string("{{ 'foo' in missing }}").render)
assert env.from_string("{{ missing is not defined }}").render() == "True"
pytest.raises(
UndefinedError, env.from_string("{{ foo.missing }}").render, foo=42
)
pytest.raises(UndefinedError, env.from_string("{{ not missing }}").render)
assert (
env.from_string('{{ missing|default("default", true) }}').render()
== "default"
)
assert env.from_string('{{ "foo" if false }}').render() == ""
def test_indexing_gives_undefined(self):
t = Template("{{ var[42].foo }}")
pytest.raises(UndefinedError, t.render, var=0)
def test_none_gives_proper_error(self):
with pytest.raises(UndefinedError, match="'None' has no attribute 'split'"):
Environment().getattr(None, "split")()
def test_object_repr(self):
with pytest.raises(
UndefinedError, match="'int object' has no attribute 'upper'"
):
Undefined(obj=42, name="upper")()
| TestUndefined |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/gather_nd_op_test.py | {
"start": 1795,
"end": 18043
} | class ____(test.TestCase):
def _testSimpleDtype(self, dtype, itype):
with self.cached_session():
params = constant_op.constant(np.array([8, 1, 2, 3, 7, 5], dtype=dtype))
indices = constant_op.constant([[4], [4], [0]], dtype=itype)
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = self.evaluate(gather_nd_t)
self.assertAllEqual(np.array([7, 7, 8], dtype=dtype), gather_nd_val)
self.assertEqual([3], gather_nd_t.get_shape())
def testSimpleDtype(self):
for dtype in _TEST_DTYPES:
for itype in _TEST_ITYPES:
self._testSimpleDtype(dtype, itype)
@test_util.run_deprecated_v1
@test_util.disable_xla("b/123337890") # Error messages differ
def testEmptyIndicesAndParamsOKButJustEmptyParamsFails(self):
with self.session():
params = np.ones((3, 3), dtype=np.float32)
indices_empty = np.empty((0, 2), dtype=np.int32)
gather_nd_ok_t = array_ops.gather_nd(params, indices_empty)
gather_nd_ok_val = self.evaluate(gather_nd_ok_t)
self.assertEqual([0], gather_nd_ok_t.get_shape())
self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
indices_empty = np.empty((0, 1), dtype=np.int32)
gather_nd_ok_t = array_ops.gather_nd(params, indices_empty)
gather_nd_ok_val = self.evaluate(gather_nd_ok_t)
self.assertEqual([0, 3], gather_nd_ok_t.get_shape())
self.assertAllClose(np.empty((0, 3), dtype=np.float32), gather_nd_ok_val)
params_empty = np.empty((0, 3), dtype=np.float32)
indices_empty = np.empty((0, 2), dtype=np.int32)
gather_nd_ok_t = array_ops.gather_nd(params_empty, indices_empty)
gather_nd_ok_val = self.evaluate(gather_nd_ok_t)
self.assertEqual([0], gather_nd_ok_t.get_shape())
self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
params_empty = np.empty((0, 3), dtype=np.float32)
indices_nonempty = np.zeros((1, 2), dtype=np.int32)
gather_nd_break_t = array_ops.gather_nd(params_empty, indices_nonempty)
with self.assertRaisesOpError(
r"Requested more than 0 entries, but params is empty."):
self.evaluate(gather_nd_break_t)
self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
def testIndexScalar(self):
with self.session():
params = np.array(
[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
indices = constant_op.constant([4, 1])
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([], gather_nd_t.get_shape())
self.assertAllEqual(np.array(7), gather_nd_val)
def testParamsRankLargerThanIndexIndexScalarSlices(self):
with self.session():
params = np.array(
[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
indices = constant_op.constant([4])
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([2], gather_nd_t.get_shape())
self.assertAllEqual(np.array([-7, 7]), gather_nd_val)
def testParamsRankLargerThanIndexSlices(self):
with self.session():
params = np.array(
[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
indices = constant_op.constant([[4], [4], [0]])
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([3, 2], gather_nd_t.get_shape())
self.assertAllEqual(np.array([[-7, 7], [-7, 7], [-8, 8]]), gather_nd_val)
def testHigherRankParamsLargerThanIndexSlices(self):
with self.session():
params = np.array(
[[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
[[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
dtype=np.float32).T
params_t = constant_op.constant(params)
indices = constant_op.constant([[4], [4], [0]])
gather_nd_t = array_ops.gather_nd(params_t, indices)
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([3, 2, 2], gather_nd_t.get_shape())
self.assertAllEqual(params[[4, 4, 0]], gather_nd_val)
def testEmptyIndicesLastRankMeansCopyEntireTensor(self):
with self.session():
params = np.array(
[[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
[[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
dtype=np.float32).T
params_t = constant_op.constant(params)
indices = constant_op.constant(
[[], []], dtype=dtypes.int32) # Size (2, 0)
gather_nd_t = array_ops.gather_nd(params_t, indices)
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([2, 6, 2, 2], gather_nd_t.get_shape())
self.assertAllEqual(
np.vstack((params[np.newaxis, :], params[np.newaxis, :])),
gather_nd_val)
def testHigherRankParamsAndIndicesLargerThanIndexSlices(self):
with self.session():
params = np.array(
[[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
[[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
dtype=np.float32).T
params_t = constant_op.constant(params)
indices = constant_op.constant([[[3], [2], [1]], [[4], [4], [0]]])
gather_nd_t = array_ops.gather_nd(params_t, indices)
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([2, 3, 2, 2], gather_nd_t.get_shape())
self.assertAllEqual(params[[3, 2, 1, 4, 4, 0]].reshape(2, 3, 2, 2),
gather_nd_val)
def testHigherRankParams(self):
with self.session():
shape = (10, 20, 5, 1, 17)
params = np.random.rand(*shape)
indices = np.vstack([np.random.randint(0, s, size=2000) for s in shape]).T
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = self.evaluate(gather_nd_t)
expected = params[tuple(indices.T)]
self.assertAllEqual(expected, gather_nd_val)
self.assertEqual([2000], gather_nd_t.get_shape())
def testHigherRankParamsAndIndices(self):
with self.session():
shape = (10, 20, 5, 1, 17)
params = np.random.rand(*shape)
indices = np.vstack([np.random.randint(0, s, size=2000) for s in shape]).T
indices_reshaped = indices.reshape([10, 10, 20, 5])
gather_nd_t = array_ops.gather_nd(params, indices_reshaped)
gather_nd_val = self.evaluate(gather_nd_t)
expected = params[tuple(indices.T)]
self.assertAllEqual(expected.reshape([10, 10, 20]), gather_nd_val)
self.assertEqual([10, 10, 20], gather_nd_t.get_shape())
def assertIndexedSlices(self, t):
self.assertIsInstance(t, indexed_slices.IndexedSlices)
@test_util.run_deprecated_v1
def testUnknownIndices(self):
params = constant_op.constant([[0, 1, 2]])
indices = array_ops.placeholder(dtypes.int32)
gather_nd_t = array_ops.gather_nd(params, indices)
shape = gather_nd_t.get_shape()
self.assertEqual(None, shape.ndims)
self.assertEqual(None, tensor_shape.dimension_value(shape[0]))
@test_util.run_deprecated_v1
@test_util.disable_xla("XLA does not have assertions in kernels.")
def testBadIndicesCPU(self):
with self.session(use_gpu=False):
params = [0, 1, 2]
indices = [[[0], [7]]] # Make this one higher rank
gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
r"indices\[0,1\] = \[7\] does not index into param shape \[3\]"):
self.evaluate(gather_nd)
@test_util.run_deprecated_v1
@test_util.disable_xla("XLA does not have assertions in kernels.")
def testBadIndicesCPUWithDefaultPolicy(self):
with self.session(use_gpu=False):
params = [0, 1, 2]
indices = [[[0], [7]]] # Make this one higher rank
gather_nd = array_ops.gather_nd(
params, indices, bad_indices_policy="DEFAULT"
)
with self.assertRaisesOpError(
r"indices\[0,1\] = \[7\] does not index into param shape \[3\]"
):
self.evaluate(gather_nd)
@test_util.run_deprecated_v1
@test_util.disable_xla("XLA does not have assertions in kernels.")
def testBadIndicesCPUWithExplicitErrorPolicy(self):
with self.session(use_gpu=False):
params = [0, 1, 2]
indices = [[[0], [7]]] # Make this one higher rank
gather_nd = array_ops.gather_nd(
params, indices, bad_indices_policy="ERROR"
)
with self.assertRaisesOpError(
r"indices\[0,1\] = \[7\] does not index into param shape \[3\]"
):
self.evaluate(gather_nd)
@test_util.disable_xla("XLA does not have assertions in kernels.")
def testBadIndicesCPUIgnore(self):
with self.session(use_gpu=False):
params = [10, 1, 2]
indices = [[0], [7], [1]] # Make this one higher rank
gather_nd = array_ops.gather_nd(
params, indices, bad_indices_policy="IGNORE"
)
gather_nd_val = self.evaluate(gather_nd)
self.assertAllEqual(gather_nd_val, [10, 0, 1])
def _disabledTestBadIndicesGPU(self):
# TODO disabled due to different behavior on GPU and CPU
# On GPU the bad indices do not raise error but fetch 0 values
if not test.is_gpu_available():
return
with self.session():
params = [0, 1, 2]
indices = [[[0], [7]]] # Make this one higher rank
gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
r"indices\[0,1\] = \[7\] does not index into param shape \[3\]"):
self.evaluate(gather_nd)
@test_util.run_deprecated_v1
@test_util.disable_xla("XLA does not have assertions in kernels.")
def testBadIndicesWithSlicesCPU(self):
with self.session(use_gpu=False):
params = [[0, 1, 2]]
indices = [[[0], [0], [1]]] # Make this one higher rank
gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
r"indices\[0,2\] = \[1\] does not index into param shape \[1,3\]"):
self.evaluate(gather_nd)
def _disabledTestBadIndicesWithSlicesGPU(self):
# TODO disabled due to different behavior on GPU and CPU
# On GPU the bad indices do not raise error but fetch 0 values
if not test.is_gpu_available():
return
with self.session():
params = [[0, 1, 2]]
indices = [[[0], [0], [1]]] # Make this one higher rank
gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
r"indices\[0,2\] = \[1\] does not index into param shape \[1,3\]"):
self.evaluate(gather_nd)
@test_util.run_deprecated_v1
def testGradientsRank2Elements(self):
indices = constant_op.constant([[0, 0], [1, 1]], dtype=dtypes.int32)
inputs = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant([1, 2], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array([[1, 0], [0, 2]], dtype=np.float64)
with self.session():
assert np.array_equal(expected_grads, self.evaluate(grads))
@test_util.run_deprecated_v1
def testGradientsRank2Slices(self):
indices = constant_op.constant([[1], [0]], dtype=dtypes.int32)
inputs = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array([[3, 4], [1, 2]], dtype=np.float64)
with self.session():
self.assertIndexedSlices(grads)
self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads))
@test_util.run_deprecated_v1
def testGradientsRank3Elements(self):
indices = constant_op.constant(
[[[0, 1], [1, 0]], [[0, 0], [1, 1]]], dtype=dtypes.int32)
inputs = constant_op.constant(
[[[1, 3], [5, 7]], [[2, 4], [6, 8]]], dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array(
[[[5, 6], [1, 2]], [[3, 4], [7, 8]]], dtype=np.float64)
with self.session():
self.assertAllEqual(expected_grads, self.evaluate(grads))
@test_util.run_deprecated_v1
def testGradientsRank7Elements(self):
# Shape [1,1,2,1,1,2,2]
indices = constant_op.constant(
[[[
[[[[0, 0, 0, 0, 0, 1], [0, 0, 1, 0, 0, 0]]]],
[[[[0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 1]]]]
]]],
dtype=dtypes.int32)
inputs = constant_op.constant(
[[[
[[[[1, 3], [5, 7]]]],
[[[[2, 4], [6, 8]]]]
]]], dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant(
[[[
[[[[1, 2], [3, 4]]]],
[[[[5, 6], [7, 8]]]]
]]], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array(
[[[
[[[[5, 6], [1, 2]]]],
[[[[3, 4], [7, 8]]]]
]]], dtype=np.float64)
with self.session():
self.assertAllEqual(expected_grads, self.evaluate(grads))
@test_util.run_deprecated_v1
def testGradientsInt64Indices(self):
indices = constant_op.constant(
[[[0, 1], [1, 0]], [[0, 0], [1, 1]]], dtype=dtypes.int64)
inputs = constant_op.constant(
[[[1, 3], [5, 7]], [[2, 4], [6, 8]]], dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array(
[[[5, 6], [1, 2]], [[3, 4], [7, 8]]], dtype=np.float64)
with self.session():
self.assertAllEqual(expected_grads, self.evaluate(grads))
@test_util.run_deprecated_v1
def testGradientsRank2SlicesWithEmptySpace(self):
indices = constant_op.constant([[2], [0], [5]], dtype=dtypes.int32)
inputs = constant_op.constant(
[[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9]],
dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant(
[[1, 1, 1, 1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2, 2, 2, 2],
[3, 3, 3, 3, 3, 3, 3, 3, 3]],
dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array(
[[2, 2, 2, 2, 2, 2, 2, 2, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0], [3, 3, 3, 3, 3, 3, 3, 3, 3]],
dtype=np.float64)
with self.session():
self.assertIndexedSlices(grads)
self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads))
@test_util.run_v1_only("RefVariable is not supported in v2")
def testGatherNdRefVariable(self):
with self.cached_session():
v = ref_variable.RefVariable(
constant_op.constant([[1, 2], [3, 4], [5, 6]]))
self.evaluate(variables.global_variables_initializer())
gather = array_ops.gather_nd(v, [[0, 1], [2, 0]])
if not context.executing_eagerly(): # .op doesn't make sense in Eager
self.assertEqual("GatherNd", gather.op.name)
self.assertAllEqual([2, 5], gather)
@test_util.run_in_graph_and_eager_modes
def testGatherNdResourceVariable(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(
constant_op.constant([[1, 2], [3, 4], [5, 6]]))
self.evaluate(variables.global_variables_initializer())
gather = array_ops.gather_nd(v, [[0, 1], [2, 0]])
if not context.executing_eagerly(): # .op doesn't make sense in Eager
self.assertEqual("ResourceGatherNd", gather.op.inputs[0].op.type)
self.assertAllEqual([2, 5], gather)
| GatherNdTest |
python | mlflow__mlflow | tests/langgraph/sample_code/langgraph_chat_agent.py | {
"start": 3404,
"end": 4762
} | class ____(ChatAgent):
def __init__(self, agent: CompiledStateGraph):
self.agent = agent
def predict(
self,
messages: list[ChatAgentMessage],
context: ChatContext | None = None,
custom_inputs: dict[str, Any] | None = None,
) -> ChatAgentResponse:
request = {"messages": self._convert_messages_to_dict(messages)}
messages = []
for event in self.agent.stream(request, stream_mode="updates"):
for node_data in event.values():
messages.extend(ChatAgentMessage(**msg) for msg in node_data.get("messages", []))
return ChatAgentResponse(messages=messages)
def predict_stream(
self,
messages: list[ChatAgentMessage],
context: ChatContext | None = None,
custom_inputs: dict[str, Any] | None = None,
) -> Generator[ChatAgentChunk, None, None]:
request = {"messages": self._convert_messages_to_dict(messages)}
for event in self.agent.stream(request, stream_mode="updates"):
for node_data in event.values():
yield from (ChatAgentChunk(**{"delta": msg}) for msg in node_data["messages"])
mlflow.langchain.autolog()
llm = FakeOpenAI()
graph = create_tool_calling_agent(llm, tools)
chat_agent = LangGraphChatAgent(graph)
mlflow.models.set_model(chat_agent)
| LangGraphChatAgent |
python | allegroai__clearml | clearml/backend_api/services/v2_23/events.py | {
"start": 144001,
"end": 146003
} | class ____(Response):
"""
Response of events.get_task_single_value_metrics endpoint.
:param tasks: Single value metrics grouped by task
:type tasks: Sequence[dict]
"""
_service = "events"
_action = "get_task_single_value_metrics"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"tasks": {
"description": "Single value metrics grouped by task",
"items": {
"properties": {
"task": {"description": "Task ID", "type": "string"},
"values": {
"items": {
"properties": {
"metric": {"type": "string"},
"timestamp": {"type": "number"},
"value": {"type": "number"},
"variant": {"type": "string"},
},
"type": "object",
},
"type": "array",
},
},
"type": "object",
},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, tasks: Optional[List[dict]] = None, **kwargs: Any) -> None:
super(GetTaskSingleValueMetricsResponse, self).__init__(**kwargs)
self.tasks = tasks
@schema_property("tasks")
def tasks(self) -> Optional[List[dict]]:
return self._property_tasks
@tasks.setter
def tasks(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_tasks = None
return
self.assert_isinstance(value, "tasks", (list, tuple))
self.assert_isinstance(value, "tasks", (dict,), is_array=True)
self._property_tasks = value
| GetTaskSingleValueMetricsResponse |
python | walkccc__LeetCode | solutions/314. Binary Tree Vertical Order Traversal/314.py | {
"start": 0,
"end": 770
} | class ____:
def verticalOrder(self, root: TreeNode | None) -> list[list[int]]:
if not root:
return []
range_ = [0] * 2
def getRange(root: TreeNode | None, x: int) -> None:
if not root:
return
range_[0] = min(range_[0], x)
range_[1] = max(range_[1], x)
getRange(root.left, x - 1)
getRange(root.right, x + 1)
getRange(root, 0) # Get the leftmost and the rightmost x index.
ans = [[] for _ in range(range_[1] - range_[0] + 1)]
q = collections.deque([(root, -range_[0])]) # (TreeNode, x)
while q:
node, x = q.popleft()
ans[x].append(node.val)
if node.left:
q.append((node.left, x - 1))
if node.right:
q.append((node.right, x + 1))
return ans
| Solution |
python | marshmallow-code__marshmallow | tests/test_options.py | {
"start": 73,
"end": 343
} | class ____(Schema):
name = fields.String(allow_none=True)
email = fields.Email(allow_none=True)
age = fields.Integer()
created = fields.DateTime()
id = fields.Integer(allow_none=True)
homepage = fields.Url()
birthdate = fields.Date()
| UserSchema |
python | getsentry__sentry | src/sentry/dynamic_sampling/rules/biases/base.py | {
"start": 296,
"end": 554
} | class ____(ABC):
"""
Base class representing the generator of rules connected to a bias.
"""
@abstractmethod
def generate_rules(self, project: Project, base_sample_rate: float) -> list[PolymorphicRule]:
raise NotImplementedError
| Bias |
python | keras-team__keras | keras/src/layers/activations/relu_test.py | {
"start": 112,
"end": 2915
} | class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_relu(self):
self.run_layer_test(
relu.ReLU,
init_kwargs={
"max_value": 10,
"negative_slope": 1,
"threshold": 0.5,
},
input_shape=(2, 3, 4),
supports_masking=True,
assert_built_after_instantiation=True,
)
def test_normal_relu_correctness(self):
relu_layer = relu.ReLU(max_value=10, negative_slope=0.0, threshold=0)
input = np.array([-10, -5, 0.0, 5, 10])
expected_output = np.array([0.0, 0.0, 0.0, 5.0, 10.0])
result = relu_layer(input)
self.assertAllClose(result, expected_output)
def test_leaky_relu_correctness(self):
relu_layer = relu.ReLU(max_value=10, negative_slope=0.5, threshold=0)
input = np.array([-10, -5, 0.0, 5, 10])
expected_output = np.array([-5.0, -2.5, 0.0, 5.0, 10.0])
result = relu_layer(input)
self.assertAllClose(result, expected_output)
def test_threshold_relu_correctness(self):
relu_layer = relu.ReLU(max_value=8, negative_slope=0.0, threshold=5)
input = np.array([6.0, 7.0, 0.0, 5, 10])
expected_output = np.array([6.0, 7.0, 0.0, 0.0, 8.0])
result = relu_layer(input)
self.assertAllClose(result, expected_output)
def test_invalid_usage(self):
with self.assertRaisesRegex(
ValueError,
"max_value of a ReLU layer cannot be a negative value",
):
self.run_layer_test(
relu.ReLU,
init_kwargs={
"max_value": -10,
"negative_slope": 1,
"threshold": 0.5,
},
input_shape=(2, 3, 4),
supports_masking=True,
)
with self.assertRaisesRegex(
ValueError,
"negative_slope of a ReLU layer cannot be a negative value",
):
self.run_layer_test(
relu.ReLU,
init_kwargs={
"max_value": 10,
"negative_slope": -10,
"threshold": 0.5,
},
input_shape=(2, 3, 4),
supports_masking=True,
)
with self.assertRaisesRegex(
ValueError, "threshold of a ReLU layer cannot be a negative value"
):
self.run_layer_test(
relu.ReLU,
init_kwargs={
"max_value": 10,
"negative_slope": 1,
"threshold": -10,
},
input_shape=(2, 3, 4),
supports_masking=True,
)
| ReLUTest |
python | RaRe-Technologies__gensim | gensim/test/test_scripts.py | {
"start": 767,
"end": 4208
} | class ____(unittest.TestCase):
def setUp(self):
self.fname = datapath('enwiki-latest-pages-articles1.xml-p000000010p000030302-shortened.bz2')
self.expected_title = 'Anarchism'
self.expected_section_titles = [
'Introduction',
'Etymology and terminology',
'History',
'Anarchist schools of thought',
'Internal issues and debates',
'Topics of interest',
'Criticisms',
'References',
'Further reading',
'External links'
]
def tearDown(self):
# remove all temporary test files
fname = get_tmpfile('script.tst')
extensions = ['', '.json']
for ext in extensions:
try:
os.remove(fname + ext)
except OSError:
pass
def test_segment_all_articles(self):
title, sections, interlinks = next(segment_all_articles(self.fname, include_interlinks=True))
# Check title
self.assertEqual(title, self.expected_title)
# Check section titles
section_titles = [s[0] for s in sections]
self.assertEqual(section_titles, self.expected_section_titles)
# Check text
first_section_text = sections[0][1]
first_sentence = "'''Anarchism''' is a political philosophy that advocates self-governed societies"
self.assertTrue(first_sentence in first_section_text)
# Check interlinks
self.assertEqual(len(interlinks), 685)
self.assertTrue(interlinks[0] == ("political philosophy", "political philosophy"))
self.assertTrue(interlinks[1] == ("self-governance", "self-governed"))
self.assertTrue(interlinks[2] == ("stateless society", "stateless societies"))
def test_generator_len(self):
expected_num_articles = 106
num_articles = sum(1 for x in segment_all_articles(self.fname))
self.assertEqual(num_articles, expected_num_articles)
def test_json_len(self):
tmpf = get_tmpfile('script.tst.json')
segment_and_write_all_articles(self.fname, tmpf, workers=1)
expected_num_articles = 106
with utils.open(tmpf, 'rb') as f:
num_articles = sum(1 for line in f)
self.assertEqual(num_articles, expected_num_articles)
def test_segment_and_write_all_articles(self):
tmpf = get_tmpfile('script.tst.json')
segment_and_write_all_articles(self.fname, tmpf, workers=1, include_interlinks=True)
# Get the first line from the text file we created.
with open(tmpf) as f:
first = next(f)
# decode JSON line into a Python dictionary object
article = json.loads(first)
title, section_titles, interlinks = article['title'], article['section_titles'], article['interlinks']
self.assertEqual(title, self.expected_title)
self.assertEqual(section_titles, self.expected_section_titles)
# Check interlinks
# JSON has no tuples, only lists. So, we convert lists to tuples explicitly before comparison.
self.assertEqual(len(interlinks), 685)
self.assertEqual(tuple(interlinks[0]), ("political philosophy", "political philosophy"))
self.assertEqual(tuple(interlinks[1]), ("self-governance", "self-governed"))
self.assertEqual(tuple(interlinks[2]), ("stateless society", "stateless societies"))
| TestSegmentWiki |
python | pytorch__pytorch | test/test_datapipe.py | {
"start": 109144,
"end": 110042
} | class ____(TestCase):
@skipIfNoDill
def test_spawn_lambdas_iter(self):
idp = dp.iter.IterableWrapper(range(3)).map(lambda x: x + 1).shuffle()
dl = DataLoader(
idp,
num_workers=2,
shuffle=True,
multiprocessing_context="spawn",
collate_fn=unbatch,
batch_size=1,
)
result = list(dl)
self.assertEqual([1, 1, 2, 2, 3, 3], sorted(result))
@skipIfNoDill
def test_spawn_lambdas_map(self):
mdp = dp.map.SequenceWrapper(range(3)).map(lambda x: x + 1).shuffle()
dl = DataLoader(
mdp,
num_workers=2,
shuffle=True,
multiprocessing_context="spawn",
collate_fn=unbatch,
batch_size=1,
)
result = list(dl)
self.assertEqual([1, 1, 2, 2, 3, 3], sorted(result))
| TestSerialization |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 60261,
"end": 62117
} | class ____(TypedDict, total=False):
type: Required[Literal['set']]
items_schema: CoreSchema
min_length: int
max_length: int
fail_fast: bool
strict: bool
ref: str
metadata: dict[str, Any]
serialization: SerSchema
def set_schema(
items_schema: CoreSchema | None = None,
*,
min_length: int | None = None,
max_length: int | None = None,
fail_fast: bool | None = None,
strict: bool | None = None,
ref: str | None = None,
metadata: dict[str, Any] | None = None,
serialization: SerSchema | None = None,
) -> SetSchema:
"""
Returns a schema that matches a set of a given schema, e.g.:
```py
from pydantic_core import SchemaValidator, core_schema
schema = core_schema.set_schema(
items_schema=core_schema.int_schema(), min_length=0, max_length=10
)
v = SchemaValidator(schema)
assert v.validate_python({1, '2', 3}) == {1, 2, 3}
```
Args:
items_schema: The value must be a set with items that match this schema
min_length: The value must be a set with at least this many items
max_length: The value must be a set with at most this many items
fail_fast: Stop validation on the first error
strict: The value must be a set with exactly this many items
ref: optional unique identifier of the schema, used to reference the schema in other places
metadata: Any other information you want to include with the schema, not used by pydantic-core
serialization: Custom serialization schema
"""
return _dict_not_none(
type='set',
items_schema=items_schema,
min_length=min_length,
max_length=max_length,
fail_fast=fail_fast,
strict=strict,
ref=ref,
metadata=metadata,
serialization=serialization,
)
| SetSchema |
python | nedbat__coveragepy | coverage/report.py | {
"start": 652,
"end": 10817
} | class ____:
"""A reporter for writing the summary report."""
def __init__(self, coverage: Coverage) -> None:
self.coverage = coverage
self.config = self.coverage.config
self.branches = coverage.get_data().has_arcs()
self.outfile: IO[str] | None = None
self.output_format = self.config.format or "text"
if self.output_format not in {"text", "markdown", "total"}:
raise ConfigError(f"Unknown report format choice: {self.output_format!r}")
self.fr_analyses: list[tuple[FileReporter, Analysis]] = []
self.skipped_count = 0
self.empty_count = 0
self.total = Numbers(precision=self.config.precision)
def write(self, line: str) -> None:
"""Write a line to the output, adding a newline."""
assert self.outfile is not None
self.outfile.write(line.rstrip())
self.outfile.write("\n")
def write_items(self, items: Iterable[str]) -> None:
"""Write a list of strings, joined together."""
self.write("".join(items))
def report_text(
self,
header: list[str],
lines_values: list[list[Any]],
total_line: list[Any],
end_lines: list[str],
) -> None:
"""Internal method that prints report data in text format.
`header` is a list with captions.
`lines_values` is list of lists of sortable values.
`total_line` is a list with values of the total line.
`end_lines` is a list of ending lines with information about skipped files.
"""
# Prepare the formatting strings, header, and column sorting.
max_name = max([len(line[0]) for line in lines_values] + [5]) + 1
max_n = max(len(total_line[header.index("Cover")]) + 2, len(" Cover")) + 1
max_n = max([max_n] + [len(line[header.index("Cover")]) + 2 for line in lines_values])
formats = dict(
Name="{:{name_len}}",
Stmts="{:>7}",
Miss="{:>7}",
Branch="{:>7}",
BrPart="{:>7}",
Cover="{:>{n}}",
Missing="{:>10}",
)
header_items = [formats[item].format(item, name_len=max_name, n=max_n) for item in header]
header_str = "".join(header_items)
rule = "-" * len(header_str)
# Write the header
self.write(header_str)
self.write(rule)
# Write the data lines
formats.update(
dict(
Cover="{:>{n}}%",
Missing=" {:9}",
)
)
for values in lines_values:
self.write_items(
(
formats[item].format(str(value), name_len=max_name, n=max_n - 1)
for item, value in zip(header, values)
)
)
# Write a TOTAL line
if lines_values:
self.write(rule)
self.write_items(
(
formats[item].format(str(value), name_len=max_name, n=max_n - 1)
for item, value in zip(header, total_line)
)
)
for end_line in end_lines:
self.write(end_line)
def report_markdown(
self,
header: list[str],
lines_values: list[list[Any]],
total_line: list[Any],
end_lines: list[str],
) -> None:
"""Internal method that prints report data in markdown format.
`header` is a list with captions.
`lines_values` is a sorted list of lists containing coverage information.
`total_line` is a list with values of the total line.
`end_lines` is a list of ending lines with information about skipped files.
"""
# Prepare the formatting strings, header, and column sorting.
max_name = max((len(line[0].replace("_", "\\_")) for line in lines_values), default=0)
max_name = max(max_name, len("**TOTAL**")) + 1
formats = dict(
Name="| {:{name_len}}|",
Stmts="{:>9} |",
Miss="{:>9} |",
Branch="{:>9} |",
BrPart="{:>9} |",
Cover="{:>{n}} |",
Missing="{:>10} |",
)
max_n = max(len(total_line[header.index("Cover")]) + 6, len(" Cover "))
header_items = [formats[item].format(item, name_len=max_name, n=max_n) for item in header]
header_str = "".join(header_items)
rule_str = "|" + " ".join(
["- |".rjust(len(header_items[0]) - 1, "-")]
+ ["-: |".rjust(len(item) - 1, "-") for item in header_items[1:]],
)
# Write the header
self.write(header_str)
self.write(rule_str)
# Write the data lines
for values in lines_values:
formats.update(
dict(
Cover="{:>{n}}% |",
)
)
self.write_items(
(
formats[item].format(
str(value).replace("_", "\\_"), name_len=max_name, n=max_n - 1
)
for item, value in zip(header, values)
)
)
# Write the TOTAL line
formats.update(
dict(
Name="|{:{name_len}} |",
Cover="{:>{n}} |",
),
)
total_line_items: list[str] = []
for item, value in zip(header, total_line):
if value == "":
insert = value
elif item == "Cover":
insert = f" **{value}%**"
else:
insert = f" **{value}**"
total_line_items += formats[item].format(insert, name_len=max_name, n=max_n)
self.write_items(total_line_items)
for end_line in end_lines:
self.write(end_line)
def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str] | None = None) -> float:
"""Writes a report summarizing coverage statistics per module.
`outfile` is a text-mode file object to write the summary to.
"""
self.outfile = outfile or sys.stdout
self.coverage.get_data().set_query_contexts(self.config.report_contexts)
for fr, analysis in get_analysis_to_report(self.coverage, morfs):
self.report_one_file(fr, analysis)
if not self.total.n_files and not self.skipped_count:
raise NoDataError("No data to report.")
if self.output_format == "total":
self.write(self.total.pc_covered_str)
else:
self.tabular_report()
return self.total.pc_covered
def tabular_report(self) -> None:
"""Writes tabular report formats."""
# Prepare the header line and column sorting.
header = ["Name", "Stmts", "Miss"]
if self.branches:
header += ["Branch", "BrPart"]
header += ["Cover"]
if self.config.show_missing:
header += ["Missing"]
column_order = dict(name=0, stmts=1, miss=2, cover=-1)
if self.branches:
column_order.update(dict(branch=3, brpart=4))
# `lines_values` is list of lists of sortable values.
lines_values = []
for fr, analysis in self.fr_analyses:
nums = analysis.numbers
args = [fr.relative_filename(), nums.n_statements, nums.n_missing]
if self.branches:
args += [nums.n_branches, nums.n_partial_branches]
args += [nums.pc_covered_str]
if self.config.show_missing:
args += [analysis.missing_formatted(branches=True)]
args += [nums.pc_covered]
lines_values.append(args)
# Line sorting.
sort_option = (self.config.sort or "name").lower()
reverse = False
if sort_option[0] == "-":
reverse = True
sort_option = sort_option[1:]
elif sort_option[0] == "+":
sort_option = sort_option[1:]
sort_idx = column_order.get(sort_option)
if sort_idx is None:
raise ConfigError(f"Invalid sorting option: {self.config.sort!r}")
if sort_option == "name":
lines_values = human_sorted_items(lines_values, reverse=reverse)
else:
lines_values.sort(
key=lambda line: (line[sort_idx], line[0]),
reverse=reverse,
)
# Calculate total if we had at least one file.
total_line = ["TOTAL", self.total.n_statements, self.total.n_missing]
if self.branches:
total_line += [self.total.n_branches, self.total.n_partial_branches]
total_line += [self.total.pc_covered_str]
if self.config.show_missing:
total_line += [""]
# Create other final lines.
end_lines = []
if self.config.skip_covered and self.skipped_count:
files = plural(self.skipped_count, "file")
end_lines.append(
f"\n{self.skipped_count} {files} skipped due to complete coverage.",
)
if self.config.skip_empty and self.empty_count:
files = plural(self.empty_count, "file")
end_lines.append(f"\n{self.empty_count} empty {files} skipped.")
if self.output_format == "markdown":
formatter = self.report_markdown
else:
formatter = self.report_text
formatter(header, lines_values, total_line, end_lines)
def report_one_file(self, fr: FileReporter, analysis: Analysis) -> None:
"""Report on just one file, the callback from report()."""
nums = analysis.numbers
self.total += nums
no_missing_lines = (nums.n_missing == 0) # fmt: skip
no_missing_branches = (nums.n_partial_branches == 0) # fmt: skip
if self.config.skip_covered and no_missing_lines and no_missing_branches:
# Don't report on 100% files.
self.skipped_count += 1
elif self.config.skip_empty and nums.n_statements == 0:
# Don't report on empty files.
self.empty_count += 1
else:
self.fr_analyses.append((fr, analysis))
| SummaryReporter |
python | huggingface__transformers | src/transformers/models/olmoe/modular_olmoe.py | {
"start": 1585,
"end": 1711
} | class ____(LlamaRMSNorm):
def __init__(self, hidden_size, eps=1e-5):
super().__init__(hidden_size, eps)
| OlmoeRMSNorm |
python | google__pytype | pytype/overlays/typing_overlay.py | {
"start": 22746,
"end": 26714
} | class ____(abstract.SimpleValue):
"""Minimal implementation of typing.dataclass_transform."""
def __init__(self, ctx):
super().__init__("<dataclass_transform>", ctx)
def call(self, node, func, args, alias_map=None):
del func, alias_map # unused
arg = args.posargs[0]
for d in arg.data:
if isinstance(d, abstract.Function):
d.decorators.append("typing.dataclass_transform")
elif isinstance(d, abstract.Class):
d.decorators.append("typing.dataclass_transform")
d.metadata["__dataclass_transform__"] = True
elif isinstance(d, abstract.AMBIGUOUS_OR_EMPTY):
pass
else:
message = "Can only apply dataclass_transform to a class or function."
self.ctx.errorlog.dataclass_error(self.ctx.vm.frames, details=message)
return node, arg
def build_any(ctx):
return ctx.convert.unsolvable
def build_never(ctx):
return ctx.convert.never
def build_typechecking(ctx):
return ctx.convert.true
def get_re_builder(member):
def build_re_member(ctx, module):
del module # unused
pyval = ctx.loader.lookup_pytd("re", member)
return ctx.convert.constant_to_value(pyval)
return build_re_member
# name -> lowest_supported_version
_unsupported_members = {
"LiteralString": (3, 11),
"TypeVarTuple": (3, 11),
"Unpack": (3, 11),
}
# name -> (builder, lowest_supported_version)
typing_overlay = {
"Annotated": (_builder("Annotated", Annotated), (3, 9)),
"Any": (overlay.drop_module(build_any), None),
"Callable": (_builder("Callable", Callable), None),
"Concatenate": (_builder("Concatenate", Concatenate), None),
"final": (FinalDecorator.make, (3, 8)),
"Final": (_builder("Final", Final), (3, 8)),
"ForwardRef": (ForwardRef, None),
"Generic": (_builder("Generic", Generic), None),
"Literal": (_builder("Literal", Literal), (3, 8)),
"Match": (get_re_builder("Match"), None),
"NamedTuple": (named_tuple.NamedTupleClassBuilder, None),
"Never": (overlay.drop_module(build_never), (3, 11)),
"NewType": (overlay.add_name("NewType", NewType.make), None),
"NoReturn": (overlay.drop_module(build_never), None),
"NotRequired": (_builder("NotRequired", typed_dict.NotRequired), (3, 11)),
"Optional": (_builder("Optional", Optional), None),
"ParamSpec": (ParamSpec.make, (3, 10)),
"Pattern": (get_re_builder("Pattern"), None),
"Required": (_builder("Required", typed_dict.Required), (3, 11)),
"Self": (_builder_from_name("Self"), (3, 11)),
"Tuple": (_builder("Tuple", Tuple), None),
"TypeGuard": (_builder_from_name("TypeGuard"), (3, 10)),
"TypeIs": (_builder_from_name("TypeIs"), (3, 13)),
"TypeVar": (TypeVar.make, None),
"TypedDict": (overlay.drop_module(typed_dict.TypedDictBuilder), (3, 8)),
"Union": (overlay.drop_module(Union), None),
"TYPE_CHECKING": (overlay.drop_module(build_typechecking), None),
"assert_never": (_builder_from_name("assert_never"), (3, 11)),
"assert_type": (
overlay.add_name("assert_type", special_builtins.AssertType.make_alias),
(3, 11),
),
"cast": (overlay.add_name("cast", Cast.make), None),
"clear_overloads": (_builder_from_name("clear_overloads"), (3, 11)),
"dataclass_transform": (
overlay.add_name("dataclass_transform", DataclassTransformBuilder.make),
(3, 11),
),
"get_overloads": (_builder_from_name("get_overloads"), (3, 11)),
"is_typeddict": (
overlay.add_name("is_typeddict", typed_dict.IsTypedDict.make),
(3, 10),
),
"overload": (overlay.add_name("overload", Overload.make), None),
"override": (_builder_from_name("override"), (3, 12)),
"reveal_type": (
overlay.add_name("reveal_type", special_builtins.RevealType.make_alias),
(3, 11),
),
**{
k: (overlay.add_name(k, overlay_utils.not_supported_yet), v)
for k, v in _unsupported_members.items()
},
}
| DataclassTransform |
python | huggingface__transformers | tests/models/tvp/test_modeling_tvp.py | {
"start": 1466,
"end": 6404
} | class ____:
def __init__(
self,
parent,
batch_size=1,
seq_length=2,
alpha=1.0,
beta=0.1,
visual_prompter_type="framepad",
visual_prompter_apply="replace",
num_frames=2,
max_img_size=448,
visual_prompt_size=96,
vocab_size=100,
hidden_size=32,
intermediate_size=32,
num_hidden_layers=2,
num_attention_heads=4,
max_position_embeddings=30,
max_grid_col_position_embeddings=30,
max_grid_row_position_embeddings=30,
hidden_dropout_prob=0.1,
hidden_act="gelu",
layer_norm_eps=1e-12,
initializer_range=0.02,
pad_token_id=0,
type_vocab_size=2,
attention_probs_dropout_prob=0.1,
):
self.parent = parent
self.batch_size = batch_size
self.input_id_length = seq_length
self.seq_length = seq_length + 10 + 784 # include text prompt length and visual input length
self.alpha = alpha
self.beta = beta
self.visual_prompter_type = visual_prompter_type
self.visual_prompter_apply = visual_prompter_apply
self.num_frames = num_frames
self.max_img_size = max_img_size
self.visual_prompt_size = visual_prompt_size
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.max_grid_col_position_embeddings = max_grid_col_position_embeddings
self.max_grid_row_position_embeddings = max_grid_row_position_embeddings
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.pad_token_id = pad_token_id
self.type_vocab_size = type_vocab_size
self.is_training = False
self.num_channels = 3
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.input_id_length], self.vocab_size)
attention_mask = random_attention_mask([self.batch_size, self.input_id_length])
pixel_values = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.max_img_size, self.max_img_size]
)
config = self.get_config()
return (config, input_ids, pixel_values, attention_mask)
def get_config(self):
resnet_config = ResNetConfig(
num_channels=3,
embeddings_size=64,
hidden_sizes=[64, 128],
depths=[2, 2],
hidden_act="relu",
out_features=["stage2"],
out_indices=[2],
)
return TvpConfig(
backbone_config=resnet_config,
backbone=None,
alpha=self.alpha,
beta=self.beta,
visual_prompter_type=self.visual_prompter_type,
visual_prompter_apply=self.visual_prompter_apply,
num_frames=self.num_frames,
max_img_size=self.max_img_size,
visual_prompt_size=self.visual_prompt_size,
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
max_grid_col_position_embeddings=self.max_grid_col_position_embeddings,
max_grid_row_position_embeddings=self.max_grid_row_position_embeddings,
layer_norm_eps=self.layer_norm_eps,
initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
type_vocab_size=self.type_vocab_size,
)
def create_and_check_model(self, config, input_ids, pixel_values, attention_mask):
model = TvpModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, pixel_values, attention_mask)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, pixel_values, attention_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "pixel_values": pixel_values, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
| TVPModelTester |
python | django-compressor__django-compressor | compressor/filters/datauri.py | {
"start": 1638,
"end": 1825
} | class ____(DataUriFilter):
"""Filter for embedding media as data: URIs in CSS files.
See DataUriFilter.
"""
url_patterns = (re.compile(r"url\(([^\)]+)\)"),)
| CssDataUriFilter |
python | PyCQA__pylint | tests/functional/u/useless/useless_parent_delegation.py | {
"start": 12822,
"end": 12914
} | class ____(Super):
def __init__(self, a, *args):
super().__init__(a, *args)
| SubTwo |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_image08.py | {
"start": 315,
"end": 898
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("image08.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image(
"B3", self.image_dir + "grey.png", {"x_scale": 0.5, "y_scale": 0.5}
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | numba__numba | numba/core/removerefctpass.py | {
"start": 174,
"end": 3398
} | class ____(CallVisitor):
"""
A pass to mark all NRT_incref and NRT_decref.
"""
def __init__(self):
self.marked = set()
def visit_Call(self, instr):
if getattr(instr.callee, 'name', '') in _accepted_nrtfns:
self.marked.add(instr)
def _rewrite_function(function):
# Mark NRT usage
markpass = _MarkNrtCallVisitor()
markpass.visit_Function(function)
# Remove NRT usage
for bb in function.basic_blocks:
for inst in list(bb.instructions):
if inst in markpass.marked:
bb.instructions.remove(inst)
_accepted_nrtfns = 'NRT_incref', 'NRT_decref'
def _legalize(module, dmm, fndesc):
"""
Legalize the code in the module.
Returns True if the module is legal for the rewrite pass that removes
unnecessary refcounts.
"""
def valid_output(ty):
"""
Valid output are any type that does not need refcount
"""
model = dmm[ty]
return not model.contains_nrt_meminfo()
def valid_input(ty):
"""
Valid input are any type that does not need refcount except Array.
"""
return valid_output(ty) or isinstance(ty, types.Array)
# Ensure no reference to function marked as
# "numba_args_may_always_need_nrt"
try:
nmd = module.get_named_metadata("numba_args_may_always_need_nrt")
except KeyError:
# Nothing marked
pass
else:
# Has functions marked as "numba_args_may_always_need_nrt"
if len(nmd.operands) > 0:
# The pass is illegal for this compilation unit.
return False
# More legalization base on function type
argtypes = fndesc.argtypes
restype = fndesc.restype
calltypes = fndesc.calltypes
# Legalize function arguments
for argty in argtypes:
if not valid_input(argty):
return False
# Legalize function return
if not valid_output(restype):
return False
# Legalize all called functions
for callty in calltypes.values():
if callty is not None and not valid_output(callty.return_type):
return False
# Ensure no allocation
for fn in module.functions:
if fn.name.startswith("NRT_"):
if fn.name not in _accepted_nrtfns:
return False
return True
def remove_unnecessary_nrt_usage(function, context, fndesc):
"""
Remove unnecessary NRT incref/decref in the given LLVM function.
It uses highlevel type info to determine if the function does not need NRT.
Such a function does not:
- return array object(s);
- take arguments that need refcounting except array;
- call function(s) that return refcounted object.
In effect, the function will not capture or create references that extend
the lifetime of any refcounted objects beyond the lifetime of the function.
The rewrite is performed in place.
If rewrite has happened, this function returns True, otherwise, it returns False.
"""
dmm = context.data_model_manager
if _legalize(function.module, dmm, fndesc):
_rewrite_function(function)
return True
else:
return False
| _MarkNrtCallVisitor |
python | astropy__astropy | astropy/cosmology/_src/tests/io/test_connect.py | {
"start": 4495,
"end": 6749
} | class ____(ReadWriteTestMixin):
"""Test the classes CosmologyRead/Write."""
@pytest.fixture(scope="class", params=cosmo_instances)
def cosmo(self, request):
return getattr(cosmology.realizations, request.param)
@pytest.fixture(scope="class")
def cosmo_cls(self, cosmo):
return cosmo.__class__
# ==============================================================
@pytest.mark.parametrize("format, _, has_deps", readwrite_formats)
def test_write_methods_have_explicit_kwarg_overwrite(self, format, _, has_deps):
if not has_deps:
pytest.skip("missing a dependency")
if (format, Cosmology) not in readwrite_registry._readers:
pytest.xfail(f"no read method is registered for format {format!r}")
writer = readwrite_registry.get_writer(format, Cosmology)
# test in signature
sig = inspect.signature(writer)
assert "overwrite" in sig.parameters
# also in docstring
if not sys.flags.optimize:
assert "overwrite : bool" in writer.__doc__
@pytest.mark.parametrize("format, _, has_deps", readwrite_formats)
def test_readwrite_reader_class_mismatch(
self, cosmo, tmp_path, format, _, has_deps
):
"""Test when the reader class doesn't match the file."""
if not has_deps:
pytest.skip("missing a dependency")
if (format, Cosmology) not in readwrite_registry._readers:
pytest.xfail(f"no read method is registered for format {format!r}")
fname = tmp_path / f"{cosmo.name}.{format}"
cosmo.write(fname, format=format)
# class mismatch
# when reading directly
with pytest.raises(TypeError, match="missing 1 required"):
w0wzCDM.read(fname, format=format)
with pytest.raises(TypeError, match="missing 1 required"):
Cosmology.read(fname, format=format, cosmology=w0wzCDM)
# when specifying the class
with pytest.raises(ValueError, match="`cosmology` must be either"):
w0wzCDM.read(fname, format=format, cosmology="FlatLambdaCDM")
###############################################################################
# To/From_Format Tests
| TestCosmologyReadWrite |
python | kamyu104__LeetCode-Solutions | Python/construct-the-lexicographically-largest-valid-sequence.py | {
"start": 30,
"end": 900
} | class ____(object):
def constructDistancedSequence(self, n):
"""
:type n: int
:rtype: List[int]
"""
def backtracking(n, i, result, lookup):
if i == len(result):
return True
if result[i]:
return backtracking(n, i+1, result, lookup)
for x in reversed(xrange(1, n+1)):
j = i if x == 1 else i+x
if lookup[x] or j >= len(result) or result[j]:
continue
result[i], result[j], lookup[x] = x, x, True
if backtracking(n, i+1, result, lookup):
return True
result[i], result[j], lookup[x] = 0, 0, False
return False
result, lookup = [0]*(2*n-1), [False]*(n+1)
backtracking(n, 0, result, lookup)
return result
| Solution |
python | python__mypy | mypy/build.py | {
"start": 120968,
"end": 148810
} | class ____:
"""Some info about a node in the graph of SCCs."""
def __init__(self, index: int, scc: list[str]) -> None:
self.node_id = "n%d" % index
self.scc = scc
self.sizes: dict[str, int] = {} # mod -> size in bytes
self.deps: dict[str, int] = {} # node_id -> pri
def dumps(self) -> str:
"""Convert to JSON string."""
total_size = sum(self.sizes.values())
return "[{}, {}, {},\n {},\n {}]".format(
json.dumps(self.node_id),
json.dumps(total_size),
json.dumps(self.scc),
json.dumps(self.sizes),
json.dumps(self.deps),
)
def dump_timing_stats(path: str, graph: Graph) -> None:
"""Dump timing stats for each file in the given graph."""
with open(path, "w") as f:
for id in sorted(graph):
f.write(f"{id} {graph[id].time_spent_us}\n")
def dump_line_checking_stats(path: str, graph: Graph) -> None:
"""Dump per-line expression type checking stats."""
with open(path, "w") as f:
for id in sorted(graph):
if not graph[id].per_line_checking_time_ns:
continue
f.write(f"{id}:\n")
for line in sorted(graph[id].per_line_checking_time_ns):
line_time = graph[id].per_line_checking_time_ns[line]
f.write(f"{line:>5} {line_time/1000:8.1f}\n")
def dump_graph(graph: Graph, stdout: TextIO | None = None) -> None:
"""Dump the graph as a JSON string to stdout.
This copies some of the work by process_graph()
(sorted_components() and order_ascc()).
"""
stdout = stdout or sys.stdout
nodes = []
sccs = sorted_components(graph)
for i, ascc in enumerate(sccs):
scc = order_ascc(graph, ascc.mod_ids)
node = NodeInfo(i, scc)
nodes.append(node)
inv_nodes = {} # module -> node_id
for node in nodes:
for mod in node.scc:
inv_nodes[mod] = node.node_id
for node in nodes:
for mod in node.scc:
state = graph[mod]
size = 0
if state.path:
try:
size = os.path.getsize(state.path)
except OSError:
pass
node.sizes[mod] = size
for dep in state.dependencies:
if dep in state.priorities:
pri = state.priorities[dep]
if dep in inv_nodes:
dep_id = inv_nodes[dep]
if dep_id != node.node_id and (
dep_id not in node.deps or pri < node.deps[dep_id]
):
node.deps[dep_id] = pri
print("[" + ",\n ".join(node.dumps() for node in nodes) + "\n]", file=stdout)
def load_graph(
sources: list[BuildSource],
manager: BuildManager,
old_graph: Graph | None = None,
new_modules: list[State] | None = None,
) -> Graph:
"""Given some source files, load the full dependency graph.
If an old_graph is passed in, it is used as the starting point and
modified during graph loading.
If a new_modules is passed in, any modules that are loaded are
added to the list. This is an argument and not a return value
so that the caller can access it even if load_graph fails.
As this may need to parse files, this can raise CompileError in case
there are syntax errors.
"""
graph: Graph = old_graph if old_graph is not None else {}
# The deque is used to implement breadth-first traversal.
# TODO: Consider whether to go depth-first instead. This may
# affect the order in which we process files within import cycles.
new = new_modules if new_modules is not None else []
entry_points: set[str] = set()
# Seed the graph with the initial root sources.
for bs in sources:
try:
st = State(
id=bs.module,
path=bs.path,
source=bs.text,
manager=manager,
root_source=not bs.followed,
)
except ModuleNotFound:
continue
if st.id in graph:
manager.errors.set_file(st.xpath, st.id, manager.options)
manager.errors.report(
-1,
-1,
f'Duplicate module named "{st.id}" (also at "{graph[st.id].xpath}")',
blocker=True,
)
manager.errors.report(
-1,
-1,
"See https://mypy.readthedocs.io/en/stable/running_mypy.html#mapping-file-paths-to-modules "
"for more info",
severity="note",
)
manager.errors.report(
-1,
-1,
"Common resolutions include: a) using `--exclude` to avoid checking one of them, "
"b) adding `__init__.py` somewhere, c) using `--explicit-package-bases` or "
"adjusting MYPYPATH",
severity="note",
)
manager.errors.raise_error()
graph[st.id] = st
new.append(st)
entry_points.add(bs.module)
# Note: Running this each time could be slow in the daemon. If it's a problem, we
# can do more work to maintain this incrementally.
seen_files = {st.abspath: st for st in graph.values() if st.path}
# Collect dependencies. We go breadth-first.
# More nodes might get added to new as we go, but that's fine.
for st in new:
assert st.ancestors is not None
# Strip out indirect dependencies. These will be dealt with
# when they show up as direct dependencies, and there's a
# scenario where they hurt:
# - Suppose A imports B and B imports C.
# - Suppose on the next round:
# - C is deleted;
# - B is updated to remove the dependency on C;
# - A is unchanged.
# - In this case A's cached *direct* dependencies are still valid
# (since direct dependencies reflect the imports found in the source)
# but A's cached *indirect* dependency on C is wrong.
dependencies = [dep for dep in st.dependencies if st.priorities.get(dep) != PRI_INDIRECT]
if not manager.use_fine_grained_cache():
# TODO: Ideally we could skip here modules that appeared in st.suppressed
# because they are not in build with `follow-imports=skip`.
# This way we could avoid overhead of cloning options in `State.__init__()`
# below to get the option value. This is quite minor performance loss however.
added = [dep for dep in st.suppressed if find_module_simple(dep, manager)]
else:
# During initial loading we don't care about newly added modules,
# they will be taken care of during fine grained update. See also
# comment about this in `State.__init__()`.
added = []
for dep in st.ancestors + dependencies + st.suppressed:
ignored = dep in st.suppressed_set and dep not in entry_points
if ignored and dep not in added:
manager.missing_modules.add(dep)
elif dep not in graph:
try:
if dep in st.ancestors:
# TODO: Why not 'if dep not in st.dependencies' ?
# Ancestors don't have import context.
newst = State(
id=dep, path=None, source=None, manager=manager, ancestor_for=st
)
else:
newst = State(
id=dep,
path=None,
source=None,
manager=manager,
caller_state=st,
caller_line=st.dep_line_map.get(dep, 1),
)
except ModuleNotFound:
if dep in st.dependencies_set:
st.suppress_dependency(dep)
else:
if newst.path:
newst_path = os.path.abspath(newst.path)
if newst_path in seen_files:
manager.errors.report(
-1,
0,
"Source file found twice under different module names: "
'"{}" and "{}"'.format(seen_files[newst_path].id, newst.id),
blocker=True,
)
manager.errors.report(
-1,
0,
"See https://mypy.readthedocs.io/en/stable/running_mypy.html#mapping-file-paths-to-modules "
"for more info",
severity="note",
)
manager.errors.report(
-1,
0,
"Common resolutions include: a) adding `__init__.py` somewhere, "
"b) using `--explicit-package-bases` or adjusting MYPYPATH",
severity="note",
)
manager.errors.raise_error()
seen_files[newst_path] = newst
assert newst.id not in graph, newst.id
graph[newst.id] = newst
new.append(newst)
if dep in graph and dep in st.suppressed_set:
# Previously suppressed file is now visible
st.add_dependency(dep)
# In the loop above we skip indirect dependencies, so to make indirect dependencies behave
# more consistently with regular ones, we suppress them manually here (when needed).
for st in graph.values():
indirect = [dep for dep in st.dependencies if st.priorities.get(dep) == PRI_INDIRECT]
for dep in indirect:
if dep not in graph:
st.suppress_dependency(dep)
manager.plugin.set_modules(manager.modules)
return graph
def order_ascc_ex(graph: Graph, ascc: SCC) -> list[str]:
"""Apply extra heuristics on top of order_ascc().
This should be used only for actual SCCs, not for "inner" SCCs
we create recursively during ordering of the SCC. Currently, this
has only some special handling for builtin SCC.
"""
scc = order_ascc(graph, ascc.mod_ids)
# Make the order of the SCC that includes 'builtins' and 'typing',
# among other things, predictable. Various things may break if
# the order changes.
if "builtins" in ascc.mod_ids:
scc = sorted(scc, reverse=True)
# If builtins is in the list, move it last. (This is a bit of
# a hack, but it's necessary because the builtins module is
# part of a small cycle involving at least {builtins, abc,
# typing}. Of these, builtins must be processed last or else
# some builtin objects will be incompletely processed.)
scc.remove("builtins")
scc.append("builtins")
return scc
def find_stale_sccs(
sccs: list[SCC], graph: Graph, manager: BuildManager
) -> tuple[list[SCC], list[SCC]]:
"""Split a list of ready SCCs into stale and fresh.
Fresh SCCs are those where:
* We have valid cache files for all modules in the SCC.
* There are no changes in dependencies (files removed from/added to the build).
* The interface hashes of direct dependents matches those recorded in the cache.
The first and second conditions are verified by is_fresh().
"""
stale_sccs = []
fresh_sccs = []
for ascc in sccs:
stale_scc = {id for id in ascc.mod_ids if not graph[id].is_fresh()}
fresh = not stale_scc
# Verify that interfaces of dependencies still present in graph are up-to-date (fresh).
stale_deps = set()
for id in ascc.mod_ids:
for dep in graph[id].dep_hashes:
if dep in graph and graph[dep].interface_hash != graph[id].dep_hashes[dep]:
stale_deps.add(dep)
fresh = fresh and not stale_deps
if fresh:
fresh_msg = "fresh"
elif stale_scc:
fresh_msg = "inherently stale"
if stale_scc != ascc.mod_ids:
fresh_msg += f" ({' '.join(sorted(stale_scc))})"
if stale_deps:
fresh_msg += f" with stale deps ({' '.join(sorted(stale_deps))})"
else:
fresh_msg = f"stale due to deps ({' '.join(sorted(stale_deps))})"
scc_str = " ".join(ascc.mod_ids)
if fresh:
manager.trace(f"Found {fresh_msg} SCC ({scc_str})")
# If there is at most one file with errors we can skip the ordering to save time.
mods_with_errors = [id for id in ascc.mod_ids if graph[id].error_lines]
if len(mods_with_errors) <= 1:
scc = mods_with_errors
else:
# Use exactly the same order as for stale SCCs for stability.
scc = order_ascc_ex(graph, ascc)
for id in scc:
if graph[id].error_lines:
manager.flush_errors(
manager.errors.simplify_path(graph[id].xpath), graph[id].error_lines, False
)
fresh_sccs.append(ascc)
else:
size = len(ascc.mod_ids)
if size == 1:
manager.log(f"Scheduling SCC singleton ({scc_str}) as {fresh_msg}")
else:
manager.log("Scheduling SCC of size %d (%s) as %s" % (size, scc_str, fresh_msg))
stale_sccs.append(ascc)
return stale_sccs, fresh_sccs
def process_graph(graph: Graph, manager: BuildManager) -> None:
"""Process everything in dependency order."""
sccs = sorted_components(graph)
manager.log(
"Found %d SCCs; largest has %d nodes" % (len(sccs), max(len(scc.mod_ids) for scc in sccs))
)
scc_by_id = {scc.id: scc for scc in sccs}
manager.scc_by_id = scc_by_id
manager.top_order = [scc.id for scc in sccs]
# Prime the ready list with leaf SCCs (that have no dependencies).
ready = []
not_ready = []
for scc in sccs:
if not scc.deps:
ready.append(scc)
else:
not_ready.append(scc)
still_working = False
while ready or not_ready or still_working:
stale, fresh = find_stale_sccs(ready, graph, manager)
if stale:
manager.submit(stale)
still_working = True
# We eagerly walk over fresh SCCs to reach as many stale SCCs as soon
# as possible. Only when there are no fresh SCCs, we wait on scheduled stale ones.
# This strategy, similar to a naive strategy in minesweeper game, will allow us
# to leverage parallelism as much as possible.
if fresh:
done = fresh
else:
done, still_working = manager.wait_for_done(graph)
ready = []
for done_scc in done:
for dependent in done_scc.direct_dependents:
scc_by_id[dependent].not_ready_deps.discard(done_scc.id)
if not scc_by_id[dependent].not_ready_deps:
not_ready.remove(scc_by_id[dependent])
ready.append(scc_by_id[dependent])
def order_ascc(graph: Graph, ascc: AbstractSet[str], pri_max: int = PRI_INDIRECT) -> list[str]:
"""Come up with the ideal processing order within an SCC.
Using the priorities assigned by all_imported_modules_in_file(),
try to reduce the cycle to a DAG, by omitting arcs representing
dependencies of lower priority.
In the simplest case, if we have A <--> B where A has a top-level
"import B" (medium priority) but B only has the reverse "import A"
inside a function (low priority), we turn the cycle into a DAG by
dropping the B --> A arc, which leaves only A --> B.
If all arcs have the same priority, we fall back to sorting by
reverse global order (the order in which modules were first
encountered).
The algorithm is recursive, as follows: when as arcs of different
priorities are present, drop all arcs of the lowest priority,
identify SCCs in the resulting graph, and apply the algorithm to
each SCC thus found. The recursion is bounded because at each
recursion the spread in priorities is (at least) one less.
In practice there are only a few priority levels (less than a
dozen) and in the worst case we just carry out the same algorithm
for finding SCCs N times. Thus, the complexity is no worse than
the complexity of the original SCC-finding algorithm -- see
strongly_connected_components() below for a reference.
"""
if len(ascc) == 1:
return list(ascc)
pri_spread = set()
for id in ascc:
state = graph[id]
for dep in state.dependencies:
if dep in ascc:
pri = state.priorities.get(dep, PRI_HIGH)
if pri < pri_max:
pri_spread.add(pri)
if len(pri_spread) == 1:
# Filtered dependencies are uniform -- order by global order.
return sorted(ascc, key=lambda id: -graph[id].order)
pri_max = max(pri_spread)
sccs = sorted_components_inner(graph, ascc, pri_max)
# The recursion is bounded by the len(pri_spread) check above.
return [s for ss in sccs for s in order_ascc(graph, ss, pri_max)]
def process_fresh_modules(graph: Graph, modules: list[str], manager: BuildManager) -> None:
"""Process the modules in one group of modules from their cached data.
This can be used to process an SCC of modules. This involves loading the tree (i.e.
module symbol tables) from cache file and then fixing cross-references in the symbols.
"""
t0 = time.time()
for id in modules:
graph[id].load_tree()
t1 = time.time()
for id in modules:
graph[id].fix_cross_refs()
t2 = time.time()
manager.add_stats(process_fresh_time=t2 - t0, load_tree_time=t1 - t0)
def process_stale_scc(graph: Graph, ascc: SCC, manager: BuildManager) -> None:
"""Process the modules in one SCC from source code."""
# First verify if all transitive dependencies are loaded in the current process.
missing_sccs = set()
sccs_to_find = ascc.deps.copy()
while sccs_to_find:
dep_scc = sccs_to_find.pop()
if dep_scc in manager.done_sccs or dep_scc in missing_sccs:
continue
missing_sccs.add(dep_scc)
sccs_to_find.update(manager.scc_by_id[dep_scc].deps)
if missing_sccs:
# Load missing SCCs from cache.
# TODO: speed-up ordering if this causes problems for large builds.
fresh_sccs_to_load = [
manager.scc_by_id[sid] for sid in manager.top_order if sid in missing_sccs
]
manager.log(f"Processing {len(fresh_sccs_to_load)} fresh SCCs")
if (
not manager.options.test_env
and platform.python_implementation() == "CPython"
and manager.gc_freeze_cycles < MAX_GC_FREEZE_CYCLES
):
# When deserializing cache we create huge amount of new objects, so even
# with our generous GC thresholds, GC is still doing a lot of pointless
# work searching for garbage. So, we temporarily disable it when
# processing fresh SCCs, and then move all the new objects to the oldest
# generation with the freeze()/unfreeze() trick below. This is arguably
# a hack, but it gives huge performance wins for large third-party
# libraries, like torch.
gc.collect()
gc.disable()
for prev_scc in fresh_sccs_to_load:
manager.done_sccs.add(prev_scc.id)
process_fresh_modules(graph, sorted(prev_scc.mod_ids), manager)
if (
not manager.options.test_env
and platform.python_implementation() == "CPython"
and manager.gc_freeze_cycles < MAX_GC_FREEZE_CYCLES
):
manager.gc_freeze_cycles += 1
gc.freeze()
gc.unfreeze()
gc.enable()
# Process the SCC in stable order.
scc = order_ascc_ex(graph, ascc)
stale = scc
for id in stale:
# We may already have parsed the module, or not.
# If the former, parse_file() is a no-op.
graph[id].parse_file()
if "typing" in scc:
# For historical reasons we need to manually add typing aliases
# for built-in generic collections, see docstring of
# SemanticAnalyzerPass2.add_builtin_aliases for details.
typing_mod = graph["typing"].tree
assert typing_mod, "The typing module was not parsed"
mypy.semanal_main.semantic_analysis_for_scc(graph, scc, manager.errors)
# Track what modules aren't yet done, so we can finish them as soon
# as possible, saving memory.
unfinished_modules = set(stale)
for id in stale:
graph[id].type_check_first_pass()
if not graph[id].type_checker().deferred_nodes:
unfinished_modules.discard(id)
graph[id].detect_possibly_undefined_vars()
graph[id].finish_passes()
while unfinished_modules:
for id in stale:
if id not in unfinished_modules:
continue
if not graph[id].type_check_second_pass():
unfinished_modules.discard(id)
graph[id].detect_possibly_undefined_vars()
graph[id].finish_passes()
for id in stale:
graph[id].generate_unused_ignore_notes()
graph[id].generate_ignore_without_code_notes()
# Flush errors, and write cache in two phases: first data files, then meta files.
meta_tuples = {}
errors_by_id = {}
for id in stale:
if graph[id].xpath not in manager.errors.ignored_files:
errors = manager.errors.file_messages(
graph[id].xpath, formatter=manager.error_formatter
)
manager.flush_errors(manager.errors.simplify_path(graph[id].xpath), errors, False)
errors_by_id[id] = errors
meta_tuples[id] = graph[id].write_cache()
graph[id].mark_as_rechecked()
for id in stale:
meta_tuple = meta_tuples[id]
if meta_tuple is None:
continue
meta, meta_file = meta_tuple
meta.dep_hashes = [graph[dep].interface_hash for dep in graph[id].dependencies]
meta.error_lines = errors_by_id.get(id, [])
write_cache_meta(meta, manager, meta_file)
manager.done_sccs.add(ascc.id)
def prepare_sccs_full(
raw_sccs: Iterator[set[str]], edges: dict[str, list[str]]
) -> dict[SCC, set[SCC]]:
"""Turn raw SCC sets into SCC objects and build dependency graph for SCCs."""
sccs = [SCC(raw_scc) for raw_scc in raw_sccs]
scc_map = {}
for scc in sccs:
for id in scc.mod_ids:
scc_map[id] = scc
scc_deps_map: dict[SCC, set[SCC]] = {}
for scc in sccs:
for id in scc.mod_ids:
scc_deps_map.setdefault(scc, set()).update(scc_map[dep] for dep in edges[id])
for scc in sccs:
# Remove trivial dependency on itself.
scc_deps_map[scc].discard(scc)
for dep_scc in scc_deps_map[scc]:
scc.deps.add(dep_scc.id)
scc.not_ready_deps.add(dep_scc.id)
return scc_deps_map
def sorted_components(graph: Graph) -> list[SCC]:
"""Return the graph's SCCs, topologically sorted by dependencies.
The sort order is from leaves (nodes without dependencies) to
roots (nodes on which no other nodes depend).
"""
# Compute SCCs.
vertices = set(graph)
edges = {id: deps_filtered(graph, vertices, id, PRI_INDIRECT) for id in vertices}
scc_dep_map = prepare_sccs_full(strongly_connected_components(vertices, edges), edges)
# Topsort.
res = []
for ready in topsort(scc_dep_map):
# Sort the sets in ready by reversed smallest State.order. Examples:
#
# - If ready is [{x}, {y}], x.order == 1, y.order == 2, we get
# [{y}, {x}].
#
# - If ready is [{a, b}, {c, d}], a.order == 1, b.order == 3,
# c.order == 2, d.order == 4, the sort keys become [1, 2]
# and the result is [{c, d}, {a, b}].
sorted_ready = sorted(ready, key=lambda scc: -min(graph[id].order for id in scc.mod_ids))
for scc in sorted_ready:
for dep in scc_dep_map[scc]:
dep.direct_dependents.append(scc.id)
res.extend(sorted_ready)
return res
def sorted_components_inner(
graph: Graph, vertices: AbstractSet[str], pri_max: int
) -> list[AbstractSet[str]]:
"""Simplified version of sorted_components() to work with sub-graphs.
This doesn't create SCC objects, and operates with raw sets. This function
also allows filtering dependencies to take into account when building SCCs.
This is used for heuristic ordering of modules within actual SCCs.
"""
edges = {id: deps_filtered(graph, vertices, id, pri_max) for id in vertices}
sccs = list(strongly_connected_components(vertices, edges))
res = []
for ready in topsort(prepare_sccs(sccs, edges)):
res.extend(sorted(ready, key=lambda scc: -min(graph[id].order for id in scc)))
return res
def deps_filtered(graph: Graph, vertices: AbstractSet[str], id: str, pri_max: int) -> list[str]:
"""Filter dependencies for id with pri < pri_max."""
if id not in vertices:
return []
state = graph[id]
return [
dep
for dep in state.dependencies
if dep in vertices and state.priorities.get(dep, PRI_HIGH) < pri_max
]
def missing_stubs_file(cache_dir: str) -> str:
return os.path.join(cache_dir, "missing_stubs")
def record_missing_stub_packages(cache_dir: str, missing_stub_packages: set[str]) -> None:
"""Write a file containing missing stub packages.
This allows a subsequent "mypy --install-types" run (without other arguments)
to install missing stub packages.
"""
fnam = missing_stubs_file(cache_dir)
if missing_stub_packages:
with open(fnam, "w") as f:
for pkg in sorted(missing_stub_packages):
f.write(f"{pkg}\n")
else:
if os.path.isfile(fnam):
os.remove(fnam)
def is_silent_import_module(manager: BuildManager, path: str) -> bool:
if manager.options.no_silence_site_packages:
return False
# Silence errors in site-package dirs and typeshed
if any(is_sub_path_normabs(path, dir) for dir in manager.search_paths.package_path):
return True
return any(is_sub_path_normabs(path, dir) for dir in manager.search_paths.typeshed_path)
def write_undocumented_ref_info(
state: State, metastore: MetadataStore, options: Options, type_map: dict[Expression, Type]
) -> None:
# This exports some dependency information in a rather ad-hoc fashion, which
# can be helpful for some tools. This is all highly experimental and could be
# removed at any time.
from mypy.refinfo import get_undocumented_ref_info_json
if not state.tree:
# We need a full AST for this.
return
_, data_file, _ = get_cache_names(state.id, state.xpath, options)
ref_info_file = ".".join(data_file.split(".")[:-2]) + ".refs.json"
assert not ref_info_file.startswith(".")
deps_json = get_undocumented_ref_info_json(state.tree, type_map)
metastore.write(ref_info_file, json_dumps(deps_json))
| NodeInfo |
python | getsentry__sentry | src/sentry/issues/endpoints/grouping_configs.py | {
"start": 397,
"end": 869
} | class ____(Endpoint):
owner = ApiOwner.ISSUES
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
permission_classes = ()
def get(self, request: Request, **kwargs) -> Response:
return Response(
serialize(
[
config.as_dict()
for config in sorted(GROUPING_CONFIG_CLASSES.values(), key=lambda x: str(x.id))
]
)
)
| GroupingConfigsEndpoint |
python | great-expectations__great_expectations | great_expectations/data_context/data_context_variables.py | {
"start": 1131,
"end": 2087
} | class ____(str, enum.Enum):
ALL_VARIABLES = "data_context_variables" # If retrieving/setting the entire config at once
CONFIG_VERSION = "config_version"
DATASOURCES = "datasources"
FLUENT_DATASOURCES = "fluent_datasources"
EXPECTATIONS_STORE_NAME = "expectations_store_name"
VALIDATIONS_STORE_NAME = "validation_results_store_name"
CHECKPOINT_STORE_NAME = "checkpoint_store_name"
PLUGINS_DIRECTORY = "plugins_directory"
STORES = "stores"
DATA_DOCS_SITES = "data_docs_sites"
CONFIG_VARIABLES_FILE_PATH = "config_variables_file_path"
ANALYTICS_ENABLED = "analytics_enabled"
DATA_CONTEXT_ID = "data_context_id"
PROGRESS_BARS = "progress_bars"
@classmethod
def has_value(cls, value: str) -> bool:
"""
Checks whether or not a string is a value from the possible enum pairs.
"""
return value in cls._value2member_map_
@public_api
@dataclass
| DataContextVariableSchema |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/classes11.py | {
"start": 511,
"end": 564
} | class ____(Sequence[float], Mapping[float, int]): ...
| D |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 973933,
"end": 974716
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"email",
"invitee",
"inviter",
"permalink",
"permission",
"repository",
)
email = sgqlc.types.Field(String, graphql_name="email")
invitee = sgqlc.types.Field("User", graphql_name="invitee")
inviter = sgqlc.types.Field(sgqlc.types.non_null("User"), graphql_name="inviter")
permalink = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="permalink")
permission = sgqlc.types.Field(
sgqlc.types.non_null(RepositoryPermission), graphql_name="permission"
)
repository = sgqlc.types.Field(RepositoryInfo, graphql_name="repository")
| RepositoryInvitation |
python | django__django | django/contrib/postgres/operations.py | {
"start": 3707,
"end": 4058
} | class ____:
def _ensure_not_in_transaction(self, schema_editor):
if schema_editor.connection.in_atomic_block:
raise NotSupportedError(
"The %s operation cannot be executed inside a transaction "
"(set atomic = False on the migration)." % self.__class__.__name__
)
| NotInTransactionMixin |
python | jina-ai__jina | jina/serve/instrumentation/__init__.py | {
"start": 5239,
"end": 6869
} | class ____:
"""
Helper dataclass that accepts optional Summary or Histogram recorders which are used to record the time take to execute
the decorated or context managed function
"""
def __init__(
self,
summary_metric: Optional['Summary'],
histogram: Optional['Histogram'],
histogram_metric_labels: Optional[Dict[str, str]] = None,
) -> None:
if histogram_metric_labels is None:
histogram_metric_labels = {}
self._summary_metric = summary_metric
self._histogram = histogram
self._histogram_metric_labels = histogram_metric_labels
def _new_timer(self):
return self.__class__(
self._summary_metric, self._histogram, self._histogram_metric_labels
)
def __enter__(self):
self._start = default_timer()
return self
def __exit__(self, *exc):
duration = max(default_timer() - self._start, 0)
if self._summary_metric:
self._summary_metric.observe(duration)
if self._histogram:
self._histogram.record(duration, attributes=self._histogram_metric_labels)
def __call__(self, f):
"""function that gets called when this class is used as a decortor
:param f: function that is decorated
:return: wrapped function
"""
@functools.wraps(f)
def wrapped(*args, **kwargs):
# Obtaining new instance of timer every time
# ensures thread safety and reentrancy.
with self._new_timer():
return f(*args, **kwargs)
return wrapped
| MetricsTimer |
python | huggingface__transformers | tests/models/vits/test_modeling_vits.py | {
"start": 5560,
"end": 15058
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (VitsModel,) if is_torch_available() else ()
pipeline_model_mapping = (
{"feature-extraction": VitsModel, "text-to-audio": VitsModel} if is_torch_available() else {}
)
is_encoder_decoder = False
test_resize_embeddings = False
has_attentions = False
def setUp(self):
self.model_tester = VitsModelTester(self)
self.config_tester = ConfigTester(self, config_class=VitsConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
# TODO: @ydshieh
@is_flaky(description="torch 2.2.0 gives `Timeout >120.0s`")
def test_pipeline_feature_extraction(self):
super().test_pipeline_feature_extraction()
@is_flaky(description="torch 2.2.0 gives `Timeout >120.0s`")
def test_pipeline_feature_extraction_fp16(self):
super().test_pipeline_feature_extraction_fp16()
@unittest.skip(reason="Need to fix this after #26538")
def test_model_forward(self):
set_seed(12345)
global_rng.seed(12345)
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_forward(*config_and_inputs)
@require_torch_multi_gpu
# override to force all elements of the batch to have the same sequence length across GPUs
def test_multi_gpu_data_parallel_forward(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.use_stochastic_duration_prediction = False
# move input tensors to cuda:O
for key, value in inputs_dict.items():
if torch.is_tensor(value):
# make all elements of the batch the same -> ensures the output seq lengths are the same for DP
value[1:] = value[0]
inputs_dict[key] = value.to(0)
for model_class in self.all_model_classes:
model = model_class(config=config)
model.to(0)
model.eval()
# Wrap model in nn.DataParallel
model = torch.nn.DataParallel(model)
set_seed(555)
with torch.no_grad():
_ = model(**self._prepare_for_class(inputs_dict, model_class)).waveform
@unittest.skip(reason="VITS is not deterministic")
def test_determinism(self):
pass
@unittest.skip(reason="VITS is not deterministic")
def test_batching_equivalence(self):
pass
@unittest.skip(reason="VITS has no inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="VITS has no input embeddings")
def test_model_get_set_embeddings(self):
pass
# override since the model is not deterministic, so we need to set the seed for each forward pass
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t):
t[t != t] = 0
return t
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
with torch.no_grad():
set_seed(0)
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
set_seed(0)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (list, tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif isinstance(tuple_object, dict):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values(), dict_object.values()
):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
),
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
if self.has_attentions:
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(
model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
)
# override since the model is not deterministic, so we need to set the seed for each forward pass
def test_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_save_load(out1, out2):
# make sure we don't have nans
out_2 = out2.cpu().numpy()
out_2[np.isnan(out_2)] = 0
out_1 = out1.cpu().numpy()
out_1[np.isnan(out_1)] = 0
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
set_seed(0)
first = model(**self._prepare_for_class(inputs_dict, model_class))[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
# the config file (and the generation config file, if it can generate) should be saved
self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME)))
self.assertEqual(
model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME))
)
model = model_class.from_pretrained(tmpdirname)
model.to(torch_device)
with torch.no_grad():
set_seed(0)
second = model(**self._prepare_for_class(inputs_dict, model_class))[0]
if isinstance(first, tuple) and isinstance(second, tuple):
for tensor1, tensor2 in zip(first, second):
check_save_load(tensor1, tensor2)
else:
check_save_load(first, second)
# overwrite from test_modeling_common
def _mock_init_weights(self, module):
if hasattr(module, "weight") and module.weight is not None:
module.weight.fill_(3)
if hasattr(module, "weight_g") and module.weight_g is not None:
module.weight_g.data.fill_(3)
if hasattr(module, "weight_v") and module.weight_v is not None:
module.weight_v.data.fill_(3)
if hasattr(module, "bias") and module.bias is not None:
module.bias.fill_(3)
@require_torch
@slow
| VitsModelTest |
python | getsentry__sentry | src/sentry/workflow_engine/migrations/0068_migrate_anomaly_detection_alerts.py | {
"start": 1041,
"end": 1131
} | class ____(Enum):
ABOVE = 0
BELOW = 1
ABOVE_AND_BELOW = 2
| AlertRuleThresholdType |
python | jina-ai__jina | jina/excepts.py | {
"start": 2818,
"end": 4579
} | class ____(grpc.aio.AioRpcError, BaseJinaException):
"""
Raised when communication between microservices fails.
Needed to propagate information about the root cause event, such as request_id and dest_addr.
"""
def __init__(
self,
og_exception: grpc.aio.AioRpcError,
request_id: str = '',
dest_addr: Union[str, Set[str]] = {''},
details: str = '',
):
"""
:param og_exception: the original exception that caused the network error
:param request_id: id of the request that caused the error
:param dest_addr: destination (microservice) address(es) of the problematic network call(s)
:param details: details of the error
"""
self.og_exception = og_exception
self.request_id = request_id
self.dest_addr = dest_addr
self._details = details
super().__init__(
og_exception.code(),
og_exception.initial_metadata(),
og_exception.trailing_metadata(),
self.details(),
og_exception.debug_error_string(),
)
def __str__(self):
return self.details()
def __repr__(self):
return self.__str__()
def code(self):
"""
:return: error code of this exception
"""
return self.og_exception.code()
def details(self):
"""
:return: details of this exception
"""
if self._details:
trailing_metadata = extract_trailing_metadata(self.og_exception)
if trailing_metadata:
return f'{self._details}\n{trailing_metadata}'
else:
return self._details
return self.og_exception.details()
| InternalNetworkError |
python | numpy__numpy | numpy/_core/tests/test_defchararray.py | {
"start": 6190,
"end": 6412
} | class ____(TestComparisons):
"""Ticket #1276"""
def B(self):
return np.array(
[['efg', 'efg', '123 '],
['051', 'efgg', 'tuv']], np.str_).view(np.char.chararray)
| TestComparisonsMixed1 |
python | dask__distributed | distributed/tests/test_active_memory_manager.py | {
"start": 1353,
"end": 42132
} | class ____(ActiveMemoryManagerPolicy):
"""Drop or replicate a key n times"""
def __init__(
self,
action: Literal["drop", "replicate"],
key: str,
n: int,
candidates: list[int] | None,
):
self.action = action
self.key = key
self.n = n
self.candidates = candidates
def run(self):
candidates = self.candidates
if candidates is not None:
candidates = {
ws
for i, ws in enumerate(self.manager.scheduler.workers.values())
if i in candidates
}
for ts in self.manager.scheduler.tasks.values():
if ts.key == self.key:
for _ in range(self.n):
yield self.action, ts, candidates
def demo_config(
action: Literal["drop", "replicate"],
key: str = "x",
n: int = 10,
candidates: list[int] | None = None,
start: bool = False,
interval: float = 0.1,
measure: str = "managed",
) -> dict[str, Any]:
"""Create a dask config for AMM with DemoPolicy"""
return {
"distributed.scheduler.active-memory-manager.start": start,
"distributed.scheduler.active-memory-manager.interval": interval,
"distributed.scheduler.active-memory-manager.measure": measure,
"distributed.scheduler.active-memory-manager.policies": [
{
"class": "distributed.tests.test_active_memory_manager.DemoPolicy",
"action": action,
"key": key,
"n": n,
"candidates": candidates,
},
],
# If pause is required, do it manually by setting Worker.status = Status.paused
"distributed.worker.memory.pause": False,
}
@gen_cluster(
client=True,
config={
"distributed.scheduler.active-memory-manager.start": False,
"distributed.scheduler.active-memory-manager.policies": [],
},
)
async def test_no_policies(c, s, a, b):
s.extensions["amm"].run_once()
@gen_cluster(nthreads=[("", 1)] * 4, client=True, config=demo_config("drop", n=5))
async def test_drop(c, s, *workers):
# Logging is quiet if there are no suggestions
with assert_amm_log(
[
"Running policy: DemoPolicy()",
"Active Memory Manager run in ",
],
):
s.extensions["amm"].run_once()
futures = await c.scatter({"x": 123}, broadcast=True)
assert len(s.tasks["x"].who_has) == 4
# Also test the extension handler
with assert_amm_log(
[
"Running policy: DemoPolicy()",
"(drop, <TaskState 'x' memory>, None): dropping from ",
"(drop, <TaskState 'x' memory>, None): dropping from ",
"(drop, <TaskState 'x' memory>, None): dropping from ",
"(drop, <TaskState 'x' memory>, None) rejected: less than 2 replicas exist",
"(drop, <TaskState 'x' memory>, None) rejected: less than 2 replicas exist",
"Enacting suggestions for 1 tasks:",
"- <WorkerState ",
"- <WorkerState ",
"- <WorkerState ",
"Active Memory Manager run in ",
],
):
s.extensions["amm"].run_once()
while len(s.tasks["x"].who_has) > 1:
await asyncio.sleep(0.01)
# The last copy is never dropped even if the policy asks so
await asyncio.sleep(0.2)
assert len(s.tasks["x"].who_has) == 1
@gen_cluster(client=True, config=demo_config("drop"))
async def test_start_stop(c, s, a, b):
x = c.submit(lambda: 123, key="x")
await c.replicate(x, 2)
assert len(s.tasks["x"].who_has) == 2
s.extensions["amm"].start()
while len(s.tasks["x"].who_has) > 1:
await asyncio.sleep(0.01)
s.extensions["amm"].start() # Double start is a no-op
s.extensions["amm"].stop()
s.extensions["amm"].stop() # Double stop is a no-op
# AMM is not running anymore
await c.replicate(x, 2)
await asyncio.sleep(0.2)
assert len(s.tasks["x"].who_has) == 2
@gen_cluster(client=True, config=demo_config("drop", start=True, interval=0.1))
async def test_auto_start(c, s, a, b):
futures = await c.scatter({"x": 123}, broadcast=True)
# The AMM should run within 0.1s of the broadcast.
# Add generous extra padding to prevent flakiness.
await asyncio.sleep(0.5)
assert len(s.tasks["x"].who_has) == 1
@gen_cluster(client=True, config=demo_config("drop", key="x"))
async def test_add_policy(c, s, a, b):
p2 = DemoPolicy(action="drop", key="y", n=10, candidates=None)
p3 = DemoPolicy(action="drop", key="z", n=10, candidates=None)
# policies parameter can be:
# - None: get from config
# - explicit set, which can be empty
m1 = s.extensions["amm"]
m2 = ActiveMemoryManagerExtension(s, {p2}, register=False, start=False)
m3 = ActiveMemoryManagerExtension(s, set(), register=False, start=False)
assert len(m1.policies) == 1
assert len(m2.policies) == 1
assert len(m3.policies) == 0
m3.add_policy(p3)
assert len(m3.policies) == 1
futures = await c.scatter({"x": 1, "y": 2, "z": 3}, broadcast=True)
m1.run_once()
while len(s.tasks["x"].who_has) == 2:
await asyncio.sleep(0.01)
m2.run_once()
while len(s.tasks["y"].who_has) == 2:
await asyncio.sleep(0.01)
m3.run_once()
while len(s.tasks["z"].who_has) == 2:
await asyncio.sleep(0.01)
with pytest.raises(TypeError):
m3.add_policy("not a policy")
@gen_cluster(client=True, config=demo_config("drop", key="x", start=False))
async def test_multi_start(c, s, a, b):
"""Multiple AMMs can be started in parallel"""
p2 = DemoPolicy(action="drop", key="y", n=10, candidates=None)
p3 = DemoPolicy(action="drop", key="z", n=10, candidates=None)
# policies parameter can be:
# - None: get from config
# - explicit set, which can be empty
m1 = s.extensions["amm"]
m2 = ActiveMemoryManagerExtension(s, {p2}, register=False, start=True, interval=0.1)
m3 = ActiveMemoryManagerExtension(s, {p3}, register=False, start=True, interval=0.1)
assert not m1.running
assert m2.running
assert m3.running
futures = await c.scatter({"x": 1, "y": 2, "z": 3}, broadcast=True)
# The AMMs should run within 0.1s of the broadcast.
# Add generous extra padding to prevent flakiness.
await asyncio.sleep(0.5)
assert len(s.tasks["x"].who_has) == 2
assert len(s.tasks["y"].who_has) == 1
assert len(s.tasks["z"].who_has) == 1
@gen_cluster(client=True, config=NO_AMM)
async def test_not_registered(c, s, a, b):
futures = await c.scatter({"x": 1}, broadcast=True)
assert len(s.tasks["x"].who_has) == 2
class Policy(ActiveMemoryManagerPolicy):
def run(self):
yield "drop", s.tasks["x"], None
amm = ActiveMemoryManagerExtension(s, {Policy()}, register=False, start=False)
amm.run_once()
assert amm is not s.extensions["amm"]
while len(s.tasks["x"].who_has) > 1:
await asyncio.sleep(0.01)
def test_client_proxy_sync(client_no_amm):
c = client_no_amm
assert not c.amm.running()
c.amm.start()
assert c.amm.running()
c.amm.stop()
assert not c.amm.running()
c.amm.run_once()
@gen_cluster(client=True, config=NO_AMM)
async def test_client_proxy_async(c, s, a, b):
assert not await c.amm.running()
await c.amm.start()
assert await c.amm.running()
await c.amm.stop()
assert not await c.amm.running()
await c.amm.run_once()
@gen_cluster(client=True, config=demo_config("drop"))
async def test_drop_not_in_memory(c, s, a, b):
"""ts.who_has is empty"""
x = c.submit(slowinc, 1, key="x")
while "x" not in s.tasks:
await asyncio.sleep(0.01)
assert not x.done()
s.extensions["amm"].run_once()
assert await x == 2
@gen_cluster(client=True, config=demo_config("drop"))
async def test_drop_with_waiter(c, s, a, b):
"""Tasks with a waiter are never dropped"""
x = (await c.scatter({"x": 1}, broadcast=True))["x"]
y1 = c.submit(slowinc, x, delay=0.4, key="y1", workers=[a.address])
y2 = c.submit(slowinc, x, delay=0.8, key="y2", workers=[b.address])
for key in ("y1", "y2"):
while key not in s.tasks or s.tasks[key].state != "processing":
await asyncio.sleep(0.01)
s.extensions["amm"].run_once()
await asyncio.sleep(0.2)
assert {ws.address for ws in s.tasks["x"].who_has} == {a.address, b.address}
assert await y1 == 2
# y1 is finished so there's a worker available without a waiter
s.extensions["amm"].run_once()
while {ws.address for ws in s.tasks["x"].who_has} != {b.address}:
await asyncio.sleep(0.01)
assert not y2.done()
@gen_cluster(client=True, config=NO_AMM)
async def test_double_drop(c, s, a, b):
"""An AMM drop policy runs once to drop one of the two replicas of a key.
Then it runs again, before the recommendations from the first iteration had the time
to either be enacted or rejected, and chooses a different worker to drop from.
Test that, in this use case, the last replica of a key is never dropped.
"""
futures = await c.scatter({"x": 1}, broadcast=True)
assert len(s.tasks["x"].who_has) == 2
ws_iter = iter(s.workers.values())
class Policy(ActiveMemoryManagerPolicy):
def run(self):
yield "drop", s.tasks["x"], {next(ws_iter)}
amm = ActiveMemoryManagerExtension(s, {Policy()}, register=False, start=False)
amm.run_once()
amm.run_once()
while len(s.tasks["x"].who_has) > 1:
await asyncio.sleep(0.01)
await asyncio.sleep(0.2)
assert len(s.tasks["x"].who_has) == 1
@gen_cluster(client=True, config=demo_config("drop"))
async def test_double_drop_stress(c, s, a, b):
"""AMM runs many times before the recommendations of the first run are enacted"""
futures = await c.scatter({"x": 1}, broadcast=True)
assert len(s.tasks["x"].who_has) == 2
for _ in range(10):
s.extensions["amm"].run_once()
while len(s.tasks["x"].who_has) > 1:
await asyncio.sleep(0.01)
await asyncio.sleep(0.2)
assert len(s.tasks["x"].who_has) == 1
@gen_cluster(nthreads=[("", 1)] * 4, client=True, config=demo_config("drop", n=1))
async def test_drop_from_worker_with_least_free_memory(c, s, *workers):
ws1, ws2, ws3, ws4 = s.workers.values()
futures = await c.scatter({"x": 1}, broadcast=True)
assert s.tasks["x"].who_has == {ws1, ws2, ws3, ws4}
clog = c.submit(lambda: "x" * 100, workers=[ws3.address])
await wait(clog)
s.extensions["amm"].run_once()
while s.tasks["x"].who_has != {ws1, ws2, ws4}:
await asyncio.sleep(0.01)
@gen_cluster(
nthreads=[("", 1)] * 8,
client=True,
config=demo_config("drop", n=1, candidates=[5, 6]),
)
async def test_drop_with_candidates(c, s, *workers):
futures = await c.scatter({"x": 1}, broadcast=True)
s.extensions["amm"].run_once()
wss = list(s.workers.values())
expect1 = {wss[0], wss[1], wss[2], wss[3], wss[4], wss[6], wss[7]}
expect2 = {wss[0], wss[1], wss[2], wss[3], wss[4], wss[5], wss[7]}
while s.tasks["x"].who_has not in (expect1, expect2):
await asyncio.sleep(0.01)
@gen_cluster(client=True, config=demo_config("drop", candidates=[]))
async def test_drop_with_empty_candidates(c, s, a, b):
"""Key is not dropped as the plugin proposes an empty set of candidates,
not to be confused with None
"""
futures = await c.scatter({"x": 1}, broadcast=True)
s.extensions["amm"].run_once()
await asyncio.sleep(0.2)
assert len(s.tasks["x"].who_has) == 2
@gen_cluster(
client=True,
nthreads=[("", 1)] * 3,
config=demo_config("drop", candidates=[2]),
)
async def test_drop_from_candidates_without_key(c, s, *workers):
"""Key is not dropped as none of the candidates hold a replica"""
ws0, ws1, ws2 = s.workers.values()
x = (await c.scatter({"x": 1}, workers=[ws0.address]))["x"]
y = c.submit(inc, x, key="y", workers=[ws1.address])
await y
assert s.tasks["x"].who_has == {ws0, ws1}
s.extensions["amm"].run_once()
await asyncio.sleep(0.2)
assert s.tasks["x"].who_has == {ws0, ws1}
@gen_cluster(client=True, config=demo_config("drop", candidates=[0]))
async def test_drop_with_bad_candidates(c, s, a, b):
"""Key is not dropped as all candidates hold waiter tasks"""
ws0, ws1 = s.workers.values() # Not necessarily a, b; it could be b, a!
x = (await c.scatter({"x": 1}, broadcast=True))["x"]
y = c.submit(slowinc, x, 0.3, key="y", workers=[ws0.address])
while "y" not in s.tasks:
await asyncio.sleep(0.01)
s.extensions["amm"].run_once()
await y
assert s.tasks["x"].who_has == {ws0, ws1}
@gen_cluster(client=True, nthreads=[("", 1)] * 10, config=demo_config("drop", n=1))
async def test_drop_prefers_paused_workers(c, s, *workers):
x = await c.scatter({"x": 1}, broadcast=True)
ts = s.tasks["x"]
assert len(ts.who_has) == 10
ws = s.workers[workers[3].address]
workers[3].status = Status.paused
while ws.status != Status.paused:
await asyncio.sleep(0.01)
s.extensions["amm"].run_once()
while len(ts.who_has) != 9:
await asyncio.sleep(0.01)
assert ws not in ts.who_has
@gen_cluster(client=True, config=demo_config("drop"))
async def test_drop_with_paused_workers_with_running_tasks_1(c, s, a, b):
"""If there is exactly 1 worker that holds a replica of a task that isn't paused or
retiring, and there are 1+ paused/retiring workers with the same task, don't drop
anything.
Use case 1 (don't drop):
a is paused and with dependent tasks executing on it
b is running and has no dependent tasks
"""
lock = Lock()
async with lock:
x = (await c.scatter({"x": 1}, broadcast=True))["x"]
y = c.submit(lock_inc, x, lock=lock, key="y", workers=[a.address])
await wait_for_state("y", "executing", a)
a.status = Status.paused
while s.workers[a.address].status != Status.paused:
await asyncio.sleep(0.01)
assert a.state.tasks["y"].state == "executing"
s.extensions["amm"].run_once()
await y
assert len(s.tasks["x"].who_has) == 2
@gen_cluster(client=True, config=demo_config("drop"))
async def test_drop_with_paused_workers_with_running_tasks_2(c, s, a, b):
"""If there is exactly 1 worker that holds a replica of a task that isn't paused or
retiring, and there are 1+ paused/retiring workers with the same task, don't drop
anything.
Use case 2 (drop from a):
a is paused and has no dependent tasks
b is running and has no dependent tasks
"""
x = (await c.scatter({"x": 1}, broadcast=True))["x"]
a.status = Status.paused
while s.workers[a.address].status != Status.paused:
await asyncio.sleep(0.01)
s.extensions["amm"].run_once()
await asyncio.sleep(0.2)
assert {ws.address for ws in s.tasks["x"].who_has} == {b.address}
@pytest.mark.parametrize("pause", [True, False])
@gen_cluster(client=True, config=demo_config("drop"))
async def test_drop_with_paused_workers_with_running_tasks_3_4(c, s, a, b, pause):
"""If there is exactly 1 worker that holds a replica of a task that isn't paused or
retiring, and there are 1+ paused/retiring workers with the same task, don't drop
anything.
Use case 3 (drop from b):
a is paused and with dependent tasks executing on it
b is paused and has no dependent tasks
Use case 4 (drop from b):
a is running and with dependent tasks executing on it
b is running and has no dependent tasks
"""
lock = Lock()
async with lock:
x = (await c.scatter({"x": 1}, broadcast=True))["x"]
y = c.submit(lock_inc, x, lock, key="y", workers=[a.address])
await wait_for_state("y", "executing", a)
if pause:
a.status = Status.paused
b.status = Status.paused
while any(ws.status != Status.paused for ws in s.workers.values()):
await asyncio.sleep(0.01)
assert s.tasks["y"].state == "processing"
assert a.state.tasks["y"].state == "executing"
s.extensions["amm"].run_once()
await y
assert {ws.address for ws in s.tasks["x"].who_has} == {a.address}
@gen_cluster(client=True, nthreads=[("", 1)] * 3, config=demo_config("drop"))
async def test_drop_with_paused_workers_with_running_tasks_5(c, s, w1, w2, w3):
"""If there is exactly 1 worker that holds a replica of a task that isn't paused or
retiring, and there are 1+ paused/retiring workers with the same task, don't drop
anything.
Use case 5 (drop from w2):
w1 is paused and with dependent tasks executing on it
w2 is running and has no dependent tasks
w3 is running and with dependent tasks executing on it
"""
lock = Lock()
async with lock:
x = (await c.scatter({"x": 1}, broadcast=True))["x"]
y1 = c.submit(lock_inc, x, lock=lock, key="y1", workers=[w1.address])
y2 = c.submit(lock_inc, x, lock=lock, key="y2", workers=[w3.address])
def executing() -> bool:
return (
"y1" in w1.state.tasks
and w1.state.tasks["y1"].state == "executing"
and "y2" in w3.state.tasks
and w3.state.tasks["y2"].state == "executing"
)
while not executing():
await asyncio.sleep(0.01)
w1.status = Status.paused
while s.workers[w1.address].status != Status.paused:
await asyncio.sleep(0.01)
assert executing()
s.extensions["amm"].run_once()
while {ws.address for ws in s.tasks["x"].who_has} != {w1.address, w3.address}:
await asyncio.sleep(0.01)
assert executing()
@gen_cluster(nthreads=[("", 1)] * 4, client=True, config=demo_config("replicate", n=2))
async def test_replicate(c, s, *workers):
futures = await c.scatter({"x": 123})
assert len(s.tasks["x"].who_has) == 1
s.extensions["amm"].run_once()
while len(s.tasks["x"].who_has) < 3:
await asyncio.sleep(0.01)
await asyncio.sleep(0.2)
assert len(s.tasks["x"].who_has) == 3
s.extensions["amm"].run_once()
while len(s.tasks["x"].who_has) < 4:
await asyncio.sleep(0.01)
for w in workers:
assert w.data["x"] == 123
@gen_cluster(client=True, config=demo_config("replicate"))
async def test_replicate_not_in_memory(c, s, a, b):
"""ts.who_has is empty"""
x = c.submit(slowinc, 1, key="x")
while "x" not in s.tasks:
await asyncio.sleep(0.01)
assert not x.done()
s.extensions["amm"].run_once()
assert await x == 2
assert len(s.tasks["x"].who_has) == 1
s.extensions["amm"].run_once()
while len(s.tasks["x"].who_has) < 2:
await asyncio.sleep(0.01)
@gen_cluster(client=True, config=demo_config("replicate"))
async def test_double_replicate_stress(c, s, a, b):
"""AMM runs many times before the recommendations of the first run are enacted"""
futures = await c.scatter({"x": 1})
assert len(s.tasks["x"].who_has) == 1
for _ in range(10):
s.extensions["amm"].run_once()
while len(s.tasks["x"].who_has) < 2:
await asyncio.sleep(0.01)
@gen_cluster(nthreads=[("", 1)] * 4, client=True, config=demo_config("replicate", n=1))
async def test_replicate_to_worker_with_most_free_memory(c, s, *workers):
ws1, ws2, ws3, ws4 = s.workers.values()
x = await c.scatter({"x": 1}, workers=[ws1.address])
clogs = await c.scatter([2, 3], workers=[ws2.address, ws4.address])
assert s.tasks["x"].who_has == {ws1}
s.extensions["amm"].run_once()
while s.tasks["x"].who_has != {ws1, ws3}:
await asyncio.sleep(0.01)
@gen_cluster(
nthreads=[("", 1)] * 8,
client=True,
config=demo_config("replicate", n=1, candidates=[5, 6]),
)
async def test_replicate_with_candidates(c, s, *workers):
wss = list(s.workers.values())
futures = await c.scatter({"x": 1}, workers=[wss[0].address])
s.extensions["amm"].run_once()
expect1 = {wss[0], wss[5]}
expect2 = {wss[0], wss[6]}
while s.tasks["x"].who_has not in (expect1, expect2):
await asyncio.sleep(0.01)
@gen_cluster(client=True, config=demo_config("replicate", candidates=[]))
async def test_replicate_with_empty_candidates(c, s, a, b):
"""Key is not replicated as the plugin proposes an empty set of candidates,
not to be confused with None
"""
futures = await c.scatter({"x": 1})
s.extensions["amm"].run_once()
await asyncio.sleep(0.2)
assert len(s.tasks["x"].who_has) == 1
@gen_cluster(client=True, config=demo_config("replicate", candidates=[0]))
async def test_replicate_to_candidates_with_key(c, s, a, b):
"""Key is not replicated as all candidates already hold replicas"""
ws0, ws1 = s.workers.values() # Not necessarily a, b; it could be b, a!
futures = await c.scatter({"x": 1}, workers=[ws0.address])
s.extensions["amm"].run_once()
await asyncio.sleep(0.2)
assert s.tasks["x"].who_has == {ws0}
@gen_cluster(client=True, nthreads=[("", 1)] * 3, config=demo_config("replicate"))
async def test_replicate_avoids_paused_workers_1(c, s, w0, w1, w2):
w1.status = Status.paused
while s.workers[w1.address].status != Status.paused:
await asyncio.sleep(0.01)
futures = await c.scatter({"x": 1}, workers=[w0.address])
s.extensions["amm"].run_once()
while "x" not in w2.data:
await asyncio.sleep(0.01)
await asyncio.sleep(0.2)
assert "x" not in w1.data
@gen_cluster(client=True, config=demo_config("replicate"))
async def test_replicate_avoids_paused_workers_2(c, s, a, b):
b.status = Status.paused
while s.workers[b.address].status != Status.paused:
await asyncio.sleep(0.01)
futures = await c.scatter({"x": 1}, workers=[a.address])
s.extensions["amm"].run_once()
await asyncio.sleep(0.2)
assert "x" not in b.data
@gen_test()
async def test_bad_measure():
with dask.config.set(
{"distributed.scheduler.active-memory-manager.measure": "notexist"}
):
with pytest.raises(ValueError) as e:
await Scheduler(dashboard_address=":0")
assert "measure must be one of " in str(e.value)
@gen_cluster(
nthreads=[("", 1)] * 4,
client=True,
config={
"distributed.scheduler.active-memory-manager.start": False,
"distributed.scheduler.active-memory-manager.policies": [
{"class": "distributed.active_memory_manager.ReduceReplicas"},
# Run two instances of the plugin in sequence, to emulate multiple plugins
# that issues drop suggestions for the same keys
{"class": "distributed.active_memory_manager.ReduceReplicas"},
],
},
)
async def test_ReduceReplicas(c, s, *workers):
# Logging is quiet if there are no suggestions
with assert_amm_log(
[
"Running policy: ReduceReplicas()",
"Running policy: ReduceReplicas()",
"Active Memory Manager run in ",
],
):
s.extensions["amm"].run_once()
futures = await c.scatter({"x": 123}, broadcast=True)
assert len(s.tasks["x"].who_has) == 4
with assert_amm_log(
[
"Running policy: ReduceReplicas()",
"(drop, <TaskState 'x' memory>, None): dropping from <WorkerState ",
"(drop, <TaskState 'x' memory>, None): dropping from <WorkerState ",
"(drop, <TaskState 'x' memory>, None): dropping from <WorkerState ",
"ReduceReplicas: Dropping 3 superfluous replicas of 1 tasks",
"Running policy: ReduceReplicas()",
"Enacting suggestions for 1 tasks:",
"- <WorkerState ",
"- <WorkerState ",
"- <WorkerState ",
"Active Memory Manager run in ",
],
):
s.extensions["amm"].run_once()
await async_poll_for(lambda: len(s.tasks["x"].who_has) == 1, timeout=5)
@pytest.mark.parametrize(
"nwaiters_w1,nwaiters_w2,nwaiters_nonproc,nreplicas",
[
(0, 0, 0, 1),
(1, 0, 0, 1),
(0, 0, 1, 1),
(2, 0, 0, 1),
(1, 1, 0, 2),
(1, 1, 1, 3),
(1, 1, 2, 4),
(2, 1, 1, 3),
(1, 1, 17, 4),
(17, 1, 1, 3),
# Fast code path: if there are 20+ waiters, don't check processing_on
(18, 1, 1, 4),
],
)
@gen_cluster(
nthreads=[("", 1)] * 4,
client=True,
config={
"distributed.scheduler.active-memory-manager.start": False,
"distributed.scheduler.active-memory-manager.policies": [
{"class": "distributed.active_memory_manager.ReduceReplicas"},
],
},
)
async def test_ReduceReplicas_with_waiters(
c, s, w1, w2, w3, w4, nwaiters_w1, nwaiters_w2, nwaiters_nonproc, nreplicas
):
"""If there are waiters, even if they are not processing on a worker yet, preserve
extra replicas.
If there are between 2 and 19 waiters, consider which workers they've been assigned
to and don't double count waiters that have been assigned to the same worker.
"""
ev = Event()
x = (await c.scatter({"x": 123}, broadcast=True))["x"]
y = c.submit(lambda ev: ev.wait(), ev, key="y", workers=[w4.address])
waiters_w1 = [
c.submit(lambda x, ev: ev.wait(), x, ev, key=("zw1", i), workers=[w1.address])
for i in range(nwaiters_w1)
]
waiters_w2 = [
c.submit(lambda x, ev: ev.wait(), x, ev, key=("zw2", i), workers=[w2.address])
for i in range(nwaiters_w2)
]
waiters_nonproc = [
c.submit(lambda x, y: None, x, y, key=("znp", i))
for i in range(nwaiters_nonproc)
]
nwaiters = nwaiters_w1 + nwaiters_w2 + nwaiters_nonproc
await async_poll_for(lambda: len(s.tasks) == nwaiters + 2, timeout=5)
for fut in waiters_w1:
assert s.tasks[fut.key].processing_on == s.workers[w1.address]
for fut in waiters_w2:
assert s.tasks[fut.key].processing_on == s.workers[w2.address]
for fut in waiters_nonproc:
assert s.tasks[fut.key].processing_on is None
s.extensions["amm"].run_once()
await asyncio.sleep(0.2) # Test no excessive drops
await async_poll_for(lambda: len(s.tasks["x"].who_has) == nreplicas, timeout=5)
await ev.set()
@pytest.mark.parametrize("start_amm", [False, True])
@gen_cluster(client=True)
async def test_RetireWorker_amm_on_off(c, s, a, b, start_amm):
"""retire_workers must work both with and without the AMM started"""
if start_amm:
await c.amm.start()
else:
await c.amm.stop()
futures = await c.scatter({"x": 1}, workers=[a.address])
await c.retire_workers([a.address])
assert a.address not in s.workers
assert "x" in b.data
@gen_cluster(
client=True,
scheduler_kwargs={"extensions": {}},
worker_kwargs={"extensions": {}},
)
async def test_RetireWorker_no_extension(c, s, a, b):
"""retire_workers must work when the AMM extension is not loaded"""
futures = await c.scatter({"x": 1}, workers=[a.address])
await c.retire_workers([a.address])
assert a.address not in s.workers
assert "x" in b.data
@gen_cluster(
client=True,
config={
"distributed.scheduler.active-memory-manager.start": True,
"distributed.scheduler.active-memory-manager.interval": 0.1,
"distributed.scheduler.active-memory-manager.policies": [],
},
)
async def test_RetireWorker_no_remove(c, s, a, b):
"""Test RetireWorker behaviour on retire_workers(..., remove=False)"""
x = await c.scatter({"x": "x"}, workers=[a.address])
await c.retire_workers([a.address], close_workers=False, remove=False)
# Wait 2 AMM iterations
# retire_workers may return before all keys have been dropped from a
while s.tasks["x"].who_has != {s.workers[b.address]}:
await asyncio.sleep(0.01)
assert a.address in s.workers
assert a.status == Status.closing_gracefully
assert s.workers[a.address].status == Status.closing_gracefully
# Policy has been removed without waiting for worker to disappear from
# Scheduler.workers
assert not s.extensions["amm"].policies
@pytest.mark.parametrize("use_ReduceReplicas", [False, True])
@gen_cluster(
client=True,
config={
"distributed.scheduler.active-memory-manager.start": True,
"distributed.scheduler.active-memory-manager.interval": 0.1,
"distributed.scheduler.active-memory-manager.measure": "managed",
"distributed.scheduler.active-memory-manager.policies": [
{"class": "distributed.active_memory_manager.ReduceReplicas"},
],
},
)
async def test_RetireWorker_with_ReduceReplicas(c, s, *workers, use_ReduceReplicas):
"""RetireWorker and ReduceReplicas work well with each other.
If ReduceReplicas is enabled,
1. On the first AMM iteration, either ReduceReplicas or RetireWorker (arbitrarily
depending on which comes first in the iteration of
ActiveMemoryManagerExtension.policies) deletes non-unique keys, choosing from
workers to be retired first. At the same time, RetireWorker replicates unique
keys.
2. On the second AMM iteration, either ReduceReplicas or RetireWorker deletes the
keys replicated at the previous round from the worker to be retired.
If ReduceReplicas is not enabled, all drops are performed by RetireWorker.
This test fundamentally relies on workers in the process of being retired to be
always picked first by ActiveMemoryManagerExtension._find_dropper.
"""
ws_a, ws_b = s.workers.values()
if not use_ReduceReplicas:
s.extensions["amm"].policies.clear()
x = c.submit(lambda: "x", key="x", workers=[ws_a.address])
y = c.submit(lambda: "y", key="y", workers=[ws_a.address])
z = c.submit(lambda x: None, x, key="z", workers=[ws_b.address]) # copy x to ws_b
# Make sure that the worker NOT being retired has the most RAM usage to test that
# it is not being picked first since there's a retiring worker.
w = c.submit(lambda: "w" * 100, key="w", workers=[ws_b.address])
await wait([x, y, z, w])
await c.retire_workers([ws_a.address], remove=False)
# retire_workers may return before all keys have been dropped from a
while ws_a.has_what:
await asyncio.sleep(0.01)
assert {ts.key for ts in ws_b.has_what} == {"x", "y", "z", "w"}
@gen_cluster(client=True, nthreads=[("", 1)] * 3, config=NO_AMM)
async def test_RetireWorker_all_replicas_are_being_retired(c, s, w1, w2, w3):
"""There are multiple replicas of a key, but they all reside on workers that are
being retired
"""
ws1 = s.workers[w1.address]
ws2 = s.workers[w2.address]
ws3 = s.workers[w3.address]
fut = await c.scatter({"x": "x"}, workers=[w1.address, w2.address], broadcast=True)
assert s.tasks["x"].who_has == {ws1, ws2}
await c.retire_workers([w1.address, w2.address])
assert s.tasks["x"].who_has == {ws3}
@gen_cluster(
client=True,
nthreads=[("", 1)] * 4,
config={
"distributed.scheduler.active-memory-manager.start": True,
# test that we're having a manual amm.run_once() "kick" from retire_workers
"distributed.scheduler.active-memory-manager.interval": 999,
"distributed.scheduler.active-memory-manager.policies": [],
},
)
async def test_RetireWorker_no_recipients(c, s, w1, w2, w3, w4):
"""All workers are retired at once.
Test use cases:
1. (w1) worker contains no data -> it is retired
2. (w2) worker contains unique data -> it is not retired
3. (w3, w4) worker contains non-unique data, but all replicas are on workers that
are being retired -> all but one are retired
"""
x = await c.scatter({"x": "x"}, workers=[w2.address])
y = await c.scatter({"y": "y"}, workers=[w3.address, w4.address], broadcast=True)
out = await c.retire_workers([w1.address, w2.address, w3.address, w4.address])
assert set(out) in ({w1.address, w3.address}, {w1.address, w4.address})
assert not s.extensions["amm"].policies
assert set(s.workers) in ({w2.address, w3.address}, {w2.address, w4.address})
# After a Scheduler -> Worker -> Scheduler roundtrip, workers that failed to retire
# went back from closing_gracefully to running and can run tasks
while any(ws.status != Status.running for ws in s.workers.values()):
await asyncio.sleep(0.01)
assert await c.submit(inc, 1) == 2
@gen_cluster(
client=True,
config={
"distributed.scheduler.active-memory-manager.start": True,
"distributed.scheduler.active-memory-manager.interval": 999,
"distributed.scheduler.active-memory-manager.policies": [],
"distributed.worker.memory.pause": False,
},
)
async def test_RetireWorker_all_recipients_are_paused(c, s, a, b):
ws_a = s.workers[a.address]
ws_b = s.workers[b.address]
b.status = Status.paused
while ws_b.status != Status.paused:
await asyncio.sleep(0.01)
x = await c.scatter("x", workers=[a.address])
out = await c.retire_workers([a.address])
assert not out
assert not s.extensions["amm"].policies
assert set(s.workers) == {a.address, b.address}
# After a Scheduler -> Worker -> Scheduler roundtrip, workers that failed to
# retire went back from closing_gracefully to running and can run tasks
while ws_a.status != Status.running:
await asyncio.sleep(0.01)
assert await c.submit(inc, 1) == 2
@gen_cluster(
client=True,
config={
# Don't use one-off AMM instance
"distributed.scheduler.active-memory-manager.start": True,
"distributed.scheduler.active-memory-manager.policies": [],
},
)
async def test_RetireWorker_new_keys_arrive_after_all_keys_moved_away(c, s, a, b):
"""
If all keys have been moved off a worker, but then new keys arrive (due to task
completion or `gather_dep`) before the worker has actually closed, make sure we
still retire it (instead of hanging forever).
This test is timing-sensitive. If it runs too slowly, it *should* `pytest.skip`
itself.
See https://github.com/dask/distributed/issues/6223 for motivation.
"""
ws_a = s.workers[a.address]
ws_b = s.workers[b.address]
event = Event()
# Put 200 keys on the worker, so `_track_retire_worker` will sleep for 0.5s
xs = c.map(lambda x: x, range(200), workers=[a.address])
await wait(xs)
# Put an extra task on the worker, which we will allow to complete once the `xs`
# have been replicated.
extra = c.submit(
lambda: event.wait("2s"),
workers=[a.address],
allow_other_workers=True,
key="extra",
)
await wait_for_state(extra.key, "executing", a)
t = asyncio.create_task(c.retire_workers([a.address]))
amm = s.extensions["amm"]
while not amm.policies:
await asyncio.sleep(0)
policy = next(iter(amm.policies))
assert isinstance(policy, RetireWorker)
# Wait for all `xs` to be replicated.
while len(ws_b.has_what) != len(xs):
await asyncio.sleep(0)
# `_track_retire_worker` _should_ now be sleeping for 0.5s, because there were >=200
# keys on A. In this test, everything from the beginning of the transfers needs to
# happen within 0.5s.
# Simulate waiting for the policy to run again.
# Note that the interval at which the policy runs is inconsequential for this test.
amm.run_once()
# The policy has removed itself, because all `xs` have been replicated.
assert not amm.policies
assert policy.done(), {ts.key: ts.who_has for ts in ws_a.has_what}
# But what if a new key arrives now while `_track_retire_worker` is still (maybe)
# sleeping? Let `extra` complete and wait for it to hit the scheduler.
await event.set()
await wait(extra)
if a.address not in s.workers:
# It took more than 0.5s to get here, and the scheduler closed our worker. Dang.
pytest.xfail(
"Timing didn't work out: `_track_retire_worker` finished before "
"`extra` completed."
)
# `retire_workers` doesn't hang
await t
assert a.address not in s.workers
assert not amm.policies
# `extra` was not transferred from `a` to `b`. Instead, it was recomputed on `b`.
story = b.state.story(extra.key)
assert_story(
story,
[
(extra.key, "compute-task", "released"),
(extra.key, "released", "waiting", "waiting", {"extra": "ready"}),
(extra.key, "waiting", "ready", "ready", {"extra": "executing"}),
],
)
# `extra` completes successfully and is fetched from the other worker.
await extra.result()
@gen_cluster(
client=True,
config={
"distributed.scheduler.active-memory-manager.start": True,
"distributed.scheduler.active-memory-manager.interval": 0.05,
"distributed.scheduler.active-memory-manager.measure": "managed",
"distributed.scheduler.active-memory-manager.policies": [],
},
)
async def test_RetireWorker_faulty_recipient(c, s, w1, w2):
"""RetireWorker requests to replicate a key onto an unresponsive worker.
The AMM will iterate multiple times, repeating the command, until eventually the
scheduler declares the worker dead and removes it from the pool; at that point the
AMM will choose another valid worker and complete the job.
"""
# w1 is being retired
# w3 has the lowest RAM usage and is chosen as a recipient, but is unresponsive
x = c.submit(lambda: 123, key="x", workers=[w1.address])
await wait(x)
# Fill w2 with dummy data so that it's got the highest memory usage
# among the workers that are not being retired (w2 and w3).
clutter = await c.scatter(456, workers=[w2.address])
async with BlockedGatherDep(s.address) as w3:
await c.wait_for_workers(3)
retire_fut = asyncio.create_task(c.retire_workers([w1.address]))
# w3 is chosen as the recipient for x, because it's got the lowest memory usage
await w3.in_gather_dep.wait()
# AMM unfruitfully sends to w3 a new {op: acquire-replicas} message every 0.05s
while (
sum(isinstance(ev, AcquireReplicasEvent) for ev in w3.state.stimulus_log)
< 3
):
await asyncio.sleep(0.01)
assert not retire_fut.done()
# w3 has been shut down. At this point, AMM switches to w2.
await retire_fut
assert w1.address not in s.workers
assert w3.address not in s.workers
assert dict(w2.data) == {"x": 123, clutter.key: 456}
@gen_cluster(
client=True,
nthreads=[("", 1)] * 10,
config={
"distributed.scheduler.active-memory-manager.start": True,
"distributed.scheduler.active-memory-manager.interval": 0.05,
"distributed.scheduler.active-memory-manager.measure": "managed",
"distributed.scheduler.active-memory-manager.policies": [],
},
)
async def test_RetireWorker_mass(c, s, *workers):
"""Retire 90% of a cluster at once."""
# Note: by using scatter instead of submit/map, we're also testing that tasks
# aren't being recomputed
data = await c.scatter(range(100))
for w in workers:
assert len(w.data) == 10
await c.retire_workers([w.address for w in workers[:-1]])
assert set(s.workers) == {workers[-1].address}
assert len(workers[-1].data) == 100
@gen_cluster(
client=True,
config={
"distributed.scheduler.active-memory-manager.start": True,
"distributed.scheduler.active-memory-manager.interval": 0.05,
"distributed.scheduler.active-memory-manager.measure": "managed",
"distributed.scheduler.active-memory-manager.policies": [],
},
)
async def test_RetireWorker_incremental(c, s, w2, w3):
"""Retire worker w1; this causes its keys to be replicated onto w2.
Before that can happen, retire w2 too.
"""
async with BlockedGetData(s.address) as w1:
# Note: by using scatter instead of submit/map, we're also testing that tasks
# aren't being recomputed
x = await c.scatter({"x": 1}, workers=[w1.address])
y = await c.scatter({"y": 2}, workers=[w3.address])
# Because w2's memory is lower than w3, AMM will choose w2
retire1 = asyncio.create_task(c.retire_workers([w1.address]))
await w1.in_get_data.wait()
assert w2.state.tasks["x"].state == "flight"
await c.retire_workers([w2.address])
w1.block_get_data.set()
await retire1
assert set(s.workers) == {w3.address}
assert set(w3.data) == {"x", "y"}
| DemoPolicy |
python | huggingface__transformers | src/transformers/models/sam2_video/modeling_sam2_video.py | {
"start": 39160,
"end": 41628
} | class ____(nn.Module):
def __init__(self, config: Sam2VideoConfig):
super().__init__()
self.layers = nn.ModuleList(
[Sam2VideoMemoryAttentionLayer(config) for _ in range(config.memory_attention_num_layers)]
)
self.layer_norm = nn.LayerNorm(config.memory_attention_hidden_size)
self.rotary_emb = Sam2VideoVisionRotaryEmbedding(config=config)
def forward(
self,
current_vision_features: torch.Tensor,
memory: torch.Tensor,
current_vision_position_embeddings: Optional[Tensor] = None,
memory_posision_embeddings: Optional[Tensor] = None,
num_object_pointer_tokens: int = 0,
):
"""
Args:
current_vision_features (`torch.FloatTensor`):
The current vision features used for self-attention.
memory (`torch.FloatTensor`):
The memory features used for cross-attention.
current_vision_position_embeddings (`torch.FloatTensor`, *optional*):
The position embeddings for the current vision features.
memory_posision_embeddings (`torch.FloatTensor`, *optional*):
The position embeddings for the memory features.
num_object_pointer_tokens (`int`, *optional*, defaults to 0):
The number of object pointer tokens.
"""
output = current_vision_features
if current_vision_position_embeddings is not None:
output = output + 0.1 * current_vision_position_embeddings
# Convert to batch first
output = output.transpose(0, 1)
memory = memory.transpose(0, 1).unsqueeze(1)
memory_posision_embeddings = memory_posision_embeddings.transpose(0, 1).unsqueeze(1)
rope_position_embeddings = self.rotary_emb()
for layer in self.layers:
output = layer(
queries=output.unsqueeze(1) if output.ndim == 3 else output,
keys=memory,
key_point_embedding=memory_posision_embeddings,
rope_position_embeddings=rope_position_embeddings,
num_k_exclude_rope=num_object_pointer_tokens,
)
normed_output = self.layer_norm(output)
# Convert back to seq first
normed_output = normed_output.transpose(0, 1)
return normed_output
# Lightly adapted from ConvNext (https://github.com/facebookresearch/ConvNeXt)
| Sam2VideoMemoryAttention |
python | pydantic__pydantic | pydantic-core/tests/validators/test_dataclasses.py | {
"start": 6934,
"end": 8250
} | class ____:
a: str
b: bool
def test_dataclass():
schema = core_schema.dataclass_schema(
FooDataclass,
core_schema.dataclass_args_schema(
'FooDataclass',
[
core_schema.dataclass_field(name='a', schema=core_schema.str_schema()),
core_schema.dataclass_field(name='b', schema=core_schema.bool_schema()),
],
),
['a', 'b'],
)
v = SchemaValidator(schema)
foo = v.validate_python({'a': 'hello', 'b': True})
assert dataclasses.is_dataclass(foo)
assert foo.a == 'hello'
assert foo.b is True
assert dataclasses.asdict(v.validate_python(FooDataclass(a='hello', b=True))) == {'a': 'hello', 'b': True}
with pytest.raises(ValidationError, match='Input should be an instance of FooDataclass') as exc_info:
v.validate_python({'a': 'hello', 'b': True}, strict=True)
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [
{
'type': 'dataclass_exact_type',
'loc': (),
'msg': 'Input should be an instance of FooDataclass',
'input': {'a': 'hello', 'b': True},
'ctx': {'class_name': 'FooDataclass'},
}
]
@dataclasses.dataclass
| FooDataclass |
python | allegroai__clearml | clearml/utilities/enum.py | {
"start": 840,
"end": 1093
} | class ____(object):
"""Base class for an Options class which allow getting all class properties as a key/value mapping"""
@classmethod
def _all(cls) -> Dict[str, Any]:
return {k: v for k, v in vars(cls) if not k.startswith("_")}
| Options |
python | plotly__plotly.py | plotly/graph_objs/scattermapbox/_hoverlabel.py | {
"start": 233,
"end": 11283
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattermapbox"
_path_str = "scattermapbox.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
"showarrow",
}
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
more two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattermapbox.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.scattermapbox.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
@property
def showarrow(self):
"""
Sets whether or not to show the hover label arrow/triangle
pointing to the data point.
The 'showarrow' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showarrow"]
@showarrow.setter
def showarrow(self, val):
self["showarrow"] = val
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
showarrow=None,
**kwargs,
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattermapbox.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
Returns
-------
Hoverlabel
"""
super().__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattermapbox.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattermapbox.Hoverlabel`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("align", arg, align)
self._set_property("alignsrc", arg, alignsrc)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bgcolorsrc", arg, bgcolorsrc)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("bordercolorsrc", arg, bordercolorsrc)
self._set_property("font", arg, font)
self._set_property("namelength", arg, namelength)
self._set_property("namelengthsrc", arg, namelengthsrc)
self._set_property("showarrow", arg, showarrow)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Hoverlabel |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/graph.py | {
"start": 3107,
"end": 3372
} | class ____(NamedTuple):
"""Branch in a graph."""
condition: Callable[..., str]
"""A callable that returns a string representation of the condition."""
ends: dict[str, str] | None
"""Optional dictionary of end node IDs for the branches. """
| Branch |
python | pallets__jinja | src/jinja2/loaders.py | {
"start": 16204,
"end": 17490
} | class ____(BaseLoader):
"""A loader that is passed a function which does the loading. The
function receives the name of the template and has to return either
a string with the template source, a tuple in the form ``(source,
filename, uptodatefunc)`` or `None` if the template does not exist.
>>> def load_template(name):
... if name == 'index.html':
... return '...'
...
>>> loader = FunctionLoader(load_template)
The `uptodatefunc` is a function that is called if autoreload is enabled
and has to return `True` if the template is still up to date. For more
details have a look at :meth:`BaseLoader.get_source` which has the same
return value.
"""
def __init__(
self,
load_func: t.Callable[
[str],
str | tuple[str, str | None, t.Callable[[], bool] | None] | None,
],
) -> None:
self.load_func = load_func
def get_source(
self, environment: "Environment", template: str
) -> tuple[str, str | None, t.Callable[[], bool] | None]:
rv = self.load_func(template)
if rv is None:
raise TemplateNotFound(template)
if isinstance(rv, str):
return rv, None, None
return rv
| FunctionLoader |
python | wandb__wandb | wandb/sdk/data_types/helper_types/image_mask.py | {
"start": 362,
"end": 8878
} | class ____(Media):
"""Format image masks or overlays for logging to W&B.
Args:
val: (dictionary)
One of these two keys to represent the image:
mask_data : (2D numpy array) The mask containing an integer class label
for each pixel in the image
path : (string) The path to a saved image file of the mask
class_labels : (dictionary of integers to strings, optional) A mapping of the
integer class labels in the mask to readable class names. These will default
to class_0, class_1, class_2, etc.
key: (string)
The readable name or id for this mask type (e.g. predictions, ground_truth)
Examples:
### Logging a single masked image
```python
import numpy as np
import wandb
run = wandb.init()
image = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)
predicted_mask = np.empty((100, 100), dtype=np.uint8)
ground_truth_mask = np.empty((100, 100), dtype=np.uint8)
predicted_mask[:50, :50] = 0
predicted_mask[50:, :50] = 1
predicted_mask[:50, 50:] = 2
predicted_mask[50:, 50:] = 3
ground_truth_mask[:25, :25] = 0
ground_truth_mask[25:, :25] = 1
ground_truth_mask[:25, 25:] = 2
ground_truth_mask[25:, 25:] = 3
class_labels = {0: "person", 1: "tree", 2: "car", 3: "road"}
masked_image = wandb.Image(
image,
masks={
"predictions": {
"mask_data": predicted_mask,
"class_labels": class_labels,
},
"ground_truth": {
"mask_data": ground_truth_mask,
"class_labels": class_labels,
},
},
)
run.log({"img_with_masks": masked_image})
```
### Log a masked image inside a Table
```python
import numpy as np
import wandb
run = wandb.init()
image = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)
predicted_mask = np.empty((100, 100), dtype=np.uint8)
ground_truth_mask = np.empty((100, 100), dtype=np.uint8)
predicted_mask[:50, :50] = 0
predicted_mask[50:, :50] = 1
predicted_mask[:50, 50:] = 2
predicted_mask[50:, 50:] = 3
ground_truth_mask[:25, :25] = 0
ground_truth_mask[25:, :25] = 1
ground_truth_mask[:25, 25:] = 2
ground_truth_mask[25:, 25:] = 3
class_labels = {0: "person", 1: "tree", 2: "car", 3: "road"}
class_set = wandb.Classes(
[
{"name": "person", "id": 0},
{"name": "tree", "id": 1},
{"name": "car", "id": 2},
{"name": "road", "id": 3},
]
)
masked_image = wandb.Image(
image,
masks={
"predictions": {
"mask_data": predicted_mask,
"class_labels": class_labels,
},
"ground_truth": {
"mask_data": ground_truth_mask,
"class_labels": class_labels,
},
},
classes=class_set,
)
table = wandb.Table(columns=["image"])
table.add_data(masked_image)
run.log({"random_field": table})
```
"""
_log_type = "mask"
def __init__(self, val: dict, key: str) -> None:
"""Initialize an ImageMask object.
Args:
val: (dictionary) One of these two keys to represent the image:
mask_data : (2D numpy array) The mask containing an integer class label
for each pixel in the image
path : (string) The path to a saved image file of the mask
class_labels : (dictionary of integers to strings, optional) A mapping
of the integer class labels in the mask to readable class names.
These will default to class_0, class_1, class_2, etc.
key: (string)
The readable name or id for this mask type (e.g. predictions, ground_truth)
"""
super().__init__()
if "path" in val:
self._set_file(val["path"])
else:
np = util.get_module("numpy", required="Image mask support requires numpy")
if util.is_pytorch_tensor_typename(
util.get_full_typename(val["mask_data"])
):
val["mask_data"] = val["mask_data"].cpu().numpy()
# Add default class mapping
if "class_labels" not in val:
classes = np.unique(val["mask_data"]).astype(np.int32).tolist()
class_labels = {c: "class_" + str(c) for c in classes}
val["class_labels"] = class_labels
self.validate(val)
self._val = val
self._key = key
ext = "." + self.type_name() + ".png"
tmp_path = os.path.join(MEDIA_TMP.name, runid.generate_id() + ext)
pil_image = util.get_module(
"PIL.Image",
required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
)
image = pil_image.fromarray(val["mask_data"].astype(np.int8)).convert("L")
image.save(tmp_path, transparency=None)
self._set_file(tmp_path, is_tmp=True, extension=ext)
def bind_to_run(
self,
run: "LocalRun",
key: Union[int, str],
step: Union[int, str],
id_: Optional[Union[int, str]] = None,
ignore_copy_err: Optional[bool] = None,
) -> None:
# bind_to_run key argument is the Image parent key
# the self._key value is the mask's sub key
super().bind_to_run(run, key, step, id_=id_, ignore_copy_err=ignore_copy_err)
if hasattr(self, "_val") and "class_labels" in self._val:
class_labels = self._val["class_labels"]
run._add_singleton(
"mask/class_labels",
str(key) + "_wandb_delimeter_" + self._key,
class_labels,
)
@classmethod
def get_media_subdir(cls: Type["ImageMask"]) -> str:
return os.path.join("media", "images", cls.type_name())
@classmethod
def from_json(
cls: Type["ImageMask"], json_obj: dict, source_artifact: "Artifact"
) -> "ImageMask":
return cls(
{"path": source_artifact.get_entry(json_obj["path"]).download()},
key="",
)
def to_json(self, run_or_artifact: Union["LocalRun", "Artifact"]) -> dict:
json_dict = super().to_json(run_or_artifact)
if isinstance(run_or_artifact, wandb.Run):
json_dict["_type"] = self.type_name()
return json_dict
elif isinstance(run_or_artifact, wandb.Artifact):
# Nothing special to add (used to add "digest", but no longer used.)
return json_dict
else:
raise TypeError("to_json accepts wandb_run.Run or wandb.Artifact")
@classmethod
def type_name(cls: Type["ImageMask"]) -> str:
return cls._log_type
def validate(self, val: dict) -> bool:
np = util.get_module("numpy", required="Image mask support requires numpy")
# 2D Make this work with all tensor(like) types
if "mask_data" not in val:
raise TypeError(
'Missing key "mask_data": An image mask requires mask data: a 2D array representing the predictions'
)
else:
error_str = "mask_data must be a 2D array"
shape = val["mask_data"].shape
if len(shape) != 2:
raise TypeError(error_str)
if not (
(val["mask_data"] >= 0).all() and (val["mask_data"] <= 255).all()
) and issubclass(val["mask_data"].dtype.type, np.integer):
raise TypeError("Mask data must be integers between 0 and 255")
# Optional argument
if "class_labels" in val:
for k, v in list(val["class_labels"].items()):
if (not isinstance(k, numbers.Number)) or (not isinstance(v, str)):
raise TypeError(
"Class labels must be a dictionary of numbers to strings"
)
return True
| ImageMask |
python | getsentry__sentry | src/sentry/workflow_engine/migrations/0068_migrate_anomaly_detection_alerts.py | {
"start": 2833,
"end": 3651
} | class ____(StrEnum):
"""
SentryAppIdentifier is an enum that represents the identifier for a Sentry app.
"""
SENTRY_APP_INSTALLATION_UUID = "sentry_app_installation_uuid"
SENTRY_APP_SLUG = "sentry_app_slug"
SENTRY_APP_ID = "sentry_app_id"
FIELDS_TO_DETECTOR_FIELDS = {
"name": "name",
"description": "description",
"user_id": "owner_user_id",
"team_id": "owner_team_id",
}
TYPE_TO_PROVIDER = {
0: "email",
1: "pagerduty",
2: "slack",
3: "msteams",
4: "sentry_app",
6: "opsgenie",
7: "discord",
}
PRIORITY_MAP = {
"warning": DetectorPriorityLevel.MEDIUM, # not actually used for anomaly detection alerts
"critical": DetectorPriorityLevel.HIGH,
}
OPSGENIE_DEFAULT_PRIORITY = "P3"
PAGERDUTY_DEFAULT_SEVERITY = "default"
| SentryAppIdentifier |
python | django__django | tests/admin_widgets/tests.py | {
"start": 50784,
"end": 50879
} | class ____(DateTimePickerShortcutsSeleniumTests):
pass
| DateTimePickerAltTimezoneSeleniumTests |
python | django__django | tests/backends/base/test_operations.py | {
"start": 6984,
"end": 8327
} | class ____(TransactionTestCase):
available_apps = ["backends"]
def test_sql_flush_no_tables(self):
self.assertEqual(connection.ops.sql_flush(no_style(), []), [])
def test_execute_sql_flush_statements(self):
with transaction.atomic():
author = Author.objects.create(name="George Orwell")
Book.objects.create(author=author)
author = Author.objects.create(name="Harper Lee")
Book.objects.create(author=author)
Book.objects.create(author=author)
self.assertIs(Author.objects.exists(), True)
self.assertIs(Book.objects.exists(), True)
sql_list = connection.ops.sql_flush(
no_style(),
[Author._meta.db_table, Book._meta.db_table],
reset_sequences=True,
allow_cascade=True,
)
connection.ops.execute_sql_flush(sql_list)
with transaction.atomic():
self.assertIs(Author.objects.exists(), False)
self.assertIs(Book.objects.exists(), False)
if connection.features.supports_sequence_reset:
author = Author.objects.create(name="F. Scott Fitzgerald")
self.assertEqual(author.pk, 1)
book = Book.objects.create(author=author)
self.assertEqual(book.pk, 1)
| SqlFlushTests |
python | pandas-dev__pandas | asv_bench/benchmarks/tslibs/resolution.py | {
"start": 732,
"end": 1336
} | class ____:
params = (
["D", "h", "m", "s", "us", "ns"],
_sizes,
_tzs,
)
param_names = ["unit", "size", "tz"]
def setup(self, unit, size, tz):
if size == 10**6 and tz is tzlocal_obj:
# tzlocal is cumbersomely slow, so skip to keep runtime in check
raise NotImplementedError
arr = np.random.randint(0, 10, size=size, dtype="i8")
arr = arr.view(f"M8[{unit}]").astype("M8[ns]").view("i8")
self.i8data = arr
def time_get_resolution(self, unit, size, tz):
get_resolution(self.i8data, tz)
| TimeResolution |
python | python-openxml__python-docx | src/docx/text/paragraph.py | {
"start": 652,
"end": 6828
} | class ____(StoryChild):
"""Proxy object wrapping a `<w:p>` element."""
def __init__(self, p: CT_P, parent: t.ProvidesStoryPart):
super(Paragraph, self).__init__(parent)
self._p = self._element = p
def add_run(self, text: str | None = None, style: str | CharacterStyle | None = None) -> Run:
"""Append run containing `text` and having character-style `style`.
`text` can contain tab (``\\t``) characters, which are converted to the
appropriate XML form for a tab. `text` can also include newline (``\\n``) or
carriage return (``\\r``) characters, each of which is converted to a line
break. When `text` is `None`, the new run is empty.
"""
r = self._p.add_r()
run = Run(r, self)
if text:
run.text = text
if style:
run.style = style
return run
@property
def alignment(self) -> WD_PARAGRAPH_ALIGNMENT | None:
"""A member of the :ref:`WdParagraphAlignment` enumeration specifying the
justification setting for this paragraph.
A value of |None| indicates the paragraph has no directly-applied alignment
value and will inherit its alignment value from its style hierarchy. Assigning
|None| to this property removes any directly-applied alignment value.
"""
return self._p.alignment
@alignment.setter
def alignment(self, value: WD_PARAGRAPH_ALIGNMENT):
self._p.alignment = value
def clear(self):
"""Return this same paragraph after removing all its content.
Paragraph-level formatting, such as style, is preserved.
"""
self._p.clear_content()
return self
@property
def contains_page_break(self) -> bool:
"""`True` when one or more rendered page-breaks occur in this paragraph."""
return bool(self._p.lastRenderedPageBreaks)
@property
def hyperlinks(self) -> List[Hyperlink]:
"""A |Hyperlink| instance for each hyperlink in this paragraph."""
return [Hyperlink(hyperlink, self) for hyperlink in self._p.hyperlink_lst]
def insert_paragraph_before(
self, text: str | None = None, style: str | ParagraphStyle | None = None
) -> Paragraph:
"""Return a newly created paragraph, inserted directly before this paragraph.
If `text` is supplied, the new paragraph contains that text in a single run. If
`style` is provided, that style is assigned to the new paragraph.
"""
paragraph = self._insert_paragraph_before()
if text:
paragraph.add_run(text)
if style is not None:
paragraph.style = style
return paragraph
def iter_inner_content(self) -> Iterator[Run | Hyperlink]:
"""Generate the runs and hyperlinks in this paragraph, in the order they appear.
The content in a paragraph consists of both runs and hyperlinks. This method
allows accessing each of those separately, in document order, for when the
precise position of the hyperlink within the paragraph text is important. Note
that a hyperlink itself contains runs.
"""
for r_or_hlink in self._p.inner_content_elements:
yield (
Run(r_or_hlink, self)
if isinstance(r_or_hlink, CT_R)
else Hyperlink(r_or_hlink, self)
)
@property
def paragraph_format(self):
"""The |ParagraphFormat| object providing access to the formatting properties
for this paragraph, such as line spacing and indentation."""
return ParagraphFormat(self._element)
@property
def rendered_page_breaks(self) -> List[RenderedPageBreak]:
"""All rendered page-breaks in this paragraph.
Most often an empty list, sometimes contains one page-break, but can contain
more than one is rare or contrived cases.
"""
return [RenderedPageBreak(lrpb, self) for lrpb in self._p.lastRenderedPageBreaks]
@property
def runs(self) -> List[Run]:
"""Sequence of |Run| instances corresponding to the <w:r> elements in this
paragraph."""
return [Run(r, self) for r in self._p.r_lst]
@property
def style(self) -> ParagraphStyle | None:
"""Read/Write.
|_ParagraphStyle| object representing the style assigned to this paragraph. If
no explicit style is assigned to this paragraph, its value is the default
paragraph style for the document. A paragraph style name can be assigned in lieu
of a paragraph style object. Assigning |None| removes any applied style, making
its effective value the default paragraph style for the document.
"""
style_id = self._p.style
style = self.part.get_style(style_id, WD_STYLE_TYPE.PARAGRAPH)
return cast(ParagraphStyle, style)
@style.setter
def style(self, style_or_name: str | ParagraphStyle | None):
style_id = self.part.get_style_id(style_or_name, WD_STYLE_TYPE.PARAGRAPH)
self._p.style = style_id
@property
def text(self) -> str:
"""The textual content of this paragraph.
The text includes the visible-text portion of any hyperlinks in the paragraph.
Tabs and line breaks in the XML are mapped to ``\\t`` and ``\\n`` characters
respectively.
Assigning text to this property causes all existing paragraph content to be
replaced with a single run containing the assigned text. A ``\\t`` character in
the text is mapped to a ``<w:tab/>`` element and each ``\\n`` or ``\\r``
character is mapped to a line break. Paragraph-level formatting, such as style,
is preserved. All run-level formatting, such as bold or italic, is removed.
"""
return self._p.text
@text.setter
def text(self, text: str | None):
self.clear()
self.add_run(text)
def _insert_paragraph_before(self):
"""Return a newly created paragraph, inserted directly before this paragraph."""
p = self._p.add_p_before()
return Paragraph(p, self._parent)
| Paragraph |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_lookup.py | {
"start": 32889,
"end": 33615
} | class ____(AnnotatedConstructorWithSignature):
__signature__ = signature(selfless_signature)
def really_takes_str(value: int) -> None:
"""By this example we show, that ``__signature__`` is the most important source."""
assert isinstance(value, str)
really_takes_str.__signature__ = signature(selfless_signature)
@pytest.mark.parametrize(
"thing",
[
AnnotatedConstructorWithSignature,
AnnotatedConstructorWithSelflessSignature,
really_takes_str,
],
)
def test_signature_is_the_most_important_source(thing):
"""Signature types should take precedence over all other annotations."""
check_can_generate_examples(st.builds(thing))
| AnnotatedConstructorWithSelflessSignature |
python | google__jax | tests/mosaic/gpu_test.py | {
"start": 218231,
"end": 233476
} | class ____(TestCase):
def test_inout(self):
def kernel(ctx, src, inout, dst, smem):
val = memref.load(inout, [])
gpu.barrier()
new_val = arith.constant(ir.IntegerType.get_signless(32), 42)
memref.store(new_val, inout, [])
x = mgpu.FragmentedArray.load_strided(src, is_signed=True)
(x + val).store_untiled(dst)
x = jnp.arange(128, dtype=jnp.int32)
y = jnp.asarray(2.0, dtype=jnp.int32)
kernel = mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), x, x, (), inout_shape=y,
)
xo, yo = kernel(x, y)
np.testing.assert_array_equal(xo, x + 2.0)
np.testing.assert_array_equal(yo, jnp.asarray(42, dtype=jnp.int32))
def test_serialize_uses_bytecode_format(self):
def kernel(ctx, src, dst, smem):
del ctx, smem
x = mgpu.FragmentedArray.load_strided(src, is_signed=True)
(x + 1).store_untiled(dst)
x = jnp.arange(128, dtype=jnp.int32)
with self.subTest("bytecode"):
f = mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), x, x, (),
)
bytecode_stablehlo = jax.jit(f).lower(x).as_text()
module_prefix = "module = \"ML\\EFR"
if hp is not None:
@hps.composite
def tiled_layouts(
draw, initial_tile, vector_transfer: bool = False
) -> fa.TiledLayout:
assert all(t.bit_count() == 1 for t in initial_tile)
assert math.prod(initial_tile) >= 128
tiles = [initial_tile]
dim_offset = len(initial_tile)
if draw(hps.booleans()):
warp_dims = [fa.Replicated(2) if draw(hps.booleans()) else None for _ in range(2)]
else:
warp_dims = [fa.Replicated(4) if draw(hps.booleans()) else None]
for i, dim in enumerate(warp_dims):
if isinstance(dim, fa.Replicated):
continue
dim_size = 4 // len(warp_dims)
warp_dim = draw(
hps.sampled_from(
[i for i, t in enumerate(tiles[-1]) if t % dim_size == 0]
)
)
warp_tile = list(tiles[-1])
warp_tile[warp_dim] //= dim_size
warp_dims[i] = dim_offset + warp_dim
tiles.append(warp_tile)
dim_offset += len(warp_tile)
lane_dims = [fa.Replicated(2) if draw(hps.booleans()) else None for _ in range(5)]
for i, dim in enumerate(lane_dims):
if isinstance(dim, fa.Replicated):
continue
lane_dim = draw(hps.sampled_from(
[i for i, t in enumerate(tiles[-1]) if t % 2 == 0]
))
lane_tile = list(tiles[-1])
lane_tile[lane_dim] //= 2
lane_dims[i] = dim_offset + lane_dim
tiles.append(lane_tile)
dim_offset += len(lane_tile)
# Permute lane dims so that they don't always partition the data in order.
lane_dims = draw(hps.permutations(lane_dims))
if vector_transfer:
min_vector_dim = len(tiles[-1]) - 1
else:
min_vector_dim = 0
vector_dim = draw(hps.integers(min_vector_dim, len(tiles[-1]) - 1))
vector_size = 2 ** draw(
hps.integers(0, tiles[-1][vector_dim].bit_length() - 1)
)
vector_tile = list(tiles[-1])
assert vector_tile[vector_dim] % vector_size == 0
vector_tile[vector_dim] //= vector_size
tiles.append(vector_tile)
dim_offset += len(vector_tile)
vector_dim += dim_offset
dim_offset += len(vector_tile) # This is the remainder after tiling!
warp_dims = tuple(
d if isinstance(d, fa.Replicated) else d - dim_offset
for d in warp_dims
)
lane_dims = tuple(
d if isinstance(d, fa.Replicated) else d - dim_offset
for d in lane_dims
)
vector_dim = vector_dim - dim_offset
return fa.TiledLayout(
tiling=fa.Tiling(tuple(map(tuple, tiles))),
warp_dims=warp_dims,
lane_dims=lane_dims,
vector_dim=vector_dim,
_check_canonical=False,
).canonicalize()
@hps.composite
def shape_and_tiled_layout(
draw, vector_transfer: bool = False
) -> tuple[tuple[int, ...], fa.TiledLayout]:
rank = draw(hps.integers(2, 3))
initial_tile = tuple(
draw(hps.sampled_from([1, 2, 4, 8, 16, 32, 64, 128]))
for _ in range(rank)
)
hp.assume(128 <= math.prod(initial_tile) < 128 * 32)
shape = tuple(t * draw(hps.integers(1, 5)) for t in initial_tile)
hp.assume(math.prod(shape) <= 128 * 128)
layout = draw(tiled_layouts(initial_tile, vector_transfer=vector_transfer))
return shape, layout
class HypothesisTest(TestCase):
def test_reduce(self):
@hps.composite
def strategy(draw):
shape, layout = draw(shape_and_tiled_layout(vector_transfer=True))
rank = len(shape)
reduced_dims = draw(hps.sets(hps.integers(0, rank - 1), min_size=1))
dtype = draw(hps.sampled_from([jnp.int32, jnp.int16]))
return shape, layout, tuple(reduced_dims), dtype
warp_replicated_major = fa.TiledLayout(
fa.Tiling(((2,), (1,))), (fa.Replicated(2,), -2), (fa.Replicated(32,),), -1
)
warp_replicated_minor = fa.TiledLayout(
fa.Tiling(((2,), (1,))), (-2, fa.Replicated(2,)), (fa.Replicated(32,),), -1
)
warp_row_col_layout = fa.TiledLayout(
fa.Tiling(((2, 2), (1,))), (-3, -2), (fa.Replicated(32,),), -1
)
@hp.given(strategy())
@hp.example(((16,), warp_replicated_major, (0,), jnp.int32))
@hp.example(((16,), warp_replicated_minor, (0,), jnp.int32))
@hp.example(((16, 16), warp_row_col_layout, (0,), jnp.int32))
@hp.example(((16, 16), warp_row_col_layout, (1,), jnp.int32))
def run(args):
shape, layout, reduced_dims, dtype = args
out_shape = list(shape)
for d in sorted(reduced_dims, reverse=True):
del out_shape[d]
def kernel(ctx, src, dst, scratch):
del ctx
arr = fa.FragmentedArray.load_untiled(src, layout=layout, optimized=False, is_signed=True)
arr.reduce("add", reduced_dims, scratch).store_untiled(dst, optimized=False)
x = jax.random.randint(jax.random.key(1234), shape, -1000, 1000, dtype)
out_type = jax.ShapeDtypeStruct(out_shape, dtype)
scratch_type = jax.ShapeDtypeStruct((2048,), dtype)
hp.assume(layout.vector_length <= 16) # Otherwise we run out of scratch
try:
result = mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), x, out_type, scratch_type
)(x)
except NotImplementedError:
hp.assume(False)
return
np.testing.assert_array_equal(result, x.sum(reduced_dims, dtype=dtype))
run()
def test_slice(self):
i32 = ir.IntegerType.get_signless(32)
index = ir.IndexType.get()
@hps.composite
def strategy(draw):
shape, layout = draw(shape_and_tiled_layout(vector_transfer=True))
tiling = layout.base_tile_shape
tiled_shape = mgpu.tile_shape(shape, tiling)[:len(shape)]
def draw_slice(size, tile):
start = draw(hps.integers(0, size - 1))
length = draw(hps.integers(1, size - start))
return slice(start * tile, (start + length) * tile)
slices = tuple(map(draw_slice, tiled_shape, tiling))
return shape, layout, slices
basic_slices = (slice(128, 256), slice(16, 16 + 32))
@hp.given(strategy())
@hp.example(((256, 256), fa.WGMMA_LAYOUT, basic_slices))
@hp.example(((256, 256), tcgen05.LAYOUT, basic_slices))
@hp.example(((256, 256), tcgen05.TMEM_NATIVE_LAYOUT, basic_slices))
def run(args):
shape, layout, slices = args
def kernel(ctx, dst, _):
def linear_index(*idxs):
total = arith.constant(index, 0)
stride = 1
for i, size in zip(idxs[::-1], shape[::-1]):
total = arith.addi(total, arith.muli(i, c(stride, index)))
stride *= size
return arith.index_cast(i32, total)
x = mgpu.FragmentedArray.build(
shape, layout, linear_index, is_signed=True
)
x[slices].store_untiled(dst, optimized=False)
slice_shape = tuple(len(range(size)[s]) for s, size in zip(slices, shape))
out_shape = jax.ShapeDtypeStruct(shape=slice_shape, dtype=jnp.int32)
result = mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), (), out_shape, ()
)()
iota = np.arange(np.prod(shape), dtype=jnp.int32).reshape(*shape)
np.testing.assert_array_equal(result, iota[slices])
run()
def test_broadcast(self):
@hps.composite
def strategy(draw):
shape, layout = draw(shape_and_tiled_layout(vector_transfer=True))
rank = len(shape)
broadcast_dims = draw(
hps.sets(hps.integers(0, rank - 1), min_size=1, max_size=rank - 1)
)
dtype = draw(hps.sampled_from([jnp.float32, jnp.bfloat16]))
return shape, layout, tuple(broadcast_dims), dtype
@hp.given(strategy())
def run(args):
out_shape, out_layout, broadcast_dims, dtype = args
in_shape = list(out_shape)
for d in sorted(broadcast_dims, reverse=True):
del in_shape[d]
in_layout = out_layout.reduce(broadcast_dims)
dims = tuple(d for d in range(len(out_shape)) if d not in broadcast_dims)
def kernel(ctx, src, dst, scratch):
del ctx, scratch # Unused.
arr = fa.FragmentedArray.load_untiled(src, layout=in_layout, optimized=False)
arr.broadcast_in_dim(out_shape, dims, out_layout).store_untiled(dst, optimized=False)
x = jax.random.normal(jax.random.key(1234), in_shape, dtype)
out_type = jax.ShapeDtypeStruct(out_shape, dtype)
try:
result = mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), x, out_type, ()
)(x)
except NotImplementedError:
hp.assume(False)
return
np.testing.assert_array_equal(result, jax.lax.broadcast_in_dim(x, out_shape, dims))
run()
@hp.given(hps.data())
def test_canonicalize_trivial_dims(self, data):
layout = data.draw(tiled_layouts((128, 1)))
trivial_dims = [
i
for i, d in fa.enumerate_negative(layout.tiled_tiling_shape)
if d == 1 and i != layout.vector_dim
]
if not trivial_dims:
hp.assume(False)
# That should not happen in canonical layouts.
self.assertNoCommonElements(trivial_dims, layout.partitioned_warp_dims)
self.assertNoCommonElements(trivial_dims, layout.partitioned_lane_dims)
# vector_dim can be trivial.
canonical_layout = layout
use_trivial_dim = data.draw(
hps.lists(hps.booleans(), min_size=len(trivial_dims), max_size=len(trivial_dims))
)
hp.assume(any(use_trivial_dim))
for d, use in zip(trivial_dims, use_trivial_dim):
if not use:
continue
if data.draw(hps.booleans()): # Should we put it in warp or lane dims?
new_warp_dims = list(layout.warp_dims)
position = data.draw(hps.integers(0, len(layout.warp_dims)))
new_warp_dims.insert(position, d)
layout = dataclasses.replace(
layout, warp_dims=tuple(new_warp_dims), _check_canonical=False
)
else:
new_lane_dims = list(layout.lane_dims)
position = data.draw(hps.integers(0, len(layout.lane_dims)))
new_lane_dims.insert(position, d)
layout = dataclasses.replace(
layout, lane_dims=tuple(new_lane_dims), _check_canonical=False
)
self.assertNotEqual(layout, canonical_layout)
self.assertEqual(layout.canonicalize(), canonical_layout)
def test_copy_tiled(self):
@hps.composite
def strategy(draw):
swizzle = draw(hps.sampled_from([16, 32, 64, 128]))
dtype = draw(hps.sampled_from([jnp.int32, jnp.int16, jnp.int8]))
tiling = (8, swizzle // jnp.dtype(dtype).itemsize)
shape = [draw(hps.integers(1, 6)) for t in tiling]
while math.prod(shape) % 4:
shape[draw(hps.booleans())] *= 2
shape = [s * t for s, t in zip(shape, tiling)]
to_smem = draw(hps.booleans())
return shape, dtype, swizzle, to_smem
@hp.given(strategy())
@hp.example(((48, 64), jnp.int32, 16, False))
@hp.example(((48, 64), jnp.int32, 32, False))
@hp.example(((48, 64), jnp.int32, 64, False))
@hp.example(((48, 64), jnp.int32, 128, False))
@hp.example(((64, 4), jnp.int32, 16, False))
def run(args):
shape, dtype, swizzle, to_smem = args
tiling = (8, 8 * swizzle // jnp.iinfo(dtype).bits)
def kernel(ctx, src, dst, scratch):
smem, barrier = scratch
if to_smem:
mgpu.copy_tiled(src, smem, swizzle=swizzle)
mgpu.commit_shared()
ctx.async_copy(
src_ref=smem,
dst_ref=dst,
gmem_transform=mgpu.TileTransform(tiling),
swizzle=swizzle,
)
ctx.await_async_copy(0)
else:
ctx.async_copy(
src_ref=src,
dst_ref=smem,
gmem_transform=mgpu.TileTransform(tiling),
swizzle=swizzle,
barrier=barrier,
)
barrier.wait()
mgpu.copy_tiled(smem, dst, swizzle=swizzle)
x = jnp.arange(math.prod(shape), dtype=dtype).reshape(shape)
scratch_shape = [
jax.ShapeDtypeStruct(mgpu.tile_shape(shape, tiling), dtype),
mgpu.TMABarrier(1),
]
y = mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), x, x, scratch_shape
)(x)
np.testing.assert_array_equal(y, x)
run()
def test_dialect_vector_load_store(self):
@hps.composite
def strategy(draw):
shape, layout = draw(shape_and_tiled_layout(vector_transfer=True))
return shape, layout
@hp.given(strategy())
@hp.example(((128, 128), fa.WGMMA_LAYOUT))
@hp.example(((128, 128), fa.TCGEN05_LAYOUT))
@hp.example(((128, 128), fa.TMEM_NATIVE_LAYOUT))
def run(args):
shape, layout = args
dtype = jnp.float32
layout_attr = layouts.to_layout_attr(layout)
def body(ctx, input, result, smem):
del ctx
# GMEM -> Registers
reg = mgpu_dialect.vector_load(input)
reg = mgpu_dialect.layout_cast(reg, layout_attr)
# Registers -> SMEM
mgpu_dialect.vector_store(reg, smem)
# SMEM -> Registers
reg = mgpu_dialect.vector_load(smem)
reg = mgpu_dialect.layout_cast(reg, layout_attr)
# Registers -> GMEM
mgpu_dialect.vector_store(reg, result)
jax_shape = jax.ShapeDtypeStruct(shape, dtype)
kernel = mgpu.as_gpu_kernel(
body,
grid=(1, 1, 1),
block=(128, 1, 1),
in_shape=jax_shape,
out_shape=jax_shape,
smem_scratch_shape=jax_shape,
thread_semantics=mgpu.LoweringSemantics.Warpgroup,
)
input = self.prng.uniform(-1, 1, shape).astype(dtype)
np.testing.assert_array_equal(kernel(input), input)
run()
if __name__ == "__main__":
absltest.main(argv=["python"], testLoader=jtu.JaxTestLoader())
| ApiTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor30.py | {
"start": 426,
"end": 665
} | class ____(Generic[P, T]):
def __init__(
self, _type: Callable[P, T], *args: P.args, **kwargs: P.kwargs
) -> None: ...
def func1(t: type[TA]) -> TA: ...
b = B(func1, A)
reveal_type(b, expected_text="B[(t: type[A]), A]")
| B |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/links/test_emr.py | {
"start": 10340,
"end": 11565
} | class ____(BaseAwsLinksTestCase):
link_class = EmrServerlessCloudWatchLogsLink
def test_extra_link(self, mock_supervisor_comms):
if AIRFLOW_V_3_0_PLUS and mock_supervisor_comms:
mock_supervisor_comms.send.return_value = XComResult(
key=self.link_class.key,
value={
"region_name": "us-west-1",
"aws_domain": self.link_class.get_aws_domain("aws"),
"aws_partition": "aws",
"awslogs_group": "/aws/emrs",
"stream_prefix": "some-prefix",
"application_id": "app-id",
"job_run_id": "job-run-id",
},
)
self.assert_extra_link_url(
expected_url=(
"https://console.aws.amazon.com/cloudwatch/home?region=us-west-1#logsV2:log-groups/log-group/%2Faws%2Femrs$3FlogStreamNameFilter$3Dsome-prefix"
),
region_name="us-west-1",
aws_partition="aws",
awslogs_group="/aws/emrs",
stream_prefix="some-prefix",
application_id="app-id",
job_run_id="job-run-id",
)
| TestEmrServerlessCloudWatchLogsLink |
python | kamyu104__LeetCode-Solutions | Python/lowest-common-ancestor-of-deepest-leaves.py | {
"start": 191,
"end": 738
} | class ____(object):
def lcaDeepestLeaves(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
def lcaDeepestLeavesHelper(root):
if not root:
return 0, None
d1, lca1 = lcaDeepestLeavesHelper(root.left)
d2, lca2 = lcaDeepestLeavesHelper(root.right)
if d1 > d2:
return d1+1, lca1
if d1 < d2:
return d2+1, lca2
return d1+1, root
return lcaDeepestLeavesHelper(root)[1]
| Solution |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_tool_search_tool_regex_20251119_param.py | {
"start": 350,
"end": 1063
} | class ____(TypedDict, total=False):
name: Required[Literal["tool_search_tool_regex"]]
"""Name of the tool.
This is how the tool will be called by the model and in `tool_use` blocks.
"""
type: Required[Literal["tool_search_tool_regex_20251119", "tool_search_tool_regex"]]
allowed_callers: List[Literal["direct", "code_execution_20250825"]]
cache_control: Optional[BetaCacheControlEphemeralParam]
"""Create a cache control breakpoint at this content block."""
defer_loading: bool
"""If true, tool will not be included in initial system prompt.
Only loaded when returned via tool_reference from tool search.
"""
strict: bool
| BetaToolSearchToolRegex20251119Param |
python | django__django | django/contrib/postgres/forms/ranges.py | {
"start": 3120,
"end": 3295
} | class ____(BaseRangeField):
default_error_messages = {"invalid": _("Enter two numbers.")}
base_field = forms.DecimalField
range_type = NumericRange
| DecimalRangeField |
python | mwaskom__seaborn | tests/_core/test_plot.py | {
"start": 75273,
"end": 77797
} | class ____:
@pytest.fixture(autouse=True)
def reset_config(self):
yield
Plot.config.display.update(PlotConfig().display)
def test_png_format(self):
Plot.config.display["format"] = "png"
assert Plot()._repr_svg_() is None
assert Plot().plot()._repr_svg_() is None
def assert_valid_png(p):
data, metadata = p._repr_png_()
img = Image.open(io.BytesIO(data))
assert img.format == "PNG"
assert sorted(metadata) == ["height", "width"]
assert_valid_png(Plot())
assert_valid_png(Plot().plot())
def test_svg_format(self):
Plot.config.display["format"] = "svg"
assert Plot()._repr_png_() is None
assert Plot().plot()._repr_png_() is None
def assert_valid_svg(p):
res = p._repr_svg_()
root = xml.etree.ElementTree.fromstring(res)
assert root.tag == "{http://www.w3.org/2000/svg}svg"
assert_valid_svg(Plot())
assert_valid_svg(Plot().plot())
def test_png_scaling(self):
Plot.config.display["scaling"] = 1.
res1, meta1 = Plot()._repr_png_()
Plot.config.display["scaling"] = .5
res2, meta2 = Plot()._repr_png_()
assert meta1["width"] / 2 == meta2["width"]
assert meta1["height"] / 2 == meta2["height"]
img1 = Image.open(io.BytesIO(res1))
img2 = Image.open(io.BytesIO(res2))
assert img1.size == img2.size
def test_svg_scaling(self):
Plot.config.display["format"] = "svg"
Plot.config.display["scaling"] = 1.
res1 = Plot()._repr_svg_()
Plot.config.display["scaling"] = .5
res2 = Plot()._repr_svg_()
root1 = xml.etree.ElementTree.fromstring(res1)
root2 = xml.etree.ElementTree.fromstring(res2)
def getdim(root, dim):
return float(root.attrib[dim][:-2])
assert getdim(root1, "width") / 2 == getdim(root2, "width")
assert getdim(root1, "height") / 2 == getdim(root2, "height")
def test_png_hidpi(self):
res1, meta1 = Plot()._repr_png_()
Plot.config.display["hidpi"] = False
res2, meta2 = Plot()._repr_png_()
assert meta1["width"] == meta2["width"]
assert meta1["height"] == meta2["height"]
img1 = Image.open(io.BytesIO(res1))
img2 = Image.open(io.BytesIO(res2))
assert img1.size[0] // 2 == img2.size[0]
assert img1.size[1] // 2 == img2.size[1]
| TestDisplayConfig |
python | numba__numba | numba/cuda/target.py | {
"start": 14511,
"end": 16837
} | class ____(BaseCallConv):
"""
Calling convention aimed at matching the CUDA C/C++ ABI. The implemented
function signature is:
<Python return type> (<Python arguments>)
Exceptions are unsupported in this convention.
"""
def _make_call_helper(self, builder):
# Call helpers are used to help report exceptions back to Python, so
# none is required here.
return None
def return_value(self, builder, retval):
return builder.ret(retval)
def return_user_exc(self, builder, exc, exc_args=None, loc=None,
func_name=None):
msg = "Python exceptions are unsupported in the CUDA C/C++ ABI"
raise NotImplementedError(msg)
def return_status_propagate(self, builder, status):
msg = "Return status is unsupported in the CUDA C/C++ ABI"
raise NotImplementedError(msg)
def get_function_type(self, restype, argtypes):
"""
Get the LLVM IR Function type for *restype* and *argtypes*.
"""
arginfo = self._get_arg_packer(argtypes)
argtypes = list(arginfo.argument_types)
fnty = ir.FunctionType(self.get_return_type(restype), argtypes)
return fnty
def decorate_function(self, fn, args, fe_argtypes, noalias=False):
"""
Set names and attributes of function arguments.
"""
assert not noalias
arginfo = self._get_arg_packer(fe_argtypes)
arginfo.assign_names(self.get_arguments(fn),
['arg.' + a for a in args])
def get_arguments(self, func):
"""
Get the Python-level arguments of LLVM *func*.
"""
return func.args
def call_function(self, builder, callee, resty, argtys, args):
"""
Call the Numba-compiled *callee*.
"""
arginfo = self._get_arg_packer(argtys)
realargs = arginfo.as_arguments(builder, args)
code = builder.call(callee, realargs)
# No status required as we don't support exceptions or a distinct None
# value in a C ABI.
status = None
out = self.context.get_returned_value(builder, resty, code)
return status, out
def get_return_type(self, ty):
return self.context.data_model_manager[ty].get_return_type()
| CUDACABICallConv |
python | jina-ai__jina | jina/jaml/parsers/base.py | {
"start": 1612,
"end": 3169
} | class ____(VersionedYAMLParser, ABC):
"""
BaseLegacyParser for classes that need parameter injection and that will be managed inside a runtime
for instance, :class:`BaseExecutor` and :class:`BaseGateway`
"""
@staticmethod
def _get_all_arguments(class_):
"""
:param class_: target class from which we want to retrieve arguments
:return: all the arguments of all the classes from which `class_` inherits
"""
def get_class_arguments(class_):
"""
:param class_: the class to check
:return: a list containing the arguments from `class_`
"""
signature = inspect.signature(class_.__init__)
class_arguments = [p.name for p in signature.parameters.values()]
return class_arguments
def accumulate_classes(cls) -> Set[Type]:
"""
:param cls: the class to check
:return: all classes from which cls inherits from
"""
def _accumulate_classes(c, cs):
cs.append(c)
if cls == object:
return cs
for base in c.__bases__:
_accumulate_classes(base, cs)
return cs
classes = []
_accumulate_classes(cls, classes)
return set(classes)
all_classes = accumulate_classes(class_)
args = list(map(lambda x: get_class_arguments(x), all_classes))
return set(reduce(lambda x, y: x + y, args))
| BaseLegacyParser |
python | pypa__pipenv | pipenv/patched/pip/_internal/req/req_file.py | {
"start": 2851,
"end": 3287
} | class ____:
# TODO: replace this with slots=True when dropping Python 3.9 support.
__slots__ = (
"requirement",
"is_editable",
"comes_from",
"constraint",
"options",
"line_source",
)
requirement: str
is_editable: bool
comes_from: str
constraint: bool
options: Optional[Dict[str, Any]]
line_source: Optional[str]
@dataclass(frozen=True)
| ParsedRequirement |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vision.py | {
"start": 1695,
"end": 5869
} | class ____(GoogleCloudBaseOperator):
"""
Create a new ProductSet resource.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionCreateProductSetOperator`
:param product_set: (Required) The ProductSet to create. If a dict is provided, it must be of the same
form as the protobuf message `ProductSet`.
:param location: (Required) The region where the ProductSet should be created. Valid regions
(as of 2019-02-05) are: us-east1, us-west1, europe-west1, asia-east1
:param project_id: (Optional) The project in which the ProductSet should be created. If set to None or
missing, the default project_id from the Google Cloud connection is used.
:param product_set_id: (Optional) A user-supplied resource id for this ProductSet.
If set, the server will attempt to use this value as the resource id. If it is
already in use, an error is returned with code ALREADY_EXISTS. Must be at most
128 characters long. It cannot contain the character /.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_productset_create_template_fields]
template_fields: Sequence[str] = (
"location",
"project_id",
"product_set_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_productset_create_template_fields]
def __init__(
self,
*,
product_set: dict | ProductSet,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
product_set_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.project_id = project_id
self.product_set = product_set
self.product_set_id = product_set_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
return hook.create_product_set(
location=self.location,
project_id=self.project_id,
product_set=self.product_set,
product_set_id=self.product_set_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
self.log.info(
"Product set with id %s already exists. Exiting from the create operation.",
self.product_set_id,
)
return self.product_set_id
| CloudVisionCreateProductSetOperator |
python | pyca__cryptography | tests/hazmat/primitives/decrepit/test_algorithms.py | {
"start": 9325,
"end": 9607
} | class ____:
test_cfb = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "IDEA"),
["idea-cfb.txt"],
lambda key, **kwargs: IDEA(binascii.unhexlify(key)),
lambda iv, **kwargs: CFB(binascii.unhexlify(iv)),
)
| TestIDEAModeCFB |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_scalarinherit.py | {
"start": 573,
"end": 680
} | class ____(np.float64, HasNew):
pass
@skip(reason="scalar repr: numpy plans to make it more explicit")
| B1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.