id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
15,067 | import configparser
import errno
import json
import os
import re
import subprocess
import sys
HANDLERS = {}
The provided code snippet includes necessary dependencies for implementing the `register_vcs_handler` function. Write a Python function `def register_vcs_handler(vcs, method)` to solve the following problem:
Create decorator to mark a method as the handler of a VCS.
Here is the function:
def register_vcs_handler(vcs, method):  # decorator
    """Create decorator to mark a method as the handler of a VCS."""

    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        # setdefault creates the per-VCS sub-dict on first registration.
        HANDLERS.setdefault(vcs, {})[method] = f
        return f

    return decorate
15,068 | import configparser
import errno
import json
import os
import re
import subprocess
import sys
The provided code snippet includes necessary dependencies for implementing the `git_get_keywords` function. Write a Python function `def git_get_keywords(versionfile_abs)` to solve the following problem:
Extract version information from the given file.
Here is the function:
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Scans *versionfile_abs* for the ``git_refnames``, ``git_full`` and
    ``git_date`` keyword assignments and returns whichever were found as a
    dict with keys "refnames", "full" and "date".  Returns an empty dict
    when the file is unreadable or contains none of the keywords.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # BUGFIX: use a context manager so the file handle is closed even if
        # an exception fires mid-scan (the original open()/close() pair
        # leaked the handle on error).
        with open(versionfile_abs, "r") as f:
            for line in f:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except EnvironmentError:
        # Missing/unreadable file is not an error here; caller treats an
        # empty dict as "keywords unavailable".
        pass
    return keywords
15,069 | import configparser
import errno
import json
import os
import re
import subprocess
import sys
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Extraction strategies (keywords, VCS, parentdir) raise this to mean
    "not applicable here, try the next source", not a fatal error.
    """
The provided code snippet includes necessary dependencies for implementing the `git_versions_from_keywords` function. Write a Python function `def git_versions_from_keywords(keywords, tag_prefix, verbose)` to solve the following problem:
Get version information from git keywords.
Here is the function:
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # Use only the last line. Previous lines may contain GPG signature
        # information.  git-2.2.0 added "%cI" (ISO-8601 compliant), but we
        # prefer "%ci" (an "ISO-8601-like" form available since git-1.5.3)
        # and patch it into compliance: first space -> "T", next one dropped.
        date = date.splitlines()[-1].strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {r.strip() for r in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags.
        # Heuristic: keep only refnames containing a digit, which filters
        # out branch names like "release"/"stabilization", plus "HEAD" and
        # "master" (the old %d expansion strips refs/heads/ and refs/tags/,
        # so branches and tags are otherwise indistinguishable).
        tags = {r for r in refs if re.search(r"\d", r)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        r = ref[len(tag_prefix):]
        if verbose:
            print("picking %s" % r)
        return {
            "version": r,
            "full-revisionid": keywords["full"].strip(),
            "dirty": False,
            "error": None,
            "date": date,
        }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {
        "version": "0+unknown",
        "full-revisionid": keywords["full"].strip(),
        "dirty": False,
        "error": "no suitable tags",
        "date": None,
    }
15,070 | import configparser
import errno
import json
import os
import re
import subprocess
import sys
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Extraction strategies (keywords, VCS, parentdir) raise this to mean
    "not applicable here, try the next source", not a fatal error.
    """
The provided code snippet includes necessary dependencies for implementing the `git_pieces_from_vcs` function. Write a Python function `def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command)` to solve the following problem:
Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree.
Here is the function:
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.
    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: "long" (full hex), "short",
    "closest-tag" (or None), "distance", "dirty", "date", "error".
    """
    # NOTE(review): the 'run_command' default binds the module-level helper
    # at definition time; callers/tests may inject a stub via the keyword.
    GITS = ["git"]
    if sys.platform == "win32":
        # shell=False, so the .cmd/.exe suffix must be explicit on Windows.
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(
        GITS,
        [
            "describe",
            "--tags",
            "--dirty",
            "--always",
            "--long",
            "--match",
            "%s*" % tag_prefix,
        ],
        cwd=root,
    )
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[: git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
        if not mo:
            # unparsable. Maybe git-describe is misbehaving?
            pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
                full_tag,
                tag_prefix,
            )
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix) :]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
        0
    ].strip()
    # Use only the last line. Previous lines may contain GPG signature
    # information.
    date = date.splitlines()[-1]
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
15,071 | import configparser
import errno
import json
import os
import re
import subprocess
import sys
def get_versions(verbose=False):
    """Get the project version from whatever source is available.

    Returns dict with two keys: 'version' and 'full'.
    """
    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]

    root = get_root()
    cfg = get_config_from_root(root)
    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert (
        cfg.versionfile_source is not None
    ), "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
    versionfile_abs = os.path.join(root, cfg.versionfile_source)

    # Sources are tried in a fixed order, each raising NotThisMethod to pass:
    # expanded keywords (git-archive tarballs), a previously rewritten
    # _version.py, the live VCS checkout, then the parent directory name
    # (sdist tarballs / download-from-tag zipballs).
    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    if get_keywords_f and from_keywords_f:
        try:
            ver = from_keywords_f(
                get_keywords_f(versionfile_abs), cfg.tag_prefix, verbose
            )
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            pass

    try:
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, ver))
        return ver
    except NotThisMethod:
        pass

    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            ver = render(from_vcs_f(cfg.tag_prefix, root, verbose), cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass

    try:
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass

    if verbose:
        print("unable to compute version")
    # Last resort: a well-formed "unknown" result with the usual dict shape.
    return {
        "version": "0+unknown",
        "full-revisionid": None,
        "dirty": None,
        "error": "unable to compute version",
        "date": None,
    }
The provided code snippet includes necessary dependencies for implementing the `get_version` function. Write a Python function `def get_version()` to solve the following problem:
Get the short version string for this project.
Here is the function:
def get_version():
    """Get the short version string for this project."""
    # Delegate to get_versions() and keep only the rendered version string.
    info = get_versions()
    return info["version"]
15,072 | import configparser
import errno
import json
import os
import re
import subprocess
import sys
def get_root():
    """Get the project root directory.

    We require that all commands are run from the project root, i.e. the
    directory that contains setup.py, setup.cfg, and versioneer.py .

    Returns the absolute project root path.  Raises VersioneerBadRootError
    when neither the current directory nor the directory of sys.argv[0]
    contains setup.py or versioneer.py.
    """
    root = os.path.realpath(os.path.abspath(os.getcwd()))
    setup_py = os.path.join(root, "setup.py")
    versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        # allow 'python path/to/setup.py COMMAND'
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
        setup_py = os.path.join(root, "setup.py")
        versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        # BUGFIX: the message used to read "unable to run the project root
        # directory", which was garbled; say what actually failed.
        err = (
            "Versioneer was unable to find the project root directory. "
            "Versioneer requires setup.py to be executed from "
            "its immediate directory (like 'python setup.py COMMAND'), "
            "or in a way that lets it use sys.argv[0] to find the root "
            "(like 'python path/to/setup.py COMMAND')."
        )
        raise VersioneerBadRootError(err)
    try:
        # Certain runtime workflows (setup.py install/develop in a setuptools
        # tree) execute all dependencies in a single python process, so
        # "versioneer" may be imported multiple times, and python's shared
        # module-import table will cache the first one. So we can't use
        # os.path.dirname(__file__), as that will find whichever
        # versioneer.py was first imported, even in later projects.
        me = os.path.realpath(os.path.abspath(__file__))
        me_dir = os.path.normcase(os.path.splitext(me)[0])
        vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
        if me_dir != vsr_dir:
            print(
                "Warning: build in %s is using versioneer.py from %s"
                % (os.path.dirname(me), versioneer_py)
            )
    except NameError:
        # frozen/embedded interpreters may not define __file__
        pass
    return root
def get_config_from_root(root):
    """Read the project setup.cfg file to determine Versioneer config."""
    # This might raise EnvironmentError (if setup.cfg is missing), or
    # configparser.NoSectionError (if it lacks a [versioneer] section), or
    # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
    # the top of versioneer.py for instructions on writing your setup.cfg .
    parser = configparser.ConfigParser()
    with open(os.path.join(root, "setup.cfg"), "r") as cfg_file:
        parser.read_file(cfg_file)
    VCS = parser.get("versioneer", "VCS")  # mandatory

    def lookup(name):
        # Optional [versioneer] keys come back as None when absent.
        if parser.has_option("versioneer", name):
            return parser.get("versioneer", name)
        return None

    cfg = VersioneerConfig()
    cfg.VCS = VCS
    cfg.style = lookup("style") or ""
    cfg.versionfile_source = lookup("versionfile_source")
    cfg.versionfile_build = lookup("versionfile_build")
    cfg.tag_prefix = lookup("tag_prefix")
    if cfg.tag_prefix in ("''", '""'):
        # An explicitly quoted empty prefix in setup.cfg means "no prefix".
        cfg.tag_prefix = ""
    cfg.parentdir_prefix = lookup("parentdir_prefix")
    cfg.verbose = lookup("verbose")
    return cfg
LONG_VERSION_PY = {}
LONG_VERSION_PY[
"git"
] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
def write_to_version_file(filename, versions):
    """Write the given version number to the given _version.py file.

    The *versions* dict is serialized as JSON and spliced into the
    SHORT_VERSION_PY template, replacing *filename* entirely.
    """
    try:
        os.unlink(filename)
    except FileNotFoundError:
        # ROBUSTNESS: creating the version file for the first time (no
        # previous _version.py) should not be an error; open(..., "w")
        # below creates it.
        pass
    contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": "))
    with open(filename, "w") as f:
        f.write(SHORT_VERSION_PY % contents)
    print("set %s to '%s'" % (filename, versions["version"]))
def get_versions(verbose=False):
    """Get the project version from whatever source is available.
    Returns dict with two keys: 'version' and 'full'.
    """
    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]
    root = get_root()
    cfg = get_config_from_root(root)
    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert (
        cfg.versionfile_source is not None
    ), "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
    versionfile_abs = os.path.join(root, cfg.versionfile_source)
    # extract version from first of: _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.
    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    if get_keywords_f and from_keywords_f:
        try:
            # Keywords path: works when git-archive expanded the $Format$
            # markers in _version.py.
            keywords = get_keywords_f(versionfile_abs)
            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            # "not applicable here" — fall through to the next source
            pass
    try:
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, ver))
        return ver
    except NotThisMethod:
        pass
    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
            ver = render(pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass
    try:
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass
    if verbose:
        print("unable to compute version")
    # Last resort: a well-formed "unknown" result so callers always get the
    # same dict shape.
    return {
        "version": "0+unknown",
        "full-revisionid": None,
        "dirty": None,
        "error": "unable to compute version",
        "date": None,
    }
The provided code snippet includes necessary dependencies for implementing the `get_cmdclass` function. Write a Python function `def get_cmdclass(cmdclass=None)` to solve the following problem:
Get the custom setuptools/distutils subclasses used by Versioneer. If the package uses a different cmdclass (e.g. one from numpy), it should be provided as an argument.
Here is the function:
def get_cmdclass(cmdclass=None):
"""Get the custom setuptools/distutils subclasses used by Versioneer.
If the package uses a different cmdclass (e.g. one from numpy), it
should be provide as an argument.
"""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
# sandbox that restores sys.modules to it's pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/python-versioneer/python-versioneer/issues/52
cmds = {} if cmdclass is None else cmdclass.copy()
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "build_py" in cmds:
_build_py = cmds["build_py"]
elif "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "setuptools" in sys.modules:
from setuptools.command.build_ext import build_ext as _build_ext
else:
from distutils.command.build_ext import build_ext as _build_ext
class cmd_build_ext(_build_ext):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_ext.run(self)
if self.inplace:
# build_ext --inplace will only build extensions in
# build/lib<..> dir with no _version.py to write to.
# As in place builds will already have a _version.py
# in the module dir, we do not need to write one.
return
# now locate _version.py in the new build/ directory and replace
# it with an updated value
target_versionfile = os.path.join(self.build_lib, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_ext"] = cmd_build_ext
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if "py2exe" in sys.modules: # py2exe enabled?
from py2exe.distutils_buildexe import py2exe as _py2exe
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if "sdist" in cmds:
_sdist = cmds["sdist"]
elif "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(
target_versionfile, self._versioneer_generated_versions
)
cmds["sdist"] = cmd_sdist
return cmds | Get the custom setuptools/distutils subclasses used by Versioneer. If the package uses a different cmdclass (e.g. one from numpy), it should be provide as an argument. |
15,073 | import configparser
import errno
import json
import os
import re
import subprocess
import sys
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = (
"Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND')."
)
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print(
"Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py)
)
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.ConfigParser()
with open(setup_cfg, "r") as f:
parser.read_file(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
LONG_VERSION_PY = {}
LONG_VERSION_PY[
"git"
] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
The provided code snippet includes necessary dependencies for implementing the `do_setup` function. Write a Python function `def do_setup()` to solve the following problem:
Do main VCS-independent setup function for installing Versioneer.
Here is the function:
def do_setup():
"""Do main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (
EnvironmentError,
configparser.NoSectionError,
configparser.NoOptionError,
) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg", file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(
" appending versionfile_source ('%s') to MANIFEST.in"
% cfg.versionfile_source
)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0 | Do main VCS-independent setup function for installing Versioneer. |
15,074 | import configparser
import errno
import json
import os
import re
import subprocess
import sys
The provided code snippet includes necessary dependencies for implementing the `scan_setup_py` function. Write a Python function `def scan_setup_py()` to solve the following problem:
Validate the contents of setup.py against Versioneer's expectations.
Here is the function:
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors | Validate the contents of setup.py against Versioneer's expectations. |
15,075 | from glob import glob
from setuptools import setup
import versioneer
def read_file(fname):
with open(fname, "r", encoding="utf8") as f:
return f.read() | null |
15,076 | from ipyflow._version import get_versions
__version__ = get_versions()["version"]
def make_version_tuple(vstr=None):
if vstr is None:
vstr = __version__
if vstr[0] == "v":
vstr = vstr[1:]
components = []
for component in vstr.split("+")[0].split("."):
try:
components.append(int(component))
except ValueError:
break
return tuple(components) | null |
15,077 | import argparse
import json
import os
import platform
import sys
from IPython.utils.tempdir import TemporaryDirectory
from jupyter_client.kernelspec import KernelSpecManager
# Root package name ("ipyflow"), reused as the installed kernel's name.
PACKAGE = __package__.split(".")[0]
# Jupyter kernel spec serialized to kernel.json; "{connection_file}" is a
# placeholder substituted by Jupyter when it launches the kernel.
# NOTE(review): DISPLAY_NAME is not defined in this chunk -- presumably a
# module-level constant elsewhere in the file; confirm.
kernel_json = {
    "argv": [
        sys.executable,
        "-m",
        "ipyflow.kernel",
        "-f",
        "{connection_file}",
    ],
    "display_name": DISPLAY_NAME,
    "language": "python",
    "codemirror_mode": "shell",
    "metadata": {
        # advertise debug-adapter support to Jupyter frontends
        "debugger": True,
    },
}
def install_my_kernel_spec(user=True, prefix=None):
    """Install the ipyflow kernel spec for Jupyter.

    Parameters
    ----------
    user : bool
        Install into the per-user kernel directory (default True).
    prefix : str or None
        Optional install prefix (e.g. a virtualenv or conda env root).
    """
    import shutil

    with TemporaryDirectory() as td:
        os.chmod(td, 0o755)  # Starts off as 700, not user readable
        import ipyflow

        resources = os.path.join(
            os.path.dirname(os.path.abspath(ipyflow.__file__)), "resources", "kernel"
        )
        # Fix: the previous implementation shelled out via
        # os.system(f"cp {logo32} {logo64} {td}"), which breaks on paths
        # containing spaces and is invalid cmd.exe syntax on Windows
        # (`copy src1 src2 dest`). shutil.copy is portable and quoting-safe.
        for logo_name in ("logo-32x32.png", "logo-64x64.png"):
            try:
                shutil.copy(os.path.join(resources, logo_name), td)
            except OSError:
                # Logos are cosmetic; a failed shell copy was silently
                # ignored before, so keep the best-effort behavior.
                pass
        with open(os.path.join(td, "kernel.json"), "w") as f:
            json.dump(kernel_json, f, sort_keys=True)
        print(f"Installing KernelSpec for {PACKAGE} kernel")
        KernelSpecManager().install_kernel_spec(
            td, kernel_name=f"{PACKAGE}", user=user, prefix=prefix
        )
import argparse
import json
import os
import platform
import sys
from IPython.utils.tempdir import TemporaryDirectory
from jupyter_client.kernelspec import KernelSpecManager
def _is_root():
try:
return os.geteuid() == 0
except AttributeError:
return False # assume not an admin on non-Unix platforms | null |
import asyncio
import inspect
import logging
from typing import TYPE_CHECKING, NamedTuple, Optional
from typing import Type as TypeType
from ipykernel.ipkernel import IPythonKernel
from IPython import get_ipython
from traitlets import Type
from ipyflow import singletons
from ipyflow.flow import NotebookFlow
from ipyflow.shell.zmqshell import IPyflowZMQInteractiveShell
from ipyflow.singletons import flow
from ipyflow.utils.ipython_utils import make_mro_inserter_metaclass
from ipyflow.utils.misc_utils import is_project_file
from ipyflow.version import __version__
def is_project_file(filename: str) -> bool:
    """Report whether *filename* matches the ipyflow project-file pattern."""
    return _PROJECT_FILE_REGEX.search(filename) is not None
def patch_pydevd_file_filters() -> None:
    """Best-effort: make the pydevd debugger skip ipyflow's own frames.

    Wraps ``FilesFiltering.in_project_roots`` so ipyflow project files are
    never treated as user code. Silently a no-op when pydevd is absent or
    its internals have changed.
    """
    try:
        from _pydevd_bundle.pydevd_filtering import FilesFiltering

        original_in_project_roots = FilesFiltering.in_project_roots

        def in_project_roots(self, received_filename):
            # ipyflow internals should not be stepped into by the debugger
            if is_project_file(received_filename):
                return False
            return original_in_project_roots(self, received_filename)

        FilesFiltering.in_project_roots = in_project_roots
    except:  # noqa: E722
        pass
import asyncio
import inspect
import logging
from typing import TYPE_CHECKING, NamedTuple, Optional
from typing import Type as TypeType
from ipykernel.ipkernel import IPythonKernel
from IPython import get_ipython
from traitlets import Type
from ipyflow import singletons
from ipyflow.flow import NotebookFlow
from ipyflow.shell.zmqshell import IPyflowZMQInteractiveShell
from ipyflow.singletons import flow
from ipyflow.utils.ipython_utils import make_mro_inserter_metaclass
from ipyflow.utils.misc_utils import is_project_file
from ipyflow.version import __version__
def patched_taskrunner_run(_self, coro):
    """Run *coro* to completion, nesting into an already-running loop if needed.

    Drop-in replacement for jupyter_core's ``_TaskRunner.run``; ``_self`` is
    the task-runner instance and is deliberately unused.
    """
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:
        # No loop is running in this thread; fall back to the policy's loop.
        # Workaround for bugs.python.org/issue39529.
        try:
            loop = asyncio.get_event_loop_policy().get_event_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
    from ipyflow.kernel import patched_nest_asyncio
    # Permit re-entrant run_until_complete on a loop that is already running.
    patched_nest_asyncio.apply(loop)
    future = asyncio.ensure_future(coro, loop=loop)
    try:
        return loop.run_until_complete(future)
    except BaseException as e:
        # Don't leave the coroutine pending when the wait is interrupted
        # (including KeyboardInterrupt / cancellation).
        future.cancel()
        raise e
def patch_jupyter_taskrunner_run():
    """Best-effort monkeypatch of jupyter_core's ``_TaskRunner.run``.

    Workaround for the issue described in
    https://github.com/jupyter/notebook/issues/6721; silently a no-op when
    jupyter_core is unavailable or its internals differ.
    """
    try:
        from jupyter_core.utils import _TaskRunner

        _TaskRunner.run = patched_taskrunner_run
    except:  # noqa: E722
        pass
from typing import TYPE_CHECKING, Any, List, Set, Union, cast
from ipyflow.data_model.symbol import Symbol
from ipyflow.data_model.timestamp import Timestamp
from ipyflow.tracing.watchpoint import Watchpoints
def _validate(sym: Any) -> Symbol:
    """Narrow *sym* to ``Symbol``, raising ``ValueError`` for anything else."""
    # None fails the isinstance check too, so a separate None test is not needed.
    if isinstance(sym, Symbol):
        return cast(Symbol, sym)
    raise ValueError("unable to lookup metadata for symbol")
class Symbol:
    # Sentinel standing in for "no object" (distinct from a real None value).
    NULL = object()

    # object for virtual display symbol
    DISPLAY = object()

    # Types treated as immutable for dependency propagation.
    # NOTE(review): IMMUTABLE_PRIMITIVE_TYPES is not defined in this chunk --
    # presumably imported elsewhere in the full module; confirm.
    IMMUTABLE_TYPES = set(IMMUTABLE_PRIMITIVE_TYPES)

    # Name of the hidden namespace member used to model object mutations.
    IPYFLOW_MUTATION_VIRTUAL_SYMBOL_NAME = "__ipyflow_mutation"
    def __init__(
        self,
        name: SupportedIndexType,
        symbol_type: SymbolType,
        obj: Any,
        containing_scope: "Scope",
        stmt_node: Optional[ast.stmt] = None,
        symbol_node: Optional[ast.AST] = None,
        refresh_cached_obj: bool = False,
        implicit: bool = False,
    ) -> None:
        """Create a tracked symbol for *obj* named *name* in *containing_scope*.

        ``stmt_node`` / ``symbol_node`` are the AST locations that defined the
        symbol; ``refresh_cached_obj`` snapshots the object's id/type/len up
        front; ``implicit`` marks symbols created while tracing rather than by
        an explicit store.
        """
        if refresh_cached_obj:
            # TODO: clean up redundancies
            assert implicit
            assert stmt_node is None
        self.name = name
        self.symbol_type = symbol_type
        self.obj = obj
        # additional user-specific metadata
        self._tags: Set[str] = set()
        self.extra_metadata: Dict[str, Any] = {}
        # set when the symbol has been garbage-collected / deleted
        self._tombstone = False
        self._cached_out_of_sync = True
        self.cached_obj_id: Optional[int] = None
        self.cached_obj_type: Optional[Type[object]] = None
        self.cached_obj_len: Optional[int] = None
        if refresh_cached_obj:
            self._refresh_cached_obj()
        self.containing_scope = containing_scope or flow().global_scope
        self.call_scope: Optional[Scope] = None
        self.func_def_stmt: Optional[ast.stmt] = None
        self.stmt_node = self.update_stmt_node(stmt_node)
        self.symbol_node = symbol_node
        self._funcall_live_symbols = None
        # dataflow edges: timestamps at which each parent/child edge was added
        self.parents: Dict["Symbol", List[Timestamp]] = {}
        self.children: Dict["Symbol", List[Timestamp]] = {}
        # initialize at -1 for implicit since the corresponding piece of data could already be around,
        # and we don't want liveness checker to think this was newly created unless we
        # explicitly trace an update somewhere
        self._timestamp: Timestamp = (
            Timestamp.uninitialized() if implicit else Timestamp.current()
        )
        self._snapshot_timestamps: List[Timestamp] = []
        self._snapshot_timestamp_ubounds: List[Timestamp] = []
        self._defined_cell_num = cells().exec_counter()
        self._is_dangling_on_edges = False
        self._cascading_reactive_cell_num = -1
        self._override_ready_liveness_cell_num = -1
        self._override_timestamp: Optional[Timestamp] = None
        self.watchpoints = Watchpoints()
        # The necessary last-updated timestamp / cell counter for this symbol to not be waiting
        self.required_timestamp: Timestamp = self.timestamp
        # for each usage of this sym, the version that was used, if different from the timestamp of usage
        self.timestamp_by_used_time: Dict[Timestamp, Timestamp] = {}
        self.used_node_by_used_time: Dict[Timestamp, ast.AST] = {}
        # History of definitions at time of liveness
        self.timestamp_by_liveness_time: Dict[Timestamp, Timestamp] = {}
        # All timestamps associated with updates to this symbol
        self.updated_timestamps: Set[Timestamp] = set()
        # The most recent timestamp associated with a particular object id
        self.last_updated_timestamp_by_obj_id: Dict[int, Timestamp] = {}
        self.fresher_ancestors: Set["Symbol"] = set()
        self.fresher_ancestor_timestamps: Set[Timestamp] = set()
        # cells where this symbol was live
        self.cells_where_deep_live: Set[Cell] = set()
        self.cells_where_shallow_live: Set[Cell] = set()
        self._last_computed_ready_or_waiting_cache_ts: int = -1
        self._is_ready_or_waiting_at_position_cache: Dict[Tuple[int, bool], bool] = {}
        # if implicitly created when tracing non-store-context ast nodes
        self._implicit = implicit
        # Will never be stale if no_warning is True
        self.disable_warnings = False
        self._temp_disable_warnings = False
        self._num_ipywidget_observers = 0
        self._num_mercury_widget_observers = 0
        # register this symbol as an alias of the object it refers to
        flow().aliases.setdefault(id(obj), set()).add(self)
        if (
            isinstance(self.name, str)
            and not self.is_anonymous
            and not self.containing_scope.is_namespace_scope
        ):
            ns = self.namespace
            if ns is not None and ns.scope_name == "self":
                # hack to get a better name than `self.whatever` for fields of this object
                # not ideal because it relies on the `self` convention but is probably
                # acceptable for the use case of improving readable names
                ns.scope_name = self.name
def aliases(self) -> List["Symbol"]:
return list(flow().aliases.get(self.obj_id, []))
def cells_where_live(self) -> Set[Cell]:
return self.cells_where_deep_live | self.cells_where_shallow_live
def __repr__(self) -> str:
return f"<{self.readable_name}>"
def __str__(self) -> str:
return self.readable_name
def __hash__(self) -> int:
return hash(id(self))
def __lt__(self, other) -> bool:
return id(self) < id(other)
def add_tag(self, tag_value: str) -> None:
self._tags.add(tag_value)
def remove_tag(self, tag_value: str) -> None:
self._tags.discard(tag_value)
def has_tag(self, tag_value: str) -> bool:
return tag_value in self._tags
    def temporary_disable_warnings(self) -> None:
        """Suppress staleness warnings for this symbol until its next update."""
        self._temp_disable_warnings = True

    def last_used_timestamp(self) -> Timestamp:
        # Most recent time this symbol was used; uninitialized if never used.
        if len(self.timestamp_by_used_time) == 0:
            return Timestamp.uninitialized()
        else:
            return max(self.timestamp_by_used_time.keys())

    def namespace_waiting_symbols(self) -> Set["Symbol"]:
        # Waiting symbols inside this symbol's namespace; empty when it has none.
        ns = self.namespace
        return set() if ns is None else ns.namespace_waiting_symbols

    def shallow_timestamp(self) -> Timestamp:
        # Timestamp of this symbol itself, ignoring namespace members; an
        # override (e.g. from widget observation) can push it forward.
        if self._override_timestamp is None:
            return self._timestamp
        else:
            return max(self._timestamp, self._override_timestamp)

    def visible_timestamp(self) -> Optional[Timestamp]:
        # Latest update timestamp whose cell is still visible, if any.
        for ts in sorted(self.updated_timestamps, reverse=True):
            if cells().at_timestamp(ts).is_visible:
                return ts
        return None

    def memoize_timestamp(self) -> Optional[Timestamp]:
        # Last-updated timestamp recorded for the current object id, if any.
        return self.last_updated_timestamp_by_obj_id.get(self.obj_id)

    def timestamp(self) -> Timestamp:
        # Deep timestamp: max over this symbol and its namespace's descendents
        # (imports/modules are exempted from the deep component).
        # NOTE(review): these accessors are used attribute-style elsewhere
        # (e.g. `self.shallow_timestamp` below), so they are presumably
        # @property-decorated in the full source; the decorators are not
        # visible in this chunk -- confirm.
        ts = self.shallow_timestamp
        if self.is_import or self.is_module:
            return ts
        ns = self.namespace
        return ts if ns is None else max(ts, ns.max_descendent_timestamp)
def _compute_namespace_timestamps(
self,
seen: Optional[Set["Symbol"]] = None,
version_ubound: Optional[Timestamp] = None,
) -> Set[Timestamp]:
if version_ubound is None:
timestamps = {self.shallow_timestamp, self.timestamp}
else:
max_leq_ubound = Timestamp.uninitialized()
for ts in reversed(self._snapshot_timestamps):
if ts <= version_ubound:
max_leq_ubound = ts
break
if max_leq_ubound.is_initialized:
timestamps = {max_leq_ubound}
else:
timestamps = set()
ns = self.namespace
if ns is None:
return timestamps
if seen is None:
seen = set()
if self in seen:
return timestamps
seen.add(self)
for sym in ns.all_symbols_this_indentation():
timestamps |= sym._compute_namespace_timestamps(
seen=seen, version_ubound=version_ubound
)
return timestamps
def _get_timestamps_for_version(self, version: int) -> Set[Timestamp]:
if len(self._snapshot_timestamps) == 0:
return {self.timestamp}
ts = self._snapshot_timestamps[version]
if ts.cell_num == -1:
return {Timestamp(self.defined_cell_num, ts.stmt_num)}
else:
return self._compute_namespace_timestamps(
version_ubound=None if version == -1 else ts
)
def code(
self, format_type: Optional[Type[FormatType]] = None, version: int = -1
) -> Slice:
return statements().format_multi_slice(
self._get_timestamps_for_version(version=version),
blacken=True,
format_type=format_type,
)
def cascading_reactive_cell_num(
self,
seen: Optional[Set["Symbol"]] = None,
consider_containing_symbols: bool = True,
) -> int:
if seen is None:
seen = set()
if self in seen:
return -1
seen.add(self)
cell_num = self._cascading_reactive_cell_num
ns = self.namespace
ret = (
cell_num
if ns is None
else max(
cell_num,
ns.max_cascading_reactive_cell_num(seen),
)
)
if not consider_containing_symbols:
return ret
for sym in self.iter_containing_symbols():
ret = max(ret, sym.cascading_reactive_cell_num(seen=seen))
return ret
def bump_cascading_reactive_cell_num(self, ctr: Optional[int] = None) -> None:
self._cascading_reactive_cell_num = max(
self._cascading_reactive_cell_num,
flow().cell_counter() if ctr is None else ctr,
)
def iter_containing_symbols(self) -> Generator["Symbol", None, None]:
yield self
ns = self.containing_namespace
if ns is None or not ns.is_namespace_scope:
return
for containing_ns in ns.iter_containing_namespaces():
yield from flow().aliases.get(containing_ns.obj_id, [])
def waiting_timestamp(self) -> int:
return max(self._timestamp.cell_num, flow().min_timestamp)
def defined_cell_num(self) -> int:
return self._defined_cell_num
def readable_name(self) -> str:
return self.containing_scope.make_namespace_qualified_name(self)
def is_subscript(self) -> bool:
return self.symbol_type == SymbolType.SUBSCRIPT
def is_class(self) -> bool:
return self.symbol_type == SymbolType.CLASS
def is_function(self) -> bool:
return self.symbol_type == SymbolType.FUNCTION
def is_lambda(self) -> bool:
# TODO: this is terrible
return type(self.name) is str and self.name.startswith( # noqa: E721
"<lambda_sym_"
)
def is_import(self) -> bool:
return self.symbol_type == SymbolType.IMPORT
def is_module(self) -> bool:
return self.symbol_type == SymbolType.MODULE
def imported_module(self) -> str:
if not self.is_import:
raise ValueError("only IMPORT symbols have `imported_module` property")
if isinstance(self.stmt_node, ast.Import):
for alias in self.stmt_node.names:
name = alias.asname or alias.name
if name == self.name:
return alias.name
raise ValueError(
"Unable to find module for symbol %s is stmt %s"
% (self, ast.dump(self.stmt_node))
)
elif isinstance(self.stmt_node, ast.ImportFrom):
return self.stmt_node.module
else:
raise TypeError(
"Invalid stmt type for import symbol: %s" % ast.dump(self.stmt_node)
)
def imported_symbol_original_name(self) -> str:
if not self.is_import:
raise ValueError(
"only IMPORT symbols have `imported_symbol_original_name` property"
)
if isinstance(self.stmt_node, ast.Import):
return self.imported_module
elif isinstance(self.stmt_node, ast.ImportFrom):
for alias in self.stmt_node.names:
name = alias.asname or alias.name
if name == self.name:
return alias.name
raise ValueError(
"Unable to find module for symbol %s is stmt %s"
% (self, ast.dump(self.stmt_node))
)
else:
raise TypeError(
"Invalid stmt type for import symbol: %s" % ast.dump(self.stmt_node)
)
def is_cascading_reactive_at_counter(self, ctr: int) -> bool:
return self.cascading_reactive_cell_num() > max(
ctr, flow().min_cascading_reactive_cell_num
)
def get_top_level(self) -> Optional["Symbol"]:
if not self.containing_scope.is_namespace_scope:
return self
else:
containing_scope = cast("Namespace", self.containing_scope)
for alias in flow().aliases.get(containing_scope.obj_id, []):
if alias.is_globally_accessible:
return alias.get_top_level()
return None
def get_import_string(self) -> str:
if not self.is_import:
raise ValueError("only IMPORT symbols support recreating the import string")
module = self.imported_module
if isinstance(self.stmt_node, ast.Import):
if module == self.name:
return f"import {module}"
else:
return f"import {module} as {self.name}"
elif isinstance(self.stmt_node, ast.ImportFrom):
original_symbol_name = self.imported_symbol_original_name
if original_symbol_name == self.name:
return f"from {module} import {original_symbol_name}"
else:
return f"from {module} import {original_symbol_name} as {self.name}"
else:
raise TypeError(
"Invalid stmt type for import symbol: %s" % ast.dump(self.stmt_node)
)
def is_anonymous(self) -> bool:
if self.symbol_type == SymbolType.ANONYMOUS:
return True
ns = self.containing_namespace
if ns is not None and ns.is_anonymous:
return True
return False
def is_implicit(self) -> bool:
return self._implicit
def shallow_clone(
self, new_obj: Any, new_containing_scope: "Scope", symbol_type: SymbolType
) -> "Symbol":
return self.__class__(self.name, symbol_type, new_obj, new_containing_scope)
def obj_id(self) -> int:
return id(self.obj)
def obj_len(self) -> Optional[int]:
try:
if not self.is_obj_lazy_module and hasattr(self.obj, "__len__"):
return len(self.obj)
except: # noqa: E722
pass
return None
def obj_type(self) -> Type[Any]:
return type(self.obj)
def is_immutable(self) -> bool:
return self.obj_type in self.IMMUTABLE_TYPES
def is_mutation_virtual_symbol(self) -> bool:
return self.name == self.IPYFLOW_MUTATION_VIRTUAL_SYMBOL_NAME
def is_underscore(self) -> bool:
return self.name == "_" and self.containing_scope.is_global
def is_obj_lazy_module(self) -> bool:
return self.obj_type is _LazyModule
def get_type_annotation(self):
return get_type_annotation(self.obj)
def get_type_annotation_string(self) -> str:
return make_annotation_string(self.get_type_annotation())
def namespace(self) -> Optional["Namespace"]:
return flow().namespaces.get(self.obj_id)
def containing_namespace(self) -> Optional["Namespace"]:
if self.containing_scope.is_namespace_scope:
return cast("Namespace", self.containing_scope)
else:
return None
def full_path(self) -> Tuple[str, ...]:
return self.containing_scope.full_path + (str(self.name),)
def full_namespace_path(self) -> str:
return self.containing_scope.make_namespace_qualified_name(self)
def is_garbage(self) -> bool:
return self._tombstone
def is_new_garbage(self) -> bool:
if self._tombstone:
return False
containing_ns = self.containing_namespace
numpy = sys.modules.get("numpy", None)
if (
numpy is not None
and containing_ns is not None
and isinstance(containing_ns.obj, numpy.ndarray)
):
# numpy atoms are not interned (so assigning array elts to a variable does not bump refcount);
# also seems that refcount is always 0, so just check if the containing namespace is garbage
return self.containing_namespace.is_garbage
return self.get_ref_count() == 0
def is_globally_accessible(self) -> bool:
return self.containing_scope.is_globally_accessible
def is_user_accessible(self) -> bool:
return (
self.is_globally_accessible
and not self.is_anonymous
and not self.is_garbage
and not (
self.containing_namespace is not None
and (
self.containing_namespace.is_anonymous
or self.containing_namespace.is_garbage
)
)
)
def _remove_self_from_aliases(self) -> None:
cleanup_discard(flow().aliases, self.obj_id, self)
self.obj = None
def mark_garbage(self) -> None:
if self.is_garbage:
return
self._tombstone = True
ns = self.namespace
if ns is not None and all(alias.is_garbage for alias in self.aliases):
ns.mark_garbage()
def collect_self_garbage(self) -> None:
assert self.is_garbage
flow().blocked_reactive_timestamps_by_symbol.pop(self, None)
self._remove_self_from_aliases()
for parent in self.parents:
parent.children.pop(self, None)
for child in self.children:
child.parents.pop(self, None)
containing_ns = self.containing_namespace
if self.is_subscript and containing_ns is not None:
containing_ns._subscript_symbol_by_name.pop(self.name, None)
elif not self.is_subscript:
self.containing_scope._symbol_by_name.pop(self.name, None)
else:
logger.warning(
"could not find symbol %s in its scope %s", self, self.containing_scope
)
# TODO: remove from static / dynamic parent / children edges
# need to keep this around for readable_name to work
# self.containing_scope = None
# def update_type(self, new_type):
# self.symbol_type = new_type
# if self.is_function:
# self.call_scope = self.containing_scope.make_child_scope(self.name)
# else:
# self.call_scope = None
def update_obj_ref(self, obj: Any, refresh_cached: bool = True) -> None:
if self._num_ipywidget_observers > 0:
try:
self.obj.unobserve_all()
except: # noqa
pass
self._num_ipywidget_observers = 0
if self._num_mercury_widget_observers > 0:
try:
self._mercury_widgets_manager.get_widget(
self.obj.code_uid
).unobserve_all()
except: # noqa
pass
self._num_mercury_widget_observers = 0
self._tombstone = False
self._cached_out_of_sync = True
if (
flow().settings.mark_typecheck_failures_unsafe
and self.cached_obj_type != type(obj)
):
for cell in self.cells_where_live:
cell.invalidate_typecheck_result()
self.cells_where_shallow_live.clear()
self.cells_where_deep_live.clear()
self.obj = obj
if self.cached_obj_id is not None and self.cached_obj_id != self.obj_id:
new_ns = flow().namespaces.get(self.obj_id, None)
# don't overwrite existing namespace for this obj
old_ns = flow().namespaces.get(self.cached_obj_id, None)
if (
old_ns is not None
and old_ns.full_namespace_path == self.full_namespace_path
):
if new_ns is None:
logger.info("create fresh copy of namespace %s", old_ns)
new_ns = old_ns.fresh_copy(obj)
old_ns.transfer_symbols_to(new_ns)
else:
new_ns.scope_name = old_ns.scope_name
new_ns.parent_scope = old_ns.parent_scope
self._handle_aliases()
if (
old_ns is not None
and len(flow().aliases.get(self.cached_obj_id, [])) == 0
):
old_ns.mark_garbage()
if refresh_cached:
self._refresh_cached_obj()
def invalidate_cached(self) -> None:
self._cached_out_of_sync = True
self.cached_obj_id = None
self.cached_obj_type = None
def get_ref_count(self) -> int:
if self.obj is None or self.obj is Symbol.NULL:
return -1
total = sys.getrefcount(self.obj) - 1
total -= len(flow().aliases.get(self.obj_id, []))
ns = flow().namespaces.get(self.obj_id, None)
if ns is not None and ns.obj is not None and ns.obj is not Symbol.NULL:
total -= 1
return total
def _should_cancel_propagation(self, prev_obj: Optional[Any]) -> bool:
if prev_obj is None:
return False
if (
flow().blocked_reactive_timestamps_by_symbol.get(self, -1)
== self.timestamp.cell_num
):
return False
if not self._cached_out_of_sync or self.obj_id == self.cached_obj_id:
return True
if self.obj is None or prev_obj is Symbol.NULL:
return self.obj is None and prev_obj is Symbol.NULL
return False
def _handle_aliases(self):
cleanup_discard(flow().aliases, self.cached_obj_id, self)
flow().aliases.setdefault(self.obj_id, set()).add(self)
def update_stmt_node(self, stmt_node: Optional[ast.stmt]) -> Optional[ast.stmt]:
self.stmt_node = stmt_node
self._funcall_live_symbols = None
if self.is_function or (
stmt_node is not None and isinstance(stmt_node, ast.Lambda)
):
# TODO: in the case of lambdas, there will not necessarily be one
# symbol for a given statement. We need a more precise way to determine
# the symbol being called than by looking at the stmt in question.
flow().statement_to_func_sym[id(stmt_node)] = self
self.call_scope = self.containing_scope.make_child_scope(self.name)
self.func_def_stmt = stmt_node
return stmt_node
def _refresh_cached_obj(self) -> None:
self._cached_out_of_sync = False
# don't keep an actual ref to avoid bumping refcount
self.cached_obj_id = self.obj_id
self.cached_obj_type = self.obj_type
self.cached_obj_len = self.obj_len
def get_definition_args(self) -> List[ast.arg]:
assert self.func_def_stmt is not None and isinstance(
self.func_def_stmt, (ast.FunctionDef, ast.AsyncFunctionDef, ast.Lambda)
)
args = []
for arg in self.func_def_stmt.args.args + self.func_def_stmt.args.kwonlyargs:
args.append(arg)
if self.func_def_stmt.args.vararg is not None:
args.append(self.func_def_stmt.args.vararg)
if self.func_def_stmt.args.kwarg is not None:
args.append(self.func_def_stmt.args.kwarg)
return args
def _match_call_args_with_definition_args(
self,
) -> Generator[Tuple[ast.arg, List["Symbol"]], None, None]:
# TODO: handle posonlyargs, kwonlyargs
assert self.func_def_stmt is not None and isinstance(
self.func_def_stmt, (ast.FunctionDef, ast.AsyncFunctionDef, ast.Lambda)
)
caller_node = self._get_calling_ast_node()
if caller_node is None or not isinstance(caller_node, ast.Call):
return
kwarg_by_name = {
arg_key.arg: arg_key
for arg_key in self.func_def_stmt.args.args[
-len(self.func_def_stmt.args.defaults) :
]
}
if not all(keyword.arg in kwarg_by_name for keyword in caller_node.keywords):
logger.warning("detected mismatched kwargs from caller node to definition")
return
def_args = self.func_def_stmt.args.args
if len(self.func_def_stmt.args.defaults) > 0:
def_args = def_args[: -len(self.func_def_stmt.args.defaults)]
if len(def_args) > 0 and def_args[0].arg == "self":
# FIXME: this is bad and I should feel bad
def_args = def_args[1:]
for def_arg, call_arg in zip(def_args, caller_node.args):
if isinstance(call_arg, ast.Starred):
# give up
# TODO: handle this case
break
yield def_arg, tracer().resolve_loaded_symbols(call_arg)
seen_keys = set()
for keyword in caller_node.keywords:
keyword_key, keyword_value = keyword.arg, keyword.value
if keyword_value is None:
continue
seen_keys.add(keyword_key)
yield kwarg_by_name[keyword_key], tracer().resolve_loaded_symbols(
keyword_value
)
for arg_key, arg_value in zip(
self.func_def_stmt.args.args[-len(self.func_def_stmt.args.defaults) :],
self.func_def_stmt.args.defaults,
):
if arg_key.arg in seen_keys:
continue
yield arg_key, tracer().resolve_loaded_symbols(arg_value)
def _get_calling_ast_node(self) -> Optional[ast.Call]:
if tracer().tracing_disabled_since_last_module_stmt or (
not hasattr(self.obj, "__module__")
and getattr(type(self.obj), "__module__", None) == "builtins"
):
return None
if self.func_def_stmt is not None and isinstance(
self.func_def_stmt, (ast.FunctionDef, ast.AsyncFunctionDef)
):
if self.name in ("__getitem__", "__setitem__", "__delitem__"):
# TODO: handle case where we're looking for a subscript for the calling node
return None
for decorator in self.func_def_stmt.decorator_list:
if isinstance(decorator, ast.Name) and decorator.id == "property":
# TODO: handle case where we're looking for an attribute for the calling node
return None
lexical_call_stack = tracer().lexical_call_stack
if len(lexical_call_stack) == 0:
return None
prev_node_id_in_cur_frame_lexical = lexical_call_stack.get_field(
"prev_node_id_in_cur_frame_lexical"
)
caller_ast_node = tracer().ast_node_by_id.get(
prev_node_id_in_cur_frame_lexical, None
)
if caller_ast_node is None or not isinstance(caller_ast_node, ast.Call):
return None
return caller_ast_node
def create_symbols_for_call_args(self, call_frame: FrameType) -> None:
assert self.func_def_stmt is not None
seen_def_args = set()
logger.info("create symbols for call to %s", self)
for def_arg, deps in self._match_call_args_with_definition_args():
seen_def_args.add(def_arg.arg)
self.call_scope.upsert_symbol_for_name(
def_arg.arg,
call_frame.f_locals.get(def_arg.arg),
deps,
self.func_def_stmt,
propagate=False,
symbol_node=def_arg,
)
logger.info("def arg %s matched with deps %s", def_arg, deps)
for def_arg in self.get_definition_args():
if def_arg.arg in seen_def_args:
continue
self.call_scope.upsert_symbol_for_name(
def_arg.arg,
None,
set(),
self.func_def_stmt,
propagate=False,
symbol_node=def_arg,
)
def is_waiting(self) -> bool:
if self.disable_warnings or self._temp_disable_warnings:
return False
if self.waiting_timestamp < self.required_timestamp.cell_num:
return True
elif flow().min_timestamp == -1:
return len(self.namespace_waiting_symbols) > 0
else:
# TODO: guard against infinite recurision
return any(sym.is_waiting for sym in self.namespace_waiting_symbols)
def is_shallow_stale(self) -> bool:
if self.disable_warnings or self._temp_disable_warnings:
return False
return self.waiting_timestamp < self.required_timestamp.cell_num
def _is_ready_or_waiting_at_position_impl(self, pos: int, deep: bool) -> bool:
for par, timestamps in self.parents.items():
for ts in timestamps:
dep_introduced_pos = cells().at_timestamp(ts).position
if dep_introduced_pos > pos:
continue
for updated_ts in par.updated_timestamps:
if cells().at_timestamp(updated_ts).position > dep_introduced_pos:
continue
if updated_ts.cell_num > ts.cell_num or par.is_waiting_at_position(
dep_introduced_pos
):
# logger.error("sym: %s", self)
# logger.error("pos: %s", pos)
# logger.error("parent: %s", par)
# logger.error("dep introdced ts: %s", ts)
# logger.error("dep introdced pos: %s", dep_introduced_pos)
# logger.error("par updated ts: %s", updated_ts)
# logger.error("par updated position: %s", cells().from_timestamp(updated_ts).position)
return True
if deep:
for sym in self.namespace_waiting_symbols:
if sym.is_waiting_at_position(pos):
return True
return False
def is_waiting_at_position(self, pos: int, deep: bool = True) -> bool:
if deep:
if not self.is_waiting:
return False
else:
if not self.is_shallow_stale:
return False
if flow().mut_settings.flow_order == FlowDirection.ANY_ORDER:
return True
if cells().exec_counter() > self._last_computed_ready_or_waiting_cache_ts:
self._is_ready_or_waiting_at_position_cache.clear()
self._last_computed_ready_or_waiting_cache_ts = cells().exec_counter()
if (pos, deep) in self._is_ready_or_waiting_at_position_cache:
return self._is_ready_or_waiting_at_position_cache[pos, deep]
# preemptively set this entry to 'False' in the cache to avoid infinite loops
self._is_ready_or_waiting_at_position_cache[pos, deep] = False
is_waiting = self._is_ready_or_waiting_at_position_impl(pos, deep)
self._is_ready_or_waiting_at_position_cache[pos, deep] = is_waiting
return is_waiting
def should_mark_waiting(self, updated_dep):
if self.disable_warnings:
return False
if updated_dep is self:
return False
return True
def _is_underscore_or_simple_assign(self, new_deps: Set["Symbol"]) -> bool:
if self.is_underscore:
# FIXME: distinguish between explicit assignment to _ from user and implicit assignment from kernel
return True
if not isinstance(self.stmt_node, (ast.Assign, ast.AnnAssign)):
return False
if len(new_deps) != 1:
return False
only_dep: Symbol = next(iter(new_deps))
# obj ids can get reused for anon symbols like literals
return not only_dep.is_anonymous and self.obj_id == only_dep.obj_id
def update_deps(
self,
new_deps: Set["Symbol"],
prev_obj: Any = None,
overwrite: bool = True,
mutated: bool = False,
deleted: bool = False,
propagate_to_namespace_descendents: bool = False,
propagate: bool = True,
refresh: bool = True,
is_cascading_reactive: Optional[bool] = None,
) -> None:
if self.is_import and self.obj_id == self.cached_obj_id:
# skip updates for imported symbols; just bump the version
self.refresh()
return
if overwrite and not self.is_globally_accessible:
self.watchpoints.clear()
if mutated and self.is_immutable:
return
# if we get here, no longer implicit
self._implicit = False
# quick last fix to avoid overwriting if we appear inside the set of deps to add (or a 1st order ancestor)
# TODO: check higher-order ancestors too?
overwrite = overwrite and self not in new_deps
overwrite = overwrite and not any(
self in new_dep.parents for new_dep in new_deps
)
logger.warning("symbol %s new deps %s", self, new_deps)
new_deps.discard(self)
if overwrite:
for parent in self.parents.keys() - new_deps:
parent.children.pop(self, None)
self.parents.pop(parent, None)
for new_parent in new_deps - self.parents.keys():
if new_parent is None:
continue
new_parent.children.setdefault(self, []).append(Timestamp.current())
self.parents.setdefault(new_parent, []).append(Timestamp.current())
self.required_timestamp = Timestamp.uninitialized()
self.fresher_ancestors.clear()
self.fresher_ancestor_timestamps.clear()
if mutated or isinstance(self.stmt_node, ast.AugAssign):
self.update_usage_info()
if (
(mutated or overwrite)
and Timestamp.current().is_initialized
and not self.is_immutable
and not self.is_mutation_virtual_symbol
and not self.is_anonymous
and self.containing_scope.is_global
and not self.is_underscore
and not self.is_implicit
and self.obj_type is not type
and not self.is_class
and self.namespace is not None
):
self.namespace.upsert_symbol_for_name(
self.IPYFLOW_MUTATION_VIRTUAL_SYMBOL_NAME, object(), propagate=False
)
propagate = propagate and (
mutated or deleted or not self._should_cancel_propagation(prev_obj)
)
try:
prev_cell = cells().current_cell().prev_cell
except KeyError:
prev_cell = None
prev_cell_ctr = -1 if prev_cell is None else prev_cell.cell_ctr
if overwrite:
flow_ = flow()
self._cascading_reactive_cell_num = -1
flow_.updated_reactive_symbols.discard(self)
flow_.updated_deep_reactive_symbols.discard(self)
if is_cascading_reactive is not None:
is_cascading_reactive = is_cascading_reactive or any(
sym.is_cascading_reactive_at_counter(prev_cell_ctr) for sym in new_deps
)
if is_cascading_reactive:
self.bump_cascading_reactive_cell_num()
if refresh:
self.refresh(
# rationale: if this is a mutation for which we have more precise information,
# then we don't need to update the ns descendents as this will already have happened.
# also don't update ns descendents for things like `a = b`
refresh_descendent_namespaces=propagate
and not (mutated and not propagate_to_namespace_descendents)
and not self._is_underscore_or_simple_assign(new_deps),
refresh_namespace_waiting=not mutated,
)
if propagate:
UpdateProtocol(self)(
new_deps, mutated, propagate_to_namespace_descendents, refresh
)
self._refresh_cached_obj()
if self.is_class:
# pop pending class defs and update obj ref
pending_class_ns = tracer().pending_class_namespaces.pop()
pending_class_ns.update_obj_ref(self.obj)
for dep in new_deps:
if dep.obj is self.obj and dep.call_scope is not None:
self.call_scope = dep.call_scope
self.func_def_stmt = dep.func_def_stmt
ns = self.namespace
if ns is not None and ns.scope_name == "self" and isinstance(self.name, str):
# fixup namespace name if necessary
# can happen if symbol for 'self' was created in a previous __init__
ns.scope_name = self.name
if overwrite and len(flow().aliases[self.obj_id]) == 1:
self._handle_possible_widget_creation()
self._handle_possible_mercury_widget_creation()
def _mercury_widgets_manager(self):
    """Return the mercury ``WidgetsManager`` for this symbol's object, or None.

    Returns None when the object is absent, is a lazily imported module, or
    lacks the ``code_uid`` attribute that mercury widgets carry.

    NOTE(review): reads like a ``@property`` upstream; the decorator appears
    to have been stripped from this dump — confirm against the original file.
    """
    if self.obj is None:
        return None
    if self.is_obj_lazy_module or not hasattr(self.obj, "code_uid"):
        return None
    try:
        # Look the manager up on the module that defined the widget's class.
        return sys.modules.get(self.obj.__class__.__module__).WidgetsManager
    except:  # noqa
        # best effort: missing module / attribute simply means "no manager"
        return None
def _handle_possible_widget_creation(self) -> None:
    """If this symbol's object is an ipywidgets ``Widget``, start tracking it.

    Creates a namespace symbol for the widget's ``value`` attribute and
    registers ``_observe_widget`` as an observer so interactive changes to
    the widget are reflected in the dataflow graph.
    """
    if self.obj is None:
        return
    # getattr on None (ipywidgets not imported) harmlessly yields None
    Widget = getattr(sys.modules.get("ipywidgets"), "Widget", None)
    if (
        Widget is None
        or self.is_obj_lazy_module
        or not isinstance(self.obj, Widget)
        or not hasattr(self.obj, "observe")
        or not hasattr(self.obj, "value")
    ):
        return
    self.namespaced().upsert_symbol_for_name(
        "value", getattr(self.obj, "value", None), set(), self.stmt_node
    )
    self.obj.observe(self._observe_widget)
    # remember how many observers we attached so update_obj_ref can detach them
    self._num_ipywidget_observers += 1
def _handle_possible_mercury_widget_creation(self) -> None:
    """If this symbol's object is a mercury widget, start tracking its value.

    Mirrors ``_handle_possible_widget_creation`` but resolves the widget
    through the mercury ``WidgetsManager`` instead of ipywidgets directly.
    """
    # NOTE(review): `_mercury_widgets_manager` is accessed without a call,
    # consistent with it being a @property upstream — confirm.
    WidgetsManager = self._mercury_widgets_manager
    if WidgetsManager is None:
        return
    widget = WidgetsManager.get_widget(self.obj.code_uid)
    self.namespaced().upsert_symbol_for_name(
        "value", getattr(widget, "value", None), set(), self.stmt_node
    )
    widget.observe(self._observe_widget)
    self._num_mercury_widget_observers += 1
def _observe_widget(self, msg: Dict[str, Any]) -> None:
    """Widget observer callback: mirror ``value`` changes into the dataflow graph.

    Synthesizes an assignment statement ``<sym> = <newval>``, stamps the
    namespace's ``value`` symbol with an override timestamp, records data
    dependencies in both directions, and schedules a debounced reactive
    re-execution.
    """
    # only changes to the `value` trait are of interest
    if msg.get("name") != "value" or "new" not in msg:
        return
    ns = self.namespace
    sym = ns.lookup_symbol_by_name_this_indentation("value")
    if sym is None:
        return
    newval = msg["new"]
    current_ts_cell = cells().at_timestamp(self._timestamp)
    # NOTE(review): interpolates `newval` directly into source text; this
    # assumes str(newval) is a valid Python literal — confirm for non-numeric
    # widget values.
    current_ts_cell._extra_stmt = ast.parse(f"{sym.readable_name} = {newval}").body[
        0
    ]
    sym._override_ready_liveness_cell_num = flow().cell_counter() + 1
    sym._override_timestamp = Timestamp(
        self._timestamp.cell_num, current_ts_cell.num_original_stmts
    )
    sym.update_obj_ref(newval)
    statements().create_and_track(
        current_ts_cell._extra_stmt,
        timestamp=sym._override_timestamp,
        override=True,
    )
    with dynamic_slicing_context():
        # NOTE(review): indentation reconstructed from a flat dump — both
        # dependency edges assumed inside the dynamic slicing context.
        # Deps are added in both directions so either timestamp pulls in
        # the other during slicing.
        flow().add_data_dep(
            sym._timestamp,
            sym._override_timestamp,
            sym,
        )
        flow().add_data_dep(
            sym._override_timestamp,
            sym._timestamp,
            sym,
        )
    self.debounced_exec_schedule(reactive=True)
def debounced_exec_schedule(self, reactive: bool) -> None:
    """Request a debounced (re)execution of the cell defining this symbol.

    If the module-level debouncer accepts the request, flag the flow
    singleton so the pending schedule gets flushed later.
    """
    if _debounced_exec_schedule(
        cells().at_timestamp(self.timestamp).cell_id, reactive=reactive
    ):
        flow().debounced_exec_schedule_pending = True
def namespaced(self) -> "Namespace":
    """Return this symbol's namespace, creating one if none exists yet."""
    existing = self.namespace
    if existing is None:
        # No namespace registered for this object yet: construct a fresh one
        # rooted at this symbol's containing scope.
        return namespaces()(self.obj, self.name, parent_scope=self.containing_scope)
    return existing
def update_usage_info_one_timestamp(
    self,
    used_time: Timestamp,
    updated_time: Timestamp,
    is_static: bool,
) -> bool:
    """Record a single use of this symbol against one of its update times.

    Adds a data dependency edge (static or dynamic per ``is_static``) when
    the usage qualifies. Returns whether the usage "registered"; for static
    usages this additionally requires the updating cell to be visible.
    """
    flow_ = flow()
    # static usages always count; dynamic ones only if the update preceded the use
    is_usage = is_static or updated_time < used_time
    if is_usage:
        with slicing_context(is_static=is_static):
            flow_.add_data_dep(
                used_time,
                updated_time,
                self,
            )
    if is_static:
        is_usage = cells().at_timestamp(updated_time).is_visible
    return is_usage
def update_usage_info(
    self,
    used_time: Optional[Timestamp] = None,
    used_node: Optional[ast.AST] = None,
    exclude_ns: bool = False,
    is_static: bool = False,
    is_blocking: bool = False,
) -> "Symbol":
    """Record a usage of this symbol (and, unless excluded, its namespace members).

    Args:
        used_time: when the usage occurred; defaults to the current timestamp.
        used_node: the AST node at the use site, if known.
        exclude_ns: skip propagating the usage to namespace member symbols.
        is_static: whether this is a static (liveness-time) usage rather than
            a dynamic one.
        is_blocking: blocking usages record no data dependencies.

    Returns:
        ``self``, for chaining.
    """
    is_blocking = is_blocking or id(used_node) in tracer().blocking_node_ids
    if used_time is None:
        used_time = Timestamp.current()
    if flow().is_dev_mode:
        # fix: `self.timestamp` is a Timestamp namedtuple, which %d cannot
        # render (the logging call raised a formatting error); use %s.
        logger.info(
            "sym `%s` used in cell %d last updated in cell %s",
            self,
            used_time.cell_num,
            self.timestamp,
        )
    timestamp_by_used_time = (
        self.timestamp_by_liveness_time
        if is_static
        else self.timestamp_by_used_time
    )
    if not is_blocking:
        is_usage = False
        ts_to_use = self._timestamp
        # scan updates newest-first; static usages keep scanning until one
        # registers (i.e. its cell is visible), dynamic ones stop immediately
        for updated_ts in sorted(self.updated_timestamps, reverse=True):
            if not updated_ts.is_initialized:
                continue
            is_usage = self.update_usage_info_one_timestamp(
                used_time,
                updated_ts,
                is_static=is_static,
            )
            if is_usage or not is_static:
                break
        if is_usage and used_time.is_initialized:
            timestamp_by_used_time[used_time] = ts_to_use
            if used_node is not None:
                self.used_node_by_used_time[used_time] = used_node
    if exclude_ns:
        return self
    # propagate the usage to namespace members; they skip their own namespaces
    # (exclude_ns=True) since recursion here already covers them
    for sym in self.get_namespace_symbols(recurse=True):
        sym.update_usage_info(
            used_time=used_time,
            used_node=None,
            exclude_ns=True,
            is_static=is_static,
            is_blocking=is_blocking,
        )
    return self
def get_namespace_symbols(
    self, recurse: bool = False, seen: Optional[Set["Symbol"]] = None
) -> Generator["Symbol", None, None]:
    """Yield the symbols in this symbol's namespace, optionally recursively.

    ``seen`` guards against cycles among mutually-referencing namespaces.
    """
    ns = self.namespace
    if ns is None:
        return
    visited = set() if seen is None else seen
    if self in visited:
        return
    visited.add(self)
    for member in ns.all_symbols_this_indentation():
        yield member
        if not recurse:
            continue
        yield from member.get_namespace_symbols(recurse=recurse, seen=visited)
def _take_timestamp_snapshots(
    self, ts_ubound: Timestamp, seen: Optional[Set["Symbol"]] = None
) -> None:
    """Snapshot this symbol's current timestamp with upper bound ``ts_ubound``.

    Recurses through aliases of the containing namespace so every alias of
    an enclosing object records the same snapshot; ``seen`` breaks cycles.
    """
    if seen is None:
        seen = set()
    if self in seen:
        return
    seen.add(self)
    # the two lists stay index-aligned: snapshot[i] is valid up to ubound[i]
    self._snapshot_timestamps.append(self._timestamp)
    self._snapshot_timestamp_ubounds.append(ts_ubound)
    containing_ns = self.containing_namespace
    if containing_ns is None:
        return
    for alias in flow().aliases.get(containing_ns.obj_id, []):
        alias._take_timestamp_snapshots(ts_ubound, seen=seen)
def refresh(
    self,
    take_timestamp_snapshots: bool = True,
    refresh_descendent_namespaces: bool = False,
    refresh_namespace_waiting: bool = True,
    timestamp: Optional[Timestamp] = None,
    seen: Optional[Set["Symbol"]] = None,
) -> None:
    """Mark this symbol as freshly updated as of ``timestamp`` (default: now).

    Optionally recurses into descendent namespace symbols (``seen`` breaks
    cycles) and clears namespace-waiting state. Also bumps the containing
    namespace's max descendent timestamp and the per-cell usage counters.
    """
    orig_timestamp = self._timestamp
    self._timestamp = Timestamp.current() if timestamp is None else timestamp
    self._override_timestamp = None
    # snapshot only on a genuine advance (or first-ever snapshot)
    if take_timestamp_snapshots and (
        orig_timestamp < self._timestamp or len(self._snapshot_timestamps) == 0
    ):
        self._take_timestamp_snapshots(self._timestamp)
    self.updated_timestamps.add(self._timestamp)
    self._temp_disable_warnings = False
    for cell in self.cells_where_live:
        cell.add_used_cell_counter(self, self._timestamp.cell_num)
    ns = self.containing_namespace
    if ns is not None:
        # logger.error("bump version of %s due to %s (value %s)", ns.full_path, self.full_path, self.obj)
        ns.max_descendent_timestamp = self.shallow_timestamp
        for alias in flow().aliases.get(ns.obj_id, []):
            for cell in alias.cells_where_deep_live:
                cell.add_used_cell_counter(alias, self._timestamp.cell_num)
    if refresh_descendent_namespaces:
        if seen is None:
            seen = set()
        if self in seen:
            return
        seen.add(self)
        ns = self.namespace
        if ns is not None:
            for sym in ns.all_symbols_this_indentation(exclude_class=True):
                # this is to handle cases like `x = x.mutate(42)`, where
                # we could have changed some member of x but returned the
                # original object -- in this case, just assume that all
                # the stale namespace descendents are no longer stale, as
                # this is likely the user intention. For an example, see
                # `test_external_object_update_propagates_to_stale_namespace_symbols()`
                # in `test_frontend_checker.py`
                if not sym.is_waiting or refresh_namespace_waiting:
                    # logger.error(
                    #     "refresh %s due to %s (value %s) via namespace %s",
                    #     sym.full_path,
                    #     self.full_path,
                    #     self.obj,
                    #     ns.full_path,
                    # )
                    sym.refresh(
                        refresh_descendent_namespaces=True,
                        timestamp=self.shallow_timestamp,
                        take_timestamp_snapshots=False,
                        seen=seen,
                    )
        if refresh_namespace_waiting:
            self.namespace_waiting_symbols.clear()
def resync_if_necessary(self, refresh: bool) -> None:
    """Re-sync this global symbol with the value currently in the user namespace.

    If ``user_ns`` now holds a different object under this name, migrate
    aliases (including list-subscript aliases whose containers were appended
    to) and namespaces to the new object; if only the length changed, refresh
    the cached snapshot. Optionally ``refresh()`` afterwards.
    """
    if not self.containing_scope.is_global:
        return
    try:
        obj = shell().user_ns[self.name]
    except:  # noqa
        # cinder runtime can throw an exception here due to lazy imports that fail
        return
    if self.obj is not obj:
        flow_ = flow()
        for alias in flow_.aliases.get(
            self.cached_obj_id, set()
        ) | flow_.aliases.get(self.obj_id, set()):
            containing_namespace = alias.containing_namespace
            if containing_namespace is None:
                continue
            containing_obj = containing_namespace.obj
            if containing_obj is None:
                continue
            # TODO: handle dict case too
            if isinstance(containing_obj, list) and containing_obj[-1] is obj:
                # the new object was appended to a list this symbol aliases:
                # rekey the subscript alias onto the last index
                containing_namespace._subscript_symbol_by_name.pop(alias.name, None)
                alias.name = len(containing_obj) - 1
                alias.update_obj_ref(obj)
                containing_namespace._subscript_symbol_by_name[alias.name] = alias
        cleanup_discard(flow_.aliases, self.cached_obj_id, self)
        cleanup_discard(flow_.aliases, self.obj_id, self)
        flow_.aliases.setdefault(id(obj), set()).add(self)
        self.update_obj_ref(obj)
    elif self.obj_len != self.cached_obj_len:
        self._refresh_cached_obj()
    else:
        # nothing changed; skip the refresh below
        return
    if refresh:
        self.refresh()
# Upper bound on the (rough) element count of a value we will snapshot for
# memoization comparisons; larger values are treated as incomparable.
_MAX_MEMOIZE_COMPARABLE_SIZE = 10**6
def _equal(obj1: Any, obj2: Any) -> bool:
return obj1 == obj2
def _array_equal(obj1: Any, obj2: Any) -> bool:
import numpy as np
try:
return np.alltrue(obj1 == obj2) # type: ignore
except: # noqa
return False
def _dataframe_equal(obj1: Any, obj2: Any) -> bool:
try:
return obj1.equals(obj2) # type: ignore
except: # noqa
return False
def _make_list_eq(
eqs: List[Callable[[Any, Any], bool]]
) -> Callable[[List[Any], List[Any]], bool]:
def list_eq(lst1: List[Any], lst2: List[Any]) -> bool:
for eq, obj1, obj2 in zip(eqs, lst1, lst2):
if not eq(obj1, obj2):
return False
return True
return list_eq
def make_memoize_comparable_for_obj(
    cls, obj: Any, seen_ids: Set[int]
) -> Tuple[Any, Optional[Callable[[Any, Any], bool]], int]:
    """Build a memoization-comparable snapshot of ``obj``.

    Returns ``(comparable, equality_fn, size)`` where ``comparable`` is a
    structure safe to retain for later comparison, ``equality_fn`` compares
    two such structures, and ``size`` is a rough element count. Returns
    ``(cls.NULL, None, -1)`` when ``obj`` cannot be safely compared
    (cycles, oversized values, unsupported types).

    NOTE(review): first parameter is ``cls``; reads like a @classmethod whose
    decorator was stripped from this dump — confirm upstream.
    """
    if isinstance(obj, (bool, bytes, bytearray, int, float, str)):
        # immutable primitives compare directly
        return obj, cls._equal, 1
    if not isinstance(obj, tuple):
        # cycle detection (tuples are immutable and cannot participate in
        # cycles of their own making, so they skip the seen-set)
        if id(obj) in seen_ids:
            return cls.NULL, None, -1
        seen_ids.add(id(obj))
    if isinstance(obj, (dict, frozenset, list, set, tuple)):
        size = 0
        comp = []
        eqs: List[Callable[[Any, Any], bool]] = []
        if isinstance(obj, dict):
            # sort items for a deterministic ordering
            iterable: "Iterable[Any]" = sorted(obj.items())
        else:
            iterable = obj
        for inner in iterable:
            inner_comp, inner_eq, inner_size = cls.make_memoize_comparable_for_obj(
                inner, seen_ids
            )
            if inner_comp is cls.NULL or inner_eq is None:
                return cls.NULL, None, -1
            size += inner_size + 1
            if size > cls._MAX_MEMOIZE_COMPARABLE_SIZE:
                return cls.NULL, None, -1
            comp.append(inner_comp)
            eqs.append(inner_eq)
        if all(eq is cls._equal for eq in eqs):
            iter_eq: Callable[[Any, Any], bool] = cls._equal
        elif isinstance(obj, (frozenset, set)):
            # unordered containers with non-trivial element comparators
            # cannot be compared positionally
            return cls.NULL, None, -1
        else:
            iter_eq = cls._make_list_eq(eqs)
        ret = frozenset(comp) if isinstance(obj, (frozenset, set)) else comp
        return ret, iter_eq, size
    elif type(obj) in (type, FunctionType):
        # try to determine it based on the symbol
        for sym in flow().aliases.get(id(obj), []):
            comp, eq = sym.make_memoize_comparable(seen_ids=seen_ids)
            if comp is not cls.NULL and eq is not None:
                return comp, eq, 1
        return cls.NULL, None, -1
    else:
        # hacks to check if they are arrays, dataframes, etc without explicitly importing these
        module = getattr(type(obj), "__module__", "")
        if module.startswith("numpy"):
            name = getattr(type(obj), "__name__", "")
            if name.endswith("ndarray"):
                return obj, cls._array_equal, obj.size
            else:
                numpy = sys.modules.get("numpy")
                if numpy is not None and isinstance(obj, numpy.number):
                    return obj, cls._equal, 1
        elif module.startswith(("modin", "pandas")):
            name = getattr(type(obj), "__name__", "")
            if name.endswith(("DataFrame", "Series")):
                return obj, cls._dataframe_equal, obj.size
        elif module.startswith("ipywidgets"):
            ipywidgets = sys.modules.get("ipywidgets")
            if (
                ipywidgets is not None
                and isinstance(obj, ipywidgets.Widget)
                and hasattr(obj, "value")
            ):
                # widgets compare by their `value` trait
                return obj.value, cls._equal, 1
        return cls.NULL, None, -1
def make_memoize_comparable(
    self, seen_ids: Optional[Set[int]] = None
) -> Tuple[Any, Optional[Callable[[Any, Any], bool]]]:
    """Return a ``(comparable, equality_fn)`` snapshot of this symbol.

    Function/class symbols compare by their unparsed source plus
    (recursively) their parents; any other symbol defers to
    ``make_memoize_comparable_for_obj``. Returns ``(self.NULL, None)``
    when the symbol is not comparable or its value is too large.
    """
    if seen_ids is None:
        seen_ids = set()
    if isinstance(
        self.stmt_node, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)
    ):
        comps = [astunparse.unparse(self.stmt_node)]
        for sym in sorted(self.parents.keys()):
            par_comp, eq = sym.make_memoize_comparable(seen_ids=seen_ids)
            # parents must themselves be plain-equality comparable
            if par_comp is self.NULL or eq is not self._equal:
                return self.NULL, None
            comps.append(par_comp)
        return comps, self._equal
    obj, eq, size = self.make_memoize_comparable_for_obj(self.obj, seen_ids)
    if size > self._MAX_MEMOIZE_COMPARABLE_SIZE:
        return self.NULL, None
    else:
        return obj, eq
The provided code snippet includes necessary dependencies for implementing the `lift` function. Write a Python function `def lift(sym: Any) -> Symbol` to solve the following problem:
Given the programmatic usage of some symbol, look up the corresponding Symbol metadata.
Here is the function:
def lift(sym: Any) -> Symbol:
    """
    Given the programmatic usage of some symbol,
    look up the corresponding Symbol metadata.
    """
    # See the `argument` handler in ipyflow_tracer for the
    # actual implementation; this is just a stub that ensures
    # that handler was able to find something.
    # Raises ValueError (via _validate) when no Symbol was resolved.
    return _validate(sym)
15,082 | from typing import TYPE_CHECKING, Any, List, Set, Union, cast
from ipyflow.data_model.symbol import Symbol
from ipyflow.data_model.timestamp import Timestamp
from ipyflow.tracing.watchpoint import Watchpoints
def _validate(sym: Any) -> Symbol:
    """Return ``sym`` as a Symbol; raise ValueError for None or non-Symbols."""
    if sym is None:
        raise ValueError("unable to lookup metadata for symbol")
    if not isinstance(sym, Symbol):
        raise ValueError("unable to lookup metadata for symbol")
    return cast(Symbol, sym)
The provided code snippet includes necessary dependencies for implementing the `code` function. Write a Python function `def code(sym: Any, **kwargs: Any) -> Union["HTML", str]` to solve the following problem:
Given the programmatic usage of some symbol, look up the corresponding code for that symbol.
Here is the function:
def code(sym: Any, **kwargs: Any) -> Union["HTML", str]:
    """
    Given the programmatic usage of some symbol,
    look up the corresponding code for that symbol.

    Keyword arguments are forwarded to ``Symbol.code`` (e.g. format options).
    """
    # See the `argument` handler in ipyflow_tracer for the
    # actual implementation; this is just a stub that ensures
    # that handler was able to find something.
    return _validate(sym).code(**kwargs)
15,083 | from typing import TYPE_CHECKING, Any, List, Set, Union, cast
from ipyflow.data_model.symbol import Symbol
from ipyflow.data_model.timestamp import Timestamp
from ipyflow.tracing.watchpoint import Watchpoints
def _validate(sym: Any) -> Symbol:
    """Return ``sym`` unchanged if it is a Symbol; raise ValueError otherwise."""
    if sym is None or not isinstance(sym, Symbol):
        raise ValueError("unable to lookup metadata for symbol")
    return cast(Symbol, sym)
class Timestamp(NamedTuple):
    """A ``(cell counter, statement counter)`` pair ordering execution events."""

    # NOTE(review): several methods below read like @classmethod / @property /
    # @contextmanager definitions whose decorators were stripped from this
    # dump (`current`, `uninitialized`, `offset`, `update_usage_info` take
    # `cls` or module-level state) — confirm against the upstream source.

    cell_num: int
    stmt_num: int

    def current(cls) -> "Timestamp":
        """Return the timestamp of the currently executing statement."""
        # TODO: shouldn't have to go through flow() singleton to get the cell counter,
        # but the dependency structure prevents us from importing from ipyflow.data_model.code_cell
        if tracer_initialized():
            return cls(
                flow().cell_counter() + _cell_offset,
                tracer().module_stmt_counter() + _stmt_offset,
            )
        else:
            return Timestamp.uninitialized()

    def positional(self) -> "Timestamp":
        """Return a copy with the cell counter replaced by the cell's position."""
        return Timestamp(cells().at_counter(self.cell_num).position, self.stmt_num)

    def uninitialized(cls) -> "Timestamp":
        """Sentinel timestamp preceding all real timestamps."""
        return _TS_UNINITIALIZED

    def is_initialized(self) -> bool:
        """True iff both counters are past the uninitialized sentinel."""
        uninited = Timestamp.uninitialized()
        return self.cell_num > uninited.cell_num and self.stmt_num > uninited.stmt_num

    def plus(self, cell_num_delta: int, stmt_num_delta: int) -> "Timestamp":
        """Return a copy shifted by the given deltas."""
        return self.__class__(
            self.cell_num + cell_num_delta, self.stmt_num + stmt_num_delta
        )

    def offset(
        cell_offset: int = 0, stmt_offset: int = 0
    ) -> Generator[None, None, None]:
        """Temporarily bias ``current()`` by the given offsets (context manager)."""
        global _cell_offset
        global _stmt_offset
        _cell_offset += cell_offset
        _stmt_offset += stmt_offset
        try:
            yield
        finally:
            # always restore the offsets, even if the body raises
            _cell_offset -= cell_offset
            _stmt_offset -= stmt_offset

    def as_tuple(self) -> Tuple[int, int]:
        """Return as a plain ``(cell_num, stmt_num)`` tuple."""
        return (self.cell_num, self.stmt_num)

    def __eq__(self, other) -> bool:
        # comparing against a non-timestamp (other than None) is a caller bug,
        # so raise rather than silently returning False
        if other is None:
            return False
        if not isinstance(other, Timestamp):
            raise TypeError(
                "cannot compare non-timestamp value %s with timestamp %s"
                % (other, self)
            )
        return tuple(self._asdict().values()) == tuple(other._asdict().values())

    def __ne__(self, other) -> bool:
        return not self == other

    def update_usage_info(
        cls,
        symbols: Union[
            Optional["Symbol"],
            Iterable[Optional["Symbol"]],
            Optional["ResolvedSymbol"],
            Iterable[Optional["ResolvedSymbol"]],
        ],
        exclude_ns=False,
        used_node: Optional[ast.AST] = None,
    ):
        """Record a usage at the current time for each non-anonymous symbol."""
        if symbols is None:
            return
        try:
            # accept either a single symbol or an iterable of symbols
            iter(symbols)  # type: ignore
        except TypeError:
            symbols = [symbols]  # type: ignore
        used_time = cls.current()
        for sym in symbols:  # type: ignore
            if sym is not None and not sym.is_anonymous:
                sym.update_usage_info(
                    used_time=used_time, used_node=used_node, exclude_ns=exclude_ns
                )
The provided code snippet includes necessary dependencies for implementing the `timestamp` function. Write a Python function `def timestamp(sym: Any) -> Timestamp` to solve the following problem:
Given the programmatic usage of some symbol, look up the corresponding timestamp for that symbol.
Here is the function:
def timestamp(sym: Any) -> Timestamp:
    """
    Given the programmatic usage of some symbol,
    look up the corresponding timestamp for that symbol.
    """
    # See the `argument` handler in ipyflow_tracer for the
    # actual implementation; this is just a stub that ensures
    # that handler was able to find something.
    # Raises ValueError (via _validate) when no Symbol was resolved.
    return _validate(sym).timestamp
15,084 | from typing import TYPE_CHECKING, Any, List, Set, Union, cast
from ipyflow.data_model.symbol import Symbol
from ipyflow.data_model.timestamp import Timestamp
from ipyflow.tracing.watchpoint import Watchpoints
def _validate(sym: Any) -> Symbol:
    """Return ``sym`` unchanged if it is a Symbol; raise ValueError otherwise."""
    if sym is None or not isinstance(sym, Symbol):
        raise ValueError("unable to lookup metadata for symbol")
    return cast(Symbol, sym)
class Symbol:
NULL = object()
# object for virtual display symbol
DISPLAY = object()
IMMUTABLE_TYPES = set(IMMUTABLE_PRIMITIVE_TYPES)
IPYFLOW_MUTATION_VIRTUAL_SYMBOL_NAME = "__ipyflow_mutation"
def __init__(
self,
name: SupportedIndexType,
symbol_type: SymbolType,
obj: Any,
containing_scope: "Scope",
stmt_node: Optional[ast.stmt] = None,
symbol_node: Optional[ast.AST] = None,
refresh_cached_obj: bool = False,
implicit: bool = False,
) -> None:
if refresh_cached_obj:
# TODO: clean up redundancies
assert implicit
assert stmt_node is None
self.name = name
self.symbol_type = symbol_type
self.obj = obj
# additional user-specific metadata
self._tags: Set[str] = set()
self.extra_metadata: Dict[str, Any] = {}
self._tombstone = False
self._cached_out_of_sync = True
self.cached_obj_id: Optional[int] = None
self.cached_obj_type: Optional[Type[object]] = None
self.cached_obj_len: Optional[int] = None
if refresh_cached_obj:
self._refresh_cached_obj()
self.containing_scope = containing_scope or flow().global_scope
self.call_scope: Optional[Scope] = None
self.func_def_stmt: Optional[ast.stmt] = None
self.stmt_node = self.update_stmt_node(stmt_node)
self.symbol_node = symbol_node
self._funcall_live_symbols = None
self.parents: Dict["Symbol", List[Timestamp]] = {}
self.children: Dict["Symbol", List[Timestamp]] = {}
# initialize at -1 for implicit since the corresponding piece of data could already be around,
# and we don't want liveness checker to think this was newly created unless we
# explicitly trace an update somewhere
self._timestamp: Timestamp = (
Timestamp.uninitialized() if implicit else Timestamp.current()
)
self._snapshot_timestamps: List[Timestamp] = []
self._snapshot_timestamp_ubounds: List[Timestamp] = []
self._defined_cell_num = cells().exec_counter()
self._is_dangling_on_edges = False
self._cascading_reactive_cell_num = -1
self._override_ready_liveness_cell_num = -1
self._override_timestamp: Optional[Timestamp] = None
self.watchpoints = Watchpoints()
# The necessary last-updated timestamp / cell counter for this symbol to not be waiting
self.required_timestamp: Timestamp = self.timestamp
# for each usage of this sym, the version that was used, if different from the timestamp of usage
self.timestamp_by_used_time: Dict[Timestamp, Timestamp] = {}
self.used_node_by_used_time: Dict[Timestamp, ast.AST] = {}
# History of definitions at time of liveness
self.timestamp_by_liveness_time: Dict[Timestamp, Timestamp] = {}
# All timestamps associated with updates to this symbol
self.updated_timestamps: Set[Timestamp] = set()
# The most recent timestamp associated with a particular object id
self.last_updated_timestamp_by_obj_id: Dict[int, Timestamp] = {}
self.fresher_ancestors: Set["Symbol"] = set()
self.fresher_ancestor_timestamps: Set[Timestamp] = set()
# cells where this symbol was live
self.cells_where_deep_live: Set[Cell] = set()
self.cells_where_shallow_live: Set[Cell] = set()
self._last_computed_ready_or_waiting_cache_ts: int = -1
self._is_ready_or_waiting_at_position_cache: Dict[Tuple[int, bool], bool] = {}
# if implicitly created when tracing non-store-context ast nodes
self._implicit = implicit
# Will never be stale if no_warning is True
self.disable_warnings = False
self._temp_disable_warnings = False
self._num_ipywidget_observers = 0
self._num_mercury_widget_observers = 0
flow().aliases.setdefault(id(obj), set()).add(self)
if (
isinstance(self.name, str)
and not self.is_anonymous
and not self.containing_scope.is_namespace_scope
):
ns = self.namespace
if ns is not None and ns.scope_name == "self":
# hack to get a better name than `self.whatever` for fields of this object
# not ideal because it relies on the `self` convention but is probably
# acceptable for the use case of improving readable names
ns.scope_name = self.name
def aliases(self) -> List["Symbol"]:
return list(flow().aliases.get(self.obj_id, []))
def cells_where_live(self) -> Set[Cell]:
return self.cells_where_deep_live | self.cells_where_shallow_live
def __repr__(self) -> str:
return f"<{self.readable_name}>"
def __str__(self) -> str:
return self.readable_name
def __hash__(self) -> int:
return hash(id(self))
def __lt__(self, other) -> bool:
return id(self) < id(other)
def add_tag(self, tag_value: str) -> None:
self._tags.add(tag_value)
def remove_tag(self, tag_value: str) -> None:
self._tags.discard(tag_value)
def has_tag(self, tag_value: str) -> bool:
return tag_value in self._tags
def temporary_disable_warnings(self) -> None:
self._temp_disable_warnings = True
def last_used_timestamp(self) -> Timestamp:
if len(self.timestamp_by_used_time) == 0:
return Timestamp.uninitialized()
else:
return max(self.timestamp_by_used_time.keys())
def namespace_waiting_symbols(self) -> Set["Symbol"]:
ns = self.namespace
return set() if ns is None else ns.namespace_waiting_symbols
def shallow_timestamp(self) -> Timestamp:
if self._override_timestamp is None:
return self._timestamp
else:
return max(self._timestamp, self._override_timestamp)
def visible_timestamp(self) -> Optional[Timestamp]:
for ts in sorted(self.updated_timestamps, reverse=True):
if cells().at_timestamp(ts).is_visible:
return ts
return None
def memoize_timestamp(self) -> Optional[Timestamp]:
return self.last_updated_timestamp_by_obj_id.get(self.obj_id)
def timestamp(self) -> Timestamp:
ts = self.shallow_timestamp
if self.is_import or self.is_module:
return ts
ns = self.namespace
return ts if ns is None else max(ts, ns.max_descendent_timestamp)
def _compute_namespace_timestamps(
self,
seen: Optional[Set["Symbol"]] = None,
version_ubound: Optional[Timestamp] = None,
) -> Set[Timestamp]:
if version_ubound is None:
timestamps = {self.shallow_timestamp, self.timestamp}
else:
max_leq_ubound = Timestamp.uninitialized()
for ts in reversed(self._snapshot_timestamps):
if ts <= version_ubound:
max_leq_ubound = ts
break
if max_leq_ubound.is_initialized:
timestamps = {max_leq_ubound}
else:
timestamps = set()
ns = self.namespace
if ns is None:
return timestamps
if seen is None:
seen = set()
if self in seen:
return timestamps
seen.add(self)
for sym in ns.all_symbols_this_indentation():
timestamps |= sym._compute_namespace_timestamps(
seen=seen, version_ubound=version_ubound
)
return timestamps
def _get_timestamps_for_version(self, version: int) -> Set[Timestamp]:
if len(self._snapshot_timestamps) == 0:
return {self.timestamp}
ts = self._snapshot_timestamps[version]
if ts.cell_num == -1:
return {Timestamp(self.defined_cell_num, ts.stmt_num)}
else:
return self._compute_namespace_timestamps(
version_ubound=None if version == -1 else ts
)
def code(
self, format_type: Optional[Type[FormatType]] = None, version: int = -1
) -> Slice:
return statements().format_multi_slice(
self._get_timestamps_for_version(version=version),
blacken=True,
format_type=format_type,
)
def cascading_reactive_cell_num(
self,
seen: Optional[Set["Symbol"]] = None,
consider_containing_symbols: bool = True,
) -> int:
if seen is None:
seen = set()
if self in seen:
return -1
seen.add(self)
cell_num = self._cascading_reactive_cell_num
ns = self.namespace
ret = (
cell_num
if ns is None
else max(
cell_num,
ns.max_cascading_reactive_cell_num(seen),
)
)
if not consider_containing_symbols:
return ret
for sym in self.iter_containing_symbols():
ret = max(ret, sym.cascading_reactive_cell_num(seen=seen))
return ret
def bump_cascading_reactive_cell_num(self, ctr: Optional[int] = None) -> None:
self._cascading_reactive_cell_num = max(
self._cascading_reactive_cell_num,
flow().cell_counter() if ctr is None else ctr,
)
def iter_containing_symbols(self) -> Generator["Symbol", None, None]:
yield self
ns = self.containing_namespace
if ns is None or not ns.is_namespace_scope:
return
for containing_ns in ns.iter_containing_namespaces():
yield from flow().aliases.get(containing_ns.obj_id, [])
def waiting_timestamp(self) -> int:
return max(self._timestamp.cell_num, flow().min_timestamp)
def defined_cell_num(self) -> int:
return self._defined_cell_num
def readable_name(self) -> str:
return self.containing_scope.make_namespace_qualified_name(self)
def is_subscript(self) -> bool:
return self.symbol_type == SymbolType.SUBSCRIPT
def is_class(self) -> bool:
return self.symbol_type == SymbolType.CLASS
def is_function(self) -> bool:
return self.symbol_type == SymbolType.FUNCTION
def is_lambda(self) -> bool:
# TODO: this is terrible
return type(self.name) is str and self.name.startswith( # noqa: E721
"<lambda_sym_"
)
def is_import(self) -> bool:
return self.symbol_type == SymbolType.IMPORT
def is_module(self) -> bool:
return self.symbol_type == SymbolType.MODULE
def imported_module(self) -> str:
if not self.is_import:
raise ValueError("only IMPORT symbols have `imported_module` property")
if isinstance(self.stmt_node, ast.Import):
for alias in self.stmt_node.names:
name = alias.asname or alias.name
if name == self.name:
return alias.name
raise ValueError(
"Unable to find module for symbol %s is stmt %s"
% (self, ast.dump(self.stmt_node))
)
elif isinstance(self.stmt_node, ast.ImportFrom):
return self.stmt_node.module
else:
raise TypeError(
"Invalid stmt type for import symbol: %s" % ast.dump(self.stmt_node)
)
def imported_symbol_original_name(self) -> str:
if not self.is_import:
raise ValueError(
"only IMPORT symbols have `imported_symbol_original_name` property"
)
if isinstance(self.stmt_node, ast.Import):
return self.imported_module
elif isinstance(self.stmt_node, ast.ImportFrom):
for alias in self.stmt_node.names:
name = alias.asname or alias.name
if name == self.name:
return alias.name
raise ValueError(
"Unable to find module for symbol %s is stmt %s"
% (self, ast.dump(self.stmt_node))
)
else:
raise TypeError(
"Invalid stmt type for import symbol: %s" % ast.dump(self.stmt_node)
)
def is_cascading_reactive_at_counter(self, ctr: int) -> bool:
return self.cascading_reactive_cell_num() > max(
ctr, flow().min_cascading_reactive_cell_num
)
def get_top_level(self) -> Optional["Symbol"]:
if not self.containing_scope.is_namespace_scope:
return self
else:
containing_scope = cast("Namespace", self.containing_scope)
for alias in flow().aliases.get(containing_scope.obj_id, []):
if alias.is_globally_accessible:
return alias.get_top_level()
return None
def get_import_string(self) -> str:
if not self.is_import:
raise ValueError("only IMPORT symbols support recreating the import string")
module = self.imported_module
if isinstance(self.stmt_node, ast.Import):
if module == self.name:
return f"import {module}"
else:
return f"import {module} as {self.name}"
elif isinstance(self.stmt_node, ast.ImportFrom):
original_symbol_name = self.imported_symbol_original_name
if original_symbol_name == self.name:
return f"from {module} import {original_symbol_name}"
else:
return f"from {module} import {original_symbol_name} as {self.name}"
else:
raise TypeError(
"Invalid stmt type for import symbol: %s" % ast.dump(self.stmt_node)
)
def is_anonymous(self) -> bool:
if self.symbol_type == SymbolType.ANONYMOUS:
return True
ns = self.containing_namespace
if ns is not None and ns.is_anonymous:
return True
return False
def is_implicit(self) -> bool:
return self._implicit
def shallow_clone(
self, new_obj: Any, new_containing_scope: "Scope", symbol_type: SymbolType
) -> "Symbol":
return self.__class__(self.name, symbol_type, new_obj, new_containing_scope)
def obj_id(self) -> int:
return id(self.obj)
def obj_len(self) -> Optional[int]:
try:
if not self.is_obj_lazy_module and hasattr(self.obj, "__len__"):
return len(self.obj)
except: # noqa: E722
pass
return None
def obj_type(self) -> Type[Any]:
return type(self.obj)
def is_immutable(self) -> bool:
return self.obj_type in self.IMMUTABLE_TYPES
def is_mutation_virtual_symbol(self) -> bool:
return self.name == self.IPYFLOW_MUTATION_VIRTUAL_SYMBOL_NAME
def is_underscore(self) -> bool:
return self.name == "_" and self.containing_scope.is_global
def is_obj_lazy_module(self) -> bool:
return self.obj_type is _LazyModule
def get_type_annotation(self):
return get_type_annotation(self.obj)
def get_type_annotation_string(self) -> str:
return make_annotation_string(self.get_type_annotation())
def namespace(self) -> Optional["Namespace"]:
return flow().namespaces.get(self.obj_id)
def containing_namespace(self) -> Optional["Namespace"]:
if self.containing_scope.is_namespace_scope:
return cast("Namespace", self.containing_scope)
else:
return None
def full_path(self) -> Tuple[str, ...]:
return self.containing_scope.full_path + (str(self.name),)
def full_namespace_path(self) -> str:
return self.containing_scope.make_namespace_qualified_name(self)
def is_garbage(self) -> bool:
return self._tombstone
def is_new_garbage(self) -> bool:
if self._tombstone:
return False
containing_ns = self.containing_namespace
numpy = sys.modules.get("numpy", None)
if (
numpy is not None
and containing_ns is not None
and isinstance(containing_ns.obj, numpy.ndarray)
):
# numpy atoms are not interned (so assigning array elts to a variable does not bump refcount);
# also seems that refcount is always 0, so just check if the containing namespace is garbage
return self.containing_namespace.is_garbage
return self.get_ref_count() == 0
def is_globally_accessible(self) -> bool:
return self.containing_scope.is_globally_accessible
def is_user_accessible(self) -> bool:
return (
self.is_globally_accessible
and not self.is_anonymous
and not self.is_garbage
and not (
self.containing_namespace is not None
and (
self.containing_namespace.is_anonymous
or self.containing_namespace.is_garbage
)
)
)
def _remove_self_from_aliases(self) -> None:
cleanup_discard(flow().aliases, self.obj_id, self)
self.obj = None
def mark_garbage(self) -> None:
if self.is_garbage:
return
self._tombstone = True
ns = self.namespace
if ns is not None and all(alias.is_garbage for alias in self.aliases):
ns.mark_garbage()
def collect_self_garbage(self) -> None:
assert self.is_garbage
flow().blocked_reactive_timestamps_by_symbol.pop(self, None)
self._remove_self_from_aliases()
for parent in self.parents:
parent.children.pop(self, None)
for child in self.children:
child.parents.pop(self, None)
containing_ns = self.containing_namespace
if self.is_subscript and containing_ns is not None:
containing_ns._subscript_symbol_by_name.pop(self.name, None)
elif not self.is_subscript:
self.containing_scope._symbol_by_name.pop(self.name, None)
else:
logger.warning(
"could not find symbol %s in its scope %s", self, self.containing_scope
)
# TODO: remove from static / dynamic parent / children edges
# need to keep this around for readable_name to work
# self.containing_scope = None
# def update_type(self, new_type):
# self.symbol_type = new_type
# if self.is_function:
# self.call_scope = self.containing_scope.make_child_scope(self.name)
# else:
# self.call_scope = None
def update_obj_ref(self, obj: Any, refresh_cached: bool = True) -> None:
    """Rebind this symbol to ``obj``.

    Detaches any widget observers from the old object, clears the tombstone,
    migrates this symbol's namespace when the object id changed, and
    (optionally) refreshes the cached object snapshot.
    """
    # detach observers previously attached to the old widget object
    if self._num_ipywidget_observers > 0:
        try:
            self.obj.unobserve_all()
        except: # noqa
            pass
        self._num_ipywidget_observers = 0
    if self._num_mercury_widget_observers > 0:
        try:
            self._mercury_widgets_manager.get_widget(
                self.obj.code_uid
            ).unobserve_all()
        except: # noqa
            pass
        self._num_mercury_widget_observers = 0
    self._tombstone = False
    self._cached_out_of_sync = True
    if (
        flow().settings.mark_typecheck_failures_unsafe
        and self.cached_obj_type != type(obj)
    ):
        # type changed: prior typecheck results are no longer trustworthy
        for cell in self.cells_where_live:
            cell.invalidate_typecheck_result()
    self.cells_where_shallow_live.clear()
    self.cells_where_deep_live.clear()
    self.obj = obj
    if self.cached_obj_id is not None and self.cached_obj_id != self.obj_id:
        new_ns = flow().namespaces.get(self.obj_id, None)
        # don't overwrite existing namespace for this obj
        old_ns = flow().namespaces.get(self.cached_obj_id, None)
        if (
            old_ns is not None
            and old_ns.full_namespace_path == self.full_namespace_path
        ):
            if new_ns is None:
                logger.info("create fresh copy of namespace %s", old_ns)
                new_ns = old_ns.fresh_copy(obj)
                old_ns.transfer_symbols_to(new_ns)
            else:
                new_ns.scope_name = old_ns.scope_name
                new_ns.parent_scope = old_ns.parent_scope
        self._handle_aliases()
        if (
            old_ns is not None
            and len(flow().aliases.get(self.cached_obj_id, [])) == 0
        ):
            # nothing refers to the old object anymore
            old_ns.mark_garbage()
    if refresh_cached:
        self._refresh_cached_obj()
def invalidate_cached(self) -> None:
    """Forget the cached object snapshot so the next update sees the object as changed."""
    self.cached_obj_id = None
    self.cached_obj_type = None
    self._cached_out_of_sync = True
def get_ref_count(self) -> int:
    """Approximate the object's refcount excluding refs held by ipyflow itself.

    Returns -1 when there is no real underlying object. Subtracts the
    temporary ref taken by ``getrefcount``, one ref per alias symbol, and
    one for the namespace's own reference, if any.
    """
    if self.obj is None or self.obj is Symbol.NULL:
        return -1
    total = sys.getrefcount(self.obj) - 1
    total -= len(flow().aliases.get(self.obj_id, []))
    ns = flow().namespaces.get(self.obj_id, None)
    if ns is not None and ns.obj is not None and ns.obj is not Symbol.NULL:
        total -= 1
    return total
def _should_cancel_propagation(self, prev_obj: Optional[Any]) -> bool:
    """Decide whether an update should *not* propagate to dependents.

    Propagation is cancelled when the object is effectively unchanged (cache
    still in sync or same object id) or when both old and new values are
    null-like; it is never cancelled when there was no previous object or
    while a blocked-reactive timestamp is pending for the current cell.
    """
    if prev_obj is None:
        return False
    if (
        flow().blocked_reactive_timestamps_by_symbol.get(self, -1)
        == self.timestamp.cell_num
    ):
        return False
    if not self._cached_out_of_sync or self.obj_id == self.cached_obj_id:
        return True
    if self.obj is None or prev_obj is Symbol.NULL:
        # only cancel if both sides are null-like
        return self.obj is None and prev_obj is Symbol.NULL
    return False
def _handle_aliases(self):
    """Move this symbol from the alias set of its previous object id to that of the current one."""
    cleanup_discard(flow().aliases, self.cached_obj_id, self)
    flow().aliases.setdefault(self.obj_id, set()).add(self)
def update_stmt_node(self, stmt_node: Optional[ast.stmt]) -> Optional[ast.stmt]:
    """Associate this symbol with the statement that (re)defined it.

    For functions (and lambdas) also registers the statement -> symbol
    mapping used for call resolution and creates a fresh call scope.
    Returns ``stmt_node`` unchanged.
    """
    self.stmt_node = stmt_node
    self._funcall_live_symbols = None
    # NOTE(review): ast.Lambda is an expression node, so the isinstance check
    # below presumably only fires when an expr is passed despite the
    # Optional[ast.stmt] annotation -- TODO confirm intended usage.
    if self.is_function or (
        stmt_node is not None and isinstance(stmt_node, ast.Lambda)
    ):
        # TODO: in the case of lambdas, there will not necessarily be one
        # symbol for a given statement. We need a more precise way to determine
        # the symbol being called than by looking at the stmt in question.
        flow().statement_to_func_sym[id(stmt_node)] = self
        self.call_scope = self.containing_scope.make_child_scope(self.name)
        self.func_def_stmt = stmt_node
    return stmt_node
def _refresh_cached_obj(self) -> None:
    """Snapshot id/type/len of the current object without keeping a strong ref.

    (Holding the object itself would bump its refcount and skew garbage
    detection.)
    """
    self.cached_obj_id = self.obj_id
    self.cached_obj_type = self.obj_type
    self.cached_obj_len = self.obj_len
    self._cached_out_of_sync = False
def get_definition_args(self) -> List[ast.arg]:
    """Return all formal args of this symbol's definition.

    Order: positional args, then keyword-only args, then ``*vararg`` and
    ``**kwarg`` (when present).
    """
    assert self.func_def_stmt is not None and isinstance(
        self.func_def_stmt, (ast.FunctionDef, ast.AsyncFunctionDef, ast.Lambda)
    )
    arg_spec = self.func_def_stmt.args
    collected = list(arg_spec.args) + list(arg_spec.kwonlyargs)
    for extra in (arg_spec.vararg, arg_spec.kwarg):
        if extra is not None:
            collected.append(extra)
    return collected
def _match_call_args_with_definition_args(
    self,
) -> Generator[Tuple[ast.arg, List["Symbol"]], None, None]:
    """Yield (formal arg, resolved argument symbols) pairs for the current call site.

    Yields nothing when the caller node cannot be determined, when caller
    kwargs don't match the definition, or (partially) when a starred arg is
    hit. Unmatched defaults are yielded with the symbols of their default
    expressions.
    """
    # TODO: handle posonlyargs, kwonlyargs
    assert self.func_def_stmt is not None and isinstance(
        self.func_def_stmt, (ast.FunctionDef, ast.AsyncFunctionDef, ast.Lambda)
    )
    caller_node = self._get_calling_ast_node()
    if caller_node is None or not isinstance(caller_node, ast.Call):
        return
    # only formals carrying defaults are addressable by keyword here
    kwarg_by_name = {
        arg_key.arg: arg_key
        for arg_key in self.func_def_stmt.args.args[
            -len(self.func_def_stmt.args.defaults) :
        ]
    }
    if not all(keyword.arg in kwarg_by_name for keyword in caller_node.keywords):
        logger.warning("detected mismatched kwargs from caller node to definition")
        return
    def_args = self.func_def_stmt.args.args
    if len(self.func_def_stmt.args.defaults) > 0:
        # strip defaulted formals; they are handled via kwargs / defaults below
        def_args = def_args[: -len(self.func_def_stmt.args.defaults)]
    if len(def_args) > 0 and def_args[0].arg == "self":
        # FIXME: this is bad and I should feel bad
        def_args = def_args[1:]
    for def_arg, call_arg in zip(def_args, caller_node.args):
        if isinstance(call_arg, ast.Starred):
            # give up
            # TODO: handle this case
            break
        yield def_arg, tracer().resolve_loaded_symbols(call_arg)
    seen_keys = set()
    for keyword in caller_node.keywords:
        keyword_key, keyword_value = keyword.arg, keyword.value
        if keyword_value is None:
            continue
        seen_keys.add(keyword_key)
        yield kwarg_by_name[keyword_key], tracer().resolve_loaded_symbols(
            keyword_value
        )
    # finally, fill in defaults the caller did not override
    for arg_key, arg_value in zip(
        self.func_def_stmt.args.args[-len(self.func_def_stmt.args.defaults) :],
        self.func_def_stmt.args.defaults,
    ):
        if arg_key.arg in seen_keys:
            continue
        yield arg_key, tracer().resolve_loaded_symbols(arg_value)
def _get_calling_ast_node(self) -> Optional[ast.Call]:
    """Return the ``ast.Call`` node currently invoking this symbol, if determinable.

    Returns None for builtins, subscript/property dunders, property-decorated
    functions, or when tracing was disabled / no lexical call is in flight.
    """
    if tracer().tracing_disabled_since_last_module_stmt or (
        not hasattr(self.obj, "__module__")
        and getattr(type(self.obj), "__module__", None) == "builtins"
    ):
        return None
    if self.func_def_stmt is not None and isinstance(
        self.func_def_stmt, (ast.FunctionDef, ast.AsyncFunctionDef)
    ):
        if self.name in ("__getitem__", "__setitem__", "__delitem__"):
            # TODO: handle case where we're looking for a subscript for the calling node
            return None
        for decorator in self.func_def_stmt.decorator_list:
            if isinstance(decorator, ast.Name) and decorator.id == "property":
                # TODO: handle case where we're looking for an attribute for the calling node
                return None
    lexical_call_stack = tracer().lexical_call_stack
    if len(lexical_call_stack) == 0:
        return None
    prev_node_id_in_cur_frame_lexical = lexical_call_stack.get_field(
        "prev_node_id_in_cur_frame_lexical"
    )
    caller_ast_node = tracer().ast_node_by_id.get(
        prev_node_id_in_cur_frame_lexical, None
    )
    if caller_ast_node is None or not isinstance(caller_ast_node, ast.Call):
        return None
    return caller_ast_node
def create_symbols_for_call_args(self, call_frame: FrameType) -> None:
    """Create symbols in this function's call scope for each formal argument.

    Args matched to the caller get the caller's symbols as deps; any
    remaining formals are created with no deps so they still resolve.
    """
    assert self.func_def_stmt is not None
    seen_def_args = set()
    logger.info("create symbols for call to %s", self)
    for def_arg, deps in self._match_call_args_with_definition_args():
        seen_def_args.add(def_arg.arg)
        self.call_scope.upsert_symbol_for_name(
            def_arg.arg,
            call_frame.f_locals.get(def_arg.arg),
            deps,
            self.func_def_stmt,
            propagate=False,
            symbol_node=def_arg,
        )
        logger.info("def arg %s matched with deps %s", def_arg, deps)
    # formals the matcher could not pair with caller args still get symbols
    for def_arg in self.get_definition_args():
        if def_arg.arg in seen_def_args:
            continue
        self.call_scope.upsert_symbol_for_name(
            def_arg.arg,
            None,
            set(),
            self.func_def_stmt,
            propagate=False,
            symbol_node=def_arg,
        )
def is_waiting(self) -> bool:
    """True when this symbol (or a namespace child) is stale and needs a rerun before use."""
    if self.disable_warnings or self._temp_disable_warnings:
        return False
    if self.waiting_timestamp < self.required_timestamp.cell_num:
        return True
    elif flow().min_timestamp == -1:
        return len(self.namespace_waiting_symbols) > 0
    else:
        # TODO: guard against infinite recursion
        return any(sym.is_waiting for sym in self.namespace_waiting_symbols)
def is_shallow_stale(self) -> bool:
    """True when warnings are enabled and this symbol's waiting timestamp lags its requirement."""
    warnings_suppressed = self.disable_warnings or self._temp_disable_warnings
    if warnings_suppressed:
        return False
    return self.waiting_timestamp < self.required_timestamp.cell_num
def _is_ready_or_waiting_at_position_impl(self, pos: int, deep: bool) -> bool:
    """Uncached check for whether this symbol is waiting w.r.t. cell position ``pos``.

    A parent makes us wait when it was updated after the dependency edge was
    created (or is itself waiting), considering only edges introduced at or
    before ``pos``; with ``deep``, namespace children are checked as well.
    """
    for par, timestamps in self.parents.items():
        for ts in timestamps:
            dep_introduced_pos = cells().at_timestamp(ts).position
            if dep_introduced_pos > pos:
                # edge introduced below the position of interest; ignore
                continue
            for updated_ts in par.updated_timestamps:
                if cells().at_timestamp(updated_ts).position > dep_introduced_pos:
                    continue
                if updated_ts.cell_num > ts.cell_num or par.is_waiting_at_position(
                    dep_introduced_pos
                ):
                    # logger.error("sym: %s", self)
                    # logger.error("pos: %s", pos)
                    # logger.error("parent: %s", par)
                    # logger.error("dep introdced ts: %s", ts)
                    # logger.error("dep introdced pos: %s", dep_introduced_pos)
                    # logger.error("par updated ts: %s", updated_ts)
                    # logger.error("par updated position: %s", cells().from_timestamp(updated_ts).position)
                    return True
    if deep:
        for sym in self.namespace_waiting_symbols:
            if sym.is_waiting_at_position(pos):
                return True
    return False
def is_waiting_at_position(self, pos: int, deep: bool = True) -> bool:
    """Memoized: is this symbol waiting (deep) or shallow-stale w.r.t. cell position ``pos``?

    Always True under ANY_ORDER flow; otherwise delegates to the positional
    check, with a per-execution cache seeded False to break recursion cycles.
    """
    if deep:
        if not self.is_waiting:
            return False
    else:
        if not self.is_shallow_stale:
            return False
    if flow().mut_settings.flow_order == FlowDirection.ANY_ORDER:
        return True
    if cells().exec_counter() > self._last_computed_ready_or_waiting_cache_ts:
        # new execution: previous cached answers are invalid
        self._is_ready_or_waiting_at_position_cache.clear()
        self._last_computed_ready_or_waiting_cache_ts = cells().exec_counter()
    if (pos, deep) in self._is_ready_or_waiting_at_position_cache:
        return self._is_ready_or_waiting_at_position_cache[pos, deep]
    # preemptively set this entry to 'False' in the cache to avoid infinite loops
    self._is_ready_or_waiting_at_position_cache[pos, deep] = False
    is_waiting = self._is_ready_or_waiting_at_position_impl(pos, deep)
    self._is_ready_or_waiting_at_position_cache[pos, deep] = is_waiting
    return is_waiting
def should_mark_waiting(self, updated_dep):
    """Whether an update to ``updated_dep`` should flag this symbol as waiting.

    False when warnings are disabled or when the update came from the symbol
    itself.
    """
    return not (self.disable_warnings or updated_dep is self)
def _is_underscore_or_simple_assign(self, new_deps: Set["Symbol"]) -> bool:
    """True for trivial rebinds: the ``_`` symbol, or an ``a = b`` style
    assignment whose single dependency already refers to the same object."""
    if self.is_underscore:
        # FIXME: distinguish between explicit assignment to _ from user and implicit assignment from kernel
        return True
    if not isinstance(self.stmt_node, (ast.Assign, ast.AnnAssign)):
        return False
    if len(new_deps) != 1:
        return False
    only_dep: Symbol = next(iter(new_deps))
    # obj ids can get reused for anon symbols like literals
    return not only_dep.is_anonymous and self.obj_id == only_dep.obj_id
def update_deps(
    self,
    new_deps: Set["Symbol"],
    prev_obj: Any = None,
    overwrite: bool = True,
    mutated: bool = False,
    deleted: bool = False,
    propagate_to_namespace_descendents: bool = False,
    propagate: bool = True,
    refresh: bool = True,
    is_cascading_reactive: Optional[bool] = None,
) -> None:
    """Rewire this symbol's dependency edges and propagate the update.

    :param new_deps: symbols this symbol now depends on.
    :param prev_obj: previous object value, used to detect no-op updates.
    :param overwrite: replace (rather than extend) the existing parent set.
    :param mutated: update came from a mutation rather than a rebind.
    :param deleted: update came from a deletion.
    :param propagate_to_namespace_descendents: force refresh of namespace children.
    :param propagate: run the update protocol to notify dependents.
    :param refresh: bump this symbol's timestamp.
    :param is_cascading_reactive: explicit cascading-reactivity override;
        None means "inherit from deps only".
    """
    if self.is_import and self.obj_id == self.cached_obj_id:
        # skip updates for imported symbols; just bump the version
        self.refresh()
        return
    if overwrite and not self.is_globally_accessible:
        self.watchpoints.clear()
    if mutated and self.is_immutable:
        return
    # if we get here, no longer implicit
    self._implicit = False
    # quick last fix to avoid overwriting if we appear inside the set of deps to add (or a 1st order ancestor)
    # TODO: check higher-order ancestors too?
    overwrite = overwrite and self not in new_deps
    overwrite = overwrite and not any(
        self in new_dep.parents for new_dep in new_deps
    )
    logger.warning("symbol %s new deps %s", self, new_deps)
    new_deps.discard(self)
    if overwrite:
        # drop edges to parents no longer in the dep set
        for parent in self.parents.keys() - new_deps:
            parent.children.pop(self, None)
            self.parents.pop(parent, None)
    # add edges (both directions) for newly introduced parents
    for new_parent in new_deps - self.parents.keys():
        if new_parent is None:
            continue
        new_parent.children.setdefault(self, []).append(Timestamp.current())
        self.parents.setdefault(new_parent, []).append(Timestamp.current())
        self.required_timestamp = Timestamp.uninitialized()
        self.fresher_ancestors.clear()
        self.fresher_ancestor_timestamps.clear()
    if mutated or isinstance(self.stmt_node, ast.AugAssign):
        self.update_usage_info()
    if (
        (mutated or overwrite)
        and Timestamp.current().is_initialized
        and not self.is_immutable
        and not self.is_mutation_virtual_symbol
        and not self.is_anonymous
        and self.containing_scope.is_global
        and not self.is_underscore
        and not self.is_implicit
        and self.obj_type is not type
        and not self.is_class
        and self.namespace is not None
    ):
        # record a virtual mutation marker inside the namespace
        self.namespace.upsert_symbol_for_name(
            self.IPYFLOW_MUTATION_VIRTUAL_SYMBOL_NAME, object(), propagate=False
        )
    propagate = propagate and (
        mutated or deleted or not self._should_cancel_propagation(prev_obj)
    )
    try:
        prev_cell = cells().current_cell().prev_cell
    except KeyError:
        prev_cell = None
    prev_cell_ctr = -1 if prev_cell is None else prev_cell.cell_ctr
    if overwrite:
        flow_ = flow()
        self._cascading_reactive_cell_num = -1
        flow_.updated_reactive_symbols.discard(self)
        flow_.updated_deep_reactive_symbols.discard(self)
    if is_cascading_reactive is not None:
        is_cascading_reactive = is_cascading_reactive or any(
            sym.is_cascading_reactive_at_counter(prev_cell_ctr) for sym in new_deps
        )
        if is_cascading_reactive:
            self.bump_cascading_reactive_cell_num()
    if refresh:
        self.refresh(
            # rationale: if this is a mutation for which we have more precise information,
            # then we don't need to update the ns descendents as this will already have happened.
            # also don't update ns descendents for things like `a = b`
            refresh_descendent_namespaces=propagate
            and not (mutated and not propagate_to_namespace_descendents)
            and not self._is_underscore_or_simple_assign(new_deps),
            refresh_namespace_waiting=not mutated,
        )
    if propagate:
        UpdateProtocol(self)(
            new_deps, mutated, propagate_to_namespace_descendents, refresh
        )
    self._refresh_cached_obj()
    if self.is_class:
        # pop pending class defs and update obj ref
        pending_class_ns = tracer().pending_class_namespaces.pop()
        pending_class_ns.update_obj_ref(self.obj)
    # inherit call scope / def stmt from an aliasing function dep (e.g. `f = g`)
    for dep in new_deps:
        if dep.obj is self.obj and dep.call_scope is not None:
            self.call_scope = dep.call_scope
            self.func_def_stmt = dep.func_def_stmt
    ns = self.namespace
    if ns is not None and ns.scope_name == "self" and isinstance(self.name, str):
        # fixup namespace name if necessary
        # can happen if symbol for 'self' was created in a previous __init__
        ns.scope_name = self.name
    if overwrite and len(flow().aliases[self.obj_id]) == 1:
        self._handle_possible_widget_creation()
        self._handle_possible_mercury_widget_creation()
def _mercury_widgets_manager(self):
    """Return the mercury ``WidgetsManager`` for this object's module, or None when unavailable.

    Objects are recognized as mercury widgets by the presence of a
    ``code_uid`` attribute; any lookup failure is treated as "not a widget".
    """
    if self.obj is None:
        return None
    if self.is_obj_lazy_module or not hasattr(self.obj, "code_uid"):
        return None
    try:
        return sys.modules.get(self.obj.__class__.__module__).WidgetsManager
    except: # noqa
        return None
def _handle_possible_widget_creation(self) -> None:
    """If this symbol's object is an ipywidget, track its ``value`` and observe changes.

    No-op when ipywidgets is not imported or the object doesn't look like a
    widget with an observable ``value``.
    """
    if self.obj is None:
        return
    Widget = getattr(sys.modules.get("ipywidgets"), "Widget", None)
    if (
        Widget is None
        or self.is_obj_lazy_module
        or not isinstance(self.obj, Widget)
        or not hasattr(self.obj, "observe")
        or not hasattr(self.obj, "value")
    ):
        return
    # track the widget's value as a namespace child so slicing sees it
    self.namespaced().upsert_symbol_for_name(
        "value", getattr(self.obj, "value", None), set(), self.stmt_node
    )
    self.obj.observe(self._observe_widget)
    self._num_ipywidget_observers += 1
def _handle_possible_mercury_widget_creation(self) -> None:
    """If this symbol's object is a mercury widget, track its ``value`` and observe changes."""
    WidgetsManager = self._mercury_widgets_manager
    if WidgetsManager is None:
        return
    widget = WidgetsManager.get_widget(self.obj.code_uid)
    self.namespaced().upsert_symbol_for_name(
        "value", getattr(widget, "value", None), set(), self.stmt_node
    )
    widget.observe(self._observe_widget)
    self._num_mercury_widget_observers += 1
def _observe_widget(self, msg: Dict[str, Any]) -> None:
    """Widget observer callback: record a value change as a synthetic statement.

    Injects a ``<name> = <newval>`` statement at the cell of this symbol's
    timestamp, wires bidirectional dynamic data deps between the value
    symbol's timestamps, and schedules a (debounced) reactive re-execution.
    """
    if msg.get("name") != "value" or "new" not in msg:
        return
    ns = self.namespace
    sym = ns.lookup_symbol_by_name_this_indentation("value")
    if sym is None:
        return
    newval = msg["new"]
    current_ts_cell = cells().at_timestamp(self._timestamp)
    # NOTE(review): the f-string below presumably assumes repr-able scalar
    # widget values; a string value would not round-trip -- confirm.
    current_ts_cell._extra_stmt = ast.parse(f"{sym.readable_name} = {newval}").body[
        0
    ]
    sym._override_ready_liveness_cell_num = flow().cell_counter() + 1
    sym._override_timestamp = Timestamp(
        self._timestamp.cell_num, current_ts_cell.num_original_stmts
    )
    sym.update_obj_ref(newval)
    statements().create_and_track(
        current_ts_cell._extra_stmt,
        timestamp=sym._override_timestamp,
        override=True,
    )
    with dynamic_slicing_context():
        flow().add_data_dep(
            sym._timestamp,
            sym._override_timestamp,
            sym,
        )
        flow().add_data_dep(
            sym._override_timestamp,
            sym._timestamp,
            sym,
        )
    self.debounced_exec_schedule(reactive=True)
def debounced_exec_schedule(self, reactive: bool) -> None:
    """Schedule (debounced) execution of the cell at this symbol's timestamp."""
    if _debounced_exec_schedule(
        cells().at_timestamp(self.timestamp).cell_id, reactive=reactive
    ):
        flow().debounced_exec_schedule_pending = True
def namespaced(self) -> "Namespace":
    """Return this symbol's namespace, creating one on demand if none exists yet."""
    ns = self.namespace
    if ns is not None:
        return ns
    return namespaces()(self.obj, self.name, parent_scope=self.containing_scope)
def update_usage_info_one_timestamp(
    self,
    used_time: Timestamp,
    updated_time: Timestamp,
    is_static: bool,
) -> bool:
    """Record a single (use, update) data dependency; return whether it counts as a usage.

    Dynamic uses only count when the update precedes the use; static uses
    additionally require the updating cell to still be visible.
    """
    flow_ = flow()
    is_usage = is_static or updated_time < used_time
    if is_usage:
        with slicing_context(is_static=is_static):
            flow_.add_data_dep(
                used_time,
                updated_time,
                self,
            )
    if is_static:
        is_usage = cells().at_timestamp(updated_time).is_visible
    return is_usage
def update_usage_info(
    self,
    used_time: Optional[Timestamp] = None,
    used_node: Optional[ast.AST] = None,
    exclude_ns: bool = False,
    is_static: bool = False,
    is_blocking: bool = False,
) -> "Symbol":
    """Record that this symbol was used (dynamically or statically) at ``used_time``.

    Walks update timestamps newest-first to find the matching version,
    records the data dep, and (unless ``exclude_ns``) recursively records
    usage for all namespace descendants. Returns self for chaining.
    """
    is_blocking = is_blocking or id(used_node) in tracer().blocking_node_ids
    if used_time is None:
        used_time = Timestamp.current()
    if flow().is_dev_mode:
        logger.info(
            "sym `%s` used in cell %d last updated in cell %d",
            self,
            used_time.cell_num,
            self.timestamp,
        )
    timestamp_by_used_time = (
        self.timestamp_by_liveness_time
        if is_static
        else self.timestamp_by_used_time
    )
    if not is_blocking:
        is_usage = False
        # NOTE(review): ts_to_use is never reassigned in the loop below --
        # presumably it was meant to track the matched updated_ts; confirm.
        ts_to_use = self._timestamp
        for updated_ts in sorted(self.updated_timestamps, reverse=True):
            if not updated_ts.is_initialized:
                continue
            is_usage = self.update_usage_info_one_timestamp(
                used_time,
                updated_ts,
                is_static=is_static,
            )
            if is_usage or not is_static:
                break
        if is_usage and used_time.is_initialized:
            timestamp_by_used_time[used_time] = ts_to_use
            if used_node is not None:
                self.used_node_by_used_time[used_time] = used_node
    if exclude_ns:
        return self
    for sym in self.get_namespace_symbols(recurse=True):
        sym.update_usage_info(
            used_time=used_time,
            used_node=None,
            exclude_ns=True,
            is_static=is_static,
            is_blocking=is_blocking,
        )
    return self
def get_namespace_symbols(
    self, recurse: bool = False, seen: Optional[Set["Symbol"]] = None
) -> Generator["Symbol", None, None]:
    """Yield symbols in this symbol's namespace, optionally recursing into theirs.

    ``seen`` guards against cycles in the namespace graph.
    """
    ns = self.namespace
    if ns is None:
        return
    if seen is None:
        seen = set()
    if self in seen:
        return
    seen.add(self)
    for sym in ns.all_symbols_this_indentation():
        yield sym
        if recurse:
            yield from sym.get_namespace_symbols(recurse=recurse, seen=seen)
def _take_timestamp_snapshots(
    self, ts_ubound: Timestamp, seen: Optional[Set["Symbol"]] = None
) -> None:
    """Append a (timestamp, upper bound) snapshot pair for this symbol and all
    aliases of its containing namespace, guarding against cycles via ``seen``."""
    if seen is None:
        seen = set()
    if self in seen:
        return
    seen.add(self)
    self._snapshot_timestamps.append(self._timestamp)
    self._snapshot_timestamp_ubounds.append(ts_ubound)
    containing_ns = self.containing_namespace
    if containing_ns is None:
        return
    # snapshot every alias of the containing namespace as well
    for alias in flow().aliases.get(containing_ns.obj_id, []):
        alias._take_timestamp_snapshots(ts_ubound, seen=seen)
def refresh(
    self,
    take_timestamp_snapshots: bool = True,
    refresh_descendent_namespaces: bool = False,
    refresh_namespace_waiting: bool = True,
    timestamp: Optional[Timestamp] = None,
    seen: Optional[Set["Symbol"]] = None,
) -> None:
    """Bump this symbol's timestamp (default: now) and update bookkeeping.

    Optionally snapshots version timestamps, propagates the refresh through
    descendant namespaces (cycle-guarded via ``seen``), and clears the
    namespace waiting set.
    """
    orig_timestamp = self._timestamp
    self._timestamp = Timestamp.current() if timestamp is None else timestamp
    self._override_timestamp = None
    if take_timestamp_snapshots and (
        orig_timestamp < self._timestamp or len(self._snapshot_timestamps) == 0
    ):
        self._take_timestamp_snapshots(self._timestamp)
    self.updated_timestamps.add(self._timestamp)
    self._temp_disable_warnings = False
    for cell in self.cells_where_live:
        cell.add_used_cell_counter(self, self._timestamp.cell_num)
    ns = self.containing_namespace
    if ns is not None:
        # logger.error("bump version of %s due to %s (value %s)", ns.full_path, self.full_path, self.obj)
        ns.max_descendent_timestamp = self.shallow_timestamp
        for alias in flow().aliases.get(ns.obj_id, []):
            for cell in alias.cells_where_deep_live:
                cell.add_used_cell_counter(alias, self._timestamp.cell_num)
    if refresh_descendent_namespaces:
        if seen is None:
            seen = set()
        if self in seen:
            return
        seen.add(self)
        ns = self.namespace
        if ns is not None:
            for sym in ns.all_symbols_this_indentation(exclude_class=True):
                # this is to handle cases like `x = x.mutate(42)`, where
                # we could have changed some member of x but returned the
                # original object -- in this case, just assume that all
                # the stale namespace descendents are no longer stale, as
                # this is likely the user intention. For an example, see
                # `test_external_object_update_propagates_to_stale_namespace_symbols()`
                # in `test_frontend_checker.py`
                if not sym.is_waiting or refresh_namespace_waiting:
                    # logger.error(
                    #     "refresh %s due to %s (value %s) via namespace %s",
                    #     sym.full_path,
                    #     self.full_path,
                    #     self.obj,
                    #     ns.full_path,
                    # )
                    sym.refresh(
                        refresh_descendent_namespaces=True,
                        timestamp=self.shallow_timestamp,
                        take_timestamp_snapshots=False,
                        seen=seen,
                    )
        if refresh_namespace_waiting:
            self.namespace_waiting_symbols.clear()
def resync_if_necessary(self, refresh: bool) -> None:
    """Re-sync this global symbol with the object currently bound in the user namespace.

    Handles the case where the kernel rebound the name behind our back
    (e.g. ``lst.append``-style aliases are repositioned at the list's tail),
    or where only the object's length changed; optionally refreshes after.
    """
    if not self.containing_scope.is_global:
        return
    try:
        obj = shell().user_ns[self.name]
    except: # noqa
        # cinder runtime can throw an exception here due to lazy imports that fail
        return
    if self.obj is not obj:
        flow_ = flow()
        for alias in flow_.aliases.get(
            self.cached_obj_id, set()
        ) | flow_.aliases.get(self.obj_id, set()):
            containing_namespace = alias.containing_namespace
            if containing_namespace is None:
                continue
            containing_obj = containing_namespace.obj
            if containing_obj is None:
                continue
            # TODO: handle dict case too
            if isinstance(containing_obj, list) and containing_obj[-1] is obj:
                # rebind the alias to the list's last slot
                containing_namespace._subscript_symbol_by_name.pop(alias.name, None)
                alias.name = len(containing_obj) - 1
                alias.update_obj_ref(obj)
                containing_namespace._subscript_symbol_by_name[alias.name] = alias
        cleanup_discard(flow_.aliases, self.cached_obj_id, self)
        cleanup_discard(flow_.aliases, self.obj_id, self)
        flow_.aliases.setdefault(id(obj), set()).add(self)
        self.update_obj_ref(obj)
    elif self.obj_len != self.cached_obj_len:
        self._refresh_cached_obj()
    else:
        return
    if refresh:
        self.refresh()
# cap on the total size of values we are willing to compare for memoization
_MAX_MEMOIZE_COMPARABLE_SIZE = 10**6

def _equal(obj1: Any, obj2: Any) -> bool:
    """Fallback comparator: plain ``==`` equality."""
    outcome = obj1 == obj2
    return outcome
def _array_equal(obj1: Any, obj2: Any) -> bool:
    """Comparator for numpy arrays: True iff all elements compare equal.

    Any failure (shape mismatch, non-comparable dtypes, etc.) is treated as
    inequality.
    """
    import numpy as np

    try:
        # np.alltrue was removed in NumPy 2.0; np.all is the supported
        # spelling and behaves identically here.
        return bool(np.all(obj1 == obj2))
    except: # noqa
        return False
def _dataframe_equal(obj1: Any, obj2: Any) -> bool:
    """Comparator for pandas-like objects exposing ``.equals``; any failure means unequal."""
    try:
        verdict = obj1.equals(obj2)  # type: ignore
    except: # noqa
        return False
    return verdict
def _make_list_eq(
    eqs: List[Callable[[Any, Any], bool]]
) -> Callable[[List[Any], List[Any]], bool]:
    """Build an element-wise list comparator that applies ``eqs[i]`` to each pair."""
    def list_eq(lst1: List[Any], lst2: List[Any]) -> bool:
        # zip truncates to the shortest of (eqs, lst1, lst2), matching the
        # element-wise loop this replaces
        return all(eq(a, b) for eq, a, b in zip(eqs, lst1, lst2))

    return list_eq
def make_memoize_comparable_for_obj(
    cls, obj: Any, seen_ids: Set[int]
) -> Tuple[Any, Optional[Callable[[Any, Any], bool]], int]:
    """Build a memoization-comparable view of ``obj``.

    Returns a ``(comparable, eq_fn, size)`` triple, or ``(cls.NULL, None, -1)``
    when the object cannot be compared (cycles, unsupported types, or size
    exceeding ``_MAX_MEMOIZE_COMPARABLE_SIZE``). ``seen_ids`` tracks visited
    object ids to break reference cycles.
    """
    if isinstance(obj, (bool, bytes, bytearray, int, float, str)):
        return obj, cls._equal, 1
    if not isinstance(obj, tuple):
        # tuples are immutable so revisiting one is fine; anything else
        # appearing twice indicates a cycle
        if id(obj) in seen_ids:
            return cls.NULL, None, -1
        seen_ids.add(id(obj))
    if isinstance(obj, (dict, frozenset, list, set, tuple)):
        size = 0
        comp = []
        eqs: List[Callable[[Any, Any], bool]] = []
        if isinstance(obj, dict):
            iterable: "Iterable[Any]" = sorted(obj.items())
        else:
            iterable = obj
        for inner in iterable:
            inner_comp, inner_eq, inner_size = cls.make_memoize_comparable_for_obj(
                inner, seen_ids
            )
            if inner_comp is cls.NULL or inner_eq is None:
                return cls.NULL, None, -1
            size += inner_size + 1
            if size > cls._MAX_MEMOIZE_COMPARABLE_SIZE:
                return cls.NULL, None, -1
            comp.append(inner_comp)
            eqs.append(inner_eq)
        if all(eq is cls._equal for eq in eqs):
            iter_eq: Callable[[Any, Any], bool] = cls._equal
        elif isinstance(obj, (frozenset, set)):
            # unordered containers with non-trivial element comparators
            # cannot be compared element-wise
            return cls.NULL, None, -1
        else:
            iter_eq = cls._make_list_eq(eqs)
        ret = frozenset(comp) if isinstance(obj, (frozenset, set)) else comp
        return ret, iter_eq, size
    elif type(obj) in (type, FunctionType):
        # try to determine it based on the symbol
        for sym in flow().aliases.get(id(obj), []):
            comp, eq = sym.make_memoize_comparable(seen_ids=seen_ids)
            if comp is not cls.NULL and eq is not None:
                return comp, eq, 1
        return cls.NULL, None, -1
    else:
        # hacks to check if they are arrays, dataframes, etc without explicitly importing these
        module = getattr(type(obj), "__module__", "")
        if module.startswith("numpy"):
            name = getattr(type(obj), "__name__", "")
            if name.endswith("ndarray"):
                return obj, cls._array_equal, obj.size
            else:
                numpy = sys.modules.get("numpy")
                if numpy is not None and isinstance(obj, numpy.number):
                    return obj, cls._equal, 1
        elif module.startswith(("modin", "pandas")):
            name = getattr(type(obj), "__name__", "")
            if name.endswith(("DataFrame", "Series")):
                return obj, cls._dataframe_equal, obj.size
        elif module.startswith("ipywidgets"):
            ipywidgets = sys.modules.get("ipywidgets")
            if (
                ipywidgets is not None
                and isinstance(obj, ipywidgets.Widget)
                and hasattr(obj, "value")
            ):
                return obj.value, cls._equal, 1
        return cls.NULL, None, -1
def make_memoize_comparable(
    self, seen_ids: Optional[Set[int]] = None
) -> Tuple[Any, Optional[Callable[[Any, Any], bool]]]:
    """Build a ``(comparable, eq_fn)`` pair for memoizing this symbol's value.

    Class / function definitions are compared by their unparsed source plus
    the comparables of their parents; other objects delegate to
    ``make_memoize_comparable_for_obj``. ``(NULL, None)`` means incomparable.
    """
    if seen_ids is None:
        seen_ids = set()
    if isinstance(
        self.stmt_node, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)
    ):
        comps = [astunparse.unparse(self.stmt_node)]
        for sym in sorted(self.parents.keys()):
            par_comp, eq = sym.make_memoize_comparable(seen_ids=seen_ids)
            if par_comp is self.NULL or eq is not self._equal:
                return self.NULL, None
            comps.append(par_comp)
        return comps, self._equal
    obj, eq, size = self.make_memoize_comparable_for_obj(self.obj, seen_ids)
    if size > self._MAX_MEMOIZE_COMPARABLE_SIZE:
        return self.NULL, None
    else:
        return obj, eq
The provided code snippet includes necessary dependencies for implementing the `deps` function. Write a Python function `def deps(sym: Any) -> List[Symbol]` to solve the following problem:
Given the programmatic usage of some symbol, look up the corresponding dependencies for that symbol.
Here is the function:
def deps(sym: Any) -> List[Symbol]:
    """Return the non-anonymous parent symbols (dependencies) recorded for *sym*.

    See the `argument` handler in ipyflow_tracer for the actual dependency
    tracking; this is just a lookup over what that handler recorded.
    """
    validated = _validate(sym)
    return [
        parent
        for parent in validated.parents.keys()
        if not parent.is_anonymous
    ]
15,085 | from typing import TYPE_CHECKING, Any, List, Set, Union, cast
from ipyflow.data_model.symbol import Symbol
from ipyflow.data_model.timestamp import Timestamp
from ipyflow.tracing.watchpoint import Watchpoints
def _validate(sym: Any) -> Symbol:
    """Return *sym* as a Symbol, raising ValueError for anything else."""
    if isinstance(sym, Symbol):
        return sym
    raise ValueError("unable to lookup metadata for symbol")
class Symbol:
NULL = object()
# object for virtual display symbol
DISPLAY = object()
IMMUTABLE_TYPES = set(IMMUTABLE_PRIMITIVE_TYPES)
IPYFLOW_MUTATION_VIRTUAL_SYMBOL_NAME = "__ipyflow_mutation"
def __init__(
    self,
    name: SupportedIndexType,
    symbol_type: SymbolType,
    obj: Any,
    containing_scope: "Scope",
    stmt_node: Optional[ast.stmt] = None,
    symbol_node: Optional[ast.AST] = None,
    refresh_cached_obj: bool = False,
    implicit: bool = False,
) -> None:
    """Create a symbol tracking ``obj`` under ``name`` in ``containing_scope``.

    :param name: variable name or subscript index this symbol stands for.
    :param symbol_type: kind of symbol (function, class, import, ...).
    :param obj: runtime object currently bound to the name.
    :param containing_scope: scope where the name lives (global scope if falsy).
    :param stmt_node: statement that defined the symbol, if traced.
    :param symbol_node: AST node of the name itself, if traced.
    :param refresh_cached_obj: snapshot the object immediately; only valid
        for implicit symbols with no defining statement.
    :param implicit: symbol was created implicitly during tracing rather
        than via an observed store.
    """
    if refresh_cached_obj:
        # TODO: clean up redundancies
        assert implicit
        assert stmt_node is None
    self.name = name
    self.symbol_type = symbol_type
    self.obj = obj
    # additional user-specific metadata
    self._tags: Set[str] = set()
    self.extra_metadata: Dict[str, Any] = {}
    # marks the symbol as garbage once set
    self._tombstone = False
    self._cached_out_of_sync = True
    self.cached_obj_id: Optional[int] = None
    self.cached_obj_type: Optional[Type[object]] = None
    self.cached_obj_len: Optional[int] = None
    if refresh_cached_obj:
        self._refresh_cached_obj()
    self.containing_scope = containing_scope or flow().global_scope
    self.call_scope: Optional[Scope] = None
    self.func_def_stmt: Optional[ast.stmt] = None
    self.stmt_node = self.update_stmt_node(stmt_node)
    self.symbol_node = symbol_node
    self._funcall_live_symbols = None
    # dependency graph edges, each annotated with introduction timestamps
    self.parents: Dict["Symbol", List[Timestamp]] = {}
    self.children: Dict["Symbol", List[Timestamp]] = {}
    # initialize at -1 for implicit since the corresponding piece of data could already be around,
    # and we don't want liveness checker to think this was newly created unless we
    # explicitly trace an update somewhere
    self._timestamp: Timestamp = (
        Timestamp.uninitialized() if implicit else Timestamp.current()
    )
    self._snapshot_timestamps: List[Timestamp] = []
    self._snapshot_timestamp_ubounds: List[Timestamp] = []
    self._defined_cell_num = cells().exec_counter()
    self._is_dangling_on_edges = False
    self._cascading_reactive_cell_num = -1
    self._override_ready_liveness_cell_num = -1
    self._override_timestamp: Optional[Timestamp] = None
    self.watchpoints = Watchpoints()
    # The necessary last-updated timestamp / cell counter for this symbol to not be waiting
    self.required_timestamp: Timestamp = self.timestamp
    # for each usage of this sym, the version that was used, if different from the timestamp of usage
    self.timestamp_by_used_time: Dict[Timestamp, Timestamp] = {}
    self.used_node_by_used_time: Dict[Timestamp, ast.AST] = {}
    # History of definitions at time of liveness
    self.timestamp_by_liveness_time: Dict[Timestamp, Timestamp] = {}
    # All timestamps associated with updates to this symbol
    self.updated_timestamps: Set[Timestamp] = set()
    # The most recent timestamp associated with a particular object id
    self.last_updated_timestamp_by_obj_id: Dict[int, Timestamp] = {}
    self.fresher_ancestors: Set["Symbol"] = set()
    self.fresher_ancestor_timestamps: Set[Timestamp] = set()
    # cells where this symbol was live
    self.cells_where_deep_live: Set[Cell] = set()
    self.cells_where_shallow_live: Set[Cell] = set()
    self._last_computed_ready_or_waiting_cache_ts: int = -1
    self._is_ready_or_waiting_at_position_cache: Dict[Tuple[int, bool], bool] = {}
    # if implicitly created when tracing non-store-context ast nodes
    self._implicit = implicit
    # Will never be stale if no_warning is True
    self.disable_warnings = False
    self._temp_disable_warnings = False
    self._num_ipywidget_observers = 0
    self._num_mercury_widget_observers = 0
    flow().aliases.setdefault(id(obj), set()).add(self)
    if (
        isinstance(self.name, str)
        and not self.is_anonymous
        and not self.containing_scope.is_namespace_scope
    ):
        ns = self.namespace
        if ns is not None and ns.scope_name == "self":
            # hack to get a better name than `self.whatever` for fields of this object
            # not ideal because it relies on the `self` convention but is probably
            # acceptable for the use case of improving readable names
            ns.scope_name = self.name
def aliases(self) -> List["Symbol"]:
    """All symbols (including this one) currently referring to the same object."""
    return list(flow().aliases.get(self.obj_id, []))
def cells_where_live(self) -> Set[Cell]:
return self.cells_where_deep_live | self.cells_where_shallow_live
def __repr__(self) -> str:
    """Unambiguous form: the readable name wrapped in angle brackets."""
    return "<{}>".format(self.readable_name)
def __str__(self) -> str:
    """Human-readable form: just the readable name."""
    name = self.readable_name
    return name
def __hash__(self) -> int:
    """Hash by identity; symbols are unique per created instance."""
    identity = id(self)
    return hash(identity)
def __lt__(self, other) -> bool:
    """Arbitrary-but-stable ordering by object identity (for sorting symbol sets)."""
    mine, theirs = id(self), id(other)
    return mine < theirs
def add_tag(self, tag_value: str) -> None:
    """Attach a user-defined tag to this symbol."""
    tags = self._tags
    tags.add(tag_value)
def remove_tag(self, tag_value: str) -> None:
    """Detach a user-defined tag from this symbol; absent tags are ignored."""
    tags = self._tags
    tags.discard(tag_value)
def has_tag(self, tag_value: str) -> bool:
    """Whether the given user-defined tag is attached to this symbol."""
    found = tag_value in self._tags
    return found
def temporary_disable_warnings(self) -> None:
    """Suppress staleness warnings for this symbol until its next refresh."""
    self._temp_disable_warnings = True
def last_used_timestamp(self) -> Timestamp:
if len(self.timestamp_by_used_time) == 0:
return Timestamp.uninitialized()
else:
return max(self.timestamp_by_used_time.keys())
def namespace_waiting_symbols(self) -> Set["Symbol"]:
    """Waiting symbols inside this symbol's namespace (empty when there is no namespace)."""
    ns = self.namespace
    if ns is None:
        return set()
    return ns.namespace_waiting_symbols
def shallow_timestamp(self) -> Timestamp:
if self._override_timestamp is None:
return self._timestamp
else:
return max(self._timestamp, self._override_timestamp)
def visible_timestamp(self) -> Optional[Timestamp]:
    """Most recent update timestamp whose cell is still visible, or None if none are."""
    for ts in sorted(self.updated_timestamps, reverse=True):
        if cells().at_timestamp(ts).is_visible:
            return ts
    return None
def memoize_timestamp(self) -> Optional[Timestamp]:
return self.last_updated_timestamp_by_obj_id.get(self.obj_id)
def timestamp(self) -> Timestamp:
ts = self.shallow_timestamp
if self.is_import or self.is_module:
return ts
ns = self.namespace
return ts if ns is None else max(ts, ns.max_descendent_timestamp)
def _compute_namespace_timestamps(
    self,
    seen: Optional[Set["Symbol"]] = None,
    version_ubound: Optional[Timestamp] = None,
) -> Set[Timestamp]:
    """Collect timestamps for this symbol and (recursively) its namespace members.

    :param seen: cycle guard; symbols already visited contribute nothing further.
    :param version_ubound: when given, only the latest snapshot timestamp <= this
        bound is used instead of the live (shallow/deep) timestamps.
    :return: the union of qualifying timestamps across the namespace tree.
    """
    if version_ubound is None:
        timestamps = {self.shallow_timestamp, self.timestamp}
    else:
        # scan snapshots newest-first for the greatest one within the bound
        max_leq_ubound = Timestamp.uninitialized()
        for ts in reversed(self._snapshot_timestamps):
            if ts <= version_ubound:
                max_leq_ubound = ts
                break
        if max_leq_ubound.is_initialized:
            timestamps = {max_leq_ubound}
        else:
            timestamps = set()
    ns = self.namespace
    if ns is None:
        return timestamps
    if seen is None:
        seen = set()
    if self in seen:
        return timestamps
    seen.add(self)
    # recurse into direct namespace members, sharing the `seen` set
    for sym in ns.all_symbols_this_indentation():
        timestamps |= sym._compute_namespace_timestamps(
            seen=seen, version_ubound=version_ubound
        )
    return timestamps
def _get_timestamps_for_version(self, version: int) -> Set[Timestamp]:
    """Resolve the set of timestamps describing this symbol at snapshot `version`.

    With no snapshots, falls back to the current deep timestamp. A snapshot with
    cell_num == -1 is treated as uninitialized and replaced by the defining cell.
    `version == -1` means "latest", i.e. no upper bound on namespace timestamps.
    """
    if len(self._snapshot_timestamps) == 0:
        return {self.timestamp}
    ts = self._snapshot_timestamps[version]
    if ts.cell_num == -1:
        return {Timestamp(self.defined_cell_num, ts.stmt_num)}
    else:
        return self._compute_namespace_timestamps(
            version_ubound=None if version == -1 else ts
        )
def code(
    self, format_type: Optional[Type[FormatType]] = None, version: int = -1
) -> Slice:
    """Return a formatted program slice reconstructing this symbol at `version`."""
    slice_timestamps = self._get_timestamps_for_version(version=version)
    return statements().format_multi_slice(
        slice_timestamps,
        blacken=True,
        format_type=format_type,
    )
def cascading_reactive_cell_num(
    self,
    seen: Optional[Set["Symbol"]] = None,
    consider_containing_symbols: bool = True,
) -> int:
    """Max cell counter at which cascading reactivity was triggered for this symbol.

    Takes the max over this symbol, its namespace members, and (optionally)
    the symbols of every namespace containing it. Returns -1 when revisiting
    a symbol (cycle guard) so it does not affect the max.
    """
    if seen is None:
        seen = set()
    if self in seen:
        return -1
    seen.add(self)
    cell_num = self._cascading_reactive_cell_num
    ns = self.namespace
    ret = (
        cell_num
        if ns is None
        else max(
            cell_num,
            ns.max_cascading_reactive_cell_num(seen),
        )
    )
    if not consider_containing_symbols:
        return ret
    # also fold in reactivity of symbols for enclosing namespaces
    for sym in self.iter_containing_symbols():
        ret = max(ret, sym.cascading_reactive_cell_num(seen=seen))
    return ret
def bump_cascading_reactive_cell_num(self, ctr: Optional[int] = None) -> None:
    """Raise the cascading-reactive counter to at least `ctr` (current cell if None)."""
    target = flow().cell_counter() if ctr is None else ctr
    if target > self._cascading_reactive_cell_num:
        self._cascading_reactive_cell_num = target
def iter_containing_symbols(self) -> Generator["Symbol", None, None]:
    """Yield self, then aliases of every namespace transitively containing self."""
    yield self
    ns = self.containing_namespace
    if ns is None or not ns.is_namespace_scope:
        return
    for outer_ns in ns.iter_containing_namespaces():
        yield from flow().aliases.get(outer_ns.obj_id, [])
def waiting_timestamp(self) -> int:
    """Cell counter used for staleness checks, floored at the flow's min timestamp."""
    floor = flow().min_timestamp
    return max(self._timestamp.cell_num, floor)
def defined_cell_num(self) -> int:
    """Counter of the cell in which this symbol was defined."""
    cell_num = self._defined_cell_num
    return cell_num
def readable_name(self) -> str:
    """Namespace-qualified name, as rendered by the containing scope."""
    scope = self.containing_scope
    return scope.make_namespace_qualified_name(self)
def is_subscript(self) -> bool:
    """Whether this symbol represents a subscript (container element)."""
    return SymbolType.SUBSCRIPT == self.symbol_type
def is_class(self) -> bool:
    """Whether this symbol represents a class definition."""
    return SymbolType.CLASS == self.symbol_type
def is_function(self) -> bool:
    """Whether this symbol represents a function definition."""
    return SymbolType.FUNCTION == self.symbol_type
def is_lambda(self) -> bool:
    """Whether this symbol was synthesized for an anonymous lambda."""
    # TODO: this is terrible
    name = self.name
    # exact `str` check (not isinstance) preserves original semantics
    if type(name) is not str:  # noqa: E721
        return False
    return name.startswith("<lambda_sym_")
def is_import(self) -> bool:
    """Whether this symbol was created by an import statement."""
    return SymbolType.IMPORT == self.symbol_type
def is_module(self) -> bool:
    """Whether this symbol represents a module object."""
    return SymbolType.MODULE == self.symbol_type
def imported_module(self) -> str:
    """Name of the module this IMPORT symbol came from.

    For `import a.b as c` returns "a.b"; for `from m import x` returns "m".
    :raises ValueError: if not an IMPORT symbol, or the alias can't be found.
    :raises TypeError: if the recorded statement is not an import statement.
    """
    if not self.is_import:
        raise ValueError("only IMPORT symbols have `imported_module` property")
    if isinstance(self.stmt_node, ast.Import):
        # find the alias whose bound name matches this symbol's name
        for alias in self.stmt_node.names:
            name = alias.asname or alias.name
            if name == self.name:
                return alias.name
        raise ValueError(
            "Unable to find module for symbol %s is stmt %s"
            % (self, ast.dump(self.stmt_node))
        )
    elif isinstance(self.stmt_node, ast.ImportFrom):
        return self.stmt_node.module
    else:
        raise TypeError(
            "Invalid stmt type for import symbol: %s" % ast.dump(self.stmt_node)
        )
def imported_symbol_original_name(self) -> str:
    """Original (pre-`as`) name of this imported symbol.

    For `import m as x` this is "m"; for `from m import a as x` this is "a".
    :raises ValueError: if not an IMPORT symbol, or the alias can't be found.
    :raises TypeError: if the recorded statement is not an import statement.
    """
    if not self.is_import:
        raise ValueError(
            "only IMPORT symbols have `imported_symbol_original_name` property"
        )
    if isinstance(self.stmt_node, ast.Import):
        # plain imports bind the module itself
        return self.imported_module
    elif isinstance(self.stmt_node, ast.ImportFrom):
        for alias in self.stmt_node.names:
            name = alias.asname or alias.name
            if name == self.name:
                return alias.name
        raise ValueError(
            "Unable to find module for symbol %s is stmt %s"
            % (self, ast.dump(self.stmt_node))
        )
    else:
        raise TypeError(
            "Invalid stmt type for import symbol: %s" % ast.dump(self.stmt_node)
        )
def is_cascading_reactive_at_counter(self, ctr: int) -> bool:
    """Whether cascading reactivity fired after both `ctr` and the flow's floor."""
    threshold = max(ctr, flow().min_cascading_reactive_cell_num)
    return self.cascading_reactive_cell_num() > threshold
def get_top_level(self) -> Optional["Symbol"]:
    """Walk up through namespace aliases to find a globally accessible symbol.

    Returns self when already top-level, None when no accessible alias exists.
    """
    if not self.containing_scope.is_namespace_scope:
        return self
    else:
        containing_scope = cast("Namespace", self.containing_scope)
        # try each alias of the containing namespace's object until one
        # reaches a globally accessible scope
        for alias in flow().aliases.get(containing_scope.obj_id, []):
            if alias.is_globally_accessible:
                return alias.get_top_level()
        return None
def get_import_string(self) -> str:
    """Reconstruct the import statement that created this IMPORT symbol.

    :raises ValueError: if this is not an IMPORT symbol.
    :raises TypeError: if the recorded statement is not an import statement.
    """
    if not self.is_import:
        raise ValueError("only IMPORT symbols support recreating the import string")
    module = self.imported_module
    if isinstance(self.stmt_node, ast.Import):
        # omit the `as` clause when the bound name equals the module name
        if module == self.name:
            return f"import {module}"
        else:
            return f"import {module} as {self.name}"
    elif isinstance(self.stmt_node, ast.ImportFrom):
        original_symbol_name = self.imported_symbol_original_name
        if original_symbol_name == self.name:
            return f"from {module} import {original_symbol_name}"
        else:
            return f"from {module} import {original_symbol_name} as {self.name}"
    else:
        raise TypeError(
            "Invalid stmt type for import symbol: %s" % ast.dump(self.stmt_node)
        )
def is_anonymous(self) -> bool:
    """Anonymous if flagged directly, or if it lives inside an anonymous namespace."""
    if self.symbol_type == SymbolType.ANONYMOUS:
        return True
    containing_ns = self.containing_namespace
    return containing_ns is not None and containing_ns.is_anonymous
def is_implicit(self) -> bool:
    """Whether this symbol was created implicitly rather than by explicit assignment."""
    implicit_flag = self._implicit
    return implicit_flag
def shallow_clone(
    self, new_obj: Any, new_containing_scope: "Scope", symbol_type: SymbolType
) -> "Symbol":
    """Create a same-class, same-name symbol wrapping `new_obj` in another scope."""
    cls = self.__class__
    return cls(self.name, symbol_type, new_obj, new_containing_scope)
def obj_id(self) -> int:
    """Identity of the wrapped object."""
    obj = self.obj
    return id(obj)
def obj_len(self) -> Optional[int]:
    """Best-effort `len(self.obj)`; None when unavailable or the obj is a lazy module."""
    try:
        obj = self.obj
        if self.is_obj_lazy_module or not hasattr(obj, "__len__"):
            return None
        return len(obj)
    except:  # noqa: E722
        # deliberately best-effort: any failure just means "no length"
        return None
def obj_type(self) -> Type[Any]:
    """Concrete runtime type of the wrapped object."""
    obj = self.obj
    return type(obj)
def is_immutable(self) -> bool:
    """Whether the wrapped object's type is one of the known-immutable types."""
    immutable_types = self.IMMUTABLE_TYPES
    return self.obj_type in immutable_types
def is_mutation_virtual_symbol(self) -> bool:
    """Whether this is the synthetic symbol used to track object mutations."""
    return self.IPYFLOW_MUTATION_VIRTUAL_SYMBOL_NAME == self.name
def is_underscore(self) -> bool:
    """Whether this is the global `_` symbol (IPython's last-result variable)."""
    if self.name != "_":
        return False
    return self.containing_scope.is_global
def is_obj_lazy_module(self) -> bool:
    """Whether the wrapped object is a still-unloaded lazy module."""
    return type(self.obj) is _LazyModule
def get_type_annotation(self):
    """Best-effort type annotation object for the wrapped value."""
    obj = self.obj
    return get_type_annotation(obj)
def get_type_annotation_string(self) -> str:
    """Render this symbol's type annotation as a string."""
    annotation = self.get_type_annotation()
    return make_annotation_string(annotation)
def namespace(self) -> Optional["Namespace"]:
    """The Namespace tracked for this symbol's object, if any."""
    return flow().namespaces.get(self.obj_id, None)
def containing_namespace(self) -> Optional["Namespace"]:
    """The containing scope viewed as a Namespace, or None if it isn't one."""
    scope = self.containing_scope
    if not scope.is_namespace_scope:
        return None
    return cast("Namespace", scope)
def full_path(self) -> Tuple[str, ...]:
    """Scope path extended with this symbol's (stringified) name."""
    prefix = self.containing_scope.full_path
    return prefix + (str(self.name),)
def full_namespace_path(self) -> str:
    """Namespace-qualified name as rendered by the containing scope."""
    scope = self.containing_scope
    return scope.make_namespace_qualified_name(self)
def is_garbage(self) -> bool:
    """Whether this symbol has been tombstoned (marked dead)."""
    tombstoned = self._tombstone
    return tombstoned
def is_new_garbage(self) -> bool:
    """Whether this symbol just became garbage (unreferenced but not yet tombstoned)."""
    if self._tombstone:
        # already known garbage, not *new* garbage
        return False
    containing_ns = self.containing_namespace
    numpy = sys.modules.get("numpy", None)
    if (
        numpy is not None
        and containing_ns is not None
        and isinstance(containing_ns.obj, numpy.ndarray)
    ):
        # numpy atoms are not interned (so assigning array elts to a variable does not bump refcount);
        # also seems that refcount is always 0, so just check if the containing namespace is garbage
        return self.containing_namespace.is_garbage
    return self.get_ref_count() == 0
def is_globally_accessible(self) -> bool:
    """Whether this symbol is reachable from the global scope."""
    scope = self.containing_scope
    return scope.is_globally_accessible
def is_user_accessible(self) -> bool:
    """Whether a user could plausibly still reference this symbol by name."""
    if not self.is_globally_accessible or self.is_anonymous or self.is_garbage:
        return False
    containing_ns = self.containing_namespace
    if containing_ns is not None and (
        containing_ns.is_anonymous or containing_ns.is_garbage
    ):
        return False
    return True
def _remove_self_from_aliases(self) -> None:
    """Detach this symbol from its object's alias set and drop the object ref."""
    current_id = self.obj_id
    cleanup_discard(flow().aliases, current_id, self)
    self.obj = None
def mark_garbage(self) -> None:
    """Tombstone this symbol; if every alias is dead too, kill the namespace."""
    if self.is_garbage:
        return
    self._tombstone = True
    ns = self.namespace
    if ns is None:
        return
    if all(alias.is_garbage for alias in self.aliases):
        ns.mark_garbage()
def collect_self_garbage(self) -> None:
    """Fully unlink a garbage symbol from the dependency graph and its scope.

    Must only be called after `mark_garbage`; clears alias bookkeeping,
    parent/child dependency edges, and the scope's name tables.
    """
    assert self.is_garbage
    flow().blocked_reactive_timestamps_by_symbol.pop(self, None)
    self._remove_self_from_aliases()
    # sever dependency edges in both directions
    for parent in self.parents:
        parent.children.pop(self, None)
    for child in self.children:
        child.parents.pop(self, None)
    containing_ns = self.containing_namespace
    if self.is_subscript and containing_ns is not None:
        containing_ns._subscript_symbol_by_name.pop(self.name, None)
    elif not self.is_subscript:
        self.containing_scope._symbol_by_name.pop(self.name, None)
    else:
        logger.warning(
            "could not find symbol %s in its scope %s", self, self.containing_scope
        )
    # TODO: remove from static / dynamic parent / children edges
    # need to keep this around for readable_name to work
    # self.containing_scope = None
# def update_type(self, new_type):
#     self.symbol_type = new_type
#     if self.is_function:
#         self.call_scope = self.containing_scope.make_child_scope(self.name)
#     else:
#         self.call_scope = None
def update_obj_ref(self, obj: Any, refresh_cached: bool = True) -> None:
    """Point this symbol at a new underlying object, migrating metadata.

    Detaches any widget observers, clears the tombstone, migrates the old
    object's namespace to the new object when appropriate, and re-registers
    aliases. When `refresh_cached` is True, re-snapshots obj id/type/len.
    """
    if self._num_ipywidget_observers > 0:
        # best-effort: drop our observer(s) on the old ipywidget
        try:
            self.obj.unobserve_all()
        except:  # noqa
            pass
        self._num_ipywidget_observers = 0
    if self._num_mercury_widget_observers > 0:
        # same for mercury widgets, resolved via their WidgetsManager
        try:
            self._mercury_widgets_manager.get_widget(
                self.obj.code_uid
            ).unobserve_all()
        except:  # noqa
            pass
        self._num_mercury_widget_observers = 0
    self._tombstone = False
    self._cached_out_of_sync = True
    if (
        flow().settings.mark_typecheck_failures_unsafe
        and self.cached_obj_type != type(obj)
    ):
        # a type change invalidates cached typecheck results for live cells
        for cell in self.cells_where_live:
            cell.invalidate_typecheck_result()
    self.cells_where_shallow_live.clear()
    self.cells_where_deep_live.clear()
    self.obj = obj
    if self.cached_obj_id is not None and self.cached_obj_id != self.obj_id:
        new_ns = flow().namespaces.get(self.obj_id, None)
        # don't overwrite existing namespace for this obj
        old_ns = flow().namespaces.get(self.cached_obj_id, None)
        if (
            old_ns is not None
            and old_ns.full_namespace_path == self.full_namespace_path
        ):
            if new_ns is None:
                logger.info("create fresh copy of namespace %s", old_ns)
                new_ns = old_ns.fresh_copy(obj)
                old_ns.transfer_symbols_to(new_ns)
            else:
                new_ns.scope_name = old_ns.scope_name
                new_ns.parent_scope = old_ns.parent_scope
        self._handle_aliases()
        if (
            old_ns is not None
            and len(flow().aliases.get(self.cached_obj_id, [])) == 0
        ):
            # nothing references the old object anymore; its namespace is dead
            old_ns.mark_garbage()
    if refresh_cached:
        self._refresh_cached_obj()
def invalidate_cached(self) -> None:
    """Drop the cached object identity/type so the next check forces a resync."""
    self._cached_out_of_sync = True
    self.cached_obj_type = None
    self.cached_obj_id = None
def get_ref_count(self) -> int:
    """Refcount of the wrapped object excluding ipyflow-internal references.

    Returns -1 when there is no live object (None or the NULL sentinel).
    """
    if self.obj is None or self.obj is Symbol.NULL:
        return -1
    # -1 for the temporary reference created for the getrefcount argument
    total = sys.getrefcount(self.obj) - 1
    # references held by alias symbols are internal, not user references
    total -= len(flow().aliases.get(self.obj_id, []))
    ns = flow().namespaces.get(self.obj_id, None)
    if ns is not None and ns.obj is not None and ns.obj is not Symbol.NULL:
        # the tracked namespace also holds one internal reference
        total -= 1
    return total
def _should_cancel_propagation(self, prev_obj: Optional[Any]) -> bool:
    """Decide whether a dependency update should NOT propagate from this symbol.

    :param prev_obj: the previous underlying object (or the NULL sentinel);
        None means "no previous object", which always allows propagation.
    """
    if prev_obj is None:
        return False
    if (
        flow().blocked_reactive_timestamps_by_symbol.get(self, -1)
        == self.timestamp.cell_num
    ):
        # reactive propagation explicitly blocked for this cell
        return False
    if not self._cached_out_of_sync or self.obj_id == self.cached_obj_id:
        # object unchanged since last snapshot: nothing new to propagate
        return True
    if self.obj is None or prev_obj is Symbol.NULL:
        # cancel only when both old and new values are "empty"
        return self.obj is None and prev_obj is Symbol.NULL
    return False
def _handle_aliases(self):
    """Move this symbol from its cached object's alias set to the current one."""
    alias_map = flow().aliases
    cleanup_discard(alias_map, self.cached_obj_id, self)
    alias_map.setdefault(self.obj_id, set()).add(self)
def update_stmt_node(self, stmt_node: Optional[ast.stmt]) -> Optional[ast.stmt]:
    """Record the statement that (re)defined this symbol; returns it unchanged.

    For functions/lambdas, also registers the statement->symbol mapping and
    creates a fresh call scope for tracing calls into the definition.
    """
    self.stmt_node = stmt_node
    self._funcall_live_symbols = None
    if self.is_function or (
        stmt_node is not None and isinstance(stmt_node, ast.Lambda)
    ):
        # TODO: in the case of lambdas, there will not necessarily be one
        # symbol for a given statement. We need a more precise way to determine
        # the symbol being called than by looking at the stmt in question.
        flow().statement_to_func_sym[id(stmt_node)] = self
        self.call_scope = self.containing_scope.make_child_scope(self.name)
        self.func_def_stmt = stmt_node
    return stmt_node
def _refresh_cached_obj(self) -> None:
    """Snapshot obj identity/type/len for later out-of-sync detection."""
    self._cached_out_of_sync = False
    # cache id/type/len only; holding the object itself would bump its refcount
    self.cached_obj_id = self.obj_id
    self.cached_obj_len = self.obj_len
    self.cached_obj_type = self.obj_type
def get_definition_args(self) -> List[ast.arg]:
    """Collect the definition's ast.arg nodes: positional + kwonly, then *args/**kwargs."""
    assert self.func_def_stmt is not None and isinstance(
        self.func_def_stmt, (ast.FunctionDef, ast.AsyncFunctionDef, ast.Lambda)
    )
    arguments = self.func_def_stmt.args
    collected = list(arguments.args)
    collected.extend(arguments.kwonlyargs)
    for star_arg in (arguments.vararg, arguments.kwarg):
        if star_arg is not None:
            collected.append(star_arg)
    return collected
def _match_call_args_with_definition_args(
    self,
) -> Generator[Tuple[ast.arg, List["Symbol"]], None, None]:
    """Pair each definition parameter with the symbols of its call-site argument.

    Yields (ast.arg, resolved symbols) for positional args, explicit keywords,
    and finally unconsumed defaults. Gives up silently when no call node can
    be located or the keywords don't match the definition.
    """
    # TODO: handle posonlyargs, kwonlyargs
    assert self.func_def_stmt is not None and isinstance(
        self.func_def_stmt, (ast.FunctionDef, ast.AsyncFunctionDef, ast.Lambda)
    )
    caller_node = self._get_calling_ast_node()
    if caller_node is None or not isinstance(caller_node, ast.Call):
        return
    # parameters with defaults are the only ones addressable by keyword here
    kwarg_by_name = {
        arg_key.arg: arg_key
        for arg_key in self.func_def_stmt.args.args[
            -len(self.func_def_stmt.args.defaults) :
        ]
    }
    if not all(keyword.arg in kwarg_by_name for keyword in caller_node.keywords):
        logger.warning("detected mismatched kwargs from caller node to definition")
        return
    def_args = self.func_def_stmt.args.args
    if len(self.func_def_stmt.args.defaults) > 0:
        # strip defaulted params; they are handled via keywords/defaults below
        def_args = def_args[: -len(self.func_def_stmt.args.defaults)]
    if len(def_args) > 0 and def_args[0].arg == "self":
        # FIXME: this is bad and I should feel bad
        def_args = def_args[1:]
    for def_arg, call_arg in zip(def_args, caller_node.args):
        if isinstance(call_arg, ast.Starred):
            # give up
            # TODO: handle this case
            break
        yield def_arg, tracer().resolve_loaded_symbols(call_arg)
    seen_keys = set()
    for keyword in caller_node.keywords:
        keyword_key, keyword_value = keyword.arg, keyword.value
        if keyword_value is None:
            continue
        seen_keys.add(keyword_key)
        yield kwarg_by_name[keyword_key], tracer().resolve_loaded_symbols(
            keyword_value
        )
    # any defaulted param not passed explicitly falls back to its default expr
    for arg_key, arg_value in zip(
        self.func_def_stmt.args.args[-len(self.func_def_stmt.args.defaults) :],
        self.func_def_stmt.args.defaults,
    ):
        if arg_key.arg in seen_keys:
            continue
        yield arg_key, tracer().resolve_loaded_symbols(arg_value)
def _get_calling_ast_node(self) -> Optional[ast.Call]:
    """Locate the ast.Call node currently invoking this function symbol, if any.

    Returns None when tracing is off, for builtins, for dunder item accessors,
    for properties, or when the lexical call stack yields no Call node.
    """
    if tracer().tracing_disabled_since_last_module_stmt or (
        not hasattr(self.obj, "__module__")
        and getattr(type(self.obj), "__module__", None) == "builtins"
    ):
        return None
    if self.func_def_stmt is not None and isinstance(
        self.func_def_stmt, (ast.FunctionDef, ast.AsyncFunctionDef)
    ):
        if self.name in ("__getitem__", "__setitem__", "__delitem__"):
            # TODO: handle case where we're looking for a subscript for the calling node
            return None
        for decorator in self.func_def_stmt.decorator_list:
            if isinstance(decorator, ast.Name) and decorator.id == "property":
                # TODO: handle case where we're looking for an attribute for the calling node
                return None
    lexical_call_stack = tracer().lexical_call_stack
    if len(lexical_call_stack) == 0:
        return None
    prev_node_id_in_cur_frame_lexical = lexical_call_stack.get_field(
        "prev_node_id_in_cur_frame_lexical"
    )
    caller_ast_node = tracer().ast_node_by_id.get(
        prev_node_id_in_cur_frame_lexical, None
    )
    if caller_ast_node is None or not isinstance(caller_ast_node, ast.Call):
        return None
    return caller_ast_node
def create_symbols_for_call_args(self, call_frame: FrameType) -> None:
    """Create symbols in the call scope for each parameter of an invoked function.

    Parameters matched to call-site arguments get those arguments' symbols as
    dependencies; unmatched parameters get empty dependency sets.
    """
    assert self.func_def_stmt is not None
    seen_def_args = set()
    logger.info("create symbols for call to %s", self)
    for def_arg, deps in self._match_call_args_with_definition_args():
        seen_def_args.add(def_arg.arg)
        self.call_scope.upsert_symbol_for_name(
            def_arg.arg,
            call_frame.f_locals.get(def_arg.arg),
            deps,
            self.func_def_stmt,
            propagate=False,
            symbol_node=def_arg,
        )
        logger.info("def arg %s matched with deps %s", def_arg, deps)
    # any parameter not matched above still gets a (dependency-free) symbol
    for def_arg in self.get_definition_args():
        if def_arg.arg in seen_def_args:
            continue
        self.call_scope.upsert_symbol_for_name(
            def_arg.arg,
            None,
            set(),
            self.func_def_stmt,
            propagate=False,
            symbol_node=def_arg,
        )
def is_waiting(self) -> bool:
    """Whether this symbol (or a namespace member) is waiting on a stale dependency."""
    if self.disable_warnings or self._temp_disable_warnings:
        return False
    if self.waiting_timestamp < self.required_timestamp.cell_num:
        # a dependency was updated after this symbol's last refresh
        return True
    elif flow().min_timestamp == -1:
        return len(self.namespace_waiting_symbols) > 0
    else:
        # TODO: guard against infinite recurision
        return any(sym.is_waiting for sym in self.namespace_waiting_symbols)
def is_shallow_stale(self) -> bool:
    """Whether this symbol itself (ignoring namespace members) is stale."""
    warnings_suppressed = self.disable_warnings or self._temp_disable_warnings
    if warnings_suppressed:
        return False
    return self.waiting_timestamp < self.required_timestamp.cell_num
def _is_ready_or_waiting_at_position_impl(self, pos: int, deep: bool) -> bool:
    """Check (uncached) whether this symbol is waiting w.r.t. notebook position `pos`.

    Scans parent dependencies introduced at or before `pos` for updates that
    happened after the dependency edge was created; when `deep`, also checks
    waiting namespace members.
    """
    for par, timestamps in self.parents.items():
        for ts in timestamps:
            dep_introduced_pos = cells().at_timestamp(ts).position
            if dep_introduced_pos > pos:
                # dependency introduced below `pos`: irrelevant at this position
                continue
            for updated_ts in par.updated_timestamps:
                if cells().at_timestamp(updated_ts).position > dep_introduced_pos:
                    continue
                if updated_ts.cell_num > ts.cell_num or par.is_waiting_at_position(
                    dep_introduced_pos
                ):
                    # logger.error("sym: %s", self)
                    # logger.error("pos: %s", pos)
                    # logger.error("parent: %s", par)
                    # logger.error("dep introdced ts: %s", ts)
                    # logger.error("dep introdced pos: %s", dep_introduced_pos)
                    # logger.error("par updated ts: %s", updated_ts)
                    # logger.error("par updated position: %s", cells().from_timestamp(updated_ts).position)
                    return True
    if deep:
        for sym in self.namespace_waiting_symbols:
            if sym.is_waiting_at_position(pos):
                return True
    return False
def is_waiting_at_position(self, pos: int, deep: bool = True) -> bool:
    """Position-aware waiting check, memoized per execution counter.

    Short-circuits on the cheap global waiting check first; in ANY_ORDER
    flow mode, position is irrelevant so any waiting symbol is waiting here.
    """
    if deep:
        if not self.is_waiting:
            return False
    else:
        if not self.is_shallow_stale:
            return False
    if flow().mut_settings.flow_order == FlowDirection.ANY_ORDER:
        return True
    if cells().exec_counter() > self._last_computed_ready_or_waiting_cache_ts:
        # new execution: results from previous executions are stale
        self._is_ready_or_waiting_at_position_cache.clear()
        self._last_computed_ready_or_waiting_cache_ts = cells().exec_counter()
    if (pos, deep) in self._is_ready_or_waiting_at_position_cache:
        return self._is_ready_or_waiting_at_position_cache[pos, deep]
    # preemptively set this entry to 'False' in the cache to avoid infinite loops
    self._is_ready_or_waiting_at_position_cache[pos, deep] = False
    is_waiting = self._is_ready_or_waiting_at_position_impl(pos, deep)
    self._is_ready_or_waiting_at_position_cache[pos, deep] = is_waiting
    return is_waiting
def should_mark_waiting(self, updated_dep):
    """Whether an update to `updated_dep` should mark this symbol as waiting."""
    return not (self.disable_warnings or updated_dep is self)
def _is_underscore_or_simple_assign(self, new_deps: Set["Symbol"]) -> bool:
    """Whether this update is the `_` pseudo-variable or a trivial `a = b` aliasing.

    Such updates should not refresh namespace descendants, since the object
    itself did not change.
    """
    if self.is_underscore:
        # FIXME: distinguish between explicit assignment to _ from user and implicit assignment from kernel
        return True
    if not isinstance(self.stmt_node, (ast.Assign, ast.AnnAssign)):
        return False
    if len(new_deps) != 1:
        return False
    only_dep: Symbol = next(iter(new_deps))
    # obj ids can get reused for anon symbols like literals
    return not only_dep.is_anonymous and self.obj_id == only_dep.obj_id
def update_deps(
    self,
    new_deps: Set["Symbol"],
    prev_obj: Any = None,
    overwrite: bool = True,
    mutated: bool = False,
    deleted: bool = False,
    propagate_to_namespace_descendents: bool = False,
    propagate: bool = True,
    refresh: bool = True,
    is_cascading_reactive: Optional[bool] = None,
) -> None:
    """Rewire this symbol's dependency edges and propagate the update.

    :param new_deps: the new parent symbols.
    :param prev_obj: previous object (used to decide whether to propagate).
    :param overwrite: replace (True) vs. augment (False) existing parents.
    :param mutated: the update came from an in-place mutation.
    :param deleted: the symbol was deleted.
    :param propagate_to_namespace_descendents: force namespace propagation
        even for mutations.
    :param propagate: run the UpdateProtocol afterwards.
    :param refresh: bump this symbol's timestamp.
    :param is_cascading_reactive: explicit cascading-reactivity override;
        None leaves the current state alone.
    """
    if self.is_import and self.obj_id == self.cached_obj_id:
        # skip updates for imported symbols; just bump the version
        self.refresh()
        return
    if overwrite and not self.is_globally_accessible:
        self.watchpoints.clear()
    if mutated and self.is_immutable:
        # mutating an immutable object is a no-op
        return
    # if we get here, no longer implicit
    self._implicit = False
    # quick last fix to avoid overwriting if we appear inside the set of deps to add (or a 1st order ancestor)
    # TODO: check higher-order ancestors too?
    overwrite = overwrite and self not in new_deps
    overwrite = overwrite and not any(
        self in new_dep.parents for new_dep in new_deps
    )
    logger.warning("symbol %s new deps %s", self, new_deps)
    new_deps.discard(self)
    if overwrite:
        # unlink parents that are no longer dependencies
        for parent in self.parents.keys() - new_deps:
            parent.children.pop(self, None)
            self.parents.pop(parent, None)
    # link any newly introduced parents, recording when the edge appeared
    for new_parent in new_deps - self.parents.keys():
        if new_parent is None:
            continue
        new_parent.children.setdefault(self, []).append(Timestamp.current())
        self.parents.setdefault(new_parent, []).append(Timestamp.current())
    self.required_timestamp = Timestamp.uninitialized()
    self.fresher_ancestors.clear()
    self.fresher_ancestor_timestamps.clear()
    if mutated or isinstance(self.stmt_node, ast.AugAssign):
        # mutations / augmented assigns also *use* the previous value
        self.update_usage_info()
    if (
        (mutated or overwrite)
        and Timestamp.current().is_initialized
        and not self.is_immutable
        and not self.is_mutation_virtual_symbol
        and not self.is_anonymous
        and self.containing_scope.is_global
        and not self.is_underscore
        and not self.is_implicit
        and self.obj_type is not type
        and not self.is_class
        and self.namespace is not None
    ):
        # track the mutation via a synthetic member of the namespace
        self.namespace.upsert_symbol_for_name(
            self.IPYFLOW_MUTATION_VIRTUAL_SYMBOL_NAME, object(), propagate=False
        )
    propagate = propagate and (
        mutated or deleted or not self._should_cancel_propagation(prev_obj)
    )
    try:
        prev_cell = cells().current_cell().prev_cell
    except KeyError:
        prev_cell = None
    prev_cell_ctr = -1 if prev_cell is None else prev_cell.cell_ctr
    if overwrite:
        # a fresh assignment resets reactivity state
        flow_ = flow()
        self._cascading_reactive_cell_num = -1
        flow_.updated_reactive_symbols.discard(self)
        flow_.updated_deep_reactive_symbols.discard(self)
    if is_cascading_reactive is not None:
        is_cascading_reactive = is_cascading_reactive or any(
            sym.is_cascading_reactive_at_counter(prev_cell_ctr) for sym in new_deps
        )
        if is_cascading_reactive:
            self.bump_cascading_reactive_cell_num()
    if refresh:
        self.refresh(
            # rationale: if this is a mutation for which we have more precise information,
            # then we don't need to update the ns descendents as this will already have happened.
            # also don't update ns descendents for things like `a = b`
            refresh_descendent_namespaces=propagate
            and not (mutated and not propagate_to_namespace_descendents)
            and not self._is_underscore_or_simple_assign(new_deps),
            refresh_namespace_waiting=not mutated,
        )
    if propagate:
        UpdateProtocol(self)(
            new_deps, mutated, propagate_to_namespace_descendents, refresh
        )
    self._refresh_cached_obj()
    if self.is_class:
        # pop pending class defs and update obj ref
        pending_class_ns = tracer().pending_class_namespaces.pop()
        pending_class_ns.update_obj_ref(self.obj)
    for dep in new_deps:
        if dep.obj is self.obj and dep.call_scope is not None:
            # aliasing a function: inherit its call scope / definition
            self.call_scope = dep.call_scope
            self.func_def_stmt = dep.func_def_stmt
    ns = self.namespace
    if ns is not None and ns.scope_name == "self" and isinstance(self.name, str):
        # fixup namespace name if necessary
        # can happen if symbol for 'self' was created in a previous __init__
        ns.scope_name = self.name
    if overwrite and len(flow().aliases[self.obj_id]) == 1:
        self._handle_possible_widget_creation()
        self._handle_possible_mercury_widget_creation()
def _mercury_widgets_manager(self):
    """Best-effort lookup of the mercury WidgetsManager for this object.

    Returns None unless the object looks like a mercury widget (has `code_uid`).
    """
    if self.obj is None:
        return None
    if self.is_obj_lazy_module or not hasattr(self.obj, "code_uid"):
        return None
    try:
        # the manager lives on the module defining the widget's class;
        # any lookup failure just means "not a mercury widget"
        return sys.modules.get(self.obj.__class__.__module__).WidgetsManager
    except:  # noqa
        return None
def _handle_possible_widget_creation(self) -> None:
    """If the wrapped object is an ipywidget, track its `value` and observe changes."""
    if self.obj is None:
        return
    Widget = getattr(sys.modules.get("ipywidgets"), "Widget", None)
    if (
        Widget is None
        or self.is_obj_lazy_module
        or not isinstance(self.obj, Widget)
        or not hasattr(self.obj, "observe")
        or not hasattr(self.obj, "value")
    ):
        return
    # expose the widget's `value` trait as a namespace member symbol
    self.namespaced().upsert_symbol_for_name(
        "value", getattr(self.obj, "value", None), set(), self.stmt_node
    )
    self.obj.observe(self._observe_widget)
    self._num_ipywidget_observers += 1
def _handle_possible_mercury_widget_creation(self) -> None:
    """If the wrapped object is a mercury widget, track its `value` and observe changes."""
    WidgetsManager = self._mercury_widgets_manager
    if WidgetsManager is None:
        return
    widget = WidgetsManager.get_widget(self.obj.code_uid)
    # expose the widget's `value` as a namespace member symbol
    self.namespaced().upsert_symbol_for_name(
        "value", getattr(widget, "value", None), set(), self.stmt_node
    )
    widget.observe(self._observe_widget)
    self._num_mercury_widget_observers += 1
def _observe_widget(self, msg: Dict[str, Any]) -> None:
    """Widget-change callback: record the new value as a synthetic statement.

    Fabricates an assignment statement for the `value` member, timestamps it
    just after this cell's original statements, and links bidirectional data
    deps so slicing sees the interactive update; finally schedules a
    (debounced) reactive re-execution.

    :param msg: traitlets-style change dict; only `name == "value"` with a
        `"new"` entry is handled.
    """
    if msg.get("name") != "value" or "new" not in msg:
        return
    ns = self.namespace
    sym = ns.lookup_symbol_by_name_this_indentation("value")
    if sym is None:
        return
    newval = msg["new"]
    current_ts_cell = cells().at_timestamp(self._timestamp)
    # synthesize `<sym> = <newval>` as an extra statement on the cell
    current_ts_cell._extra_stmt = ast.parse(f"{sym.readable_name} = {newval}").body[
        0
    ]
    sym._override_ready_liveness_cell_num = flow().cell_counter() + 1
    sym._override_timestamp = Timestamp(
        self._timestamp.cell_num, current_ts_cell.num_original_stmts
    )
    sym.update_obj_ref(newval)
    statements().create_and_track(
        current_ts_cell._extra_stmt,
        timestamp=sym._override_timestamp,
        override=True,
    )
    with dynamic_slicing_context():
        # link the override timestamp with the original in both directions
        flow().add_data_dep(
            sym._timestamp,
            sym._override_timestamp,
            sym,
        )
        flow().add_data_dep(
            sym._override_timestamp,
            sym._timestamp,
            sym,
        )
    self.debounced_exec_schedule(reactive=True)
def debounced_exec_schedule(self, reactive: bool) -> None:
    """Request a debounced re-execution of the cell where this symbol last updated."""
    cell_id = cells().at_timestamp(self.timestamp).cell_id
    if _debounced_exec_schedule(cell_id, reactive=reactive):
        flow().debounced_exec_schedule_pending = True
def namespaced(self) -> "Namespace":
    """Return this symbol's namespace, creating one if it does not exist yet."""
    existing = self.namespace
    if existing is None:
        return namespaces()(self.obj, self.name, parent_scope=self.containing_scope)
    return existing
def update_usage_info_one_timestamp(
    self,
    used_time: Timestamp,
    updated_time: Timestamp,
    is_static: bool,
) -> bool:
    """Record a single (use-time, update-time) data dependency.

    :return: whether this counts as a usage; for static (liveness) analysis
        the update's cell must additionally still be visible.
    """
    flow_ = flow()
    # static liveness always counts; dynamic uses require update-before-use
    is_usage = is_static or updated_time < used_time
    if is_usage:
        with slicing_context(is_static=is_static):
            flow_.add_data_dep(
                used_time,
                updated_time,
                self,
            )
        if is_static:
            is_usage = cells().at_timestamp(updated_time).is_visible
    return is_usage
def update_usage_info(
    self,
    used_time: Optional[Timestamp] = None,
    used_node: Optional[ast.AST] = None,
    exclude_ns: bool = False,
    is_static: bool = False,
    is_blocking: bool = False,
) -> "Symbol":
    """Record that this symbol was used (dynamically or via static liveness).

    :param used_time: when the use occurred (defaults to now).
    :param used_node: the AST node of the use, if known.
    :param exclude_ns: skip propagating usage info to namespace members.
    :param is_static: static (liveness) vs. dynamic usage.
    :param is_blocking: blocked uses record no data dependencies.
    :return: self, for chaining.
    """
    is_blocking = is_blocking or id(used_node) in tracer().blocking_node_ids
    if used_time is None:
        used_time = Timestamp.current()
    if flow().is_dev_mode:
        logger.info(
            "sym `%s` used in cell %d last updated in cell %d",
            self,
            used_time.cell_num,
            self.timestamp,
        )
    timestamp_by_used_time = (
        self.timestamp_by_liveness_time
        if is_static
        else self.timestamp_by_used_time
    )
    if not is_blocking:
        is_usage = False
        # NOTE(review): ts_to_use is never reassigned inside the loop below —
        # presumably it was meant to track the matched updated_ts; confirm.
        ts_to_use = self._timestamp
        # walk update timestamps newest-first until one registers as a usage
        for updated_ts in sorted(self.updated_timestamps, reverse=True):
            if not updated_ts.is_initialized:
                continue
            is_usage = self.update_usage_info_one_timestamp(
                used_time,
                updated_ts,
                is_static=is_static,
            )
            if is_usage or not is_static:
                break
        if is_usage and used_time.is_initialized:
            timestamp_by_used_time[used_time] = ts_to_use
            if used_node is not None:
                self.used_node_by_used_time[used_time] = used_node
    if exclude_ns:
        return self
    # propagate usage to namespace members (which themselves skip their ns)
    for sym in self.get_namespace_symbols(recurse=True):
        sym.update_usage_info(
            used_time=used_time,
            used_node=None,
            exclude_ns=True,
            is_static=is_static,
            is_blocking=is_blocking,
        )
    return self
def get_namespace_symbols(
    self, recurse: bool = False, seen: Optional[Set["Symbol"]] = None
) -> Generator["Symbol", None, None]:
    """Yield symbols in this symbol's namespace, optionally recursing (cycle-safe)."""
    ns = self.namespace
    if ns is None:
        return
    seen = set() if seen is None else seen
    if self in seen:
        return
    seen.add(self)
    for member in ns.all_symbols_this_indentation():
        yield member
        if not recurse:
            continue
        yield from member.get_namespace_symbols(recurse=recurse, seen=seen)
def _take_timestamp_snapshots(
    self, ts_ubound: Timestamp, seen: Optional[Set["Symbol"]] = None
) -> None:
    """Append a (timestamp, upper bound) snapshot for this symbol and containers.

    Recursively snapshots aliases of every containing namespace so whole-object
    versions can later be reconstructed. `seen` guards against alias cycles.
    """
    if seen is None:
        seen = set()
    if self in seen:
        return
    seen.add(self)
    self._snapshot_timestamps.append(self._timestamp)
    self._snapshot_timestamp_ubounds.append(ts_ubound)
    containing_ns = self.containing_namespace
    if containing_ns is None:
        return
    for alias in flow().aliases.get(containing_ns.obj_id, []):
        alias._take_timestamp_snapshots(ts_ubound, seen=seen)
def refresh(
    self,
    take_timestamp_snapshots: bool = True,
    refresh_descendent_namespaces: bool = False,
    refresh_namespace_waiting: bool = True,
    timestamp: Optional[Timestamp] = None,
    seen: Optional[Set["Symbol"]] = None,
) -> None:
    """Bump this symbol's timestamp and propagate freshness bookkeeping.

    :param take_timestamp_snapshots: record a version snapshot when advancing.
    :param refresh_descendent_namespaces: recursively refresh namespace members.
    :param refresh_namespace_waiting: clear (or refresh) waiting members too.
    :param timestamp: explicit new timestamp (defaults to current).
    :param seen: cycle guard for the recursive namespace refresh.
    """
    orig_timestamp = self._timestamp
    self._timestamp = Timestamp.current() if timestamp is None else timestamp
    self._override_timestamp = None
    if take_timestamp_snapshots and (
        orig_timestamp < self._timestamp or len(self._snapshot_timestamps) == 0
    ):
        self._take_timestamp_snapshots(self._timestamp)
    self.updated_timestamps.add(self._timestamp)
    self._temp_disable_warnings = False
    for cell in self.cells_where_live:
        cell.add_used_cell_counter(self, self._timestamp.cell_num)
    ns = self.containing_namespace
    if ns is not None:
        # logger.error("bump version of %s due to %s (value %s)", ns.full_path, self.full_path, self.obj)
        ns.max_descendent_timestamp = self.shallow_timestamp
        for alias in flow().aliases.get(ns.obj_id, []):
            for cell in alias.cells_where_deep_live:
                cell.add_used_cell_counter(alias, self._timestamp.cell_num)
    if refresh_descendent_namespaces:
        if seen is None:
            seen = set()
        if self in seen:
            return
        seen.add(self)
        ns = self.namespace
        if ns is not None:
            for sym in ns.all_symbols_this_indentation(exclude_class=True):
                # this is to handle cases like `x = x.mutate(42)`, where
                # we could have changed some member of x but returned the
                # original object -- in this case, just assume that all
                # the stale namespace descendents are no longer stale, as
                # this is likely the user intention. For an example, see
                # `test_external_object_update_propagates_to_stale_namespace_symbols()`
                # in `test_frontend_checker.py`
                if not sym.is_waiting or refresh_namespace_waiting:
                    # logger.error(
                    #     "refresh %s due to %s (value %s) via namespace %s",
                    #     sym.full_path,
                    #     self.full_path,
                    #     self.obj,
                    #     ns.full_path,
                    # )
                    sym.refresh(
                        refresh_descendent_namespaces=True,
                        timestamp=self.shallow_timestamp,
                        take_timestamp_snapshots=False,
                        seen=seen,
                    )
        if refresh_namespace_waiting:
            self.namespace_waiting_symbols.clear()
def resync_if_necessary(self, refresh: bool) -> None:
    """Reconcile this global symbol with the object currently in `user_ns`.

    Handles out-of-band rebinding (e.g. `lst.append` growing a list whose
    last element was aliased) by fixing up aliases/namespaces, and detects
    length-only changes; optionally bumps the timestamp afterwards.
    """
    if not self.containing_scope.is_global:
        return
    try:
        obj = shell().user_ns[self.name]
    except:  # noqa
        # cinder runtime can throw an exception here due to lazy imports that fail
        return
    if self.obj is not obj:
        flow_ = flow()
        for alias in flow_.aliases.get(
            self.cached_obj_id, set()
        ) | flow_.aliases.get(self.obj_id, set()):
            containing_namespace = alias.containing_namespace
            if containing_namespace is None:
                continue
            containing_obj = containing_namespace.obj
            if containing_obj is None:
                continue
            # TODO: handle dict case too
            if isinstance(containing_obj, list) and containing_obj[-1] is obj:
                # retarget the alias at the list's new last element
                containing_namespace._subscript_symbol_by_name.pop(alias.name, None)
                alias.name = len(containing_obj) - 1
                alias.update_obj_ref(obj)
                containing_namespace._subscript_symbol_by_name[alias.name] = alias
        cleanup_discard(flow_.aliases, self.cached_obj_id, self)
        cleanup_discard(flow_.aliases, self.obj_id, self)
        flow_.aliases.setdefault(id(obj), set()).add(self)
        self.update_obj_ref(obj)
    elif self.obj_len != self.cached_obj_len:
        # same object, but its length changed (e.g. in-place append)
        self._refresh_cached_obj()
    else:
        return
    if refresh:
        self.refresh()
# upper bound on total element count when flattening an object for memoization
# comparisons; larger objects are treated as non-comparable
_MAX_MEMOIZE_COMPARABLE_SIZE = 10**6
def _equal(obj1: Any, obj2: Any) -> bool:
    """Default memoization comparator: plain `==`."""
    result = obj1 == obj2
    return result
def _array_equal(obj1: Any, obj2: Any) -> bool:
    """Comparator for numpy arrays: True iff all elements compare equal.

    Uses `np.all` instead of the legacy `np.alltrue` alias: `alltrue` was
    deprecated in NumPy 1.25 and removed in NumPy 2.0, where the old code's
    bare except would silently make every comparison return False.
    Any comparison failure (e.g. incompatible shapes) still yields False.
    """
    import numpy as np

    try:
        return bool(np.all(obj1 == obj2))
    except:  # noqa
        return False
def _dataframe_equal(obj1: Any, obj2: Any) -> bool:
    """Comparator for pandas-like objects exposing `.equals`; False on any error."""
    try:
        equals_method = obj1.equals
        return equals_method(obj2)  # type: ignore
    except:  # noqa
        return False
def _make_list_eq(
    eqs: List[Callable[[Any, Any], bool]]
) -> Callable[[List[Any], List[Any]], bool]:
    """Build an elementwise comparator applying `eqs[i]` to the i-th element pair."""

    def compare(lst1: List[Any], lst2: List[Any]) -> bool:
        # all() short-circuits on the first mismatch, like the original loop
        return all(eq(a, b) for eq, a, b in zip(eqs, lst1, lst2))

    return compare
def make_memoize_comparable_for_obj(
    cls, obj: Any, seen_ids: Set[int]
) -> Tuple[Any, Optional[Callable[[Any, Any], bool]], int]:
    """Build a (comparable, comparator, size) triple for *obj* for memoization.

    Returns ``(cls.NULL, None, -1)`` when *obj* cannot be made comparable
    (cycles, unsupported types, or a comparable exceeding the size cap).
    NOTE(review): first parameter is `cls` — presumably a @classmethod whose
    decorator was lost in extraction; confirm upstream.
    """
    # Primitives are directly comparable via `==`.
    if isinstance(obj, (bool, bytes, bytearray, int, float, str)):
        return obj, cls._equal, 1
    # Cycle detection; tuples are skipped since identical tuples may be interned.
    if not isinstance(obj, tuple):
        if id(obj) in seen_ids:
            return cls.NULL, None, -1
        seen_ids.add(id(obj))
    if isinstance(obj, (dict, frozenset, list, set, tuple)):
        size = 0
        comp = []
        eqs: List[Callable[[Any, Any], bool]] = []
        # Dicts are normalized to sorted (key, value) pairs for a stable order.
        if isinstance(obj, dict):
            iterable: "Iterable[Any]" = sorted(obj.items())
        else:
            iterable = obj
        for inner in iterable:
            inner_comp, inner_eq, inner_size = cls.make_memoize_comparable_for_obj(
                inner, seen_ids
            )
            if inner_comp is cls.NULL or inner_eq is None:
                return cls.NULL, None, -1
            size += inner_size + 1
            # Give up once the comparable grows past the memoization cap.
            if size > cls._MAX_MEMOIZE_COMPARABLE_SIZE:
                return cls.NULL, None, -1
            comp.append(inner_comp)
            eqs.append(inner_eq)
        if all(eq is cls._equal for eq in eqs):
            iter_eq: Callable[[Any, Any], bool] = cls._equal
        elif isinstance(obj, (frozenset, set)):
            # Unordered containers with non-trivial element comparators are unsupported.
            return cls.NULL, None, -1
        else:
            iter_eq = cls._make_list_eq(eqs)
        ret = frozenset(comp) if isinstance(obj, (frozenset, set)) else comp
        return ret, iter_eq, size
    elif type(obj) in (type, FunctionType):
        # try to determine it based on the symbol
        for sym in flow().aliases.get(id(obj), []):
            comp, eq = sym.make_memoize_comparable(seen_ids=seen_ids)
            if comp is not cls.NULL and eq is not None:
                return comp, eq, 1
        return cls.NULL, None, -1
    else:
        # hacks to check if they are arrays, dataframes, etc without explicitly importing these
        module = getattr(type(obj), "__module__", "")
        if module.startswith("numpy"):
            name = getattr(type(obj), "__name__", "")
            if name.endswith("ndarray"):
                return obj, cls._array_equal, obj.size
            else:
                numpy = sys.modules.get("numpy")
                if numpy is not None and isinstance(obj, numpy.number):
                    return obj, cls._equal, 1
        elif module.startswith(("modin", "pandas")):
            name = getattr(type(obj), "__name__", "")
            if name.endswith(("DataFrame", "Series")):
                return obj, cls._dataframe_equal, obj.size
        elif module.startswith("ipywidgets"):
            ipywidgets = sys.modules.get("ipywidgets")
            if (
                ipywidgets is not None
                and isinstance(obj, ipywidgets.Widget)
                and hasattr(obj, "value")
            ):
                # Widgets are compared by their `value` trait.
                return obj.value, cls._equal, 1
        # Anything unrecognized is not memoize-comparable.
        return cls.NULL, None, -1
def make_memoize_comparable(
    self, seen_ids: Optional[Set[int]] = None
) -> Tuple[Any, Optional[Callable[[Any, Any], bool]]]:
    """Return a (comparable, comparator) pair for this symbol, or ``(NULL, None)``.

    Class/function definitions are compared by their unparsed source plus the
    comparables of their parent symbols; all other objects delegate to
    :meth:`make_memoize_comparable_for_obj`.
    """
    if seen_ids is None:
        seen_ids = set()
    if isinstance(
        self.stmt_node, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)
    ):
        # Compare definitions textually; any non-trivially-comparable parent
        # makes the whole definition non-comparable.
        comps = [astunparse.unparse(self.stmt_node)]
        for sym in sorted(self.parents.keys()):
            par_comp, eq = sym.make_memoize_comparable(seen_ids=seen_ids)
            if par_comp is self.NULL or eq is not self._equal:
                return self.NULL, None
            comps.append(par_comp)
        return comps, self._equal
    obj, eq, size = self.make_memoize_comparable_for_obj(self.obj, seen_ids)
    if size > self._MAX_MEMOIZE_COMPARABLE_SIZE:
        return self.NULL, None
    else:
        return obj, eq
The code snippet above provides the dependencies needed to implement the `users` function. Write a Python function `def users(sym: Any) -> List[Symbol]` that solves the following problem:
Given the programmatic usage of some symbol, look up the corresponding users of that symbol.
Here is the function:
def users(sym: Any) -> List[Symbol]:
    """
    Given the programmatic usage of some symbol,
    look up the corresponding users of that symbol.
    """
    # See the `argument` handler in ipyflow_tracer for the
    # actual implementation; this is just a stub that ensures
    # that handler was able to find something.
    sym = _validate(sym)
    user_syms: List[Symbol] = []
    for child in sym.children.keys():
        if not child.is_anonymous:
            user_syms.append(child)
    return user_syms
from typing import TYPE_CHECKING, Any, List, Set, Union, cast
from ipyflow.data_model.symbol import Symbol
from ipyflow.data_model.timestamp import Timestamp
from ipyflow.tracing.watchpoint import Watchpoints
def _validate(sym: Any) -> Symbol:
    """Raise ``ValueError`` unless *sym* is a proper :class:`Symbol` instance."""
    # None is rejected before the isinstance check, matching the original's
    # short-circuit behavior.
    if sym is not None and isinstance(sym, Symbol):
        return cast(Symbol, sym)
    raise ValueError("unable to lookup metadata for symbol")
def _traverse(sym: Symbol, seen: Set[Symbol], attr: str) -> None:
    """Walk the graph reachable through ``getattr(node, attr)`` from *sym*,
    accumulating every visited symbol into *seen* (mutated in place)."""
    # Iterative depth-first traversal; visiting order differs from the
    # recursive original, but the final `seen` set is identical.
    pending = [sym]
    while pending:
        node = pending.pop()
        if node in seen:
            continue
        seen.add(node)
        pending.extend(getattr(node, attr).keys())
class Symbol:
    """A tracked program variable: ties a name in some scope to its runtime
    object plus the dependency metadata ipyflow needs for dataflow analysis."""

    # Sentinel standing in for "no value" (distinct from None, a valid obj).
    NULL = object()
    # object for virtual display symbol
    DISPLAY = object()
    # Types whose instances can never be mutated in place.
    IMMUTABLE_TYPES = set(IMMUTABLE_PRIMITIVE_TYPES)
    # Name of the synthetic namespace entry used to record in-place mutations.
    IPYFLOW_MUTATION_VIRTUAL_SYMBOL_NAME = "__ipyflow_mutation"
def __init__(
    self,
    name: SupportedIndexType,
    symbol_type: SymbolType,
    obj: Any,
    containing_scope: "Scope",
    stmt_node: Optional[ast.stmt] = None,
    symbol_node: Optional[ast.AST] = None,
    refresh_cached_obj: bool = False,
    implicit: bool = False,
) -> None:
    """Create a symbol binding *name* (in *containing_scope*) to *obj*.

    `implicit` marks symbols created while tracing non-store contexts (the
    object may predate the symbol); `refresh_cached_obj` snapshots the object
    immediately and is only valid for implicit symbols with no statement.
    """
    if refresh_cached_obj:
        # TODO: clean up redundancies
        assert implicit
        assert stmt_node is None
    self.name = name
    self.symbol_type = symbol_type
    self.obj = obj
    # additional user-specific metadata
    self._tags: Set[str] = set()
    self.extra_metadata: Dict[str, Any] = {}
    # Garbage flag and cached-object snapshot (id/type/len, no strong ref).
    self._tombstone = False
    self._cached_out_of_sync = True
    self.cached_obj_id: Optional[int] = None
    self.cached_obj_type: Optional[Type[object]] = None
    self.cached_obj_len: Optional[int] = None
    if refresh_cached_obj:
        self._refresh_cached_obj()
    self.containing_scope = containing_scope or flow().global_scope
    self.call_scope: Optional[Scope] = None
    self.func_def_stmt: Optional[ast.stmt] = None
    self.stmt_node = self.update_stmt_node(stmt_node)
    self.symbol_node = symbol_node
    self._funcall_live_symbols = None
    # Dependency graph edges, each annotated with the timestamps at which
    # the edge was (re)established.
    self.parents: Dict["Symbol", List[Timestamp]] = {}
    self.children: Dict["Symbol", List[Timestamp]] = {}
    # initialize at -1 for implicit since the corresponding piece of data could already be around,
    # and we don't want liveness checker to think this was newly created unless we
    # explicitly trace an update somewhere
    self._timestamp: Timestamp = (
        Timestamp.uninitialized() if implicit else Timestamp.current()
    )
    self._snapshot_timestamps: List[Timestamp] = []
    self._snapshot_timestamp_ubounds: List[Timestamp] = []
    self._defined_cell_num = cells().exec_counter()
    self._is_dangling_on_edges = False
    self._cascading_reactive_cell_num = -1
    self._override_ready_liveness_cell_num = -1
    self._override_timestamp: Optional[Timestamp] = None
    self.watchpoints = Watchpoints()
    # The necessary last-updated timestamp / cell counter for this symbol to not be waiting
    self.required_timestamp: Timestamp = self.timestamp
    # for each usage of this sym, the version that was used, if different from the timestamp of usage
    self.timestamp_by_used_time: Dict[Timestamp, Timestamp] = {}
    self.used_node_by_used_time: Dict[Timestamp, ast.AST] = {}
    # History of definitions at time of liveness
    self.timestamp_by_liveness_time: Dict[Timestamp, Timestamp] = {}
    # All timestamps associated with updates to this symbol
    self.updated_timestamps: Set[Timestamp] = set()
    # The most recent timestamp associated with a particular object id
    self.last_updated_timestamp_by_obj_id: Dict[int, Timestamp] = {}
    self.fresher_ancestors: Set["Symbol"] = set()
    self.fresher_ancestor_timestamps: Set[Timestamp] = set()
    # cells where this symbol was live
    self.cells_where_deep_live: Set[Cell] = set()
    self.cells_where_shallow_live: Set[Cell] = set()
    # Per-exec-counter memo for is_waiting_at_position computations.
    self._last_computed_ready_or_waiting_cache_ts: int = -1
    self._is_ready_or_waiting_at_position_cache: Dict[Tuple[int, bool], bool] = {}
    # if implicitly created when tracing non-store-context ast nodes
    self._implicit = implicit
    # Will never be stale if no_warning is True
    self.disable_warnings = False
    self._temp_disable_warnings = False
    self._num_ipywidget_observers = 0
    self._num_mercury_widget_observers = 0
    # Register this symbol as an alias of its object.
    flow().aliases.setdefault(id(obj), set()).add(self)
    if (
        isinstance(self.name, str)
        and not self.is_anonymous
        and not self.containing_scope.is_namespace_scope
    ):
        ns = self.namespace
        if ns is not None and ns.scope_name == "self":
            # hack to get a better name than `self.whatever` for fields of this object
            # not ideal because it relies on the `self` convention but is probably
            # acceptable for the use case of improving readable names
            ns.scope_name = self.name
# NOTE(review): these accessors are used as attributes elsewhere (e.g.
# `self.aliases`, `self.shallow_timestamp` without calls), so they appear to be
# @property methods whose decorators were lost in extraction — confirm upstream.
def aliases(self) -> List["Symbol"]:
    """All symbols currently referring to the same object as this one."""
    return list(flow().aliases.get(self.obj_id, []))
def cells_where_live(self) -> Set[Cell]:
    """Cells where this symbol is live, deeply or shallowly."""
    return self.cells_where_deep_live | self.cells_where_shallow_live
def __repr__(self) -> str:
    return f"<{self.readable_name}>"
def __str__(self) -> str:
    return self.readable_name
def __hash__(self) -> int:
    # Identity-based hashing: symbols are unique entities, not value objects.
    return hash(id(self))
def __lt__(self, other) -> bool:
    # Arbitrary but stable ordering (by id) so symbols can be sorted.
    return id(self) < id(other)
def add_tag(self, tag_value: str) -> None:
    """Attach a user-defined tag to this symbol."""
    self._tags.add(tag_value)
def remove_tag(self, tag_value: str) -> None:
    """Remove a user-defined tag (no-op if absent)."""
    self._tags.discard(tag_value)
def has_tag(self, tag_value: str) -> bool:
    """Whether the given user-defined tag is attached."""
    return tag_value in self._tags
def temporary_disable_warnings(self) -> None:
    """Suppress staleness warnings for this symbol until reset."""
    self._temp_disable_warnings = True
def last_used_timestamp(self) -> Timestamp:
    """Most recent timestamp at which this symbol was used (or uninitialized)."""
    if len(self.timestamp_by_used_time) == 0:
        return Timestamp.uninitialized()
    else:
        return max(self.timestamp_by_used_time.keys())
def namespace_waiting_symbols(self) -> Set["Symbol"]:
    """Waiting symbols inside this symbol's namespace, if it has one."""
    ns = self.namespace
    return set() if ns is None else ns.namespace_waiting_symbols
def shallow_timestamp(self) -> Timestamp:
    """This symbol's own timestamp, including any override (e.g. widget updates)."""
    if self._override_timestamp is None:
        return self._timestamp
    else:
        return max(self._timestamp, self._override_timestamp)
def visible_timestamp(self) -> Optional[Timestamp]:
    """Latest update timestamp whose cell is still visible, if any."""
    for ts in sorted(self.updated_timestamps, reverse=True):
        if cells().at_timestamp(ts).is_visible:
            return ts
    return None
def memoize_timestamp(self) -> Optional[Timestamp]:
    """Timestamp of the last update for the current object id, if recorded."""
    return self.last_updated_timestamp_by_obj_id.get(self.obj_id)
def timestamp(self) -> Timestamp:
    """Deep timestamp: max of own timestamp and namespace descendants'.

    Imports / modules use only the shallow timestamp.
    """
    ts = self.shallow_timestamp
    if self.is_import or self.is_module:
        return ts
    ns = self.namespace
    return ts if ns is None else max(ts, ns.max_descendent_timestamp)
def _compute_namespace_timestamps(
    self,
    seen: Optional[Set["Symbol"]] = None,
    version_ubound: Optional[Timestamp] = None,
) -> Set[Timestamp]:
    """Collect this symbol's timestamps plus those of its namespace members.

    With a `version_ubound`, only the latest snapshot timestamp <= the bound
    is considered; `seen` guards against cycles in the namespace graph.
    """
    if version_ubound is None:
        timestamps = {self.shallow_timestamp, self.timestamp}
    else:
        # Find the newest snapshot not exceeding the bound.
        max_leq_ubound = Timestamp.uninitialized()
        for ts in reversed(self._snapshot_timestamps):
            if ts <= version_ubound:
                max_leq_ubound = ts
                break
        if max_leq_ubound.is_initialized:
            timestamps = {max_leq_ubound}
        else:
            timestamps = set()
    ns = self.namespace
    if ns is None:
        return timestamps
    if seen is None:
        seen = set()
    if self in seen:
        return timestamps
    seen.add(self)
    # Recurse into the namespace's members and merge their timestamps.
    for sym in ns.all_symbols_this_indentation():
        timestamps |= sym._compute_namespace_timestamps(
            seen=seen, version_ubound=version_ubound
        )
    return timestamps
def _get_timestamps_for_version(self, version: int) -> Set[Timestamp]:
    """Timestamps associated with a particular snapshot version (-1 == latest)."""
    if len(self._snapshot_timestamps) == 0:
        return {self.timestamp}
    ts = self._snapshot_timestamps[version]
    if ts.cell_num == -1:
        return {Timestamp(self.defined_cell_num, ts.stmt_num)}
    else:
        return self._compute_namespace_timestamps(
            version_ubound=None if version == -1 else ts
        )
def code(
    self, format_type: Optional[Type[FormatType]] = None, version: int = -1
) -> Slice:
    """Reconstruct the code slice that produced the given version of this symbol."""
    return statements().format_multi_slice(
        self._get_timestamps_for_version(version=version),
        blacken=True,
        format_type=format_type,
    )
def cascading_reactive_cell_num(
    self,
    seen: Optional[Set["Symbol"]] = None,
    consider_containing_symbols: bool = True,
) -> int:
    """Max cascading-reactive cell counter over this symbol, its namespace,
    and (optionally) the symbols containing it; -1 if never reactive."""
    if seen is None:
        seen = set()
    if self in seen:
        return -1
    seen.add(self)
    cell_num = self._cascading_reactive_cell_num
    ns = self.namespace
    ret = (
        cell_num
        if ns is None
        else max(
            cell_num,
            ns.max_cascading_reactive_cell_num(seen),
        )
    )
    if not consider_containing_symbols:
        return ret
    for sym in self.iter_containing_symbols():
        ret = max(ret, sym.cascading_reactive_cell_num(seen=seen))
    return ret
def bump_cascading_reactive_cell_num(self, ctr: Optional[int] = None) -> None:
    """Raise the cascading-reactive counter to *ctr* (default: current cell)."""
    self._cascading_reactive_cell_num = max(
        self._cascading_reactive_cell_num,
        flow().cell_counter() if ctr is None else ctr,
    )
def iter_containing_symbols(self) -> Generator["Symbol", None, None]:
    """Yield self, then every alias of each enclosing namespace."""
    yield self
    ns = self.containing_namespace
    if ns is None or not ns.is_namespace_scope:
        return
    for containing_ns in ns.iter_containing_namespaces():
        yield from flow().aliases.get(containing_ns.obj_id, [])
def waiting_timestamp(self) -> int:
    """Cell counter used for waiting checks, floored by the flow minimum."""
    return max(self._timestamp.cell_num, flow().min_timestamp)
def defined_cell_num(self) -> int:
    """Execution counter of the cell in which this symbol was defined."""
    return self._defined_cell_num
def readable_name(self) -> str:
    """Human-readable, namespace-qualified name for display."""
    return self.containing_scope.make_namespace_qualified_name(self)
# Symbol-kind predicates; each simply tests the SymbolType tag.
def is_subscript(self) -> bool:
    return self.symbol_type == SymbolType.SUBSCRIPT
def is_class(self) -> bool:
    return self.symbol_type == SymbolType.CLASS
def is_function(self) -> bool:
    return self.symbol_type == SymbolType.FUNCTION
def is_lambda(self) -> bool:
    # TODO: this is terrible
    # Lambdas are detected purely by their synthetic name prefix.
    return type(self.name) is str and self.name.startswith(  # noqa: E721
        "<lambda_sym_"
    )
def is_import(self) -> bool:
    return self.symbol_type == SymbolType.IMPORT
def is_module(self) -> bool:
    return self.symbol_type == SymbolType.MODULE
def imported_module(self) -> str:
    """Name of the module this IMPORT symbol was imported from.

    For ``import a.b as c`` returns ``"a.b"``; for ``from m import x``
    returns ``"m"``. Raises ValueError for non-import symbols or when the
    alias cannot be located, TypeError for unexpected statement types.
    """
    if not self.is_import:
        raise ValueError("only IMPORT symbols have `imported_module` property")
    if isinstance(self.stmt_node, ast.Import):
        # Find the alias in the import statement matching this symbol's name.
        for alias in self.stmt_node.names:
            name = alias.asname or alias.name
            if name == self.name:
                return alias.name
        raise ValueError(
            # fixed message typo: "is stmt" -> "in stmt"
            "Unable to find module for symbol %s in stmt %s"
            % (self, ast.dump(self.stmt_node))
        )
    elif isinstance(self.stmt_node, ast.ImportFrom):
        return self.stmt_node.module
    else:
        raise TypeError(
            "Invalid stmt type for import symbol: %s" % ast.dump(self.stmt_node)
        )
def imported_symbol_original_name(self) -> str:
    """Original (pre-``as``) name of this IMPORT symbol.

    For ``import m`` this is the module name; for ``from m import x as y``
    it is ``"x"``. Raises ValueError for non-import symbols or when the
    alias cannot be located, TypeError for unexpected statement types.
    """
    if not self.is_import:
        raise ValueError(
            "only IMPORT symbols have `imported_symbol_original_name` property"
        )
    if isinstance(self.stmt_node, ast.Import):
        return self.imported_module
    elif isinstance(self.stmt_node, ast.ImportFrom):
        # Find the alias in the from-import matching this symbol's name.
        for alias in self.stmt_node.names:
            name = alias.asname or alias.name
            if name == self.name:
                return alias.name
        raise ValueError(
            # fixed message typo: "is stmt" -> "in stmt"
            # (message says "module" but refers to the original name; kept for
            # minimal change — consider rewording upstream)
            "Unable to find module for symbol %s in stmt %s"
            % (self, ast.dump(self.stmt_node))
        )
    else:
        raise TypeError(
            "Invalid stmt type for import symbol: %s" % ast.dump(self.stmt_node)
        )
def is_cascading_reactive_at_counter(self, ctr: int) -> bool:
    """Whether this symbol's cascading reactivity postdates cell counter *ctr*."""
    return self.cascading_reactive_cell_num() > max(
        ctr, flow().min_cascading_reactive_cell_num
    )
def get_top_level(self) -> Optional["Symbol"]:
    """Walk up through namespaces to a globally accessible alias, if any."""
    if not self.containing_scope.is_namespace_scope:
        return self
    else:
        containing_scope = cast("Namespace", self.containing_scope)
        for alias in flow().aliases.get(containing_scope.obj_id, []):
            if alias.is_globally_accessible:
                return alias.get_top_level()
        return None
def get_import_string(self) -> str:
    """Reconstruct the import statement that created this IMPORT symbol."""
    if not self.is_import:
        raise ValueError("only IMPORT symbols support recreating the import string")
    module = self.imported_module
    if isinstance(self.stmt_node, ast.Import):
        # Include the `as` clause only when the bound name differs.
        if module == self.name:
            return f"import {module}"
        else:
            return f"import {module} as {self.name}"
    elif isinstance(self.stmt_node, ast.ImportFrom):
        original_symbol_name = self.imported_symbol_original_name
        if original_symbol_name == self.name:
            return f"from {module} import {original_symbol_name}"
        else:
            return f"from {module} import {original_symbol_name} as {self.name}"
    else:
        raise TypeError(
            "Invalid stmt type for import symbol: %s" % ast.dump(self.stmt_node)
        )
def is_anonymous(self) -> bool:
    """True for anonymous symbols or symbols inside an anonymous namespace."""
    if self.symbol_type == SymbolType.ANONYMOUS:
        return True
    ns = self.containing_namespace
    if ns is not None and ns.is_anonymous:
        return True
    return False
def is_implicit(self) -> bool:
    """Whether this symbol was created implicitly during tracing."""
    return self._implicit
def shallow_clone(
    self, new_obj: Any, new_containing_scope: "Scope", symbol_type: SymbolType
) -> "Symbol":
    """Create a new symbol with the same name but a new object/scope/type."""
    return self.__class__(self.name, symbol_type, new_obj, new_containing_scope)
# NOTE(review): the accessors below are referenced without calls elsewhere
# (e.g. `self.obj_id`), suggesting stripped @property decorators — confirm.
def obj_id(self) -> int:
    """`id()` of the referenced object."""
    return id(self.obj)
def obj_len(self) -> Optional[int]:
    """`len()` of the referenced object, or None if unavailable / errors."""
    try:
        # Lazy modules are skipped: touching them could force the import.
        if not self.is_obj_lazy_module and hasattr(self.obj, "__len__"):
            return len(self.obj)
    except:  # noqa: E722
        pass
    return None
def obj_type(self) -> Type[Any]:
    """Concrete type of the referenced object."""
    return type(self.obj)
def is_immutable(self) -> bool:
    """Whether the referenced object is of a known-immutable primitive type."""
    return self.obj_type in self.IMMUTABLE_TYPES
def is_mutation_virtual_symbol(self) -> bool:
    """Whether this is the synthetic symbol recording in-place mutations."""
    return self.name == self.IPYFLOW_MUTATION_VIRTUAL_SYMBOL_NAME
def is_underscore(self) -> bool:
    """Whether this is the global `_` (IPython last-result) symbol."""
    return self.name == "_" and self.containing_scope.is_global
def is_obj_lazy_module(self) -> bool:
    """Whether the referenced object is a not-yet-loaded lazy module."""
    return self.obj_type is _LazyModule
def get_type_annotation(self):
    """Best-effort type annotation object for the referenced value."""
    return get_type_annotation(self.obj)
def get_type_annotation_string(self) -> str:
    """String form of the inferred type annotation."""
    return make_annotation_string(self.get_type_annotation())
def namespace(self) -> Optional["Namespace"]:
    """Namespace owned by the referenced object, if one is tracked."""
    return flow().namespaces.get(self.obj_id)
def containing_namespace(self) -> Optional["Namespace"]:
    """The namespace this symbol lives in, if its scope is a namespace."""
    if self.containing_scope.is_namespace_scope:
        return cast("Namespace", self.containing_scope)
    else:
        return None
def full_path(self) -> Tuple[str, ...]:
    """Tuple path of scope names from the root down to this symbol."""
    return self.containing_scope.full_path + (str(self.name),)
def full_namespace_path(self) -> str:
    """Dotted namespace-qualified path for this symbol."""
    return self.containing_scope.make_namespace_qualified_name(self)
def is_garbage(self) -> bool:
    """Whether this symbol has been marked dead (tombstoned)."""
    return self._tombstone
def is_new_garbage(self) -> bool:
    """Whether this symbol just became garbage (refcount dropped to zero)."""
    if self._tombstone:
        # Already collected; not *new* garbage.
        return False
    containing_ns = self.containing_namespace
    numpy = sys.modules.get("numpy", None)
    if (
        numpy is not None
        and containing_ns is not None
        and isinstance(containing_ns.obj, numpy.ndarray)
    ):
        # numpy atoms are not interned (so assigning array elts to a variable does not bump refcount);
        # also seems that refcount is always 0, so just check if the containing namespace is garbage
        return self.containing_namespace.is_garbage
    return self.get_ref_count() == 0
def is_globally_accessible(self) -> bool:
    """Whether this symbol can be reached from the global scope."""
    return self.containing_scope.is_globally_accessible
def is_user_accessible(self) -> bool:
    """Whether a user could still reference this symbol from the notebook:
    globally reachable, named, alive, and not inside a dead/anonymous namespace."""
    return (
        self.is_globally_accessible
        and not self.is_anonymous
        and not self.is_garbage
        and not (
            self.containing_namespace is not None
            and (
                self.containing_namespace.is_anonymous
                or self.containing_namespace.is_garbage
            )
        )
    )
def _remove_self_from_aliases(self) -> None:
    """Drop this symbol from its object's alias set and release the object ref."""
    cleanup_discard(flow().aliases, self.obj_id, self)
    self.obj = None
def mark_garbage(self) -> None:
    """Tombstone this symbol; cascade to its namespace when all aliases are dead."""
    if self.is_garbage:
        return
    self._tombstone = True
    ns = self.namespace
    if ns is not None and all(alias.is_garbage for alias in self.aliases):
        ns.mark_garbage()
def collect_self_garbage(self) -> None:
    """Physically detach a tombstoned symbol: aliases, dep edges, scope entry."""
    assert self.is_garbage
    flow().blocked_reactive_timestamps_by_symbol.pop(self, None)
    self._remove_self_from_aliases()
    # Remove this symbol from both directions of the dependency graph.
    for parent in self.parents:
        parent.children.pop(self, None)
    for child in self.children:
        child.parents.pop(self, None)
    containing_ns = self.containing_namespace
    if self.is_subscript and containing_ns is not None:
        containing_ns._subscript_symbol_by_name.pop(self.name, None)
    elif not self.is_subscript:
        self.containing_scope._symbol_by_name.pop(self.name, None)
    else:
        # Subscript symbol with no containing namespace: nothing to unlink.
        logger.warning(
            "could not find symbol %s in its scope %s", self, self.containing_scope
        )
    # TODO: remove from static / dynamic parent / children edges
    # need to keep this around for readable_name to work
    # self.containing_scope = None
# def update_type(self, new_type):
#     self.symbol_type = new_type
#     if self.is_function:
#         self.call_scope = self.containing_scope.make_child_scope(self.name)
#     else:
#         self.call_scope = None
def update_obj_ref(self, obj: Any, refresh_cached: bool = True) -> None:
    """Point this symbol at a new object, migrating observers, caches,
    namespaces, and alias registrations from the previous object."""
    # Detach any ipywidgets observers registered against the old object.
    if self._num_ipywidget_observers > 0:
        try:
            self.obj.unobserve_all()
        except:  # noqa
            pass
        self._num_ipywidget_observers = 0
    # Likewise for mercury widget observers.
    if self._num_mercury_widget_observers > 0:
        try:
            self._mercury_widgets_manager.get_widget(
                self.obj.code_uid
            ).unobserve_all()
        except:  # noqa
            pass
        self._num_mercury_widget_observers = 0
    self._tombstone = False
    self._cached_out_of_sync = True
    # A type change invalidates any typecheck results for cells where live.
    if (
        flow().settings.mark_typecheck_failures_unsafe
        and self.cached_obj_type != type(obj)
    ):
        for cell in self.cells_where_live:
            cell.invalidate_typecheck_result()
    self.cells_where_shallow_live.clear()
    self.cells_where_deep_live.clear()
    self.obj = obj
    if self.cached_obj_id is not None and self.cached_obj_id != self.obj_id:
        new_ns = flow().namespaces.get(self.obj_id, None)
        # don't overwrite existing namespace for this obj
        old_ns = flow().namespaces.get(self.cached_obj_id, None)
        if (
            old_ns is not None
            and old_ns.full_namespace_path == self.full_namespace_path
        ):
            if new_ns is None:
                logger.info("create fresh copy of namespace %s", old_ns)
                new_ns = old_ns.fresh_copy(obj)
                old_ns.transfer_symbols_to(new_ns)
            else:
                # Reuse the existing namespace but inherit naming/parents.
                new_ns.scope_name = old_ns.scope_name
                new_ns.parent_scope = old_ns.parent_scope
        self._handle_aliases()
        # Old namespace becomes garbage once no aliases reference the old obj.
        if (
            old_ns is not None
            and len(flow().aliases.get(self.cached_obj_id, [])) == 0
        ):
            old_ns.mark_garbage()
    if refresh_cached:
        self._refresh_cached_obj()
def invalidate_cached(self) -> None:
    """Drop the cached object snapshot so the next comparison resyncs."""
    self._cached_out_of_sync = True
    self.cached_obj_id = None
    self.cached_obj_type = None
def get_ref_count(self) -> int:
    """External refcount of the object, excluding ipyflow's own references;
    -1 for None / NULL placeholders."""
    if self.obj is None or self.obj is Symbol.NULL:
        return -1
    # Subtract the temporary ref created by getrefcount itself...
    total = sys.getrefcount(self.obj) - 1
    # ...and every alias symbol plus a live namespace holding the object.
    total -= len(flow().aliases.get(self.obj_id, []))
    ns = flow().namespaces.get(self.obj_id, None)
    if ns is not None and ns.obj is not None and ns.obj is not Symbol.NULL:
        total -= 1
    return total
def _should_cancel_propagation(self, prev_obj: Optional[Any]) -> bool:
    """Decide whether a dependency update need not propagate to children
    (e.g. the object is unchanged since the last snapshot)."""
    if prev_obj is None:
        return False
    # A blocked reactive timestamp for this cell forces propagation.
    if (
        flow().blocked_reactive_timestamps_by_symbol.get(self, -1)
        == self.timestamp.cell_num
    ):
        return False
    # Object identity unchanged (or cache fresh): nothing new to propagate.
    if not self._cached_out_of_sync or self.obj_id == self.cached_obj_id:
        return True
    if self.obj is None or prev_obj is Symbol.NULL:
        return self.obj is None and prev_obj is Symbol.NULL
    return False
def _handle_aliases(self):
    """Move this symbol's alias registration from the cached obj id to the current one."""
    cleanup_discard(flow().aliases, self.cached_obj_id, self)
    flow().aliases.setdefault(self.obj_id, set()).add(self)
def update_stmt_node(self, stmt_node: Optional[ast.stmt]) -> Optional[ast.stmt]:
    """Record the defining statement; set up a call scope for functions/lambdas."""
    self.stmt_node = stmt_node
    self._funcall_live_symbols = None
    if self.is_function or (
        stmt_node is not None and isinstance(stmt_node, ast.Lambda)
    ):
        # TODO: in the case of lambdas, there will not necessarily be one
        # symbol for a given statement. We need a more precise way to determine
        # the symbol being called than by looking at the stmt in question.
        flow().statement_to_func_sym[id(stmt_node)] = self
        self.call_scope = self.containing_scope.make_child_scope(self.name)
        self.func_def_stmt = stmt_node
    return stmt_node
def _refresh_cached_obj(self) -> None:
    """Snapshot id/type/len of the current object for later change detection."""
    self._cached_out_of_sync = False
    # don't keep an actual ref to avoid bumping refcount
    self.cached_obj_id = self.obj_id
    self.cached_obj_type = self.obj_type
    self.cached_obj_len = self.obj_len
def get_definition_args(self) -> List[ast.arg]:
    """All ast.arg nodes of this function symbol's definition:
    positional + keyword-only + *args + **kwargs."""
    assert self.func_def_stmt is not None and isinstance(
        self.func_def_stmt, (ast.FunctionDef, ast.AsyncFunctionDef, ast.Lambda)
    )
    args = []
    for arg in self.func_def_stmt.args.args + self.func_def_stmt.args.kwonlyargs:
        args.append(arg)
    if self.func_def_stmt.args.vararg is not None:
        args.append(self.func_def_stmt.args.vararg)
    if self.func_def_stmt.args.kwarg is not None:
        args.append(self.func_def_stmt.args.kwarg)
    return args
def _match_call_args_with_definition_args(
    self,
) -> Generator[Tuple[ast.arg, List["Symbol"]], None, None]:
    """Pair each definition arg with the symbols resolved from the caller's
    actual arguments (positional, keyword, then unreferenced defaults)."""
    # TODO: handle posonlyargs, kwonlyargs
    assert self.func_def_stmt is not None and isinstance(
        self.func_def_stmt, (ast.FunctionDef, ast.AsyncFunctionDef, ast.Lambda)
    )
    caller_node = self._get_calling_ast_node()
    if caller_node is None or not isinstance(caller_node, ast.Call):
        return
    # Args with defaults occupy the tail of args.args; they may be passed by keyword.
    kwarg_by_name = {
        arg_key.arg: arg_key
        for arg_key in self.func_def_stmt.args.args[
            -len(self.func_def_stmt.args.defaults) :
        ]
    }
    if not all(keyword.arg in kwarg_by_name for keyword in caller_node.keywords):
        logger.warning("detected mismatched kwargs from caller node to definition")
        return
    def_args = self.func_def_stmt.args.args
    if len(self.func_def_stmt.args.defaults) > 0:
        def_args = def_args[: -len(self.func_def_stmt.args.defaults)]
    if len(def_args) > 0 and def_args[0].arg == "self":
        # FIXME: this is bad and I should feel bad
        def_args = def_args[1:]
    # Positional arguments, in order; bail out at the first *-splat.
    for def_arg, call_arg in zip(def_args, caller_node.args):
        if isinstance(call_arg, ast.Starred):
            # give up
            # TODO: handle this case
            break
        yield def_arg, tracer().resolve_loaded_symbols(call_arg)
    # Keyword arguments supplied explicitly by the caller.
    seen_keys = set()
    for keyword in caller_node.keywords:
        keyword_key, keyword_value = keyword.arg, keyword.value
        if keyword_value is None:
            continue
        seen_keys.add(keyword_key)
        yield kwarg_by_name[keyword_key], tracer().resolve_loaded_symbols(
            keyword_value
        )
    # Defaulted parameters the caller did not override: resolve the default exprs.
    for arg_key, arg_value in zip(
        self.func_def_stmt.args.args[-len(self.func_def_stmt.args.defaults) :],
        self.func_def_stmt.args.defaults,
    ):
        if arg_key.arg in seen_keys:
            continue
        yield arg_key, tracer().resolve_loaded_symbols(arg_value)
def _get_calling_ast_node(self) -> Optional[ast.Call]:
    """Best-effort lookup of the ast.Call node currently invoking this symbol,
    via the tracer's lexical call stack; None when unavailable or ambiguous."""
    if tracer().tracing_disabled_since_last_module_stmt or (
        not hasattr(self.obj, "__module__")
        and getattr(type(self.obj), "__module__", None) == "builtins"
    ):
        return None
    if self.func_def_stmt is not None and isinstance(
        self.func_def_stmt, (ast.FunctionDef, ast.AsyncFunctionDef)
    ):
        if self.name in ("__getitem__", "__setitem__", "__delitem__"):
            # TODO: handle case where we're looking for a subscript for the calling node
            return None
        for decorator in self.func_def_stmt.decorator_list:
            if isinstance(decorator, ast.Name) and decorator.id == "property":
                # TODO: handle case where we're looking for an attribute for the calling node
                return None
    lexical_call_stack = tracer().lexical_call_stack
    if len(lexical_call_stack) == 0:
        return None
    prev_node_id_in_cur_frame_lexical = lexical_call_stack.get_field(
        "prev_node_id_in_cur_frame_lexical"
    )
    caller_ast_node = tracer().ast_node_by_id.get(
        prev_node_id_in_cur_frame_lexical, None
    )
    if caller_ast_node is None or not isinstance(caller_ast_node, ast.Call):
        return None
    return caller_ast_node
def create_symbols_for_call_args(self, call_frame: FrameType) -> None:
    """Create symbols in this function's call scope for every definition arg,
    wiring dependencies from the caller's actual arguments where known."""
    assert self.func_def_stmt is not None
    seen_def_args = set()
    logger.info("create symbols for call to %s", self)
    # Args that could be matched against the caller get their resolved deps.
    for def_arg, deps in self._match_call_args_with_definition_args():
        seen_def_args.add(def_arg.arg)
        self.call_scope.upsert_symbol_for_name(
            def_arg.arg,
            call_frame.f_locals.get(def_arg.arg),
            deps,
            self.func_def_stmt,
            propagate=False,
            symbol_node=def_arg,
        )
        logger.info("def arg %s matched with deps %s", def_arg, deps)
    # Remaining args get dependency-free symbols so they still exist in-scope.
    for def_arg in self.get_definition_args():
        if def_arg.arg in seen_def_args:
            continue
        self.call_scope.upsert_symbol_for_name(
            def_arg.arg,
            None,
            set(),
            self.func_def_stmt,
            propagate=False,
            symbol_node=def_arg,
        )
def is_waiting(self) -> bool:
    """Whether this symbol (or its namespace members) depends on a fresher
    ancestor and thus awaits re-execution; warnings-disabled symbols never wait."""
    if self.disable_warnings or self._temp_disable_warnings:
        return False
    if self.waiting_timestamp < self.required_timestamp.cell_num:
        return True
    elif flow().min_timestamp == -1:
        return len(self.namespace_waiting_symbols) > 0
    else:
        # TODO: guard against infinite recursion
        return any(sym.is_waiting for sym in self.namespace_waiting_symbols)
def is_shallow_stale(self) -> bool:
    """Like :meth:`is_waiting` but ignoring namespace members."""
    if self.disable_warnings or self._temp_disable_warnings:
        return False
    return self.waiting_timestamp < self.required_timestamp.cell_num
def _is_ready_or_waiting_at_position_impl(self, pos: int, deep: bool) -> bool:
    """Uncached check whether any parent introduced at or before cell position
    *pos* has since been updated (or is itself waiting) at that position."""
    for par, timestamps in self.parents.items():
        for ts in timestamps:
            dep_introduced_pos = cells().at_timestamp(ts).position
            # Dependencies introduced below `pos` don't affect it.
            if dep_introduced_pos > pos:
                continue
            for updated_ts in par.updated_timestamps:
                if cells().at_timestamp(updated_ts).position > dep_introduced_pos:
                    continue
                if updated_ts.cell_num > ts.cell_num or par.is_waiting_at_position(
                    dep_introduced_pos
                ):
                    # logger.error("sym: %s", self)
                    # logger.error("pos: %s", pos)
                    # logger.error("parent: %s", par)
                    # logger.error("dep introdced ts: %s", ts)
                    # logger.error("dep introdced pos: %s", dep_introduced_pos)
                    # logger.error("par updated ts: %s", updated_ts)
                    # logger.error("par updated position: %s", cells().from_timestamp(updated_ts).position)
                    return True
    if deep:
        for sym in self.namespace_waiting_symbols:
            if sym.is_waiting_at_position(pos):
                return True
    return False
def is_waiting_at_position(self, pos: int, deep: bool = True) -> bool:
    """Position-aware waiting check, memoized per execution counter."""
    if deep:
        if not self.is_waiting:
            return False
    else:
        if not self.is_shallow_stale:
            return False
    # In any-order mode, a waiting symbol waits regardless of position.
    if flow().mut_settings.flow_order == FlowDirection.ANY_ORDER:
        return True
    # Invalidate the memo when a new cell execution has occurred.
    if cells().exec_counter() > self._last_computed_ready_or_waiting_cache_ts:
        self._is_ready_or_waiting_at_position_cache.clear()
        self._last_computed_ready_or_waiting_cache_ts = cells().exec_counter()
    if (pos, deep) in self._is_ready_or_waiting_at_position_cache:
        return self._is_ready_or_waiting_at_position_cache[pos, deep]
    # preemptively set this entry to 'False' in the cache to avoid infinite loops
    self._is_ready_or_waiting_at_position_cache[pos, deep] = False
    is_waiting = self._is_ready_or_waiting_at_position_impl(pos, deep)
    self._is_ready_or_waiting_at_position_cache[pos, deep] = is_waiting
    return is_waiting
def should_mark_waiting(self, updated_dep):
    """Whether an update to *updated_dep* should mark this symbol as waiting
    (never for self-updates or warnings-disabled symbols)."""
    if self.disable_warnings:
        return False
    if updated_dep is self:
        return False
    return True
def _is_underscore_or_simple_assign(self, new_deps: Set["Symbol"]) -> bool:
    """True for the `_` symbol or a plain aliasing assignment like ``a = b``
    (single dep referencing the very same object)."""
    if self.is_underscore:
        # FIXME: distinguish between explicit assignment to _ from user and implicit assignment from kernel
        return True
    if not isinstance(self.stmt_node, (ast.Assign, ast.AnnAssign)):
        return False
    if len(new_deps) != 1:
        return False
    only_dep: Symbol = next(iter(new_deps))
    # obj ids can get reused for anon symbols like literals
    return not only_dep.is_anonymous and self.obj_id == only_dep.obj_id
def update_deps(
    self,
    new_deps: Set["Symbol"],
    prev_obj: Any = None,
    overwrite: bool = True,
    mutated: bool = False,
    deleted: bool = False,
    propagate_to_namespace_descendents: bool = False,
    propagate: bool = True,
    refresh: bool = True,
    is_cascading_reactive: Optional[bool] = None,
) -> None:
    """Rewire this symbol's dependency edges to *new_deps* and propagate
    staleness/reactivity through the dataflow graph.

    `overwrite` replaces (rather than augments) existing parents; `mutated`
    signals an in-place change; `deleted` a removal; the propagate/refresh
    flags control how far the update ripples.
    """
    if self.is_import and self.obj_id == self.cached_obj_id:
        # skip updates for imported symbols; just bump the version
        self.refresh()
        return
    if overwrite and not self.is_globally_accessible:
        self.watchpoints.clear()
    # In-place mutation of an immutable object is a no-op.
    if mutated and self.is_immutable:
        return
    # if we get here, no longer implicit
    self._implicit = False
    # quick last fix to avoid overwriting if we appear inside the set of deps to add (or a 1st order ancestor)
    # TODO: check higher-order ancestors too?
    overwrite = overwrite and self not in new_deps
    overwrite = overwrite and not any(
        self in new_dep.parents for new_dep in new_deps
    )
    logger.warning("symbol %s new deps %s", self, new_deps)
    new_deps.discard(self)
    if overwrite:
        # Detach edges to parents no longer in the dep set.
        for parent in self.parents.keys() - new_deps:
            parent.children.pop(self, None)
            self.parents.pop(parent, None)
    # Attach edges for newly introduced parents, stamped with the current time.
    for new_parent in new_deps - self.parents.keys():
        if new_parent is None:
            continue
        new_parent.children.setdefault(self, []).append(Timestamp.current())
        self.parents.setdefault(new_parent, []).append(Timestamp.current())
    self.required_timestamp = Timestamp.uninitialized()
    self.fresher_ancestors.clear()
    self.fresher_ancestor_timestamps.clear()
    if mutated or isinstance(self.stmt_node, ast.AugAssign):
        self.update_usage_info()
    # For mutable user-visible globals, bump the synthetic mutation symbol so
    # that aliases see the change.
    if (
        (mutated or overwrite)
        and Timestamp.current().is_initialized
        and not self.is_immutable
        and not self.is_mutation_virtual_symbol
        and not self.is_anonymous
        and self.containing_scope.is_global
        and not self.is_underscore
        and not self.is_implicit
        and self.obj_type is not type
        and not self.is_class
        and self.namespace is not None
    ):
        self.namespace.upsert_symbol_for_name(
            self.IPYFLOW_MUTATION_VIRTUAL_SYMBOL_NAME, object(), propagate=False
        )
    propagate = propagate and (
        mutated or deleted or not self._should_cancel_propagation(prev_obj)
    )
    try:
        prev_cell = cells().current_cell().prev_cell
    except KeyError:
        prev_cell = None
    prev_cell_ctr = -1 if prev_cell is None else prev_cell.cell_ctr
    if overwrite:
        # A fresh (re)definition resets any prior reactive bookkeeping.
        flow_ = flow()
        self._cascading_reactive_cell_num = -1
        flow_.updated_reactive_symbols.discard(self)
        flow_.updated_deep_reactive_symbols.discard(self)
    if is_cascading_reactive is not None:
        is_cascading_reactive = is_cascading_reactive or any(
            sym.is_cascading_reactive_at_counter(prev_cell_ctr) for sym in new_deps
        )
    if is_cascading_reactive:
        self.bump_cascading_reactive_cell_num()
    if refresh:
        self.refresh(
            # rationale: if this is a mutation for which we have more precise information,
            # then we don't need to update the ns descendents as this will already have happened.
            # also don't update ns descendents for things like `a = b`
            refresh_descendent_namespaces=propagate
            and not (mutated and not propagate_to_namespace_descendents)
            and not self._is_underscore_or_simple_assign(new_deps),
            refresh_namespace_waiting=not mutated,
        )
    if propagate:
        UpdateProtocol(self)(
            new_deps, mutated, propagate_to_namespace_descendents, refresh
        )
    self._refresh_cached_obj()
    if self.is_class:
        # pop pending class defs and update obj ref
        pending_class_ns = tracer().pending_class_namespaces.pop()
        pending_class_ns.update_obj_ref(self.obj)
    # Inherit call scope / def stmt from any dep aliasing the same function obj.
    for dep in new_deps:
        if dep.obj is self.obj and dep.call_scope is not None:
            self.call_scope = dep.call_scope
            self.func_def_stmt = dep.func_def_stmt
    ns = self.namespace
    if ns is not None and ns.scope_name == "self" and isinstance(self.name, str):
        # fixup namespace name if necessary
        # can happen if symbol for 'self' was created in a previous __init__
        ns.scope_name = self.name
    if overwrite and len(flow().aliases[self.obj_id]) == 1:
        self._handle_possible_widget_creation()
        self._handle_possible_mercury_widget_creation()
# NOTE(review): this is referenced without parentheses elsewhere (e.g.
# `WidgetsManager = self._mercury_widgets_manager`), so it was presumably
# decorated with @property upstream; decorator lines appear to have been
# stripped from this dump -- confirm against the original source.
def _mercury_widgets_manager(self):
    """Best-effort lookup of the mercury ``WidgetsManager`` class for this
    symbol's object, or ``None`` when the object does not look like a
    mercury widget (no ``code_uid`` attribute) or resolution fails.
    """
    if self.obj is None:
        return None
    # lazy modules are skipped so we don't accidentally trigger an import
    if self.is_obj_lazy_module or not hasattr(self.obj, "code_uid"):
        return None
    try:
        # resolve WidgetsManager from the module that defines the object's
        # class; any failure is treated as "not a mercury widget"
        return sys.modules.get(self.obj.__class__.__module__).WidgetsManager
    except: # noqa
        return None

def _handle_possible_widget_creation(self) -> None:
    """If ``self.obj`` is an ipywidgets ``Widget`` with a ``value`` trait,
    mirror that value as a namespace child symbol and subscribe
    ``_observe_widget`` so trait changes feed back into the dataflow graph.
    """
    if self.obj is None:
        return
    # tolerate ipywidgets not being installed / not yet imported
    Widget = getattr(sys.modules.get("ipywidgets"), "Widget", None)
    if (
        Widget is None
        or self.is_obj_lazy_module
        or not isinstance(self.obj, Widget)
        or not hasattr(self.obj, "observe")
        or not hasattr(self.obj, "value")
    ):
        return
    self.namespaced().upsert_symbol_for_name(
        "value", getattr(self.obj, "value", None), set(), self.stmt_node
    )
    self.obj.observe(self._observe_widget)
    self._num_ipywidget_observers += 1

def _handle_possible_mercury_widget_creation(self) -> None:
    """Mercury analogue of ``_handle_possible_widget_creation``: track the
    widget's ``value`` as a namespace child and observe changes.
    """
    WidgetsManager = self._mercury_widgets_manager
    if WidgetsManager is None:
        return
    widget = WidgetsManager.get_widget(self.obj.code_uid)
    self.namespaced().upsert_symbol_for_name(
        "value", getattr(widget, "value", None), set(), self.stmt_node
    )
    widget.observe(self._observe_widget)
    self._num_mercury_widget_observers += 1
def _observe_widget(self, msg: Dict[str, Any]) -> None:
    """Widget trait-change callback.

    Only ``value`` changes are handled: the new value is recorded as a
    synthetic assignment statement attributed to this symbol's cell, dynamic
    data-dependency edges are added in both directions between the original
    and override timestamps, and a debounced reactive re-execution is
    scheduled.

    :param msg: traitlets-style change message; expected keys are ``name``
        (the trait name) and ``new`` (the updated value).
    """
    if msg.get("name") != "value" or "new" not in msg:
        return
    ns = self.namespace
    sym = ns.lookup_symbol_by_name_this_indentation("value")
    if sym is None:
        return
    newval = msg["new"]
    current_ts_cell = cells().at_timestamp(self._timestamp)
    # use !r so that string (and other non-numeric) widget values are
    # embedded as valid Python literals; interpolating str(newval) directly
    # produced unparseable or wrong code for strings (e.g. `x = abc`)
    current_ts_cell._extra_stmt = ast.parse(
        f"{sym.readable_name} = {newval!r}"
    ).body[0]
    sym._override_ready_liveness_cell_num = flow().cell_counter() + 1
    sym._override_timestamp = Timestamp(
        self._timestamp.cell_num, current_ts_cell.num_original_stmts
    )
    sym.update_obj_ref(newval)
    statements().create_and_track(
        current_ts_cell._extra_stmt,
        timestamp=sym._override_timestamp,
        override=True,
    )
    # bidirectional edges tie the override timestamp to the symbol's
    # original timestamp in the dynamic slice
    with dynamic_slicing_context():
        flow().add_data_dep(
            sym._timestamp,
            sym._override_timestamp,
            sym,
        )
        flow().add_data_dep(
            sym._override_timestamp,
            sym._timestamp,
            sym,
        )
    self.debounced_exec_schedule(reactive=True)
def debounced_exec_schedule(self, reactive: bool) -> None:
    """Request (debounced) re-execution of the cell this symbol was updated
    in; mark the flow as having a pending schedule when the debouncer
    accepts the request.
    """
    if _debounced_exec_schedule(
        cells().at_timestamp(self.timestamp).cell_id, reactive=reactive
    ):
        flow().debounced_exec_schedule_pending = True

def namespaced(self) -> "Namespace":
    """Return this symbol's namespace, creating one on demand."""
    ns = self.namespace
    if ns is not None:
        return ns
    return namespaces()(self.obj, self.name, parent_scope=self.containing_scope)

def update_usage_info_one_timestamp(
    self,
    used_time: Timestamp,
    updated_time: Timestamp,
    is_static: bool,
) -> bool:
    """Record a single use of this symbol at ``used_time`` against the
    update at ``updated_time``; return whether it counted as a usage.

    A static (liveness-based) reference always records a dependency; a
    dynamic one only when the update strictly precedes the use. For static
    references the usage additionally requires the updating cell to be
    visible.
    """
    flow_ = flow()
    is_usage = is_static or updated_time < used_time
    if is_usage:
        with slicing_context(is_static=is_static):
            flow_.add_data_dep(
                used_time,
                updated_time,
                self,
            )
    if is_static:
        is_usage = cells().at_timestamp(updated_time).is_visible
    return is_usage
def update_usage_info(
    self,
    used_time: Optional[Timestamp] = None,
    used_node: Optional[ast.AST] = None,
    exclude_ns: bool = False,
    is_static: bool = False,
    is_blocking: bool = False,
) -> "Symbol":
    """Register that this symbol was referenced at ``used_time``.

    Walks this symbol's recorded updates (newest first) and records data
    dependencies via ``update_usage_info_one_timestamp``; unless
    ``exclude_ns``, the usage is also propagated to all namespace
    descendants. Blocking references record nothing.

    :return: ``self``, for chaining.
    """
    is_blocking = is_blocking or id(used_node) in tracer().blocking_node_ids
    if used_time is None:
        used_time = Timestamp.current()
    if flow().is_dev_mode:
        # NOTE(review): the second %d slot receives self.timestamp, which
        # appears to be a Timestamp object rather than an int -- confirm
        # this formats as intended under %-style logging
        logger.info(
            "sym `%s` used in cell %d last updated in cell %d",
            self,
            used_time.cell_num,
            self.timestamp,
        )
    timestamp_by_used_time = (
        self.timestamp_by_liveness_time
        if is_static
        else self.timestamp_by_used_time
    )
    if not is_blocking:
        is_usage = False
        # NOTE(review): ts_to_use is never reassigned in the loop below, so
        # the recorded version is always self._timestamp -- verify this is
        # intentional
        ts_to_use = self._timestamp
        for updated_ts in sorted(self.updated_timestamps, reverse=True):
            if not updated_ts.is_initialized:
                continue
            is_usage = self.update_usage_info_one_timestamp(
                used_time,
                updated_ts,
                is_static=is_static,
            )
            if is_usage or not is_static:
                break
        if is_usage and used_time.is_initialized:
            timestamp_by_used_time[used_time] = ts_to_use
            if used_node is not None:
                self.used_node_by_used_time[used_time] = used_node
    if exclude_ns:
        return self
    # propagate the usage to namespace descendants (e.g. fields of an
    # object that was referenced as a whole)
    for sym in self.get_namespace_symbols(recurse=True):
        sym.update_usage_info(
            used_time=used_time,
            used_node=None,
            exclude_ns=True,
            is_static=is_static,
            is_blocking=is_blocking,
        )
    return self
def get_namespace_symbols(
    self, recurse: bool = False, seen: Optional[Set["Symbol"]] = None
) -> Generator["Symbol", None, None]:
    """Yield the symbols living in this symbol's namespace.

    With ``recurse=True``, also descend into each child's own namespace;
    ``seen`` guards against cycles among namespaces.
    """
    namespace = self.namespace
    if namespace is None:
        return
    visited = set() if seen is None else seen
    if self in visited:
        return
    visited.add(self)
    for child in namespace.all_symbols_this_indentation():
        yield child
        if recurse:
            yield from child.get_namespace_symbols(recurse=True, seen=visited)
def _take_timestamp_snapshots(
    self, ts_ubound: Timestamp, seen: Optional[Set["Symbol"]] = None
) -> None:
    """Snapshot the current timestamp (with upper bound ``ts_ubound``) for
    this symbol and, recursively, for all aliases of its containing
    namespaces; ``seen`` guards against alias cycles.
    """
    if seen is None:
        seen = set()
    if self in seen:
        return
    seen.add(self)
    self._snapshot_timestamps.append(self._timestamp)
    self._snapshot_timestamp_ubounds.append(ts_ubound)
    containing_ns = self.containing_namespace
    if containing_ns is None:
        return
    for alias in flow().aliases.get(containing_ns.obj_id, []):
        alias._take_timestamp_snapshots(ts_ubound, seen=seen)
def refresh(
    self,
    take_timestamp_snapshots: bool = True,
    refresh_descendent_namespaces: bool = False,
    refresh_namespace_waiting: bool = True,
    timestamp: Optional[Timestamp] = None,
    seen: Optional[Set["Symbol"]] = None,
) -> None:
    """Mark this symbol as freshly updated.

    Bumps the symbol's timestamp (to ``timestamp`` or the current one),
    clears any override timestamp and temporary warning suppression, records
    usage counters on cells where the symbol is live, and bumps the
    containing namespace's max descendant timestamp. Optionally recurses
    into descendant namespace symbols (``seen`` guards against cycles).
    """
    orig_timestamp = self._timestamp
    self._timestamp = Timestamp.current() if timestamp is None else timestamp
    self._override_timestamp = None
    if take_timestamp_snapshots and (
        orig_timestamp < self._timestamp or len(self._snapshot_timestamps) == 0
    ):
        self._take_timestamp_snapshots(self._timestamp)
    self.updated_timestamps.add(self._timestamp)
    self._temp_disable_warnings = False
    for cell in self.cells_where_live:
        cell.add_used_cell_counter(self, self._timestamp.cell_num)
    ns = self.containing_namespace
    if ns is not None:
        # logger.error("bump version of %s due to %s (value %s)", ns.full_path, self.full_path, self.obj)
        ns.max_descendent_timestamp = self.shallow_timestamp
        for alias in flow().aliases.get(ns.obj_id, []):
            for cell in alias.cells_where_deep_live:
                cell.add_used_cell_counter(alias, self._timestamp.cell_num)
    if refresh_descendent_namespaces:
        if seen is None:
            seen = set()
        if self in seen:
            return
        seen.add(self)
        ns = self.namespace
        if ns is not None:
            for sym in ns.all_symbols_this_indentation(exclude_class=True):
                # this is to handle cases like `x = x.mutate(42)`, where
                # we could have changed some member of x but returned the
                # original object -- in this case, just assume that all
                # the stale namespace descendents are no longer stale, as
                # this is likely the user intention. For an example, see
                # `test_external_object_update_propagates_to_stale_namespace_symbols()`
                # in `test_frontend_checker.py`
                if not sym.is_waiting or refresh_namespace_waiting:
                    # logger.error(
                    #     "refresh %s due to %s (value %s) via namespace %s",
                    #     sym.full_path,
                    #     self.full_path,
                    #     self.obj,
                    #     ns.full_path,
                    # )
                    sym.refresh(
                        refresh_descendent_namespaces=True,
                        timestamp=self.shallow_timestamp,
                        take_timestamp_snapshots=False,
                        seen=seen,
                    )
        if refresh_namespace_waiting:
            self.namespace_waiting_symbols.clear()
def resync_if_necessary(self, refresh: bool) -> None:
    """Re-sync this (global) symbol with the object currently bound to its
    name in the user namespace.

    Handles rebinding that happened outside of tracing: fixes up alias
    tables and, when the object is the last element of a containing list,
    corrects the subscript symbol's index. No-op when nothing changed.
    """
    if not self.containing_scope.is_global:
        return
    try:
        obj = shell().user_ns[self.name]
    except: # noqa
        # cinder runtime can throw an exception here due to lazy imports that fail
        return
    if self.obj is not obj:
        flow_ = flow()
        for alias in flow_.aliases.get(
            self.cached_obj_id, set()
        ) | flow_.aliases.get(self.obj_id, set()):
            containing_namespace = alias.containing_namespace
            if containing_namespace is None:
                continue
            containing_obj = containing_namespace.obj
            if containing_obj is None:
                continue
            # TODO: handle dict case too
            if isinstance(containing_obj, list) and containing_obj[-1] is obj:
                containing_namespace._subscript_symbol_by_name.pop(alias.name, None)
                alias.name = len(containing_obj) - 1
                alias.update_obj_ref(obj)
                containing_namespace._subscript_symbol_by_name[alias.name] = alias
        cleanup_discard(flow_.aliases, self.cached_obj_id, self)
        cleanup_discard(flow_.aliases, self.obj_id, self)
        flow_.aliases.setdefault(id(obj), set()).add(self)
        self.update_obj_ref(obj)
    elif self.obj_len != self.cached_obj_len:
        self._refresh_cached_obj()
    else:
        return
    if refresh:
        self.refresh()

# size cap (in elements) beyond which values are not considered for
# memoization-equality comparison
_MAX_MEMOIZE_COMPARABLE_SIZE = 10**6
def _equal(obj1: Any, obj2: Any) -> bool:
return obj1 == obj2
def _array_equal(obj1: Any, obj2: Any) -> bool:
import numpy as np
try:
return np.alltrue(obj1 == obj2) # type: ignore
except: # noqa
return False
def _dataframe_equal(obj1: Any, obj2: Any) -> bool:
try:
return obj1.equals(obj2) # type: ignore
except: # noqa
return False
def _make_list_eq(
eqs: List[Callable[[Any, Any], bool]]
) -> Callable[[List[Any], List[Any]], bool]:
def list_eq(lst1: List[Any], lst2: List[Any]) -> bool:
for eq, obj1, obj2 in zip(eqs, lst1, lst2):
if not eq(obj1, obj2):
return False
return True
return list_eq
# NOTE(review): first parameter is `cls` and call sites use
# `cls.make_memoize_comparable_for_obj(...)`, so this was presumably a
# @classmethod originally; decorator lines appear stripped in this dump.
def make_memoize_comparable_for_obj(
    cls, obj: Any, seen_ids: Set[int]
) -> Tuple[Any, Optional[Callable[[Any, Any], bool]], int]:
    """Build a memoization-comparable representation of ``obj``.

    :param obj: the value to make comparable.
    :param seen_ids: ids already visited, used to bail out on cycles.
    :return: a ``(comparable, equality_fn, size)`` triple; ``(NULL, None,
        -1)`` means the value cannot be compared for memoization (cycle,
        unsupported type, or too large).
    """
    if isinstance(obj, (bool, bytes, bytearray, int, float, str)):
        # primitives compare directly and count as size 1
        return obj, cls._equal, 1
    if not isinstance(obj, tuple):
        # tuples skip the id-based cycle check (presumably because any cycle
        # must pass through a mutable container, which is checked -- confirm)
        if id(obj) in seen_ids:
            return cls.NULL, None, -1
        seen_ids.add(id(obj))
    if isinstance(obj, (dict, frozenset, list, set, tuple)):
        size = 0
        comp = []
        eqs: List[Callable[[Any, Any], bool]] = []
        if isinstance(obj, dict):
            # sort items so dict ordering does not affect the comparable
            iterable: "Iterable[Any]" = sorted(obj.items())
        else:
            iterable = obj
        for inner in iterable:
            inner_comp, inner_eq, inner_size = cls.make_memoize_comparable_for_obj(
                inner, seen_ids
            )
            if inner_comp is cls.NULL or inner_eq is None:
                return cls.NULL, None, -1
            size += inner_size + 1
            if size > cls._MAX_MEMOIZE_COMPARABLE_SIZE:
                return cls.NULL, None, -1
            comp.append(inner_comp)
            eqs.append(inner_eq)
        if all(eq is cls._equal for eq in eqs):
            iter_eq: Callable[[Any, Any], bool] = cls._equal
        elif isinstance(obj, (frozenset, set)):
            # positional comparators are meaningless for unordered collections
            return cls.NULL, None, -1
        else:
            iter_eq = cls._make_list_eq(eqs)
        ret = frozenset(comp) if isinstance(obj, (frozenset, set)) else comp
        return ret, iter_eq, size
    elif type(obj) in (type, FunctionType):
        # try to determine it based on the symbol
        for sym in flow().aliases.get(id(obj), []):
            comp, eq = sym.make_memoize_comparable(seen_ids=seen_ids)
            if comp is not cls.NULL and eq is not None:
                return comp, eq, 1
        return cls.NULL, None, -1
    else:
        # hacks to check if they are arrays, dataframes, etc without explicitly importing these
        module = getattr(type(obj), "__module__", "")
        if module.startswith("numpy"):
            name = getattr(type(obj), "__name__", "")
            if name.endswith("ndarray"):
                return obj, cls._array_equal, obj.size
            else:
                numpy = sys.modules.get("numpy")
                if numpy is not None and isinstance(obj, numpy.number):
                    return obj, cls._equal, 1
        elif module.startswith(("modin", "pandas")):
            name = getattr(type(obj), "__name__", "")
            if name.endswith(("DataFrame", "Series")):
                return obj, cls._dataframe_equal, obj.size
        elif module.startswith("ipywidgets"):
            ipywidgets = sys.modules.get("ipywidgets")
            if (
                ipywidgets is not None
                and isinstance(obj, ipywidgets.Widget)
                and hasattr(obj, "value")
            ):
                return obj.value, cls._equal, 1
        return cls.NULL, None, -1

def make_memoize_comparable(
    self, seen_ids: Optional[Set[int]] = None
) -> Tuple[Any, Optional[Callable[[Any, Any], bool]]]:
    """Return a ``(comparable, equality_fn)`` pair for this symbol's value,
    suitable for cell memoization; ``(NULL, None)`` when the value cannot
    be compared.

    Function / class definitions are compared by their unparsed source plus
    the comparables of their parent symbols.
    """
    if seen_ids is None:
        seen_ids = set()
    if isinstance(
        self.stmt_node, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)
    ):
        comps = [astunparse.unparse(self.stmt_node)]
        for sym in sorted(self.parents.keys()):
            par_comp, eq = sym.make_memoize_comparable(seen_ids=seen_ids)
            if par_comp is self.NULL or eq is not self._equal:
                return self.NULL, None
            comps.append(par_comp)
        return comps, self._equal
    obj, eq, size = self.make_memoize_comparable_for_obj(self.obj, seen_ids)
    if size > self._MAX_MEMOIZE_COMPARABLE_SIZE:
        return self.NULL, None
    else:
        return obj, eq
The provided code snippet includes the necessary dependencies for implementing the `rdeps` function. Write a Python function `def rdeps(sym: Any) -> List[Symbol]` that solves the following problem:
Given the programmatic usage of some symbol, look up the corresponding recursive dependencies for that symbol.
Here is the function:
def rdeps(sym: Any) -> List[Symbol]:
    """
    Given the programmatic usage of some symbol, look up the
    corresponding recursive dependencies for that symbol.

    :param sym: the object to inspect; must be a :class:`Symbol`, otherwise
        ``_validate`` raises ``ValueError``.
    :return: every non-anonymous symbol reachable through ``parents`` edges,
        excluding ``sym`` itself.
    """
    # See the `argument` handler in ipyflow_tracer for the
    # actual implementation; this is just a stub that ensures
    # that handler was able to find something.
    sym = _validate(sym)
    seen: Set[Symbol] = set()
    _traverse(sym, seen, "parents")
    # fixed: stripped the duplicated docstring text that had been fused onto
    # this return line, which made it syntactically invalid
    return [v for v in (seen - {sym}) if not v.is_anonymous]
15,087 | from typing import TYPE_CHECKING, Any, List, Set, Union, cast
from ipyflow.data_model.symbol import Symbol
from ipyflow.data_model.timestamp import Timestamp
from ipyflow.tracing.watchpoint import Watchpoints
def _validate(sym: Any) -> Symbol:
    """Narrow ``sym`` to :class:`Symbol`, raising ``ValueError`` otherwise.

    The explicit ``None`` check is redundant with the isinstance test (None
    is never a Symbol) but short-circuits the common failure mode.
    """
    if sym is None or not isinstance(sym, Symbol):
        raise ValueError("unable to lookup metadata for symbol")
    return cast(Symbol, sym)
def _traverse(sym: Symbol, seen: Set[Symbol], attr: str) -> None:
    """Depth-first walk over the ``attr`` edge map (e.g. ``"parents"`` or
    ``"children"``), accumulating every reachable symbol into ``seen``.
    """
    if sym in seen:
        return
    seen.add(sym)
    neighbors = getattr(sym, attr)
    for neighbor in neighbors.keys():
        _traverse(neighbor, seen, attr)
class Symbol:
    """Metadata for a single tracked program symbol (variable, attribute,
    subscript, function, class, or import) in the ipyflow dataflow graph.
    """

    # sentinel for "no value" / incomparable results
    NULL = object()
    # object for virtual display symbol
    DISPLAY = object()
    # types whose instances can never be mutated in place
    IMMUTABLE_TYPES = set(IMMUTABLE_PRIMITIVE_TYPES)
    # name of the synthetic symbol used to represent in-place mutations
    IPYFLOW_MUTATION_VIRTUAL_SYMBOL_NAME = "__ipyflow_mutation"

    def __init__(
        self,
        name: SupportedIndexType,
        symbol_type: SymbolType,
        obj: Any,
        containing_scope: "Scope",
        stmt_node: Optional[ast.stmt] = None,
        symbol_node: Optional[ast.AST] = None,
        refresh_cached_obj: bool = False,
        implicit: bool = False,
    ) -> None:
        """Create a symbol for ``obj`` bound to ``name`` in ``containing_scope``.

        :param name: the symbol's name (string or subscript index).
        :param symbol_type: which kind of symbol this is.
        :param obj: the runtime object the symbol refers to.
        :param containing_scope: scope in which the symbol lives (falls back
            to the flow's global scope when falsy).
        :param stmt_node: statement that (re)defined the symbol, if known.
        :param symbol_node: AST node for the symbol itself, if known.
        :param refresh_cached_obj: snapshot the object's id/type/len now;
            only legal for implicit symbols with no statement.
        :param implicit: whether the symbol was created implicitly while
            tracing a non-store context (timestamp starts uninitialized).
        """
        if refresh_cached_obj:
            # TODO: clean up redundancies
            assert implicit
            assert stmt_node is None
        self.name = name
        self.symbol_type = symbol_type
        self.obj = obj
        # additional user-specific metadata
        self._tags: Set[str] = set()
        self.extra_metadata: Dict[str, Any] = {}
        # set when the symbol has been marked dead (garbage)
        self._tombstone = False
        self._cached_out_of_sync = True
        self.cached_obj_id: Optional[int] = None
        self.cached_obj_type: Optional[Type[object]] = None
        self.cached_obj_len: Optional[int] = None
        if refresh_cached_obj:
            self._refresh_cached_obj()
        self.containing_scope = containing_scope or flow().global_scope
        self.call_scope: Optional[Scope] = None
        self.func_def_stmt: Optional[ast.stmt] = None
        self.stmt_node = self.update_stmt_node(stmt_node)
        self.symbol_node = symbol_node
        self._funcall_live_symbols = None
        self.parents: Dict["Symbol", List[Timestamp]] = {}
        self.children: Dict["Symbol", List[Timestamp]] = {}
        # initialize at -1 for implicit since the corresponding piece of data could already be around,
        # and we don't want liveness checker to think this was newly created unless we
        # explicitly trace an update somewhere
        self._timestamp: Timestamp = (
            Timestamp.uninitialized() if implicit else Timestamp.current()
        )
        self._snapshot_timestamps: List[Timestamp] = []
        self._snapshot_timestamp_ubounds: List[Timestamp] = []
        self._defined_cell_num = cells().exec_counter()
        self._is_dangling_on_edges = False
        self._cascading_reactive_cell_num = -1
        self._override_ready_liveness_cell_num = -1
        self._override_timestamp: Optional[Timestamp] = None
        self.watchpoints = Watchpoints()
        # The necessary last-updated timestamp / cell counter for this symbol to not be waiting
        self.required_timestamp: Timestamp = self.timestamp
        # for each usage of this sym, the version that was used, if different from the timestamp of usage
        self.timestamp_by_used_time: Dict[Timestamp, Timestamp] = {}
        self.used_node_by_used_time: Dict[Timestamp, ast.AST] = {}
        # History of definitions at time of liveness
        self.timestamp_by_liveness_time: Dict[Timestamp, Timestamp] = {}
        # All timestamps associated with updates to this symbol
        self.updated_timestamps: Set[Timestamp] = set()
        # The most recent timestamp associated with a particular object id
        self.last_updated_timestamp_by_obj_id: Dict[int, Timestamp] = {}
        self.fresher_ancestors: Set["Symbol"] = set()
        self.fresher_ancestor_timestamps: Set[Timestamp] = set()
        # cells where this symbol was live
        self.cells_where_deep_live: Set[Cell] = set()
        self.cells_where_shallow_live: Set[Cell] = set()
        self._last_computed_ready_or_waiting_cache_ts: int = -1
        self._is_ready_or_waiting_at_position_cache: Dict[Tuple[int, bool], bool] = {}
        # if implicitly created when tracing non-store-context ast nodes
        self._implicit = implicit
        # Will never be stale if no_warning is True
        self.disable_warnings = False
        self._temp_disable_warnings = False
        self._num_ipywidget_observers = 0
        self._num_mercury_widget_observers = 0
        flow().aliases.setdefault(id(obj), set()).add(self)
        if (
            isinstance(self.name, str)
            and not self.is_anonymous
            and not self.containing_scope.is_namespace_scope
        ):
            ns = self.namespace
            if ns is not None and ns.scope_name == "self":
                # hack to get a better name than `self.whatever` for fields of this object
                # not ideal because it relies on the `self` convention but is probably
                # acceptable for the use case of improving readable names
                ns.scope_name = self.name
# NOTE(review): the no-arg defs below are referenced without parentheses
# elsewhere in this file, so they were presumably @property-decorated in the
# original source; decorator lines appear stripped in this dump.
def aliases(self) -> List["Symbol"]:
    """All symbols referring to the same underlying object."""
    return list(flow().aliases.get(self.obj_id, []))

def cells_where_live(self) -> Set[Cell]:
    """Cells where this symbol is live, deeply or shallowly."""
    return self.cells_where_deep_live | self.cells_where_shallow_live

def __repr__(self) -> str:
    return f"<{self.readable_name}>"

def __str__(self) -> str:
    return self.readable_name

def __hash__(self) -> int:
    # identity-based hashing; each symbol object is unique
    return hash(id(self))

def __lt__(self, other) -> bool:
    # arbitrary-but-stable ordering by id, used when sorting symbols
    return id(self) < id(other)

def add_tag(self, tag_value: str) -> None:
    """Attach a user-specified tag to this symbol."""
    self._tags.add(tag_value)

def remove_tag(self, tag_value: str) -> None:
    """Remove a tag if present (no-op otherwise)."""
    self._tags.discard(tag_value)

def has_tag(self, tag_value: str) -> bool:
    """Whether this symbol carries the given tag."""
    return tag_value in self._tags

def temporary_disable_warnings(self) -> None:
    """Suppress staleness warnings until the next refresh."""
    self._temp_disable_warnings = True

def last_used_timestamp(self) -> Timestamp:
    """Most recent timestamp at which this symbol was used."""
    if len(self.timestamp_by_used_time) == 0:
        return Timestamp.uninitialized()
    else:
        return max(self.timestamp_by_used_time.keys())

def namespace_waiting_symbols(self) -> Set["Symbol"]:
    """Waiting symbols in this symbol's namespace, if any."""
    ns = self.namespace
    return set() if ns is None else ns.namespace_waiting_symbols

def shallow_timestamp(self) -> Timestamp:
    """This symbol's own timestamp including any override (e.g. from a
    widget value update), but excluding namespace descendants."""
    if self._override_timestamp is None:
        return self._timestamp
    else:
        return max(self._timestamp, self._override_timestamp)

def visible_timestamp(self) -> Optional[Timestamp]:
    """Most recent update timestamp whose cell is still visible."""
    for ts in sorted(self.updated_timestamps, reverse=True):
        if cells().at_timestamp(ts).is_visible:
            return ts
    return None

def memoize_timestamp(self) -> Optional[Timestamp]:
    """Timestamp recorded for the current object id, for memoization."""
    return self.last_updated_timestamp_by_obj_id.get(self.obj_id)

def timestamp(self) -> Timestamp:
    """Effective timestamp: the shallow timestamp joined with the max
    descendant timestamp of the namespace (imports / modules excluded)."""
    ts = self.shallow_timestamp
    if self.is_import or self.is_module:
        return ts
    ns = self.namespace
    return ts if ns is None else max(ts, ns.max_descendent_timestamp)
def _compute_namespace_timestamps(
    self,
    seen: Optional[Set["Symbol"]] = None,
    version_ubound: Optional[Timestamp] = None,
) -> Set[Timestamp]:
    """Collect timestamps for this symbol and its namespace descendants.

    With ``version_ubound`` set, only the greatest snapshot timestamp not
    exceeding the bound is used per symbol (for historical versions).
    """
    if version_ubound is None:
        timestamps = {self.shallow_timestamp, self.timestamp}
    else:
        max_leq_ubound = Timestamp.uninitialized()
        # snapshots are appended in order, so scan from the newest
        for ts in reversed(self._snapshot_timestamps):
            if ts <= version_ubound:
                max_leq_ubound = ts
                break
        if max_leq_ubound.is_initialized:
            timestamps = {max_leq_ubound}
        else:
            timestamps = set()
    ns = self.namespace
    if ns is None:
        return timestamps
    if seen is None:
        seen = set()
    if self in seen:
        return timestamps
    seen.add(self)
    for sym in ns.all_symbols_this_indentation():
        timestamps |= sym._compute_namespace_timestamps(
            seen=seen, version_ubound=version_ubound
        )
    return timestamps

def _get_timestamps_for_version(self, version: int) -> Set[Timestamp]:
    """Timestamps making up the requested historical ``version`` (an index
    into the snapshot list; ``-1`` means the latest version)."""
    if len(self._snapshot_timestamps) == 0:
        return {self.timestamp}
    ts = self._snapshot_timestamps[version]
    if ts.cell_num == -1:
        return {Timestamp(self.defined_cell_num, ts.stmt_num)}
    else:
        return self._compute_namespace_timestamps(
            version_ubound=None if version == -1 else ts
        )

def code(
    self, format_type: Optional[Type[FormatType]] = None, version: int = -1
) -> Slice:
    """Reconstruct the (blackened) code slice that produced this symbol at
    the given version."""
    return statements().format_multi_slice(
        self._get_timestamps_for_version(version=version),
        blacken=True,
        format_type=format_type,
    )
def cascading_reactive_cell_num(
    self,
    seen: Optional[Set["Symbol"]] = None,
    consider_containing_symbols: bool = True,
) -> int:
    """Greatest cell counter at which this symbol (or, transitively, its
    namespace / containing symbols) became cascading-reactive; -1 if never.
    """
    if seen is None:
        seen = set()
    if self in seen:
        return -1
    seen.add(self)
    cell_num = self._cascading_reactive_cell_num
    ns = self.namespace
    ret = (
        cell_num
        if ns is None
        else max(
            cell_num,
            ns.max_cascading_reactive_cell_num(seen),
        )
    )
    if not consider_containing_symbols:
        return ret
    for sym in self.iter_containing_symbols():
        ret = max(ret, sym.cascading_reactive_cell_num(seen=seen))
    return ret

def bump_cascading_reactive_cell_num(self, ctr: Optional[int] = None) -> None:
    """Raise the cascading-reactive counter to ``ctr`` (default: the
    current cell counter), never lowering it."""
    self._cascading_reactive_cell_num = max(
        self._cascading_reactive_cell_num,
        flow().cell_counter() if ctr is None else ctr,
    )

def iter_containing_symbols(self) -> Generator["Symbol", None, None]:
    """Yield this symbol followed by the aliases of every namespace that
    (transitively) contains it."""
    yield self
    ns = self.containing_namespace
    if ns is None or not ns.is_namespace_scope:
        return
    for containing_ns in ns.iter_containing_namespaces():
        yield from flow().aliases.get(containing_ns.obj_id, [])

def waiting_timestamp(self) -> int:
    """Cell counter used for waiting computations (floored at the flow's
    minimum timestamp)."""
    return max(self._timestamp.cell_num, flow().min_timestamp)

def defined_cell_num(self) -> int:
    """Execution counter of the cell that defined this symbol."""
    return self._defined_cell_num

def readable_name(self) -> str:
    """Human-readable, namespace-qualified name for display."""
    return self.containing_scope.make_namespace_qualified_name(self)

def is_subscript(self) -> bool:
    return self.symbol_type == SymbolType.SUBSCRIPT

def is_class(self) -> bool:
    return self.symbol_type == SymbolType.CLASS

def is_function(self) -> bool:
    return self.symbol_type == SymbolType.FUNCTION

def is_lambda(self) -> bool:
    # TODO: this is terrible
    return type(self.name) is str and self.name.startswith( # noqa: E721
        "<lambda_sym_"
    )

def is_import(self) -> bool:
    return self.symbol_type == SymbolType.IMPORT

def is_module(self) -> bool:
    return self.symbol_type == SymbolType.MODULE
def imported_module(self) -> str:
    """For an IMPORT symbol, the module it was imported from.

    For ``import a.b as c`` this is ``a.b``; for ``from m import x`` it is
    ``m``. Raises ``ValueError`` for non-import symbols or when the aliased
    name cannot be located, ``TypeError`` for unexpected statement types.
    """
    if not self.is_import:
        raise ValueError("only IMPORT symbols have `imported_module` property")
    if isinstance(self.stmt_node, ast.Import):
        for alias in self.stmt_node.names:
            name = alias.asname or alias.name
            if name == self.name:
                return alias.name
        # fixed message typo: "is stmt" -> "in stmt"
        raise ValueError(
            "Unable to find module for symbol %s in stmt %s"
            % (self, ast.dump(self.stmt_node))
        )
    elif isinstance(self.stmt_node, ast.ImportFrom):
        return self.stmt_node.module
    else:
        raise TypeError(
            "Invalid stmt type for import symbol: %s" % ast.dump(self.stmt_node)
        )

def imported_symbol_original_name(self) -> str:
    """For an IMPORT symbol, the original (pre-``as``) imported name.

    For ``import m`` this is the module path; for ``from m import x as y``
    it is ``x``.
    """
    if not self.is_import:
        raise ValueError(
            "only IMPORT symbols have `imported_symbol_original_name` property"
        )
    if isinstance(self.stmt_node, ast.Import):
        return self.imported_module
    elif isinstance(self.stmt_node, ast.ImportFrom):
        for alias in self.stmt_node.names:
            name = alias.asname or alias.name
            if name == self.name:
                return alias.name
        # fixed message typo: "is stmt" -> "in stmt"
        raise ValueError(
            "Unable to find module for symbol %s in stmt %s"
            % (self, ast.dump(self.stmt_node))
        )
    else:
        raise TypeError(
            "Invalid stmt type for import symbol: %s" % ast.dump(self.stmt_node)
        )
def is_cascading_reactive_at_counter(self, ctr: int) -> bool:
    """Whether this symbol should trigger cascading reactivity for a cell
    executed at counter ``ctr``."""
    return self.cascading_reactive_cell_num() > max(
        ctr, flow().min_cascading_reactive_cell_num
    )

def get_top_level(self) -> Optional["Symbol"]:
    """Walk containing namespaces until a globally accessible alias is
    found; ``None`` when no top-level symbol exists."""
    if not self.containing_scope.is_namespace_scope:
        return self
    else:
        containing_scope = cast("Namespace", self.containing_scope)
        for alias in flow().aliases.get(containing_scope.obj_id, []):
            if alias.is_globally_accessible:
                return alias.get_top_level()
        return None

def get_import_string(self) -> str:
    """Reconstruct the import statement that created this IMPORT symbol."""
    if not self.is_import:
        raise ValueError("only IMPORT symbols support recreating the import string")
    module = self.imported_module
    if isinstance(self.stmt_node, ast.Import):
        if module == self.name:
            return f"import {module}"
        else:
            return f"import {module} as {self.name}"
    elif isinstance(self.stmt_node, ast.ImportFrom):
        original_symbol_name = self.imported_symbol_original_name
        if original_symbol_name == self.name:
            return f"from {module} import {original_symbol_name}"
        else:
            return f"from {module} import {original_symbol_name} as {self.name}"
    else:
        raise TypeError(
            "Invalid stmt type for import symbol: %s" % ast.dump(self.stmt_node)
        )

def is_anonymous(self) -> bool:
    """Whether this symbol (or its containing namespace) is anonymous."""
    if self.symbol_type == SymbolType.ANONYMOUS:
        return True
    ns = self.containing_namespace
    if ns is not None and ns.is_anonymous:
        return True
    return False

def is_implicit(self) -> bool:
    """Whether this symbol was created implicitly during tracing."""
    return self._implicit

def shallow_clone(
    self, new_obj: Any, new_containing_scope: "Scope", symbol_type: SymbolType
) -> "Symbol":
    """Clone this symbol's name onto a new object / scope / symbol type."""
    return self.__class__(self.name, symbol_type, new_obj, new_containing_scope)

def obj_id(self) -> int:
    """``id()`` of the underlying object."""
    return id(self.obj)

def obj_len(self) -> Optional[int]:
    """``len()`` of the underlying object, or ``None`` when unavailable
    (lazy modules are skipped so their import is not triggered)."""
    try:
        if not self.is_obj_lazy_module and hasattr(self.obj, "__len__"):
            return len(self.obj)
    except: # noqa: E722
        pass
    return None

def obj_type(self) -> Type[Any]:
    """Concrete runtime type of the underlying object."""
    return type(self.obj)

def is_immutable(self) -> bool:
    return self.obj_type in self.IMMUTABLE_TYPES

def is_mutation_virtual_symbol(self) -> bool:
    return self.name == self.IPYFLOW_MUTATION_VIRTUAL_SYMBOL_NAME

def is_underscore(self) -> bool:
    # the global `_` is IPython's last-expression-result binding
    return self.name == "_" and self.containing_scope.is_global

def is_obj_lazy_module(self) -> bool:
    return self.obj_type is _LazyModule

def get_type_annotation(self):
    """Best-effort type annotation object for the underlying value."""
    return get_type_annotation(self.obj)

def get_type_annotation_string(self) -> str:
    """String form of :meth:`get_type_annotation`."""
    return make_annotation_string(self.get_type_annotation())
def namespace(self) -> Optional["Namespace"]:
    """Namespace keyed by this symbol's object id, if one exists."""
    return flow().namespaces.get(self.obj_id)

def containing_namespace(self) -> Optional["Namespace"]:
    """The containing scope viewed as a namespace, when applicable."""
    if self.containing_scope.is_namespace_scope:
        return cast("Namespace", self.containing_scope)
    else:
        return None

def full_path(self) -> Tuple[str, ...]:
    """Scope path plus this symbol's name, as a tuple of strings."""
    return self.containing_scope.full_path + (str(self.name),)

def full_namespace_path(self) -> str:
    """Qualified name within the namespace tree."""
    return self.containing_scope.make_namespace_qualified_name(self)

def is_garbage(self) -> bool:
    """Whether this symbol has been tombstoned."""
    return self._tombstone

def is_new_garbage(self) -> bool:
    """Whether this symbol just became garbage: not yet tombstoned, but no
    external references to its object remain."""
    if self._tombstone:
        return False
    containing_ns = self.containing_namespace
    numpy = sys.modules.get("numpy", None)
    if (
        numpy is not None
        and containing_ns is not None
        and isinstance(containing_ns.obj, numpy.ndarray)
    ):
        # numpy atoms are not interned (so assigning array elts to a variable does not bump refcount);
        # also seems that refcount is always 0, so just check if the containing namespace is garbage
        return self.containing_namespace.is_garbage
    return self.get_ref_count() == 0

def is_globally_accessible(self) -> bool:
    return self.containing_scope.is_globally_accessible

def is_user_accessible(self) -> bool:
    """Whether the user could still reach this symbol by name: globally
    accessible, not anonymous, and neither it nor its containing namespace
    is garbage."""
    return (
        self.is_globally_accessible
        and not self.is_anonymous
        and not self.is_garbage
        and not (
            self.containing_namespace is not None
            and (
                self.containing_namespace.is_anonymous
                or self.containing_namespace.is_garbage
            )
        )
    )

def _remove_self_from_aliases(self) -> None:
    """Detach this symbol from the alias table and drop its object ref."""
    cleanup_discard(flow().aliases, self.obj_id, self)
    self.obj = None

def mark_garbage(self) -> None:
    """Tombstone this symbol; when every alias is garbage, mark the
    namespace garbage as well."""
    if self.is_garbage:
        return
    self._tombstone = True
    ns = self.namespace
    if ns is not None and all(alias.is_garbage for alias in self.aliases):
        ns.mark_garbage()

def collect_self_garbage(self) -> None:
    """Physically remove a tombstoned symbol from the dependency graph and
    from its scope's lookup tables."""
    assert self.is_garbage
    flow().blocked_reactive_timestamps_by_symbol.pop(self, None)
    self._remove_self_from_aliases()
    for parent in self.parents:
        parent.children.pop(self, None)
    for child in self.children:
        child.parents.pop(self, None)
    containing_ns = self.containing_namespace
    if self.is_subscript and containing_ns is not None:
        containing_ns._subscript_symbol_by_name.pop(self.name, None)
    elif not self.is_subscript:
        self.containing_scope._symbol_by_name.pop(self.name, None)
    else:
        logger.warning(
            "could not find symbol %s in its scope %s", self, self.containing_scope
        )
    # TODO: remove from static / dynamic parent / children edges
    # need to keep this around for readable_name to work
    # self.containing_scope = None

# def update_type(self, new_type):
#     self.symbol_type = new_type
#     if self.is_function:
#         self.call_scope = self.containing_scope.make_child_scope(self.name)
#     else:
#         self.call_scope = None
def update_obj_ref(self, obj: Any, refresh_cached: bool = True) -> None:
    """Point this symbol at a new object.

    Unsubscribes widget observers on the old object, clears the tombstone,
    invalidates typecheck results when the type changed, and migrates the
    namespace from the old object id to the new one when the qualified path
    is unchanged.
    """
    if self._num_ipywidget_observers > 0:
        try:
            self.obj.unobserve_all()
        except: # noqa
            pass
        self._num_ipywidget_observers = 0
    if self._num_mercury_widget_observers > 0:
        try:
            self._mercury_widgets_manager.get_widget(
                self.obj.code_uid
            ).unobserve_all()
        except: # noqa
            pass
        self._num_mercury_widget_observers = 0
    self._tombstone = False
    self._cached_out_of_sync = True
    if (
        flow().settings.mark_typecheck_failures_unsafe
        and self.cached_obj_type != type(obj)
    ):
        for cell in self.cells_where_live:
            cell.invalidate_typecheck_result()
    self.cells_where_shallow_live.clear()
    self.cells_where_deep_live.clear()
    self.obj = obj
    if self.cached_obj_id is not None and self.cached_obj_id != self.obj_id:
        new_ns = flow().namespaces.get(self.obj_id, None)
        # don't overwrite existing namespace for this obj
        old_ns = flow().namespaces.get(self.cached_obj_id, None)
        if (
            old_ns is not None
            and old_ns.full_namespace_path == self.full_namespace_path
        ):
            if new_ns is None:
                logger.info("create fresh copy of namespace %s", old_ns)
                new_ns = old_ns.fresh_copy(obj)
                old_ns.transfer_symbols_to(new_ns)
            else:
                new_ns.scope_name = old_ns.scope_name
                new_ns.parent_scope = old_ns.parent_scope
        self._handle_aliases()
        if (
            old_ns is not None
            and len(flow().aliases.get(self.cached_obj_id, [])) == 0
        ):
            old_ns.mark_garbage()
    if refresh_cached:
        self._refresh_cached_obj()

def invalidate_cached(self) -> None:
    """Drop the cached id/type snapshot so the next check re-syncs."""
    self._cached_out_of_sync = True
    self.cached_obj_id = None
    self.cached_obj_type = None

def get_ref_count(self) -> int:
    """External refcount of the underlying object, excluding references
    held by ipyflow itself (aliases and namespace); -1 for null objects."""
    if self.obj is None or self.obj is Symbol.NULL:
        return -1
    total = sys.getrefcount(self.obj) - 1
    total -= len(flow().aliases.get(self.obj_id, []))
    ns = flow().namespaces.get(self.obj_id, None)
    if ns is not None and ns.obj is not None and ns.obj is not Symbol.NULL:
        total -= 1
    return total

def _should_cancel_propagation(self, prev_obj: Optional[Any]) -> bool:
    """Whether an update should not propagate to dependents (e.g. the
    object is effectively unchanged and not explicitly blocked)."""
    if prev_obj is None:
        return False
    if (
        flow().blocked_reactive_timestamps_by_symbol.get(self, -1)
        == self.timestamp.cell_num
    ):
        return False
    if not self._cached_out_of_sync or self.obj_id == self.cached_obj_id:
        return True
    if self.obj is None or prev_obj is Symbol.NULL:
        return self.obj is None and prev_obj is Symbol.NULL
    return False

def _handle_aliases(self):
    """Move this symbol from the old object id's alias set to the new one."""
    cleanup_discard(flow().aliases, self.cached_obj_id, self)
    flow().aliases.setdefault(self.obj_id, set()).add(self)

def update_stmt_node(self, stmt_node: Optional[ast.stmt]) -> Optional[ast.stmt]:
    """Attach the defining statement; for functions / lambdas, register the
    statement -> symbol mapping and create a call scope."""
    self.stmt_node = stmt_node
    self._funcall_live_symbols = None
    if self.is_function or (
        stmt_node is not None and isinstance(stmt_node, ast.Lambda)
    ):
        # TODO: in the case of lambdas, there will not necessarily be one
        # symbol for a given statement. We need a more precise way to determine
        # the symbol being called than by looking at the stmt in question.
        flow().statement_to_func_sym[id(stmt_node)] = self
        self.call_scope = self.containing_scope.make_child_scope(self.name)
        self.func_def_stmt = stmt_node
    return stmt_node

def _refresh_cached_obj(self) -> None:
    """Snapshot the current object's id / type / len."""
    self._cached_out_of_sync = False
    # don't keep an actual ref to avoid bumping refcount
    self.cached_obj_id = self.obj_id
    self.cached_obj_type = self.obj_type
    self.cached_obj_len = self.obj_len
def get_definition_args(self) -> List[ast.arg]:
    """Return every argument declared by this symbol's function definition.

    Covers positional and keyword-only args plus ``*args`` / ``**kwargs``
    when present; requires ``func_def_stmt`` to be a function def or lambda.
    (Positional-only args are not included, matching prior behavior.)
    """
    assert self.func_def_stmt is not None and isinstance(
        self.func_def_stmt, (ast.FunctionDef, ast.AsyncFunctionDef, ast.Lambda)
    )
    # hoist the repeated attribute lookup and build the list directly
    # instead of an element-by-element append loop
    arguments = self.func_def_stmt.args
    args = list(arguments.args) + list(arguments.kwonlyargs)
    if arguments.vararg is not None:
        args.append(arguments.vararg)
    if arguments.kwarg is not None:
        args.append(arguments.kwarg)
    return args
def _match_call_args_with_definition_args(
self,
) -> Generator[Tuple[ast.arg, List["Symbol"]], None, None]:
# TODO: handle posonlyargs, kwonlyargs
assert self.func_def_stmt is not None and isinstance(
self.func_def_stmt, (ast.FunctionDef, ast.AsyncFunctionDef, ast.Lambda)
)
caller_node = self._get_calling_ast_node()
if caller_node is None or not isinstance(caller_node, ast.Call):
return
kwarg_by_name = {
arg_key.arg: arg_key
for arg_key in self.func_def_stmt.args.args[
-len(self.func_def_stmt.args.defaults) :
]
}
if not all(keyword.arg in kwarg_by_name for keyword in caller_node.keywords):
logger.warning("detected mismatched kwargs from caller node to definition")
return
def_args = self.func_def_stmt.args.args
if len(self.func_def_stmt.args.defaults) > 0:
def_args = def_args[: -len(self.func_def_stmt.args.defaults)]
if len(def_args) > 0 and def_args[0].arg == "self":
# FIXME: this is bad and I should feel bad
def_args = def_args[1:]
for def_arg, call_arg in zip(def_args, caller_node.args):
if isinstance(call_arg, ast.Starred):
# give up
# TODO: handle this case
break
yield def_arg, tracer().resolve_loaded_symbols(call_arg)
seen_keys = set()
for keyword in caller_node.keywords:
keyword_key, keyword_value = keyword.arg, keyword.value
if keyword_value is None:
continue
seen_keys.add(keyword_key)
yield kwarg_by_name[keyword_key], tracer().resolve_loaded_symbols(
keyword_value
)
for arg_key, arg_value in zip(
self.func_def_stmt.args.args[-len(self.func_def_stmt.args.defaults) :],
self.func_def_stmt.args.defaults,
):
if arg_key.arg in seen_keys:
continue
yield arg_key, tracer().resolve_loaded_symbols(arg_value)
def _get_calling_ast_node(self) -> Optional[ast.Call]:
if tracer().tracing_disabled_since_last_module_stmt or (
not hasattr(self.obj, "__module__")
and getattr(type(self.obj), "__module__", None) == "builtins"
):
return None
if self.func_def_stmt is not None and isinstance(
self.func_def_stmt, (ast.FunctionDef, ast.AsyncFunctionDef)
):
if self.name in ("__getitem__", "__setitem__", "__delitem__"):
# TODO: handle case where we're looking for a subscript for the calling node
return None
for decorator in self.func_def_stmt.decorator_list:
if isinstance(decorator, ast.Name) and decorator.id == "property":
# TODO: handle case where we're looking for an attribute for the calling node
return None
lexical_call_stack = tracer().lexical_call_stack
if len(lexical_call_stack) == 0:
return None
prev_node_id_in_cur_frame_lexical = lexical_call_stack.get_field(
"prev_node_id_in_cur_frame_lexical"
)
caller_ast_node = tracer().ast_node_by_id.get(
prev_node_id_in_cur_frame_lexical, None
)
if caller_ast_node is None or not isinstance(caller_ast_node, ast.Call):
return None
return caller_ast_node
def create_symbols_for_call_args(self, call_frame: FrameType) -> None:
assert self.func_def_stmt is not None
seen_def_args = set()
logger.info("create symbols for call to %s", self)
for def_arg, deps in self._match_call_args_with_definition_args():
seen_def_args.add(def_arg.arg)
self.call_scope.upsert_symbol_for_name(
def_arg.arg,
call_frame.f_locals.get(def_arg.arg),
deps,
self.func_def_stmt,
propagate=False,
symbol_node=def_arg,
)
logger.info("def arg %s matched with deps %s", def_arg, deps)
for def_arg in self.get_definition_args():
if def_arg.arg in seen_def_args:
continue
self.call_scope.upsert_symbol_for_name(
def_arg.arg,
None,
set(),
self.func_def_stmt,
propagate=False,
symbol_node=def_arg,
)
def is_waiting(self) -> bool:
if self.disable_warnings or self._temp_disable_warnings:
return False
if self.waiting_timestamp < self.required_timestamp.cell_num:
return True
elif flow().min_timestamp == -1:
return len(self.namespace_waiting_symbols) > 0
else:
# TODO: guard against infinite recurision
return any(sym.is_waiting for sym in self.namespace_waiting_symbols)
def is_shallow_stale(self) -> bool:
if self.disable_warnings or self._temp_disable_warnings:
return False
return self.waiting_timestamp < self.required_timestamp.cell_num
def _is_ready_or_waiting_at_position_impl(self, pos: int, deep: bool) -> bool:
for par, timestamps in self.parents.items():
for ts in timestamps:
dep_introduced_pos = cells().at_timestamp(ts).position
if dep_introduced_pos > pos:
continue
for updated_ts in par.updated_timestamps:
if cells().at_timestamp(updated_ts).position > dep_introduced_pos:
continue
if updated_ts.cell_num > ts.cell_num or par.is_waiting_at_position(
dep_introduced_pos
):
# logger.error("sym: %s", self)
# logger.error("pos: %s", pos)
# logger.error("parent: %s", par)
# logger.error("dep introdced ts: %s", ts)
# logger.error("dep introdced pos: %s", dep_introduced_pos)
# logger.error("par updated ts: %s", updated_ts)
# logger.error("par updated position: %s", cells().from_timestamp(updated_ts).position)
return True
if deep:
for sym in self.namespace_waiting_symbols:
if sym.is_waiting_at_position(pos):
return True
return False
def is_waiting_at_position(self, pos: int, deep: bool = True) -> bool:
if deep:
if not self.is_waiting:
return False
else:
if not self.is_shallow_stale:
return False
if flow().mut_settings.flow_order == FlowDirection.ANY_ORDER:
return True
if cells().exec_counter() > self._last_computed_ready_or_waiting_cache_ts:
self._is_ready_or_waiting_at_position_cache.clear()
self._last_computed_ready_or_waiting_cache_ts = cells().exec_counter()
if (pos, deep) in self._is_ready_or_waiting_at_position_cache:
return self._is_ready_or_waiting_at_position_cache[pos, deep]
# preemptively set this entry to 'False' in the cache to avoid infinite loops
self._is_ready_or_waiting_at_position_cache[pos, deep] = False
is_waiting = self._is_ready_or_waiting_at_position_impl(pos, deep)
self._is_ready_or_waiting_at_position_cache[pos, deep] = is_waiting
return is_waiting
def should_mark_waiting(self, updated_dep):
if self.disable_warnings:
return False
if updated_dep is self:
return False
return True
def _is_underscore_or_simple_assign(self, new_deps: Set["Symbol"]) -> bool:
if self.is_underscore:
# FIXME: distinguish between explicit assignment to _ from user and implicit assignment from kernel
return True
if not isinstance(self.stmt_node, (ast.Assign, ast.AnnAssign)):
return False
if len(new_deps) != 1:
return False
only_dep: Symbol = next(iter(new_deps))
# obj ids can get reused for anon symbols like literals
return not only_dep.is_anonymous and self.obj_id == only_dep.obj_id
def update_deps(
self,
new_deps: Set["Symbol"],
prev_obj: Any = None,
overwrite: bool = True,
mutated: bool = False,
deleted: bool = False,
propagate_to_namespace_descendents: bool = False,
propagate: bool = True,
refresh: bool = True,
is_cascading_reactive: Optional[bool] = None,
) -> None:
if self.is_import and self.obj_id == self.cached_obj_id:
# skip updates for imported symbols; just bump the version
self.refresh()
return
if overwrite and not self.is_globally_accessible:
self.watchpoints.clear()
if mutated and self.is_immutable:
return
# if we get here, no longer implicit
self._implicit = False
# quick last fix to avoid overwriting if we appear inside the set of deps to add (or a 1st order ancestor)
# TODO: check higher-order ancestors too?
overwrite = overwrite and self not in new_deps
overwrite = overwrite and not any(
self in new_dep.parents for new_dep in new_deps
)
logger.warning("symbol %s new deps %s", self, new_deps)
new_deps.discard(self)
if overwrite:
for parent in self.parents.keys() - new_deps:
parent.children.pop(self, None)
self.parents.pop(parent, None)
for new_parent in new_deps - self.parents.keys():
if new_parent is None:
continue
new_parent.children.setdefault(self, []).append(Timestamp.current())
self.parents.setdefault(new_parent, []).append(Timestamp.current())
self.required_timestamp = Timestamp.uninitialized()
self.fresher_ancestors.clear()
self.fresher_ancestor_timestamps.clear()
if mutated or isinstance(self.stmt_node, ast.AugAssign):
self.update_usage_info()
if (
(mutated or overwrite)
and Timestamp.current().is_initialized
and not self.is_immutable
and not self.is_mutation_virtual_symbol
and not self.is_anonymous
and self.containing_scope.is_global
and not self.is_underscore
and not self.is_implicit
and self.obj_type is not type
and not self.is_class
and self.namespace is not None
):
self.namespace.upsert_symbol_for_name(
self.IPYFLOW_MUTATION_VIRTUAL_SYMBOL_NAME, object(), propagate=False
)
propagate = propagate and (
mutated or deleted or not self._should_cancel_propagation(prev_obj)
)
try:
prev_cell = cells().current_cell().prev_cell
except KeyError:
prev_cell = None
prev_cell_ctr = -1 if prev_cell is None else prev_cell.cell_ctr
if overwrite:
flow_ = flow()
self._cascading_reactive_cell_num = -1
flow_.updated_reactive_symbols.discard(self)
flow_.updated_deep_reactive_symbols.discard(self)
if is_cascading_reactive is not None:
is_cascading_reactive = is_cascading_reactive or any(
sym.is_cascading_reactive_at_counter(prev_cell_ctr) for sym in new_deps
)
if is_cascading_reactive:
self.bump_cascading_reactive_cell_num()
if refresh:
self.refresh(
# rationale: if this is a mutation for which we have more precise information,
# then we don't need to update the ns descendents as this will already have happened.
# also don't update ns descendents for things like `a = b`
refresh_descendent_namespaces=propagate
and not (mutated and not propagate_to_namespace_descendents)
and not self._is_underscore_or_simple_assign(new_deps),
refresh_namespace_waiting=not mutated,
)
if propagate:
UpdateProtocol(self)(
new_deps, mutated, propagate_to_namespace_descendents, refresh
)
self._refresh_cached_obj()
if self.is_class:
# pop pending class defs and update obj ref
pending_class_ns = tracer().pending_class_namespaces.pop()
pending_class_ns.update_obj_ref(self.obj)
for dep in new_deps:
if dep.obj is self.obj and dep.call_scope is not None:
self.call_scope = dep.call_scope
self.func_def_stmt = dep.func_def_stmt
ns = self.namespace
if ns is not None and ns.scope_name == "self" and isinstance(self.name, str):
# fixup namespace name if necessary
# can happen if symbol for 'self' was created in a previous __init__
ns.scope_name = self.name
if overwrite and len(flow().aliases[self.obj_id]) == 1:
self._handle_possible_widget_creation()
self._handle_possible_mercury_widget_creation()
def _mercury_widgets_manager(self):
if self.obj is None:
return None
if self.is_obj_lazy_module or not hasattr(self.obj, "code_uid"):
return None
try:
return sys.modules.get(self.obj.__class__.__module__).WidgetsManager
except: # noqa
return None
def _handle_possible_widget_creation(self) -> None:
if self.obj is None:
return
Widget = getattr(sys.modules.get("ipywidgets"), "Widget", None)
if (
Widget is None
or self.is_obj_lazy_module
or not isinstance(self.obj, Widget)
or not hasattr(self.obj, "observe")
or not hasattr(self.obj, "value")
):
return
self.namespaced().upsert_symbol_for_name(
"value", getattr(self.obj, "value", None), set(), self.stmt_node
)
self.obj.observe(self._observe_widget)
self._num_ipywidget_observers += 1
def _handle_possible_mercury_widget_creation(self) -> None:
WidgetsManager = self._mercury_widgets_manager
if WidgetsManager is None:
return
widget = WidgetsManager.get_widget(self.obj.code_uid)
self.namespaced().upsert_symbol_for_name(
"value", getattr(widget, "value", None), set(), self.stmt_node
)
widget.observe(self._observe_widget)
self._num_mercury_widget_observers += 1
def _observe_widget(self, msg: Dict[str, Any]) -> None:
if msg.get("name") != "value" or "new" not in msg:
return
ns = self.namespace
sym = ns.lookup_symbol_by_name_this_indentation("value")
if sym is None:
return
newval = msg["new"]
current_ts_cell = cells().at_timestamp(self._timestamp)
current_ts_cell._extra_stmt = ast.parse(f"{sym.readable_name} = {newval}").body[
0
]
sym._override_ready_liveness_cell_num = flow().cell_counter() + 1
sym._override_timestamp = Timestamp(
self._timestamp.cell_num, current_ts_cell.num_original_stmts
)
sym.update_obj_ref(newval)
statements().create_and_track(
current_ts_cell._extra_stmt,
timestamp=sym._override_timestamp,
override=True,
)
with dynamic_slicing_context():
flow().add_data_dep(
sym._timestamp,
sym._override_timestamp,
sym,
)
flow().add_data_dep(
sym._override_timestamp,
sym._timestamp,
sym,
)
self.debounced_exec_schedule(reactive=True)
def debounced_exec_schedule(self, reactive: bool) -> None:
if _debounced_exec_schedule(
cells().at_timestamp(self.timestamp).cell_id, reactive=reactive
):
flow().debounced_exec_schedule_pending = True
def namespaced(self) -> "Namespace":
ns = self.namespace
if ns is not None:
return ns
return namespaces()(self.obj, self.name, parent_scope=self.containing_scope)
def update_usage_info_one_timestamp(
self,
used_time: Timestamp,
updated_time: Timestamp,
is_static: bool,
) -> bool:
flow_ = flow()
is_usage = is_static or updated_time < used_time
if is_usage:
with slicing_context(is_static=is_static):
flow_.add_data_dep(
used_time,
updated_time,
self,
)
if is_static:
is_usage = cells().at_timestamp(updated_time).is_visible
return is_usage
def update_usage_info(
self,
used_time: Optional[Timestamp] = None,
used_node: Optional[ast.AST] = None,
exclude_ns: bool = False,
is_static: bool = False,
is_blocking: bool = False,
) -> "Symbol":
is_blocking = is_blocking or id(used_node) in tracer().blocking_node_ids
if used_time is None:
used_time = Timestamp.current()
if flow().is_dev_mode:
logger.info(
"sym `%s` used in cell %d last updated in cell %d",
self,
used_time.cell_num,
self.timestamp,
)
timestamp_by_used_time = (
self.timestamp_by_liveness_time
if is_static
else self.timestamp_by_used_time
)
if not is_blocking:
is_usage = False
ts_to_use = self._timestamp
for updated_ts in sorted(self.updated_timestamps, reverse=True):
if not updated_ts.is_initialized:
continue
is_usage = self.update_usage_info_one_timestamp(
used_time,
updated_ts,
is_static=is_static,
)
if is_usage or not is_static:
break
if is_usage and used_time.is_initialized:
timestamp_by_used_time[used_time] = ts_to_use
if used_node is not None:
self.used_node_by_used_time[used_time] = used_node
if exclude_ns:
return self
for sym in self.get_namespace_symbols(recurse=True):
sym.update_usage_info(
used_time=used_time,
used_node=None,
exclude_ns=True,
is_static=is_static,
is_blocking=is_blocking,
)
return self
def get_namespace_symbols(
self, recurse: bool = False, seen: Optional[Set["Symbol"]] = None
) -> Generator["Symbol", None, None]:
ns = self.namespace
if ns is None:
return
if seen is None:
seen = set()
if self in seen:
return
seen.add(self)
for sym in ns.all_symbols_this_indentation():
yield sym
if recurse:
yield from sym.get_namespace_symbols(recurse=recurse, seen=seen)
def _take_timestamp_snapshots(
self, ts_ubound: Timestamp, seen: Optional[Set["Symbol"]] = None
) -> None:
if seen is None:
seen = set()
if self in seen:
return
seen.add(self)
self._snapshot_timestamps.append(self._timestamp)
self._snapshot_timestamp_ubounds.append(ts_ubound)
containing_ns = self.containing_namespace
if containing_ns is None:
return
for alias in flow().aliases.get(containing_ns.obj_id, []):
alias._take_timestamp_snapshots(ts_ubound, seen=seen)
def refresh(
self,
take_timestamp_snapshots: bool = True,
refresh_descendent_namespaces: bool = False,
refresh_namespace_waiting: bool = True,
timestamp: Optional[Timestamp] = None,
seen: Optional[Set["Symbol"]] = None,
) -> None:
orig_timestamp = self._timestamp
self._timestamp = Timestamp.current() if timestamp is None else timestamp
self._override_timestamp = None
if take_timestamp_snapshots and (
orig_timestamp < self._timestamp or len(self._snapshot_timestamps) == 0
):
self._take_timestamp_snapshots(self._timestamp)
self.updated_timestamps.add(self._timestamp)
self._temp_disable_warnings = False
for cell in self.cells_where_live:
cell.add_used_cell_counter(self, self._timestamp.cell_num)
ns = self.containing_namespace
if ns is not None:
# logger.error("bump version of %s due to %s (value %s)", ns.full_path, self.full_path, self.obj)
ns.max_descendent_timestamp = self.shallow_timestamp
for alias in flow().aliases.get(ns.obj_id, []):
for cell in alias.cells_where_deep_live:
cell.add_used_cell_counter(alias, self._timestamp.cell_num)
if refresh_descendent_namespaces:
if seen is None:
seen = set()
if self in seen:
return
seen.add(self)
ns = self.namespace
if ns is not None:
for sym in ns.all_symbols_this_indentation(exclude_class=True):
# this is to handle cases like `x = x.mutate(42)`, where
# we could have changed some member of x but returned the
# original object -- in this case, just assume that all
# the stale namespace descendents are no longer stale, as
# this is likely the user intention. For an example, see
# `test_external_object_update_propagates_to_stale_namespace_symbols()`
# in `test_frontend_checker.py`
if not sym.is_waiting or refresh_namespace_waiting:
# logger.error(
# "refresh %s due to %s (value %s) via namespace %s",
# sym.full_path,
# self.full_path,
# self.obj,
# ns.full_path,
# )
sym.refresh(
refresh_descendent_namespaces=True,
timestamp=self.shallow_timestamp,
take_timestamp_snapshots=False,
seen=seen,
)
if refresh_namespace_waiting:
self.namespace_waiting_symbols.clear()
def resync_if_necessary(self, refresh: bool) -> None:
if not self.containing_scope.is_global:
return
try:
obj = shell().user_ns[self.name]
except: # noqa
# cinder runtime can throw an exception here due to lazy imports that fail
return
if self.obj is not obj:
flow_ = flow()
for alias in flow_.aliases.get(
self.cached_obj_id, set()
) | flow_.aliases.get(self.obj_id, set()):
containing_namespace = alias.containing_namespace
if containing_namespace is None:
continue
containing_obj = containing_namespace.obj
if containing_obj is None:
continue
# TODO: handle dict case too
if isinstance(containing_obj, list) and containing_obj[-1] is obj:
containing_namespace._subscript_symbol_by_name.pop(alias.name, None)
alias.name = len(containing_obj) - 1
alias.update_obj_ref(obj)
containing_namespace._subscript_symbol_by_name[alias.name] = alias
cleanup_discard(flow_.aliases, self.cached_obj_id, self)
cleanup_discard(flow_.aliases, self.obj_id, self)
flow_.aliases.setdefault(id(obj), set()).add(self)
self.update_obj_ref(obj)
elif self.obj_len != self.cached_obj_len:
self._refresh_cached_obj()
else:
return
if refresh:
self.refresh()
_MAX_MEMOIZE_COMPARABLE_SIZE = 10**6
def _equal(obj1: Any, obj2: Any) -> bool:
return obj1 == obj2
def _array_equal(obj1: Any, obj2: Any) -> bool:
import numpy as np
try:
return np.alltrue(obj1 == obj2) # type: ignore
except: # noqa
return False
def _dataframe_equal(obj1: Any, obj2: Any) -> bool:
try:
return obj1.equals(obj2) # type: ignore
except: # noqa
return False
def _make_list_eq(
eqs: List[Callable[[Any, Any], bool]]
) -> Callable[[List[Any], List[Any]], bool]:
def list_eq(lst1: List[Any], lst2: List[Any]) -> bool:
for eq, obj1, obj2 in zip(eqs, lst1, lst2):
if not eq(obj1, obj2):
return False
return True
return list_eq
def make_memoize_comparable_for_obj(
cls, obj: Any, seen_ids: Set[int]
) -> Tuple[Any, Optional[Callable[[Any, Any], bool]], int]:
if isinstance(obj, (bool, bytes, bytearray, int, float, str)):
return obj, cls._equal, 1
if not isinstance(obj, tuple):
if id(obj) in seen_ids:
return cls.NULL, None, -1
seen_ids.add(id(obj))
if isinstance(obj, (dict, frozenset, list, set, tuple)):
size = 0
comp = []
eqs: List[Callable[[Any, Any], bool]] = []
if isinstance(obj, dict):
iterable: "Iterable[Any]" = sorted(obj.items())
else:
iterable = obj
for inner in iterable:
inner_comp, inner_eq, inner_size = cls.make_memoize_comparable_for_obj(
inner, seen_ids
)
if inner_comp is cls.NULL or inner_eq is None:
return cls.NULL, None, -1
size += inner_size + 1
if size > cls._MAX_MEMOIZE_COMPARABLE_SIZE:
return cls.NULL, None, -1
comp.append(inner_comp)
eqs.append(inner_eq)
if all(eq is cls._equal for eq in eqs):
iter_eq: Callable[[Any, Any], bool] = cls._equal
elif isinstance(obj, (frozenset, set)):
return cls.NULL, None, -1
else:
iter_eq = cls._make_list_eq(eqs)
ret = frozenset(comp) if isinstance(obj, (frozenset, set)) else comp
return ret, iter_eq, size
elif type(obj) in (type, FunctionType):
# try to determine it based on the symbol
for sym in flow().aliases.get(id(obj), []):
comp, eq = sym.make_memoize_comparable(seen_ids=seen_ids)
if comp is not cls.NULL and eq is not None:
return comp, eq, 1
return cls.NULL, None, -1
else:
# hacks to check if they are arrays, dataframes, etc without explicitly importing these
module = getattr(type(obj), "__module__", "")
if module.startswith("numpy"):
name = getattr(type(obj), "__name__", "")
if name.endswith("ndarray"):
return obj, cls._array_equal, obj.size
else:
numpy = sys.modules.get("numpy")
if numpy is not None and isinstance(obj, numpy.number):
return obj, cls._equal, 1
elif module.startswith(("modin", "pandas")):
name = getattr(type(obj), "__name__", "")
if name.endswith(("DataFrame", "Series")):
return obj, cls._dataframe_equal, obj.size
elif module.startswith("ipywidgets"):
ipywidgets = sys.modules.get("ipywidgets")
if (
ipywidgets is not None
and isinstance(obj, ipywidgets.Widget)
and hasattr(obj, "value")
):
return obj.value, cls._equal, 1
return cls.NULL, None, -1
def make_memoize_comparable(
self, seen_ids: Optional[Set[int]] = None
) -> Tuple[Any, Optional[Callable[[Any, Any], bool]]]:
if seen_ids is None:
seen_ids = set()
if isinstance(
self.stmt_node, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)
):
comps = [astunparse.unparse(self.stmt_node)]
for sym in sorted(self.parents.keys()):
par_comp, eq = sym.make_memoize_comparable(seen_ids=seen_ids)
if par_comp is self.NULL or eq is not self._equal:
return self.NULL, None
comps.append(par_comp)
return comps, self._equal
obj, eq, size = self.make_memoize_comparable_for_obj(self.obj, seen_ids)
if size > self._MAX_MEMOIZE_COMPARABLE_SIZE:
return self.NULL, None
else:
return obj, eq
The provided code snippet includes necessary dependencies for implementing the `rusers` function. Write a Python function `def rusers(sym: Any) -> List[Symbol]` to solve the following problem:
Given the programmatic usage of some symbol, look up the corresponding users of that symbol.
Here is the function:
def rusers(sym: Any) -> List[Symbol]:
"""
Given the programmatic usage of some symbol,
look up the corresponding users of that symbol.
"""
# See the `argument` handler in ipyflow_tracer for the
# actual implementation; this is just a stub that ensures
# that handler was able to find something.
sym = _validate(sym)
seen: Set[Symbol] = set()
_traverse(sym, seen, "children")
ret = [v for v in (seen - {sym}) if not v.is_anonymous]
return ret | Given the programmatic usage of some symbol, look up the corresponding users of that symbol. |
15,088 | from typing import TYPE_CHECKING, Any, List, Set, Union, cast
from ipyflow.data_model.symbol import Symbol
from ipyflow.data_model.timestamp import Timestamp
from ipyflow.tracing.watchpoint import Watchpoints
def _validate(sym: Any) -> Symbol:
if sym is None or not isinstance(sym, Symbol):
raise ValueError("unable to lookup metadata for symbol")
return cast(Symbol, sym)
class Watchpoints(list):
def append(self, *args, **kwargs) -> None:
raise NotImplementedError("please use the `add` method instead")
def extend(self, *args, **kwargs) -> None:
raise NotImplementedError("please use the `add` method instead")
def __add__(self, *args, **kwargs) -> None:
raise NotImplementedError("please use the `add` method instead")
def __iadd__(self, *args, **kwargs) -> None:
raise NotImplementedError("please use the `add` method instead")
def __radd__(self, *args, **kwargs) -> None:
raise NotImplementedError("please use the `add` method instead")
def add(
self, pred: Optional[Callable[..., bool]] = None, name: Optional[str] = None
):
super().append(Watchpoint(name, pred))
def passing(
self, obj: Any, *, position: Tuple[int, int], symbol_name: str
) -> Tuple[Watchpoint, ...]:
passing_watchpoints = []
for wp in self:
if wp(obj, position=position, symbol_name=symbol_name):
passing_watchpoints.append(wp)
return tuple(passing_watchpoints)
def __call__(
self, obj: Any, *, position: Tuple[int, int], symbol_name: str
) -> Tuple[Watchpoint, ...]:
return self.passing(obj, position=position, symbol_name=symbol_name)
The provided code snippet includes necessary dependencies for implementing the `watchpoints` function. Write a Python function `def watchpoints(sym: Any) -> Watchpoints` to solve the following problem:
Given the programmatic usage of some symbol, look up the corresponding watchpoints for that symbol.
Here is the function:
def watchpoints(sym: Any) -> Watchpoints:
"""
Given the programmatic usage of some symbol,
look up the corresponding watchpoints for that symbol.
"""
return _validate(sym).watchpoints | Given the programmatic usage of some symbol, look up the corresponding watchpoints for that symbol. |
15,089 | from typing import TYPE_CHECKING, Any, List, Set, Union, cast
from ipyflow.data_model.symbol import Symbol
from ipyflow.data_model.timestamp import Timestamp
from ipyflow.tracing.watchpoint import Watchpoints
def _validate(sym: Any) -> Symbol:
if sym is None or not isinstance(sym, Symbol):
raise ValueError("unable to lookup metadata for symbol")
return cast(Symbol, sym)
The provided code snippet includes necessary dependencies for implementing the `set_tag` function. Write a Python function `def set_tag(sym: Any, tag_value: str) -> None` to solve the following problem:
Add the tag `value` to the symbol.
Here is the function:
def set_tag(sym: Any, tag_value: str) -> None:
"""
Add the tag `value` to the symbol.
"""
_validate(sym).add_tag(tag_value) | Add the tag `value` to the symbol. |
15,090 | from typing import TYPE_CHECKING, Any, List, Set, Union, cast
from ipyflow.data_model.symbol import Symbol
from ipyflow.data_model.timestamp import Timestamp
from ipyflow.tracing.watchpoint import Watchpoints
def _validate(sym: Any) -> Symbol:
if sym is None or not isinstance(sym, Symbol):
raise ValueError("unable to lookup metadata for symbol")
return cast(Symbol, sym)
The provided code snippet includes necessary dependencies for implementing the `unset_tag` function. Write a Python function `def unset_tag(sym: Any, tag_value: str) -> None` to solve the following problem:
Remove the tag `value` from the symbol.
Here is the function:
def unset_tag(sym: Any, tag_value: str) -> None:
"""
Remove the tag `value` from the symbol.
"""
_validate(sym).remove_tag(tag_value) | Remove the tag `value` from the symbol. |
15,091 | from typing import TYPE_CHECKING, Any, List, Set, Union, cast
from ipyflow.data_model.symbol import Symbol
from ipyflow.data_model.timestamp import Timestamp
from ipyflow.tracing.watchpoint import Watchpoints
def _validate(sym: Any) -> Symbol:
if sym is None or not isinstance(sym, Symbol):
raise ValueError("unable to lookup metadata for symbol")
return cast(Symbol, sym)
The provided code snippet includes necessary dependencies for implementing the `has_tag` function. Write a Python function `def has_tag(sym: Any, tag_value: str) -> bool` to solve the following problem:
Test whether the symbol has the `value` tag.
Here is the function:
def has_tag(sym: Any, tag_value: str) -> bool:
"""
Test whether the symbol has the `value` tag.
"""
return _validate(sym).has_tag(tag_value) | Test whether the symbol has the `value` tag. |
15,092 | from typing import Optional, Union
from ipyflow.data_model.cell import cells
from ipyflow.data_model.timestamp import Timestamp
def _to_cell_num(ts_or_cell_num: Union[int, Timestamp]) -> int:
return (
ts_or_cell_num.cell_num
if isinstance(ts_or_cell_num, Timestamp)
else ts_or_cell_num
)
class Timestamp(NamedTuple):
cell_num: int
stmt_num: int
def current(cls) -> "Timestamp":
# TODO: shouldn't have to go through flow() singleton to get the cell counter,
# but the dependency structure prevents us from importing from ipyflow.data_model.code_cell
if tracer_initialized():
return cls(
flow().cell_counter() + _cell_offset,
tracer().module_stmt_counter() + _stmt_offset,
)
else:
return Timestamp.uninitialized()
def positional(self) -> "Timestamp":
return Timestamp(cells().at_counter(self.cell_num).position, self.stmt_num)
def uninitialized(cls) -> "Timestamp":
return _TS_UNINITIALIZED
def is_initialized(self) -> bool:
uninited = Timestamp.uninitialized()
return self.cell_num > uninited.cell_num and self.stmt_num > uninited.stmt_num
def plus(self, cell_num_delta: int, stmt_num_delta: int) -> "Timestamp":
return self.__class__(
self.cell_num + cell_num_delta, self.stmt_num + stmt_num_delta
)
def offset(
cell_offset: int = 0, stmt_offset: int = 0
) -> Generator[None, None, None]:
global _cell_offset
global _stmt_offset
_cell_offset += cell_offset
_stmt_offset += stmt_offset
try:
yield
finally:
_cell_offset -= cell_offset
_stmt_offset -= stmt_offset
def as_tuple(self) -> Tuple[int, int]:
return (self.cell_num, self.stmt_num)
def __eq__(self, other) -> bool:
if other is None:
return False
if not isinstance(other, Timestamp):
raise TypeError(
"cannot compare non-timestamp value %s with timestamp %s"
% (other, self)
)
return tuple(self._asdict().values()) == tuple(other._asdict().values())
def __ne__(self, other) -> bool:
return not self == other
def update_usage_info(
cls,
symbols: Union[
Optional["Symbol"],
Iterable[Optional["Symbol"]],
Optional["ResolvedSymbol"],
Iterable[Optional["ResolvedSymbol"]],
],
exclude_ns=False,
used_node: Optional[ast.AST] = None,
):
if symbols is None:
return
try:
iter(symbols) # type: ignore
except TypeError:
symbols = [symbols] # type: ignore
used_time = cls.current()
for sym in symbols: # type: ignore
if sym is not None and not sym.is_anonymous:
sym.update_usage_info(
used_time=used_time, used_node=used_node, exclude_ns=exclude_ns
)
def stdout(ts_or_cell_num: Union[int, Timestamp]) -> Optional[str]:
try:
cell_num = _to_cell_num(ts_or_cell_num)
captured = cells().at_counter(cell_num).captured_output
return None if captured is None else str(captured.stdout)
except KeyError:
raise ValueError("cell with counter %d has not yet executed" % cell_num) | null |
15,093 | from typing import Optional, Union
from ipyflow.data_model.cell import cells
from ipyflow.data_model.timestamp import Timestamp
def _to_cell_num(ts_or_cell_num: Union[int, Timestamp]) -> int:
return (
ts_or_cell_num.cell_num
if isinstance(ts_or_cell_num, Timestamp)
else ts_or_cell_num
)
class Timestamp(NamedTuple):
    """An (execution-cell counter, statement counter) pair identifying a
    unique point in the notebook's execution history."""
    # Execution counter of the cell in which the statement ran.
    cell_num: int
    # Module-level statement counter within that execution.
    stmt_num: int
    # NOTE(review): takes `cls` -- presumably decorated with @classmethod in
    # the original source; the decorator appears stripped in this extract.
    def current(cls) -> "Timestamp":
        # TODO: shouldn't have to go through flow() singleton to get the cell counter,
        # but the dependency structure prevents us from importing from ipyflow.data_model.code_cell
        if tracer_initialized():
            return cls(
                flow().cell_counter() + _cell_offset,
                tracer().module_stmt_counter() + _stmt_offset,
            )
        else:
            # Tracer not set up yet: fall back to the sentinel timestamp.
            return Timestamp.uninitialized()
    def positional(self) -> "Timestamp":
        # Translate the cell's execution counter into its notebook position.
        return Timestamp(cells().at_counter(self.cell_num).position, self.stmt_num)
    # NOTE(review): `cls` parameter -- presumably @classmethod (stripped).
    def uninitialized(cls) -> "Timestamp":
        return _TS_UNINITIALIZED
    def is_initialized(self) -> bool:
        # Initialized iff strictly past the sentinel in both components.
        uninited = Timestamp.uninitialized()
        return self.cell_num > uninited.cell_num and self.stmt_num > uninited.stmt_num
    def plus(self, cell_num_delta: int, stmt_num_delta: int) -> "Timestamp":
        # NamedTuples are immutable, so return a shifted copy.
        return self.__class__(
            self.cell_num + cell_num_delta, self.stmt_num + stmt_num_delta
        )
    # NOTE(review): yields once and restores state in `finally` -- presumably
    # wrapped with @contextmanager (plus a static/class method decorator)
    # in the original module; confirm before relying on call style.
    def offset(
        cell_offset: int = 0, stmt_offset: int = 0
    ) -> Generator[None, None, None]:
        # Temporarily shift the counters that Timestamp.current() reads.
        global _cell_offset
        global _stmt_offset
        _cell_offset += cell_offset
        _stmt_offset += stmt_offset
        try:
            yield
        finally:
            # Always undo the shift, even if the body raises.
            _cell_offset -= cell_offset
            _stmt_offset -= stmt_offset
    def as_tuple(self) -> Tuple[int, int]:
        return (self.cell_num, self.stmt_num)
    def __eq__(self, other) -> bool:
        # None compares unequal; any other non-Timestamp is a programming error.
        if other is None:
            return False
        if not isinstance(other, Timestamp):
            raise TypeError(
                "cannot compare non-timestamp value %s with timestamp %s"
                % (other, self)
            )
        return tuple(self._asdict().values()) == tuple(other._asdict().values())
    def __ne__(self, other) -> bool:
        return not self == other
    # NOTE(review): `cls` parameter -- presumably @classmethod (stripped).
    def update_usage_info(
        cls,
        symbols: Union[
            Optional["Symbol"],
            Iterable[Optional["Symbol"]],
            Optional["ResolvedSymbol"],
            Iterable[Optional["ResolvedSymbol"]],
        ],
        exclude_ns=False,
        used_node: Optional[ast.AST] = None,
    ):
        # Accept a single symbol, an iterable of symbols, or None.
        if symbols is None:
            return
        try:
            iter(symbols) # type: ignore
        except TypeError:
            symbols = [symbols] # type: ignore
        used_time = cls.current()
        for sym in symbols: # type: ignore
            if sym is not None and not sym.is_anonymous:
                sym.update_usage_info(
                    used_time=used_time, used_node=used_node, exclude_ns=exclude_ns
                )
def stderr(ts_or_cell_num: Union[int, Timestamp]) -> Optional[str]:
    """Return the captured stderr text for the given cell, or None if the
    cell produced no captured output.

    Accepts either a raw cell execution counter or a Timestamp.
    Raises ValueError if the referenced cell has not executed yet.
    """
    try:
        cell_num = _to_cell_num(ts_or_cell_num)
        captured = cells().at_counter(cell_num).captured_output
        # Fix: removed a duplicated fetch/return pair that followed this
        # return statement and was therefore unreachable dead code.
        return None if captured is None else str(captured.stderr)
    except KeyError:
        raise ValueError("cell with counter %d has not yet executed" % cell_num)
15,094 | from typing import Optional, Union
from ipyflow.data_model.cell import cells
from ipyflow.data_model.timestamp import Timestamp
def reproduce_cell(
    ctr: int, show_input: bool = True, show_output: bool = True, lookback: int = 0
):
    """Re-render the cell that executed at counter `ctr`."""
    target_cell = cells().at_counter(ctr)
    return target_cell.reproduce(
        show_input=show_input, show_output=show_output, lookback=lookback
    )
15,095 | import ast
import inspect
import json
import os.path
import re
import shlex
import sys
from typing import TYPE_CHECKING, Iterable, Optional, Sequence, Type, cast
import pyccolo as pyc
from IPython import get_ipython
from IPython.core.magic import register_line_magic
from ipyflow.analysis.symbol_ref import SymbolRef
from ipyflow.annotations.compiler import (
register_annotations_directory,
register_annotations_file,
)
from ipyflow.config import (
ExecutionMode,
ExecutionSchedule,
FlowDirection,
Highlights,
ReactivityMode,
)
from ipyflow.data_model.cell import cells
from ipyflow.data_model.symbol import Symbol
from ipyflow.experimental.dag import create_dag_metadata
from ipyflow.singletons import flow, shell
from ipyflow.slicing.mixin import SliceableMixin, format_slice
from ipyflow.tracing.symbol_resolver import resolve_rval_symbols
from ipyflow.utils.magic_parser import MagicParser
_FLOW_LINE_MAGIC = "flow"
_USAGE = """Options:
[enable|disable]
- Toggle dataflow capture. On by default.
[deps|show_deps|show_dependencies] <symbol>:
- This will print out the dependencies for given symbol.
[code|get_code] <symbol>:
- This will print the backward slice for the given symbol.
[waiting|show_waiting]:
- This will print out all the global variables that are waiting for newer dependencies.
slice <cell_num>:
- This will print the code necessary to reconstruct <cell_num> using a dynamic
program slicing algorithm.
tag <tag>:
- This will tag the executing cell with the given tag.
show_tags:
- This will display the current tags of the executing cell.
register_annotations <directory_or_file>:
- This will register the annotations in the given directory or file.
""".strip()
print_ = print
def warn(*args, **kwargs):
def toggle_dataflow(line: str) -> Optional[str]:
def show_deps(symbol_str: str) -> Optional[str]:
def get_code(symbol_str: str) -> Optional[str]:
def show_waiting(line_: str) -> Optional[str]:
def trace_messages(line_: str) -> None:
def set_highlights(cmd: str, rest: str) -> None:
def make_slice(line: str) -> Optional[str]:
def tag(line: str) -> None:
def show_tags(line: str) -> None:
def set_exec_mode(line_: str) -> None:
def set_exec_schedule(line_: str) -> None:
def set_flow_direction(line_: str) -> None:
def set_reactivity(line_: str) -> None:
def register_tracer(line_: str) -> None:
def deregister_tracer(line_: str) -> None:
def register_annotations(line_: str) -> None:
def create_dag_metadata() -> (
Dict[int, Dict[str, Union[List[int], List[str], Dict[str, Dict[str, str]]]]]
):
def make_line_magic(flow_: "NotebookFlow"):
    """Build and register the `%flow` line magic bound to `flow_`."""
    # Names of module-level functions; used below to detect commands that
    # have a handler defined but not yet wired into the dispatch chain.
    line_magic_names = [
        name for name, val in globals().items() if inspect.isfunction(val)
    ]
    def _handle(cmd, line):
        # Dispatch a single `%flow <cmd> <rest>` invocation; returns the
        # output string to display/write, or None for commands with no output.
        cmd = cmd.replace("-", "_")
        if cmd in ("enable", "disable", "on", "off"):
            return toggle_dataflow(cmd)
        elif cmd in ("deps", "show_deps", "show_dependency", "show_dependencies"):
            return show_deps(line)
        elif cmd in ("code", "get_code"):
            return get_code(line)
        elif cmd in ("waiting", "show_waiting"):
            return show_waiting(line)
        elif cmd == "trace_messages":
            return trace_messages(line)
        elif cmd in ("hls", "nohls", "highlight", "highlights"):
            return set_highlights(cmd, line)
        elif cmd in ("dag", "make_dag", "cell_dag", "make_cell_dag"):
            return json.dumps(create_dag_metadata(), indent=2)
        elif cmd in ("slice", "make_slice", "gather_slice"):
            return make_slice(line)
        elif cmd == "tag":
            return tag(line)
        elif cmd == "show_tags":
            return show_tags(line)
        elif cmd in ("mode", "exec_mode"):
            return set_exec_mode(line)
        elif cmd in ("schedule", "exec_schedule", "execution_schedule"):
            return set_exec_schedule(line)
        elif cmd in (
            "direction",
            "flow_direction",
            "order",
            "flow_order",
            "semantics",
            "flow_semantics",
        ):
            return set_flow_direction(line)
        elif cmd == "reactivity":
            return set_reactivity(line)
        elif cmd in ("register", "register_tracer"):
            return register_tracer(line)
        elif cmd in ("deregister", "deregister_tracer"):
            return deregister_tracer(line)
        elif cmd == "clear":
            flow_.min_timestamp = flow_.cell_counter()
            return None
        elif cmd.endswith("warn_ooo"):
            # "warn_ooo" enables, "nowarn_ooo" disables.
            flow_.mut_settings.warn_out_of_order_usages = not cmd.startswith("no")
            return None
        elif cmd.endswith("lint_ooo"):
            flow_.mut_settings.lint_out_of_order_usages = not cmd.startswith("no")
            return None
        elif cmd == "syntax_transforms":
            is_on = line.endswith(("enabled", "on"))
            is_off = line.endswith(("disabled", "off"))
            if is_on or is_off:
                flow_.mut_settings.syntax_transforms_enabled = is_on
            return None
        elif cmd == "syntax_transforms_only":
            flow_.mut_settings.syntax_transforms_only = True
            return None
        elif cmd.startswith("register_annotation"):
            return register_annotations(line)
        elif cmd == "toggle_reactivity":
            flow_.toggle_reactivity()
            return None
        elif cmd == "bump_min_forced_reactive_counter":
            flow_.bump_min_forced_reactive_counter()
            return None
        elif cmd in line_magic_names:
            warn(
                f"We have a magic for {cmd}, but have not yet registered it",
            )
            return None
        else:
            warn(_USAGE)
            return None
    def _flow_magic(line: str):
        # this is to avoid capturing `self` and creating an extra reference to the singleton
        try:
            cmd, line = line.split(" ", 1)
            if cmd in ("slice", "make_slice", "gather_slice"):
                # FIXME: hack to workaround some input transformer
                line = re.sub(r"--tag +<class '(\w+)'>", r"--tag $\1", line)
        except ValueError:
            cmd, line = line, ""
        # A trailing `> path` redirects the command's output to a file.
        try:
            line, fname = line.split(">", 1)
        except ValueError:
            line, fname = line, None
        line = line.strip()
        if fname is not None:
            fname = fname.strip()
        outstr = _handle(cmd, line)
        if outstr is None:
            return
        if fname is None:
            print_(outstr)
        else:
            with open(fname, "w") as f:
                f.write(outstr)
    # FIXME (smacke): probably not a great idea to rely on this
    _flow_magic.__name__ = _FLOW_LINE_MAGIC
    return register_line_magic(_flow_magic)
15,096 | import errno
import os
import re
import subprocess
import sys
from typing import Callable, Dict
import functools
HANDLERS: Dict[str, Dict[str, Callable]] = {}
The provided code snippet includes necessary dependencies for implementing the `register_vcs_handler` function. Write a Python function `def register_vcs_handler(vcs, method)` to solve the following problem:
Create decorator to mark a method as the handler of a VCS.
Here is the function:
def register_vcs_handler(vcs, method):  # decorator
    """Return a decorator that records a function as HANDLERS[vcs][method]."""
    def record(func):
        """Store func in HANDLERS[vcs][method] and hand it back unchanged."""
        HANDLERS.setdefault(vcs, {})[method] = func
        return func
    return record
15,097 | import errno
import os
import re
import subprocess
import sys
from typing import Callable, Dict
import functools
The provided code snippet includes necessary dependencies for implementing the `git_get_keywords` function. Write a Python function `def git_get_keywords(versionfile_abs)` to solve the following problem:
Extract version information from the given file.
Here is the function:
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # _version.py embeds git_refnames/git_full/git_date assignments that git
    # expands on archive. We grep for them with a regexp instead of importing
    # the module, so this stays usable from setup.py.
    value_re = re.compile(r'=\s*"(.*)"')
    wanted = (
        ("git_refnames =", "refnames"),
        ("git_full =", "full"),
        ("git_date =", "date"),
    )
    keywords = {}
    try:
        with open(versionfile_abs, "r") as fobj:
            for line in fobj:
                stripped = line.strip()
                for prefix, key in wanted:
                    if stripped.startswith(prefix):
                        mo = value_re.search(line)
                        if mo:
                            keywords[key] = mo.group(1)
    except OSError:
        # Missing/unreadable file: report whatever (nothing) was found.
        pass
    return keywords
15,098 | import errno
import os
import re
import subprocess
import sys
from typing import Callable, Dict
import functools
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These "$Format:...$" placeholders are substituted by `git archive`.
    # setup.py/versioneer.py greps for the variable names, so each value is
    # kept as a distinct literal here rather than computed.
    return {
        "refnames": "$Format:%d$",
        "full": "$Format:%H$",
        "date": "$Format:%ci$",
    }
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values are baked in when 'setup.py versioneer' generates
    # _version.py for this project.
    cfg = VersioneerConfig()
    for attr, value in (
        ("VCS", "git"),
        ("style", "pep440"),
        ("tag_prefix", ""),
        ("parentdir_prefix", "ipyflow_core-"),
        ("versionfile_source", "ipyflow/_version.py"),
        ("verbose", False),
    ):
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""
    # Used as internal control flow: each version-discovery strategy raises
    # this so the caller can fall through to the next strategy.
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory.
    """
    tried = []
    for _ in range(3):
        # Consider the directory itself plus its siblings' names.
        candidates = [os.path.basename(root)] + os.listdir(root)
        for raw_prefix in (parentdir_prefix, ""):
            # Hyphens and underscores are interchangeable in dist names.
            prefix = raw_prefix.replace("-", "_")
            for candidate in candidates:
                candidate = candidate.replace("-", "_")
                if not candidate.startswith(prefix):
                    continue
                # Keep only dot-separated chunks that are purely numeric
                # (empty chunks pass `all(...)` too, matching upstream).
                parts = [
                    part
                    for part in candidate[len(prefix):].split(".")
                    if all(ch.isdigit() for ch in part)
                ]
                if len(parts) <= 1:
                    continue
                return {"version": ".".join(parts),
                        "full-revisionid": None,
                        "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if "refnames" not in keywords:
        raise NotThisMethod("Short version file found")
    date = keywords.get("date")
    if date is not None:
        # Keep only the last line (earlier lines may carry a GPG signature),
        # then massage git's "%ci" output into an ISO-8601-compliant stamp:
        # "YYYY-MM-DD HH:MM:SS +ZZZZ" -> "YYYY-MM-DDTHH:MM:SS+ZZZZ".
        # ("%cI" would emit this directly, but requires git >= 2.2.0 and we
        # cannot easily detect the git version in use.)
        date = date.splitlines()[-1].strip()
        date = date.replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {part.strip() for part in refnames.strip("()").split(",")}
    # git >= 1.8.3 lists tags as "tag: foo-1.0" instead of "foo-1.0";
    # prefer entries with that explicit prefix when present.
    TAG = "tag: "
    tags = {ref[len(TAG):] for ref in refs if ref.startswith(TAG)}
    if not tags:
        # Older git (or no tags): fall back to the heuristic that version
        # tags contain a digit, which filters out branch names such as
        # "release"/"stabilization" as well as "HEAD" and "master".
        tags = {ref for ref in refs if re.search(r'\d', ref)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting prefers e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        candidate = ref[len(tag_prefix):]
        # Skip refs that equal the prefix exactly or that don't continue
        # with a digit (mostly a concern when tag_prefix is '').
        if not re.match(r'\d', candidate):
            continue
        if verbose:
            print("picking %s" % candidate)
        return {"version": candidate,
                "full-revisionid": keywords["full"].strip(),
                "dirty": False, "error": None,
                "date": date}
    # No suitable tag: report "0+unknown" but keep the full revision id.
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
    """Get version from 'git describe' in the root of the source tree.
    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.
    """
    # NOTE(review): the `run_command` default is defined elsewhere in the
    # original module; it is expected to return an (output, returncode) pair.
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    # GIT_DIR can interfere with correct operation of Versioneer.
    # It may be intended to be passed to the Versioneer-versioned project,
    # but that should not change where we get our version from.
    env = os.environ.copy()
    env.pop("GIT_DIR", None)
    runner = functools.partial(runner, env=env)
    _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
                   hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    MATCH_ARGS = ["--match", "%s*" % tag_prefix] if tag_prefix else []
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty",
                                     "--always", "--long", *MATCH_ARGS],
                                     cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7] # maybe improved later
    pieces["error"] = None
    branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
                             cwd=root)
    # --abbrev-ref was added in git-1.6.3
    if rc != 0 or branch_name is None:
        raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
    branch_name = branch_name.strip()
    if branch_name == "HEAD":
        # If we aren't exactly on a branch, pick a branch which represents
        # the current commit. If all else fails, we are on a branchless
        # commit.
        branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
        # --contains was added in git-1.5.4
        if rc != 0 or branches is None:
            raise NotThisMethod("'git branch --contains' returned error")
        branches = branches.split("\n")
        # Remove the first line if we're running detached
        if "(" in branches[0]:
            branches.pop(0)
        # Strip off the leading "* " from the list of branches.
        branches = [branch[2:] for branch in branches]
        if "master" in branches:
            branch_name = "master"
        elif not branches:
            branch_name = None
        else:
            # Pick the first branch that is returned. Good or bad.
            branch_name = branches[0]
    pieces["branch"] = branch_name
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparsable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
        pieces["distance"] = int(count_out) # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
    # Use only the last line. Previous lines may contain GPG signature
    # information.
    date = date.splitlines()[-1]
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # A prior stage failed; surface its error verbatim.
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    # Lazy dispatch table: the lambdas defer the renderer lookup until the
    # style has been validated, so unknown styles fail with ValueError.
    renderers = {
        "pep440": lambda: render_pep440(pieces),
        "pep440-branch": lambda: render_pep440_branch(pieces),
        "pep440-pre": lambda: render_pep440_pre(pieces),
        "pep440-post": lambda: render_pep440_post(pieces),
        "pep440-post-branch": lambda: render_pep440_post_branch(pieces),
        "pep440-old": lambda: render_pep440_old(pieces),
        "git-describe": lambda: render_git_describe(pieces),
        "git-describe-long": lambda: render_git_describe_long(pieces),
    }
    make_version = renderers.get(style)
    if make_version is None:
        raise ValueError("unknown style '%s'" % style)
    rendered = make_version()
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
The provided code snippet includes necessary dependencies for implementing the `get_versions` function. Write a Python function `def get_versions()` to solve the following problem:
Get version information or return default if unable to do so.
Here is the function:
def get_versions():
    """Get version information or return default if unable to do so."""
    cfg = get_config()
    verbose = cfg.verbose
    # Strategy 1: expanded git-archive keywords baked into this file.
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    # Locate the source-tree root by walking up from this file; frozen
    # environments (py2exe/bbfreeze/non-CPython) may not define __file__,
    # in which case only the keyword strategy above was available.
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the project root to
        # this file; invert it to recover the root.
        for _ in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    # Strategy 2: ask git directly via `git describe`.
    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, verbose),
                      cfg.style)
    except NotThisMethod:
        pass
    # Strategy 3: parse the unpacked tarball's directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
15,099 | from types import FunctionType, LambdaType, ModuleType
from typing import TYPE_CHECKING, Type, Union
from ipyflow.tracing.uninstrument import uninstrument
def uninstrument(
    obj: Union[FunctionType, LambdaType], seen: Optional[Set[int]] = None
) -> Optional[Union[FunctionType, LambdaType]]:
    """Return an uninstrumented copy of `obj` if one can be built, else None.

    `seen` tracks object ids already being processed so mutually-referencing
    functions cannot cause infinite recursion.
    """
    if seen is None:
        seen = set()
    if id(obj) in seen:
        # Already in flight higher up the call stack; break the cycle.
        return None
    seen.add(id(obj))
    # First, try to strip an instrumenting decorator directly. This is a
    # best-effort probe, but catch only Exception (the original bare except
    # would also swallow KeyboardInterrupt/SystemExit).
    try:
        new_obj = _get_uninstrumented_decorator(obj, seen)
    except Exception:  # noqa: BLE001
        new_obj = None
    if new_obj is not None:
        return new_obj
    # Otherwise, rebuild the function from the source of one of its aliases.
    for alias in flow().aliases.get(id(obj), []):
        if not alias.is_function and not alias.is_lambda:
            continue
        func_text = astunparse.unparse(alias.func_def_stmt)
        new_obj = _make_uninstrumented_function(
            obj, func_text, alias.func_def_stmt, seen
        )
        if new_obj is not None:
            return new_obj
    return None
def patch_cloudpickle_function_reduce(module: ModuleType) -> None:
    """Monkey-patch cloudpickle's CloudPickler so that instrumented functions
    are swapped for uninstrumented equivalents before being reduced."""
    pickler_cls: Type["CloudPickler"] = module.CloudPickler
    original_reduce = pickler_cls._function_reduce

    def _patched_function_reduce(
        self_: "CloudPickler", obj: Union[FunctionType, LambdaType]
    ) -> None:
        # Fall back to the instrumented object when no clean copy exists.
        clean = uninstrument(obj)
        return original_reduce(self_, clean if clean is not None else obj)

    pickler_cls._function_reduce = _patched_function_reduce
15,100 | from types import ModuleType
from typing import TYPE_CHECKING, Type
from ipyflow.tracing.uninstrument import uninstrument
def uninstrument(
    obj: Union[FunctionType, LambdaType], seen: Optional[Set[int]] = None
) -> Optional[Union[FunctionType, LambdaType]]:
    """Return an uninstrumented copy of `obj` if one can be built, else None.

    `seen` tracks object ids already being processed so mutually-referencing
    functions cannot cause infinite recursion.
    """
    if seen is None:
        seen = set()
    if id(obj) in seen:
        # Already in flight higher up the call stack; break the cycle.
        return None
    seen.add(id(obj))
    # First, try to strip an instrumenting decorator directly. This is a
    # best-effort probe, but catch only Exception (the original bare except
    # would also swallow KeyboardInterrupt/SystemExit).
    try:
        new_obj = _get_uninstrumented_decorator(obj, seen)
    except Exception:  # noqa: BLE001
        new_obj = None
    if new_obj is not None:
        return new_obj
    # Otherwise, rebuild the function from the source of one of its aliases.
    for alias in flow().aliases.get(id(obj), []):
        if not alias.is_function and not alias.is_lambda:
            continue
        func_text = astunparse.unparse(alias.func_def_stmt)
        new_obj = _make_uninstrumented_function(
            obj, func_text, alias.func_def_stmt, seen
        )
        if new_obj is not None:
            return new_obj
    return None
def patch_pyspark_udf(module: ModuleType) -> None:
    """Monkey-patch pyspark's UserDefinedFunction constructor so that UDFs
    are built from uninstrumented copies of their wrapped functions."""
    udf_cls: Type["UserDefinedFunction"] = module.UserDefinedFunction
    original_init = udf_cls.__init__

    def _patched_init(self_: "UserDefinedFunction", func, *args, **kwargs) -> None:
        # Fall back to the instrumented function when no clean copy exists.
        clean = uninstrument(func)
        return original_init(
            self_, clean if clean is not None else func, *args, **kwargs
        )

    udf_cls.__init__ = _patched_init
15,101 | from typing import TYPE_CHECKING, List, Optional, Type, Union, overload
from ipyflow.singletons import flow
if TYPE_CHECKING:
def cells(cell_id: None = None) -> Type["Cell"]:
    # NOTE(review): overload stub -- an @overload decorator was presumably
    # stripped by the extraction; confirm against the original module.
    ...
def cells(cell_id: "IdType") -> "Cell":
    # NOTE(review): second overload stub (same stripped-decorator caveat).
    ...
def cells(cell_id: Optional["IdType"] = None) -> Union[Type["Cell"], "Cell"]:
    """With no argument, return the registered Cell class; given an already
    executed counter, return that cell; otherwise look the cell up by id."""
    clazz = _CodeCellContainer[0]
    if cell_id is None:
        return clazz
    elif isinstance(cell_id, int) and cell_id <= clazz.exec_counter():
        # Integer within the executed range: treat it as an execution counter.
        return clazz.at_counter(cell_id)
    else:
        # Anything else (e.g. a string id, or a future counter): lookup by id.
        return clazz.from_id(cell_id)
def flow() -> "NotebookFlowInstance":
    """Return the NotebookFlow singleton; asserts it was already initialized."""
    assert NotebookFlow.initialized()
    return NotebookFlow.instance()  # type: ignore
def cell_above() -> "Cell":
    """Return the cell located one position above the active cell."""
    active_cell_id = flow().active_cell_id
    assert active_cell_id is not None
    active_position = cells().from_id(active_cell_id).position
    return cells().at_position(active_position - 1)
15,102 | from typing import TYPE_CHECKING, List, Optional, Type, Union, overload
from ipyflow.singletons import flow
if TYPE_CHECKING:
def cells(cell_id: None = None) -> Type["Cell"]:
    # NOTE(review): overload stub -- an @overload decorator was presumably
    # stripped by the extraction; confirm against the original module.
    ...
def cells(cell_id: "IdType") -> "Cell":
    # NOTE(review): second overload stub (same stripped-decorator caveat).
    ...
def cells(cell_id: Optional["IdType"] = None) -> Union[Type["Cell"], "Cell"]:
    """With no argument, return the registered Cell class; given an already
    executed counter, return that cell; otherwise look the cell up by id."""
    clazz = _CodeCellContainer[0]
    if cell_id is None:
        return clazz
    elif isinstance(cell_id, int) and cell_id <= clazz.exec_counter():
        # Integer within the executed range: treat it as an execution counter.
        return clazz.at_counter(cell_id)
    else:
        # Anything else (e.g. a string id, or a future counter): lookup by id.
        return clazz.from_id(cell_id)
def flow() -> "NotebookFlowInstance":
    """Return the NotebookFlow singleton; asserts it was already initialized."""
    assert NotebookFlow.initialized()
    return NotebookFlow.instance()  # type: ignore
def cell_below() -> "Cell":
    """Return the cell located one position below the active cell."""
    active_cell_id = flow().active_cell_id
    assert active_cell_id is not None
    active_position = cells().from_id(active_cell_id).position
    return cells().at_position(active_position + 1)
15,103 | from typing import TYPE_CHECKING, List, Optional, Type, Union, overload
from ipyflow.singletons import flow
if TYPE_CHECKING:
def cells(cell_id: None = None) -> Type["Cell"]:
    # NOTE(review): overload stub -- an @overload decorator was presumably
    # stripped by the extraction; confirm against the original module.
    ...
def cells(cell_id: "IdType") -> "Cell":
    # NOTE(review): second overload stub (same stripped-decorator caveat).
    ...
def cells(cell_id: Optional["IdType"] = None) -> Union[Type["Cell"], "Cell"]:
    """With no argument, return the registered Cell class; given an already
    executed counter, return that cell; otherwise look the cell up by id."""
    clazz = _CodeCellContainer[0]
    if cell_id is None:
        return clazz
    elif isinstance(cell_id, int) and cell_id <= clazz.exec_counter():
        # Integer within the executed range: treat it as an execution counter.
        return clazz.at_counter(cell_id)
    else:
        # Anything else (e.g. a string id, or a future counter): lookup by id.
        return clazz.from_id(cell_id)
def flow() -> "NotebookFlowInstance":
    """Return the NotebookFlow singleton; asserts it was already initialized."""
    assert NotebookFlow.initialized()
    return NotebookFlow.instance()  # type: ignore
def cell_at_offset(offset: int) -> "Cell":
    """Return the cell `offset` positions away from the active cell."""
    active_cell_id = flow().active_cell_id
    assert active_cell_id is not None
    active_position = cells().from_id(active_cell_id).position
    return cells().at_position(active_position + offset)
15,104 | from typing import TYPE_CHECKING, List, Optional, Type, Union, overload
from ipyflow.singletons import flow
if TYPE_CHECKING:
def cells(cell_id: None = None) -> Type["Cell"]:
    # NOTE(review): overload stub -- an @overload decorator was presumably
    # stripped by the extraction; confirm against the original module.
    ...
def cells(cell_id: "IdType") -> "Cell":
    # NOTE(review): second overload stub (same stripped-decorator caveat).
    ...
def cells(cell_id: Optional["IdType"] = None) -> Union[Type["Cell"], "Cell"]:
    """With no argument, return the registered Cell class; given an already
    executed counter, return that cell; otherwise look the cell up by id."""
    clazz = _CodeCellContainer[0]
    if cell_id is None:
        return clazz
    elif isinstance(cell_id, int) and cell_id <= clazz.exec_counter():
        # Integer within the executed range: treat it as an execution counter.
        return clazz.at_counter(cell_id)
    else:
        # Anything else (e.g. a string id, or a future counter): lookup by id.
        return clazz.from_id(cell_id)
def last_run_cell() -> Optional["Cell"]:
    """Return the cell executed at the previous execution counter, if any."""
    previous_counter = cells().exec_counter() - 1
    return cells().at_counter(previous_counter)
15,105 | from typing import TYPE_CHECKING, List, Optional, Type, Union, overload
from ipyflow.singletons import flow
_NamespaceContainer: List[Type["Namespace"]] = []
def namespaces() -> Type["Namespace"]:
    """Return the Namespace class registered in the singleton container."""
    registered = _NamespaceContainer[0]
    return registered
15,106 | from typing import TYPE_CHECKING, List, Optional, Type, Union, overload
from ipyflow.singletons import flow
_ScopeContainer: List[Type["Scope"]] = []
def scopes() -> Type["Scope"]:
    """Return the Scope class registered in the singleton container."""
    registered = _ScopeContainer[0]
    return registered
15,107 | from typing import TYPE_CHECKING, List, Optional, Type, Union, overload
from ipyflow.singletons import flow
def symbols(sym: None = None) -> Type["Symbol"]:
    # NOTE(review): overload stub -- the @overload decorator appears to have
    # been stripped by the extraction; confirm against the original module.
    ...
15,108 | from typing import TYPE_CHECKING, List, Optional, Type, Union, overload
from ipyflow.singletons import flow
def symbols(sym: "Symbol") -> "Symbol":
    # NOTE(review): overload stub -- the @overload decorator appears to have
    # been stripped by the extraction; confirm against the original module.
    ...
15,109 | from typing import TYPE_CHECKING, List, Optional, Type, Union, overload
from ipyflow.singletons import flow
_SymbolContainer: List[Type["Symbol"]] = []
def symbols(sym: Optional["Symbol"] = None) -> Union[Type["Symbol"], "Symbol"]:
    """With no argument, return the registered Symbol class; otherwise echo
    the given symbol back unchanged."""
    if sym is None:
        return _SymbolContainer[0]
    return sym
15,110 | from typing import TYPE_CHECKING, List, Optional, Type, Union, overload
from ipyflow.singletons import flow
_StatementContainer: List[Type["Statement"]] = []
def statements() -> Type["Statement"]:
    """Return the Statement class registered in the singleton container."""
    registered = _StatementContainer[0]
    return registered
15,111 | from typing import TYPE_CHECKING, List, Optional, Type, Union, overload
from ipyflow.singletons import flow
_TimestampContainer: List[Type["Timestamp"]] = []
def timestamps() -> Type["Timestamp"]:
    """Return the Timestamp class registered in the singleton container."""
    registered = _TimestampContainer[0]
    return registered
15,112 | import os
from enum import Enum
from typing import Any, Dict, List, Optional, Set
from ipyflow.data_model.symbol import Symbol
from ipyflow.singletons import flow
from ipyflow.tracing.external_calls.base_handlers import ExternalCallHandler, HasGetitem
The provided code snippet includes necessary dependencies for implementing the `handler_for` function. Write a Python function `def handler_for(*_methods)` to solve the following problem:
Just a marker decorator to indicate that the handler is used for functions / methods named differently from the decorated function / method
Here is the function:
def handler_for(*_methods):
    """Marker decorator: records that the decorated handler also serves
    functions / methods whose names differ from its own.

    Intentionally does nothing at runtime.
    """
    return None
15,113 | import sys
from IPython.core.interactiveshell import InteractiveShell
from IPython.terminal.embed import InteractiveShellEmbed
from IPython.terminal.ipapp import load_default_config
from ipyflow import singletons
from ipyflow.shell.interactiveshell import UsesIPyflowShell
class IPyflowInteractiveShellEmbed(
    singletons.IPyflowShell, InteractiveShellEmbed, metaclass=UsesIPyflowShell # type: ignore
):
    # Embedded-terminal flavor of the ipyflow shell; all behavior comes from
    # the mixed-in bases (ipyflow singleton + IPython's embed shell).
    pass
The provided code snippet includes necessary dependencies for implementing the `embed` function. Write a Python function `def embed(*, header="", compile_flags=None, **kwargs)` to solve the following problem:
Call this to embed IPyflow at the current point in your program. The first invocation of this will create a :class:`terminal.embed.InteractiveShellEmbed` instance and then call it. Consecutive calls just call the already created instance. If you don't want the kernel to initialize the namespace from the scope of the surrounding function, and/or you want to load full IPython configuration, you probably want `IPython.start_ipython()` instead. Here is a simple example:: from ipyflow.shell import embed a = 10 b = 20 embed(header='First time') c = 30 d = 40 embed() Parameters ---------- header : str Optional header string to print at startup. compile_flags Passed to the `compile_flags` parameter of :py:meth:`terminal.embed.InteractiveShellEmbed.mainloop()`, which is called when the :class:`terminal.embed.InteractiveShellEmbed` instance is called. **kwargs : various, optional Any other kwargs will be passed to the :class:`terminal.embed.InteractiveShellEmbed` constructor. Full customization can be done by passing a traitlets :class:`Config` in as the `config` argument (see :ref:`configure_start_ipython` and :ref:`terminal_options`).
Here is the function:
def embed(*, header="", compile_flags=None, **kwargs):
    """Call this to embed IPyflow at the current point in your program.
    The first invocation of this will create a :class:`terminal.embed.InteractiveShellEmbed`
    instance and then call it. Consecutive calls just call the already
    created instance.
    If you don't want the kernel to initialize the namespace
    from the scope of the surrounding function,
    and/or you want to load full IPython configuration,
    you probably want `IPython.start_ipython()` instead.
    Here is a simple example::

        from ipyflow.shell import embed
        a = 10
        b = 20
        embed(header='First time')
        c = 30
        d = 40
        embed()

    Parameters
    ----------
    header : str
        Optional header string to print at startup.
    compile_flags
        Passed to the `compile_flags` parameter of :py:meth:`terminal.embed.InteractiveShellEmbed.mainloop()`,
        which is called when the :class:`terminal.embed.InteractiveShellEmbed` instance is called.
    **kwargs : various, optional
        Any other kwargs will be passed to the :class:`terminal.embed.InteractiveShellEmbed` constructor.
        Full customization can be done by passing a traitlets :class:`Config` in as the
        `config` argument (see :ref:`configure_start_ipython` and :ref:`terminal_options`).
    """
    config = kwargs.get("config")
    if config is None:
        # no explicit config supplied: fall back to IPython's default config and
        # mirror the terminal-shell section onto the embed class
        config = load_default_config()
        config.InteractiveShellEmbed = config.TerminalInteractiveShell
        kwargs["config"] = config
    using = kwargs.get("using", "sync")
    if using:
        # configure the loop runner; autoawait is only enabled for async runners
        kwargs["config"].update(
            {
                "TerminalInteractiveShell": {
                    "loop_runner": using,
                    "colors": "NoColor",
                    "autoawait": using != "sync",
                }
            }
        )
    # save ps1/ps2 if defined (only plain REPL sessions define these)
    ps1 = None
    ps2 = None
    try:
        ps1 = sys.ps1
        ps2 = sys.ps2
    except AttributeError:
        pass
    # save previous instance: displace any existing singleton shell so the
    # embedded shell can become the singleton for the duration of the call
    saved_shell_instance = InteractiveShell._instance
    if saved_shell_instance is not None:
        cls = type(saved_shell_instance)
        cls.clear_instance()
    # identify the caller's file:line so each call site is tracked distinctly
    frame = sys._getframe(1)
    shell = IPyflowInteractiveShellEmbed.instance(
        _init_location_id="%s:%s" % (frame.f_code.co_filename, frame.f_lineno), **kwargs
    )
    shell(
        header=header,
        stack_depth=2,
        compile_flags=compile_flags,
        _call_location_id="%s:%s" % (frame.f_code.co_filename, frame.f_lineno),
    )
    IPyflowInteractiveShellEmbed.clear_instance()
    # restore previous instance on every class in its MRO
    if saved_shell_instance is not None:
        cls = type(saved_shell_instance)
        cls.clear_instance()
        for subclass in cls._walk_mro():
            subclass._instance = saved_shell_instance
    if ps1 is not None:
        # restore the interactive prompts we saved above
        sys.ps1 = ps1
        sys.ps2 = ps2
15,114 | import ast
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Union
def match_container_obj_or_namespace_with_literal_nodes(
    container_obj_or_namespace: Union[
        "Namespace", Dict[Any, Any], List[Any], Tuple[Any, ...]
    ],
    literal_node: Union[ast.Dict, ast.List, ast.Tuple],
):
    """Pair each entry of a runtime container (or Namespace) with the AST node
    of the literal that produced it.

    For mapping-like containers, yields ``((key, value), (key_node, value_node))``.
    For sequences, yields ``((index, item), (None, element_node))``; a starred
    element in the literal absorbs the surplus container items.
    """
    try:
        # EAFP: anything exposing .items() is treated as a mapping / namespace
        entries = container_obj_or_namespace.items()  # type: ignore
        assert isinstance(literal_node, ast.Dict), "got %s" % ast.dump(literal_node)
        yield from zip(entries, zip(literal_node.keys, literal_node.values))
    except (AttributeError, TypeError):
        assert isinstance(literal_node, (ast.List, ast.Tuple))
        elt_nodes = literal_node.elts
        matched_idx = -1
        matched_node = None
        for item_idx, obj_or_sym in enumerate(container_obj_or_namespace):
            # stay parked on a starred element while more container items
            # remain than there are literal elements left to match
            advance = not isinstance(matched_node, ast.Starred) or (
                len(elt_nodes) - matched_idx - 1
                >= len(container_obj_or_namespace) - item_idx
            )
            if advance:
                matched_idx += 1
                matched_node = elt_nodes[matched_idx]
            yield (item_idx, obj_or_sym), (None, matched_node)
15,115 | from contextlib import contextmanager
from contextvars import ContextVar
from enum import Enum
from typing import Generator, Optional
from ipyflow.utils.misc_utils import yield_in_loop
class SlicingContext(Enum):
    """Enumerates the flavors of slicing dependency context (dynamic vs. static)."""
    DYNAMIC = "dynamic"
    STATIC = "static"
def iter_slicing_contexts(cls) -> Generator[None, None, None]:
    # Yield once per slicing context, iterating over every member of `cls`.
    # NOTE(review): the `cls` parameter suggests this was a @classmethod on
    # SlicingContext in the original source (decorator stripped in this copy);
    # as written at module level it would recurse into itself -- confirm upstream.
    for _ in iter_slicing_contexts(*cls):
        yield
def set_slicing_context(dep_ctx: SlicingContext) -> Generator[None, None, None]:
    # Temporarily install `dep_ctx` as the active slicing context via the
    # module-level `slicing_ctx_var` ContextVar (defined elsewhere in the file),
    # restoring the prior value even if the enclosed block raises.
    # NOTE(review): used via `with` by callers below -- presumably decorated
    # with @contextmanager in the original source; confirm.
    token = slicing_ctx_var.set(dep_ctx)
    try:
        yield
    finally:
        slicing_ctx_var.reset(token)
def dynamic_slicing_context() -> Generator[None, None, None]:
    # Convenience wrapper: run the enclosed block under SlicingContext.DYNAMIC.
    # NOTE(review): presumably @contextmanager-decorated upstream -- confirm.
    with set_slicing_context(SlicingContext.DYNAMIC):
        yield
15,116 | from contextlib import contextmanager
from contextvars import ContextVar
from enum import Enum
from typing import Generator, Optional
from ipyflow.utils.misc_utils import yield_in_loop
class SlicingContext(Enum):
    """Enumerates the flavors of slicing dependency context (dynamic vs. static)."""
    DYNAMIC = "dynamic"
    STATIC = "static"
def iter_slicing_contexts(cls) -> Generator[None, None, None]:
    # Yield once per slicing context, iterating over every member of `cls`.
    # NOTE(review): `cls` suggests a stripped @classmethod on SlicingContext;
    # as written at module level it would recurse into itself -- confirm upstream.
    for _ in iter_slicing_contexts(*cls):
        yield
def set_slicing_context(dep_ctx: SlicingContext) -> Generator[None, None, None]:
    # Temporarily install `dep_ctx` as the active slicing context (module-level
    # `slicing_ctx_var` ContextVar, defined elsewhere), restoring the prior
    # value even on exception.
    # NOTE(review): presumably @contextmanager-decorated upstream -- confirm.
    token = slicing_ctx_var.set(dep_ctx)
    try:
        yield
    finally:
        slicing_ctx_var.reset(token)
def static_slicing_context() -> Generator[None, None, None]:
    # Convenience wrapper: run the enclosed block under SlicingContext.STATIC.
    # NOTE(review): presumably @contextmanager-decorated upstream -- confirm.
    with set_slicing_context(SlicingContext.STATIC):
        yield
15,117 | from contextlib import contextmanager
from contextvars import ContextVar
from enum import Enum
from typing import Generator, Optional
from ipyflow.utils.misc_utils import yield_in_loop
class SlicingContext(Enum):
    """Enumerates the flavors of slicing dependency context (dynamic vs. static)."""
    DYNAMIC = "dynamic"
    STATIC = "static"
def iter_slicing_contexts(cls) -> Generator[None, None, None]:
    # Yield once per slicing context, iterating over every member of `cls`.
    # NOTE(review): `cls` suggests a stripped @classmethod on SlicingContext;
    # as written at module level it would recurse into itself -- confirm upstream.
    for _ in iter_slicing_contexts(*cls):
        yield
def set_slicing_context(dep_ctx: SlicingContext) -> Generator[None, None, None]:
    # Temporarily install `dep_ctx` as the active slicing context (module-level
    # `slicing_ctx_var` ContextVar, defined elsewhere), restoring the prior
    # value even on exception.
    # NOTE(review): presumably @contextmanager-decorated upstream -- confirm.
    token = slicing_ctx_var.set(dep_ctx)
    try:
        yield
    finally:
        slicing_ctx_var.reset(token)
def slicing_context(is_static: bool) -> Generator[None, None, None]:
    # Dispatch to the STATIC or DYNAMIC slicing context based on `is_static`.
    # NOTE(review): presumably @contextmanager-decorated upstream -- confirm.
    with set_slicing_context(
        SlicingContext.STATIC if is_static else SlicingContext.DYNAMIC
    ):
        yield
15,118 | from contextlib import contextmanager
from contextvars import ContextVar
from enum import Enum
from typing import Generator, Optional
from ipyflow.utils.misc_utils import yield_in_loop
class SlicingContext(Enum):
    """Enumerates the flavors of slicing dependency context (dynamic vs. static)."""
    DYNAMIC = "dynamic"
    STATIC = "static"
def iter_slicing_contexts(cls) -> Generator[None, None, None]:
    # Yield once per slicing context, iterating over every member of `cls`.
    # NOTE(review): `cls` suggests a stripped @classmethod on SlicingContext;
    # as written at module level it would recurse into itself -- confirm upstream.
    for _ in iter_slicing_contexts(*cls):
        yield
def set_slicing_context(dep_ctx: SlicingContext) -> Generator[None, None, None]:
    # Temporarily install `dep_ctx` as the active slicing context (module-level
    # `slicing_ctx_var` ContextVar, defined elsewhere), restoring the prior
    # value even on exception.
    # NOTE(review): presumably @contextmanager-decorated upstream -- confirm.
    token = slicing_ctx_var.set(dep_ctx)
    try:
        yield
    finally:
        slicing_ctx_var.reset(token)
def yield_in_loop(*gens):
    """Enter each context manager in ``gens`` sequentially, yielding exactly
    once while inside it, so a caller's loop body runs once per manager."""
    for manager in gens:
        with manager:
            yield
def iter_slicing_contexts(*dep_contexts: SlicingContext) -> Generator[None, None, None]:
    # Enter each requested slicing context in sequence, yielding once inside
    # each, so callers can run the same loop body under several contexts.
    for _ in yield_in_loop(*[set_slicing_context(dep_ctx) for dep_ctx in dep_contexts]):
        yield
15,119 | import ast
import logging
import sys
from enum import Enum
from types import FrameType, FunctionType
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
Iterable,
List,
Optional,
Set,
Tuple,
Type,
cast,
)
from ipyflow.config import ExecutionSchedule, FlowDirection
from ipyflow.data_model.cell import Cell, cells
from ipyflow.data_model.timestamp import Timestamp
from ipyflow.data_model.utils.annotation_utils import (
get_type_annotation,
make_annotation_string,
)
from ipyflow.data_model.utils.update_protocol import UpdateProtocol
from ipyflow.models import _SymbolContainer, namespaces, statements, symbols
from ipyflow.singletons import flow, shell, tracer
from ipyflow.slicing.context import dynamic_slicing_context, slicing_context
from ipyflow.slicing.mixin import FormatType, Slice
from ipyflow.tracing.watchpoint import Watchpoints
from ipyflow.types import IMMUTABLE_PRIMITIVE_TYPES, IdType, SupportedIndexType
from ipyflow.utils.misc_utils import cleanup_discard, debounce
class ExecutionSchedule(EnumWithDefault):
def flow() -> "NotebookFlowInstance": # type: ignore
IdType = Union[str, int]
def _debounced_exec_schedule(executed_cell_id: IdType, reactive: bool) -> None:
    """Recompute the notebook flow's execution schedule for `executed_cell_id`.

    NOTE(review): name and the trailing `debounced_exec_schedule_pending = False`
    suggest this runs as a debounced callback -- confirm against the scheduler.
    """
    flow_ = flow()
    settings = flow_.mut_settings
    exec_schedule = settings.exec_schedule
    try:
        # temporarily downgrade a pure DAG-based schedule to the hybrid one
        # while the schedule is recomputed; restored in the finally block
        if exec_schedule == ExecutionSchedule.DAG_BASED:
            settings.exec_schedule = ExecutionSchedule.HYBRID_DAG_LIVENESS_BASED
        # clears any recorded execution exception (per the accessor's name)
        flow_.get_and_set_exception_raised_during_execution(None)
        flow_.handle(
            {
                "type": "compute_exec_schedule",
                "executed_cell_id": executed_cell_id,
                "is_reactively_executing": reactive,
                "allow_new_ready": reactive,
            }
        )
    finally:
        # restore the caller-visible schedule setting and clear the debounce flag
        settings.exec_schedule = exec_schedule
        flow_.debounced_exec_schedule_pending = False
15,120 | import typing
from typing import Any, Iterable, List
def make_annotation_string(ann) -> str:
    """Render a type-annotation object as a concise, human-readable string.

    Handles plain classes, ``NoneType``, ``Ellipsis``, and typing generics
    (recursing into ``__args__``); collapses two-element unions ending in
    ``NoneType`` into ``Optional[...]`` and prefixes names with their module
    unless it is ``typing``, ``builtins``, or ``__main__``.
    """
    if ann is type(None):
        rendered = "None"
    elif hasattr(ann, "__name__"):
        rendered = ann.__name__
    elif hasattr(ann, "_name"):
        rendered = ann._name
        if rendered is None:
            # anonymous generic alias: decide between Optional and Union
            args = ann.__args__
            if args[-1] is type(None) and len(args) == 2:
                rendered = "Optional"
            else:
                rendered = "Union"
    elif ann is ...:
        rendered = "..."
    else:
        rendered = str(ann)
    if rendered.startswith("typing.") and "[" in rendered:
        # str() fallback produced something like "typing.Foo[...]"; keep "Foo"
        rendered = rendered.split(".")[1].split("[")[0]
    module = getattr(ann, "__module__", None)
    if module is not None and module not in ("typing", "builtins", "__main__"):
        rendered = f"{module}.{rendered}"
    ann_args = getattr(ann, "__args__", None)
    if ann_args is not None:
        # drop bare TypeVars so unparameterized generics render cleanly
        ann_args = [arg for arg in ann_args if not isinstance(arg, typing.TypeVar)]
        if (
            rendered in ("Optional", "Union")
            and len(ann_args) > 0
            and ann_args[-1] is type(None)
        ):
            if len(ann_args) == 2:
                ann_args = ann_args[:-1]
            if len(ann_args) == 1:
                rendered = "Optional"
        if len(ann_args) > 0:
            arg_strs = [make_annotation_string(arg) for arg in ann_args]
            if rendered == "Union":
                # canonicalize member order so equivalent unions render equally
                arg_strs = sorted(arg_strs)
            rendered = f'{rendered}[{", ".join(arg_strs)}]'
    return rendered
15,121 | from typing import TYPE_CHECKING
import pyccolo as pyc
from IPython.core.interactiveshell import InteractiveShellABC
from traitlets.config.configurable import SingletonConfigurable
class SingletonBaseTracer(pyc.BaseTracer):
    """Empty marker subclass of pyccolo's BaseTracer."""
    pass
def tracer_initialized() -> bool:
    """Return ``SingletonBaseTracer.initialized()`` -- presumably whether the
    singleton tracer has been instantiated (per pyccolo's API; confirm)."""
    return SingletonBaseTracer.initialized()
15,122 | import ast
import logging
import sys
class ContainsNamedExprVisitor(ast.NodeVisitor):
    """AST visitor reporting whether a statement contains a walrus (``:=``).

    Single-use: the ``contains_named_expr`` flag is never reset, so create a
    fresh instance per statement checked.
    """

    def __init__(self):
        self.contains_named_expr = False

    def __call__(self, node: ast.stmt) -> bool:
        """Return True if `node` has an ast.NamedExpr anywhere in its subtree."""
        # NamedExpr arrived in Python 3.8 (PEP 572).  Compare full version
        # tuples rather than only the minor component (the original
        # `sys.version_info.minor < 8` check would misfire on a future
        # major version with a small minor number, e.g. 4.0).
        if sys.version_info < (3, 8):
            return False
        self.visit(node)
        return self.contains_named_expr

    def visit_NamedExpr(self, node):
        self.contains_named_expr = True

    def generic_visit(self, node: ast.AST):
        # prune the traversal as soon as a named expression has been found
        if self.contains_named_expr:
            return
        super().generic_visit(node)


def stmt_contains_lval(node: ast.stmt) -> bool:
    """Return True if the statement binds any name, either via an
    assignment-like statement type or via a nested walrus expression."""
    # TODO: expand to method calls, etc.
    simple_contains_lval = isinstance(
        node,
        (
            ast.Assign,
            ast.AnnAssign,
            ast.AugAssign,
            ast.ClassDef,
            ast.FunctionDef,
            ast.AsyncFunctionDef,
            ast.For,
            ast.Import,
            ast.ImportFrom,
            ast.With,
        ),
    )
    return simple_contains_lval or ContainsNamedExprVisitor()(node)
15,123 | import ast
import builtins
import itertools
import logging
import sys
from contextlib import contextmanager
from typing import (
TYPE_CHECKING,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
Union,
cast,
)
from ipyflow.analysis.mixins import (
SaveOffAttributesMixin,
SkipUnboundArgsMixin,
VisitListsMixin,
)
from ipyflow.analysis.resolved_symbols import ResolvedSymbol
from ipyflow.analysis.symbol_ref import Atom, LiveSymbolRef, SymbolRef, visit_stack
from ipyflow.config import FlowDirection
from ipyflow.data_model.timestamp import Timestamp
from ipyflow.singletons import flow, tracer
def _chain_root(node: ast.AST):
while True:
if isinstance(node, (ast.Attribute, ast.Subscript)):
node = node.value
elif isinstance(node, ast.Call):
node = node.func
else:
break
return node | null |
15,124 | import ast
import builtins
import itertools
import logging
import sys
from contextlib import contextmanager
from typing import (
TYPE_CHECKING,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
Union,
cast,
)
from ipyflow.analysis.mixins import (
SaveOffAttributesMixin,
SkipUnboundArgsMixin,
VisitListsMixin,
)
from ipyflow.analysis.resolved_symbols import ResolvedSymbol
from ipyflow.analysis.symbol_ref import Atom, LiveSymbolRef, SymbolRef, visit_stack
from ipyflow.config import FlowDirection
from ipyflow.data_model.timestamp import Timestamp
from ipyflow.singletons import flow, tracer
class SymbolRef:
    """A reference to a chain of attribute/subscript/call "atoms" rooted at a
    name, optionally anchored to a scope and an AST source range.

    NOTE(review): this copy appears to have had decorators stripped during
    extraction -- `from_string` and `resolve` take `cls` (presumably
    @classmethod), and several members read like they were @property or
    similarly decorated upstream; confirm against the original source before
    relying on call syntax.
    """
    _cached_symbol_ref_visitor = SymbolRefVisitor()
    def __init__(
        self,
        symbols: Union[ast.AST, Atom, Sequence[Atom]],
        scope: Optional["Scope"] = None,
        ast_range: Optional[AstRange] = None,
    ) -> None:
        # FIXME: each symbol should distinguish between attribute and subscript
        # FIXME: bumped in priority 2021/09/07
        if isinstance(
            symbols,
            (
                ast.Name,
                ast.Attribute,
                ast.Subscript,
                ast.Call,
                ast.ClassDef,
                ast.FunctionDef,
                ast.AsyncFunctionDef,
                ast.Import,
                ast.ImportFrom,
            ),
        ):
            # fall back to the innermost node on the visit stack when the given
            # node carries no location info
            ast_range = ast_range or AstRange.from_ast_node(
                symbols if hasattr(symbols, "lineno") else visit_stack[-1]
            )
            symbols = self._cached_symbol_ref_visitor(symbols, scope=scope).chain
        elif isinstance(symbols, ast.AST): # pragma: no cover
            raise TypeError("unexpected type for %s" % symbols)
        elif isinstance(symbols, Atom):
            symbols = [symbols]
        self.chain: Tuple[Atom, ...] = tuple(symbols)
        self.scope: Optional["Scope"] = scope
        self.ast_range: Optional[AstRange] = ast_range
    # NOTE(review): takes `cls` -- presumably @classmethod upstream; confirm.
    def from_string(
        cls, symbol_str: str, scope: Optional["Scope"] = None
    ) -> "SymbolRef":
        ret = cls(ast.parse(symbol_str, mode="eval").body, scope=scope)
        ret.ast_range = None
        return ret
    def to_symbol(self, scope: Optional["Scope"] = None) -> Optional["Symbol"]:
        # yield_in_reverse=True means the first resolved entry is the deepest
        # symbol in the chain; return it (or None if nothing resolves)
        for resolved in self.gen_resolved_symbols(
            scope or self.scope or flow().global_scope,
            only_yield_final_symbol=False,
            yield_all_intermediate_symbols=True,
            yield_in_reverse=True,
        ):
            return resolved.sym
        return None
    def to_fully_resolved_symbol(
        self, scope: Optional["Scope"] = None
    ) -> Optional["Symbol"]:
        return (scope or flow().global_scope).try_fully_resolve_attrsub_chain(self)
    # NOTE(review): takes `cls` -- presumably @classmethod upstream; confirm.
    def resolve(cls, symbol_str: str) -> Optional["Symbol"]:
        return cls.from_string(symbol_str).to_symbol()
    def __hash__(self) -> int:
        # intentionally omit self.scope
        return hash(self.chain)
    def __eq__(self, other) -> bool:
        # intentionally omit self.scope
        if not isinstance(other, SymbolRef):
            return False
        if (
            self.ast_range is not None
            and other.ast_range is not None
            and self.ast_range != other.ast_range
        ):
            # goal: equality checks should compare against ast_range when it is set to ensure that
            # different ranges get different SymbolRefs in sets and dicts, but containment checks
            # that don't set the range (and therefore don't care about it) don't use it.
            return False
        if (
            self.scope is not None
            and other.scope is not None
            and self.scope is not other.scope
        ):
            # same for scope
            return False
        return self.chain == other.chain
    def __repr__(self) -> str:
        return repr(self.chain)
    def __str__(self) -> str:
        return repr(self)
    def canonical(self) -> "SymbolRef":
        # strip reactivity markers, scope, and range to get a comparable key
        return self.__class__(
            [atom.nonreactive() for atom in self.chain],
            scope=None,
            ast_range=None,
        )
    def gen_resolved_symbols(
        self,
        scope: "Scope",
        only_yield_final_symbol: bool,
        yield_all_intermediate_symbols: bool = False,
        inherit_reactivity: bool = True,
        yield_in_reverse: bool = False,
    ) -> Generator[ResolvedSymbol, None, None]:
        """Resolve this ref's atom chain against `scope`, yielding ResolvedSymbols.

        Reactivity / blocking markers seen earlier in the chain are propagated
        onto later atoms when `inherit_reactivity` is set.
        """
        assert not (only_yield_final_symbol and yield_all_intermediate_symbols)
        assert not (yield_in_reverse and not yield_all_intermediate_symbols)
        sym, atom, next_atom = None, None, None
        reactive_seen = False
        cascading_reactive_seen = False
        blocking_seen = False
        if yield_in_reverse:
            # materialize the forward resolution, then reverse it in place
            gen: Iterable[Tuple["Symbol", Atom, Atom]] = [
                (resolved.sym, resolved.atom, resolved.next_atom)
                for resolved in self.gen_resolved_symbols(
                    scope,
                    only_yield_final_symbol=only_yield_final_symbol,
                    yield_all_intermediate_symbols=True,
                    inherit_reactivity=False,
                    yield_in_reverse=False,
                )
            ]
            cast(list, gen).reverse()
        else:
            gen = scope.gen_symbols_for_attrsub_chain(self)
        for sym, atom, next_atom in gen:
            reactive_seen = reactive_seen or atom.is_reactive
            cascading_reactive_seen = (
                cascading_reactive_seen or atom.is_cascading_reactive
            )
            # once any reactive atom is seen, all subsequent symbols are yielded
            yield_all_intermediate_symbols = (
                yield_all_intermediate_symbols or reactive_seen
            )
            if inherit_reactivity:
                if reactive_seen and not blocking_seen and not atom.is_reactive:
                    atom = atom.reactive()
                if (
                    cascading_reactive_seen
                    and not blocking_seen
                    and not atom.is_cascading_reactive
                ):
                    atom = atom.cascading_reactive()
                if blocking_seen and not atom.is_blocking:
                    atom = atom.blocking()
            if yield_all_intermediate_symbols:
                # TODO: only use this branch once staleness checker can be smarter about liveness timestamps.
                # Right now, yielding the intermediate elts of the chain will yield false positives in the
                # event of namespace stale children.
                yield ResolvedSymbol(sym, atom, next_atom)
        if not yield_all_intermediate_symbols and sym is not None:
            if next_atom is None or not only_yield_final_symbol:
                yield ResolvedSymbol(sym, atom, next_atom)
class Symbol:
NULL = object()
# object for virtual display symbol
DISPLAY = object()
IMMUTABLE_TYPES = set(IMMUTABLE_PRIMITIVE_TYPES)
IPYFLOW_MUTATION_VIRTUAL_SYMBOL_NAME = "__ipyflow_mutation"
def __init__(
self,
name: SupportedIndexType,
symbol_type: SymbolType,
obj: Any,
containing_scope: "Scope",
stmt_node: Optional[ast.stmt] = None,
symbol_node: Optional[ast.AST] = None,
refresh_cached_obj: bool = False,
implicit: bool = False,
) -> None:
if refresh_cached_obj:
# TODO: clean up redundancies
assert implicit
assert stmt_node is None
self.name = name
self.symbol_type = symbol_type
self.obj = obj
# additional user-specific metadata
self._tags: Set[str] = set()
self.extra_metadata: Dict[str, Any] = {}
self._tombstone = False
self._cached_out_of_sync = True
self.cached_obj_id: Optional[int] = None
self.cached_obj_type: Optional[Type[object]] = None
self.cached_obj_len: Optional[int] = None
if refresh_cached_obj:
self._refresh_cached_obj()
self.containing_scope = containing_scope or flow().global_scope
self.call_scope: Optional[Scope] = None
self.func_def_stmt: Optional[ast.stmt] = None
self.stmt_node = self.update_stmt_node(stmt_node)
self.symbol_node = symbol_node
self._funcall_live_symbols = None
self.parents: Dict["Symbol", List[Timestamp]] = {}
self.children: Dict["Symbol", List[Timestamp]] = {}
# initialize at -1 for implicit since the corresponding piece of data could already be around,
# and we don't want liveness checker to think this was newly created unless we
# explicitly trace an update somewhere
self._timestamp: Timestamp = (
Timestamp.uninitialized() if implicit else Timestamp.current()
)
self._snapshot_timestamps: List[Timestamp] = []
self._snapshot_timestamp_ubounds: List[Timestamp] = []
self._defined_cell_num = cells().exec_counter()
self._is_dangling_on_edges = False
self._cascading_reactive_cell_num = -1
self._override_ready_liveness_cell_num = -1
self._override_timestamp: Optional[Timestamp] = None
self.watchpoints = Watchpoints()
# The necessary last-updated timestamp / cell counter for this symbol to not be waiting
self.required_timestamp: Timestamp = self.timestamp
# for each usage of this sym, the version that was used, if different from the timestamp of usage
self.timestamp_by_used_time: Dict[Timestamp, Timestamp] = {}
self.used_node_by_used_time: Dict[Timestamp, ast.AST] = {}
# History of definitions at time of liveness
self.timestamp_by_liveness_time: Dict[Timestamp, Timestamp] = {}
# All timestamps associated with updates to this symbol
self.updated_timestamps: Set[Timestamp] = set()
# The most recent timestamp associated with a particular object id
self.last_updated_timestamp_by_obj_id: Dict[int, Timestamp] = {}
self.fresher_ancestors: Set["Symbol"] = set()
self.fresher_ancestor_timestamps: Set[Timestamp] = set()
# cells where this symbol was live
self.cells_where_deep_live: Set[Cell] = set()
self.cells_where_shallow_live: Set[Cell] = set()
self._last_computed_ready_or_waiting_cache_ts: int = -1
self._is_ready_or_waiting_at_position_cache: Dict[Tuple[int, bool], bool] = {}
# if implicitly created when tracing non-store-context ast nodes
self._implicit = implicit
# Will never be stale if no_warning is True
self.disable_warnings = False
self._temp_disable_warnings = False
self._num_ipywidget_observers = 0
self._num_mercury_widget_observers = 0
flow().aliases.setdefault(id(obj), set()).add(self)
if (
isinstance(self.name, str)
and not self.is_anonymous
and not self.containing_scope.is_namespace_scope
):
ns = self.namespace
if ns is not None and ns.scope_name == "self":
# hack to get a better name than `self.whatever` for fields of this object
# not ideal because it relies on the `self` convention but is probably
# acceptable for the use case of improving readable names
ns.scope_name = self.name
def aliases(self) -> List["Symbol"]:
return list(flow().aliases.get(self.obj_id, []))
def cells_where_live(self) -> Set[Cell]:
return self.cells_where_deep_live | self.cells_where_shallow_live
def __repr__(self) -> str:
return f"<{self.readable_name}>"
def __str__(self) -> str:
return self.readable_name
def __hash__(self) -> int:
return hash(id(self))
def __lt__(self, other) -> bool:
return id(self) < id(other)
def add_tag(self, tag_value: str) -> None:
self._tags.add(tag_value)
def remove_tag(self, tag_value: str) -> None:
self._tags.discard(tag_value)
def has_tag(self, tag_value: str) -> bool:
return tag_value in self._tags
def temporary_disable_warnings(self) -> None:
self._temp_disable_warnings = True
def last_used_timestamp(self) -> Timestamp:
if len(self.timestamp_by_used_time) == 0:
return Timestamp.uninitialized()
else:
return max(self.timestamp_by_used_time.keys())
def namespace_waiting_symbols(self) -> Set["Symbol"]:
ns = self.namespace
return set() if ns is None else ns.namespace_waiting_symbols
def shallow_timestamp(self) -> Timestamp:
if self._override_timestamp is None:
return self._timestamp
else:
return max(self._timestamp, self._override_timestamp)
def visible_timestamp(self) -> Optional[Timestamp]:
for ts in sorted(self.updated_timestamps, reverse=True):
if cells().at_timestamp(ts).is_visible:
return ts
return None
def memoize_timestamp(self) -> Optional[Timestamp]:
return self.last_updated_timestamp_by_obj_id.get(self.obj_id)
def timestamp(self) -> Timestamp:
ts = self.shallow_timestamp
if self.is_import or self.is_module:
return ts
ns = self.namespace
return ts if ns is None else max(ts, ns.max_descendent_timestamp)
def _compute_namespace_timestamps(
self,
seen: Optional[Set["Symbol"]] = None,
version_ubound: Optional[Timestamp] = None,
) -> Set[Timestamp]:
if version_ubound is None:
timestamps = {self.shallow_timestamp, self.timestamp}
else:
max_leq_ubound = Timestamp.uninitialized()
for ts in reversed(self._snapshot_timestamps):
if ts <= version_ubound:
max_leq_ubound = ts
break
if max_leq_ubound.is_initialized:
timestamps = {max_leq_ubound}
else:
timestamps = set()
ns = self.namespace
if ns is None:
return timestamps
if seen is None:
seen = set()
if self in seen:
return timestamps
seen.add(self)
for sym in ns.all_symbols_this_indentation():
timestamps |= sym._compute_namespace_timestamps(
seen=seen, version_ubound=version_ubound
)
return timestamps
def _get_timestamps_for_version(self, version: int) -> Set[Timestamp]:
if len(self._snapshot_timestamps) == 0:
return {self.timestamp}
ts = self._snapshot_timestamps[version]
if ts.cell_num == -1:
return {Timestamp(self.defined_cell_num, ts.stmt_num)}
else:
return self._compute_namespace_timestamps(
version_ubound=None if version == -1 else ts
)
def code(
self, format_type: Optional[Type[FormatType]] = None, version: int = -1
) -> Slice:
return statements().format_multi_slice(
self._get_timestamps_for_version(version=version),
blacken=True,
format_type=format_type,
)
def cascading_reactive_cell_num(
self,
seen: Optional[Set["Symbol"]] = None,
consider_containing_symbols: bool = True,
) -> int:
if seen is None:
seen = set()
if self in seen:
return -1
seen.add(self)
cell_num = self._cascading_reactive_cell_num
ns = self.namespace
ret = (
cell_num
if ns is None
else max(
cell_num,
ns.max_cascading_reactive_cell_num(seen),
)
)
if not consider_containing_symbols:
return ret
for sym in self.iter_containing_symbols():
ret = max(ret, sym.cascading_reactive_cell_num(seen=seen))
return ret
def bump_cascading_reactive_cell_num(self, ctr: Optional[int] = None) -> None:
self._cascading_reactive_cell_num = max(
self._cascading_reactive_cell_num,
flow().cell_counter() if ctr is None else ctr,
)
def iter_containing_symbols(self) -> Generator["Symbol", None, None]:
yield self
ns = self.containing_namespace
if ns is None or not ns.is_namespace_scope:
return
for containing_ns in ns.iter_containing_namespaces():
yield from flow().aliases.get(containing_ns.obj_id, [])
def waiting_timestamp(self) -> int:
return max(self._timestamp.cell_num, flow().min_timestamp)
def defined_cell_num(self) -> int:
return self._defined_cell_num
def readable_name(self) -> str:
return self.containing_scope.make_namespace_qualified_name(self)
def is_subscript(self) -> bool:
return self.symbol_type == SymbolType.SUBSCRIPT
def is_class(self) -> bool:
return self.symbol_type == SymbolType.CLASS
def is_function(self) -> bool:
return self.symbol_type == SymbolType.FUNCTION
def is_lambda(self) -> bool:
# TODO: this is terrible
return type(self.name) is str and self.name.startswith( # noqa: E721
"<lambda_sym_"
)
def is_import(self) -> bool:
return self.symbol_type == SymbolType.IMPORT
def is_module(self) -> bool:
return self.symbol_type == SymbolType.MODULE
def imported_module(self) -> str:
if not self.is_import:
raise ValueError("only IMPORT symbols have `imported_module` property")
if isinstance(self.stmt_node, ast.Import):
for alias in self.stmt_node.names:
name = alias.asname or alias.name
if name == self.name:
return alias.name
raise ValueError(
"Unable to find module for symbol %s is stmt %s"
% (self, ast.dump(self.stmt_node))
)
elif isinstance(self.stmt_node, ast.ImportFrom):
return self.stmt_node.module
else:
raise TypeError(
"Invalid stmt type for import symbol: %s" % ast.dump(self.stmt_node)
)
def imported_symbol_original_name(self) -> str:
if not self.is_import:
raise ValueError(
"only IMPORT symbols have `imported_symbol_original_name` property"
)
if isinstance(self.stmt_node, ast.Import):
return self.imported_module
elif isinstance(self.stmt_node, ast.ImportFrom):
for alias in self.stmt_node.names:
name = alias.asname or alias.name
if name == self.name:
return alias.name
raise ValueError(
"Unable to find module for symbol %s is stmt %s"
% (self, ast.dump(self.stmt_node))
)
else:
raise TypeError(
"Invalid stmt type for import symbol: %s" % ast.dump(self.stmt_node)
)
def is_cascading_reactive_at_counter(self, ctr: int) -> bool:
return self.cascading_reactive_cell_num() > max(
ctr, flow().min_cascading_reactive_cell_num
)
def get_top_level(self) -> Optional["Symbol"]:
if not self.containing_scope.is_namespace_scope:
return self
else:
containing_scope = cast("Namespace", self.containing_scope)
for alias in flow().aliases.get(containing_scope.obj_id, []):
if alias.is_globally_accessible:
return alias.get_top_level()
return None
def get_import_string(self) -> str:
if not self.is_import:
raise ValueError("only IMPORT symbols support recreating the import string")
module = self.imported_module
if isinstance(self.stmt_node, ast.Import):
if module == self.name:
return f"import {module}"
else:
return f"import {module} as {self.name}"
elif isinstance(self.stmt_node, ast.ImportFrom):
original_symbol_name = self.imported_symbol_original_name
if original_symbol_name == self.name:
return f"from {module} import {original_symbol_name}"
else:
return f"from {module} import {original_symbol_name} as {self.name}"
else:
raise TypeError(
"Invalid stmt type for import symbol: %s" % ast.dump(self.stmt_node)
)
def is_anonymous(self) -> bool:
if self.symbol_type == SymbolType.ANONYMOUS:
return True
ns = self.containing_namespace
if ns is not None and ns.is_anonymous:
return True
return False
def is_implicit(self) -> bool:
return self._implicit
def shallow_clone(
self, new_obj: Any, new_containing_scope: "Scope", symbol_type: SymbolType
) -> "Symbol":
return self.__class__(self.name, symbol_type, new_obj, new_containing_scope)
def obj_id(self) -> int:
return id(self.obj)
def obj_len(self) -> Optional[int]:
try:
if not self.is_obj_lazy_module and hasattr(self.obj, "__len__"):
return len(self.obj)
except: # noqa: E722
pass
return None
def obj_type(self) -> Type[Any]:
return type(self.obj)
def is_immutable(self) -> bool:
return self.obj_type in self.IMMUTABLE_TYPES
def is_mutation_virtual_symbol(self) -> bool:
return self.name == self.IPYFLOW_MUTATION_VIRTUAL_SYMBOL_NAME
def is_underscore(self) -> bool:
return self.name == "_" and self.containing_scope.is_global
def is_obj_lazy_module(self) -> bool:
return self.obj_type is _LazyModule
def get_type_annotation(self):
return get_type_annotation(self.obj)
def get_type_annotation_string(self) -> str:
return make_annotation_string(self.get_type_annotation())
def namespace(self) -> Optional["Namespace"]:
return flow().namespaces.get(self.obj_id)
def containing_namespace(self) -> Optional["Namespace"]:
if self.containing_scope.is_namespace_scope:
return cast("Namespace", self.containing_scope)
else:
return None
def full_path(self) -> Tuple[str, ...]:
return self.containing_scope.full_path + (str(self.name),)
def full_namespace_path(self) -> str:
return self.containing_scope.make_namespace_qualified_name(self)
def is_garbage(self) -> bool:
return self._tombstone
def is_new_garbage(self) -> bool:
if self._tombstone:
return False
containing_ns = self.containing_namespace
numpy = sys.modules.get("numpy", None)
if (
numpy is not None
and containing_ns is not None
and isinstance(containing_ns.obj, numpy.ndarray)
):
# numpy atoms are not interned (so assigning array elts to a variable does not bump refcount);
# also seems that refcount is always 0, so just check if the containing namespace is garbage
return self.containing_namespace.is_garbage
return self.get_ref_count() == 0
def is_globally_accessible(self) -> bool:
return self.containing_scope.is_globally_accessible
def is_user_accessible(self) -> bool:
return (
self.is_globally_accessible
and not self.is_anonymous
and not self.is_garbage
and not (
self.containing_namespace is not None
and (
self.containing_namespace.is_anonymous
or self.containing_namespace.is_garbage
)
)
)
def _remove_self_from_aliases(self) -> None:
cleanup_discard(flow().aliases, self.obj_id, self)
self.obj = None
def mark_garbage(self) -> None:
if self.is_garbage:
return
self._tombstone = True
ns = self.namespace
if ns is not None and all(alias.is_garbage for alias in self.aliases):
ns.mark_garbage()
def collect_self_garbage(self) -> None:
assert self.is_garbage
flow().blocked_reactive_timestamps_by_symbol.pop(self, None)
self._remove_self_from_aliases()
for parent in self.parents:
parent.children.pop(self, None)
for child in self.children:
child.parents.pop(self, None)
containing_ns = self.containing_namespace
if self.is_subscript and containing_ns is not None:
containing_ns._subscript_symbol_by_name.pop(self.name, None)
elif not self.is_subscript:
self.containing_scope._symbol_by_name.pop(self.name, None)
else:
logger.warning(
"could not find symbol %s in its scope %s", self, self.containing_scope
)
# TODO: remove from static / dynamic parent / children edges
# need to keep this around for readable_name to work
# self.containing_scope = None
# def update_type(self, new_type):
# self.symbol_type = new_type
# if self.is_function:
# self.call_scope = self.containing_scope.make_child_scope(self.name)
# else:
# self.call_scope = None
def update_obj_ref(self, obj: Any, refresh_cached: bool = True) -> None:
if self._num_ipywidget_observers > 0:
try:
self.obj.unobserve_all()
except: # noqa
pass
self._num_ipywidget_observers = 0
if self._num_mercury_widget_observers > 0:
try:
self._mercury_widgets_manager.get_widget(
self.obj.code_uid
).unobserve_all()
except: # noqa
pass
self._num_mercury_widget_observers = 0
self._tombstone = False
self._cached_out_of_sync = True
if (
flow().settings.mark_typecheck_failures_unsafe
and self.cached_obj_type != type(obj)
):
for cell in self.cells_where_live:
cell.invalidate_typecheck_result()
self.cells_where_shallow_live.clear()
self.cells_where_deep_live.clear()
self.obj = obj
if self.cached_obj_id is not None and self.cached_obj_id != self.obj_id:
new_ns = flow().namespaces.get(self.obj_id, None)
# don't overwrite existing namespace for this obj
old_ns = flow().namespaces.get(self.cached_obj_id, None)
if (
old_ns is not None
and old_ns.full_namespace_path == self.full_namespace_path
):
if new_ns is None:
logger.info("create fresh copy of namespace %s", old_ns)
new_ns = old_ns.fresh_copy(obj)
old_ns.transfer_symbols_to(new_ns)
else:
new_ns.scope_name = old_ns.scope_name
new_ns.parent_scope = old_ns.parent_scope
self._handle_aliases()
if (
old_ns is not None
and len(flow().aliases.get(self.cached_obj_id, [])) == 0
):
old_ns.mark_garbage()
if refresh_cached:
self._refresh_cached_obj()
def invalidate_cached(self) -> None:
    """Forget the cached object identity/type so the next sync check treats the value as changed."""
    self.cached_obj_id = self.cached_obj_type = None
    self._cached_out_of_sync = True
def get_ref_count(self) -> int:
    """Best-effort count of external references to this symbol's object.

    Returns -1 when there is no live object. Discounts the temporary
    reference held by ``getrefcount`` itself, references held by alias
    symbols, and (if present) the reference held by the object's namespace.
    """
    if self.obj is None or self.obj is Symbol.NULL:
        return -1
    # subtract 1 for the reference created by passing obj into getrefcount
    count = sys.getrefcount(self.obj) - 1
    count -= len(flow().aliases.get(self.obj_id, []))
    ns = flow().namespaces.get(self.obj_id, None)
    if ns is not None and ns.obj is not None and ns.obj is not Symbol.NULL:
        count -= 1
    return count
def _should_cancel_propagation(self, prev_obj: Optional[Any]) -> bool:
    """Decide whether an update need not propagate to dependents.

    Propagation is cancelled when the value is effectively unchanged
    (cache still in sync, same object id, or a null -> null transition),
    unless reactivity for this symbol was blocked at the current cell.
    """
    if prev_obj is None:
        return False
    if (
        flow().blocked_reactive_timestamps_by_symbol.get(self, -1)
        == self.timestamp.cell_num
    ):
        # reactivity explicitly blocked for this cell: do not cancel
        return False
    if not self._cached_out_of_sync or self.obj_id == self.cached_obj_id:
        # value unchanged since the last cache refresh
        return True
    if self.obj is None or prev_obj is Symbol.NULL:
        # only a null -> null transition counts as unchanged here
        return self.obj is None and prev_obj is Symbol.NULL
    return False
def _handle_aliases(self):
    # Move this symbol from the alias set of the previously cached object id
    # to the alias set of the current object id (aliases: obj id -> symbols).
    cleanup_discard(flow().aliases, self.cached_obj_id, self)
    flow().aliases.setdefault(self.obj_id, set()).add(self)
def update_stmt_node(self, stmt_node: Optional[ast.stmt]) -> Optional[ast.stmt]:
    """Record the defining statement for this symbol; for callables, also set up a call scope."""
    self.stmt_node = stmt_node
    self._funcall_live_symbols = None
    is_lambda = stmt_node is not None and isinstance(stmt_node, ast.Lambda)
    if self.is_function or is_lambda:
        # TODO: in the case of lambdas, there will not necessarily be one
        # symbol for a given statement. We need a more precise way to determine
        # the symbol being called than by looking at the stmt in question.
        flow().statement_to_func_sym[id(stmt_node)] = self
        self.func_def_stmt = stmt_node
        self.call_scope = self.containing_scope.make_child_scope(self.name)
    return stmt_node
def _refresh_cached_obj(self) -> None:
self._cached_out_of_sync = False
# don't keep an actual ref to avoid bumping refcount
self.cached_obj_id = self.obj_id
self.cached_obj_type = self.obj_type
self.cached_obj_len = self.obj_len
def get_definition_args(self) -> List[ast.arg]:
    """Return the formal parameters of this function symbol's definition.

    Yields positional-only, positional-or-keyword, and keyword-only
    parameters (in that order), followed by the ``*args`` and ``**kwargs``
    entries when present.  Positional-only parameters (PEP 570, py3.8+)
    were previously omitted; they are now included so call-arg symbols get
    created for them too.
    """
    assert self.func_def_stmt is not None and isinstance(
        self.func_def_stmt, (ast.FunctionDef, ast.AsyncFunctionDef, ast.Lambda)
    )
    arg_spec = self.func_def_stmt.args
    # getattr guard keeps this working on interpreters lacking posonlyargs
    args: List[ast.arg] = [
        *getattr(arg_spec, "posonlyargs", []),
        *arg_spec.args,
        *arg_spec.kwonlyargs,
    ]
    if arg_spec.vararg is not None:
        args.append(arg_spec.vararg)
    if arg_spec.kwarg is not None:
        args.append(arg_spec.kwarg)
    return args
def _match_call_args_with_definition_args(
    self,
) -> Generator[Tuple[ast.arg, List["Symbol"]], None, None]:
    """Pair each formal parameter with the symbols loaded by its call-site argument.

    Yields (definition arg, resolved symbols) tuples for positional args,
    then keyword args, then unfilled defaults.  Bails out early when the
    calling AST node cannot be located or kwargs don't match.
    """
    # TODO: handle posonlyargs, kwonlyargs
    assert self.func_def_stmt is not None and isinstance(
        self.func_def_stmt, (ast.FunctionDef, ast.AsyncFunctionDef, ast.Lambda)
    )
    caller_node = self._get_calling_ast_node()
    if caller_node is None or not isinstance(caller_node, ast.Call):
        return
    # params with defaults occupy the tail of args.args; they may be passed by keyword
    kwarg_by_name = {
        arg_key.arg: arg_key
        for arg_key in self.func_def_stmt.args.args[
            -len(self.func_def_stmt.args.defaults) :
        ]
    }
    if not all(keyword.arg in kwarg_by_name for keyword in caller_node.keywords):
        logger.warning("detected mismatched kwargs from caller node to definition")
        return
    def_args = self.func_def_stmt.args.args
    if len(self.func_def_stmt.args.defaults) > 0:
        def_args = def_args[: -len(self.func_def_stmt.args.defaults)]
    if len(def_args) > 0 and def_args[0].arg == "self":
        # FIXME: this is bad and I should feel bad
        def_args = def_args[1:]
    for def_arg, call_arg in zip(def_args, caller_node.args):
        if isinstance(call_arg, ast.Starred):
            # give up
            # TODO: handle this case
            break
        yield def_arg, tracer().resolve_loaded_symbols(call_arg)
    seen_keys = set()
    for keyword in caller_node.keywords:
        keyword_key, keyword_value = keyword.arg, keyword.value
        if keyword_value is None:
            continue
        seen_keys.add(keyword_key)
        yield kwarg_by_name[keyword_key], tracer().resolve_loaded_symbols(
            keyword_value
        )
    # any defaulted param not passed by keyword falls back to its default expr
    for arg_key, arg_value in zip(
        self.func_def_stmt.args.args[-len(self.func_def_stmt.args.defaults) :],
        self.func_def_stmt.args.defaults,
    ):
        if arg_key.arg in seen_keys:
            continue
        yield arg_key, tracer().resolve_loaded_symbols(arg_value)
def _get_calling_ast_node(self) -> Optional[ast.Call]:
    """Locate the ``ast.Call`` node currently invoking this function symbol.

    Returns None when tracing is off, when the callee is a builtin-backed
    object, for subscript dunders / property getters (where the calling
    node would not be a Call), or when the lexical call stack is empty.
    """
    if tracer().tracing_disabled_since_last_module_stmt or (
        not hasattr(self.obj, "__module__")
        and getattr(type(self.obj), "__module__", None) == "builtins"
    ):
        return None
    if self.func_def_stmt is not None and isinstance(
        self.func_def_stmt, (ast.FunctionDef, ast.AsyncFunctionDef)
    ):
        if self.name in ("__getitem__", "__setitem__", "__delitem__"):
            # TODO: handle case where we're looking for a subscript for the calling node
            return None
        for decorator in self.func_def_stmt.decorator_list:
            if isinstance(decorator, ast.Name) and decorator.id == "property":
                # TODO: handle case where we're looking for an attribute for the calling node
                return None
    lexical_call_stack = tracer().lexical_call_stack
    if len(lexical_call_stack) == 0:
        return None
    prev_node_id_in_cur_frame_lexical = lexical_call_stack.get_field(
        "prev_node_id_in_cur_frame_lexical"
    )
    caller_ast_node = tracer().ast_node_by_id.get(
        prev_node_id_in_cur_frame_lexical, None
    )
    if caller_ast_node is None or not isinstance(caller_ast_node, ast.Call):
        return None
    return caller_ast_node
def create_symbols_for_call_args(self, call_frame: FrameType) -> None:
    """Create symbols in this function's call scope for each of its arguments.

    Args matched to call-site expressions get those expressions' symbols
    as dependencies; any remaining formal parameters get dependency-free
    placeholder symbols.
    """
    assert self.func_def_stmt is not None
    seen_def_args = set()
    logger.info("create symbols for call to %s", self)
    for def_arg, deps in self._match_call_args_with_definition_args():
        seen_def_args.add(def_arg.arg)
        # actual value comes from the callee frame's locals
        self.call_scope.upsert_symbol_for_name(
            def_arg.arg,
            call_frame.f_locals.get(def_arg.arg),
            deps,
            self.func_def_stmt,
            propagate=False,
            symbol_node=def_arg,
        )
        logger.info("def arg %s matched with deps %s", def_arg, deps)
    for def_arg in self.get_definition_args():
        if def_arg.arg in seen_def_args:
            continue
        # unmatched parameter: create an empty placeholder symbol
        self.call_scope.upsert_symbol_for_name(
            def_arg.arg,
            None,
            set(),
            self.func_def_stmt,
            propagate=False,
            symbol_node=def_arg,
        )
def is_waiting(self) -> bool:
    """Whether this symbol is (deeply) waiting on a fresher ancestor.

    NOTE(review): referenced elsewhere without parens (e.g. ``sym.is_waiting``),
    which suggests this is decorated as a ``@property`` in the full source —
    confirm against the enclosing class.
    """
    if self.disable_warnings or self._temp_disable_warnings:
        return False
    if self.waiting_timestamp < self.required_timestamp.cell_num:
        # shallow staleness: a required update postdates our last refresh
        return True
    elif flow().min_timestamp == -1:
        return len(self.namespace_waiting_symbols) > 0
    else:
        # TODO: guard against infinite recurision
        return any(sym.is_waiting for sym in self.namespace_waiting_symbols)
def is_shallow_stale(self) -> bool:
    """Whether this symbol itself (ignoring namespace members) is stale.

    NOTE(review): like ``is_waiting``, this appears to be a ``@property``
    in the full source — confirm.
    """
    if self.disable_warnings or self._temp_disable_warnings:
        return False
    return self.waiting_timestamp < self.required_timestamp.cell_num
def _is_ready_or_waiting_at_position_impl(self, pos: int, deep: bool) -> bool:
    """Uncached check of waiting status relative to cell position ``pos``.

    A symbol waits at ``pos`` when some parent introduced at or before that
    position was updated afterwards (or itself waits there); with ``deep``,
    namespace members are consulted too.
    """
    for par, timestamps in self.parents.items():
        for ts in timestamps:
            dep_introduced_pos = cells().at_timestamp(ts).position
            if dep_introduced_pos > pos:
                # dependency introduced below pos: irrelevant here
                continue
            for updated_ts in par.updated_timestamps:
                if cells().at_timestamp(updated_ts).position > dep_introduced_pos:
                    continue
                if updated_ts.cell_num > ts.cell_num or par.is_waiting_at_position(
                    dep_introduced_pos
                ):
                    # logger.error("sym: %s", self)
                    # logger.error("pos: %s", pos)
                    # logger.error("parent: %s", par)
                    # logger.error("dep introdced ts: %s", ts)
                    # logger.error("dep introdced pos: %s", dep_introduced_pos)
                    # logger.error("par updated ts: %s", updated_ts)
                    # logger.error("par updated position: %s", cells().from_timestamp(updated_ts).position)
                    return True
    if deep:
        for sym in self.namespace_waiting_symbols:
            if sym.is_waiting_at_position(pos):
                return True
    return False
def is_waiting_at_position(self, pos: int, deep: bool = True) -> bool:
    """Memoized check of whether this symbol waits relative to cell position ``pos``.

    The per-symbol cache is cleared whenever the execution counter advances.
    """
    if deep:
        if not self.is_waiting:
            return False
    else:
        if not self.is_shallow_stale:
            return False
    if flow().mut_settings.flow_order == FlowDirection.ANY_ORDER:
        # position is irrelevant when cells may execute in any order
        return True
    if cells().exec_counter() > self._last_computed_ready_or_waiting_cache_ts:
        self._is_ready_or_waiting_at_position_cache.clear()
        self._last_computed_ready_or_waiting_cache_ts = cells().exec_counter()
    if (pos, deep) in self._is_ready_or_waiting_at_position_cache:
        return self._is_ready_or_waiting_at_position_cache[pos, deep]
    # preemptively set this entry to 'False' in the cache to avoid infinite loops
    self._is_ready_or_waiting_at_position_cache[pos, deep] = False
    is_waiting = self._is_ready_or_waiting_at_position_impl(pos, deep)
    self._is_ready_or_waiting_at_position_cache[pos, deep] = is_waiting
    return is_waiting
def should_mark_waiting(self, updated_dep):
    """Whether an update to ``updated_dep`` should mark this symbol as waiting.

    Symbols with warnings disabled never wait, and a symbol never waits
    on an update to itself.
    """
    return not self.disable_warnings and updated_dep is not self
def _is_underscore_or_simple_assign(self, new_deps: Set["Symbol"]) -> bool:
if self.is_underscore:
# FIXME: distinguish between explicit assignment to _ from user and implicit assignment from kernel
return True
if not isinstance(self.stmt_node, (ast.Assign, ast.AnnAssign)):
return False
if len(new_deps) != 1:
return False
only_dep: Symbol = next(iter(new_deps))
# obj ids can get reused for anon symbols like literals
return not only_dep.is_anonymous and self.obj_id == only_dep.obj_id
def update_deps(
    self,
    new_deps: Set["Symbol"],
    prev_obj: Any = None,
    overwrite: bool = True,
    mutated: bool = False,
    deleted: bool = False,
    propagate_to_namespace_descendents: bool = False,
    propagate: bool = True,
    refresh: bool = True,
    is_cascading_reactive: Optional[bool] = None,
) -> None:
    """Rewire this symbol's dependency edges and propagate the update.

    Core entry point for dataflow bookkeeping: replaces (or augments) the
    parent set with ``new_deps``, refreshes timestamps, and kicks off the
    update protocol toward dependents unless propagation is cancelled.
    """
    if self.is_import and self.obj_id == self.cached_obj_id:
        # skip updates for imported symbols; just bump the version
        self.refresh()
        return
    if overwrite and not self.is_globally_accessible:
        self.watchpoints.clear()
    if mutated and self.is_immutable:
        return
    # if we get here, no longer implicit
    self._implicit = False
    # quick last fix to avoid overwriting if we appear inside the set of deps to add (or a 1st order ancestor)
    # TODO: check higher-order ancestors too?
    overwrite = overwrite and self not in new_deps
    overwrite = overwrite and not any(
        self in new_dep.parents for new_dep in new_deps
    )
    logger.warning("symbol %s new deps %s", self, new_deps)
    new_deps.discard(self)
    if overwrite:
        # drop edges to parents that no longer appear among the new deps
        for parent in self.parents.keys() - new_deps:
            parent.children.pop(self, None)
            self.parents.pop(parent, None)
    # add edges for newly-introduced parents, stamped with the current time
    for new_parent in new_deps - self.parents.keys():
        if new_parent is None:
            continue
        new_parent.children.setdefault(self, []).append(Timestamp.current())
        self.parents.setdefault(new_parent, []).append(Timestamp.current())
    self.required_timestamp = Timestamp.uninitialized()
    self.fresher_ancestors.clear()
    self.fresher_ancestor_timestamps.clear()
    if mutated or isinstance(self.stmt_node, ast.AugAssign):
        self.update_usage_info()
    if (
        (mutated or overwrite)
        and Timestamp.current().is_initialized
        and not self.is_immutable
        and not self.is_mutation_virtual_symbol
        and not self.is_anonymous
        and self.containing_scope.is_global
        and not self.is_underscore
        and not self.is_implicit
        and self.obj_type is not type
        and not self.is_class
        and self.namespace is not None
    ):
        # record a synthetic "mutation" member so mutations are trackable
        self.namespace.upsert_symbol_for_name(
            self.IPYFLOW_MUTATION_VIRTUAL_SYMBOL_NAME, object(), propagate=False
        )
    propagate = propagate and (
        mutated or deleted or not self._should_cancel_propagation(prev_obj)
    )
    try:
        prev_cell = cells().current_cell().prev_cell
    except KeyError:
        prev_cell = None
    prev_cell_ctr = -1 if prev_cell is None else prev_cell.cell_ctr
    if overwrite:
        # a full overwrite resets any pending reactive state for this symbol
        flow_ = flow()
        self._cascading_reactive_cell_num = -1
        flow_.updated_reactive_symbols.discard(self)
        flow_.updated_deep_reactive_symbols.discard(self)
    if is_cascading_reactive is not None:
        is_cascading_reactive = is_cascading_reactive or any(
            sym.is_cascading_reactive_at_counter(prev_cell_ctr) for sym in new_deps
        )
        if is_cascading_reactive:
            self.bump_cascading_reactive_cell_num()
    if refresh:
        self.refresh(
            # rationale: if this is a mutation for which we have more precise information,
            # then we don't need to update the ns descendents as this will already have happened.
            # also don't update ns descendents for things like `a = b`
            refresh_descendent_namespaces=propagate
            and not (mutated and not propagate_to_namespace_descendents)
            and not self._is_underscore_or_simple_assign(new_deps),
            refresh_namespace_waiting=not mutated,
        )
    if propagate:
        UpdateProtocol(self)(
            new_deps, mutated, propagate_to_namespace_descendents, refresh
        )
    self._refresh_cached_obj()
    if self.is_class:
        # pop pending class defs and update obj ref
        pending_class_ns = tracer().pending_class_namespaces.pop()
        pending_class_ns.update_obj_ref(self.obj)
    for dep in new_deps:
        # inherit call scope / definition from an aliasing function dep
        if dep.obj is self.obj and dep.call_scope is not None:
            self.call_scope = dep.call_scope
            self.func_def_stmt = dep.func_def_stmt
    ns = self.namespace
    if ns is not None and ns.scope_name == "self" and isinstance(self.name, str):
        # fixup namespace name if necessary
        # can happen if symbol for 'self' was created in a previous __init__
        ns.scope_name = self.name
    if overwrite and len(flow().aliases[self.obj_id]) == 1:
        self._handle_possible_widget_creation()
        self._handle_possible_mercury_widget_creation()
def _mercury_widgets_manager(self):
    """Return the mercury ``WidgetsManager`` for this symbol's object, or None.

    NOTE(review): accessed elsewhere without parens
    (``self._mercury_widgets_manager.get_widget(...)``), suggesting this is
    a ``@property`` in the full source — confirm.
    """
    if self.obj is None:
        return None
    if self.is_obj_lazy_module or not hasattr(self.obj, "code_uid"):
        return None
    try:
        # mercury widgets expose their manager on the defining module
        return sys.modules.get(self.obj.__class__.__module__).WidgetsManager
    except:  # noqa
        return None
def _handle_possible_widget_creation(self) -> None:
    """If this symbol's object is an ipywidget, track its ``value`` and observe changes."""
    if self.obj is None:
        return
    # avoid importing ipywidgets; only react if it is already loaded
    Widget = getattr(sys.modules.get("ipywidgets"), "Widget", None)
    if (
        Widget is None
        or self.is_obj_lazy_module
        or not isinstance(self.obj, Widget)
        or not hasattr(self.obj, "observe")
        or not hasattr(self.obj, "value")
    ):
        return
    # expose the widget's current value as a namespace member symbol
    self.namespaced().upsert_symbol_for_name(
        "value", getattr(self.obj, "value", None), set(), self.stmt_node
    )
    self.obj.observe(self._observe_widget)
    self._num_ipywidget_observers += 1
def _handle_possible_mercury_widget_creation(self) -> None:
    """If this symbol's object is a mercury widget, track its ``value`` and observe changes."""
    WidgetsManager = self._mercury_widgets_manager
    if WidgetsManager is None:
        return
    widget = WidgetsManager.get_widget(self.obj.code_uid)
    # expose the widget's current value as a namespace member symbol
    self.namespaced().upsert_symbol_for_name(
        "value", getattr(widget, "value", None), set(), self.stmt_node
    )
    widget.observe(self._observe_widget)
    self._num_mercury_widget_observers += 1
def _observe_widget(self, msg: Dict[str, Any]) -> None:
    """Widget observer callback: mirror a widget ``value`` change into the dataflow graph.

    Synthesizes an assignment statement for the new value, gives the
    ``value`` member symbol an override timestamp, records data deps in both
    directions, and schedules a (debounced) reactive re-execution.
    """
    if msg.get("name") != "value" or "new" not in msg:
        return
    ns = self.namespace
    sym = ns.lookup_symbol_by_name_this_indentation("value")
    if sym is None:
        return
    newval = msg["new"]
    current_ts_cell = cells().at_timestamp(self._timestamp)
    # fabricate a statement representing the widget-driven assignment
    current_ts_cell._extra_stmt = ast.parse(f"{sym.readable_name} = {newval}").body[
        0
    ]
    sym._override_ready_liveness_cell_num = flow().cell_counter() + 1
    sym._override_timestamp = Timestamp(
        self._timestamp.cell_num, current_ts_cell.num_original_stmts
    )
    sym.update_obj_ref(newval)
    statements().create_and_track(
        current_ts_cell._extra_stmt,
        timestamp=sym._override_timestamp,
        override=True,
    )
    with dynamic_slicing_context():
        # link the original and override timestamps in both directions
        flow().add_data_dep(
            sym._timestamp,
            sym._override_timestamp,
            sym,
        )
        flow().add_data_dep(
            sym._override_timestamp,
            sym._timestamp,
            sym,
        )
    self.debounced_exec_schedule(reactive=True)
def debounced_exec_schedule(self, reactive: bool) -> None:
    # Schedule (debounced) re-execution of the cell this symbol was updated
    # in; only mark a pending schedule when a fresh debounce timer started.
    if _debounced_exec_schedule(
        cells().at_timestamp(self.timestamp).cell_id, reactive=reactive
    ):
        flow().debounced_exec_schedule_pending = True
def namespaced(self) -> "Namespace":
    """Return this symbol's namespace, creating one on demand when absent."""
    existing = self.namespace
    if existing is None:
        return namespaces()(self.obj, self.name, parent_scope=self.containing_scope)
    return existing
def update_usage_info_one_timestamp(
    self,
    used_time: Timestamp,
    updated_time: Timestamp,
    is_static: bool,
) -> bool:
    """Record a single usage edge (used_time depends on updated_time) if applicable.

    Static usages are always recorded; dynamic ones only when the update
    precedes the use.  For static usages, returns whether the updating
    cell is visible.
    """
    flow_ = flow()
    is_usage = is_static or updated_time < used_time
    if is_usage:
        with slicing_context(is_static=is_static):
            flow_.add_data_dep(
                used_time,
                updated_time,
                self,
            )
        if is_static:
            # static usage only "counts" if the updating cell is visible
            is_usage = cells().at_timestamp(updated_time).is_visible
    return is_usage
def update_usage_info(
    self,
    used_time: Optional[Timestamp] = None,
    used_node: Optional[ast.AST] = None,
    exclude_ns: bool = False,
    is_static: bool = False,
    is_blocking: bool = False,
) -> "Symbol":
    """Record that this symbol was used at ``used_time`` (defaulting to now).

    Walks this symbol's updated timestamps (newest first) to find a
    qualifying update, records the usage edge, and recursively records
    usage for namespace members unless ``exclude_ns``.  Returns self.
    """
    is_blocking = is_blocking or id(used_node) in tracer().blocking_node_ids
    if used_time is None:
        used_time = Timestamp.current()
    if flow().is_dev_mode:
        # NOTE(review): self.timestamp is a Timestamp passed to a %d
        # placeholder here — confirm Timestamp formats as intended
        logger.info(
            "sym `%s` used in cell %d last updated in cell %d",
            self,
            used_time.cell_num,
            self.timestamp,
        )
    timestamp_by_used_time = (
        self.timestamp_by_liveness_time
        if is_static
        else self.timestamp_by_used_time
    )
    if not is_blocking:
        is_usage = False
        # NOTE(review): ts_to_use is never reassigned inside the loop below;
        # confirm the recorded timestamp is intended to always be _timestamp
        ts_to_use = self._timestamp
        for updated_ts in sorted(self.updated_timestamps, reverse=True):
            if not updated_ts.is_initialized:
                continue
            is_usage = self.update_usage_info_one_timestamp(
                used_time,
                updated_ts,
                is_static=is_static,
            )
            if is_usage or not is_static:
                break
        if is_usage and used_time.is_initialized:
            timestamp_by_used_time[used_time] = ts_to_use
            if used_node is not None:
                self.used_node_by_used_time[used_time] = used_node
    if exclude_ns:
        return self
    # propagate the usage to all (recursive) namespace members
    for sym in self.get_namespace_symbols(recurse=True):
        sym.update_usage_info(
            used_time=used_time,
            used_node=None,
            exclude_ns=True,
            is_static=is_static,
            is_blocking=is_blocking,
        )
    return self
def get_namespace_symbols(
    self, recurse: bool = False, seen: Optional[Set["Symbol"]] = None
) -> Generator["Symbol", None, None]:
    """Yield the symbols in this symbol's namespace (optionally recursing).

    ``seen`` guards against cycles among mutually-referencing namespaces.
    """
    ns = self.namespace
    if ns is None:
        return
    seen = set() if seen is None else seen
    if self in seen:
        return
    seen.add(self)
    for member in ns.all_symbols_this_indentation():
        yield member
        if not recurse:
            continue
        yield from member.get_namespace_symbols(recurse=True, seen=seen)
def _take_timestamp_snapshots(
    self, ts_ubound: Timestamp, seen: Optional[Set["Symbol"]] = None
) -> None:
    """Snapshot this symbol's current timestamp (with upper bound ``ts_ubound``),
    then do the same for aliases of the containing namespace.

    ``seen`` guards against cycles in the alias graph.
    """
    if seen is None:
        seen = set()
    if self in seen:
        return
    seen.add(self)
    self._snapshot_timestamps.append(self._timestamp)
    self._snapshot_timestamp_ubounds.append(ts_ubound)
    containing_ns = self.containing_namespace
    if containing_ns is None:
        return
    for alias in flow().aliases.get(containing_ns.obj_id, []):
        alias._take_timestamp_snapshots(ts_ubound, seen=seen)
def refresh(
    self,
    take_timestamp_snapshots: bool = True,
    refresh_descendent_namespaces: bool = False,
    refresh_namespace_waiting: bool = True,
    timestamp: Optional[Timestamp] = None,
    seen: Optional[Set["Symbol"]] = None,
) -> None:
    """Mark this symbol as freshly updated at ``timestamp`` (default: now).

    Optionally snapshots timestamps, bumps the containing namespace's max
    descendent timestamp, and recursively refreshes namespace members.
    """
    orig_timestamp = self._timestamp
    self._timestamp = Timestamp.current() if timestamp is None else timestamp
    self._override_timestamp = None
    if take_timestamp_snapshots and (
        orig_timestamp < self._timestamp or len(self._snapshot_timestamps) == 0
    ):
        self._take_timestamp_snapshots(self._timestamp)
    self.updated_timestamps.add(self._timestamp)
    self._temp_disable_warnings = False
    for cell in self.cells_where_live:
        cell.add_used_cell_counter(self, self._timestamp.cell_num)
    ns = self.containing_namespace
    if ns is not None:
        # logger.error("bump version of %s due to %s (value %s)", ns.full_path, self.full_path, self.obj)
        ns.max_descendent_timestamp = self.shallow_timestamp
        for alias in flow().aliases.get(ns.obj_id, []):
            for cell in alias.cells_where_deep_live:
                cell.add_used_cell_counter(alias, self._timestamp.cell_num)
    if refresh_descendent_namespaces:
        if seen is None:
            seen = set()
        if self in seen:
            return
        seen.add(self)
        ns = self.namespace
        if ns is not None:
            for sym in ns.all_symbols_this_indentation(exclude_class=True):
                # this is to handle cases like `x = x.mutate(42)`, where
                # we could have changed some member of x but returned the
                # original object -- in this case, just assume that all
                # the stale namespace descendents are no longer stale, as
                # this is likely the user intention. For an example, see
                # `test_external_object_update_propagates_to_stale_namespace_symbols()`
                # in `test_frontend_checker.py`
                if not sym.is_waiting or refresh_namespace_waiting:
                    # logger.error(
                    #     "refresh %s due to %s (value %s) via namespace %s",
                    #     sym.full_path,
                    #     self.full_path,
                    #     self.obj,
                    #     ns.full_path,
                    # )
                    sym.refresh(
                        refresh_descendent_namespaces=True,
                        timestamp=self.shallow_timestamp,
                        take_timestamp_snapshots=False,
                        seen=seen,
                    )
        if refresh_namespace_waiting:
            self.namespace_waiting_symbols.clear()
def resync_if_necessary(self, refresh: bool) -> None:
    """Re-sync this global symbol with the object currently bound to its name
    in the user namespace, fixing up aliases and (optionally) refreshing.
    """
    if not self.containing_scope.is_global:
        return
    try:
        obj = shell().user_ns[self.name]
    except:  # noqa
        # cinder runtime can throw an exception here due to lazy imports that fail
        return
    if self.obj is not obj:
        flow_ = flow()
        for alias in flow_.aliases.get(
            self.cached_obj_id, set()
        ) | flow_.aliases.get(self.obj_id, set()):
            containing_namespace = alias.containing_namespace
            if containing_namespace is None:
                continue
            containing_obj = containing_namespace.obj
            if containing_obj is None:
                continue
            # TODO: handle dict case too
            if isinstance(containing_obj, list) and containing_obj[-1] is obj:
                # object was appended to a list: rekey the alias to the last index
                containing_namespace._subscript_symbol_by_name.pop(alias.name, None)
                alias.name = len(containing_obj) - 1
                alias.update_obj_ref(obj)
                containing_namespace._subscript_symbol_by_name[alias.name] = alias
        # move this symbol's alias membership onto the new object id
        cleanup_discard(flow_.aliases, self.cached_obj_id, self)
        cleanup_discard(flow_.aliases, self.obj_id, self)
        flow_.aliases.setdefault(id(obj), set()).add(self)
        self.update_obj_ref(obj)
    elif self.obj_len != self.cached_obj_len:
        # same object, different length (e.g. in-place append): re-cache only
        self._refresh_cached_obj()
    else:
        return
    if refresh:
        self.refresh()
# Objects whose flattened comparable form exceeds this size are never memoized.
_MAX_MEMOIZE_COMPARABLE_SIZE = 10**6
def _equal(obj1: Any, obj2: Any) -> bool:
return obj1 == obj2
def _array_equal(obj1: Any, obj2: Any) -> bool:
import numpy as np
try:
return np.alltrue(obj1 == obj2) # type: ignore
except: # noqa
return False
def _dataframe_equal(obj1: Any, obj2: Any) -> bool:
try:
return obj1.equals(obj2) # type: ignore
except: # noqa
return False
def _make_list_eq(
eqs: List[Callable[[Any, Any], bool]]
) -> Callable[[List[Any], List[Any]], bool]:
def list_eq(lst1: List[Any], lst2: List[Any]) -> bool:
for eq, obj1, obj2 in zip(eqs, lst1, lst2):
if not eq(obj1, obj2):
return False
return True
return list_eq
def make_memoize_comparable_for_obj(
    cls, obj: Any, seen_ids: Set[int]
) -> Tuple[Any, Optional[Callable[[Any, Any], bool]], int]:
    """Build a (comparable form, comparator, size) triple for ``obj``.

    Returns ``(cls.NULL, None, -1)`` when the object cannot be safely
    compared for memoization (cycles, unsupported types, or oversized).

    NOTE(review): first parameter is ``cls`` — this looks like a
    ``@classmethod`` whose decorator was stripped; confirm.
    """
    if isinstance(obj, (bool, bytes, bytearray, int, float, str)):
        return obj, cls._equal, 1
    if not isinstance(obj, tuple):
        # cycle detection for mutable containers (tuples cannot self-cycle alone)
        if id(obj) in seen_ids:
            return cls.NULL, None, -1
        seen_ids.add(id(obj))
    if isinstance(obj, (dict, frozenset, list, set, tuple)):
        size = 0
        comp = []
        eqs: List[Callable[[Any, Any], bool]] = []
        if isinstance(obj, dict):
            iterable: "Iterable[Any]" = sorted(obj.items())
        else:
            iterable = obj
        for inner in iterable:
            inner_comp, inner_eq, inner_size = cls.make_memoize_comparable_for_obj(
                inner, seen_ids
            )
            if inner_comp is cls.NULL or inner_eq is None:
                return cls.NULL, None, -1
            size += inner_size + 1
            if size > cls._MAX_MEMOIZE_COMPARABLE_SIZE:
                return cls.NULL, None, -1
            comp.append(inner_comp)
            eqs.append(inner_eq)
        if all(eq is cls._equal for eq in eqs):
            iter_eq: Callable[[Any, Any], bool] = cls._equal
        elif isinstance(obj, (frozenset, set)):
            # unordered containers need elementwise `==`; give up otherwise
            return cls.NULL, None, -1
        else:
            iter_eq = cls._make_list_eq(eqs)
        ret = frozenset(comp) if isinstance(obj, (frozenset, set)) else comp
        return ret, iter_eq, size
    elif type(obj) in (type, FunctionType):
        # try to determine it based on the symbol
        for sym in flow().aliases.get(id(obj), []):
            comp, eq = sym.make_memoize_comparable(seen_ids=seen_ids)
            if comp is not cls.NULL and eq is not None:
                return comp, eq, 1
        return cls.NULL, None, -1
    else:
        # hacks to check if they are arrays, dataframes, etc without explicitly importing these
        module = getattr(type(obj), "__module__", "")
        if module.startswith("numpy"):
            name = getattr(type(obj), "__name__", "")
            if name.endswith("ndarray"):
                return obj, cls._array_equal, obj.size
            else:
                numpy = sys.modules.get("numpy")
                if numpy is not None and isinstance(obj, numpy.number):
                    return obj, cls._equal, 1
        elif module.startswith(("modin", "pandas")):
            name = getattr(type(obj), "__name__", "")
            if name.endswith(("DataFrame", "Series")):
                return obj, cls._dataframe_equal, obj.size
        elif module.startswith("ipywidgets"):
            ipywidgets = sys.modules.get("ipywidgets")
            if (
                ipywidgets is not None
                and isinstance(obj, ipywidgets.Widget)
                and hasattr(obj, "value")
            ):
                # a widget's comparable form is just its current value
                return obj.value, cls._equal, 1
        return cls.NULL, None, -1
def make_memoize_comparable(
    self, seen_ids: Optional[Set[int]] = None
) -> Tuple[Any, Optional[Callable[[Any, Any], bool]]]:
    """Build a (comparable form, comparator) pair for this symbol's value.

    Class/function symbols compare by unparsed source plus their parents'
    comparable forms; other symbols delegate to the per-object builder.
    Returns ``(self.NULL, None)`` when no safe comparison exists.
    """
    if seen_ids is None:
        seen_ids = set()
    if isinstance(
        self.stmt_node, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)
    ):
        # definitions compare structurally by their source text
        comps = [astunparse.unparse(self.stmt_node)]
        for sym in sorted(self.parents.keys()):
            par_comp, eq = sym.make_memoize_comparable(seen_ids=seen_ids)
            if par_comp is self.NULL or eq is not self._equal:
                return self.NULL, None
            comps.append(par_comp)
        return comps, self._equal
    obj, eq, size = self.make_memoize_comparable_for_obj(self.obj, seen_ids)
    if size > self._MAX_MEMOIZE_COMPARABLE_SIZE:
        return self.NULL, None
    else:
        return obj, eq
def get_symbols_for_references(
    symbol_refs: Iterable[SymbolRef],
    scope: "Scope",
) -> Tuple[Set["Symbol"], Set["Symbol"]]:
    """Resolve each ref in ``scope`` and partition the results into
    (plain symbols, called symbols)."""
    plain: Set["Symbol"] = set()
    called: Set["Symbol"] = set()
    for ref in symbol_refs:
        for resolved in ref.gen_resolved_symbols(
            scope, only_yield_final_symbol=True
        ):
            bucket = called if resolved.is_called else plain
            bucket.add(resolved.sym)
    return plain, called
15,125 | import ast
import builtins
import itertools
import logging
import sys
from contextlib import contextmanager
from typing import (
TYPE_CHECKING,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
Union,
cast,
)
from ipyflow.analysis.mixins import (
SaveOffAttributesMixin,
SkipUnboundArgsMixin,
VisitListsMixin,
)
from ipyflow.analysis.resolved_symbols import ResolvedSymbol
from ipyflow.analysis.symbol_ref import Atom, LiveSymbolRef, SymbolRef, visit_stack
from ipyflow.config import FlowDirection
from ipyflow.data_model.timestamp import Timestamp
from ipyflow.singletons import flow, tracer
def compute_live_dead_symbol_refs(
    code: Union[ast.AST, List[ast.stmt], str],
    scope: Optional["Scope"] = None,
    init_killed: Optional[Set[str]] = None,
    include_killed_live: bool = False,
) -> Tuple[Set[LiveSymbolRef], Set[SymbolRef], Set[SymbolRef]]:
    """Compute live / dead / modified symbol refs for ``code``.

    ``code`` may be source text, a parsed AST, or a list of statements;
    the latter two are normalized to an AST before visiting.
    """
    if init_killed is None:
        init_killed = set()
    if isinstance(code, str):
        code = ast.parse(code)
    elif isinstance(code, list):
        # supply type_ignores so the synthesized node is a valid ast.Module
        # (ast.Module(code) alone leaves the field unset on py3.8+)
        code = ast.Module(body=code, type_ignores=[])
    return ComputeLiveSymbolRefs(
        scope=scope, init_killed=init_killed, include_killed_live=include_killed_live
    )(code)
def stmt_contains_cascading_reactive_rval(stmt: ast.stmt) -> bool:
    """Whether any atom among the statement's live references is marked cascading-reactive."""
    live_refs, *_ = compute_live_dead_symbol_refs(stmt)
    return any(
        atom.is_cascading_reactive for ref in live_refs for atom in ref.ref.chain
    )
15,126 | import ast
import logging
from typing import (
TYPE_CHECKING,
Any,
Generator,
Iterable,
List,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from ipyflow.analysis.resolved_symbols import ResolvedSymbol
from ipyflow.data_model.timestamp import Timestamp
from ipyflow.singletons import flow, tracer
from ipyflow.types import SubscriptIndices, SupportedIndexType
from ipyflow.utils import CommonEqualityMixin
from ipyflow.utils.ast_utils import AstRange, subscript_to_slice
# Index values that can be resolved statically: scalar constants or tuples thereof.
# NOTE(review): this rebinding shadows the SupportedIndexType imported from
# ipyflow.types above — confirm which definition is intended to win.
SupportedIndexType = Union[
    str, int, bool, None, Tuple[Union[str, int, bool, None], ...]
]
The provided code snippet includes necessary dependencies for implementing the `resolve_slice_to_constant` function. Write a Python function `def resolve_slice_to_constant( node: ast.Subscript, ) -> Optional[Union[SupportedIndexType, ast.Name]]` to solve the following problem:
Version-independent way to get at the slice data
Here is the function:
def resolve_slice_to_constant(
    node: ast.Subscript,
) -> Optional[Union[SupportedIndexType, ast.Name]]:
    """
    Version-independent way to get at the slice data.

    Returns a constant (str/int/bool/None or a tuple of those), an
    ``ast.Name`` node when the index is a simple name, or None when the
    slice cannot be resolved statically.  Handles both the pre-3.9
    ``ast.Index`` wrapping (via ``subscript_to_slice``) and the deprecated
    ``ast.Num`` / ``ast.Str`` node classes.
    """
    slc = subscript_to_slice(node)
    if isinstance(slc, ast.Tuple):
        # resolve each tuple element to its constant value, or give up
        elts: Any = []
        for v in slc.elts:
            if isinstance(v, ast.Num):
                elts.append(v.n)
            elif isinstance(v, ast.Str):
                elts.append(v.s)
            elif isinstance(v, ast.Constant):
                elts.append(v.value)
            else:
                return None
        return tuple(elts)  # type: ignore
    negate = False
    if isinstance(slc, ast.UnaryOp) and isinstance(slc.op, ast.USub):
        # remember unary minus so we can negate a numeric constant below
        negate = True
        slc = slc.operand
    if isinstance(slc, ast.Name):
        return slc
    if not isinstance(slc, (ast.Constant, ast.Str, ast.Num)):
        return None
    if isinstance(slc, ast.Constant):
        slc = slc.value
    elif isinstance(slc, ast.Num):  # pragma: no cover
        slc = slc.n  # type: ignore
        if not isinstance(slc, int):
            return None
    elif isinstance(slc, ast.Str):  # pragma: no cover
        slc = slc.s  # type: ignore
    else:
        return None
    if isinstance(slc, int) and negate:
        slc = -slc  # type: ignore
    return slc  # type: ignore
15,127 | import ast
import logging
from typing import List, Sequence, Tuple, Union
from ipyflow.analysis.mixins import (
SaveOffAttributesMixin,
SkipUnboundArgsMixin,
VisitListsMixin,
)
class GetSymbolEdges(
    SaveOffAttributesMixin, SkipUnboundArgsMixin, VisitListsMixin, ast.NodeVisitor
):
    """AST visitor that collects (target, value) symbol edges from a statement.

    Each edge pairs an assignment-like target (a name string or an AST node)
    with the AST expression it derives from.  Bodies of compound statements
    are skipped since this visitor is applied line-by-line.
    """

    def __init__(self) -> None:
        # collected (target, value) pairs, in discovery order until reversed
        self.edges: List[Tuple[Union[str, ast.AST], ast.AST]] = []

    def __call__(self, node: ast.AST) -> List[Tuple[Union[str, ast.AST], ast.AST]]:
        self.visit(node)
        # need to reverse in order to handle nested edges first,
        # since these need to have symbols in place for e.g. nested NamedExpr's
        self.edges.reverse()
        return self.edges

    def visit_expr(self, node):
        # python <= 3.7 doesn't support isinstance(obj, None)
        if hasattr(ast, "NamedExpr") and isinstance(node, getattr(ast, "NamedExpr")):
            self.visit_NamedExpr(node)
        else:
            super().generic_visit(node)

    def visit_NamedExpr(self, node):
        # walrus operator: target := value
        self.edges.append((node.target, node.value))
        self.visit(node.value)

    def generic_visit(self, node: Union[ast.AST, Sequence[ast.AST]]):
        # The purpose of this is to make sure we call our visit_expr method if we see an expr
        if node is None:
            return
        elif isinstance(node, ast.expr):
            self.visit_expr(node)
        else:
            super().generic_visit(node)

    def visit_AugAssign_or_AnnAssign(self, node):
        self.edges.append((node.target, node.value))
        self.visit(node.value)

    def visit_AnnAssign(self, node):
        self.visit_AugAssign_or_AnnAssign(node)

    def visit_AugAssign(self, node):
        self.visit_AugAssign_or_AnnAssign(node)

    def visit_For(self, node):
        # skip body -- will have dummy since this visitor works line-by-line
        self.edges.append((node.target, node.iter))
        self.visit(node.iter)

    def visit_If(self, node):
        # skip body here too
        self.visit(node.test)

    def visit_FunctionDef_or_AsyncFunctionDef(self, node):
        # the def'd name derives from the whole definition node
        self.edges.append((node.name, node))
        self.visit(node.args)
        self.visit(node.decorator_list)

    def visit_FunctionDef(self, node):
        self.visit_FunctionDef_or_AsyncFunctionDef(node)

    def visit_AsyncFunctionDef(self, node):
        self.visit_FunctionDef_or_AsyncFunctionDef(node)

    def visit_ClassDef(self, node):
        self.edges.append((node.name, node))
        self.visit(node.bases)
        self.visit(node.decorator_list)

    def visit_With(self, node):
        # skip body
        self.visit(node.items)

    def visit_withitem(self, node):
        aliases = node.optional_vars
        if aliases is not None:
            # TODO: ideally we should unpack from the namespace
            if isinstance(aliases, list):
                for alias in aliases:
                    self.edges.append((alias, node.context_expr))
            else:
                self.edges.append((aliases, node.context_expr))

    def visit_ExceptHandler(self, node: ast.ExceptHandler):
        # TODO: this needs test coverage I think
        if node.name is not None and node.type is not None:
            self.edges.append((node.name, node.type))

    def visit_Import(self, node: ast.Import):
        self.visit_Import_or_ImportFrom(node)

    def visit_ImportFrom(self, node: ast.ImportFrom):
        self.visit_Import_or_ImportFrom(node)

    def visit_Import_or_ImportFrom(self, node: Union[ast.Import, ast.ImportFrom]):
        # the bound name is the alias when present, else the module/member name
        for name in node.names:
            if name.asname is None:
                self.edges.append((name.name, name))
            else:
                self.edges.append((name.asname, name))
def get_symbol_edges(
    node: Union[str, ast.AST]
) -> List[Tuple[Union[str, ast.AST], ast.AST]]:
    """Collect (target, value) symbol edges for a statement, parsing it first
    when given as source text."""
    tree = ast.parse(node).body[0] if isinstance(node, str) else node
    return GetSymbolEdges()(tree)
15,128 | import ast
import logging
from collections import defaultdict
from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Set, Tuple
from ipyflow.config import ExecutionSchedule, FlowDirection
from ipyflow.data_model.cell import Cell, CheckerResult, cells
from ipyflow.data_model.symbol import Symbol
from ipyflow.singletons import flow
from ipyflow.slicing.context import SlicingContext, slicing_ctx_var
from ipyflow.types import IdType
def _make_range_from_node(node: ast.AST) -> Dict[str, Any]:
return {
"start": {
"line": node.lineno - 1,
"character": node.col_offset,
},
"end": {
"line": getattr(node, "end_lineno", 0) - 1,
"character": getattr(node, "end_col_offset", 0) - 1,
},
} | null |
15,129 | import re
from threading import Timer
from typing import Callable
def cleanup_discard(d, key, val):
    """Discard ``val`` from the set at ``d[key]``, dropping the key once empty."""
    bucket = d.get(key, set())
    bucket.discard(val)
    if not bucket:
        d.pop(key, None)
15,130 | import re
from threading import Timer
from typing import Callable
def cleanup_pop(d, key, val):
    """Pop *val* from the dict at ``d[key]``, dropping the key once that dict is empty."""
    inner = d.get(key, {})
    inner.pop(val, None)
    if not inner:
        d.pop(key, None)
15,131 | import re
from threading import Timer
from typing import Callable
The provided code snippet includes necessary dependencies for implementing the `debounce` function. Write a Python function `def debounce(wait: float) -> Callable[[Callable[..., None]], Callable[..., bool]]` to solve the following problem:
Decorator that will postpone a functions execution until after wait seconds have elapsed since the last time it was invoked.
Here is the function:
def debounce(wait: float) -> Callable[[Callable[..., None]], Callable[..., bool]]:
    """Decorator that postpones a function's execution until *wait* seconds
    have elapsed since the last time it was invoked.

    The wrapped function returns True when the call started a fresh timer
    (no pending invocation had to be cancelled), False otherwise.
    """
    def decorator(fn: Callable[..., None]) -> Callable[..., bool]:
        def debounced(*args, **kwargs) -> bool:
            def fire():
                fn(*args, **kwargs)
            pending = getattr(debounced, "t", None)
            if pending is None:
                # First call ever: nothing to cancel.
                did_start_new = True
            else:
                # If the previous timer already fired, this call starts a
                # genuinely new debounce window.
                did_start_new = pending.finished.is_set()
                pending.cancel()
            debounced.t = Timer(wait, fire)
            debounced.t.start()
            return did_start_new
        return debounced
    return decorator
15,132 | import ast
import logging
import sys
from contextlib import contextmanager
from io import StringIO
from typing import Any, Callable, Generator, List, Optional, TextIO
from IPython.core.displayhook import DisplayHook
from IPython.core.displaypub import CapturingDisplayPublisher, DisplayPublisher
from IPython.core.interactiveshell import ExecutionResult, InteractiveShell
from IPython.utils.capture import CapturedIO
from traitlets import MetaHasTraits
from ipyflow.singletons import shell
_IPY = _IpythonState()
def save_number_of_currently_executing_cell() -> Generator[None, None, None]:
    """Generator-based wrapper delegating to the shared ``_IPY`` context manager of the same name."""
    mgr = _IPY.save_number_of_currently_executing_cell()
    with mgr:
        yield
15,133 | import ast
import logging
import sys
from contextlib import contextmanager
from io import StringIO
from typing import Any, Callable, Generator, List, Optional, TextIO
from IPython.core.displayhook import DisplayHook
from IPython.core.displaypub import CapturingDisplayPublisher, DisplayPublisher
from IPython.core.interactiveshell import ExecutionResult, InteractiveShell
from IPython.utils.capture import CapturedIO
from traitlets import MetaHasTraits
from ipyflow.singletons import shell
_IPY = _IpythonState()
def ast_transformer_context(transformers) -> Generator[None, None, None]:
    """Generator-based wrapper delegating to ``_IPY.ast_transformer_context(transformers)``."""
    mgr = _IPY.ast_transformer_context(transformers)
    with mgr:
        yield
15,134 | import ast
import logging
import sys
from contextlib import contextmanager
from io import StringIO
from typing import Any, Callable, Generator, List, Optional, TextIO
from IPython.core.displayhook import DisplayHook
from IPython.core.displaypub import CapturingDisplayPublisher, DisplayPublisher
from IPython.core.interactiveshell import ExecutionResult, InteractiveShell
from IPython.utils.capture import CapturedIO
from traitlets import MetaHasTraits
from ipyflow.singletons import shell
_IPY = _IpythonState()
def input_transformer_context(transformers) -> Generator[None, None, None]:
    """Generator-based wrapper delegating to ``_IPY.input_transformer_context(transformers)``."""
    mgr = _IPY.input_transformer_context(transformers)
    with mgr:
        yield
15,135 | import ast
import logging
import sys
from contextlib import contextmanager
from io import StringIO
from typing import Any, Callable, Generator, List, Optional, TextIO
from IPython.core.displayhook import DisplayHook
from IPython.core.displaypub import CapturingDisplayPublisher, DisplayPublisher
from IPython.core.interactiveshell import ExecutionResult, InteractiveShell
from IPython.utils.capture import CapturedIO
from traitlets import MetaHasTraits
from ipyflow.singletons import shell
_IPY = _IpythonState()
def cell_counter() -> int:
    """Return the current cell counter, failing if called outside the managing context."""
    counter = _IPY.cell_counter
    if counter is None:
        raise ValueError("should be inside context manager here")
    return counter
15,136 | import ast
import logging
import sys
from contextlib import contextmanager
from io import StringIO
from typing import Any, Callable, Generator, List, Optional, TextIO
from IPython.core.displayhook import DisplayHook
from IPython.core.displaypub import CapturingDisplayPublisher, DisplayPublisher
from IPython.core.interactiveshell import ExecutionResult, InteractiveShell
from IPython.utils.capture import CapturedIO
from traitlets import MetaHasTraits
from ipyflow.singletons import shell
def shell() -> "IPyflowShellInstance":
    """Return the process-wide ``IPyflowShell`` singleton.

    Assumes the shell has already been initialized (asserted below).
    """
    assert IPyflowShell.initialized()
    return IPyflowShell.instance()  # type: ignore
def run_cell(cell, **kwargs) -> ExecutionResult:
    """Run *cell* through the ipyflow shell.

    Only ``store_history`` (default True) and ``silent`` (default False) are
    honored; any other keyword arguments are silently dropped.
    """
    store_history = kwargs.pop("store_history", True)
    silent = kwargs.pop("silent", False)
    return shell().run_cell(cell, store_history=store_history, silent=silent)
15,137 | import ast
import logging
import sys
from contextlib import contextmanager
from io import StringIO
from typing import Any, Callable, Generator, List, Optional, TextIO
from IPython.core.displayhook import DisplayHook
from IPython.core.displaypub import CapturingDisplayPublisher, DisplayPublisher
from IPython.core.interactiveshell import ExecutionResult, InteractiveShell
from IPython.utils.capture import CapturedIO
from traitlets import MetaHasTraits
from ipyflow.singletons import shell
_PURPLE = "\033[95m"
_RESET = "\033[0m"
print_ = print
def print_purple(text: str, **kwargs) -> None:
    """Print *text* to stdout wrapped in ANSI purple escape codes."""
    # \033[95m switches to purple; \033[0m resets styling afterward.
    print_(_PURPLE + text + _RESET, **kwargs)
15,138 | import ast
import logging
import sys
from contextlib import contextmanager
from io import StringIO
from typing import Any, Callable, Generator, List, Optional, TextIO
from IPython.core.displayhook import DisplayHook
from IPython.core.displaypub import CapturingDisplayPublisher, DisplayPublisher
from IPython.core.interactiveshell import ExecutionResult, InteractiveShell
from IPython.utils.capture import CapturedIO
from traitlets import MetaHasTraits
from ipyflow.singletons import shell
_RED = "\033[91m"
_RESET = "\033[0m"
print_ = print
def print_red(text: str, **kwargs) -> None:
    """Print *text* to stdout wrapped in ANSI red escape codes."""
    print_(_RED + text + _RESET, **kwargs)
15,139 | import ast
import logging
import sys
from contextlib import contextmanager
from io import StringIO
from typing import Any, Callable, Generator, List, Optional, TextIO
from IPython.core.displayhook import DisplayHook
from IPython.core.displaypub import CapturingDisplayPublisher, DisplayPublisher
from IPython.core.interactiveshell import ExecutionResult, InteractiveShell
from IPython.utils.capture import CapturedIO
from traitlets import MetaHasTraits
from ipyflow.singletons import shell
def make_mro_inserter_metaclass(old_class, new_class):
    """Build a metaclass whose classes get ``new_class`` spliced into their
    MRO immediately before every occurrence of ``old_class``.

    NOTE(review): presumably used to transparently layer a replacement class
    into an existing (traitlets-based) hierarchy — confirm at call sites.
    """
    class MetaMroInserter(MetaHasTraits):
        def mro(cls):
            # Recompute the MRO, inserting new_class right before old_class.
            ret = []
            for clazz in super().mro():
                if clazz is old_class:
                    ret.append(new_class)
                ret.append(clazz)
            return ret
    return MetaMroInserter
15,140 | import configparser
import errno
import json
import os
import re
import subprocess
import sys
from typing import Callable, Dict
import functools
HANDLERS: Dict[str, Dict[str, Callable]] = {}
The provided code snippet includes necessary dependencies for implementing the `register_vcs_handler` function. Write a Python function `def register_vcs_handler(vcs, method)` to solve the following problem:
Create decorator to mark a method as the handler of a VCS.
Here is the function:
def register_vcs_handler(vcs, method):  # decorator
    """Create decorator to mark a method as the handler of a VCS."""
    def decorate(f):
        """Register *f* under HANDLERS[vcs][method] and return it unchanged."""
        vcs_handlers = HANDLERS.setdefault(vcs, {})
        vcs_handlers[method] = f
        return f
    return decorate
15,141 | import configparser
import errno
import json
import os
import re
import subprocess
import sys
from typing import Callable, Dict
import functools
The provided code snippet includes necessary dependencies for implementing the `git_get_keywords` function. Write a Python function `def git_get_keywords(versionfile_abs)` to solve the following problem:
Extract version information from the given file.
Here is the function:
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Scans *versionfile_abs* for ``git_refnames``/``git_full``/``git_date``
    assignments and returns whichever were found as a dict with keys
    "refnames", "full" and "date".  An unreadable file yields an empty dict.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    prefixes = {
        "git_refnames =": "refnames",
        "git_full =": "full",
        "git_date =": "date",
    }
    keywords = {}
    try:
        with open(versionfile_abs, "r") as fobj:
            for line in fobj:
                stripped = line.strip()
                for prefix, key in prefixes.items():
                    if stripped.startswith(prefix):
                        mo = re.search(r'=\s*"(.*)"', line)
                        if mo:
                            keywords[key] = mo.group(1)
    except OSError:
        pass
    return keywords
15,142 | import configparser
import errno
import json
import os
import re
import subprocess
import sys
from typing import Callable, Dict
import functools
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
The provided code snippet includes necessary dependencies for implementing the `git_versions_from_keywords` function. Write a Python function `def git_versions_from_keywords(keywords, tag_prefix, verbose)` to solve the following problem:
Get version information from git keywords.
Here is the function:
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    ``keywords`` is the dict produced by git_get_keywords; ``tag_prefix``
    is stripped from candidate tags.  Returns a version dict, or raises
    NotThisMethod when the keywords are missing or still contain the
    unexpanded ``$Format`` placeholders (i.e. not a git-archive tarball).
    """
    if "refnames" not in keywords:
        raise NotThisMethod("Short version file found")
    date = keywords.get("date")
    if date is not None:
        # Use only the last line. Previous lines may contain GPG signature
        # information.
        date = date.splitlines()[-1]
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {r.strip() for r in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = {r for r in refs if re.search(r'\d', r)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            # Filter out refs that exactly match prefix or that don't start
            # with a number once the prefix is stripped (mostly a concern
            # when prefix is '')
            if not re.match(r'\d', r):
                continue
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
15,143 | import configparser
import errno
import json
import os
import re
import subprocess
import sys
from typing import Callable, Dict
import functools
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Each name in ``commands`` (a list of candidate executables) is tried in
    turn with ``args`` appended until one can be launched.  Returns a tuple
    of (decoded/stripped stdout, returncode); (None, None) when no candidate
    executable could be launched, and (None, returncode) on nonzero exit.
    """
    assert isinstance(commands, list)
    process = None
    popen_kwargs = {}
    if sys.platform == "win32":
        # This hides the console window if pythonw.exe is used
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        popen_kwargs["startupinfo"] = startupinfo
    for command in commands:
        try:
            dispcmd = str([command] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen([command] + args, cwd=cwd, env=env,
                                       stdout=subprocess.PIPE,
                                       stderr=(subprocess.PIPE if hide_stderr
                                               else None), **popen_kwargs)
            break
        except OSError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # Executable not found under this name; try the next candidate.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # The for-loop exhausted every candidate without a successful launch.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip().decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
The provided code snippet includes necessary dependencies for implementing the `git_pieces_from_vcs` function. Write a Python function `def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command)` to solve the following problem:
Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree.
Here is the function:
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
    """Get version from 'git describe' in the root of the source tree.
    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict (keys: "long", "short", "branch", "closest-tag",
    "distance", "dirty", "date", "error"); raises NotThisMethod when the
    tree is not under git control or a required git invocation fails.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    # GIT_DIR can interfere with correct operation of Versioneer.
    # It may be intended to be passed to the Versioneer-versioned project,
    # but that should not change where we get our version from.
    env = os.environ.copy()
    env.pop("GIT_DIR", None)
    runner = functools.partial(runner, env=env)
    _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
                   hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    MATCH_ARGS = ["--match", "%s*" % tag_prefix] if tag_prefix else []
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty",
                                     "--always", "--long", *MATCH_ARGS],
                              cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
                             cwd=root)
    # --abbrev-ref was added in git-1.6.3
    if rc != 0 or branch_name is None:
        raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
    branch_name = branch_name.strip()
    if branch_name == "HEAD":
        # If we aren't exactly on a branch, pick a branch which represents
        # the current commit. If all else fails, we are on a branchless
        # commit.
        branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
        # --contains was added in git-1.5.4
        if rc != 0 or branches is None:
            raise NotThisMethod("'git branch --contains' returned error")
        branches = branches.split("\n")
        # Remove the first line if we're running detached
        if "(" in branches[0]:
            branches.pop(0)
        # Strip off the leading "* " from the list of branches.
        branches = [branch[2:] for branch in branches]
        if "master" in branches:
            branch_name = "master"
        elif not branches:
            branch_name = None
        else:
            # Pick the first branch that is returned. Good or bad.
            branch_name = branches[0]
    pieces["branch"] = branch_name
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparsable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    # NOTE(review): assumes 'git show' succeeded — runner may return (None, rc)
    # on failure, which would make .strip() raise; confirm upstream behavior.
    date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
    # Use only the last line. Previous lines may contain GPG signature
    # information.
    date = date.splitlines()[-1]
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
15,144 | import configparser
import errno
import json
import os
import re
import subprocess
import sys
from typing import Callable, Dict
import functools
def get_versions(verbose=False):
    """Get the project version from whatever source is available.
    Returns dict with two keys: 'version' and 'full'.

    Sources are tried in order: expanded git-archive keywords in
    _version.py, a previously rewritten _version.py, the VCS itself
    ('git describe'), and finally the parent directory name; falls back
    to a "0+unknown" dict when none applies.
    """
    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]
    root = get_root()
    cfg = get_config_from_root(root)
    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert cfg.versionfile_source is not None, \
        "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
    versionfile_abs = os.path.join(root, cfg.versionfile_source)
    # extract version from first of: _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.
    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    if get_keywords_f and from_keywords_f:
        try:
            keywords = get_keywords_f(versionfile_abs)
            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            pass
    try:
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, ver))
        return ver
    except NotThisMethod:
        pass
    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
            ver = render(pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass
    try:
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass
    if verbose:
        print("unable to compute version")
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None, "error": "unable to compute version",
            "date": None}
The provided code snippet includes necessary dependencies for implementing the `get_version` function. Write a Python function `def get_version()` to solve the following problem:
Get the short version string for this project.
Here is the function:
def get_version():
    """Get the short version string for this project."""
    info = get_versions()
    return info["version"]
15,145 | import configparser
import errno
import json
import os
import re
import subprocess
import sys
from typing import Callable, Dict
import functools
def get_root():
    """Get the project root directory.
    We require that all commands are run from the project root, i.e. the
    directory that contains setup.py, setup.cfg, and versioneer.py .

    Raises VersioneerBadRootError when neither the current working
    directory nor the directory of sys.argv[0] looks like a project root.
    """
    root = os.path.realpath(os.path.abspath(os.getcwd()))
    setup_py = os.path.join(root, "setup.py")
    versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        # allow 'python path/to/setup.py COMMAND'
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
        setup_py = os.path.join(root, "setup.py")
        versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        err = ("Versioneer was unable to run the project root directory. "
               "Versioneer requires setup.py to be executed from "
               "its immediate directory (like 'python setup.py COMMAND'), "
               "or in a way that lets it use sys.argv[0] to find the root "
               "(like 'python path/to/setup.py COMMAND').")
        raise VersioneerBadRootError(err)
    try:
        # Certain runtime workflows (setup.py install/develop in a setuptools
        # tree) execute all dependencies in a single python process, so
        # "versioneer" may be imported multiple times, and python's shared
        # module-import table will cache the first one. So we can't use
        # os.path.dirname(__file__), as that will find whichever
        # versioneer.py was first imported, even in later projects.
        my_path = os.path.realpath(os.path.abspath(__file__))
        me_dir = os.path.normcase(os.path.splitext(my_path)[0])
        vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
        if me_dir != vsr_dir:
            print("Warning: build in %s is using versioneer.py from %s"
                  % (os.path.dirname(my_path), versioneer_py))
    except NameError:
        # __file__ is undefined (e.g. when executed via exec); skip the check.
        pass
    return root
def get_config_from_root(root):
    """Read the project setup.cfg file to determine Versioneer config.

    Returns a populated VersioneerConfig; only the ``VCS`` key of the
    [versioneer] section is mandatory — everything else has a default.
    """
    # This might raise OSError (if setup.cfg is missing), or
    # configparser.NoSectionError (if it lacks a [versioneer] section), or
    # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
    # the top of versioneer.py for instructions on writing your setup.cfg .
    setup_cfg = os.path.join(root, "setup.cfg")
    parser = configparser.ConfigParser()
    with open(setup_cfg, "r") as cfg_file:
        parser.read_file(cfg_file)
    VCS = parser.get("versioneer", "VCS")  # mandatory
    # Dict-like interface for non-mandatory entries
    section = parser["versioneer"]
    cfg = VersioneerConfig()
    cfg.VCS = VCS
    cfg.style = section.get("style", "")
    cfg.versionfile_source = section.get("versionfile_source")
    cfg.versionfile_build = section.get("versionfile_build")
    cfg.tag_prefix = section.get("tag_prefix")
    # A tag_prefix written as an empty quoted string means "no prefix".
    if cfg.tag_prefix in ("''", '""'):
        cfg.tag_prefix = ""
    cfg.parentdir_prefix = section.get("parentdir_prefix")
    cfg.verbose = section.get("verbose")
    return cfg
LONG_VERSION_PY: Dict[str, str] = {}
LONG_VERSION_PY['git'] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.22 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
from typing import Callable, Dict
import functools
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
popen_kwargs = {}
if sys.platform == "win32":
# This hides the console window if pythonw.exe is used
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
popen_kwargs["startupinfo"] = startupinfo
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen([command] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None), **popen_kwargs)
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, process.returncode
return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for _ in range(3):
for prefix in [parentdir_prefix, ""]:
prefix = prefix.replace("-", "_")
for dirname in [os.path.basename(root)] + os.listdir(root):
dirname = dirname.replace("-", "_")
if not dirname.startswith(prefix):
continue
components = dirname[len(prefix):].split(".")
components = [
comp for comp in components
if all(c.isdigit() for c in comp)
]
if len(components) <= 1:
continue
return {"version": ".".join(components),
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r'\d', r)}
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r'\d', r):
continue
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# GIT_DIR can interfere with correct operation of Versioneer.
# It may be intended to be passed to the Versioneer-versioned project,
# but that should not change where we get our version from.
env = os.environ.copy()
env.pop("GIT_DIR", None)
runner = functools.partial(runner, env=env)
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
MATCH_ARGS = ["--match", "%%s*" %% tag_prefix] if tag_prefix else []
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty",
"--always", "--long", *MATCH_ARGS],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def pep440_split_post(ver):
"""Split pep440 version string at the post-release segment.
Returns the release segments before the post-release and the
post-release version number (or -1 if no post-release segment is present).
"""
vc = str.split(ver, ".post")
return vc[0], int(vc[1] or 0) if len(vc) == 2 else None
def render_pep440_pre(pieces):
"""TAG[.postN.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
if pieces["distance"]:
# update the post release segment
tag_version, post_version = pep440_split_post(pieces["closest-tag"])
rendered = tag_version
if post_version is not None:
rendered += ".post%%d.dev%%d" %% (post_version+1, pieces["distance"])
else:
rendered += ".post0.dev%%d" %% (pieces["distance"])
else:
# no commits, use the tag as the version
rendered = pieces["closest-tag"]
else:
# exception #1
rendered = "0.post0.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_post_branch(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .
The ".dev0" means not master branch.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-branch":
rendered = render_pep440_branch(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-post-branch":
rendered = render_pep440_post_branch(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for _ in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
def write_to_version_file(filename, versions):
    """Write the given version number to the given _version.py file."""
    # Remove the stale file first; the short template is rendered from scratch.
    os.unlink(filename)
    payload = json.dumps(versions, sort_keys=True, indent=1,
                         separators=(",", ": "))
    with open(filename, "w") as handle:
        handle.write(SHORT_VERSION_PY % payload)
    print("set %s to '%s'" % (filename, versions["version"]))
def get_versions(verbose=False):
    """Get the project version from whatever source is available.
    Returns dict with two keys: 'version' and 'full'.
    """
    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]

    root = get_root()
    cfg = get_config_from_root(root)

    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert cfg.versionfile_source is not None, \
        "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"

    versionfile_abs = os.path.join(root, cfg.versionfile_source)

    # Sources are tried in order: expanded git-archive keywords, the
    # generated _version.py file, a live VCS query, and finally the parent
    # directory name produced by conventional sdist/zipball unpacking.
    extract_keywords = handlers.get("get_keywords")
    keywords_to_version = handlers.get("keywords")
    if extract_keywords and keywords_to_version:
        try:
            raw_keywords = extract_keywords(versionfile_abs)
            found = keywords_to_version(raw_keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % found)
            return found
        except NotThisMethod:
            pass

    try:
        found = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, found))
        return found
    except NotThisMethod:
        pass

    pieces_from_vcs = handlers.get("pieces_from_vcs")
    if pieces_from_vcs:
        try:
            vcs_pieces = pieces_from_vcs(cfg.tag_prefix, root, verbose)
            found = render(vcs_pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % found)
            return found
        except NotThisMethod:
            pass

    try:
        if cfg.parentdir_prefix:
            found = versions_from_parentdir(cfg.parentdir_prefix, root,
                                            verbose)
            if verbose:
                print("got version from parentdir %s" % found)
            return found
    except NotThisMethod:
        pass

    if verbose:
        print("unable to compute version")
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None, "error": "unable to compute version",
            "date": None}
The provided code snippet includes the necessary dependencies for implementing the `get_cmdclass` function. Write a Python function `def get_cmdclass(cmdclass=None)` to solve the following problem:
Get the custom setuptools/distutils subclasses used by Versioneer. If the package uses a different cmdclass (e.g. one from numpy), it should be provided as an argument.
Here is the function:
def get_cmdclass(cmdclass=None):
    """Get the custom setuptools/distutils subclasses used by Versioneer.

    If the package uses a different cmdclass (e.g. one from numpy), it
    should be provided as an argument.
    """
    if "versioneer" in sys.modules:
        del sys.modules["versioneer"]
        # this fixes the "python setup.py develop" case (also 'install' and
        # 'easy_install .'), in which subdependencies of the main project are
        # built (using setup.py bdist_egg) in the same python process. Assume
        # a main project A and a dependency B, which use different versions
        # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
        # sys.modules by the time B's setup.py is executed, causing B to run
        # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
        # sandbox that restores sys.modules to its pre-build state, so the
        # parent is protected against the child's "import versioneer". By
        # removing ourselves from sys.modules here, before the child build
        # happens, we protect the child from the parent's versioneer too.
        # Also see https://github.com/python-versioneer/python-versioneer/issues/52

    # Start from the caller-provided command map (copied, so we never mutate
    # the caller's dict) or a fresh one.
    cmds = {} if cmdclass is None else cmdclass.copy()

    # we add "version" to both distutils and setuptools
    try:
        from setuptools import Command
    except ImportError:
        from distutils.core import Command

    class cmd_version(Command):
        description = "report generated version string"
        user_options = []
        boolean_options = []

        def initialize_options(self):
            pass

        def finalize_options(self):
            pass

        def run(self):
            # Print the computed version and its provenance details.
            vers = get_versions(verbose=True)
            print("Version: %s" % vers["version"])
            print(" full-revisionid: %s" % vers.get("full-revisionid"))
            print(" dirty: %s" % vers.get("dirty"))
            print(" date: %s" % vers.get("date"))
            if vers["error"]:
                print(" error: %s" % vers["error"])
    cmds["version"] = cmd_version

    # we override "build_py" in both distutils and setuptools
    #
    # most invocation pathways end up running build_py:
    #  distutils/build -> build_py
    #  distutils/install -> distutils/build ->..
    #  setuptools/bdist_wheel -> distutils/install ->..
    #  setuptools/bdist_egg -> distutils/install_lib -> build_py
    #  setuptools/install -> bdist_egg ->..
    #  setuptools/develop -> ?
    #  pip install:
    #   copies source tree to a tempdir before running egg_info/etc
    #   if .git isn't copied too, 'git describe' will fail
    #   then does setup.py bdist_wheel, or sometimes setup.py install
    #  setup.py egg_info -> ?

    # we override different "build_py" commands for both environments
    if 'build_py' in cmds:
        _build_py = cmds['build_py']
    elif "setuptools" in sys.modules:
        from setuptools.command.build_py import build_py as _build_py
    else:
        from distutils.command.build_py import build_py as _build_py

    class cmd_build_py(_build_py):
        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            versions = get_versions()
            _build_py.run(self)
            # now locate _version.py in the new build/ directory and replace
            # it with an updated value
            if cfg.versionfile_build:
                target_versionfile = os.path.join(self.build_lib,
                                                  cfg.versionfile_build)
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
    cmds["build_py"] = cmd_build_py

    if 'build_ext' in cmds:
        _build_ext = cmds['build_ext']
    elif "setuptools" in sys.modules:
        from setuptools.command.build_ext import build_ext as _build_ext
    else:
        from distutils.command.build_ext import build_ext as _build_ext

    class cmd_build_ext(_build_ext):
        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            versions = get_versions()
            _build_ext.run(self)
            if self.inplace:
                # build_ext --inplace will only build extensions in
                # build/lib<..> dir with no _version.py to write to.
                # As in place builds will already have a _version.py
                # in the module dir, we do not need to write one.
                return
            # now locate _version.py in the new build/ directory and replace
            # it with an updated value
            target_versionfile = os.path.join(self.build_lib,
                                              cfg.versionfile_build)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(target_versionfile, versions)
    cmds["build_ext"] = cmd_build_ext

    if "cx_Freeze" in sys.modules:  # cx_freeze enabled?
        from cx_Freeze.dist import build_exe as _build_exe
        # nczeczulin reports that py2exe won't like the pep440-style string
        # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
        # setup(console=[{
        #   "version": versioneer.get_version().split("+", 1)[0],  # FILEVERSION
        #   "product_version": versioneer.get_version(),
        #   ...

        class cmd_build_exe(_build_exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)

                _build_exe.run(self)
                # Restore the long-form _version.py template after the build
                # so the source tree is left in its pre-build state.
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["build_exe"] = cmd_build_exe
        del cmds["build_py"]

    if 'py2exe' in sys.modules:  # py2exe enabled?
        from py2exe.distutils_buildexe import py2exe as _py2exe

        class cmd_py2exe(_py2exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)

                _py2exe.run(self)
                # As above: put the long-form template back afterwards.
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["py2exe"] = cmd_py2exe

    # we override different "sdist" commands for both environments
    if 'sdist' in cmds:
        _sdist = cmds['sdist']
    elif "setuptools" in sys.modules:
        from setuptools.command.sdist import sdist as _sdist
    else:
        from distutils.command.sdist import sdist as _sdist

    class cmd_sdist(_sdist):
        def run(self):
            versions = get_versions()
            # Stash the computed versions for make_release_tree() below.
            self._versioneer_generated_versions = versions
            # unless we update this, the command will keep using the old
            # version
            self.distribution.metadata.version = versions["version"]
            return _sdist.run(self)

        def make_release_tree(self, base_dir, files):
            root = get_root()
            cfg = get_config_from_root(root)
            _sdist.make_release_tree(self, base_dir, files)
            # now locate _version.py in the new base_dir directory
            # (remembering that it may be a hardlink) and replace it with an
            # updated value
            target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(target_versionfile,
                                  self._versioneer_generated_versions)
    cmds["sdist"] = cmd_sdist

    return cmds
import configparser
import errno
import json
import os
import re
import subprocess
import sys
from typing import Callable, Dict
import functools
def get_root():
    """Get the project root directory.
    We require that all commands are run from the project root, i.e. the
    directory that contains setup.py, setup.cfg, and versioneer.py .
    """
    def _looks_like_root(directory):
        # A project root is recognized by either marker file being present.
        return (os.path.exists(os.path.join(directory, "setup.py"))
                or os.path.exists(os.path.join(directory, "versioneer.py")))

    root = os.path.realpath(os.path.abspath(os.getcwd()))
    if not _looks_like_root(root):
        # allow 'python path/to/setup.py COMMAND'
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
    if not _looks_like_root(root):
        err = ("Versioneer was unable to run the project root directory. "
               "Versioneer requires setup.py to be executed from "
               "its immediate directory (like 'python setup.py COMMAND'), "
               "or in a way that lets it use sys.argv[0] to find the root "
               "(like 'python path/to/setup.py COMMAND').")
        raise VersioneerBadRootError(err)

    versioneer_py = os.path.join(root, "versioneer.py")
    try:
        # Certain runtime workflows (setup.py install/develop in a setuptools
        # tree) execute all dependencies in a single python process, so
        # "versioneer" may be imported multiple times, and python's shared
        # module-import table will cache the first one. So we can't use
        # os.path.dirname(__file__), as that will find whichever
        # versioneer.py was first imported, even in later projects.
        my_path = os.path.realpath(os.path.abspath(__file__))
        me_dir = os.path.normcase(os.path.splitext(my_path)[0])
        vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
        if me_dir != vsr_dir:
            print("Warning: build in %s is using versioneer.py from %s"
                  % (os.path.dirname(my_path), versioneer_py))
    except NameError:
        # __file__ is unavailable (e.g. frozen interpreters); skip the check.
        pass
    return root
def get_config_from_root(root):
    """Read the project setup.cfg file to determine Versioneer config."""
    # Reading may raise OSError (setup.cfg missing), or
    # configparser.NoSectionError (no [versioneer] section), or
    # configparser.NoOptionError (no "VCS=" entry). See the docstring at
    # the top of versioneer.py for instructions on writing your setup.cfg .
    parser = configparser.ConfigParser()
    with open(os.path.join(root, "setup.cfg"), "r") as handle:
        parser.read_file(handle)
    vcs = parser.get("versioneer", "VCS")  # mandatory

    # Every other entry is optional; read them through the section's
    # dict-like interface.
    options = parser["versioneer"]
    cfg = VersioneerConfig()
    cfg.VCS = vcs
    cfg.style = options.get("style", "")
    cfg.versionfile_source = options.get("versionfile_source")
    cfg.versionfile_build = options.get("versionfile_build")
    cfg.tag_prefix = options.get("tag_prefix")
    if cfg.tag_prefix in ("''", '""'):
        # A quoted empty string in the config means "no tag prefix".
        cfg.tag_prefix = ""
    cfg.parentdir_prefix = options.get("parentdir_prefix")
    cfg.verbose = options.get("verbose")
    return cfg
# Maps a VCS name (e.g. "git") to the long-form _version.py template text;
# populated immediately below.
LONG_VERSION_PY: Dict[str, str] = {}
LONG_VERSION_PY['git'] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.22 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
from typing import Callable, Dict
import functools
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
popen_kwargs = {}
if sys.platform == "win32":
# This hides the console window if pythonw.exe is used
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
popen_kwargs["startupinfo"] = startupinfo
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen([command] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None), **popen_kwargs)
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, process.returncode
return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for _ in range(3):
for prefix in [parentdir_prefix, ""]:
prefix = prefix.replace("-", "_")
for dirname in [os.path.basename(root)] + os.listdir(root):
dirname = dirname.replace("-", "_")
if not dirname.startswith(prefix):
continue
components = dirname[len(prefix):].split(".")
components = [
comp for comp in components
if all(c.isdigit() for c in comp)
]
if len(components) <= 1:
continue
return {"version": ".".join(components),
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r'\d', r)}
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r'\d', r):
continue
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# GIT_DIR can interfere with correct operation of Versioneer.
# It may be intended to be passed to the Versioneer-versioned project,
# but that should not change where we get our version from.
env = os.environ.copy()
env.pop("GIT_DIR", None)
runner = functools.partial(runner, env=env)
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
MATCH_ARGS = ["--match", "%%s*" %% tag_prefix] if tag_prefix else []
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty",
"--always", "--long", *MATCH_ARGS],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def pep440_split_post(ver):
"""Split pep440 version string at the post-release segment.
Returns the release segments before the post-release and the
post-release version number (or -1 if no post-release segment is present).
"""
vc = str.split(ver, ".post")
return vc[0], int(vc[1] or 0) if len(vc) == 2 else None
def render_pep440_pre(pieces):
"""TAG[.postN.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
if pieces["distance"]:
# update the post release segment
tag_version, post_version = pep440_split_post(pieces["closest-tag"])
rendered = tag_version
if post_version is not None:
rendered += ".post%%d.dev%%d" %% (post_version+1, pieces["distance"])
else:
rendered += ".post0.dev%%d" %% (pieces["distance"])
else:
# no commits, use the tag as the version
rendered = pieces["closest-tag"]
else:
# exception #1
rendered = "0.post0.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_post_branch(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .
The ".dev0" means not master branch.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-branch":
rendered = render_pep440_branch(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-post-branch":
rendered = render_pep440_post_branch(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for _ in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
def do_vcs_install(manifest_in, versionfile_source, ipy):
    """Git-specific installation logic for Versioneer.

    For Git, this means creating/changing .gitattributes to mark _version.py
    for export-subst keyword substitution, then staging the touched files.
    """
    git_cmds = ["git.cmd", "git.exe"] if sys.platform == "win32" else ["git"]
    files = [manifest_in, versionfile_source]
    if ipy:
        files.append(ipy)
    # Stage this versioneer.py itself as well; frozen interpreters may not
    # define __file__, in which case fall back to the conventional name.
    try:
        me = __file__
        if me.endswith((".pyc", ".pyo")):
            me = os.path.splitext(me)[0] + ".py"
        versioneer_file = os.path.relpath(me)
    except NameError:
        versioneer_file = "versioneer.py"
    files.append(versioneer_file)
    # Determine whether .gitattributes already carries the export-subst mark
    # for the version file.
    present = False
    try:
        with open(".gitattributes", "r") as fobj:
            for line in fobj:
                stripped = line.strip()
                if stripped.startswith(versionfile_source) and \
                        "export-subst" in stripped.split()[1:]:
                    present = True
                    break
    except OSError:
        pass
    if not present:
        with open(".gitattributes", "a+") as fobj:
            fobj.write(f"{versionfile_source} export-subst\n")
        files.append(".gitattributes")
    run_command(git_cmds, ["add", "--"] + files)
# Printed (to stderr) by do_setup() when setup.cfg has no usable
# [versioneer] section; documents the expected config and setup.py wiring.
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""

# Commented-out sample [versioneer] section appended to setup.cfg by
# do_setup() when no configuration exists yet.
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""

# Legacy __init__.py boilerplate; do_setup() replaces it with the
# INIT_PY_SNIPPET form when found.
OLD_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""

# Current __init__.py boilerplate; {0} is the _version module's base name.
INIT_PY_SNIPPET = """
from . import {0}
__version__ = {0}.get_versions()['version']
"""
The provided code snippet includes necessary dependencies for implementing the `do_setup` function. Write a Python function `def do_setup()` to solve the following problem:
Do main VCS-independent setup function for installing Versioneer.
Here is the function:
def do_setup():
    """Do main VCS-independent setup function for installing Versioneer."""
    root = get_root()
    try:
        cfg = get_config_from_root(root)
    except (OSError, configparser.NoSectionError,
            configparser.NoOptionError) as e:
        # No setup.cfg, or no [versioneer] section in it: append a sample
        # config block so the user has a template to fill in, then abort.
        if isinstance(e, (OSError, configparser.NoSectionError)):
            print("Adding sample versioneer config to setup.cfg",
                  file=sys.stderr)
            with open(os.path.join(root, "setup.cfg"), "a") as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1

    # Render the VCS-specific _version.py from the LONG_VERSION_PY template,
    # substituting the configured style and tag/parentdir prefixes.
    print(" creating %s" % cfg.versionfile_source)
    with open(cfg.versionfile_source, "w") as f:
        LONG = LONG_VERSION_PY[cfg.VCS]
        f.write(LONG % {"DOLLAR": "$",
                        "STYLE": cfg.style,
                        "TAG_PREFIX": cfg.tag_prefix,
                        "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                        "VERSIONFILE_SOURCE": cfg.versionfile_source,
                        })

    # Install (or upgrade) the __version__ boilerplate in the package's
    # __init__.py so runtime code can read the computed version.
    ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
                       "__init__.py")
    if os.path.exists(ipy):
        try:
            with open(ipy, "r") as f:
                old = f.read()
        except OSError:
            old = ""
        module = os.path.splitext(os.path.basename(cfg.versionfile_source))[0]
        snippet = INIT_PY_SNIPPET.format(module)
        if OLD_SNIPPET in old:
            # Upgrade the legacy snippet in place.
            print(" replacing boilerplate in %s" % ipy)
            with open(ipy, "w") as f:
                f.write(old.replace(OLD_SNIPPET, snippet))
        elif snippet not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(snippet)
        else:
            print(" %s unmodified" % ipy)
    else:
        print(" %s doesn't exist, ok" % ipy)
        ipy = None

    # Make sure both the top-level "versioneer.py" and versionfile_source
    # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
    # they'll be copied into source distributions. Pip won't be able to
    # install the package without this.
    manifest_in = os.path.join(root, "MANIFEST.in")
    simple_includes = set()
    try:
        with open(manifest_in, "r") as f:
            for line in f:
                if line.startswith("include "):
                    for include in line.split()[1:]:
                        simple_includes.add(include)
    except OSError:
        pass
    # That doesn't cover everything MANIFEST.in can do
    # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
    # it might give some false negatives. Appending redundant 'include'
    # lines is safe, though.
    if "versioneer.py" not in simple_includes:
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, "a") as f:
            f.write("include versioneer.py\n")
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if cfg.versionfile_source not in simple_includes:
        print(" appending versionfile_source ('%s') to MANIFEST.in" %
              cfg.versionfile_source)
        with open(manifest_in, "a") as f:
            f.write("include %s\n" % cfg.versionfile_source)
    else:
        print(" versionfile_source already in MANIFEST.in")

    # Make VCS-specific changes. For git, this means creating/changing
    # .gitattributes to mark _version.py for export-subst keyword
    # substitution.
    do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
    return 0
15,147 | import configparser
import errno
import json
import os
import re
import subprocess
import sys
from typing import Callable, Dict
import functools
The provided code snippet includes necessary dependencies for implementing the `scan_setup_py` function. Write a Python function `def scan_setup_py()` to solve the following problem:
Validate the contents of setup.py against Versioneer's expectations.
Here is the function:
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations.

    Returns the number of problems found (0 means setup.py looks correct).
    """
    required = {"import", "cmdclass", "get_version"}
    found = set()
    setters = False
    errors = 0
    # Scan setup.py line by line for the three required versioneer hooks
    # and for obsolete attribute-setter lines.
    with open("setup.py", "r") as f:
        for line in f:
            if "import versioneer" in line:
                found.add("import")
            if "versioneer.get_cmdclass()" in line:
                found.add("cmdclass")
            if "versioneer.get_version()" in line:
                found.add("get_version")
            if "versioneer.VCS" in line or "versioneer.versionfile_source" in line:
                setters = True
    if found != required:
        print("")
        print("Your setup.py appears to be missing some important items")
        print("(but I might be wrong). Please make sure it has something")
        print("roughly like the following:")
        print("")
        print(" import versioneer")
        print(" setup( version=versioneer.get_version(),")
        print(" cmdclass=versioneer.get_cmdclass(), ...)")
        print("")
        errors += 1
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print("now lives in setup.cfg, and should be removed from setup.py")
        print("")
        errors += 1
    return errors
15,156 | import os
import re
from setuptools import setup, find_packages
def get_version():
    """Read the package version string out of ``fastedit/__init__.py``.

    Looks for a line of the form ``__version__ = "X.Y.Z"`` and returns the
    quoted value; raises ValueError if exactly one match is not found.
    """
    init_path = os.path.join("fastedit", "__init__.py")
    with open(init_path, "r", encoding="utf-8") as init_file:
        source = init_file.read()
    matches = re.findall(r"__version__\W*=\W*\"([^\"]+)\"", source)
    (version,) = matches
    return version
15,157 | import os
import fire
import json
from typing import Optional
from .rome import ROMEHyperParams, apply_rome_to_model
from .utils.prints import print_loud
from .utils.template import Template
from .utils.mtloader import load_model_and_tokenizer
from .utils.generate import generate_fast, generate_interactive
def print_loud(x, pad=3):
    r"""
    Prints a string inside a box of ``#`` characters for emphasis.

    Example:
    ############################
    #                          #
    #   Applying ROME to model #
    #                          #
    ############################
    """
    width = len(x) + 2 * pad
    border = "#" * width
    spacer = "#" + " " * (width - 2) + "#"
    middle = "#" + " " * (pad - 1) + x + " " * (pad - 1) + "#"
    print()
    print(border)
    print(spacer)
    print(middle)
    print(spacer)
    print(border)
class Template:
    # Prompt template that wraps a user query in a model-specific chat format.
    # NOTE(review): the dataclass-style field annotations plus __post_init__,
    # and callers invoking Template(name=...), suggest this class was
    # decorated with @dataclass and the decorator was lost in extraction --
    # confirm upstream.
    name: Optional[str] = None    # preset template name (used if prompt is None)
    prompt: Optional[str] = None  # explicit template; must contain "{query}"

    def __post_init__(self):
        # An explicit prompt overrides any named preset; it must contain the
        # literal "{query}" placeholder for get_prompt() to substitute into.
        if self.prompt is not None:
            assert "{query}" in self.prompt, "{query} is required for prompt templates."
            return

        # Otherwise resolve the preset name to its chat format.
        if self.name == "default":
            r"""
            Supports language models without instruction-tuning.
            """
            self.prompt = "{query}"
        elif self.name == "alpaca":
            r"""
            Supports: https://huggingface.co/tatsu-lab/alpaca-7b-wdiff
            https://github.com/ymcui/Chinese-LLaMA-Alpaca
            """
            self.prompt = "### Instruction:\n{query}\n\n### Response:\n"
        elif self.name == "baichuan":
            r"""
            Supports: https://huggingface.co/baichuan-inc/Baichuan-13B-Chat
            """
            self.prompt = "<reserved_102>{query}<reserved_103>"
        elif self.name == "intern":
            r"""
            Supports: https://huggingface.co/internlm/internlm-chat-7b
            """
            self.prompt = "<|User|>:{query}<eoh>\n<|Bot|>:"
        elif self.name == "vicuna":
            r"""
            Supports: https://huggingface.co/lmsys/vicuna-7b-delta-v1.1
            https://huggingface.co/lmsys/vicuna-13b-delta-v1.1
            """
            self.prompt = "USER: {query} ASSISTANT: "
        elif self.name == "ziya":
            r"""
            Supports: https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1
            """
            self.prompt = "<human>:{query}\n<bot>:"
        else:
            raise NotImplementedError

    def get_prompt(self, query: str) -> str:
        # Substitute the user query into the resolved template string.
        return self.prompt.format(query=query)
def load_model_and_tokenizer(
    model: str, checkpointing: bool
) -> Tuple[PreTrainedModel, PreTrainedTokenizerBase, bool]:
    # NOTE(review): Tuple, torch, and the transformers Auto*/PreTrained*
    # names are not imported in the visible header of this file -- presumably
    # imported elsewhere; confirm before running.
    """Load a causal LM (fp16, on CUDA) together with its tokenizer.

    Args:
        model: name or path of the pre-trained model; the local name is
            rebound to the loaded model object partway through.
        checkpointing: enable gradient checkpointing (disables the KV cache).

    Returns:
        (model, tokenizer, batch_first) where batch_first is always True.
    """
    batch_first = True
    tokenizer = AutoTokenizer.from_pretrained(
        model,
        use_fast=False,
        padding_side="left",  # left padding for batched decoder-only generation
        trust_remote_code=True
    )
    if tokenizer.pad_token_id is None:
        tokenizer.pad_token_id = 0  # fall back to token id 0 as the pad token
    config = AutoConfig.from_pretrained(model)
    model = AutoModelForCausalLM.from_pretrained(
        model,
        low_cpu_mem_usage=True,
        torch_dtype=torch.float16,
        trust_remote_code=True
    ).cuda()

    # Register auto class to save the custom code files.
    if isinstance(config, PretrainedConfig) and "AutoConfig" in getattr(config, "auto_map", {}):
        config.__class__.register_for_auto_class()
    if isinstance(model, PreTrainedModel) and "AutoModelForCausalLM" in getattr(config, "auto_map", {}):
        model.__class__.register_for_auto_class()
    if isinstance(tokenizer, PreTrainedTokenizerBase) and "AutoTokenizer" in tokenizer.init_kwargs.get("auto_map", {}):
        tokenizer.__class__.register_for_auto_class()

    if checkpointing:
        # Checkpointing needs input grads and is incompatible with the
        # generation KV cache, so the cache is turned off here.
        model.enable_input_require_grads()
        model.gradient_checkpointing_enable()
        model.config.use_cache = False

    return model, tokenizer, batch_first
def generate_interactive(
    model: PreTrainedModel,
    tokenizer: PreTrainedTokenizer,
    template: Template,
    top_k: Optional[int] = 50,
    max_length: Optional[int] = 200
):
    r"""
    Puts generation in a loop. Allows users to repeatedly provide inputs
    with which text is generated.

    Loops on stdin until the user types ``exit``; each response is streamed
    to stdout token by token via a TextStreamer.
    """
    print("Enter `exit` to exit the interface.")
    while True:
        query = input("Input: ").strip()

        if query == "exit":
            break

        # The streamer prints tokens as they are produced, so the string
        # returned by generate_fast is not used here.
        streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
        print("Output: ", end="", flush=True)
        generate_fast(model, tokenizer, [query], template, top_k=top_k, max_length=max_length, streamer=streamer)[0]
        print()
def generate_fast(
    model: PreTrainedModel,
    tokenizer: PreTrainedTokenizer,
    queries: List[str],
    template: Template,
    n_gen_per_prompt: Optional[int] = 1,
    top_k: Optional[int] = 50,
    max_length: Optional[int] = 200,
    streamer: Optional[TextStreamer] = None
):
    r"""
    Fast, parallelized auto-regressive text generation with top-k sampling.

    Each query is expanded into ``n_gen_per_prompt`` prompts, the whole
    batch is sampled in one forward pass, and the decoded continuations
    (prompt stripped) are returned in order.
    """
    # Expand every query into its prompts and tokenize them as one batch.
    prompts = []
    for query in queries:
        prompts.extend([template.get_prompt(query)] * n_gen_per_prompt)
    batch = tokenizer(prompts, padding=True, return_token_type_ids=False, return_tensors="pt").to(model.device)

    with torch.no_grad():
        output_ids = model.generate(
            **batch,
            temperature=0.1,
            top_k=top_k,
            max_length=max_length,
            do_sample=True,
            streamer=streamer
        )

    # Drop the prompt tokens and decode only the generated continuation.
    prompt_len = batch["input_ids"].size(1)
    return tokenizer.batch_decode(
        output_ids[:, prompt_len:],
        skip_special_tokens=True,
        clean_up_tokenization_spaces=True
    )
The provided code snippet includes necessary dependencies for implementing the `test_rome` function. Write a Python function `def test_rome( data: str, model: str, config: str, template: Optional[str] = "default", output: Optional[str] = None, checkpointing: Optional[bool] = False ) -> None` to solve the following problem:
r""" Edits a pre-trained model using model-editing algorithms. Args: data (`str`): The path of the `json` file containing the samples for editing. model (`str`): The name or path of the pre-trained transformer model to be edited. config (`str`): The name of the hyper-parameters to use for editing the model. template (`str`, *optional*, defaults to `default`): The name of the template to use in generation. output (`str`, *optional*, defaults to `None`): The path to save the edited model. checkpointing (`bool`, *optional*, defaults to `False`): Whether to enable gradient checkpointing or not.
Here is the function:
def test_rome(
    data: str, model: str, config: str, template: Optional[str] = "default",
    output: Optional[str] = None, checkpointing: Optional[bool] = False
) -> None:
    r"""
    Edits a pre-trained model using model-editing algorithms.

    Args:
        data (`str`):
            The path of the `json` file containing the samples for editing.
        model (`str`):
            The name or path of the pre-trained transformer model to be edited.
        config (`str`):
            The name of the hyper-parameters to use for editing the model.
        template (`str`, *optional*, defaults to `default`):
            The name of the template to use in generation.
        output (`str`, *optional*, defaults to `None`):
            The path to save the edited model.
        checkpointing (`bool`, *optional*, defaults to `False`):
            Whether to enable gradient checkpointing or not.
    """
    assert os.path.exists(data), "data not found"

    with open(data, "r", encoding="utf-8") as f:
        requests = json.load(f)

    # Flatten the evaluation queries from all edit requests.
    queries = [query for request in requests for query in request["queries"]]

    model_old, tokenizer, batch_first = load_model_and_tokenizer(model, checkpointing)
    # NOTE(review): `template` is rebound here from a name (str) to a
    # Template instance.
    template = Template(name=template)

    print_loud("Retrieving hyperparameters")
    hparams = ROMEHyperParams.from_name(config)
    print(hparams)

    if len(queries) > 0:
        # Baseline generations before the edit, for comparison below.
        print_loud("Generating pre-update text")
        pre_update_text = generate_fast(model_old, tokenizer, queries, template, max_length=100)
        print("\n\n".join([queries[i] + " " + pre_update_text[i] for i in range(len(queries))]))

    print_loud(f"Applying rome to model")
    model_new, _ = apply_rome_to_model(
        model_old,
        tokenizer,
        requests,
        hparams,
        batch_first,
        return_diff_weights=False
    )

    if len(queries) > 0:
        # Same queries after the edit, to show the behavioral change.
        print_loud("Generating post-update text")
        post_update_text = generate_fast(model_new, tokenizer, queries, template, max_length=100)
        print("\n\n".join([queries[i] + " " + post_update_text[i] for i in range(len(queries))]))

    print_loud("Starting interactively generation interface")
    generate_interactive(model_new, tokenizer, template)

    if output is not None:
        # Re-enable the KV cache (disabled when checkpointing) before saving.
        model_new.config.use_cache = True
        model_new.save_pretrained(output, max_shard_size="10GB")
        tokenizer.save_pretrained(output)
15,158 | import time
import torch
from copy import deepcopy
from typing import Dict, List, Optional, Tuple, Union
from transformers import PreTrainedModel, PreTrainedTokenizer
from .compute_u import compute_u
from .compute_v import compute_v
from .rome_hparams import ROMEHyperParams
from ..utils import nethook
from ..utils.context import CONTEXT_TEMPLATES
def execute_rome(
    model: PreTrainedModel,
    tokenizer: PreTrainedTokenizer,
    request: Dict,
    hparams: ROMEHyperParams,
    batch_first: Optional[bool] = True
) -> Dict[str, Tuple[torch.Tensor, torch.Tensor]]:
    r"""
    Executes the ROME update algorithm for the specified update at the specified layer
    Invariant: model at beginning of function == model at end of function

    Returns a dict mapping each rewritten weight name to its (left, right)
    rank-1 update vectors; the model weights themselves are restored to
    their original values before returning.
    """
    # Update target and print info
    request = deepcopy(request)  # avoid mutating the caller's request dict
    print("Executing ROME algorithm for the update: "
          "[{}] -> [{}]".format(request["prompt"].format(request["subject"]), request["target"]))

    start_time = time.time()

    # Retrieve weights that user desires to change
    weights = {f"{hparams.rewrite_module_tmp.format(layer)}.weight":
               nethook.get_parameter(model, f"{hparams.rewrite_module_tmp.format(layer)}.weight")
               for layer in hparams.layers}
    # Save old weights for future restoration
    weights_copy = {k: v.detach().clone() for k, v in weights.items()}

    # Update loop: sequentially intervene at each specified layer
    deltas = {}
    for layer in sorted(hparams.layers):
        # Compute rank-1 update matrix
        left_vector: torch.Tensor = compute_u(
            model,
            tokenizer,
            request,
            hparams,
            layer,
            CONTEXT_TEMPLATES,
            batch_first
        )
        print("Left vector shape:", left_vector.shape)
        right_vector: torch.Tensor = compute_v(
            model,
            tokenizer,
            request,
            hparams,
            layer,
            left_vector,
            CONTEXT_TEMPLATES,
            batch_first
        )
        print("Right vector shape:", right_vector.shape)
        # Cast to fp16 so the outer-product delta matches the fp16 weights.
        right_vector = right_vector.to(torch.float16)

        with torch.no_grad():
            # Determine correct transposition of delta matrix
            weight_name = f"{hparams.rewrite_module_tmp.format(layer)}.weight"
            upd_matrix = left_vector.unsqueeze(1) @ right_vector.unsqueeze(0)
            upd_matrix = upd_matrix_match_shape(upd_matrix, weights[weight_name].shape)

            # Update model weights and record desired changes in `delta` variable
            weights[weight_name][...] += upd_matrix
            deltas[weight_name] = (
                left_vector.detach(),
                right_vector.detach(),
            )

    # Restore state of original model
    with torch.no_grad():
        for k, v in weights.items():
            v[...] = weights_copy[k]

    print(f"Deltas successfully computed for {list(weights.keys())}")
    end_time = time.time()
    print("Time elapsed: {:.2f} seconds".format(end_time - start_time))
    return deltas
def upd_matrix_match_shape(matrix: torch.Tensor, shape: torch.Size) -> torch.Tensor:
    r"""
    Orient a ROME update matrix to match a target weight shape.

    GPT-2 and GPT-J have transposed weight representations relative to each
    other, so the computed rank-1 update may need a transpose. Returns
    ``matrix`` unchanged when its shape already matches, its transpose when
    that matches, and raises ``ValueError`` otherwise.
    """
    if matrix.shape == shape:
        return matrix
    if matrix.T.shape == shape:
        return matrix.T
    raise ValueError("Update matrix computed by ROME does not match original weight shape. "
                     "Check for bugs in the code?")
class ROMEHyperParams(HyperParams):
    # Hyper-parameters controlling a ROME edit.
    # NOTE(review): the dataclass-style field annotations and the `cls`
    # parameter on `from_name` (with no visible @classmethod decorator)
    # suggest decorators live on the HyperParams base or were lost in
    # extraction -- confirm upstream.

    # Method
    layers: List[int]         # indices of the transformer layers to rewrite
    fact_token: str           # which token's representation encodes the fact
    v_num_grad_steps: int     # gradient steps when optimizing the v vector
    v_lr: float               # learning rate for the v optimization
    v_loss_layer: int         # layer whose output defines the target loss
    v_weight_decay: float     # weight decay for the v optimization
    clamp_norm_factor: float  # norm clamp applied to the computed delta
    kl_factor: float          # weight of the KL regularization term
    mom2_adjustment: bool     # whether to apply second-moment statistics

    # Module templates (format strings taking a layer index)
    rewrite_module_tmp: str   # module whose weight is rewritten
    layer_module_tmp: str
    mlp_module_tmp: str
    attn_module_tmp: str
    ln_f_module: str
    lm_head_module: str

    # Statistics (second-moment / covariance estimation settings)
    mom2_dataset: str
    mom2_n_samples: int
    mom2_dtype: str

    def from_name(cls, name: str):
        """Build hyper-parameters for a known model family by name.

        Starts from GPT-J-style defaults and patches the module paths and
        loss layer for other architectures; raises NotImplementedError for
        unknown names.
        """
        # GPT-J-style defaults; the branches below override what differs.
        data = dict(
            layers=[5],
            fact_token="subject_last",
            v_num_grad_steps=20,
            v_lr=1e-1,
            v_loss_layer=27,
            v_weight_decay=1e-3,
            clamp_norm_factor=4,
            kl_factor=0.0625,
            mom2_adjustment=False,
            rewrite_module_tmp="transformer.h.{}.mlp.fc_out",
            layer_module_tmp="transformer.h.{}",
            mlp_module_tmp="transformer.h.{}.mlp",
            attn_module_tmp="transformer.h.{}.attn",
            ln_f_module="transformer.ln_f",
            lm_head_module="lm_head",
            mom2_dataset="wikipedia",
            mom2_n_samples=100000,
            mom2_dtype="float16"
        )
        if name == "gpt-j-6b":
            pass
        elif name == "llama-7b":
            r"""
            Supports: LLaMA-7B, LLaMA-2-7B, Baichuan-7B, InternLM-7B...
            """
            data.update(dict(
                v_loss_layer=31,
                rewrite_module_tmp="model.layers.{}.mlp.down_proj",
                layer_module_tmp="model.layers.{}",
                mlp_module_tmp="model.layers.{}.mlp",
                attn_module_tmp="model.layers.{}.self_attn",
                ln_f_module="model.norm"
            ))
        elif name == "llama-13b":
            r"""
            Supports LLaMA-13B, LLaMA-2-13B, Baichuan-13B...
            """
            data.update(dict(
                layers=[10],
                v_loss_layer=39,
                rewrite_module_tmp="model.layers.{}.mlp.down_proj",
                layer_module_tmp="model.layers.{}",
                mlp_module_tmp="model.layers.{}.mlp",
                attn_module_tmp="model.layers.{}.self_attn",
                ln_f_module="model.norm"
            ))
        elif name == "falcon-7b":
            data.update(dict(
                v_loss_layer=31,
                rewrite_module_tmp="transformer.h.{}.mlp.dense_4h_to_h",
                attn_module_tmp="transformer.h.{}.self_attention"
            ))
        elif name == "bloom-7b1":
            data.update(dict(
                v_lr=2e-1,
                v_loss_layer=29,
                rewrite_module_tmp="transformer.h.{}.mlp.dense_4h_to_h",
                attn_module_tmp="transformer.h.{}.self_attention"
            ))
        else:
            raise NotImplementedError
        return cls(**data)
The provided code snippet includes necessary dependencies for implementing the `apply_rome_to_model` function. Write a Python function `def apply_rome_to_model( model: PreTrainedModel, tokenizer: PreTrainedTokenizer, requests: List[Dict[str, Union[List[str], str]]], hparams: ROMEHyperParams, batch_first: Optional[bool] = True, copy: Optional[bool] = False, return_diff_weights: Optional[bool] = False ) -> Tuple[PreTrainedModel, Dict[str, torch.Tensor]]` to solve the following problem:
r""" Edits a pre-trained model using model-editing algorithms. Args: model (`PreTrainedModel`): The pre-trained transformer model to be edited. tokeniser (`PreTrainedTokenizer`): The pre-trained tokenizer of the model. requests (`List[Dict[str, Union[List[str], str]]]`): The samples for editing. hparams (`ROMEHyperParams`): The hyper-parameters of the ROME algorithm. batch_first (`bool`, *optional*, defaults to `True`): If true, the first dimension of the inputs/outputs of MLP is the batch dimension. copy (`bool`, *optional*, defaults to `False`): If true, will preserve the original model while creating a new one to edit. Note that you are responsible for deallocating the new model's memory to avoid leaks. return_diff_weights (`bool`, *optional*, defaults to `False`): If true, will return the difference between the updated weights and the original weights. Returns: model (`PreTrainedModel`): The updated transformer model. diff_weights (`Dict[str, Tensor]`): A dict of diff weights that have been changed.
Here is the function:
def apply_rome_to_model(
    model: PreTrainedModel,
    tokenizer: PreTrainedTokenizer,
    requests: List[Dict[str, Union[List[str], str]]],
    hparams: ROMEHyperParams,
    batch_first: Optional[bool] = True,
    copy: Optional[bool] = False,
    return_diff_weights: Optional[bool] = False
) -> Tuple[PreTrainedModel, Dict[str, torch.Tensor]]:
    r"""
    Applies the ROME model-editing algorithm to a pre-trained transformer.

    Args:
        model (`PreTrainedModel`):
            The transformer model to edit.
        tokenizer (`PreTrainedTokenizer`):
            The tokenizer paired with ``model``.
        requests (`List[Dict[str, Union[List[str], str]]]`):
            Editing samples, applied one after another.
        hparams (`ROMEHyperParams`):
            Hyper-parameters of the ROME algorithm.
        batch_first (`bool`, *optional*, defaults to `True`):
            Whether the MLP inputs/outputs put the batch dimension first.
        copy (`bool`, *optional*, defaults to `False`):
            If true, edit a deep copy of ``model`` instead of the passed-in
            instance; the caller owns (and must free) the copy.
        return_diff_weights (`bool`, *optional*, defaults to `False`):
            If true, also accumulate and return the weight deltas.

    Returns:
        The edited model and a dict mapping weight names to their total
        update (empty unless ``return_diff_weights`` is true).
    """
    edited_model = deepcopy(model) if copy else model
    weights_diff: Dict[str, torch.Tensor] = {}

    for request in requests:
        deltas = execute_rome(edited_model, tokenizer, request, hparams, batch_first)
        with torch.no_grad():
            for weight_name, (delta_u, delta_v) in deltas.items():
                # Rank-one update: outer product of the two delta vectors.
                update = delta_u.unsqueeze(1) @ delta_v.unsqueeze(0)
                weight = nethook.get_parameter(edited_model, weight_name)
                update = upd_matrix_match_shape(update, weight.shape)
                weight[...] += update
                if return_diff_weights:
                    update_copy = update.detach().clone()
                    if weight_name in weights_diff:
                        weights_diff[weight_name] += update_copy
                    else:
                        weights_diff[weight_name] = update_copy
        print(f"New weights successfully inserted into {list(deltas.keys())}")

    return edited_model, weights_diff
15,159 | import copy
import torch
import inspect
import contextlib
from collections import OrderedDict
The provided code snippet includes necessary dependencies for implementing the `recursive_copy` function. Write a Python function `def recursive_copy(x, clone=None, detach=None, retain_grad=None)` to solve the following problem:
r""" Copies a reference to a tensor, or an object that contains tensors, optionally detaching and cloning the tensor(s). If retain_grad is true, the original tensors are marked to have grads retained.
Here is the function:
def recursive_copy(x, clone=None, detach=None, retain_grad=None):
    r"""
    Copies a reference to a tensor, or an object that contains tensors,
    optionally detaching and cloning the tensor(s). If retain_grad is
    true, the original tensors are marked to have grads retained.

    Args:
        x: a tensor, or a (possibly nested) dict/list/tuple of tensors.
        clone: if true, returned tensors are clones of the originals.
        detach: if true, returned tensors are detached from the autograd graph.
        retain_grad: if true, the *original* tensors retain their grads
            (this mutates ``x`` in place for tensor leaves).

    Returns:
        An object with the same structure as ``x`` whose tensors received the
        requested treatment at every nesting level.

    Raises:
        AssertionError: if ``x`` contains anything other than tensors,
            dicts, lists, or tuples.
    """
    if not clone and not detach and not retain_grad:
        return x
    if isinstance(x, torch.Tensor):
        if retain_grad:
            if not x.requires_grad:
                x.requires_grad = True
            x.retain_grad()
        elif detach:
            x = x.detach()
        if clone:
            x = x.clone()
        return x
    # Only dicts, lists, and tuples (and subclasses) can be copied.
    # Bug fix: propagate the flags into the recursion — previously nested
    # tensors were returned untouched because the recursive calls dropped
    # clone/detach/retain_grad.
    if isinstance(x, dict):
        return type(x)(
            {
                k: recursive_copy(v, clone=clone, detach=detach, retain_grad=retain_grad)
                for k, v in x.items()
            }
        )
    elif isinstance(x, (list, tuple)):
        return type(x)(
            [recursive_copy(v, clone=clone, detach=detach, retain_grad=retain_grad) for v in x]
        )
    else:
        assert False, f"Unknown type {type(x)} cannot be broken into tensors."
15,160 | import copy
import torch
import inspect
import contextlib
from collections import OrderedDict
def hierarchical_subsequence(
    sequential, first, last, after, upto, share_weights=False, depth=0
):
    r"""
    Recursive helper for subsequence() to support descent into dotted
    layer names. In this helper, first, last, after, and upto are
    arrays of names resulting from splitting on dots. Can only
    descend into nested Sequentials.

    Args:
        sequential: the torch.nn.Sequential to slice; must be Sequential
            at every level the dotted names descend into.
        first, last: inclusive boundary names (split on dots) or None.
        after, upto: exclusive boundary names (split on dots) or None.
        share_weights: if true, reference the original modules instead of
            deep-copying them.
        depth: recursion depth, i.e. the index into the split name lists.

    Returns:
        A torch.nn.Sequential of the selected children, or None for an
        empty selection below the outermost level.

    Raises:
        ValueError: if a requested boundary layer name is not found.
    """
    # At most one inclusive and one exclusive boundary on each side.
    assert (last is None) or (upto is None)
    assert (first is None) or (after is None)
    if first is last is after is upto is None:
        # No boundaries remain at this level: take the whole subtree.
        return sequential if share_weights else copy.deepcopy(sequential)
    assert isinstance(sequential, torch.nn.Sequential), (
        ".".join((first or last or after or upto)[:depth] or "arg") + " not Sequential"
    )
    # Start including immediately unless we are still waiting to reach a
    # `first` (inclusive) or `after` (exclusive) start boundary.
    including_children = (first is None) and (after is None)
    included_children = OrderedDict()
    # A = current level short name of A.
    # AN = full name for recursive descent if not innermost.
    (F, FN), (L, LN), (A, AN), (U, UN) = [
        (d[depth], (None if len(d) == depth + 1 else d))
        if d is not None
        else (None, None)
        for d in [first, last, after, upto]
    ]
    for name, layer in sequential._modules.items():
        # Boundaries are consumed (set to None) as they are encountered,
        # which also feeds the "not found" check after the loop.
        if name == F:
            first = None
            including_children = True
        if name == A and AN is not None:  # just like F if not a leaf.
            after = None
            including_children = True
        if name == U and UN is None:
            upto = None
            including_children = False
        if including_children:
            # AR = full name for recursive descent if name matches.
            FR, LR, AR, UR = [
                n if n is None or n[depth] == name else None for n in [FN, LN, AN, UN]
            ]
            chosen = hierarchical_subsequence(
                layer,
                first=FR,
                last=LR,
                after=AR,
                upto=UR,
                share_weights=share_weights,
                depth=depth + 1,
            )
            if chosen is not None:
                included_children[name] = chosen
        # End-of-range boundaries are processed *after* possibly including
        # the current child, so `last`/leaf-`upto` semantics stay correct.
        if name == L:
            last = None
            including_children = False
        if name == U and UN is not None:  # just like L if not a leaf.
            upto = None
            including_children = False
        if name == A and AN is None:
            after = None
            including_children = True
    for name in [first, last, after, upto]:
        if name is not None:
            raise ValueError("Layer %s not found" % ".".join(name))
    # Omit empty subsequences except at the outermost level,
    # where we should not return None.
    if not len(included_children) and depth > 0:
        return None
    result = torch.nn.Sequential(included_children)
    result.training = sequential.training
    return result
The provided code snippet includes necessary dependencies for implementing the `subsequence` function. Write a Python function `def subsequence( sequential, first_layer=None, last_layer=None, after_layer=None, upto_layer=None, single_layer=None, share_weights=False, )` to solve the following problem:
r""" Creates a subsequence of a pytorch Sequential model, copying over modules together with parameters for the subsequence. Only modules from first_layer to last_layer (inclusive) are included, or modules between after_layer and upto_layer (exclusive). Handles descent into dotted layer names as long as all references are within nested Sequential models. If share_weights is True, then references the original modules and their parameters without copying them. Otherwise, by default, makes a separate brand-new copy.
Here is the function:
def subsequence(
    sequential,
    first_layer=None,
    last_layer=None,
    after_layer=None,
    upto_layer=None,
    single_layer=None,
    share_weights=False,
):
    r"""
    Extracts a sub-model from a pytorch Sequential, copying the selected
    modules together with their parameters.

    Modules from first_layer through last_layer are kept inclusively,
    while after_layer / upto_layer bound the selection exclusively.
    Dotted layer names descend into nested Sequential children.
    single_layer is shorthand for first_layer == last_layer == that name.
    With share_weights the result references the original modules;
    otherwise brand-new deep copies are made.
    """
    # single_layer is mutually exclusive with every other boundary.
    assert (single_layer is None) or (
        first_layer is last_layer is after_layer is upto_layer is None
    )
    if single_layer is not None:
        first_layer = single_layer
        last_layer = single_layer
    # Split the dotted names once; the recursion indexes them by depth.
    first, last, after, upto = (
        None if spec is None else spec.split(".")
        for spec in (first_layer, last_layer, after_layer, upto_layer)
    )
    return hierarchical_subsequence(
        sequential,
        first=first,
        last=last,
        after=after,
        upto=upto,
        share_weights=share_weights,
    )
15,161 | import copy
import torch
import inspect
import contextlib
from collections import OrderedDict
def get_module(model, name):
    r"""
    Returns the submodule of ``model`` whose qualified name equals ``name``
    (the empty string names the model itself).

    Raises:
        LookupError: if no submodule has that name.
    """
    for module_name, module in model.named_modules():
        if module_name == name:
            return module
    raise LookupError(name)
The provided code snippet includes necessary dependencies for implementing the `replace_module` function. Write a Python function `def replace_module(model, name, new_module)` to solve the following problem:
r""" Replaces the named module within the given model.
Here is the function:
def replace_module(model, name, new_module):
    r"""
    Replaces the submodule called ``name`` inside ``model`` with
    ``new_module``. Dotted names are resolved by first locating the
    parent module, then rebinding its attribute.
    """
    if "." not in name:
        setattr(model, name, new_module)
        return
    parent_name, attr_name = name.rsplit(".", 1)
    parent = get_module(model, parent_name)
    setattr(parent, attr_name, new_module)
15,162 | import copy
import torch
import inspect
import contextlib
from collections import OrderedDict
The provided code snippet includes necessary dependencies for implementing the `invoke_with_optional_args` function. Write a Python function `def invoke_with_optional_args(fn, *args, **kwargs)` to solve the following problem:
r""" Invokes a function with only the arguments that it is written to accept, giving priority to arguments that match by-name, using the following rules. (1) arguments with matching names are passed by name. (2) remaining non-name-matched args are passed by order. (3) extra caller arguments that the function cannot accept are not passed. (4) extra required function arguments that the caller cannot provide cause a TypeError to be raised. Ordinary python calling conventions are helpful for supporting a function that might be revised to accept extra arguments in a newer version, without requiring the caller to pass those new arguments. This function helps support function callers that might be revised to supply extra arguments, without requiring the callee to accept those new arguments.
Here is the function:
def invoke_with_optional_args(fn, *args, **kwargs):
    r"""
    Invokes a function with only the arguments that it
    is written to accept, giving priority to arguments
    that match by-name, using the following rules.
    (1) arguments with matching names are passed by name.
    (2) remaining non-name-matched args are passed by order.
    (3) extra caller arguments that the function cannot
    accept are not passed.
    (4) extra required function arguments that the caller
    cannot provide cause a TypeError to be raised.
    Ordinary python calling conventions are helpful for
    supporting a function that might be revised to accept
    extra arguments in a newer version, without requiring the
    caller to pass those new arguments. This function helps
    support function callers that might be revised to supply
    extra arguments, without requiring the callee to accept
    those new arguments.
    """
    argspec = inspect.getfullargspec(fn)
    pass_args = []
    used_kw = set()
    unmatched_pos = []
    used_pos = 0
    # Index of the first positional parameter that carries a default value.
    defaulted_pos = len(argspec.args) - (
        0 if not argspec.defaults else len(argspec.defaults)
    )
    # Pass positional args that match name first, then by position.
    for i, n in enumerate(argspec.args):
        if n in kwargs:
            pass_args.append(kwargs[n])
            used_kw.add(n)
        elif used_pos < len(args):
            pass_args.append(args[used_pos])
            used_pos += 1
        else:
            # No value yet: remember the slot and tentatively fill it with
            # the parameter's default (or None when it has none).
            unmatched_pos.append(len(pass_args))
            pass_args.append(
                None if i < defaulted_pos else argspec.defaults[i - defaulted_pos]
            )
    # Fill unmatched positional args with unmatched keyword args in order.
    if len(unmatched_pos):
        for k, v in kwargs.items():
            if k in used_kw or k in argspec.kwonlyargs:
                continue
            pass_args[unmatched_pos[0]] = v
            used_kw.add(k)
            unmatched_pos = unmatched_pos[1:]
            if len(unmatched_pos) == 0:
                break
        else:
            # for-else: the loop ran out of keyword args with required
            # positional slots still unfilled -> the call cannot succeed.
            if unmatched_pos[0] < defaulted_pos:
                unpassed = ", ".join(
                    argspec.args[u] for u in unmatched_pos if u < defaulted_pos
                )
                raise TypeError(f"{fn.__name__}() cannot be passed {unpassed}.")
    # Pass remaining kw args if they can be accepted.
    # Bug fix: extra keyword args are absorbed by **kwargs (argspec.varkw),
    # not by *args (argspec.varargs) as previously tested.
    pass_kw = {
        k: v
        for k, v in kwargs.items()
        if k not in used_kw and (k in argspec.kwonlyargs or argspec.varkw is not None)
    }
    # Pass remaining positional args if they can be accepted.
    if argspec.varargs is not None:
        pass_args += list(args[used_pos:])
    return fn(*pass_args, **pass_kw)
15,163 | (
(...,...), # 最后一个“,”最好别删!
)
d', 30),
params = (('period', 30),
params = (('p1', 5), ('p2', 30),)
r['年化收益率(%)'] = result[0].analyzers._Returns.get_analysis()['rnorm100']大回撤(%)'] = result[0].analyzers._DrawDown.get_analysis()['max']['drawdown'] * (-1)化夏普比率'] = result[0].analyzers._SharpeRatio_A.get_analysis()['sharperatio']
最后一个“,”最好别删!
)
def get_analysis(self):
pass
params = (('timeframe', TimeFrame.Years), ('riskfreerate', 0.01),)
def get_analysis(self):
analyzer = {}
lyzer['period1'] = result.params.period1
analyzer['period2'] = result.params.period2
r['年化收益率'] = result.analyzers._Returns.get_analysis()['rnorm']
analyzer['年化收益率(%)'] = result.analyzers._Returns.get_analysis()['rnorm100']
_DrawDown.get_analysis()['max']['drawdown'] * (-1)
r['年化夏普比率'] = result.analyzers._SharpeRatio_A.get_analysis()['sharperatio']
return analyze
def get_my_analyzer(result):
    """Summarizes one backtest result into a plain dict: the strategy's
    period parameters plus annualized return, max drawdown, and the
    annualized Sharpe ratio read from the attached analyzers."""
    summary = {}
    # Strategy parameters used for this run.
    summary['period1'] = result.params.period1
    summary['period2'] = result.params.period2
    # Annualized return, as a ratio and as a percentage.
    returns = result.analyzers._Returns.get_analysis()
    summary['年化收益率'] = returns['rnorm']
    summary['年化收益率(%)'] = returns['rnorm100']
    # Max drawdown, negated by convention (drawdowns reported as negative).
    drawdown = result.analyzers._DrawDown.get_analysis()
    summary['最大回撤(%)'] = drawdown['max']['drawdown'] * (-1)
    # Annualized Sharpe ratio.
    summary['年化夏普比率'] = result.analyzers._SharpeRatio_A.get_analysis()['sharperatio']
    return summary
15,164 |
def next(self):
    # Illustrative snippet (ellipses are placeholders): o2 and o3 both
    # name o1 as their OCO peer, so all three orders form one
    # one-cancels-others group headed by o1.
    ...
    o1 = self.buy(...)
    ...
    o2 = self.buy(..., oco=o1)
    ...
    o3 = self.buy(..., oco=o1)
15,165 |
def next(self):
    # Illustrative snippet (ellipses are placeholders): the OCO links are
    # chained here — o2 references o1 and o3 references o2, tying all
    # three orders together through the chain.
    ...
    o1 = self.buy(...)
    ...
    o2 = self.buy(..., oco=o1)
    ...
    o3 = self.buy(..., oco=o2)
15,166 | import backtrader as bt
import backtrader.indicators as btind
import pandas as pd
import datetime
import tushare as ts
import json
ts.set_token(token)
df = df[['trade_date', 'open', 'high', 'low', 'close','vol']]
df.columns = ['trade_date', 'open', 'high', 'low', 'close','volume']
df.trade_date = pd.to_datetime(df.trade_date)
df.index = df.trade_date
df.sort_index(inplace=True)
df.fillna(0.0,inplace=True)
return df
def get_data_bytushare(code, start_date, end_date):
    """Downloads forward-adjusted ('qfq') daily OHLCV bars for one stock
    from the Tushare pro API, returning a DataFrame indexed by trade date
    in ascending order with NaNs replaced by 0.0."""
    bars = ts.pro_bar(ts_code=code, adj='qfq', start_date=start_date, end_date=end_date)
    # Keep only the OHLCV columns and use the conventional 'volume' name.
    bars = bars[['trade_date', 'open', 'high', 'low', 'close', 'vol']]
    bars.columns = ['trade_date', 'open', 'high', 'low', 'close', 'volume']
    bars.trade_date = pd.to_datetime(bars.trade_date)
    # Index by trade date while keeping the column itself.
    bars.index = bars.trade_date
    bars.sort_index(inplace=True)
    bars.fillna(0.0, inplace=True)
    return bars
15,167 | import backtrader as bt
import pandas as pd
import datetime
import tushare as ts
import json
ts.set_token(token)
df = ts.pro_bar(ts_code=code, adj='qfq',start_date=start_date, end_date=end_date)
df = df[['trade_date', 'open', 'high', 'low', 'close','vol']]
df.columns = ['trade_date', 'open', 'high', 'low', 'close','volume']
df.trade_date = pd.to_datetime(df.trade_date)
df.index = df.trade_date
df.sort_index(inplace=True)
df.fillna(0.0,inplace=True)
    return df
def get_data_bytushare(code,start_date,end_date):
    """Download forward-adjusted ('qfq') daily OHLCV bars for one stock from
    the Tushare pro API and return them as a DataFrame indexed by trade date.

    Args:
        code: Tushare stock code (e.g. '600000.SH' — presumably; verify
            against the caller).
        start_date, end_date: date-range strings accepted by ts.pro_bar.
    """
    df = ts.pro_bar(ts_code=code, adj='qfq',start_date=start_date, end_date=end_date)
    # Keep only OHLCV columns and rename 'vol' to the conventional 'volume'.
    df = df[['trade_date', 'open', 'high', 'low', 'close','vol']]
    df.columns = ['trade_date', 'open', 'high', 'low', 'close','volume']
    df.trade_date = pd.to_datetime(df.trade_date)
    # Index by trade date (the column is kept too), sorted ascending.
    df.index = df.trade_date
    df.sort_index(inplace=True)
    df.fillna(0.0,inplace=True)
    return df
15,168 | import glob
import re
from os import path
import setuptools
import torch
from torch.utils.cpp_extension import CppExtension
# Read the package version out of damo/__init__.py with a regex instead of
# importing the package (importing could fail before deps are installed).
with open('damo/__init__.py', 'r') as f:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(),
                        re.MULTILINE).group(1)
# The README becomes the long_description shown on the package index page.
with open('README.md', 'r', encoding='utf-8') as f:
    long_description = f.read()
def get_install_requirements():
    """Parses requirements.txt into a list of requirement strings,
    dropping comment lines. Blank lines are kept as empty strings,
    matching the original behaviour."""
    with open('requirements.txt', 'r', encoding='utf-8') as f:
        stripped = (line.strip() for line in f.read().splitlines())
        return [req for req in stripped if not req.startswith('#')]
15,169 | import copy
import random
import torch
import torchvision.transforms as transforms
from damo.augmentations.box_level_augs.gaussian_maps import _gaussian_map
# Per-channel pixel means used as fill colour for affine warps below
# (presumably BGR order, Detectron convention — TODO confirm).
pixel_mean = [102.9801, 115.9465, 122.7717]


def scale_area(box, height, width, scale_ratio=1.0):
    """Scales a (y1, x1, y2, x2) box about its centre by ``scale_ratio``,
    clipping the result to the ``height`` x ``width`` image bounds."""
    y1, x1, y2, x2 = box
    box_h, box_w = x2 - x1, y2 - y1
    new_h, new_w = box_h * scale_ratio, box_w * scale_ratio
    top = max(x1 + box_h / 2 - new_h / 2, 0)
    left = max(y1 + box_w / 2 - new_w / 2, 0)
    bottom = min(top + new_h, height)
    right = min(left + new_w, width)
    return torch.Tensor([left, top, right, bottom])
def _transform(x, x_crops, boxes_crops, translate=(0, 0)):
    """Pastes the augmented box crops ``x_crops`` back into a copy of image
    ``x``, blending each crop with the original pixels through a Gaussian
    weight map so the paste edges are soft.

    Args:
        x: image tensor; indexing below treats it as (C, H, W).
        x_crops: per-box augmented crop tensors.
        boxes_crops: per-box (y1, x1, y2, x2) crop coordinates.
        translate: optional (dy, dx) offset applied to every paste location.

    Returns:
        A new image tensor with the crops blended in; ``x`` is untouched.
    """
    y = copy.deepcopy(x)
    height, width = x.shape[1], x.shape[2]
    for i, box in enumerate(boxes_crops):
        y1_c, x1_c, y2_c, x2_c = boxes_crops[i].long()
        # Shift the paste window by `translate` and clamp it to the image.
        # y-coords clamp to width and x-coords to height, consistent with
        # the (y, x) box layout used throughout this module.
        y1_c = (y1_c + translate[0]).clamp(0, width).long().tolist()
        x1_c = (x1_c + translate[1]).clamp(0, height).long().tolist()
        y2_c = (y2_c + translate[0]).clamp(0, width).long().tolist()
        x2_c = (x2_c + translate[1]).clamp(0, height).long().tolist()
        y_crop = copy.deepcopy(y[:, x1_c:x2_c, y1_c:y2_c])
        # Trim the augmented crop to the (possibly clipped) window size.
        x_crop = x_crops[i][:, :y_crop.shape[1], :y_crop.shape[2]]
        if y_crop.shape[1] * y_crop.shape[2] == 0:
            continue  # window was clipped away entirely
        g_maps = _gaussian_map(x_crop,
                               [[0, 0, y_crop.shape[2], y_crop.shape[1]]])
        _, _h, _w = y[:, x1_c:x2_c, y1_c:y2_c].shape
        # Gaussian-weighted blend of the augmented crop over the original.
        y[:, x1_c:x1_c + x_crop.shape[1],
          y1_c:y1_c + x_crop.shape[2]] = g_maps * x_crop + (
              1 - g_maps) * y_crop[:, :x_crop.shape[1], :x_crop.shape[2]]
    return y
def _geometric_aug_func(x,
                        target,
                        angle=0,
                        translate=(0, 0),
                        scale=1,
                        shear=(0, 0),
                        hflip=False,
                        boxes_sample_prob=[],
                        scale_ratio=1.0):
    """Applies a geometric augmentation (flip / rotate / shear / translate /
    scale) to individually sampled box regions of image ``x`` and blends the
    warped crops back in via ``_transform``.

    Args:
        x: image tensor; indexed below as (C, H, W).
        target: detection target with ``bbox`` and ``extra_fields['labels']``;
            translated boxes are appended to it in place.
        angle, translate, scale, shear: affine parameters; the sign of
            angle/translate/shear is flipped with probability 0.5 below.
        hflip: if true, horizontally flip each sampled crop instead of
            applying the affine transform.
        boxes_sample_prob: per-box probability of augmenting that box.
            NOTE(review): mutable default argument — shared across calls,
            harmless here only because it is never mutated.
        scale_ratio: factor by which each box is enlarged before cropping.

    Returns:
        The augmented image and the (possibly extended) target.
    """
    # Sample which boxes to augment, keeping (box, label) pairs together.
    boxes_and_labels = [(
        target.bbox[i],
        target.extra_fields['labels'][i],
    ) for i in range(len(target.bbox))
        if random.random() < boxes_sample_prob[i]]
    boxes = [b_and_l[0] for b_and_l in boxes_and_labels]
    labels = [b_and_l[1] for b_and_l in boxes_and_labels]
    # Randomly negate the transform direction half of the time.
    if random.random() < 0.5:
        angle *= -1
        translate = (-translate[0], -translate[1])
        shear = (-shear[0], -shear[1])
    height, width = x.shape[1], x.shape[2]
    x_crops = []
    boxes_crops = []
    boxes_new = []
    labels_new = []
    for i, box in enumerate(boxes):
        box_crop = scale_area(box, height, width, scale_ratio)
        y1, x1, y2, x2 = box_crop.long()
        x_crop = x[:, x1:x2, y1:y2]
        boxes_crops.append(box_crop)
        if x1 >= x2 or y1 >= y2:
            # Degenerate crop: keep a placeholder so indices stay aligned.
            x_crops.append(x_crop)
            continue
        if hflip:
            x_crop = x_crop.flip(-1)
        elif translate[0] + translate[1] != 0:
            # Translation: the crop pixels stay as-is ( _transform shifts
            # the paste location); record the translated box + label.
            offset_y = (y2 + translate[0]).clamp(0, width).long().tolist() - y2
            offset_x = (x2 + translate[1]).clamp(0,
                                                 height).long().tolist() - x2
            if offset_x != 0 or offset_y != 0:
                offset = [offset_y, offset_x]
                boxes_new.append(box + torch.Tensor(offset * 2))
                labels_new.append(labels[i])
        else:
            # Rotate / shear / scale via torchvision's affine on a PIL image.
            x_crop = transforms.functional.to_pil_image(x_crop.cpu())
            try:
                # Older torchvision API (resample / fillcolor kwargs).
                x_crop = transforms.functional.affine(
                    x_crop,
                    angle,
                    translate,
                    scale,
                    shear,
                    resample=2,
                    fillcolor=tuple([int(i) for i in pixel_mean]))
            except:
                # Newer torchvision renamed them to interpolation / fill.
                x_crop = transforms.functional.affine(
                    x_crop,
                    angle,
                    translate,
                    scale,
                    shear,
                    interpolation=2,
                    fill=tuple([int(i) for i in pixel_mean]))
            x_crop = transforms.functional.to_tensor(x_crop).to(x.device)
        x_crops.append(x_crop)
    # Blend every (possibly transformed) crop back into the image.
    y = _transform(x, x_crops, boxes_crops, translate)
    # For translations, append the shifted boxes/labels to the target.
    if translate[0] + translate[1] != 0 and len(boxes_new) > 0:
        target.bbox = torch.cat((target.bbox, torch.stack(boxes_new)))
        target.extra_fields['labels'] = torch.cat(
            (target.extra_fields['labels'], torch.Tensor(labels_new).long()))
    return y, target
15,170 | import random
import numpy as np
from .color_augs import color_aug_func
from .geometric_augs import geometric_aug_func
def _box_sample_prob(bbox, scale_ratios_splits, box_prob=0.3):
# Dispatch table of colour-space box-level augmentations.  Each entry maps an
# augmentation name to a callable
#   (x, level, target, scale_ratios_splits, box_sample_probs) -> image
# The point operations (autocontrast, equalize, solarize_add, color, contrast,
# brightness, sharpness), the blending wrapper _color_aug_func, and the
# magnitude normaliser _MAX_LEVEL are defined elsewhere in this module.
color_aug_func = {
    'AutoContrast':
    lambda x, level, target,
    scale_ratios_splits, box_sample_probs: _color_aug_func(
        x, autocontrast(x), target, scale_ratios_splits, box_sample_probs),
    'Equalize':
    # NOTE(review): parameter spelled 'leve' (sic); harmless since the
    # magnitude is unused for Equalize.
    lambda x, leve, target,
    scale_ratios_splits, box_sample_probs: _color_aug_func(
        x, equalize(x), target, scale_ratios_splits, box_sample_probs),
    'SolarizeAdd':
    lambda x, level, target, scale_ratios_splits, box_sample_probs:
    _color_aug_func(x, solarize_add(x, level / _MAX_LEVEL * 0.4296875), target,
                    scale_ratios_splits, box_sample_probs),
    'Color':
    lambda x, level, target, scale_ratios_splits, box_sample_probs:
    _color_aug_func(x, color(x, level / _MAX_LEVEL * 1.8 + 0.1), target,
                    scale_ratios_splits, box_sample_probs),
    'Contrast':
    lambda x, level, target, scale_ratios_splits, box_sample_probs:
    _color_aug_func(x, contrast(x, level / _MAX_LEVEL * 1.8 + 0.1), target,
                    scale_ratios_splits, box_sample_probs),
    'Brightness':
    lambda x, level, target, scale_ratios_splits, box_sample_probs:
    _color_aug_func(x, brightness(x, level / _MAX_LEVEL * 1.8 + 0.1), target,
                    scale_ratios_splits, box_sample_probs),
    'Sharpness':
    lambda x, level, target, scale_ratios_splits, box_sample_probs:
    _color_aug_func(x, sharpness(x, level / _MAX_LEVEL * 1.8 + 0.1), target,
                    scale_ratios_splits, box_sample_probs),
}
# Dispatch table of geometric box-level augmentations.  Each callable has
# signature (x, level, target, boxes_sample_probs) and returns
# (augmented image, updated target).  Magnitudes scale with
# level / _MAX_LEVEL: degrees for rotate/shear, pixels for translate.
geometric_aug_func = {
    'hflip':
    lambda x, level, target, boxes_sample_probs: _geometric_aug_func(
        x, target, hflip=True, boxes_sample_prob=boxes_sample_probs),
    'rotate':
    lambda x, level, target, boxes_sample_probs: _geometric_aug_func(
        x,
        target,
        level / _MAX_LEVEL * 30,
        boxes_sample_prob=boxes_sample_probs),
    'shearX':
    lambda x, level, target, boxes_sample_probs: _geometric_aug_func(
        x,
        target,
        shear=(level / _MAX_LEVEL * 15, 0),
        boxes_sample_prob=boxes_sample_probs),
    'shearY':
    lambda x, level, target, boxes_sample_probs: _geometric_aug_func(
        x,
        target,
        shear=(0, level / _MAX_LEVEL * 15),
        boxes_sample_prob=boxes_sample_probs),
    'translateX':
    lambda x, level, target, boxes_sample_probs: _geometric_aug_func(
        x,
        target,
        translate=(level / _MAX_LEVEL * 120.0, 0),
        boxes_sample_prob=boxes_sample_probs),
    'translateY':
    lambda x, level, target, boxes_sample_probs: _geometric_aug_func(
        x,
        target,
        translate=(0, level / _MAX_LEVEL * 120.0),
        boxes_sample_prob=boxes_sample_probs)
}
def _box_aug_per_img(img,
                     target,
                     aug_type=None,
                     scale_ratios=None,
                     scale_splits=None,
                     img_prob=0.1,
                     box_prob=0.3,
                     level=1):
    """Applies one box-level augmentation to an image with probability
    ``img_prob``.

    Args:
        img: image tensor in the 0-255 range; it is normalised to 0-1
            *in place* before augmentation and rescaled on return.
        target: detection target holding ``bbox`` (and labels).
        aug_type: key into ``color_aug_func`` or ``geometric_aug_func``.
        scale_ratios: per-scale sampling ratio dict (keys 'prob'/'area'),
            or None to use a flat ``box_prob`` for every box.
        scale_splits: area thresholds separating box-size buckets.
        img_prob: probability of augmenting this image at all.
        box_prob: base probability of augmenting each individual box.
        level: augmentation magnitude.

    Returns:
        The (possibly augmented) image scaled back to 0-255, and the target.

    Raises:
        ValueError: if ``aug_type`` is not a known augmentation.
    """
    if random.random() > img_prob:
        return img, target

    img /= 255.0
    tag = 'prob' if aug_type in geometric_aug_func else 'area'
    if scale_ratios is None:
        # Bug fix: `scale_ratios[tag]` used to be evaluated *before* this
        # None check, so the flat-probability fallback was unreachable
        # (it raised TypeError on None first).
        box_sample_prob = [box_prob] * len(target.bbox)
    else:
        scale_ratios_splits = [scale_ratios[tag], scale_splits]
        box_sample_prob = [
            _box_sample_prob(bbox, scale_ratios_splits, box_prob=box_prob)
            for bbox in target.bbox
        ]

    if aug_type in color_aug_func:
        # NOTE(review): this branch still indexes scale_ratios['area'], so
        # colour augmentations require scale_ratios to be provided.
        img_aug = color_aug_func[aug_type](
            img, level, target, [scale_ratios['area'], scale_splits],
            box_sample_prob)
    elif aug_type in geometric_aug_func:
        img_aug, target = geometric_aug_func[aug_type](img, level, target,
                                                       box_sample_prob)
    else:
        raise ValueError('Unknown box-level augmentation function %s.' %
                         (aug_type))
    out = img_aug * 255.0
    return out, target
15,171 | import random
import torch
import torch.nn.functional as F
from damo.augmentations.box_level_augs.gaussian_maps import _merge_gaussian
def solarize(image, threshold=0.5):
    """Inverts every pixel strictly above ``threshold`` (values in [0, 1]);
    pixels at or below the threshold pass through unchanged."""
    inverted = 1.0 - image
    return torch.where(image <= threshold, image, inverted)
15,172 | import random
import torch
import torch.nn.functional as F
from damo.augmentations.box_level_augs.gaussian_maps import _merge_gaussian
def solarize_add(image, addition=0, threshold=0.5):
    """Adds ``addition`` to every pixel at or below ``threshold`` and clamps
    the result to [0, 1]; brighter pixels pass through unchanged."""
    shifted = torch.clamp(image + addition, 0.0, 1.0)
    return torch.where(image <= threshold, shifted, image)
15,173 | import random
import torch
import torch.nn.functional as F
from damo.augmentations.box_level_augs.gaussian_maps import _merge_gaussian
def blend(image1, image2, factor):
    """Linearly interpolates (or extrapolates) between two images.

    factor == 0.0 returns image1 unchanged and factor == 1.0 returns
    image2. Values strictly between interpolate; values outside [0, 1]
    extrapolate the pixel difference, with the result clamped to [0, 1].
    """
    if factor == 0.0:
        return image1
    if factor == 1.0:
        return image2
    mixed = image1 + factor * (image2 - image1)
    if 0.0 < factor < 1.0:
        # Pure interpolation already stays within [0, 1].
        return mixed
    # Extrapolation can leave the valid range, so clamp.
    return torch.clamp(mixed, 0.0, 1.0)
def rgb2gray(rgb):
    """Converts a (3, H, W) RGB tensor to grayscale with BT.601 luma
    weights, replicating the result across all three channels."""
    luma = 0.2989 * rgb[0] + 0.5870 * rgb[1] + 0.1140 * rgb[2]
    return luma.unsqueeze(0).repeat((3, 1, 1))
The provided code snippet includes necessary dependencies for implementing the `color` function. Write a Python function `def color(img, factor)` to solve the following problem:
Equivalent of PIL Color.
Here is the function:
def color(img, factor):
    """Equivalent of PIL Color: blends the image with its grayscale
    version — factor 0 yields grayscale, factor 1 the original image."""
    if img.shape[0] == 0 or img.shape[1] == 0:
        # Degenerate (empty) crop: nothing to adjust.
        return img
    return blend(rgb2gray(img), img, factor)
15,174 | import random
import torch
import torch.nn.functional as F
from damo.augmentations.box_level_augs.gaussian_maps import _merge_gaussian
def blend(image1, image2, factor):
    """Mixes image1 and image2 by ``factor``: 0.0 -> image1, 1.0 -> image2;
    in-between values interpolate, while values outside [0, 1] extrapolate
    the difference and clamp the result to [0, 1]."""
    if factor == 0.0:
        return image1
    if factor == 1.0:
        return image2
    delta = (image2 - image1) * factor
    result = image1 + delta
    if factor > 0.0 and factor < 1.0:
        # Interpolation cannot leave [0, 1]; return directly.
        return result
    # Extrapolation may overshoot, so clip back into range.
    return torch.clamp(result, 0.0, 1.0)
def rgb2gray(rgb):
    """Collapses a (3, H, W) RGB tensor to 3-channel grayscale using
    BT.601 luma coefficients."""
    weights = (0.2989, 0.5870, 0.1140)
    gray = rgb[0] * weights[0] + rgb[1] * weights[1] + rgb[2] * weights[2]
    return gray.unsqueeze(0).repeat((3, 1, 1))
def contrast(img, factor):
    """Equivalent of PIL Contrast: blends the image with its mean grayscale
    intensity; factor < 1 reduces contrast, factor > 1 increases it."""
    work_dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    gray_mean = torch.mean(rgb2gray(img).to(work_dtype),
                           dim=(-3, -2, -1), keepdim=True)
    # Floor the factor so blend() never short-circuits to the flat mean image.
    return blend(gray_mean, img, max(factor, 1e-6))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.