| repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition |
|---|---|---|---|---|---|---|---|---|---|---|---|
kolypto/py-flask-jsontools | flask_jsontools/decorators.py | jsonapi | def jsonapi(f):
""" Declare the view as a JSON API method
This converts view return value into a :cls:JsonResponse.
The following return types are supported:
- tuple: a tuple of (response, status, headers)
- any other object is converted to JSON
"""
@wraps(f)
def wrapper(*args, **kwargs):
rv = f(*args, **kwargs)
return make_json_response(rv)
return wrapper | python | def jsonapi(f):
""" Declare the view as a JSON API method
This converts view return value into a :cls:JsonResponse.
The following return types are supported:
- tuple: a tuple of (response, status, headers)
- any other object is converted to JSON
"""
@wraps(f)
def wrapper(*args, **kwargs):
rv = f(*args, **kwargs)
return make_json_response(rv)
return wrapper | [
"def",
"jsonapi",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"rv",
"=",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"make_json_response",
"(",
"rv",... | Declare the view as a JSON API method
This converts view return value into a :cls:JsonResponse.
The following return types are supported:
- tuple: a tuple of (response, status, headers)
- any other object is converted to JSON | [
"Declare",
"the",
"view",
"as",
"a",
"JSON",
"API",
"method"
] | 1abee2d40e6db262e43f0c534e90faaa9b26246a | https://github.com/kolypto/py-flask-jsontools/blob/1abee2d40e6db262e43f0c534e90faaa9b26246a/flask_jsontools/decorators.py#L6-L19 | train |
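A minimal usage sketch for the `jsonapi` decorator in the row above. The Flask app, route, and payload are illustrative, and the top-level `flask_jsontools` import is an assumption about the package layout rather than something shown in this row.

```python
# Hypothetical endpoint; names and values are illustrative only.
from flask import Flask
from flask_jsontools import jsonapi  # assumed top-level export

app = Flask(__name__)

@app.route('/user')
@jsonapi
def get_user():
    # Any return value is converted to JSON; a (response, status, headers)
    # tuple is also accepted, per the docstring above.
    return {'id': 1, 'name': 'Alice'}, 200, {'X-Example': 'yes'}
```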
pyinvoke/invocations | invocations/packaging/vendorize.py | _unpack | def _unpack(c, tmp, package, version, git_url=None):
"""
Download + unpack given package into temp dir ``tmp``.
Return ``(real_version, source)`` where ``real_version`` is the "actual"
version downloaded (e.g. if a Git master was indicated, it will be the SHA
of master HEAD) and ``source`` is the source directory (relative to
unpacked source) to import into ``<project>/vendor``.
"""
real_version = version[:]
source = None
if git_url:
pass
# git clone into tempdir
# git checkout <version>
# set target to checkout
# if version does not look SHA-ish:
# in the checkout, obtain SHA from that branch
# set real_version to that value
else:
cwd = os.getcwd()
print("Moving into temp dir %s" % tmp)
os.chdir(tmp)
try:
# Nab from index. Skip wheels; we want to unpack an sdist.
flags = "--download=. --build=build --no-use-wheel"
cmd = "pip install %s %s==%s" % (flags, package, version)
c.run(cmd)
# Identify basename
# TODO: glob is bad here because pip install --download gets all
# dependencies too! ugh. Figure out best approach for that.
globs = []
globexpr = ""
for extension, opener in (
("zip", "unzip"),
("tgz", "tar xzvf"),
("tar.gz", "tar xzvf"),
):
globexpr = "*.{0}".format(extension)
globs = glob(globexpr)
if globs:
break
archive = os.path.basename(globs[0])
source, _, _ = archive.rpartition(".{0}".format(extension))
c.run("{0} {1}".format(opener, globexpr))
finally:
os.chdir(cwd)
return real_version, source | python | def _unpack(c, tmp, package, version, git_url=None):
"""
Download + unpack given package into temp dir ``tmp``.
Return ``(real_version, source)`` where ``real_version`` is the "actual"
version downloaded (e.g. if a Git master was indicated, it will be the SHA
of master HEAD) and ``source`` is the source directory (relative to
unpacked source) to import into ``<project>/vendor``.
"""
real_version = version[:]
source = None
if git_url:
pass
# git clone into tempdir
# git checkout <version>
# set target to checkout
# if version does not look SHA-ish:
# in the checkout, obtain SHA from that branch
# set real_version to that value
else:
cwd = os.getcwd()
print("Moving into temp dir %s" % tmp)
os.chdir(tmp)
try:
# Nab from index. Skip wheels; we want to unpack an sdist.
flags = "--download=. --build=build --no-use-wheel"
cmd = "pip install %s %s==%s" % (flags, package, version)
c.run(cmd)
# Identify basename
# TODO: glob is bad here because pip install --download gets all
# dependencies too! ugh. Figure out best approach for that.
globs = []
globexpr = ""
for extension, opener in (
("zip", "unzip"),
("tgz", "tar xzvf"),
("tar.gz", "tar xzvf"),
):
globexpr = "*.{0}".format(extension)
globs = glob(globexpr)
if globs:
break
archive = os.path.basename(globs[0])
source, _, _ = archive.rpartition(".{0}".format(extension))
c.run("{0} {1}".format(opener, globexpr))
finally:
os.chdir(cwd)
return real_version, source | [
"def",
"_unpack",
"(",
"c",
",",
"tmp",
",",
"package",
",",
"version",
",",
"git_url",
"=",
"None",
")",
":",
"real_version",
"=",
"version",
"[",
":",
"]",
"source",
"=",
"None",
"if",
"git_url",
":",
"pass",
"# git clone into tempdir",
"# git checko... | Download + unpack given package into temp dir ``tmp``.
Return ``(real_version, source)`` where ``real_version`` is the "actual"
version downloaded (e.g. if a Git master was indicated, it will be the SHA
of master HEAD) and ``source`` is the source directory (relative to
unpacked source) to import into ``<project>/vendor``. | [
"Download",
"+",
"unpack",
"given",
"package",
"into",
"temp",
"dir",
"tmp",
"."
] | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/packaging/vendorize.py#L13-L60 | train |
pyinvoke/invocations | invocations/packaging/vendorize.py | vendorize | def vendorize(
c,
distribution,
version,
vendor_dir,
package=None,
git_url=None,
license=None,
):
"""
Vendorize Python package ``distribution`` at version/SHA ``version``.
Specify the vendor folder (e.g. ``<mypackage>/vendor``) as ``vendor_dir``.
For Crate/PyPI releases, ``package`` should be the name of the software
entry on those sites, and ``version`` should be a specific version number.
E.g. ``vendorize('lexicon', '0.1.2')``.
For Git releases, ``package`` should be the name of the package folder
within the checkout that needs to be vendorized and ``version`` should be a
Git identifier (branch, tag, SHA etc.) ``git_url`` must also be given,
something suitable for ``git clone <git_url>``.
For SVN releases: xxx.
For packages where the distribution name is not the same as the package
directory name, give ``package='name'``.
By default, no explicit license seeking is done -- we assume the license
info is in file headers or otherwise within the Python package vendorized.
This is not always true; specify ``license=/path/to/license/file`` to
trigger copying of a license into the vendored folder from the
checkout/download (relative to its root.)
"""
with tmpdir() as tmp:
package = package or distribution
target = os.path.join(vendor_dir, package)
# Unpack source
real_version, source = _unpack(c, tmp, distribution, version, git_url)
abs_source = os.path.join(tmp, source)
source_package = os.path.join(abs_source, package)
# Ensure source package exists
if not os.path.exists(source_package):
rel_package = os.path.join(source, package)
raise ValueError("Source package %s doesn't exist!" % rel_package)
# Nuke target if exists
if os.path.exists(target):
print("Removing pre-existing vendorized folder %s" % target)
rmtree(target)
# Perform the copy
print("Copying %s => %s" % (source_package, target))
copytree(source_package, target)
# Explicit license if needed
if license:
copy(os.path.join(abs_source, license), target) | python | def vendorize(
c,
distribution,
version,
vendor_dir,
package=None,
git_url=None,
license=None,
):
"""
Vendorize Python package ``distribution`` at version/SHA ``version``.
Specify the vendor folder (e.g. ``<mypackage>/vendor``) as ``vendor_dir``.
For Crate/PyPI releases, ``package`` should be the name of the software
entry on those sites, and ``version`` should be a specific version number.
E.g. ``vendorize('lexicon', '0.1.2')``.
For Git releases, ``package`` should be the name of the package folder
within the checkout that needs to be vendorized and ``version`` should be a
Git identifier (branch, tag, SHA etc.) ``git_url`` must also be given,
something suitable for ``git clone <git_url>``.
For SVN releases: xxx.
For packages where the distribution name is not the same as the package
directory name, give ``package='name'``.
By default, no explicit license seeking is done -- we assume the license
info is in file headers or otherwise within the Python package vendorized.
This is not always true; specify ``license=/path/to/license/file`` to
trigger copying of a license into the vendored folder from the
checkout/download (relative to its root.)
"""
with tmpdir() as tmp:
package = package or distribution
target = os.path.join(vendor_dir, package)
# Unpack source
real_version, source = _unpack(c, tmp, distribution, version, git_url)
abs_source = os.path.join(tmp, source)
source_package = os.path.join(abs_source, package)
# Ensure source package exists
if not os.path.exists(source_package):
rel_package = os.path.join(source, package)
raise ValueError("Source package %s doesn't exist!" % rel_package)
# Nuke target if exists
if os.path.exists(target):
print("Removing pre-existing vendorized folder %s" % target)
rmtree(target)
# Perform the copy
print("Copying %s => %s" % (source_package, target))
copytree(source_package, target)
# Explicit license if needed
if license:
copy(os.path.join(abs_source, license), target) | [
"def",
"vendorize",
"(",
"c",
",",
"distribution",
",",
"version",
",",
"vendor_dir",
",",
"package",
"=",
"None",
",",
"git_url",
"=",
"None",
",",
"license",
"=",
"None",
",",
")",
":",
"with",
"tmpdir",
"(",
")",
"as",
"tmp",
":",
"package",
"=",
... | Vendorize Python package ``distribution`` at version/SHA ``version``.
Specify the vendor folder (e.g. ``<mypackage>/vendor``) as ``vendor_dir``.
For Crate/PyPI releases, ``package`` should be the name of the software
entry on those sites, and ``version`` should be a specific version number.
E.g. ``vendorize('lexicon', '0.1.2')``.
For Git releases, ``package`` should be the name of the package folder
within the checkout that needs to be vendorized and ``version`` should be a
Git identifier (branch, tag, SHA etc.) ``git_url`` must also be given,
something suitable for ``git clone <git_url>``.
For SVN releases: xxx.
For packages where the distribution name is not the same as the package
directory name, give ``package='name'``.
By default, no explicit license seeking is done -- we assume the license
info is in file headers or otherwise within the Python package vendorized.
This is not always true; specify ``license=/path/to/license/file`` to
trigger copying of a license into the vendored folder from the
checkout/download (relative to its root.) | [
"Vendorize",
"Python",
"package",
"distribution",
"at",
"version",
"/",
"SHA",
"version",
"."
] | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/packaging/vendorize.py#L64-L118 | train |
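A hedged sketch of driving the `vendorize` task programmatically with an Invoke context; the distribution name, version, and vendor path below are made-up values, and in practice the task would normally be run from the `inv` command line.

```python
# Illustrative only: package, version and vendor_dir are assumptions.
from invoke import Context
from invocations.packaging.vendorize import vendorize

c = Context()
# Fetch lexicon 0.1.2 from PyPI and copy its package folder into mypkg/vendor.
vendorize(c, distribution="lexicon", version="0.1.2", vendor_dir="mypkg/vendor")
```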
pyinvoke/invocations | invocations/travis.py | make_sudouser | def make_sudouser(c):
"""
Create a passworded sudo-capable user.
Used by other tasks to execute the test suite so sudo tests work.
"""
user = c.travis.sudo.user
password = c.travis.sudo.password
# --create-home because we need a place to put conf files, keys etc
# --groups travis because we must be in the Travis group to access the
# (created by Travis for us) virtualenv and other contents within
# /home/travis.
c.sudo("useradd {0} --create-home --groups travis".format(user))
# Password 'mypass' also arbitrary
c.run("echo {0}:{1} | sudo chpasswd".format(user, password))
# Set up new (glob-sourced) sudoers conf file for our user; easier than
# attempting to mutate or overwrite main sudoers conf.
conf = "/etc/sudoers.d/passworded"
cmd = "echo '{0} ALL=(ALL:ALL) PASSWD:ALL' > {1}".format(user, conf)
c.sudo('sh -c "{0}"'.format(cmd))
# Grant travis group write access to /home/travis as some integration tests
# may try writing conf files there. (TODO: shouldn't running the tests via
# 'sudo -H' mean that's no longer necessary?)
c.sudo("chmod g+w /home/travis") | python | def make_sudouser(c):
"""
Create a passworded sudo-capable user.
Used by other tasks to execute the test suite so sudo tests work.
"""
user = c.travis.sudo.user
password = c.travis.sudo.password
# --create-home because we need a place to put conf files, keys etc
# --groups travis because we must be in the Travis group to access the
# (created by Travis for us) virtualenv and other contents within
# /home/travis.
c.sudo("useradd {0} --create-home --groups travis".format(user))
# Password 'mypass' also arbitrary
c.run("echo {0}:{1} | sudo chpasswd".format(user, password))
# Set up new (glob-sourced) sudoers conf file for our user; easier than
# attempting to mutate or overwrite main sudoers conf.
conf = "/etc/sudoers.d/passworded"
cmd = "echo '{0} ALL=(ALL:ALL) PASSWD:ALL' > {1}".format(user, conf)
c.sudo('sh -c "{0}"'.format(cmd))
# Grant travis group write access to /home/travis as some integration tests
# may try writing conf files there. (TODO: shouldn't running the tests via
# 'sudo -H' mean that's no longer necessary?)
c.sudo("chmod g+w /home/travis") | [
"def",
"make_sudouser",
"(",
"c",
")",
":",
"user",
"=",
"c",
".",
"travis",
".",
"sudo",
".",
"user",
"password",
"=",
"c",
".",
"travis",
".",
"sudo",
".",
"password",
"# --create-home because we need a place to put conf files, keys etc",
"# --groups travis becaus... | Create a passworded sudo-capable user.
Used by other tasks to execute the test suite so sudo tests work. | [
"Create",
"a",
"passworded",
"sudo",
"-",
"capable",
"user",
"."
] | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/travis.py#L26-L49 | train |
pyinvoke/invocations | invocations/travis.py | make_sshable | def make_sshable(c):
"""
Set up passwordless SSH keypair & authorized_hosts access to localhost.
"""
user = c.travis.sudo.user
home = "~{0}".format(user)
# Run sudo() as the new sudo user; means less chown'ing, etc.
c.config.sudo.user = user
ssh_dir = "{0}/.ssh".format(home)
# TODO: worth wrapping in 'sh -c' and using '&&' instead of doing this?
for cmd in ("mkdir {0}", "chmod 0700 {0}"):
c.sudo(cmd.format(ssh_dir, user))
c.sudo('ssh-keygen -f {0}/id_rsa -N ""'.format(ssh_dir))
c.sudo("cp {0}/{{id_rsa.pub,authorized_keys}}".format(ssh_dir)) | python | def make_sshable(c):
"""
Set up passwordless SSH keypair & authorized_hosts access to localhost.
"""
user = c.travis.sudo.user
home = "~{0}".format(user)
# Run sudo() as the new sudo user; means less chown'ing, etc.
c.config.sudo.user = user
ssh_dir = "{0}/.ssh".format(home)
# TODO: worth wrapping in 'sh -c' and using '&&' instead of doing this?
for cmd in ("mkdir {0}", "chmod 0700 {0}"):
c.sudo(cmd.format(ssh_dir, user))
c.sudo('ssh-keygen -f {0}/id_rsa -N ""'.format(ssh_dir))
c.sudo("cp {0}/{{id_rsa.pub,authorized_keys}}".format(ssh_dir)) | [
"def",
"make_sshable",
"(",
"c",
")",
":",
"user",
"=",
"c",
".",
"travis",
".",
"sudo",
".",
"user",
"home",
"=",
"\"~{0}\"",
".",
"format",
"(",
"user",
")",
"# Run sudo() as the new sudo user; means less chown'ing, etc.",
"c",
".",
"config",
".",
"sudo",
... | Set up passwordless SSH keypair & authorized_hosts access to localhost. | [
"Set",
"up",
"passwordless",
"SSH",
"keypair",
"&",
"authorized_hosts",
"access",
"to",
"localhost",
"."
] | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/travis.py#L55-L68 | train |
pyinvoke/invocations | invocations/travis.py | sudo_run | def sudo_run(c, command):
"""
Run some command under Travis-oriented sudo subshell/virtualenv.
:param str command:
Command string to run, e.g. ``inv coverage``, ``inv integration``, etc.
(Does not necessarily need to be an Invoke task, but...)
"""
# NOTE: explicit shell wrapper because sourcing the venv works best here;
# test tasks currently use their own subshell to call e.g. 'pytest --blah',
# so the tactic of '$VIRTUAL_ENV/bin/inv coverage' doesn't help - only that
# intermediate process knows about the venv!
cmd = "source $VIRTUAL_ENV/bin/activate && {}".format(command)
c.sudo('bash -c "{0}"'.format(cmd), user=c.travis.sudo.user) | python | def sudo_run(c, command):
"""
Run some command under Travis-oriented sudo subshell/virtualenv.
:param str command:
Command string to run, e.g. ``inv coverage``, ``inv integration``, etc.
(Does not necessarily need to be an Invoke task, but...)
"""
# NOTE: explicit shell wrapper because sourcing the venv works best here;
# test tasks currently use their own subshell to call e.g. 'pytest --blah',
# so the tactic of '$VIRTUAL_ENV/bin/inv coverage' doesn't help - only that
# intermediate process knows about the venv!
cmd = "source $VIRTUAL_ENV/bin/activate && {}".format(command)
c.sudo('bash -c "{0}"'.format(cmd), user=c.travis.sudo.user) | [
"def",
"sudo_run",
"(",
"c",
",",
"command",
")",
":",
"# NOTE: explicit shell wrapper because sourcing the venv works best here;",
"# test tasks currently use their own subshell to call e.g. 'pytest --blah',",
"# so the tactic of '$VIRTUAL_ENV/bin/inv coverage' doesn't help - only that",
"# i... | Run some command under Travis-oriented sudo subshell/virtualenv.
:param str command:
Command string to run, e.g. ``inv coverage``, ``inv integration``, etc.
(Does not necessarily need to be an Invoke task, but...) | [
"Run",
"some",
"command",
"under",
"Travis",
"-",
"oriented",
"sudo",
"subshell",
"/",
"virtualenv",
"."
] | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/travis.py#L72-L85 | train |
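A hedged sketch of calling `sudo_run`; the user, password, and command string are illustrative, and on a real Travis worker the `travis.sudo.*` settings come from the project's Invoke configuration rather than inline overrides.

```python
# Illustrative only: credentials and the command string are assumptions.
from invoke import Config, Context
from invocations.travis import sudo_run

config = Config(overrides={
    "travis": {"sudo": {"user": "sudouser", "password": "mypass"}},
})
c = Context(config=config)
sudo_run(c, "inv coverage")  # runs inside the venv as the sudo-capable user
```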
pyinvoke/invocations | invocations/travis.py | blacken | def blacken(c):
"""
Install and execute ``black`` under appropriate circumstances, with diffs.
Installs and runs ``black`` under Python 3.6 (the first version it
supports). Since this sort of CI based task only needs to run once per
commit (formatting is not going to change between interpreters) this seems
like a worthwhile tradeoff.
This task uses black's ``--check`` and ``--fail`` flags, so not only will
the build fail if it does not conform, but contributors can see exactly
what they need to change. This is intended as a hedge against the fact that
not all contributors will be using Python 3.6+.
"""
if not PYTHON.startswith("3.6"):
msg = "Not blackening, since Python {} != Python 3.6".format(PYTHON)
print(msg, file=sys.stderr)
return
# Install, allowing config override of hardcoded default version
config = c.config.get("travis", {}).get("black", {})
version = config.get("version", "18.5b0")
c.run("pip install black=={}".format(version))
# Execute our blacken task, with diff + check, which will both error
# and emit diffs.
checks.blacken(c, check=True, diff=True) | python | def blacken(c):
"""
Install and execute ``black`` under appropriate circumstances, with diffs.
Installs and runs ``black`` under Python 3.6 (the first version it
supports). Since this sort of CI based task only needs to run once per
commit (formatting is not going to change between interpreters) this seems
like a worthwhile tradeoff.
This task uses black's ``--check`` and ``--fail`` flags, so not only will
the build fail if it does not conform, but contributors can see exactly
what they need to change. This is intended as a hedge against the fact that
not all contributors will be using Python 3.6+.
"""
if not PYTHON.startswith("3.6"):
msg = "Not blackening, since Python {} != Python 3.6".format(PYTHON)
print(msg, file=sys.stderr)
return
# Install, allowing config override of hardcoded default version
config = c.config.get("travis", {}).get("black", {})
version = config.get("version", "18.5b0")
c.run("pip install black=={}".format(version))
# Execute our blacken task, with diff + check, which will both error
# and emit diffs.
checks.blacken(c, check=True, diff=True) | [
"def",
"blacken",
"(",
"c",
")",
":",
"if",
"not",
"PYTHON",
".",
"startswith",
"(",
"\"3.6\"",
")",
":",
"msg",
"=",
"\"Not blackening, since Python {} != Python 3.6\"",
".",
"format",
"(",
"PYTHON",
")",
"print",
"(",
"msg",
",",
"file",
"=",
"sys",
".",... | Install and execute ``black`` under appropriate circumstances, with diffs.
Installs and runs ``black`` under Python 3.6 (the first version it
supports). Since this sort of CI based task only needs to run once per
commit (formatting is not going to change between interpreters) this seems
like a worthwhile tradeoff.
This task uses black's ``--check`` and ``--fail`` flags, so not only will
the build fail if it does not conform, but contributors can see exactly
what they need to change. This is intended as a hedge against the fact that
not all contributors will be using Python 3.6+. | [
"Install",
"and",
"execute",
"black",
"under",
"appropriate",
"circumstances",
"with",
"diffs",
"."
] | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/travis.py#L166-L190 | train |
pysal/giddy | giddy/rank.py | Tau._calc | def _calc(self, x, y):
"""
List based implementation of binary tree algorithm for concordance
measure after :cite:`Christensen2005`.
"""
x = np.array(x)
y = np.array(y)
n = len(y)
perm = list(range(n))
perm.sort(key=lambda a: (x[a], y[a]))
vals = y[perm]
ExtraY = 0
ExtraX = 0
ACount = 0
BCount = 0
CCount = 0
DCount = 0
ECount = 0
DCount = 0
Concordant = 0
Discordant = 0
# ids for left child
li = [None] * (n - 1)
# ids for right child
ri = [None] * (n - 1)
# number of left descendants for a node
ld = np.zeros(n)
# number of values equal to value i
nequal = np.zeros(n)
for i in range(1, n):
NumBefore = 0
NumEqual = 1
root = 0
x0 = x[perm[i - 1]]
y0 = y[perm[i - 1]]
x1 = x[perm[i]]
y1 = y[perm[i]]
if x0 != x1:
DCount = 0
ECount = 1
else:
if y0 == y1:
ECount += 1
else:
DCount += ECount
ECount = 1
root = 0
inserting = True
while inserting:
current = y[perm[i]]
if current > y[perm[root]]:
# right branch
NumBefore += 1 + ld[root] + nequal[root]
if ri[root] is None:
# insert as right child to root
ri[root] = i
inserting = False
else:
root = ri[root]
elif current < y[perm[root]]:
# increment number of left descendants
ld[root] += 1
if li[root] is None:
# insert as left child to root
li[root] = i
inserting = False
else:
root = li[root]
elif current == y[perm[root]]:
NumBefore += ld[root]
NumEqual += nequal[root] + 1
nequal[root] += 1
inserting = False
ACount = NumBefore - DCount
BCount = NumEqual - ECount
CCount = i - (ACount + BCount + DCount + ECount - 1)
ExtraY += DCount
ExtraX += BCount
Concordant += ACount
Discordant += CCount
cd = Concordant + Discordant
num = Concordant - Discordant
tau = num / np.sqrt((cd + ExtraX) * (cd + ExtraY))
v = (4. * n + 10) / (9. * n * (n - 1))
z = tau / np.sqrt(v)
pval = erfc(np.abs(z) / 1.4142136) # follow scipy
return tau, pval, Concordant, Discordant, ExtraX, ExtraY | python | def _calc(self, x, y):
"""
List based implementation of binary tree algorithm for concordance
measure after :cite:`Christensen2005`.
"""
x = np.array(x)
y = np.array(y)
n = len(y)
perm = list(range(n))
perm.sort(key=lambda a: (x[a], y[a]))
vals = y[perm]
ExtraY = 0
ExtraX = 0
ACount = 0
BCount = 0
CCount = 0
DCount = 0
ECount = 0
DCount = 0
Concordant = 0
Discordant = 0
# ids for left child
li = [None] * (n - 1)
# ids for right child
ri = [None] * (n - 1)
# number of left descendants for a node
ld = np.zeros(n)
# number of values equal to value i
nequal = np.zeros(n)
for i in range(1, n):
NumBefore = 0
NumEqual = 1
root = 0
x0 = x[perm[i - 1]]
y0 = y[perm[i - 1]]
x1 = x[perm[i]]
y1 = y[perm[i]]
if x0 != x1:
DCount = 0
ECount = 1
else:
if y0 == y1:
ECount += 1
else:
DCount += ECount
ECount = 1
root = 0
inserting = True
while inserting:
current = y[perm[i]]
if current > y[perm[root]]:
# right branch
NumBefore += 1 + ld[root] + nequal[root]
if ri[root] is None:
# insert as right child to root
ri[root] = i
inserting = False
else:
root = ri[root]
elif current < y[perm[root]]:
# increment number of left descendants
ld[root] += 1
if li[root] is None:
# insert as left child to root
li[root] = i
inserting = False
else:
root = li[root]
elif current == y[perm[root]]:
NumBefore += ld[root]
NumEqual += nequal[root] + 1
nequal[root] += 1
inserting = False
ACount = NumBefore - DCount
BCount = NumEqual - ECount
CCount = i - (ACount + BCount + DCount + ECount - 1)
ExtraY += DCount
ExtraX += BCount
Concordant += ACount
Discordant += CCount
cd = Concordant + Discordant
num = Concordant - Discordant
tau = num / np.sqrt((cd + ExtraX) * (cd + ExtraY))
v = (4. * n + 10) / (9. * n * (n - 1))
z = tau / np.sqrt(v)
pval = erfc(np.abs(z) / 1.4142136) # follow scipy
return tau, pval, Concordant, Discordant, ExtraX, ExtraY | [
"def",
"_calc",
"(",
"self",
",",
"x",
",",
"y",
")",
":",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
"y",
"=",
"np",
".",
"array",
"(",
"y",
")",
"n",
"=",
"len",
"(",
"y",
")",
"perm",
"=",
"list",
"(",
"range",
"(",
"n",
")",
")",
... | List based implementation of binary tree algorithm for concordance
measure after :cite:`Christensen2005`. | [
"List",
"based",
"implementation",
"of",
"binary",
"tree",
"algorithm",
"for",
"concordance",
"measure",
"after",
":",
"cite",
":",
"Christensen2005",
"."
] | 13fae6c18933614be78e91a6b5060693bea33a04 | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/rank.py#L171-L261 | train |
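The `_calc` method above is a binary-tree count of concordant and discordant pairs, i.e. Kendall's tau-b with tie corrections. A hedged cross-check against SciPy, assuming the surrounding `Tau` class (not shown in this row) exposes the result as `.tau` and `.tau_p`:

```python
# Made-up ranks; the Tau attribute names are assumptions about giddy's API.
import numpy as np
from scipy.stats import kendalltau
from giddy.rank import Tau

x = np.array([1, 2, 3, 4, 5, 6])
y = np.array([2, 1, 3, 5, 4, 6])

t = Tau(x, y)
tau_scipy, p_scipy = kendalltau(x, y)
print(t.tau, tau_scipy)  # both are tau-b; the estimates should agree closely
print(t.tau_p, p_scipy)  # p-values may differ: giddy uses a normal approximation
```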
kolypto/py-flask-jsontools | flask_jsontools/views.py | _MethodViewInfo.decorator | def decorator(self, func):
""" Wrapper function to decorate a function """
if inspect.isfunction(func):
func._methodview = self
elif inspect.ismethod(func):
func.__func__._methodview = self
else:
raise AssertionError('Can only decorate function and methods, {} given'.format(func))
return func | python | def decorator(self, func):
""" Wrapper function to decorate a function """
if inspect.isfunction(func):
func._methodview = self
elif inspect.ismethod(func):
func.__func__._methodview = self
else:
raise AssertionError('Can only decorate function and methods, {} given'.format(func))
return func | [
"def",
"decorator",
"(",
"self",
",",
"func",
")",
":",
"if",
"inspect",
".",
"isfunction",
"(",
"func",
")",
":",
"func",
".",
"_methodview",
"=",
"self",
"elif",
"inspect",
".",
"ismethod",
"(",
"func",
")",
":",
"func",
".",
"__func__",
".",
"_met... | Wrapper function to decorate a function | [
"Wrapper",
"function",
"to",
"decorate",
"a",
"function"
] | 1abee2d40e6db262e43f0c534e90faaa9b26246a | https://github.com/kolypto/py-flask-jsontools/blob/1abee2d40e6db262e43f0c534e90faaa9b26246a/flask_jsontools/views.py#L30-L38 | train |
kolypto/py-flask-jsontools | flask_jsontools/views.py | _MethodViewInfo.matches | def matches(self, verb, params):
""" Test if the method matches the provided set of arguments
:param verb: HTTP verb. Uppercase
:type verb: str
:param params: Existing route parameters
:type params: set
:returns: Whether this view matches
:rtype: bool
"""
return (self.ifset is None or self.ifset <= params) and \
(self.ifnset is None or self.ifnset.isdisjoint(params)) and \
(self.methods is None or verb in self.methods) | python | def matches(self, verb, params):
""" Test if the method matches the provided set of arguments
:param verb: HTTP verb. Uppercase
:type verb: str
:param params: Existing route parameters
:type params: set
:returns: Whether this view matches
:rtype: bool
"""
return (self.ifset is None or self.ifset <= params) and \
(self.ifnset is None or self.ifnset.isdisjoint(params)) and \
(self.methods is None or verb in self.methods) | [
"def",
"matches",
"(",
"self",
",",
"verb",
",",
"params",
")",
":",
"return",
"(",
"self",
".",
"ifset",
"is",
"None",
"or",
"self",
".",
"ifset",
"<=",
"params",
")",
"and",
"(",
"self",
".",
"ifnset",
"is",
"None",
"or",
"self",
".",
"ifnset",
... | Test if the method matches the provided set of arguments
:param verb: HTTP verb. Uppercase
:type verb: str
:param params: Existing route parameters
:type params: set
:returns: Whether this view matches
:rtype: bool | [
"Test",
"if",
"the",
"method",
"matches",
"the",
"provided",
"set",
"of",
"arguments"
] | 1abee2d40e6db262e43f0c534e90faaa9b26246a | https://github.com/kolypto/py-flask-jsontools/blob/1abee2d40e6db262e43f0c534e90faaa9b26246a/flask_jsontools/views.py#L63-L75 | train |
kolypto/py-flask-jsontools | flask_jsontools/views.py | MethodView._match_view | def _match_view(self, method, route_params):
""" Detect a view matching the query
:param method: HTTP method
:param route_params: Route parameters dict
:return: Method
:rtype: Callable|None
"""
method = method.upper()
route_params = frozenset(k for k, v in route_params.items() if v is not None)
for view_name, info in self.methods_map[method].items():
if info.matches(method, route_params):
return getattr(self, view_name)
else:
return None | python | def _match_view(self, method, route_params):
""" Detect a view matching the query
:param method: HTTP method
:param route_params: Route parameters dict
:return: Method
:rtype: Callable|None
"""
method = method.upper()
route_params = frozenset(k for k, v in route_params.items() if v is not None)
for view_name, info in self.methods_map[method].items():
if info.matches(method, route_params):
return getattr(self, view_name)
else:
return None | [
"def",
"_match_view",
"(",
"self",
",",
"method",
",",
"route_params",
")",
":",
"method",
"=",
"method",
".",
"upper",
"(",
")",
"route_params",
"=",
"frozenset",
"(",
"k",
"for",
"k",
",",
"v",
"in",
"route_params",
".",
"items",
"(",
")",
"if",
"v... | Detect a view matching the query
:param method: HTTP method
:param route_params: Route parameters dict
:return: Method
:rtype: Callable|None | [
"Detect",
"a",
"view",
"matching",
"the",
"query"
] | 1abee2d40e6db262e43f0c534e90faaa9b26246a | https://github.com/kolypto/py-flask-jsontools/blob/1abee2d40e6db262e43f0c534e90faaa9b26246a/flask_jsontools/views.py#L112-L127 | train |
kolypto/py-flask-jsontools | flask_jsontools/views.py | MethodView.route_as_view | def route_as_view(cls, app, name, rules, *class_args, **class_kwargs):
""" Register the view with an URL route
:param app: Flask application
:type app: flask.Flask|flask.Blueprint
:param name: Unique view name
:type name: str
:param rules: List of route rules to use
:type rules: Iterable[str|werkzeug.routing.Rule]
:param class_args: Args to pass to object constructor
:param class_kwargs: KwArgs to pass to object constructor
:return: View callable
:rtype: Callable
"""
view = super(MethodView, cls).as_view(name, *class_args, **class_kwargs)
for rule in rules:
app.add_url_rule(rule, view_func=view)
return view | python | def route_as_view(cls, app, name, rules, *class_args, **class_kwargs):
""" Register the view with an URL route
:param app: Flask application
:type app: flask.Flask|flask.Blueprint
:param name: Unique view name
:type name: str
:param rules: List of route rules to use
:type rules: Iterable[str|werkzeug.routing.Rule]
:param class_args: Args to pass to object constructor
:param class_kwargs: KwArgs to pass to object constructor
:return: View callable
:rtype: Callable
"""
view = super(MethodView, cls).as_view(name, *class_args, **class_kwargs)
for rule in rules:
app.add_url_rule(rule, view_func=view)
return view | [
"def",
"route_as_view",
"(",
"cls",
",",
"app",
",",
"name",
",",
"rules",
",",
"*",
"class_args",
",",
"*",
"*",
"class_kwargs",
")",
":",
"view",
"=",
"super",
"(",
"MethodView",
",",
"cls",
")",
".",
"as_view",
"(",
"name",
",",
"*",
"class_args",... | Register the view with an URL route
:param app: Flask application
:type app: flask.Flask|flask.Blueprint
:param name: Unique view name
:type name: str
:param rules: List of route rules to use
:type rules: Iterable[str|werkzeug.routing.Rule]
:param class_args: Args to pass to object constructor
:param class_kwargs: KwArgs to pass to object constructor
:return: View callable
:rtype: Callable | [
"Register",
"the",
"view",
"with",
"an",
"URL",
"route",
":",
"param",
"app",
":",
"Flask",
"application",
":",
"type",
"app",
":",
"flask",
".",
"Flask|flask",
".",
"Blueprint",
":",
"param",
"name",
":",
"Unique",
"view",
"name",
":",
"type",
"name",
... | 1abee2d40e6db262e43f0c534e90faaa9b26246a | https://github.com/kolypto/py-flask-jsontools/blob/1abee2d40e6db262e43f0c534e90faaa9b26246a/flask_jsontools/views.py#L136-L152 | train |
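A hedged sketch of wiring a `MethodView` subclass to two URL rules with `route_as_view`. The `methodview` decorator factory and its `ifset`/`ifnset` keywords are assumptions based on the package's README, not on anything in this row.

```python
# Illustrative resource; the methodview import and signature are assumptions.
from flask import Flask
from flask_jsontools import jsonapi, MethodView, methodview

app = Flask(__name__)

class UserView(MethodView):
    decorators = (jsonapi,)                   # JSON-ify every handler's return

    @methodview('GET', ifnset=('user_id',))   # GET /user/ (no user_id in route)
    def list(self):
        return [1, 2, 3]

    @methodview('GET', ifset=('user_id',))    # GET /user/<user_id>
    def get(self, user_id):
        return {'id': user_id}

UserView.route_as_view(app, 'user', ('/user/', '/user/<int:user_id>'))
```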
pysal/giddy | giddy/ergodic.py | steady_state | def steady_state(P):
"""
Calculates the steady state probability vector for a regular Markov
transition matrix P.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
: array
(k, ), steady state distribution.
Examples
--------
Taken from :cite:`Kemeny1967`. Land of Oz example where the states are
Rain, Nice and Snow, so there is 25 percent chance that if it
rained in Oz today, it will snow tomorrow, while if it snowed today in
Oz there is a 50 percent chance of snow again tomorrow and a 25
percent chance of a nice day (nice, like when the witch with the monkeys
is melting).
>>> import numpy as np
>>> from giddy.ergodic import steady_state
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> steady_state(p)
array([0.4, 0.2, 0.4])
Thus, the long run distribution for Oz is to have 40 percent of the
days classified as Rain, 20 percent as Nice, and 40 percent as Snow
(states are mutually exclusive).
"""
v, d = la.eig(np.transpose(P))
d = np.array(d)
# for a regular P maximum eigenvalue will be 1
mv = max(v)
# find its position
i = v.tolist().index(mv)
row = abs(d[:, i])
# normalize eigenvector corresponding to the eigenvalue 1
return row / sum(row) | python | def steady_state(P):
"""
Calculates the steady state probability vector for a regular Markov
transition matrix P.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
: array
(k, ), steady state distribution.
Examples
--------
Taken from :cite:`Kemeny1967`. Land of Oz example where the states are
Rain, Nice and Snow, so there is 25 percent chance that if it
rained in Oz today, it will snow tomorrow, while if it snowed today in
Oz there is a 50 percent chance of snow again tomorrow and a 25
percent chance of a nice day (nice, like when the witch with the monkeys
is melting).
>>> import numpy as np
>>> from giddy.ergodic import steady_state
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> steady_state(p)
array([0.4, 0.2, 0.4])
Thus, the long run distribution for Oz is to have 40 percent of the
days classified as Rain, 20 percent as Nice, and 40 percent as Snow
(states are mutually exclusive).
"""
v, d = la.eig(np.transpose(P))
d = np.array(d)
# for a regular P maximum eigenvalue will be 1
mv = max(v)
# find its position
i = v.tolist().index(mv)
row = abs(d[:, i])
# normalize eigenvector corresponding to the eigenvalue 1
return row / sum(row) | [
"def",
"steady_state",
"(",
"P",
")",
":",
"v",
",",
"d",
"=",
"la",
".",
"eig",
"(",
"np",
".",
"transpose",
"(",
"P",
")",
")",
"d",
"=",
"np",
".",
"array",
"(",
"d",
")",
"# for a regular P maximum eigenvalue will be 1",
"mv",
"=",
"max",
"(",
... | Calculates the steady state probability vector for a regular Markov
transition matrix P.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
: array
(k, ), steady state distribution.
Examples
--------
Taken from :cite:`Kemeny1967`. Land of Oz example where the states are
Rain, Nice and Snow, so there is 25 percent chance that if it
rained in Oz today, it will snow tomorrow, while if it snowed today in
Oz there is a 50 percent chance of snow again tomorrow and a 25
percent chance of a nice day (nice, like when the witch with the monkeys
is melting).
>>> import numpy as np
>>> from giddy.ergodic import steady_state
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> steady_state(p)
array([0.4, 0.2, 0.4])
Thus, the long run distribution for Oz is to have 40 percent of the
days classified as Rain, 20 percent as Nice, and 40 percent as Snow
(states are mutually exclusive). | [
"Calculates",
"the",
"steady",
"state",
"probability",
"vector",
"for",
"a",
"regular",
"Markov",
"transition",
"matrix",
"P",
"."
] | 13fae6c18933614be78e91a6b5060693bea33a04 | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/ergodic.py#L12-L59 | train |
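A quick numerical check of the steady-state property π·P = π, reusing the Land of Oz matrix from the docstring above:

```python
import numpy as np
from giddy.ergodic import steady_state

P = np.array([[0.50, 0.25, 0.25],
              [0.50, 0.00, 0.50],
              [0.25, 0.25, 0.50]])
pi = steady_state(P)
print(np.allclose(pi @ P, pi))  # True: pi is invariant under P
print(pi.sum())                 # 1.0: pi is a probability distribution
```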
pysal/giddy | giddy/ergodic.py | fmpt | def fmpt(P):
"""
Calculates the matrix of first mean passage times for an ergodic transition
probability matrix.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
M : array
(k, k), elements are the expected value for the number of intervals
required for a chain starting in state i to first enter state j.
If i=j then this is the recurrence time.
Examples
--------
>>> import numpy as np
>>> from giddy.ergodic import fmpt
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> fm=fmpt(p)
>>> fm
array([[2.5 , 4. , 3.33333333],
[2.66666667, 5. , 2.66666667],
[3.33333333, 4. , 2.5 ]])
Thus, if it is raining today in Oz we can expect a nice day to come
along in another 4 days, on average, and snow to hit in 3.33 days. We can
expect another rainy day in 2.5 days. If it is nice today in Oz, we would
experience a change in the weather (either rain or snow) in 2.67 days from
today. (That wicked witch can only die once so I reckon that is the
ultimate absorbing state).
Notes
-----
Uses formulation (and examples on p. 218) in :cite:`Kemeny1967`.
"""
P = np.matrix(P)
k = P.shape[0]
A = np.zeros_like(P)
ss = steady_state(P).reshape(k, 1)
for i in range(k):
A[:, i] = ss
A = A.transpose()
I = np.identity(k)
Z = la.inv(I - P + A)
E = np.ones_like(Z)
A_diag = np.diag(A)
A_diag = A_diag + (A_diag == 0)
D = np.diag(1. / A_diag)
Zdg = np.diag(np.diag(Z))
M = (I - Z + E * Zdg) * D
return np.array(M) | python | def fmpt(P):
"""
Calculates the matrix of first mean passage times for an ergodic transition
probability matrix.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
M : array
(k, k), elements are the expected value for the number of intervals
required for a chain starting in state i to first enter state j.
If i=j then this is the recurrence time.
Examples
--------
>>> import numpy as np
>>> from giddy.ergodic import fmpt
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> fm=fmpt(p)
>>> fm
array([[2.5 , 4. , 3.33333333],
[2.66666667, 5. , 2.66666667],
[3.33333333, 4. , 2.5 ]])
Thus, if it is raining today in Oz we can expect a nice day to come
along in another 4 days, on average, and snow to hit in 3.33 days. We can
expect another rainy day in 2.5 days. If it is nice today in Oz, we would
experience a change in the weather (either rain or snow) in 2.67 days from
today. (That wicked witch can only die once so I reckon that is the
ultimate absorbing state).
Notes
-----
Uses formulation (and examples on p. 218) in :cite:`Kemeny1967`.
"""
P = np.matrix(P)
k = P.shape[0]
A = np.zeros_like(P)
ss = steady_state(P).reshape(k, 1)
for i in range(k):
A[:, i] = ss
A = A.transpose()
I = np.identity(k)
Z = la.inv(I - P + A)
E = np.ones_like(Z)
A_diag = np.diag(A)
A_diag = A_diag + (A_diag == 0)
D = np.diag(1. / A_diag)
Zdg = np.diag(np.diag(Z))
M = (I - Z + E * Zdg) * D
return np.array(M) | [
"def",
"fmpt",
"(",
"P",
")",
":",
"P",
"=",
"np",
".",
"matrix",
"(",
"P",
")",
"k",
"=",
"P",
".",
"shape",
"[",
"0",
"]",
"A",
"=",
"np",
".",
"zeros_like",
"(",
"P",
")",
"ss",
"=",
"steady_state",
"(",
"P",
")",
".",
"reshape",
"(",
... | Calculates the matrix of first mean passage times for an ergodic transition
probability matrix.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
M : array
(k, k), elements are the expected value for the number of intervals
required for a chain starting in state i to first enter state j.
If i=j then this is the recurrence time.
Examples
--------
>>> import numpy as np
>>> from giddy.ergodic import fmpt
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> fm=fmpt(p)
>>> fm
array([[2.5 , 4. , 3.33333333],
[2.66666667, 5. , 2.66666667],
[3.33333333, 4. , 2.5 ]])
Thus, if it is raining today in Oz we can expect a nice day to come
along in another 4 days, on average, and snow to hit in 3.33 days. We can
expect another rainy day in 2.5 days. If it is nice today in Oz, we would
experience a change in the weather (either rain or snow) in 2.67 days from
today. (That wicked witch can only die once so I reckon that is the
ultimate absorbing state).
Notes
-----
Uses formulation (and examples on p. 218) in :cite:`Kemeny1967`. | [
"Calculates",
"the",
"matrix",
"of",
"first",
"mean",
"passage",
"times",
"for",
"an",
"ergodic",
"transition",
"probability",
"matrix",
"."
] | 13fae6c18933614be78e91a6b5060693bea33a04 | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/ergodic.py#L62-L118 | train |
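A sanity check tying `fmpt` back to `steady_state`: for an ergodic chain the recurrence time on the diagonal, M[i][i], equals 1/π[i], which for the Oz matrix gives [2.5, 5.0, 2.5] both ways.

```python
import numpy as np
from giddy.ergodic import fmpt, steady_state

P = np.array([[0.50, 0.25, 0.25],
              [0.50, 0.00, 0.50],
              [0.25, 0.25, 0.50]])
print(np.diag(fmpt(P)))       # ~[2.5, 5.0, 2.5]: recurrence times
print(1.0 / steady_state(P))  # ~[2.5, 5.0, 2.5]: reciprocal steady-state probabilities
```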
pysal/giddy | giddy/ergodic.py | var_fmpt | def var_fmpt(P):
"""
Variances of first mean passage times for an ergodic transition
probability matrix.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
: array
(k, k), elements are the variances for the number of intervals
required for a chain starting in state i to first enter state j.
Examples
--------
>>> import numpy as np
>>> from giddy.ergodic import var_fmpt
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> vfm=var_fmpt(p)
>>> vfm
array([[ 5.58333333, 12. , 6.88888889],
[ 6.22222222, 12. , 6.22222222],
[ 6.88888889, 12. , 5.58333333]])
Notes
-----
Uses formulation (and examples on p. 83) in :cite:`Kemeny1967`.
"""
P = np.matrix(P)
A = P ** 1000
n, k = A.shape
I = np.identity(k)
Z = la.inv(I - P + A)
E = np.ones_like(Z)
D = np.diag(1. / np.diag(A))
Zdg = np.diag(np.diag(Z))
M = (I - Z + E * Zdg) * D
ZM = Z * M
ZMdg = np.diag(np.diag(ZM))
W = M * (2 * Zdg * D - I) + 2 * (ZM - E * ZMdg)
return np.array(W - np.multiply(M, M)) | python | def var_fmpt(P):
"""
Variances of first mean passage times for an ergodic transition
probability matrix.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
: array
(k, k), elements are the variances for the number of intervals
required for a chain starting in state i to first enter state j.
Examples
--------
>>> import numpy as np
>>> from giddy.ergodic import var_fmpt
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> vfm=var_fmpt(p)
>>> vfm
array([[ 5.58333333, 12. , 6.88888889],
[ 6.22222222, 12. , 6.22222222],
[ 6.88888889, 12. , 5.58333333]])
Notes
-----
Uses formulation (and examples on p. 83) in :cite:`Kemeny1967`.
"""
P = np.matrix(P)
A = P ** 1000
n, k = A.shape
I = np.identity(k)
Z = la.inv(I - P + A)
E = np.ones_like(Z)
D = np.diag(1. / np.diag(A))
Zdg = np.diag(np.diag(Z))
M = (I - Z + E * Zdg) * D
ZM = Z * M
ZMdg = np.diag(np.diag(ZM))
W = M * (2 * Zdg * D - I) + 2 * (ZM - E * ZMdg)
return np.array(W - np.multiply(M, M)) | [
"def",
"var_fmpt",
"(",
"P",
")",
":",
"P",
"=",
"np",
".",
"matrix",
"(",
"P",
")",
"A",
"=",
"P",
"**",
"1000",
"n",
",",
"k",
"=",
"A",
".",
"shape",
"I",
"=",
"np",
".",
"identity",
"(",
"k",
")",
"Z",
"=",
"la",
".",
"inv",
"(",
"I... | Variances of first mean passage times for an ergodic transition
probability matrix.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
: array
(k, k), elements are the variances for the number of intervals
required for a chain starting in state i to first enter state j.
Examples
--------
>>> import numpy as np
>>> from giddy.ergodic import var_fmpt
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> vfm=var_fmpt(p)
>>> vfm
array([[ 5.58333333, 12. , 6.88888889],
[ 6.22222222, 12. , 6.22222222],
[ 6.88888889, 12. , 5.58333333]])
Notes
-----
Uses formulation (and examples on p. 83) in :cite:`Kemeny1967`. | [
"Variances",
"of",
"first",
"mean",
"passage",
"times",
"for",
"an",
"ergodic",
"transition",
"probability",
"matrix",
"."
] | 13fae6c18933614be78e91a6b5060693bea33a04 | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/ergodic.py#L121-L167 | train |
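Since `var_fmpt` returns variances, the standard deviation of each first passage time is just the elementwise square root; with the Oz matrix, the 4-day mean wait for a nice day (from the `fmpt` docstring) comes with a spread of about sqrt(12) ≈ 3.46 days.

```python
import numpy as np
from giddy.ergodic import var_fmpt

P = np.array([[0.50, 0.25, 0.25],
              [0.50, 0.00, 0.50],
              [0.25, 0.25, 0.50]])
print(np.sqrt(var_fmpt(P)))  # elementwise std. dev. of first passage times
```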
pyinvoke/invocations | invocations/packaging/release.py | _converge | def _converge(c):
"""
Examine world state, returning data on what needs updating for release.
:param c: Invoke ``Context`` object or subclass.
:returns:
Two dicts (technically, dict subclasses, which allow attribute access),
``actions`` and ``state`` (in that order.)
``actions`` maps release component names to variables (usually class
constants) determining what action should be taken for that component:
- ``changelog``: members of `.Changelog` such as ``NEEDS_RELEASE`` or
``OKAY``.
- ``version``: members of `.VersionFile`.
``state`` contains the data used to calculate the actions, in case the
caller wants to do further analysis:
- ``branch``: the name of the checked-out Git branch.
- ``changelog``: the parsed project changelog, a `dict` of releases.
- ``release_type``: what type of release the branch appears to be (will
be a member of `.Release` such as ``Release.BUGFIX``.)
- ``latest_line_release``: the latest changelog release found for
current release type/line.
- ``latest_overall_release``: the absolute most recent release entry.
Useful for determining next minor/feature release.
- ``current_version``: the version string as found in the package's
``__version__``.
"""
#
# Data/state gathering
#
# Get data about current repo context: what branch are we on & what kind of
# release does it appear to represent?
branch, release_type = _release_line(c)
# Short-circuit if type is undefined; we can't do useful work for that.
if release_type is Release.UNDEFINED:
raise UndefinedReleaseType(
"You don't seem to be on a release-related branch; "
"why are you trying to cut a release?"
)
# Parse our changelog so we can tell what's released and what's not.
# TODO: below needs to go in something doc-y somewhere; having it in a
# non-user-facing subroutine docstring isn't visible enough.
"""
.. note::
Requires that one sets the ``packaging.changelog_file`` configuration
option; it should be a relative or absolute path to your
``changelog.rst`` (or whatever it's named in your project).
"""
# TODO: allow skipping changelog if not using Releases since we have no
# other good way of detecting whether a changelog needs/got an update.
# TODO: chdir to sphinx.source, import conf.py, look at
# releases_changelog_name - that way it will honor that setting and we can
# ditch this explicit one instead. (and the docstring above)
changelog = parse_changelog(
c.packaging.changelog_file, load_extensions=True
)
# Get latest appropriate changelog release and any unreleased issues, for
# current line
line_release, issues = _release_and_issues(changelog, branch, release_type)
# Also get latest overall release, sometimes that matters (usually only
# when latest *appropriate* release doesn't exist yet)
overall_release = _versions_from_changelog(changelog)[-1]
# Obtain the project's main package & its version data
current_version = load_version(c)
# Grab all git tags
tags = _get_tags(c)
state = Lexicon(
{
"branch": branch,
"release_type": release_type,
"changelog": changelog,
"latest_line_release": Version(line_release)
if line_release
else None,
"latest_overall_release": overall_release, # already a Version
"unreleased_issues": issues,
"current_version": Version(current_version),
"tags": tags,
}
)
# Version number determinations:
# - latest actually-released version
# - the next version after that for current branch
# - which of the two is the actual version we're looking to converge on,
# depends on current changelog state.
latest_version, next_version = _latest_and_next_version(state)
state.latest_version = latest_version
state.next_version = next_version
state.expected_version = latest_version
if state.unreleased_issues:
state.expected_version = next_version
#
# Logic determination / convergence
#
actions = Lexicon()
# Changelog: needs new release entry if there are any unreleased issues for
# current branch's line.
# TODO: annotate with number of released issues [of each type?] - so not
# just "up to date!" but "all set (will release 3 features & 5 bugs)"
actions.changelog = Changelog.OKAY
if release_type in (Release.BUGFIX, Release.FEATURE) and issues:
actions.changelog = Changelog.NEEDS_RELEASE
# Version file: simply whether version file equals the target version.
# TODO: corner case of 'version file is >1 release in the future', but
# that's still wrong, just would be a different 'bad' status output.
actions.version = VersionFile.OKAY
if state.current_version != state.expected_version:
actions.version = VersionFile.NEEDS_BUMP
# Git tag: similar to version file, except the check is existence of tag
# instead of comparison to file contents. We even reuse the
# 'expected_version' variable wholesale.
actions.tag = Tag.OKAY
if state.expected_version not in state.tags:
actions.tag = Tag.NEEDS_CUTTING
#
# Return
#
return actions, state | python | def _converge(c):
"""
Examine world state, returning data on what needs updating for release.
:param c: Invoke ``Context`` object or subclass.
:returns:
Two dicts (technically, dict subclasses, which allow attribute access),
``actions`` and ``state`` (in that order.)
``actions`` maps release component names to variables (usually class
constants) determining what action should be taken for that component:
- ``changelog``: members of `.Changelog` such as ``NEEDS_RELEASE`` or
``OKAY``.
- ``version``: members of `.VersionFile`.
``state`` contains the data used to calculate the actions, in case the
caller wants to do further analysis:
- ``branch``: the name of the checked-out Git branch.
- ``changelog``: the parsed project changelog, a `dict` of releases.
- ``release_type``: what type of release the branch appears to be (will
be a member of `.Release` such as ``Release.BUGFIX``.)
- ``latest_line_release``: the latest changelog release found for
current release type/line.
- ``latest_overall_release``: the absolute most recent release entry.
Useful for determining next minor/feature release.
- ``current_version``: the version string as found in the package's
``__version__``.
"""
#
# Data/state gathering
#
# Get data about current repo context: what branch are we on & what kind of
# release does it appear to represent?
branch, release_type = _release_line(c)
# Short-circuit if type is undefined; we can't do useful work for that.
if release_type is Release.UNDEFINED:
raise UndefinedReleaseType(
"You don't seem to be on a release-related branch; "
"why are you trying to cut a release?"
)
# Parse our changelog so we can tell what's released and what's not.
# TODO: below needs to go in something doc-y somewhere; having it in a
# non-user-facing subroutine docstring isn't visible enough.
"""
.. note::
Requires that one sets the ``packaging.changelog_file`` configuration
option; it should be a relative or absolute path to your
``changelog.rst`` (or whatever it's named in your project).
"""
# TODO: allow skipping changelog if not using Releases since we have no
# other good way of detecting whether a changelog needs/got an update.
# TODO: chdir to sphinx.source, import conf.py, look at
# releases_changelog_name - that way it will honor that setting and we can
# ditch this explicit one instead. (and the docstring above)
changelog = parse_changelog(
c.packaging.changelog_file, load_extensions=True
)
# Get latest appropriate changelog release and any unreleased issues, for
# current line
line_release, issues = _release_and_issues(changelog, branch, release_type)
# Also get latest overall release, sometimes that matters (usually only
# when latest *appropriate* release doesn't exist yet)
overall_release = _versions_from_changelog(changelog)[-1]
# Obtain the project's main package & its version data
current_version = load_version(c)
# Grab all git tags
tags = _get_tags(c)
state = Lexicon(
{
"branch": branch,
"release_type": release_type,
"changelog": changelog,
"latest_line_release": Version(line_release)
if line_release
else None,
"latest_overall_release": overall_release, # already a Version
"unreleased_issues": issues,
"current_version": Version(current_version),
"tags": tags,
}
)
# Version number determinations:
# - latest actually-released version
# - the next version after that for current branch
# - which of the two is the actual version we're looking to converge on,
# depends on current changelog state.
latest_version, next_version = _latest_and_next_version(state)
state.latest_version = latest_version
state.next_version = next_version
state.expected_version = latest_version
if state.unreleased_issues:
state.expected_version = next_version
#
# Logic determination / convergence
#
actions = Lexicon()
# Changelog: needs new release entry if there are any unreleased issues for
# current branch's line.
# TODO: annotate with number of released issues [of each type?] - so not
# just "up to date!" but "all set (will release 3 features & 5 bugs)"
actions.changelog = Changelog.OKAY
if release_type in (Release.BUGFIX, Release.FEATURE) and issues:
actions.changelog = Changelog.NEEDS_RELEASE
# Version file: simply whether version file equals the target version.
# TODO: corner case of 'version file is >1 release in the future', but
# that's still wrong, just would be a different 'bad' status output.
actions.version = VersionFile.OKAY
if state.current_version != state.expected_version:
actions.version = VersionFile.NEEDS_BUMP
# Git tag: similar to version file, except the check is existence of tag
# instead of comparison to file contents. We even reuse the
# 'expected_version' variable wholesale.
actions.tag = Tag.OKAY
if state.expected_version not in state.tags:
actions.tag = Tag.NEEDS_CUTTING
#
# Return
#
return actions, state | [
"def",
"_converge",
"(",
"c",
")",
":",
"#",
"# Data/state gathering",
"#",
"# Get data about current repo context: what branch are we on & what kind of",
"# release does it appear to represent?",
"branch",
",",
"release_type",
"=",
"_release_line",
"(",
"c",
")",
"# Short-circ... | Examine world state, returning data on what needs updating for release.
:param c: Invoke ``Context`` object or subclass.
:returns:
Two dicts (technically, dict subclasses, which allow attribute access),
``actions`` and ``state`` (in that order.)
``actions`` maps release component names to variables (usually class
constants) determining what action should be taken for that component:
- ``changelog``: members of `.Changelog` such as ``NEEDS_RELEASE`` or
``OKAY``.
- ``version``: members of `.VersionFile`.
``state`` contains the data used to calculate the actions, in case the
caller wants to do further analysis:
- ``branch``: the name of the checked-out Git branch.
- ``changelog``: the parsed project changelog, a `dict` of releases.
- ``release_type``: what type of release the branch appears to be (will
be a member of `.Release` such as ``Release.BUGFIX``.)
- ``latest_line_release``: the latest changelog release found for
current release type/line.
- ``latest_overall_release``: the absolute most recent release entry.
Useful for determining next minor/feature release.
- ``current_version``: the version string as found in the package's
``__version__``. | [
"Examine",
"world",
"state",
"returning",
"data",
"on",
"what",
"needs",
"updating",
"for",
"release",
"."
] | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/packaging/release.py#L110-L240 | train |
pyinvoke/invocations | invocations/packaging/release.py | status | def status(c):
"""
Print current release (version, changelog, tag, etc) status.
Doubles as a subroutine, returning the return values from its inner call to
``_converge`` (an ``(actions, state)`` two-tuple of Lexicons).
"""
# TODO: wants some holistic "you don't actually HAVE any changes to
# release" final status - i.e. all steps were at no-op status.
actions, state = _converge(c)
table = []
# NOTE: explicit 'sensible' sort (in rough order of how things are usually
# modified, and/or which depend on one another, e.g. tags are near the end)
for component in "changelog version tag".split():
table.append((component.capitalize(), actions[component].value))
print(tabulate(table))
return actions, state | python | def status(c):
"""
Print current release (version, changelog, tag, etc) status.
Doubles as a subroutine, returning the return values from its inner call to
``_converge`` (an ``(actions, state)`` two-tuple of Lexicons).
"""
# TODO: wants some holistic "you don't actually HAVE any changes to
# release" final status - i.e. all steps were at no-op status.
actions, state = _converge(c)
table = []
# NOTE: explicit 'sensible' sort (in rough order of how things are usually
# modified, and/or which depend on one another, e.g. tags are near the end)
for component in "changelog version tag".split():
table.append((component.capitalize(), actions[component].value))
print(tabulate(table))
return actions, state | [
"def",
"status",
"(",
"c",
")",
":",
"# TODO: wants some holistic \"you don't actually HAVE any changes to",
"# release\" final status - i.e. all steps were at no-op status.",
"actions",
",",
"state",
"=",
"_converge",
"(",
"c",
")",
"table",
"=",
"[",
"]",
"# NOTE: explicit ... | Print current release (version, changelog, tag, etc) status.
Doubles as a subroutine, returning the return values from its inner call to
``_converge`` (an ``(actions, state)`` two-tuple of Lexicons). | [
"Print",
"current",
"release",
"(",
"version",
"changelog",
"tag",
"etc",
")",
"status",
"."
] | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/packaging/release.py#L244-L260 | train |
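The table printed by status is a plain two-column tabulate call; a minimal sketch of the same shape, with made-up action values (the real ones come from the Changelog/VersionFile/Tag enum members):
import tabulate as tabulate_mod
rows = [
    ("Changelog", "needs release"),   # hypothetical action values
    ("Version", "okay"),
    ("Tag", "needs cutting"),
]
print(tabulate_mod.tabulate(rows))  # prints an aligned component -> action table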
pyinvoke/invocations | invocations/packaging/release.py | prepare | def prepare(c):
"""
Edit changelog & version, git commit, and git tag, to set up for release.
"""
# Print dry-run/status/actions-to-take data & grab programmatic result
# TODO: maybe expand the enum-based stuff to have values that split up
# textual description, command string, etc. See the TODO up by their
# definition too, re: just making them non-enum classes period.
# TODO: otherwise, we at least want derived eg changelog/version/etc paths
# transmitted from status() into here...
actions, state = status(c)
# TODO: unless nothing-to-do in which case just say that & exit 0
if not confirm("Take the above actions?"):
sys.exit("Aborting.")
# TODO: factor out what it means to edit a file:
# - $EDITOR or explicit expansion of it in case no shell involved
# - pty=True and hide=False, because otherwise things can be bad
# - what else?
# Changelog! (pty for non shite editing, eg vim sure won't like non-pty)
if actions.changelog is Changelog.NEEDS_RELEASE:
# TODO: identify top of list and inject a ready-made line? Requires vim
# assumption...GREAT opportunity for class/method based tasks!
cmd = "$EDITOR {0.packaging.changelog_file}".format(c)
c.run(cmd, pty=True, hide=False)
# TODO: add a step for checking reqs.txt / setup.py vs virtualenv contents
# Version file!
if actions.version == VersionFile.NEEDS_BUMP:
# TODO: suggest the bump and/or overwrite the entire file? Assumes a
        # specific file format. Could be bad for users who expose __version__
# but have other contents as well.
version_file = os.path.join(
_find_package(c),
c.packaging.get("version_module", "_version") + ".py",
)
cmd = "$EDITOR {0}".format(version_file)
c.run(cmd, pty=True, hide=False)
if actions.tag == Tag.NEEDS_CUTTING:
# Commit, if necessary, so the tag includes everything.
# NOTE: this strips out untracked files. effort.
cmd = 'git status --porcelain | egrep -v "^\\?"'
if c.run(cmd, hide=True, warn=True).ok:
c.run(
'git commit -am "Cut {0}"'.format(state.expected_version),
hide=False,
)
# Tag!
c.run("git tag {0}".format(state.expected_version), hide=False) | python | def prepare(c):
"""
Edit changelog & version, git commit, and git tag, to set up for release.
"""
# Print dry-run/status/actions-to-take data & grab programmatic result
# TODO: maybe expand the enum-based stuff to have values that split up
# textual description, command string, etc. See the TODO up by their
# definition too, re: just making them non-enum classes period.
# TODO: otherwise, we at least want derived eg changelog/version/etc paths
# transmitted from status() into here...
actions, state = status(c)
# TODO: unless nothing-to-do in which case just say that & exit 0
if not confirm("Take the above actions?"):
sys.exit("Aborting.")
# TODO: factor out what it means to edit a file:
# - $EDITOR or explicit expansion of it in case no shell involved
# - pty=True and hide=False, because otherwise things can be bad
# - what else?
# Changelog! (pty for non shite editing, eg vim sure won't like non-pty)
if actions.changelog is Changelog.NEEDS_RELEASE:
# TODO: identify top of list and inject a ready-made line? Requires vim
# assumption...GREAT opportunity for class/method based tasks!
cmd = "$EDITOR {0.packaging.changelog_file}".format(c)
c.run(cmd, pty=True, hide=False)
# TODO: add a step for checking reqs.txt / setup.py vs virtualenv contents
# Version file!
if actions.version == VersionFile.NEEDS_BUMP:
# TODO: suggest the bump and/or overwrite the entire file? Assumes a
        # specific file format. Could be bad for users who expose __version__
# but have other contents as well.
version_file = os.path.join(
_find_package(c),
c.packaging.get("version_module", "_version") + ".py",
)
cmd = "$EDITOR {0}".format(version_file)
c.run(cmd, pty=True, hide=False)
if actions.tag == Tag.NEEDS_CUTTING:
# Commit, if necessary, so the tag includes everything.
# NOTE: this strips out untracked files. effort.
cmd = 'git status --porcelain | egrep -v "^\\?"'
if c.run(cmd, hide=True, warn=True).ok:
c.run(
'git commit -am "Cut {0}"'.format(state.expected_version),
hide=False,
)
# Tag!
c.run("git tag {0}".format(state.expected_version), hide=False) | [
"def",
"prepare",
"(",
"c",
")",
":",
"# Print dry-run/status/actions-to-take data & grab programmatic result",
"# TODO: maybe expand the enum-based stuff to have values that split up",
"# textual description, command string, etc. See the TODO up by their",
"# definition too, re: just making them ... | Edit changelog & version, git commit, and git tag, to set up for release. | [
"Edit",
"changelog",
"&",
"version",
"git",
"commit",
"and",
"git",
"tag",
"to",
"set",
"up",
"for",
"release",
"."
] | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/packaging/release.py#L277-L325 | train |
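The commit-if-dirty check in prepare leans on grep's exit status: git status --porcelain | egrep -v "^\?" succeeds only when at least one tracked file has pending changes, because untracked entries (lines starting with ?) are filtered out and egrep fails on empty input. A standalone sketch of the same test, outside invoke:
import subprocess
# True when at least one *tracked* file is modified/staged; untracked-only
# trees leave egrep with nothing to print, so it exits non-zero.
dirty = subprocess.run(
    'git status --porcelain | egrep -v "^\\?"',
    shell=True, stdout=subprocess.DEVNULL,
).returncode == 0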
pyinvoke/invocations | invocations/packaging/release.py | _release_line | def _release_line(c):
"""
Examine current repo state to determine what type of release to prep.
:returns:
A two-tuple of ``(branch-name, line-type)`` where:
- ``branch-name`` is the current branch name, e.g. ``1.1``, ``master``,
``gobbledygook`` (or, usually, ``HEAD`` if not on a branch).
- ``line-type`` is a symbolic member of `.Release` representing what
"type" of release the line appears to be for:
- ``Release.BUGFIX`` if on a bugfix/stable release line, e.g.
``1.1``.
- ``Release.FEATURE`` if on a feature-release branch (typically
``master``).
- ``Release.UNDEFINED`` if neither of those appears to apply
(usually means on some unmerged feature/dev branch).
"""
# TODO: I don't _think_ this technically overlaps with Releases (because
# that only ever deals with changelog contents, and therefore full release
# version numbers) but in case it does, move it there sometime.
# TODO: this and similar calls in this module may want to be given an
# explicit pointer-to-git-repo option (i.e. if run from outside project
# context).
# TODO: major releases? or are they big enough events we don't need to
# bother with the script? Also just hard to gauge - when is master the next
# 1.x feature vs 2.0?
branch = c.run("git rev-parse --abbrev-ref HEAD", hide=True).stdout.strip()
type_ = Release.UNDEFINED
if BUGFIX_RE.match(branch):
type_ = Release.BUGFIX
if FEATURE_RE.match(branch):
type_ = Release.FEATURE
return branch, type_ | python | def _release_line(c):
"""
Examine current repo state to determine what type of release to prep.
:returns:
A two-tuple of ``(branch-name, line-type)`` where:
- ``branch-name`` is the current branch name, e.g. ``1.1``, ``master``,
``gobbledygook`` (or, usually, ``HEAD`` if not on a branch).
- ``line-type`` is a symbolic member of `.Release` representing what
"type" of release the line appears to be for:
- ``Release.BUGFIX`` if on a bugfix/stable release line, e.g.
``1.1``.
- ``Release.FEATURE`` if on a feature-release branch (typically
``master``).
- ``Release.UNDEFINED`` if neither of those appears to apply
(usually means on some unmerged feature/dev branch).
"""
# TODO: I don't _think_ this technically overlaps with Releases (because
# that only ever deals with changelog contents, and therefore full release
# version numbers) but in case it does, move it there sometime.
# TODO: this and similar calls in this module may want to be given an
# explicit pointer-to-git-repo option (i.e. if run from outside project
# context).
# TODO: major releases? or are they big enough events we don't need to
# bother with the script? Also just hard to gauge - when is master the next
# 1.x feature vs 2.0?
branch = c.run("git rev-parse --abbrev-ref HEAD", hide=True).stdout.strip()
type_ = Release.UNDEFINED
if BUGFIX_RE.match(branch):
type_ = Release.BUGFIX
if FEATURE_RE.match(branch):
type_ = Release.FEATURE
return branch, type_ | [
"def",
"_release_line",
"(",
"c",
")",
":",
"# TODO: I don't _think_ this technically overlaps with Releases (because",
"# that only ever deals with changelog contents, and therefore full release",
"# version numbers) but in case it does, move it there sometime.",
"# TODO: this and similar calls i... | Examine current repo state to determine what type of release to prep.
:returns:
A two-tuple of ``(branch-name, line-type)`` where:
- ``branch-name`` is the current branch name, e.g. ``1.1``, ``master``,
``gobbledygook`` (or, usually, ``HEAD`` if not on a branch).
- ``line-type`` is a symbolic member of `.Release` representing what
"type" of release the line appears to be for:
- ``Release.BUGFIX`` if on a bugfix/stable release line, e.g.
``1.1``.
- ``Release.FEATURE`` if on a feature-release branch (typically
``master``).
- ``Release.UNDEFINED`` if neither of those appears to apply
(usually means on some unmerged feature/dev branch). | [
"Examine",
"current",
"repo",
"state",
"to",
"determine",
"what",
"type",
"of",
"release",
"to",
"prep",
"."
] | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/packaging/release.py#L330-L364 | train |
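BUGFIX_RE and FEATURE_RE are defined elsewhere in the module; purely as an illustration (the real patterns may differ), the branch classification could look like:
import re
# Hypothetical stand-ins for the module-level patterns:
BUGFIX_RE_EXAMPLE = re.compile(r"^\d+\.\d+$")        # e.g. "1.1", "2.0"
FEATURE_RE_EXAMPLE = re.compile(r"^(master|main)$")
for branch in ("1.1", "master", "some-feature"):
    if BUGFIX_RE_EXAMPLE.match(branch):
        kind = "BUGFIX"
    elif FEATURE_RE_EXAMPLE.match(branch):
        kind = "FEATURE"
    else:
        kind = "UNDEFINED"
    print(branch, "->", kind)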
pyinvoke/invocations | invocations/packaging/release.py | _versions_from_changelog | def _versions_from_changelog(changelog):
"""
Return all released versions from given ``changelog``, sorted.
:param dict changelog:
A changelog dict as returned by ``releases.util.parse_changelog``.
:returns: A sorted list of `semantic_version.Version` objects.
"""
versions = [Version(x) for x in changelog if BUGFIX_RELEASE_RE.match(x)]
return sorted(versions) | python | def _versions_from_changelog(changelog):
"""
Return all released versions from given ``changelog``, sorted.
:param dict changelog:
A changelog dict as returned by ``releases.util.parse_changelog``.
:returns: A sorted list of `semantic_version.Version` objects.
"""
versions = [Version(x) for x in changelog if BUGFIX_RELEASE_RE.match(x)]
return sorted(versions) | [
"def",
"_versions_from_changelog",
"(",
"changelog",
")",
":",
"versions",
"=",
"[",
"Version",
"(",
"x",
")",
"for",
"x",
"in",
"changelog",
"if",
"BUGFIX_RELEASE_RE",
".",
"match",
"(",
"x",
")",
"]",
"return",
"sorted",
"(",
"versions",
")"
] | Return all released versions from given ``changelog``, sorted.
:param dict changelog:
A changelog dict as returned by ``releases.util.parse_changelog``.
:returns: A sorted list of `semantic_version.Version` objects. | [
"Return",
"all",
"released",
"versions",
"from",
"given",
"changelog",
"sorted",
"."
] | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/packaging/release.py#L381-L391 | train |
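A small hand-built example of the filter-and-sort behaviour, assuming BUGFIX_RELEASE_RE matches plain x.y.z version strings (the parsed changelog from releases.util.parse_changelog also contains non-version buckets, which are skipped):
import re
from semantic_version import Version
RELEASE_RE_EXAMPLE = re.compile(r"^\d+\.\d+\.\d+$")  # stand-in for BUGFIX_RELEASE_RE
changelog = {
    "1.0.1": [], "1.1.0": [], "1.0.2": [],
    "unreleased_1.x_bugfix": [],   # non-version bucket, skipped
}
versions = sorted(Version(x) for x in changelog if RELEASE_RE_EXAMPLE.match(x))
# -> [Version('1.0.1'), Version('1.0.2'), Version('1.1.0')], sorted semantically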
pyinvoke/invocations | invocations/packaging/release.py | _release_and_issues | def _release_and_issues(changelog, branch, release_type):
"""
Return most recent branch-appropriate release, if any, and its contents.
:param dict changelog:
Changelog contents, as returned by ``releases.util.parse_changelog``.
:param str branch:
Branch name.
:param release_type:
Member of `Release`, e.g. `Release.FEATURE`.
:returns:
Two-tuple of release (``str``) and issues (``list`` of issue numbers.)
If there is no latest release for the given branch (e.g. if it's a
feature or master branch), it will be ``None``.
"""
# Bugfix lines just use the branch to find issues
bucket = branch
# Features need a bit more logic
if release_type is Release.FEATURE:
bucket = _latest_feature_bucket(changelog)
# Issues is simply what's in the bucket
issues = changelog[bucket]
# Latest release is undefined for feature lines
release = None
# And requires scanning changelog, for bugfix lines
if release_type is Release.BUGFIX:
versions = [text_type(x) for x in _versions_from_changelog(changelog)]
release = [x for x in versions if x.startswith(bucket)][-1]
return release, issues | python | def _release_and_issues(changelog, branch, release_type):
"""
Return most recent branch-appropriate release, if any, and its contents.
:param dict changelog:
Changelog contents, as returned by ``releases.util.parse_changelog``.
:param str branch:
Branch name.
:param release_type:
Member of `Release`, e.g. `Release.FEATURE`.
:returns:
Two-tuple of release (``str``) and issues (``list`` of issue numbers.)
If there is no latest release for the given branch (e.g. if it's a
feature or master branch), it will be ``None``.
"""
# Bugfix lines just use the branch to find issues
bucket = branch
# Features need a bit more logic
if release_type is Release.FEATURE:
bucket = _latest_feature_bucket(changelog)
# Issues is simply what's in the bucket
issues = changelog[bucket]
# Latest release is undefined for feature lines
release = None
# And requires scanning changelog, for bugfix lines
if release_type is Release.BUGFIX:
versions = [text_type(x) for x in _versions_from_changelog(changelog)]
release = [x for x in versions if x.startswith(bucket)][-1]
return release, issues | [
"def",
"_release_and_issues",
"(",
"changelog",
",",
"branch",
",",
"release_type",
")",
":",
"# Bugfix lines just use the branch to find issues",
"bucket",
"=",
"branch",
"# Features need a bit more logic",
"if",
"release_type",
"is",
"Release",
".",
"FEATURE",
":",
"buc... | Return most recent branch-appropriate release, if any, and its contents.
:param dict changelog:
Changelog contents, as returned by ``releases.util.parse_changelog``.
:param str branch:
Branch name.
:param release_type:
Member of `Release`, e.g. `Release.FEATURE`.
:returns:
Two-tuple of release (``str``) and issues (``list`` of issue numbers.)
If there is no latest release for the given branch (e.g. if it's a
feature or master branch), it will be ``None``. | [
"Return",
"most",
"recent",
"branch",
"-",
"appropriate",
"release",
"if",
"any",
"and",
"its",
"contents",
"."
] | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/packaging/release.py#L395-L427 | train |
pyinvoke/invocations | invocations/packaging/release.py | _get_tags | def _get_tags(c):
"""
Return sorted list of release-style tags as semver objects.
"""
tags_ = []
for tagstr in c.run("git tag", hide=True).stdout.strip().split("\n"):
try:
tags_.append(Version(tagstr))
# Ignore anything non-semver; most of the time they'll be non-release
# tags, and even if they are, we can't reason about anything
# non-semver anyways.
# TODO: perhaps log these to DEBUG
except ValueError:
pass
# Version objects sort semantically
return sorted(tags_) | python | def _get_tags(c):
"""
Return sorted list of release-style tags as semver objects.
"""
tags_ = []
for tagstr in c.run("git tag", hide=True).stdout.strip().split("\n"):
try:
tags_.append(Version(tagstr))
# Ignore anything non-semver; most of the time they'll be non-release
# tags, and even if they are, we can't reason about anything
# non-semver anyways.
# TODO: perhaps log these to DEBUG
except ValueError:
pass
# Version objects sort semantically
return sorted(tags_) | [
"def",
"_get_tags",
"(",
"c",
")",
":",
"tags_",
"=",
"[",
"]",
"for",
"tagstr",
"in",
"c",
".",
"run",
"(",
"\"git tag\"",
",",
"hide",
"=",
"True",
")",
".",
"stdout",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"try",
":"... | Return sorted list of release-style tags as semver objects. | [
"Return",
"sorted",
"list",
"of",
"release",
"-",
"style",
"tags",
"as",
"semver",
"objects",
"."
] | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/packaging/release.py#L430-L445 | train |
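The same keep-only-semver filtering, decoupled from git and invoke for illustration; semantic_version.Version raises ValueError for anything that is not strict semver, which is exactly what the try/except above relies on:
from semantic_version import Version
def semver_tags_only(tag_strings):
    out = []
    for tagstr in tag_strings:
        try:
            out.append(Version(tagstr))
        except ValueError:   # e.g. "docs-freeze" or "v1.0.0" (leading 'v')
            pass
    return sorted(out)
semver_tags_only(["1.2.0", "docs-freeze", "1.10.0", "1.9.1"])
# -> [Version('1.2.0'), Version('1.9.1'), Version('1.10.0')]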
pyinvoke/invocations | invocations/packaging/release.py | _find_package | def _find_package(c):
"""
Try to find 'the' One True Package for this project.
Mostly for obtaining the ``_version`` file within it.
Uses the ``packaging.package`` config setting if defined. If not defined,
fallback is to look for a single top-level Python package (directory
containing ``__init__.py``). (This search ignores a small blacklist of
directories like ``tests/``, ``vendor/`` etc.)
"""
# TODO: is there a way to get this from the same place setup.py does w/o
# setup.py barfing (since setup() runs at import time and assumes CLI use)?
configured_value = c.get("packaging", {}).get("package", None)
if configured_value:
return configured_value
# TODO: tests covering this stuff here (most logic tests simply supply
# config above)
packages = [
path
for path in os.listdir(".")
if (
os.path.isdir(path)
and os.path.exists(os.path.join(path, "__init__.py"))
and path not in ("tests", "integration", "sites", "vendor")
)
]
if not packages:
sys.exit("Unable to find a local Python package!")
if len(packages) > 1:
sys.exit("Found multiple Python packages: {0!r}".format(packages))
return packages[0] | python | def _find_package(c):
"""
Try to find 'the' One True Package for this project.
Mostly for obtaining the ``_version`` file within it.
Uses the ``packaging.package`` config setting if defined. If not defined,
fallback is to look for a single top-level Python package (directory
containing ``__init__.py``). (This search ignores a small blacklist of
directories like ``tests/``, ``vendor/`` etc.)
"""
# TODO: is there a way to get this from the same place setup.py does w/o
# setup.py barfing (since setup() runs at import time and assumes CLI use)?
configured_value = c.get("packaging", {}).get("package", None)
if configured_value:
return configured_value
# TODO: tests covering this stuff here (most logic tests simply supply
# config above)
packages = [
path
for path in os.listdir(".")
if (
os.path.isdir(path)
and os.path.exists(os.path.join(path, "__init__.py"))
and path not in ("tests", "integration", "sites", "vendor")
)
]
if not packages:
sys.exit("Unable to find a local Python package!")
if len(packages) > 1:
sys.exit("Found multiple Python packages: {0!r}".format(packages))
return packages[0] | [
"def",
"_find_package",
"(",
"c",
")",
":",
"# TODO: is there a way to get this from the same place setup.py does w/o",
"# setup.py barfing (since setup() runs at import time and assumes CLI use)?",
"configured_value",
"=",
"c",
".",
"get",
"(",
"\"packaging\"",
",",
"{",
"}",
")... | Try to find 'the' One True Package for this project.
Mostly for obtaining the ``_version`` file within it.
Uses the ``packaging.package`` config setting if defined. If not defined,
fallback is to look for a single top-level Python package (directory
containing ``__init__.py``). (This search ignores a small blacklist of
directories like ``tests/``, ``vendor/`` etc.) | [
"Try",
"to",
"find",
"the",
"One",
"True",
"Package",
"for",
"this",
"project",
"."
] | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/packaging/release.py#L471-L502 | train |
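The fallback heuristic boils down to "exactly one non-blacklisted top-level directory containing __init__.py"; stripped of the invoke Context it is roughly:
import os
SKIP = ("tests", "integration", "sites", "vendor")
candidates = [
    p for p in os.listdir(".")
    if os.path.isdir(p)
    and os.path.exists(os.path.join(p, "__init__.py"))
    and p not in SKIP
]
# _find_package() exits unless len(candidates) == 1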
pyinvoke/invocations | invocations/packaging/release.py | build | def build(c, sdist=True, wheel=False, directory=None, python=None, clean=True):
"""
Build sdist and/or wheel archives, optionally in a temp base directory.
All parameters save ``directory`` honor config settings of the same name,
under the ``packaging`` tree. E.g. say ``.configure({'packaging': {'wheel':
True}})`` to force building wheel archives by default.
:param bool sdist:
Whether to build sdists/tgzs.
:param bool wheel:
Whether to build wheels (requires the ``wheel`` package from PyPI).
:param str directory:
Allows specifying a specific directory in which to perform builds and
dist creation. Useful when running as a subroutine from ``publish``
which sets up a temporary directory.
Two subdirectories will be created within this directory: one for
builds, and one for the dist archives.
When ``None`` or another false-y value, the current working directory
is used (and thus, local ``dist/`` and ``build/`` subdirectories).
:param str python:
Which Python binary to use when invoking ``setup.py``.
Defaults to just ``python``.
If ``wheel=True``, then this Python must have ``wheel`` installed in
its default ``site-packages`` (or similar) location.
:param bool clean:
Whether to clean out the local ``build/`` folder before building.
"""
# Config hooks
config = c.config.get("packaging", {})
# TODO: update defaults to be None, then flip the below so non-None runtime
# beats config.
sdist = config.get("sdist", sdist)
wheel = config.get("wheel", wheel)
python = config.get("python", python or "python") # buffalo buffalo
# Sanity
if not sdist and not wheel:
sys.exit(
"You said no sdists and no wheels..."
"what DO you want to build exactly?"
)
# Directory path/arg logic
if not directory:
directory = "" # os.path.join() doesn't like None
dist_dir = os.path.join(directory, "dist")
dist_arg = "-d {0}".format(dist_dir)
build_dir = os.path.join(directory, "build")
build_arg = "-b {0}".format(build_dir)
# Clean
if clean:
if os.path.exists(build_dir):
rmtree(build_dir)
# NOTE: not cleaning dist_dir, since this may be called >1 time within
# publish() trying to build up multiple wheels/etc.
# TODO: separate clean-build/clean-dist args? Meh
# Build
parts = [python, "setup.py"]
if sdist:
parts.extend(("sdist", dist_arg))
if wheel:
# Manually execute build in case we are using a custom build dir.
# Doesn't seem to be a way to tell bdist_wheel to do this directly.
parts.extend(("build", build_arg))
parts.extend(("bdist_wheel", dist_arg))
c.run(" ".join(parts)) | python | def build(c, sdist=True, wheel=False, directory=None, python=None, clean=True):
"""
Build sdist and/or wheel archives, optionally in a temp base directory.
All parameters save ``directory`` honor config settings of the same name,
under the ``packaging`` tree. E.g. say ``.configure({'packaging': {'wheel':
True}})`` to force building wheel archives by default.
:param bool sdist:
Whether to build sdists/tgzs.
:param bool wheel:
Whether to build wheels (requires the ``wheel`` package from PyPI).
:param str directory:
Allows specifying a specific directory in which to perform builds and
dist creation. Useful when running as a subroutine from ``publish``
which sets up a temporary directory.
Two subdirectories will be created within this directory: one for
builds, and one for the dist archives.
When ``None`` or another false-y value, the current working directory
is used (and thus, local ``dist/`` and ``build/`` subdirectories).
:param str python:
Which Python binary to use when invoking ``setup.py``.
Defaults to just ``python``.
If ``wheel=True``, then this Python must have ``wheel`` installed in
its default ``site-packages`` (or similar) location.
:param bool clean:
Whether to clean out the local ``build/`` folder before building.
"""
# Config hooks
config = c.config.get("packaging", {})
# TODO: update defaults to be None, then flip the below so non-None runtime
# beats config.
sdist = config.get("sdist", sdist)
wheel = config.get("wheel", wheel)
python = config.get("python", python or "python") # buffalo buffalo
# Sanity
if not sdist and not wheel:
sys.exit(
"You said no sdists and no wheels..."
"what DO you want to build exactly?"
)
# Directory path/arg logic
if not directory:
directory = "" # os.path.join() doesn't like None
dist_dir = os.path.join(directory, "dist")
dist_arg = "-d {0}".format(dist_dir)
build_dir = os.path.join(directory, "build")
build_arg = "-b {0}".format(build_dir)
# Clean
if clean:
if os.path.exists(build_dir):
rmtree(build_dir)
# NOTE: not cleaning dist_dir, since this may be called >1 time within
# publish() trying to build up multiple wheels/etc.
# TODO: separate clean-build/clean-dist args? Meh
# Build
parts = [python, "setup.py"]
if sdist:
parts.extend(("sdist", dist_arg))
if wheel:
# Manually execute build in case we are using a custom build dir.
# Doesn't seem to be a way to tell bdist_wheel to do this directly.
parts.extend(("build", build_arg))
parts.extend(("bdist_wheel", dist_arg))
c.run(" ".join(parts)) | [
"def",
"build",
"(",
"c",
",",
"sdist",
"=",
"True",
",",
"wheel",
"=",
"False",
",",
"directory",
"=",
"None",
",",
"python",
"=",
"None",
",",
"clean",
"=",
"True",
")",
":",
"# Config hooks",
"config",
"=",
"c",
".",
"config",
".",
"get",
"(",
... | Build sdist and/or wheel archives, optionally in a temp base directory.
All parameters save ``directory`` honor config settings of the same name,
under the ``packaging`` tree. E.g. say ``.configure({'packaging': {'wheel':
True}})`` to force building wheel archives by default.
:param bool sdist:
Whether to build sdists/tgzs.
:param bool wheel:
Whether to build wheels (requires the ``wheel`` package from PyPI).
:param str directory:
Allows specifying a specific directory in which to perform builds and
dist creation. Useful when running as a subroutine from ``publish``
which sets up a temporary directory.
Two subdirectories will be created within this directory: one for
builds, and one for the dist archives.
When ``None`` or another false-y value, the current working directory
is used (and thus, local ``dist/`` and ``build/`` subdirectories).
:param str python:
Which Python binary to use when invoking ``setup.py``.
Defaults to just ``python``.
If ``wheel=True``, then this Python must have ``wheel`` installed in
its default ``site-packages`` (or similar) location.
:param bool clean:
Whether to clean out the local ``build/`` folder before building. | [
"Build",
"sdist",
"and",
"/",
"or",
"wheel",
"archives",
"optionally",
"in",
"a",
"temp",
"base",
"directory",
"."
] | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/packaging/release.py#L518-L590 | train |
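For a combined sdist+wheel build into a custom directory, the command that build() assembles and runs is, following the argument logic above (directory value is hypothetical):
import os
directory = "/tmp/pkgbuild"
dist_arg = "-d " + os.path.join(directory, "dist")
build_arg = "-b " + os.path.join(directory, "build")
cmd = " ".join(
    ["python", "setup.py", "sdist", dist_arg, "build", build_arg, "bdist_wheel", dist_arg]
)
# -> 'python setup.py sdist -d /tmp/pkgbuild/dist build -b /tmp/pkgbuild/build bdist_wheel -d /tmp/pkgbuild/dist'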
pyinvoke/invocations | invocations/packaging/release.py | publish | def publish(
c,
sdist=True,
wheel=False,
index=None,
sign=False,
dry_run=False,
directory=None,
dual_wheels=False,
alt_python=None,
check_desc=False,
):
"""
Publish code to PyPI or index of choice.
All parameters save ``dry_run`` and ``directory`` honor config settings of
the same name, under the ``packaging`` tree. E.g. say
``.configure({'packaging': {'wheel': True}})`` to force building wheel
archives by default.
:param bool sdist:
Whether to upload sdists/tgzs.
:param bool wheel:
Whether to upload wheels (requires the ``wheel`` package from PyPI).
:param str index:
Custom upload index/repository name. See ``upload`` help for details.
:param bool sign:
Whether to sign the built archive(s) via GPG.
:param bool dry_run:
Skip actual publication step if ``True``.
This also prevents cleanup of the temporary build/dist directories, so
you can examine the build artifacts.
:param str directory:
Base directory within which will live the ``dist/`` and ``build/``
directories.
Defaults to a temporary directory which is cleaned up after the run
finishes.
:param bool dual_wheels:
When ``True``, builds individual wheels for Python 2 and Python 3.
Useful for situations where you can't build universal wheels, but still
want to distribute for both interpreter versions.
Requires that you have a useful ``python3`` (or ``python2``, if you're
on Python 3 already) binary in your ``$PATH``. Also requires that this
other python have the ``wheel`` package installed in its
``site-packages``; usually this will mean the global site-packages for
that interpreter.
See also the ``alt_python`` argument.
:param str alt_python:
Path to the 'alternate' Python interpreter to use when
``dual_wheels=True``.
When ``None`` (the default) will be ``python3`` or ``python2``,
depending on the currently active interpreter.
:param bool check_desc:
Whether to run ``setup.py check -r -s`` (uses ``readme_renderer``)
before trying to publish - catches long_description bugs. Default:
``False``.
"""
# Don't hide by default, this step likes to be verbose most of the time.
c.config.run.hide = False
# Config hooks
config = c.config.get("packaging", {})
index = config.get("index", index)
sign = config.get("sign", sign)
dual_wheels = config.get("dual_wheels", dual_wheels)
check_desc = config.get("check_desc", check_desc)
# Initial sanity check, if needed. Will die usefully.
if check_desc:
c.run("python setup.py check -r -s")
# Build, into controlled temp dir (avoids attempting to re-upload old
# files)
with tmpdir(skip_cleanup=dry_run, explicit=directory) as tmp:
# Build default archives
build(c, sdist=sdist, wheel=wheel, directory=tmp)
# Build opposing interpreter archive, if necessary
if dual_wheels:
if not alt_python:
alt_python = "python2"
if sys.version_info[0] == 2:
alt_python = "python3"
build(c, sdist=False, wheel=True, directory=tmp, python=alt_python)
# Do the thing!
upload(c, directory=tmp, index=index, sign=sign, dry_run=dry_run) | python | def publish(
c,
sdist=True,
wheel=False,
index=None,
sign=False,
dry_run=False,
directory=None,
dual_wheels=False,
alt_python=None,
check_desc=False,
):
"""
Publish code to PyPI or index of choice.
All parameters save ``dry_run`` and ``directory`` honor config settings of
the same name, under the ``packaging`` tree. E.g. say
``.configure({'packaging': {'wheel': True}})`` to force building wheel
archives by default.
:param bool sdist:
Whether to upload sdists/tgzs.
:param bool wheel:
Whether to upload wheels (requires the ``wheel`` package from PyPI).
:param str index:
Custom upload index/repository name. See ``upload`` help for details.
:param bool sign:
Whether to sign the built archive(s) via GPG.
:param bool dry_run:
Skip actual publication step if ``True``.
This also prevents cleanup of the temporary build/dist directories, so
you can examine the build artifacts.
:param str directory:
Base directory within which will live the ``dist/`` and ``build/``
directories.
Defaults to a temporary directory which is cleaned up after the run
finishes.
:param bool dual_wheels:
When ``True``, builds individual wheels for Python 2 and Python 3.
Useful for situations where you can't build universal wheels, but still
want to distribute for both interpreter versions.
Requires that you have a useful ``python3`` (or ``python2``, if you're
on Python 3 already) binary in your ``$PATH``. Also requires that this
other python have the ``wheel`` package installed in its
``site-packages``; usually this will mean the global site-packages for
that interpreter.
See also the ``alt_python`` argument.
:param str alt_python:
Path to the 'alternate' Python interpreter to use when
``dual_wheels=True``.
When ``None`` (the default) will be ``python3`` or ``python2``,
depending on the currently active interpreter.
:param bool check_desc:
Whether to run ``setup.py check -r -s`` (uses ``readme_renderer``)
before trying to publish - catches long_description bugs. Default:
``False``.
"""
# Don't hide by default, this step likes to be verbose most of the time.
c.config.run.hide = False
# Config hooks
config = c.config.get("packaging", {})
index = config.get("index", index)
sign = config.get("sign", sign)
dual_wheels = config.get("dual_wheels", dual_wheels)
check_desc = config.get("check_desc", check_desc)
# Initial sanity check, if needed. Will die usefully.
if check_desc:
c.run("python setup.py check -r -s")
# Build, into controlled temp dir (avoids attempting to re-upload old
# files)
with tmpdir(skip_cleanup=dry_run, explicit=directory) as tmp:
# Build default archives
build(c, sdist=sdist, wheel=wheel, directory=tmp)
# Build opposing interpreter archive, if necessary
if dual_wheels:
if not alt_python:
alt_python = "python2"
if sys.version_info[0] == 2:
alt_python = "python3"
build(c, sdist=False, wheel=True, directory=tmp, python=alt_python)
# Do the thing!
upload(c, directory=tmp, index=index, sign=sign, dry_run=dry_run) | [
"def",
"publish",
"(",
"c",
",",
"sdist",
"=",
"True",
",",
"wheel",
"=",
"False",
",",
"index",
"=",
"None",
",",
"sign",
"=",
"False",
",",
"dry_run",
"=",
"False",
",",
"directory",
"=",
"None",
",",
"dual_wheels",
"=",
"False",
",",
"alt_python",... | Publish code to PyPI or index of choice.
All parameters save ``dry_run`` and ``directory`` honor config settings of
the same name, under the ``packaging`` tree. E.g. say
``.configure({'packaging': {'wheel': True}})`` to force building wheel
archives by default.
:param bool sdist:
Whether to upload sdists/tgzs.
:param bool wheel:
Whether to upload wheels (requires the ``wheel`` package from PyPI).
:param str index:
Custom upload index/repository name. See ``upload`` help for details.
:param bool sign:
Whether to sign the built archive(s) via GPG.
:param bool dry_run:
Skip actual publication step if ``True``.
This also prevents cleanup of the temporary build/dist directories, so
you can examine the build artifacts.
:param str directory:
Base directory within which will live the ``dist/`` and ``build/``
directories.
Defaults to a temporary directory which is cleaned up after the run
finishes.
:param bool dual_wheels:
When ``True``, builds individual wheels for Python 2 and Python 3.
Useful for situations where you can't build universal wheels, but still
want to distribute for both interpreter versions.
Requires that you have a useful ``python3`` (or ``python2``, if you're
on Python 3 already) binary in your ``$PATH``. Also requires that this
other python have the ``wheel`` package installed in its
``site-packages``; usually this will mean the global site-packages for
that interpreter.
See also the ``alt_python`` argument.
:param str alt_python:
Path to the 'alternate' Python interpreter to use when
``dual_wheels=True``.
When ``None`` (the default) will be ``python3`` or ``python2``,
depending on the currently active interpreter.
:param bool check_desc:
Whether to run ``setup.py check -r -s`` (uses ``readme_renderer``)
before trying to publish - catches long_description bugs. Default:
``False``. | [
"Publish",
"code",
"to",
"PyPI",
"or",
"index",
"of",
"choice",
"."
] | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/packaging/release.py#L605-L700 | train |
pyinvoke/invocations | invocations/packaging/release.py | upload | def upload(c, directory, index=None, sign=False, dry_run=False):
"""
Upload (potentially also signing) all artifacts in ``directory``.
:param str index:
Custom upload index/repository name.
By default, uses whatever the invoked ``pip`` is configured to use.
Modify your ``pypirc`` file to add new named repositories.
:param bool sign:
Whether to sign the built archive(s) via GPG.
:param bool dry_run:
Skip actual publication step if ``True``.
This also prevents cleanup of the temporary build/dist directories, so
you can examine the build artifacts.
"""
# Obtain list of archive filenames, then ensure any wheels come first
# so their improved metadata is what PyPI sees initially (otherwise, it
# only honors the sdist's lesser data).
archives = list(
itertools.chain.from_iterable(
glob(os.path.join(directory, "dist", "*.{0}".format(extension)))
for extension in ("whl", "tar.gz")
)
)
# Sign each archive in turn
# TODO: twine has a --sign option; but the below is still nice insofar
# as it lets us dry-run, generate for web upload when pypi's API is
# being cranky, etc. Figure out which is better.
if sign:
prompt = "Please enter GPG passphrase for signing: "
input_ = StringIO(getpass.getpass(prompt) + "\n")
gpg_bin = find_gpg(c)
if not gpg_bin:
sys.exit(
"You need to have one of `gpg`, `gpg1` or `gpg2` "
"installed to GPG-sign!"
)
for archive in archives:
cmd = "{0} --detach-sign -a --passphrase-fd 0 {{0}}".format(
gpg_bin
) # noqa
c.run(cmd.format(archive), in_stream=input_)
input_.seek(0) # So it can be replayed by subsequent iterations
# Upload
parts = ["twine", "upload"]
if index:
index_arg = "--repository {0}".format(index)
if index:
parts.append(index_arg)
paths = archives[:]
if sign:
paths.append(os.path.join(directory, "dist", "*.asc"))
parts.extend(paths)
cmd = " ".join(parts)
if dry_run:
print("Would publish via: {0}".format(cmd))
print("Files that would be published:")
c.run("ls -l {0}".format(" ".join(paths)))
else:
c.run(cmd) | python | def upload(c, directory, index=None, sign=False, dry_run=False):
"""
Upload (potentially also signing) all artifacts in ``directory``.
:param str index:
Custom upload index/repository name.
By default, uses whatever the invoked ``pip`` is configured to use.
Modify your ``pypirc`` file to add new named repositories.
:param bool sign:
Whether to sign the built archive(s) via GPG.
:param bool dry_run:
Skip actual publication step if ``True``.
This also prevents cleanup of the temporary build/dist directories, so
you can examine the build artifacts.
"""
# Obtain list of archive filenames, then ensure any wheels come first
# so their improved metadata is what PyPI sees initially (otherwise, it
# only honors the sdist's lesser data).
archives = list(
itertools.chain.from_iterable(
glob(os.path.join(directory, "dist", "*.{0}".format(extension)))
for extension in ("whl", "tar.gz")
)
)
# Sign each archive in turn
# TODO: twine has a --sign option; but the below is still nice insofar
# as it lets us dry-run, generate for web upload when pypi's API is
# being cranky, etc. Figure out which is better.
if sign:
prompt = "Please enter GPG passphrase for signing: "
input_ = StringIO(getpass.getpass(prompt) + "\n")
gpg_bin = find_gpg(c)
if not gpg_bin:
sys.exit(
"You need to have one of `gpg`, `gpg1` or `gpg2` "
"installed to GPG-sign!"
)
for archive in archives:
cmd = "{0} --detach-sign -a --passphrase-fd 0 {{0}}".format(
gpg_bin
) # noqa
c.run(cmd.format(archive), in_stream=input_)
input_.seek(0) # So it can be replayed by subsequent iterations
# Upload
parts = ["twine", "upload"]
if index:
index_arg = "--repository {0}".format(index)
if index:
parts.append(index_arg)
paths = archives[:]
if sign:
paths.append(os.path.join(directory, "dist", "*.asc"))
parts.extend(paths)
cmd = " ".join(parts)
if dry_run:
print("Would publish via: {0}".format(cmd))
print("Files that would be published:")
c.run("ls -l {0}".format(" ".join(paths)))
else:
c.run(cmd) | [
"def",
"upload",
"(",
"c",
",",
"directory",
",",
"index",
"=",
"None",
",",
"sign",
"=",
"False",
",",
"dry_run",
"=",
"False",
")",
":",
"# Obtain list of archive filenames, then ensure any wheels come first",
"# so their improved metadata is what PyPI sees initially (oth... | Upload (potentially also signing) all artifacts in ``directory``.
:param str index:
Custom upload index/repository name.
By default, uses whatever the invoked ``pip`` is configured to use.
Modify your ``pypirc`` file to add new named repositories.
:param bool sign:
Whether to sign the built archive(s) via GPG.
:param bool dry_run:
Skip actual publication step if ``True``.
This also prevents cleanup of the temporary build/dist directories, so
you can examine the build artifacts. | [
"Upload",
"(",
"potentially",
"also",
"signing",
")",
"all",
"artifacts",
"in",
"directory",
"."
] | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/packaging/release.py#L703-L766 | train |
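The doubled braces in the GPG command above mean str.format is applied twice: the first call fills in the binary name and leaves a {0} slot, the second fills in each archive path. In isolation:
gpg_bin = "gpg2"   # whatever find_gpg() picked
template = "{0} --detach-sign -a --passphrase-fd 0 {{0}}".format(gpg_bin)
# template == 'gpg2 --detach-sign -a --passphrase-fd 0 {0}'
cmd = template.format("dist/example-1.0.0.tar.gz")
# cmd  == 'gpg2 --detach-sign -a --passphrase-fd 0 dist/example-1.0.0.tar.gz'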
pyinvoke/invocations | invocations/util.py | tmpdir | def tmpdir(skip_cleanup=False, explicit=None):
"""
Context-manage a temporary directory.
Can be given ``skip_cleanup`` to skip cleanup, and ``explicit`` to choose a
specific location.
(If both are given, this is basically not doing anything, but it allows
code that normally requires a secure temporary directory to 'dry run'
instead.)
"""
tmp = explicit if explicit is not None else mkdtemp()
try:
yield tmp
finally:
if not skip_cleanup:
rmtree(tmp) | python | def tmpdir(skip_cleanup=False, explicit=None):
"""
Context-manage a temporary directory.
Can be given ``skip_cleanup`` to skip cleanup, and ``explicit`` to choose a
specific location.
(If both are given, this is basically not doing anything, but it allows
code that normally requires a secure temporary directory to 'dry run'
instead.)
"""
tmp = explicit if explicit is not None else mkdtemp()
try:
yield tmp
finally:
if not skip_cleanup:
rmtree(tmp) | [
"def",
"tmpdir",
"(",
"skip_cleanup",
"=",
"False",
",",
"explicit",
"=",
"None",
")",
":",
"tmp",
"=",
"explicit",
"if",
"explicit",
"is",
"not",
"None",
"else",
"mkdtemp",
"(",
")",
"try",
":",
"yield",
"tmp",
"finally",
":",
"if",
"not",
"skip_clean... | Context-manage a temporary directory.
Can be given ``skip_cleanup`` to skip cleanup, and ``explicit`` to choose a
specific location.
(If both are given, this is basically not doing anything, but it allows
code that normally requires a secure temporary directory to 'dry run'
instead.) | [
"Context",
"-",
"manage",
"a",
"temporary",
"directory",
"."
] | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/util.py#L7-L23 | train |
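Minimal usage sketch (this is how publish() consumes it; pass skip_cleanup=True and/or explicit=... to keep or pin the directory):
from invocations.util import tmpdir
with tmpdir() as tmp:
    print("building inside", tmp)   # a fresh mkdtemp() path
# the directory has been rmtree'd here, unless skip_cleanup was True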
pysal/giddy | giddy/directional.py | Rose.permute | def permute(self, permutations=99, alternative='two.sided'):
"""
        Generate random spatial permutations for inference on LISA vectors.
Parameters
----------
permutations : int, optional
Number of random permutations of observations.
alternative : string, optional
Type of alternative to form in generating p-values.
        Options are: `two.sided` which tests for difference between observed
counts and those obtained from the permutation distribution;
`positive` which tests the alternative that the focal unit and its
lag move in the same direction over time; `negative` which tests
that the focal unit and its lag move in opposite directions over
the interval.
"""
rY = self.Y.copy()
idxs = np.arange(len(rY))
counts = np.zeros((permutations, len(self.counts)))
for m in range(permutations):
np.random.shuffle(idxs)
res = self._calc(rY[idxs, :], self.w, self.k)
counts[m] = res['counts']
self.counts_perm = counts
self.larger_perm = np.array(
[(counts[:, i] >= self.counts[i]).sum() for i in range(self.k)])
self.smaller_perm = np.array(
[(counts[:, i] <= self.counts[i]).sum() for i in range(self.k)])
self.expected_perm = counts.mean(axis=0)
self.alternative = alternative
# pvalue logic
# if P is the proportion that are as large for a one sided test (larger
# than), then
# p=P.
#
# For a two-tailed test, if P < .5, p = 2 * P, else, p = 2(1-P)
# Source: Rayner, J. C. W., O. Thas, and D. J. Best. 2009. "Appendix B:
# Parametric Bootstrap P-Values." In Smooth Tests of Goodness of Fit,
# 247. John Wiley and Sons.
# Note that the larger and smaller counts would be complements (except
# for the shared equality, for
# a given bin in the circular histogram. So we only need one of them.
# We report two-sided p-values for each bin as the default
# since a priori there could # be different alternatives for each bin
# depending on the problem at hand.
alt = alternative.upper()
if alt == 'TWO.SIDED':
P = (self.larger_perm + 1) / (permutations + 1.)
mask = P < 0.5
self.p = mask * 2 * P + (1 - mask) * 2 * (1 - P)
elif alt == 'POSITIVE':
# NE, SW sectors are higher, NW, SE are lower
POS = _POS8
if self.k == 4:
POS = _POS4
L = (self.larger_perm + 1) / (permutations + 1.)
S = (self.smaller_perm + 1) / (permutations + 1.)
P = POS * L + (1 - POS) * S
self.p = P
elif alt == 'NEGATIVE':
# NE, SW sectors are lower, NW, SE are higher
NEG = _NEG8
if self.k == 4:
NEG = _NEG4
L = (self.larger_perm + 1) / (permutations + 1.)
S = (self.smaller_perm + 1) / (permutations + 1.)
P = NEG * L + (1 - NEG) * S
self.p = P
else:
print(('Bad option for alternative: %s.' % alternative)) | python | def permute(self, permutations=99, alternative='two.sided'):
"""
        Generate random spatial permutations for inference on LISA vectors.
Parameters
----------
permutations : int, optional
Number of random permutations of observations.
alternative : string, optional
Type of alternative to form in generating p-values.
        Options are: `two.sided` which tests for difference between observed
counts and those obtained from the permutation distribution;
`positive` which tests the alternative that the focal unit and its
lag move in the same direction over time; `negative` which tests
that the focal unit and its lag move in opposite directions over
the interval.
"""
rY = self.Y.copy()
idxs = np.arange(len(rY))
counts = np.zeros((permutations, len(self.counts)))
for m in range(permutations):
np.random.shuffle(idxs)
res = self._calc(rY[idxs, :], self.w, self.k)
counts[m] = res['counts']
self.counts_perm = counts
self.larger_perm = np.array(
[(counts[:, i] >= self.counts[i]).sum() for i in range(self.k)])
self.smaller_perm = np.array(
[(counts[:, i] <= self.counts[i]).sum() for i in range(self.k)])
self.expected_perm = counts.mean(axis=0)
self.alternative = alternative
# pvalue logic
# if P is the proportion that are as large for a one sided test (larger
# than), then
# p=P.
#
# For a two-tailed test, if P < .5, p = 2 * P, else, p = 2(1-P)
# Source: Rayner, J. C. W., O. Thas, and D. J. Best. 2009. "Appendix B:
# Parametric Bootstrap P-Values." In Smooth Tests of Goodness of Fit,
# 247. John Wiley and Sons.
# Note that the larger and smaller counts would be complements (except
# for the shared equality, for
# a given bin in the circular histogram. So we only need one of them.
# We report two-sided p-values for each bin as the default
# since a priori there could # be different alternatives for each bin
# depending on the problem at hand.
alt = alternative.upper()
if alt == 'TWO.SIDED':
P = (self.larger_perm + 1) / (permutations + 1.)
mask = P < 0.5
self.p = mask * 2 * P + (1 - mask) * 2 * (1 - P)
elif alt == 'POSITIVE':
# NE, SW sectors are higher, NW, SE are lower
POS = _POS8
if self.k == 4:
POS = _POS4
L = (self.larger_perm + 1) / (permutations + 1.)
S = (self.smaller_perm + 1) / (permutations + 1.)
P = POS * L + (1 - POS) * S
self.p = P
elif alt == 'NEGATIVE':
# NE, SW sectors are lower, NW, SE are higher
NEG = _NEG8
if self.k == 4:
NEG = _NEG4
L = (self.larger_perm + 1) / (permutations + 1.)
S = (self.smaller_perm + 1) / (permutations + 1.)
P = NEG * L + (1 - NEG) * S
self.p = P
else:
print(('Bad option for alternative: %s.' % alternative)) | [
"def",
"permute",
"(",
"self",
",",
"permutations",
"=",
"99",
",",
"alternative",
"=",
"'two.sided'",
")",
":",
"rY",
"=",
"self",
".",
"Y",
".",
"copy",
"(",
")",
"idxs",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"rY",
")",
")",
"counts",
"=",... | Generate ransom spatial permutations for inference on LISA vectors.
Parameters
----------
permutations : int, optional
Number of random permutations of observations.
alternative : string, optional
Type of alternative to form in generating p-values.
    Options are: `two.sided` which tests for difference between observed
counts and those obtained from the permutation distribution;
`positive` which tests the alternative that the focal unit and its
lag move in the same direction over time; `negative` which tests
that the focal unit and its lag move in opposite directions over
the interval. | [
"Generate",
"ransom",
"spatial",
"permutations",
"for",
"inference",
"on",
"LISA",
"vectors",
"."
] | 13fae6c18933614be78e91a6b5060693bea33a04 | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/directional.py#L227-L300 | train |
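The two-sided p-value rule quoted from Rayner et al. above, applied to made-up permutation counts (99 permutations, two bins):
import numpy as np
permutations = 99
larger_perm = np.array([80, 10])             # permuted counts >= observed, per bin
P = (larger_perm + 1) / (permutations + 1.)  # [0.81, 0.11]
mask = P < 0.5
p = mask * 2 * P + (1 - mask) * 2 * (1 - P)  # [0.38, 0.22]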
pysal/giddy | giddy/directional.py | Rose.plot | def plot(self, attribute=None, ax=None, **kwargs):
"""
Plot the rose diagram.
Parameters
----------
attribute : (n,) ndarray, optional
Variable to specify colors of the colorbars.
ax : Matplotlib Axes instance, optional
If given, the figure will be created inside this axis.
Default =None. Note, this axis should have a polar projection.
**kwargs : keyword arguments, optional
Keywords used for creating and designing the plot.
Note: 'c' and 'color' cannot be passed when attribute is not None
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
"""
from splot.giddy import dynamic_lisa_rose
fig, ax = dynamic_lisa_rose(self, attribute=attribute,
ax=ax, **kwargs)
return fig, ax | python | def plot(self, attribute=None, ax=None, **kwargs):
"""
Plot the rose diagram.
Parameters
----------
attribute : (n,) ndarray, optional
Variable to specify colors of the colorbars.
ax : Matplotlib Axes instance, optional
If given, the figure will be created inside this axis.
Default =None. Note, this axis should have a polar projection.
**kwargs : keyword arguments, optional
Keywords used for creating and designing the plot.
Note: 'c' and 'color' cannot be passed when attribute is not None
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
"""
from splot.giddy import dynamic_lisa_rose
fig, ax = dynamic_lisa_rose(self, attribute=attribute,
ax=ax, **kwargs)
return fig, ax | [
"def",
"plot",
"(",
"self",
",",
"attribute",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"splot",
".",
"giddy",
"import",
"dynamic_lisa_rose",
"fig",
",",
"ax",
"=",
"dynamic_lisa_rose",
"(",
"self",
",",
"attribute... | Plot the rose diagram.
Parameters
----------
attribute : (n,) ndarray, optional
Variable to specify colors of the colorbars.
ax : Matplotlib Axes instance, optional
If given, the figure will be created inside this axis.
Default =None. Note, this axis should have a polar projection.
**kwargs : keyword arguments, optional
Keywords used for creating and designing the plot.
Note: 'c' and 'color' cannot be passed when attribute is not None
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted | [
"Plot",
"the",
"rose",
"diagram",
"."
] | 13fae6c18933614be78e91a6b5060693bea33a04 | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/directional.py#L324-L351 | train |
pysal/giddy | giddy/directional.py | Rose.plot_origin | def plot_origin(self): # TODO add attribute option to color vectors
"""
Plot vectors of positional transition of LISA values starting
from the same origin.
"""
import matplotlib.cm as cm
import matplotlib.pyplot as plt
ax = plt.subplot(111)
xlim = [self._dx.min(), self._dx.max()]
ylim = [self._dy.min(), self._dy.max()]
for x, y in zip(self._dx, self._dy):
xs = [0, x]
ys = [0, y]
plt.plot(xs, ys, '-b') # TODO change this to scale with attribute
plt.axis('equal')
plt.xlim(xlim)
plt.ylim(ylim) | python | def plot_origin(self): # TODO add attribute option to color vectors
"""
Plot vectors of positional transition of LISA values starting
from the same origin.
"""
import matplotlib.cm as cm
import matplotlib.pyplot as plt
ax = plt.subplot(111)
xlim = [self._dx.min(), self._dx.max()]
ylim = [self._dy.min(), self._dy.max()]
for x, y in zip(self._dx, self._dy):
xs = [0, x]
ys = [0, y]
plt.plot(xs, ys, '-b') # TODO change this to scale with attribute
plt.axis('equal')
plt.xlim(xlim)
plt.ylim(ylim) | [
"def",
"plot_origin",
"(",
"self",
")",
":",
"# TODO add attribute option to color vectors",
"import",
"matplotlib",
".",
"cm",
"as",
"cm",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"ax",
"=",
"plt",
".",
"subplot",
"(",
"111",
")",
"xlim",
"=",
"[... | Plot vectors of positional transition of LISA values starting
from the same origin. | [
"Plot",
"vectors",
"of",
"positional",
"transition",
"of",
"LISA",
"values",
"starting",
"from",
"the",
"same",
"origin",
"."
] | 13fae6c18933614be78e91a6b5060693bea33a04 | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/directional.py#L353-L369 | train |
pysal/giddy | giddy/directional.py | Rose.plot_vectors | def plot_vectors(self, arrows=True):
"""
Plot vectors of positional transition of LISA values
within quadrant in scatterplot in a polar plot.
Parameters
----------
ax : Matplotlib Axes instance, optional
If given, the figure will be created inside this axis.
Default =None.
arrows : boolean, optional
If True show arrowheads of vectors. Default =True
**kwargs : keyword arguments, optional
Keywords used for creating and designing the plot.
Note: 'c' and 'color' cannot be passed when attribute is not None
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
"""
from splot.giddy import dynamic_lisa_vectors
fig, ax = dynamic_lisa_vectors(self, arrows=arrows)
return fig, ax | python | def plot_vectors(self, arrows=True):
"""
Plot vectors of positional transition of LISA values
within quadrant in scatterplot in a polar plot.
Parameters
----------
ax : Matplotlib Axes instance, optional
If given, the figure will be created inside this axis.
Default =None.
arrows : boolean, optional
If True show arrowheads of vectors. Default =True
**kwargs : keyword arguments, optional
Keywords used for creating and designing the plot.
Note: 'c' and 'color' cannot be passed when attribute is not None
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
"""
from splot.giddy import dynamic_lisa_vectors
fig, ax = dynamic_lisa_vectors(self, arrows=arrows)
return fig, ax | [
"def",
"plot_vectors",
"(",
"self",
",",
"arrows",
"=",
"True",
")",
":",
"from",
"splot",
".",
"giddy",
"import",
"dynamic_lisa_vectors",
"fig",
",",
"ax",
"=",
"dynamic_lisa_vectors",
"(",
"self",
",",
"arrows",
"=",
"arrows",
")",
"return",
"fig",
",",
... | Plot vectors of positional transition of LISA values
within quadrant in scatterplot in a polar plot.
Parameters
----------
ax : Matplotlib Axes instance, optional
If given, the figure will be created inside this axis.
Default =None.
arrows : boolean, optional
If True show arrowheads of vectors. Default =True
**kwargs : keyword arguments, optional
Keywords used for creating and designing the plot.
Note: 'c' and 'color' cannot be passed when attribute is not None
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted | [
"Plot",
"vectors",
"of",
"positional",
"transition",
"of",
"LISA",
"values",
"within",
"quadrant",
"in",
"scatterplot",
"in",
"a",
"polar",
"plot",
"."
] | 13fae6c18933614be78e91a6b5060693bea33a04 | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/directional.py#L372-L400 | train |
pyinvoke/invocations | invocations/docs.py | _clean | def _clean(c):
"""
Nuke docs build target directory so next build is clean.
"""
if isdir(c.sphinx.target):
rmtree(c.sphinx.target) | python | def _clean(c):
"""
Nuke docs build target directory so next build is clean.
"""
if isdir(c.sphinx.target):
rmtree(c.sphinx.target) | [
"def",
"_clean",
"(",
"c",
")",
":",
"if",
"isdir",
"(",
"c",
".",
"sphinx",
".",
"target",
")",
":",
"rmtree",
"(",
"c",
".",
"sphinx",
".",
"target",
")"
] | Nuke docs build target directory so next build is clean. | [
"Nuke",
"docs",
"build",
"target",
"directory",
"so",
"next",
"build",
"is",
"clean",
"."
] | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/docs.py#L17-L22 | train |
pyinvoke/invocations | invocations/docs.py | _browse | def _browse(c):
"""
Open build target's index.html in a browser (using 'open').
"""
index = join(c.sphinx.target, c.sphinx.target_file)
c.run("open {0}".format(index)) | python | def _browse(c):
"""
Open build target's index.html in a browser (using 'open').
"""
index = join(c.sphinx.target, c.sphinx.target_file)
c.run("open {0}".format(index)) | [
"def",
"_browse",
"(",
"c",
")",
":",
"index",
"=",
"join",
"(",
"c",
".",
"sphinx",
".",
"target",
",",
"c",
".",
"sphinx",
".",
"target_file",
")",
"c",
".",
"run",
"(",
"\"open {0}\"",
".",
"format",
"(",
"index",
")",
")"
] | Open build target's index.html in a browser (using 'open'). | [
"Open",
"build",
"target",
"s",
"index",
".",
"html",
"in",
"a",
"browser",
"(",
"using",
"open",
")",
"."
] | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/docs.py#L27-L32 | train |
pyinvoke/invocations | invocations/docs.py | build | def build(
c,
clean=False,
browse=False,
nitpick=False,
opts=None,
source=None,
target=None,
):
"""
Build the project's Sphinx docs.
"""
if clean:
_clean(c)
if opts is None:
opts = ""
if nitpick:
opts += " -n -W -T"
cmd = "sphinx-build{0} {1} {2}".format(
(" " + opts) if opts else "",
source or c.sphinx.source,
target or c.sphinx.target,
)
c.run(cmd, pty=True)
if browse:
        _browse(c) | python | Build the project's Sphinx docs. | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/docs.py#L46-L71 | train |
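For context, a minimal sketch of driving the docs `build` task above programmatically; the `sphinx.*` paths are only illustrative (real values come from the project's Invoke config), and it assumes the task object can be called directly with a `Context`, as plain functions are here.

```python
# Sketch: call the build task with an Invoke Context whose config carries
# the sphinx.* keys the tasks above expect (paths are assumptions).
from invoke import Config, Context
from invocations.docs import build  # module as given in this row's path

cfg = Config(overrides={"sphinx": {"source": "sites/docs",
                                   "target": "sites/docs/_build",
                                   "target_file": "index.html"}})
build(Context(config=cfg), clean=True, nitpick=True)
# -> runs roughly: sphinx-build -n -W -T sites/docs sites/docs/_build
```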
pyinvoke/invocations | invocations/docs.py | tree | def tree(c):
"""
Display documentation contents with the 'tree' program.
"""
ignore = ".git|*.pyc|*.swp|dist|*.egg-info|_static|_build|_templates"
    c.run('tree -Ca -I "{0}" {1}'.format(ignore, c.sphinx.source)) | python | Display documentation contents with the 'tree' program. | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/docs.py#L95-L100 | train |
pyinvoke/invocations | invocations/docs.py | sites | def sites(c):
"""
Build both doc sites w/ maxed nitpicking.
"""
# TODO: This is super lolzy but we haven't actually tackled nontrivial
# in-Python task calling yet, so we do this to get a copy of 'our' context,
# which has been updated with the per-collection config data of the
# docs/www subcollections.
docs_c = Context(config=c.config.clone())
www_c = Context(config=c.config.clone())
docs_c.update(**docs.configuration())
www_c.update(**www.configuration())
# Must build both normally first to ensure good intersphinx inventory files
    # exist =/ circular dependencies ahoy! Do it quietly to avoid polluting
# output; only super-serious errors will bubble up.
# TODO: wants a 'temporarily tweak context settings' contextmanager
# TODO: also a fucking spinner cuz this confuses me every time I run it
# when the docs aren't already prebuilt
docs_c["run"].hide = True
www_c["run"].hide = True
docs["build"](docs_c)
www["build"](www_c)
docs_c["run"].hide = False
www_c["run"].hide = False
# Run the actual builds, with nitpick=True (nitpicks + tracebacks)
docs["build"](docs_c, nitpick=True)
www["build"](www_c, nitpick=True) | python | def sites(c):
"""
Build both doc sites w/ maxed nitpicking.
"""
# TODO: This is super lolzy but we haven't actually tackled nontrivial
# in-Python task calling yet, so we do this to get a copy of 'our' context,
# which has been updated with the per-collection config data of the
# docs/www subcollections.
docs_c = Context(config=c.config.clone())
www_c = Context(config=c.config.clone())
docs_c.update(**docs.configuration())
www_c.update(**www.configuration())
# Must build both normally first to ensure good intersphinx inventory files
# exist =/ circular dependencies ahoy! Do it quietly to avoid pulluting
# output; only super-serious errors will bubble up.
# TODO: wants a 'temporarily tweak context settings' contextmanager
# TODO: also a fucking spinner cuz this confuses me every time I run it
# when the docs aren't already prebuilt
docs_c["run"].hide = True
www_c["run"].hide = True
docs["build"](docs_c)
www["build"](www_c)
docs_c["run"].hide = False
www_c["run"].hide = False
# Run the actual builds, with nitpick=True (nitpicks + tracebacks)
docs["build"](docs_c, nitpick=True)
www["build"](www_c, nitpick=True) | [
"def",
"sites",
"(",
"c",
")",
":",
"# TODO: This is super lolzy but we haven't actually tackled nontrivial",
"# in-Python task calling yet, so we do this to get a copy of 'our' context,",
"# which has been updated with the per-collection config data of the",
"# docs/www subcollections.",
"docs_... | Build both doc sites w/ maxed nitpicking. | [
"Build",
"both",
"doc",
"sites",
"w",
"/",
"maxed",
"nitpicking",
"."
] | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/docs.py#L143-L169 | train |
pyinvoke/invocations | invocations/docs.py | watch_docs | def watch_docs(c):
"""
Watch both doc trees & rebuild them if files change.
This includes e.g. rebuilding the API docs if the source code changes;
rebuilding the WWW docs if the README changes; etc.
Reuses the configuration values ``packaging.package`` or ``tests.package``
(the former winning over the latter if both defined) when determining which
source directory to scan for API doc updates.
"""
# TODO: break back down into generic single-site version, then create split
# tasks as with docs/www above. Probably wants invoke#63.
# NOTE: 'www'/'docs' refer to the module level sub-collections. meh.
# Readme & WWW triggers WWW
www_c = Context(config=c.config.clone())
www_c.update(**www.configuration())
www_handler = make_handler(
ctx=www_c,
task_=www["build"],
regexes=[r"\./README.rst", r"\./sites/www"],
ignore_regexes=[r".*/\..*\.swp", r"\./sites/www/_build"],
)
# Code and docs trigger API
docs_c = Context(config=c.config.clone())
docs_c.update(**docs.configuration())
regexes = [r"\./sites/docs"]
package = c.get("packaging", {}).get("package", None)
if package is None:
package = c.get("tests", {}).get("package", None)
if package:
regexes.append(r"\./{}/".format(package))
api_handler = make_handler(
ctx=docs_c,
task_=docs["build"],
regexes=regexes,
ignore_regexes=[r".*/\..*\.swp", r"\./sites/docs/_build"],
)
    observe(www_handler, api_handler) | python | Watch both doc trees & rebuild them if files change. | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/docs.py#L173-L215 | train |
pysal/giddy | giddy/util.py | shuffle_matrix | def shuffle_matrix(X, ids):
"""
Random permutation of rows and columns of a matrix
Parameters
----------
X : array
(k, k), array to be permutated.
ids : array
range (k, ).
Returns
-------
X : array
(k, k) with rows and columns randomly shuffled.
Examples
--------
>>> import numpy as np
>>> from giddy.util import shuffle_matrix
>>> X=np.arange(16)
>>> X.shape=(4,4)
>>> np.random.seed(10)
>>> shuffle_matrix(X,list(range(4)))
array([[10, 8, 11, 9],
[ 2, 0, 3, 1],
[14, 12, 15, 13],
[ 6, 4, 7, 5]])
"""
np.random.shuffle(ids)
    return X[ids, :][:, ids] | python | Random permutation of rows and columns of a matrix | 13fae6c18933614be78e91a6b5060693bea33a04 | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/util.py#L9-L40 | train |
pysal/giddy | giddy/util.py | get_lower | def get_lower(matrix):
"""
Flattens the lower part of an n x n matrix into an n*(n-1)/2 x 1 vector.
Parameters
----------
matrix : array
(n, n) numpy array, a distance matrix.
Returns
-------
lowvec : array
numpy array, the lower half of the distance matrix flattened into
a vector of length n*(n-1)/2.
Examples
--------
>>> import numpy as np
>>> from giddy.util import get_lower
>>> test = np.array([[0,1,2,3],[1,0,1,2],[2,1,0,1],[4,2,1,0]])
>>> lower = get_lower(test)
>>> lower
array([[1],
[2],
[1],
[4],
[2],
[1]])
"""
n = matrix.shape[0]
lowerlist = []
for i in range(n):
for j in range(n):
if i > j:
lowerlist.append(matrix[i, j])
veclen = n * (n - 1) / 2
lowvec = np.reshape(np.array(lowerlist), (int(veclen), 1))
    return lowvec | python | Flattens the lower part of an n x n matrix into an n*(n-1)/2 x 1 vector. | 13fae6c18933614be78e91a6b5060693bea33a04 | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/util.py#L43-L81 | train |
pyinvoke/invocations | invocations/checks.py | blacken | def blacken(
c, line_length=79, folders=None, check=False, diff=False, find_opts=None
):
r"""
Run black on the current source tree (all ``.py`` files).
.. warning::
``black`` only runs on Python 3.6 or above. (However, it can be
executed against Python 2 compatible code.)
:param int line_length:
Line length argument. Default: ``79``.
:param list folders:
List of folders (or, on the CLI, an argument that can be given N times)
to search within for ``.py`` files. Default: ``["."]``. Honors the
``blacken.folders`` config option.
:param bool check:
Whether to run ``black --check``. Default: ``False``.
:param bool diff:
Whether to run ``black --diff``. Default: ``False``.
:param str find_opts:
Extra option string appended to the end of the internal ``find``
command. For example, skip a vendor directory with ``"-and -not -path
./vendor\*"``, add ``-mtime N``, or etc. Honors the
``blacken.find_opts`` config option.
.. versionadded:: 1.2
.. versionchanged:: 1.4
Added the ``find_opts`` argument.
"""
config = c.config.get("blacken", {})
default_folders = ["."]
configured_folders = config.get("folders", default_folders)
folders = folders or configured_folders
default_find_opts = ""
configured_find_opts = config.get("find_opts", default_find_opts)
find_opts = find_opts or configured_find_opts
black_command_line = "black -l {}".format(line_length)
if check:
black_command_line = "{} --check".format(black_command_line)
if diff:
black_command_line = "{} --diff".format(black_command_line)
if find_opts:
find_opts = " {}".format(find_opts)
else:
find_opts = ""
cmd = "find {} -name '*.py'{} | xargs {}".format(
" ".join(folders), find_opts, black_command_line
)
    c.run(cmd, pty=True) | python | Run black on the current source tree (all ``.py`` files). | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/checks.py#L13-L65 | train |
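For reference, a small sketch of the shell command string `blacken` assembles with its default arguments (the logic is copied from the function body above; the printed command is what gets passed to `c.run`).

```python
# Sketch of the command blacken() builds by default: find piped into black.
folders, find_opts = ["."], ""
black_command_line = "black -l {}".format(79)
cmd = "find {} -name '*.py'{} | xargs {}".format(
    " ".join(folders), find_opts, black_command_line
)
print(cmd)  # find . -name '*.py' | xargs black -l 79
```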
kolypto/py-flask-jsontools | flask_jsontools/response.py | normalize_response_value | def normalize_response_value(rv):
""" Normalize the response value into a 3-tuple (rv, status, headers)
:type rv: tuple|*
:returns: tuple(rv, status, headers)
:rtype: tuple(Response|JsonResponse|*, int|None, dict|None)
"""
status = headers = None
if isinstance(rv, tuple):
rv, status, headers = rv + (None,) * (3 - len(rv))
    return rv, status, headers | python | Normalize the response value into a 3-tuple (rv, status, headers) | 1abee2d40e6db262e43f0c534e90faaa9b26246a | https://github.com/kolypto/py-flask-jsontools/blob/1abee2d40e6db262e43f0c534e90faaa9b26246a/flask_jsontools/response.py#L51-L60 | train |
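A quick sketch of the padding behaviour above: shorter tuples and bare values are filled out to three elements with `None` (the import path follows this row's module path).

```python
from flask_jsontools.response import normalize_response_value

print(normalize_response_value({"ok": True}))               # ({'ok': True}, None, None)
print(normalize_response_value(({"ok": True}, 201)))         # ({'ok': True}, 201, None)
print(normalize_response_value(("hi", 404, {"X-A": "b"})))   # ('hi', 404, {'X-A': 'b'})
```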
kolypto/py-flask-jsontools | flask_jsontools/response.py | make_json_response | def make_json_response(rv):
""" Make JsonResponse
:param rv: Response: the object to encode, or tuple (response, status, headers)
:type rv: tuple|*
:rtype: JsonResponse
"""
# Tuple of (response, status, headers)
rv, status, headers = normalize_response_value(rv)
# JsonResponse
if isinstance(rv, JsonResponse):
return rv
# Data
    return JsonResponse(rv, status, headers) | python | Make JsonResponse | 1abee2d40e6db262e43f0c534e90faaa9b26246a | https://github.com/kolypto/py-flask-jsontools/blob/1abee2d40e6db262e43f0c534e90faaa9b26246a/flask_jsontools/response.py#L63-L77 | train |
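Building on the helper above, a minimal sketch of turning a view-style return value into a `JsonResponse`; it assumes `JsonResponse` behaves like a regular `flask.Response` for attributes such as `status_code`.

```python
from flask_jsontools.response import make_json_response

resp = make_json_response(({"ok": True}, 201, {"X-Request-Id": "abc"}))
assert resp.status_code == 201          # assumption: Response-like status handling
assert make_json_response(resp) is resp  # an existing JsonResponse passes through unchanged
```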
pysal/giddy | giddy/mobility.py | markov_mobility | def markov_mobility(p, measure="P", ini=None):
"""
Markov-based mobility index.
Parameters
----------
p : array
(k, k), Markov transition probability matrix.
measure : string
If measure= "P",
:math:`M_{P} = \\frac{m-\sum_{i=1}^m P_{ii}}{m-1}`;
if measure = "D",
:math:`M_{D} = 1 - |\det(P)|`,
where :math:`\det(P)` is the determinant of :math:`P`;
if measure = "L2",
:math:`M_{L2} = 1 - |\lambda_2|`,
where :math:`\lambda_2` is the second largest eigenvalue of
:math:`P`;
if measure = "B1",
:math:`M_{B1} = \\frac{m-m \sum_{i=1}^m \pi_i P_{ii}}{m-1}`,
where :math:`\pi` is the initial income distribution;
if measure == "B2",
:math:`M_{B2} = \\frac{1}{m-1} \sum_{i=1}^m \sum_{
j=1}^m \pi_i P_{ij} |i-j|`,
where :math:`\pi` is the initial income distribution.
ini : array
(k,), initial distribution. Need to be specified if
measure = "B1" or "B2". If not,
the initial distribution would be treated as a uniform
distribution.
Returns
-------
mobi : float
Mobility value.
Notes
-----
The mobility indices are based on :cite:`Formby:2004fk`.
Examples
--------
>>> import numpy as np
>>> import libpysal
>>> import mapclassify as mc
>>> from giddy.markov import Markov
>>> from giddy.mobility import markov_mobility
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
>>> q5 = np.array([mc.Quantiles(y).yb for y in pci]).transpose()
>>> m = Markov(q5)
>>> m.p
array([[0.91011236, 0.0886392 , 0.00124844, 0. , 0. ],
[0.09972299, 0.78531856, 0.11080332, 0.00415512, 0. ],
[0. , 0.10125 , 0.78875 , 0.1075 , 0.0025 ],
[0. , 0.00417827, 0.11977716, 0.79805014, 0.07799443],
[0. , 0. , 0.00125156, 0.07133917, 0.92740926]])
(1) Estimate Shorrock1 mobility index:
>>> mobi_1 = markov_mobility(m.p, measure="P")
>>> print("{:.5f}".format(mobi_1))
0.19759
(2) Estimate Shorrock2 mobility index:
>>> mobi_2 = markov_mobility(m.p, measure="D")
>>> print("{:.5f}".format(mobi_2))
0.60685
(3) Estimate Sommers and Conlisk mobility index:
>>> mobi_3 = markov_mobility(m.p, measure="L2")
>>> print("{:.5f}".format(mobi_3))
0.03978
(4) Estimate Bartholomew1 mobility index (note that the initial
distribution should be given):
>>> ini = np.array([0.1,0.2,0.2,0.4,0.1])
>>> mobi_4 = markov_mobility(m.p, measure = "B1", ini=ini)
>>> print("{:.5f}".format(mobi_4))
0.22777
(5) Estimate Bartholomew2 mobility index (note that the initial
distribution should be given):
>>> ini = np.array([0.1,0.2,0.2,0.4,0.1])
>>> mobi_5 = markov_mobility(m.p, measure = "B2", ini=ini)
>>> print("{:.5f}".format(mobi_5))
0.04637
"""
p = np.array(p)
k = p.shape[1]
if measure == "P":
t = np.trace(p)
mobi = (k - t) / (k - 1)
elif measure == "D":
mobi = 1 - abs(la.det(p))
elif measure == "L2":
w, v = la.eig(p)
eigen_value_abs = abs(w)
mobi = 1 - np.sort(eigen_value_abs)[-2]
elif measure == "B1":
if ini is None:
ini = 1.0 / k * np.ones(k)
mobi = (k - k * np.sum(ini * np.diag(p))) / (k - 1)
elif measure == "B2":
mobi = 0
if ini is None:
ini = 1.0 / k * np.ones(k)
for i in range(k):
for j in range(k):
mobi = mobi + ini[i] * p[i, j] * abs(i - j)
mobi = mobi / (k - 1)
    return mobi | python | Markov-based mobility index. | 13fae6c18933614be78e91a6b5060693bea33a04 | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/mobility.py#L13-L131 | train |
pysal/giddy | giddy/markov.py | chi2 | def chi2(T1, T2):
"""
chi-squared test of difference between two transition matrices.
Parameters
----------
T1 : array
(k, k), matrix of transitions (counts).
T2 : array
(k, k), matrix of transitions (counts) to use to form the
probabilities under the null.
Returns
-------
: tuple
(3 elements).
(chi2 value, pvalue, degrees of freedom).
Examples
--------
>>> import libpysal
>>> from giddy.markov import Spatial_Markov, chi2
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> years = list(range(1929, 2010))
>>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
>>> rpci = pci/(pci.mean(axis=0))
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> w.transform='r'
>>> sm = Spatial_Markov(rpci, w, fixed=True)
>>> T1 = sm.T[0]
>>> T1
array([[562., 22., 1., 0.],
[ 12., 201., 22., 0.],
[ 0., 17., 97., 4.],
[ 0., 0., 3., 19.]])
>>> T2 = sm.transitions
>>> T2
array([[884., 77., 4., 0.],
[ 68., 794., 87., 3.],
[ 1., 92., 815., 51.],
[ 1., 0., 60., 903.]])
>>> chi2(T1,T2)
(23.39728441473295, 0.005363116704861337, 9)
Notes
-----
Second matrix is used to form the probabilities under the null.
Marginal sums from first matrix are distributed across these probabilities
under the null. In other words the observed transitions are taken from T1
while the expected transitions are formed as follows
.. math::
E_{i,j} = \sum_j T1_{i,j} * T2_{i,j}/\sum_j T2_{i,j}
Degrees of freedom corrected for any rows in either T1 or T2 that have
zero total transitions.
"""
rs2 = T2.sum(axis=1)
rs1 = T1.sum(axis=1)
rs2nz = rs2 > 0
rs1nz = rs1 > 0
dof1 = sum(rs1nz)
dof2 = sum(rs2nz)
rs2 = rs2 + (rs2 == 0)
dof = (dof1 - 1) * (dof2 - 1)
p = np.diag(1 / rs2) * np.matrix(T2)
E = np.diag(rs1) * np.matrix(p)
num = T1 - E
num = np.multiply(num, num)
E = E + (E == 0)
chi2 = num / E
chi2 = chi2.sum()
pvalue = 1 - stats.chi2.cdf(chi2, dof)
    return chi2, pvalue, dof | python | chi-squared test of difference between two transition matrices. | 13fae6c18933614be78e91a6b5060693bea33a04 | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/markov.py#L858-L932 | train |
pysal/giddy | giddy/markov.py | kullback | def kullback(F):
"""
Kullback information based test of Markov Homogeneity.
Parameters
----------
F : array
(s, r, r), values are transitions (not probabilities) for
s strata, r initial states, r terminal states.
Returns
-------
Results : dictionary
(key - value)
Conditional homogeneity - (float) test statistic for homogeneity
of transition probabilities across strata.
Conditional homogeneity pvalue - (float) p-value for test
statistic.
Conditional homogeneity dof - (int) degrees of freedom =
r(s-1)(r-1).
Notes
-----
Based on :cite:`Kullback1962`.
Example below is taken from Table 9.2 .
Examples
--------
>>> import numpy as np
>>> from giddy.markov import kullback
>>> s1 = np.array([
... [ 22, 11, 24, 2, 2, 7],
... [ 5, 23, 15, 3, 42, 6],
... [ 4, 21, 190, 25, 20, 34],
... [0, 2, 14, 56, 14, 28],
... [32, 15, 20, 10, 56, 14],
... [5, 22, 31, 18, 13, 134]
... ])
>>> s2 = np.array([
... [3, 6, 9, 3, 0, 8],
... [1, 9, 3, 12, 27, 5],
... [2, 9, 208, 32, 5, 18],
... [0, 14, 32, 108, 40, 40],
... [22, 14, 9, 26, 224, 14],
... [1, 5, 13, 53, 13, 116]
... ])
>>>
>>> F = np.array([s1, s2])
>>> res = kullback(F)
>>> "%8.3f"%res['Conditional homogeneity']
' 160.961'
>>> "%d"%res['Conditional homogeneity dof']
'30'
>>> "%3.1f"%res['Conditional homogeneity pvalue']
'0.0'
"""
F1 = F == 0
F1 = F + F1
FLF = F * np.log(F1)
T1 = 2 * FLF.sum()
FdJK = F.sum(axis=0)
FdJK1 = FdJK + (FdJK == 0)
FdJKLFdJK = FdJK * np.log(FdJK1)
T2 = 2 * FdJKLFdJK.sum()
FdJd = F.sum(axis=0).sum(axis=1)
FdJd1 = FdJd + (FdJd == 0)
T3 = 2 * (FdJd * np.log(FdJd1)).sum()
FIJd = F[:, :].sum(axis=1)
FIJd1 = FIJd + (FIJd == 0)
T4 = 2 * (FIJd * np.log(FIJd1)).sum()
T6 = F.sum()
T6 = 2 * T6 * np.log(T6)
s, r, r1 = F.shape
chom = T1 - T4 - T2 + T3
cdof = r * (s - 1) * (r - 1)
results = {}
results['Conditional homogeneity'] = chom
results['Conditional homogeneity dof'] = cdof
results['Conditional homogeneity pvalue'] = 1 - stats.chi2.cdf(chom, cdof)
    return results | python | Kullback information based test of Markov Homogeneity. | 13fae6c18933614be78e91a6b5060693bea33a04 | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/markov.py#L1336-L1425 | train |
pysal/giddy | giddy/markov.py | prais | def prais(pmat):
"""
Prais conditional mobility measure.
Parameters
----------
pmat : matrix
(k, k), Markov probability transition matrix.
Returns
-------
pr : matrix
(1, k), conditional mobility measures for each of the k classes.
Notes
-----
Prais' conditional mobility measure for a class is defined as:
.. math::
pr_i = 1 - p_{i,i}
Examples
--------
>>> import numpy as np
>>> import libpysal
>>> from giddy.markov import Markov,prais
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
>>> q5 = np.array([mc.Quantiles(y).yb for y in pci]).transpose()
>>> m = Markov(q5)
>>> m.transitions
array([[729., 71., 1., 0., 0.],
[ 72., 567., 80., 3., 0.],
[ 0., 81., 631., 86., 2.],
[ 0., 3., 86., 573., 56.],
[ 0., 0., 1., 57., 741.]])
>>> m.p
array([[0.91011236, 0.0886392 , 0.00124844, 0. , 0. ],
[0.09972299, 0.78531856, 0.11080332, 0.00415512, 0. ],
[0. , 0.10125 , 0.78875 , 0.1075 , 0.0025 ],
[0. , 0.00417827, 0.11977716, 0.79805014, 0.07799443],
[0. , 0. , 0.00125156, 0.07133917, 0.92740926]])
>>> prais(m.p)
array([0.08988764, 0.21468144, 0.21125 , 0.20194986, 0.07259074])
"""
pmat = np.array(pmat)
pr = 1 - np.diag(pmat)
    return pr | python | Prais conditional mobility measure. | 13fae6c18933614be78e91a6b5060693bea33a04 | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/markov.py#L1428-L1477 | train |
pysal/giddy | giddy/markov.py | homogeneity | def homogeneity(transition_matrices, regime_names=[], class_names=[],
title="Markov Homogeneity Test"):
"""
Test for homogeneity of Markov transition probabilities across regimes.
Parameters
----------
transition_matrices : list
of transition matrices for regimes, all matrices must
have same size (r, c). r is the number of rows in the
transition matrix and c is the number of columns in
the transition matrix.
regime_names : sequence
Labels for the regimes.
class_names : sequence
Labels for the classes/states of the Markov chain.
title : string
name of test.
Returns
-------
: implicit
an instance of Homogeneity_Results.
"""
return Homogeneity_Results(transition_matrices, regime_names=regime_names,
                               class_names=class_names, title=title) | python | Test for homogeneity of Markov transition probabilities across regimes. | 13fae6c18933614be78e91a6b5060693bea33a04 | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/markov.py#L1480-L1506 | train |
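Unlike the other giddy functions in this table, `homogeneity` has no Examples section; a minimal usage sketch, rebuilding the same `Spatial_Markov` fit used in the `chi2` example above and mirroring the call made inside `Spatial_Markov.summary`.

```python
import numpy as np
import libpysal
from giddy.markov import Spatial_Markov, homogeneity

f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
pci = np.array([f.by_col[str(y)] for y in range(1929, 2010)]).transpose()
rpci = pci / pci.mean(axis=0)
w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
w.transform = 'r'
sm = Spatial_Markov(rpci, w, fixed=True)

# Test homogeneity of transition probabilities across the spatial-lag regimes.
ht = homogeneity(sm.T,
                 regime_names=["LAG%d" % i for i in range(sm.k)],
                 class_names=["C%d" % i for i in range(sm.k)])
ht.summary()  # prints the Homogeneity_Results test table
```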
pysal/giddy | giddy/markov.py | sojourn_time | def sojourn_time(p):
"""
Calculate sojourn time based on a given transition probability matrix.
Parameters
----------
p : array
(k, k), a Markov transition probability matrix.
Returns
-------
: array
(k, ), sojourn times. Each element is the expected time a Markov
chain spends in each states before leaving that state.
Notes
-----
Refer to :cite:`Ibe2009` for more details on sojourn times for Markov
chains.
Examples
--------
>>> from giddy.markov import sojourn_time
>>> import numpy as np
>>> p = np.array([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
>>> sojourn_time(p)
array([2., 1., 2.])
"""
p = np.asarray(p)
pii = p.diagonal()
if not (1 - pii).all():
print("Sojourn times are infinite for absorbing states!")
    return 1 / (1 - pii) | python | Calculate sojourn time based on a given transition probability matrix. | 13fae6c18933614be78e91a6b5060693bea33a04 | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/markov.py#L1830-L1864 | train |
pysal/giddy | giddy/markov.py | Spatial_Markov._calc | def _calc(self, y, w):
'''Helper to estimate spatial lag conditioned Markov transition
probability matrices based on maximum likelihood techniques.
'''
if self.discrete:
self.lclass_ids = weights.lag_categorical(w, self.class_ids,
ties="tryself")
else:
ly = weights.lag_spatial(w, y)
self.lclass_ids, self.lag_cutoffs, self.m = self._maybe_classify(
ly, self.m, self.lag_cutoffs)
self.lclasses = np.arange(self.m)
T = np.zeros((self.m, self.k, self.k))
n, t = y.shape
for t1 in range(t - 1):
t2 = t1 + 1
for i in range(n):
T[self.lclass_ids[i, t1], self.class_ids[i, t1],
self.class_ids[i, t2]] += 1
P = np.zeros_like(T)
for i, mat in enumerate(T):
row_sum = mat.sum(axis=1)
row_sum = row_sum + (row_sum == 0)
p_i = np.matrix(np.diag(1. / row_sum) * np.matrix(mat))
P[i] = p_i
        return T, P | python | Helper to estimate spatial lag conditioned Markov transition probability matrices based on maximum likelihood techniques. | 13fae6c18933614be78e91a6b5060693bea33a04 | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/markov.py#L731-L759 | train |
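The maximum-likelihood step in `_calc` is just a row normalization of each lag-conditioned count matrix; a small self-contained sketch of that step in isolation.

```python
import numpy as np

# Sketch: turn one count matrix T into a probability matrix P, exactly as in
# the loop above; rows with zero transitions are left as all zeros.
T = np.array([[8., 2., 0.],
              [1., 3., 1.],
              [0., 0., 0.]])
row_sum = T.sum(axis=1)
row_sum = row_sum + (row_sum == 0)   # guard against division by zero
P = np.diag(1. / row_sum) @ T
# P[0] -> [0.8, 0.2, 0.]; P[2] stays [0., 0., 0.]
```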
pysal/giddy | giddy/markov.py | Spatial_Markov.summary | def summary(self, file_name=None):
"""
A summary method to call the Markov homogeneity test to test for
temporally lagged spatial dependence.
To learn more about the properties of the tests, refer to
:cite:`Rey2016a` and :cite:`Kang2018`.
"""
class_names = ["C%d" % i for i in range(self.k)]
regime_names = ["LAG%d" % i for i in range(self.k)]
ht = homogeneity(self.T, class_names=class_names,
regime_names=regime_names)
title = "Spatial Markov Test"
if self.variable_name:
title = title + ": " + self.variable_name
if file_name:
ht.summary(file_name=file_name, title=title)
else:
            ht.summary(title=title) | python | A summary method to call the Markov homogeneity test to test for temporally lagged spatial dependence. | 13fae6c18933614be78e91a6b5060693bea33a04 | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/markov.py#L811-L830 | train |
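And the corresponding call on the model itself, reusing the fitted `sm` from the `homogeneity` sketch above; writing to a file is assumed from the `file_name` parameter.

```python
sm.summary()                                      # prints the "Spatial Markov Test" table
sm.summary(file_name="spatial_markov_test.txt")   # assumed: writes the same table to a file
```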
pysal/giddy | giddy/markov.py | Spatial_Markov._maybe_classify | def _maybe_classify(self, y, k, cutoffs):
'''Helper method for classifying continuous data.
'''
rows, cols = y.shape
if cutoffs is None:
if self.fixed:
mcyb = mc.Quantiles(y.flatten(), k=k)
yb = mcyb.yb.reshape(y.shape)
cutoffs = mcyb.bins
k = len(cutoffs)
return yb, cutoffs[:-1], k
else:
yb = np.array([mc.Quantiles(y[:, i], k=k).yb for i in
np.arange(cols)]).transpose()
return yb, None, k
else:
cutoffs = list(cutoffs) + [np.inf]
cutoffs = np.array(cutoffs)
yb = mc.User_Defined(y.flatten(), np.array(cutoffs)).yb.reshape(
y.shape)
k = len(cutoffs)
return yb, cutoffs[:-1], k | python | def _maybe_classify(self, y, k, cutoffs):
'''Helper method for classifying continuous data.
'''
rows, cols = y.shape
if cutoffs is None:
if self.fixed:
mcyb = mc.Quantiles(y.flatten(), k=k)
yb = mcyb.yb.reshape(y.shape)
cutoffs = mcyb.bins
k = len(cutoffs)
return yb, cutoffs[:-1], k
else:
yb = np.array([mc.Quantiles(y[:, i], k=k).yb for i in
np.arange(cols)]).transpose()
return yb, None, k
else:
cutoffs = list(cutoffs) + [np.inf]
cutoffs = np.array(cutoffs)
yb = mc.User_Defined(y.flatten(), np.array(cutoffs)).yb.reshape(
y.shape)
k = len(cutoffs)
return yb, cutoffs[:-1], k | [
"def",
"_maybe_classify",
"(",
"self",
",",
"y",
",",
"k",
",",
"cutoffs",
")",
":",
"rows",
",",
"cols",
"=",
"y",
".",
"shape",
"if",
"cutoffs",
"is",
"None",
":",
"if",
"self",
".",
"fixed",
":",
"mcyb",
"=",
"mc",
".",
"Quantiles",
"(",
"y",
... | Helper method for classifying continuous data. | [
"Helper",
"method",
"for",
"classifying",
"continuous",
"data",
"."
] | 13fae6c18933614be78e91a6b5060693bea33a04 | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/markov.py#L832-L855 | train |
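`_maybe_classify` above bins continuous data either by quantiles or by user-supplied cutoffs (with `np.inf` appended as the last bound). A rough NumPy-only sketch of the cutoff branch, with invented values and without the mapclassify API:

```python
import numpy as np

y = np.array([2.1, 5.0, 7.3, 9.9, 12.4, 15.0])
cutoffs = [5.0, 10.0]                        # user-defined upper bounds
bins = np.array(list(cutoffs) + [np.inf])    # append +inf, as in _maybe_classify
yb = np.searchsorted(bins, y, side="left")   # class id = first bound >= value
print(yb)                                    # [0 0 1 1 2 2]
```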
pysal/giddy | giddy/markov.py | LISA_Markov.spillover | def spillover(self, quadrant=1, neighbors_on=False):
"""
Detect spillover locations for diffusion in LISA Markov.
Parameters
----------
quadrant : int
which quadrant in the scatterplot should form the core
of a cluster.
neighbors_on : binary
If false, then only the 1st order neighbors of a core
location are included in the cluster.
If true, neighbors of cluster core 1st order neighbors
are included in the cluster.
Returns
-------
results : dictionary
two keys - values pairs:
'components' - array (n, t)
values are integer ids (starting at 1) indicating which
component/cluster observation i in period t belonged to.
'spillover' - array (n, t-1)
binary values indicating if the location was a
spill-over location that became a new member of a
previously existing cluster.
Examples
--------
>>> import libpysal
>>> from giddy.markov import LISA_Markov
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> years = list(range(1929, 2010))
>>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> np.random.seed(10)
>>> lm_random = LISA_Markov(pci, w, permutations=99)
>>> r = lm_random.spillover()
>>> (r['components'][:, 12] > 0).sum()
17
>>> (r['components'][:, 13]>0).sum()
23
>>> (r['spill_over'][:,12]>0).sum()
6
Including neighbors of core neighbors
>>> rn = lm_random.spillover(neighbors_on=True)
>>> (rn['components'][:, 12] > 0).sum()
26
>>> (rn["components"][:, 13] > 0).sum()
34
>>> (rn["spill_over"][:, 12] > 0).sum()
8
"""
n, k = self.q.shape
if self.permutations:
spill_over = np.zeros((n, k - 1))
components = np.zeros((n, k))
i2id = {} # handle string keys
for key in list(self.w.neighbors.keys()):
idx = self.w.id2i[key]
i2id[idx] = key
sig_lisas = (self.q == quadrant) \
* (self.p_values <= self.significance_level)
sig_ids = [np.nonzero(
sig_lisas[:, i])[0].tolist() for i in range(k)]
neighbors = self.w.neighbors
for t in range(k - 1):
s1 = sig_ids[t]
s2 = sig_ids[t + 1]
g1 = Graph(undirected=True)
for i in s1:
for neighbor in neighbors[i2id[i]]:
g1.add_edge(i2id[i], neighbor, 1.0)
if neighbors_on:
for nn in neighbors[neighbor]:
g1.add_edge(neighbor, nn, 1.0)
components1 = g1.connected_components(op=gt)
components1 = [list(c.nodes) for c in components1]
g2 = Graph(undirected=True)
for i in s2:
for neighbor in neighbors[i2id[i]]:
g2.add_edge(i2id[i], neighbor, 1.0)
if neighbors_on:
for nn in neighbors[neighbor]:
g2.add_edge(neighbor, nn, 1.0)
components2 = g2.connected_components(op=gt)
components2 = [list(c.nodes) for c in components2]
c2 = []
c1 = []
for c in components2:
c2.extend(c)
for c in components1:
c1.extend(c)
new_ids = [j for j in c2 if j not in c1]
spill_ids = []
for j in new_ids:
# find j's component in period 2
cj = [c for c in components2 if j in c][0]
# for members of j's component in period 2, check if they
# belonged to any components in period 1
for i in cj:
if i in c1:
spill_ids.append(j)
break
for spill_id in spill_ids:
id = self.w.id2i[spill_id]
spill_over[id, t] = 1
for c, component in enumerate(components1):
for i in component:
ii = self.w.id2i[i]
components[ii, t] = c + 1
results = {}
results['components'] = components
results['spill_over'] = spill_over
return results
else:
return None | python | def spillover(self, quadrant=1, neighbors_on=False):
"""
Detect spillover locations for diffusion in LISA Markov.
Parameters
----------
quadrant : int
which quadrant in the scatterplot should form the core
of a cluster.
neighbors_on : binary
If false, then only the 1st order neighbors of a core
location are included in the cluster.
If true, neighbors of cluster core 1st order neighbors
are included in the cluster.
Returns
-------
results : dictionary
two keys - values pairs:
'components' - array (n, t)
values are integer ids (starting at 1) indicating which
component/cluster observation i in period t belonged to.
'spillover' - array (n, t-1)
binary values indicating if the location was a
spill-over location that became a new member of a
previously existing cluster.
Examples
--------
>>> import libpysal
>>> from giddy.markov import LISA_Markov
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> years = list(range(1929, 2010))
>>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> np.random.seed(10)
>>> lm_random = LISA_Markov(pci, w, permutations=99)
>>> r = lm_random.spillover()
>>> (r['components'][:, 12] > 0).sum()
17
>>> (r['components'][:, 13]>0).sum()
23
>>> (r['spill_over'][:,12]>0).sum()
6
Including neighbors of core neighbors
>>> rn = lm_random.spillover(neighbors_on=True)
>>> (rn['components'][:, 12] > 0).sum()
26
>>> (rn["components"][:, 13] > 0).sum()
34
>>> (rn["spill_over"][:, 12] > 0).sum()
8
"""
n, k = self.q.shape
if self.permutations:
spill_over = np.zeros((n, k - 1))
components = np.zeros((n, k))
i2id = {} # handle string keys
for key in list(self.w.neighbors.keys()):
idx = self.w.id2i[key]
i2id[idx] = key
sig_lisas = (self.q == quadrant) \
* (self.p_values <= self.significance_level)
sig_ids = [np.nonzero(
sig_lisas[:, i])[0].tolist() for i in range(k)]
neighbors = self.w.neighbors
for t in range(k - 1):
s1 = sig_ids[t]
s2 = sig_ids[t + 1]
g1 = Graph(undirected=True)
for i in s1:
for neighbor in neighbors[i2id[i]]:
g1.add_edge(i2id[i], neighbor, 1.0)
if neighbors_on:
for nn in neighbors[neighbor]:
g1.add_edge(neighbor, nn, 1.0)
components1 = g1.connected_components(op=gt)
components1 = [list(c.nodes) for c in components1]
g2 = Graph(undirected=True)
for i in s2:
for neighbor in neighbors[i2id[i]]:
g2.add_edge(i2id[i], neighbor, 1.0)
if neighbors_on:
for nn in neighbors[neighbor]:
g2.add_edge(neighbor, nn, 1.0)
components2 = g2.connected_components(op=gt)
components2 = [list(c.nodes) for c in components2]
c2 = []
c1 = []
for c in components2:
c2.extend(c)
for c in components1:
c1.extend(c)
new_ids = [j for j in c2 if j not in c1]
spill_ids = []
for j in new_ids:
# find j's component in period 2
cj = [c for c in components2 if j in c][0]
# for members of j's component in period 2, check if they
# belonged to any components in period 1
for i in cj:
if i in c1:
spill_ids.append(j)
break
for spill_id in spill_ids:
id = self.w.id2i[spill_id]
spill_over[id, t] = 1
for c, component in enumerate(components1):
for i in component:
ii = self.w.id2i[i]
components[ii, t] = c + 1
results = {}
results['components'] = components
results['spill_over'] = spill_over
return results
else:
return None | [
"def",
"spillover",
"(",
"self",
",",
"quadrant",
"=",
"1",
",",
"neighbors_on",
"=",
"False",
")",
":",
"n",
",",
"k",
"=",
"self",
".",
"q",
".",
"shape",
"if",
"self",
".",
"permutations",
":",
"spill_over",
"=",
"np",
".",
"zeros",
"(",
"(",
... | Detect spillover locations for diffusion in LISA Markov.
Parameters
----------
quadrant : int
which quadrant in the scatterplot should form the core
of a cluster.
neighbors_on : binary
If false, then only the 1st order neighbors of a core
location are included in the cluster.
If true, neighbors of cluster core 1st order neighbors
are included in the cluster.
Returns
-------
results : dictionary
two keys - values pairs:
'components' - array (n, t)
values are integer ids (starting at 1) indicating which
component/cluster observation i in period t belonged to.
'spillover' - array (n, t-1)
binary values indicating if the location was a
spill-over location that became a new member of a
previously existing cluster.
Examples
--------
>>> import libpysal
>>> from giddy.markov import LISA_Markov
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> years = list(range(1929, 2010))
>>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> np.random.seed(10)
>>> lm_random = LISA_Markov(pci, w, permutations=99)
>>> r = lm_random.spillover()
>>> (r['components'][:, 12] > 0).sum()
17
>>> (r['components'][:, 13]>0).sum()
23
>>> (r['spill_over'][:,12]>0).sum()
6
Including neighbors of core neighbors
>>> rn = lm_random.spillover(neighbors_on=True)
>>> (rn['components'][:, 12] > 0).sum()
26
>>> (rn["components"][:, 13] > 0).sum()
34
>>> (rn["spill_over"][:, 12] > 0).sum()
8 | [
"Detect",
"spillover",
"locations",
"for",
"diffusion",
"in",
"LISA",
"Markov",
"."
] | 13fae6c18933614be78e91a6b5060693bea33a04 | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/markov.py#L1212-L1333 | train |
kolypto/py-flask-jsontools | flask_jsontools/formatting.py | get_entity_propnames | def get_entity_propnames(entity):
""" Get entity property names
:param entity: Entity
:type entity: sqlalchemy.ext.declarative.api.DeclarativeMeta
:returns: Set of entity property names
:rtype: set
"""
ins = entity if isinstance(entity, InstanceState) else inspect(entity)
return set(
ins.mapper.column_attrs.keys() + # Columns
ins.mapper.relationships.keys() # Relationships
) | python | def get_entity_propnames(entity):
""" Get entity property names
:param entity: Entity
:type entity: sqlalchemy.ext.declarative.api.DeclarativeMeta
:returns: Set of entity property names
:rtype: set
"""
ins = entity if isinstance(entity, InstanceState) else inspect(entity)
return set(
ins.mapper.column_attrs.keys() + # Columns
ins.mapper.relationships.keys() # Relationships
) | [
"def",
"get_entity_propnames",
"(",
"entity",
")",
":",
"ins",
"=",
"entity",
"if",
"isinstance",
"(",
"entity",
",",
"InstanceState",
")",
"else",
"inspect",
"(",
"entity",
")",
"return",
"set",
"(",
"ins",
".",
"mapper",
".",
"column_attrs",
".",
"keys",... | Get entity property names
:param entity: Entity
:type entity: sqlalchemy.ext.declarative.api.DeclarativeMeta
:returns: Set of entity property names
:rtype: set | [
"Get",
"entity",
"property",
"names"
] | 1abee2d40e6db262e43f0c534e90faaa9b26246a | https://github.com/kolypto/py-flask-jsontools/blob/1abee2d40e6db262e43f0c534e90faaa9b26246a/flask_jsontools/formatting.py#L31-L43 | train |
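A hedged usage sketch for `get_entity_propnames` with a throwaway declarative model (the model and the import paths are illustrative and may differ across SQLAlchemy versions; only the `column_attrs`/`relationships` accessors come from the entry above):

```python
from sqlalchemy import Column, Integer, String, inspect
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class User(Base):                     # hypothetical model, not part of flask-jsontools
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)

mapper = inspect(User)                # inspecting the class yields its Mapper
props = set(mapper.column_attrs.keys()) | set(mapper.relationships.keys())
print(props)                          # {'id', 'name'}
```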
kolypto/py-flask-jsontools | flask_jsontools/formatting.py | get_entity_loaded_propnames | def get_entity_loaded_propnames(entity):
""" Get entity property names that are loaded (e.g. won't produce new queries)
:param entity: Entity
:type entity: sqlalchemy.ext.declarative.api.DeclarativeMeta
:returns: Set of entity property names
:rtype: set
"""
ins = inspect(entity)
keynames = get_entity_propnames(ins)
# If the entity is not transient -- exclude unloaded keys
# Transient entities won't load these anyway, so it's safe to include all columns and get defaults
if not ins.transient:
keynames -= ins.unloaded
# If the entity is expired -- reload expired attributes as well
# Expired attributes are usually unloaded as well!
if ins.expired:
keynames |= ins.expired_attributes
# Finish
return keynames | python | def get_entity_loaded_propnames(entity):
""" Get entity property names that are loaded (e.g. won't produce new queries)
:param entity: Entity
:type entity: sqlalchemy.ext.declarative.api.DeclarativeMeta
:returns: Set of entity property names
:rtype: set
"""
ins = inspect(entity)
keynames = get_entity_propnames(ins)
# If the entity is not transient -- exclude unloaded keys
# Transient entities won't load these anyway, so it's safe to include all columns and get defaults
if not ins.transient:
keynames -= ins.unloaded
# If the entity is expired -- reload expired attributes as well
# Expired attributes are usually unloaded as well!
if ins.expired:
keynames |= ins.expired_attributes
# Finish
return keynames | [
"def",
"get_entity_loaded_propnames",
"(",
"entity",
")",
":",
"ins",
"=",
"inspect",
"(",
"entity",
")",
"keynames",
"=",
"get_entity_propnames",
"(",
"ins",
")",
"# If the entity is not transient -- exclude unloaded keys",
"# Transient entities won't load these anyway, so it'... | Get entity property names that are loaded (e.g. won't produce new queries)
:param entity: Entity
:type entity: sqlalchemy.ext.declarative.api.DeclarativeMeta
:returns: Set of entity property names
:rtype: set | [
"Get",
"entity",
"property",
"names",
"that",
"are",
"loaded",
"(",
"e",
".",
"g",
".",
"won",
"t",
"produce",
"new",
"queries",
")"
] | 1abee2d40e6db262e43f0c534e90faaa9b26246a | https://github.com/kolypto/py-flask-jsontools/blob/1abee2d40e6db262e43f0c534e90faaa9b26246a/flask_jsontools/formatting.py#L46-L68 | train |
pyinvoke/invocations | invocations/packaging/semantic_version_monkey.py | next_minor | def next_minor(self):
"""
Return a Version whose minor number is one greater than self's.
.. note::
The new Version will always have a zeroed-out bugfix/tertiary version
number, because the "next minor release" of e.g. 1.2.1 is 1.3.0, not
1.3.1.
"""
clone = self.clone()
clone.minor += 1
clone.patch = 0
return clone | python | def next_minor(self):
"""
Return a Version whose minor number is one greater than self's.
.. note::
The new Version will always have a zeroed-out bugfix/tertiary version
number, because the "next minor release" of e.g. 1.2.1 is 1.3.0, not
1.3.1.
"""
clone = self.clone()
clone.minor += 1
clone.patch = 0
return clone | [
"def",
"next_minor",
"(",
"self",
")",
":",
"clone",
"=",
"self",
".",
"clone",
"(",
")",
"clone",
".",
"minor",
"+=",
"1",
"clone",
".",
"patch",
"=",
"0",
"return",
"clone"
] | Return a Version whose minor number is one greater than self's.
.. note::
The new Version will always have a zeroed-out bugfix/tertiary version
number, because the "next minor release" of e.g. 1.2.1 is 1.3.0, not
1.3.1. | [
"Return",
"a",
"Version",
"whose",
"minor",
"number",
"is",
"one",
"greater",
"than",
"self",
"s",
"."
] | bbf1b319bd1536817d5301ceb9eeb2f31830e5dc | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/packaging/semantic_version_monkey.py#L26-L38 | train |
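The patched `next_minor` bumps the minor number and zeroes the patch. The same rule with a plain tuple, independent of the `semantic_version` package:

```python
def next_minor(version):
    """Next minor release for a (major, minor, patch) tuple: 1.2.1 -> 1.3.0."""
    major, minor, _patch = version
    return (major, minor + 1, 0)

assert next_minor((1, 2, 1)) == (1, 3, 0)
```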
roskakori/pygount | pygount/command.py | _check_encoding | def _check_encoding(name, encoding_to_check, alternative_encoding, source=None):
"""
Check that ``encoding`` is a valid Python encoding
:param name: name under which the encoding is known to the user, e.g. 'default encoding'
:param encoding_to_check: name of the encoding to check, e.g. 'utf-8'
:param source: source where the encoding has been set, e.g. option name
:raise pygount.common.OptionError if ``encoding`` is not a valid Python encoding
"""
assert name is not None
if encoding_to_check not in (alternative_encoding, 'chardet', None):
try:
''.encode(encoding_to_check)
except LookupError:
raise pygount.common.OptionError(
'{0} is "{1}" but must be "{2}" or a known Python encoding'.format(
name, encoding_to_check, alternative_encoding),
source) | python | def _check_encoding(name, encoding_to_check, alternative_encoding, source=None):
"""
Check that ``encoding`` is a valid Python encoding
:param name: name under which the encoding is known to the user, e.g. 'default encoding'
:param encoding_to_check: name of the encoding to check, e.g. 'utf-8'
:param source: source where the encoding has been set, e.g. option name
:raise pygount.common.OptionError if ``encoding`` is not a valid Python encoding
"""
assert name is not None
if encoding_to_check not in (alternative_encoding, 'chardet', None):
try:
''.encode(encoding_to_check)
except LookupError:
raise pygount.common.OptionError(
'{0} is "{1}" but must be "{2}" or a known Python encoding'.format(
name, encoding_to_check, alternative_encoding),
source) | [
"def",
"_check_encoding",
"(",
"name",
",",
"encoding_to_check",
",",
"alternative_encoding",
",",
"source",
"=",
"None",
")",
":",
"assert",
"name",
"is",
"not",
"None",
"if",
"encoding_to_check",
"not",
"in",
"(",
"alternative_encoding",
",",
"'chardet'",
",",... | Check that ``encoding`` is a valid Python encoding
:param name: name under which the encoding is known to the user, e.g. 'default encoding'
:param encoding_to_check: name of the encoding to check, e.g. 'utf-8'
:param source: source where the encoding has been set, e.g. option name
:raise pygount.common.OptionError if ``encoding`` is not a valid Python encoding | [
"Check",
"that",
"encoding",
"is",
"a",
"valid",
"Python",
"encoding",
":",
"param",
"name",
":",
"name",
"under",
"which",
"the",
"encoding",
"is",
"known",
"to",
"the",
"user",
"e",
".",
"g",
".",
"default",
"encoding",
":",
"param",
"encoding_to_check",... | c2c150a534ba5be498eb39cb78fc6a531d62f145 | https://github.com/roskakori/pygount/blob/c2c150a534ba5be498eb39cb78fc6a531d62f145/pygount/command.py#L58-L75 | train |
roskakori/pygount | pygount/common.py | lines | def lines(text):
"""
Generator function to yield lines (delimited with ``'\n'``) stored in
``text``. This is useful when a regular expression should only match on a
per line basis in a memory efficient way.
"""
assert text is not None
assert '\r' not in text
previous_newline_index = 0
newline_index = text.find('\n')
while newline_index != -1:
yield text[previous_newline_index:newline_index]
previous_newline_index = newline_index + 1
newline_index = text.find('\n', previous_newline_index)
last_line = text[previous_newline_index:]
if last_line != '':
yield last_line | python | def lines(text):
"""
Generator function to yield lines (delimited with ``'\n'``) stored in
``text``. This is useful when a regular expression should only match on a
per line basis in a memory efficient way.
"""
assert text is not None
assert '\r' not in text
previous_newline_index = 0
newline_index = text.find('\n')
while newline_index != -1:
yield text[previous_newline_index:newline_index]
previous_newline_index = newline_index + 1
newline_index = text.find('\n', previous_newline_index)
last_line = text[previous_newline_index:]
if last_line != '':
yield last_line | [
"def",
"lines",
"(",
"text",
")",
":",
"assert",
"text",
"is",
"not",
"None",
"assert",
"'\\r'",
"not",
"in",
"text",
"previous_newline_index",
"=",
"0",
"newline_index",
"=",
"text",
".",
"find",
"(",
"'\\n'",
")",
"while",
"newline_index",
"!=",
"-",
"... | Generator function to yield lines (delimited with ``'\n'``) stored in
``text``. This is useful when a regular expression should only match on a
per line basis in a memory efficient way. | [
"Generator",
"function",
"to",
"yield",
"lines",
"(",
"delimited",
"with",
"\\",
"n",
")",
"stored",
"in",
"text",
".",
"This",
"is",
"useful",
"when",
"a",
"regular",
"expression",
"should",
"only",
"match",
"on",
"a",
"per",
"line",
"basis",
"in",
"a",... | c2c150a534ba5be498eb39cb78fc6a531d62f145 | https://github.com/roskakori/pygount/blob/c2c150a534ba5be498eb39cb78fc6a531d62f145/pygount/common.py#L101-L117 | train |
roskakori/pygount | pygount/analysis.py | matching_number_line_and_regex | def matching_number_line_and_regex(source_lines, generated_regexes, max_line_count=15):
"""
The first line and its number (starting with 0) in the source code that
indicated that the source code is generated.
:param source_lines: lines of text to scan
:param generated_regexes: regular expressions a line must match to indicate
the source code is generated.
:param max_line_count: maximum number of lines to scan
:return: a tuple of the form ``(number, line, regex)`` or ``None`` if the
source lines do not match any ``generated_regexes``.
"""
initial_numbers_and_lines = enumerate(itertools.islice(source_lines, max_line_count))
matching_number_line_and_regexps = (
(number, line, matching_regex)
for number, line in initial_numbers_and_lines
for matching_regex in generated_regexes
if matching_regex.match(line)
)
possible_first_matching_number_line_and_regexp = list(
itertools.islice(matching_number_line_and_regexps, 1))
result = (possible_first_matching_number_line_and_regexp + [None])[0]
return result | python | def matching_number_line_and_regex(source_lines, generated_regexes, max_line_count=15):
"""
The first line and its number (starting with 0) in the source code that
indicated that the source code is generated.
:param source_lines: lines of text to scan
:param generated_regexes: regular expressions a line must match to indicate
the source code is generated.
:param max_line_count: maximum number of lines to scan
:return: a tuple of the form ``(number, line, regex)`` or ``None`` if the
source lines do not match any ``generated_regexes``.
"""
initial_numbers_and_lines = enumerate(itertools.islice(source_lines, max_line_count))
matching_number_line_and_regexps = (
(number, line, matching_regex)
for number, line in initial_numbers_and_lines
for matching_regex in generated_regexes
if matching_regex.match(line)
)
possible_first_matching_number_line_and_regexp = list(
itertools.islice(matching_number_line_and_regexps, 1))
result = (possible_first_matching_number_line_and_regexp + [None])[0]
return result | [
"def",
"matching_number_line_and_regex",
"(",
"source_lines",
",",
"generated_regexes",
",",
"max_line_count",
"=",
"15",
")",
":",
"initial_numbers_and_lines",
"=",
"enumerate",
"(",
"itertools",
".",
"islice",
"(",
"source_lines",
",",
"max_line_count",
")",
")",
... | The first line and its number (starting with 0) in the source code that
indicated that the source code is generated.
:param source_lines: lines of text to scan
:param generated_regexes: regular expressions a line must match to indicate
the source code is generated.
:param max_line_count: maximum number of lines to scan
:return: a tuple of the form ``(number, line, regex)`` or ``None`` if the
source lines do not match any ``generated_regexes``. | [
"The",
"first",
"line",
"and",
"its",
"number",
"(",
"starting",
"with",
"0",
")",
"in",
"the",
"source",
"code",
"that",
"indicated",
"that",
"the",
"source",
"code",
"is",
"generated",
".",
":",
"param",
"source_lines",
":",
"lines",
"of",
"text",
"to"... | c2c150a534ba5be498eb39cb78fc6a531d62f145 | https://github.com/roskakori/pygount/blob/c2c150a534ba5be498eb39cb78fc6a531d62f145/pygount/analysis.py#L245-L266 | train |
roskakori/pygount | pygount/analysis.py | _pythonized_comments | def _pythonized_comments(tokens):
"""
Similar to tokens but converts strings after a colon (:) to comments.
"""
is_after_colon = True
for token_type, token_text in tokens:
if is_after_colon and (token_type in pygments.token.String):
token_type = pygments.token.Comment
elif token_text == ':':
is_after_colon = True
elif token_type not in pygments.token.Comment:
is_whitespace = len(token_text.rstrip(' \f\n\r\t')) == 0
if not is_whitespace:
is_after_colon = False
yield token_type, token_text | python | def _pythonized_comments(tokens):
"""
Similar to tokens but converts strings after a colon (:) to comments.
"""
is_after_colon = True
for token_type, token_text in tokens:
if is_after_colon and (token_type in pygments.token.String):
token_type = pygments.token.Comment
elif token_text == ':':
is_after_colon = True
elif token_type not in pygments.token.Comment:
is_whitespace = len(token_text.rstrip(' \f\n\r\t')) == 0
if not is_whitespace:
is_after_colon = False
yield token_type, token_text | [
"def",
"_pythonized_comments",
"(",
"tokens",
")",
":",
"is_after_colon",
"=",
"True",
"for",
"token_type",
",",
"token_text",
"in",
"tokens",
":",
"if",
"is_after_colon",
"and",
"(",
"token_type",
"in",
"pygments",
".",
"token",
".",
"String",
")",
":",
"to... | Similar to tokens but converts strings after a colon (:) to comments. | [
"Similar",
"to",
"tokens",
"but",
"converts",
"strings",
"after",
"a",
"colon",
"(",
":",
")",
"to",
"comments",
"."
] | c2c150a534ba5be498eb39cb78fc6a531d62f145 | https://github.com/roskakori/pygount/blob/c2c150a534ba5be498eb39cb78fc6a531d62f145/pygount/analysis.py#L299-L313 | train |
roskakori/pygount | pygount/analysis.py | encoding_for | def encoding_for(source_path, encoding='automatic', fallback_encoding=None):
"""
The encoding used by the text file stored in ``source_path``.
The algorithm used is:
* If ``encoding`` is ``'automatic``, attempt the following:
1. Check BOM for UTF-8, UTF-16 and UTF-32.
2. Look for XML prolog or magic heading like ``# -*- coding: cp1252 -*-``
3. Read the file using UTF-8.
4. If all this fails, use assume the ``fallback_encoding``.
* If ``encoding`` is ``'chardet`` use :mod:`chardet` to obtain the encoding.
* For any other ``encoding`` simply use the specified value.
"""
assert encoding is not None
if encoding == 'automatic':
with open(source_path, 'rb') as source_file:
heading = source_file.read(128)
result = None
if len(heading) == 0:
# File is empty, assume a dummy encoding.
result = 'utf-8'
if result is None:
# Check for known BOMs.
for bom, encoding in _BOM_TO_ENCODING_MAP.items():
if heading[:len(bom)] == bom:
result = encoding
break
if result is None:
# Look for common headings that indicate the encoding.
ascii_heading = heading.decode('ascii', errors='replace')
ascii_heading = ascii_heading.replace('\r\n', '\n')
ascii_heading = ascii_heading.replace('\r', '\n')
ascii_heading = '\n'.join(ascii_heading.split('\n')[:2]) + '\n'
coding_magic_match = _CODING_MAGIC_REGEX.match(ascii_heading)
if coding_magic_match is not None:
result = coding_magic_match.group('encoding')
else:
first_line = ascii_heading.split('\n')[0]
xml_prolog_match = _XML_PROLOG_REGEX.match(first_line)
if xml_prolog_match is not None:
result = xml_prolog_match.group('encoding')
elif encoding == 'chardet':
assert _detector is not None, \
'without chardet installed, encoding="chardet" must be rejected before calling encoding_for()'
_detector.reset()
with open(source_path, 'rb') as source_file:
for line in source_file.readlines():
_detector.feed(line)
if _detector.done:
break
result = _detector.result['encoding']
if result is None:
_log.warning(
'%s: chardet cannot determine encoding, assuming fallback encoding %s',
source_path, fallback_encoding)
result = fallback_encoding
else:
# Simply use the specified encoding.
result = encoding
if result is None:
# Encoding 'automatic' or 'chardet' failed to detect anything.
if fallback_encoding is not None:
# If defined, use the fallback encoding.
result = fallback_encoding
else:
try:
# Attempt to read the file as UTF-8.
with open(source_path, 'r', encoding='utf-8') as source_file:
source_file.read()
result = 'utf-8'
except UnicodeDecodeError:
# UTF-8 did not work out, use the default as last resort.
result = DEFAULT_FALLBACK_ENCODING
_log.debug('%s: no fallback encoding specified, using %s', source_path, result)
assert result is not None
return result | python | def encoding_for(source_path, encoding='automatic', fallback_encoding=None):
"""
The encoding used by the text file stored in ``source_path``.
The algorithm used is:
* If ``encoding`` is ``'automatic``, attempt the following:
1. Check BOM for UTF-8, UTF-16 and UTF-32.
2. Look for XML prolog or magic heading like ``# -*- coding: cp1252 -*-``
3. Read the file using UTF-8.
4. If all this fails, use assume the ``fallback_encoding``.
* If ``encoding`` is ``'chardet`` use :mod:`chardet` to obtain the encoding.
* For any other ``encoding`` simply use the specified value.
"""
assert encoding is not None
if encoding == 'automatic':
with open(source_path, 'rb') as source_file:
heading = source_file.read(128)
result = None
if len(heading) == 0:
# File is empty, assume a dummy encoding.
result = 'utf-8'
if result is None:
# Check for known BOMs.
for bom, encoding in _BOM_TO_ENCODING_MAP.items():
if heading[:len(bom)] == bom:
result = encoding
break
if result is None:
# Look for common headings that indicate the encoding.
ascii_heading = heading.decode('ascii', errors='replace')
ascii_heading = ascii_heading.replace('\r\n', '\n')
ascii_heading = ascii_heading.replace('\r', '\n')
ascii_heading = '\n'.join(ascii_heading.split('\n')[:2]) + '\n'
coding_magic_match = _CODING_MAGIC_REGEX.match(ascii_heading)
if coding_magic_match is not None:
result = coding_magic_match.group('encoding')
else:
first_line = ascii_heading.split('\n')[0]
xml_prolog_match = _XML_PROLOG_REGEX.match(first_line)
if xml_prolog_match is not None:
result = xml_prolog_match.group('encoding')
elif encoding == 'chardet':
assert _detector is not None, \
'without chardet installed, encoding="chardet" must be rejected before calling encoding_for()'
_detector.reset()
with open(source_path, 'rb') as source_file:
for line in source_file.readlines():
_detector.feed(line)
if _detector.done:
break
result = _detector.result['encoding']
if result is None:
_log.warning(
'%s: chardet cannot determine encoding, assuming fallback encoding %s',
source_path, fallback_encoding)
result = fallback_encoding
else:
# Simply use the specified encoding.
result = encoding
if result is None:
# Encoding 'automatic' or 'chardet' failed to detect anything.
if fallback_encoding is not None:
# If defined, use the fallback encoding.
result = fallback_encoding
else:
try:
# Attempt to read the file as UTF-8.
with open(source_path, 'r', encoding='utf-8') as source_file:
source_file.read()
result = 'utf-8'
except UnicodeDecodeError:
# UTF-8 did not work out, use the default as last resort.
result = DEFAULT_FALLBACK_ENCODING
_log.debug('%s: no fallback encoding specified, using %s', source_path, result)
assert result is not None
return result | [
"def",
"encoding_for",
"(",
"source_path",
",",
"encoding",
"=",
"'automatic'",
",",
"fallback_encoding",
"=",
"None",
")",
":",
"assert",
"encoding",
"is",
"not",
"None",
"if",
"encoding",
"==",
"'automatic'",
":",
"with",
"open",
"(",
"source_path",
",",
"... | The encoding used by the text file stored in ``source_path``.
The algorithm used is:
* If ``encoding`` is ``'automatic``, attempt the following:
1. Check BOM for UTF-8, UTF-16 and UTF-32.
2. Look for XML prolog or magic heading like ``# -*- coding: cp1252 -*-``
3. Read the file using UTF-8.
4. If all this fails, use assume the ``fallback_encoding``.
* If ``encoding`` is ``'chardet`` use :mod:`chardet` to obtain the encoding.
* For any other ``encoding`` simply use the specified value. | [
"The",
"encoding",
"used",
"by",
"the",
"text",
"file",
"stored",
"in",
"source_path",
"."
] | c2c150a534ba5be498eb39cb78fc6a531d62f145 | https://github.com/roskakori/pygount/blob/c2c150a534ba5be498eb39cb78fc6a531d62f145/pygount/analysis.py#L340-L418 | train |
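`encoding_for` above checks the first bytes against known byte-order marks before falling back to magic comments and UTF-8. A compact standalone sketch of just the BOM branch, using the `codecs` constants (not pygount's exact `_BOM_TO_ENCODING_MAP`):

```python
import codecs

# UTF-32 BOMs must be tested before UTF-16, because BOM_UTF32_LE starts with BOM_UTF16_LE
_BOMS = [
    (codecs.BOM_UTF32_LE, 'utf-32-le'),
    (codecs.BOM_UTF32_BE, 'utf-32-be'),
    (codecs.BOM_UTF8, 'utf-8-sig'),
    (codecs.BOM_UTF16_LE, 'utf-16-le'),
    (codecs.BOM_UTF16_BE, 'utf-16-be'),
]

def encoding_from_bom(heading):
    """Encoding implied by a BOM at the start of `heading` (bytes), or None."""
    for bom, name in _BOMS:
        if heading.startswith(bom):
            return name
    return None

assert encoding_from_bom(codecs.BOM_UTF8 + b'hello') == 'utf-8-sig'
assert encoding_from_bom(b'plain ascii') is None
```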
roskakori/pygount | pygount/analysis.py | has_lexer | def has_lexer(source_path):
"""
Initial quick check if there is a lexer for ``source_path``. This removes
the need for calling :py:func:`pygments.lexers.guess_lexer_for_filename()`
which fully reads the source file.
"""
result = bool(pygments.lexers.find_lexer_class_for_filename(source_path))
if not result:
suffix = os.path.splitext(os.path.basename(source_path))[1].lstrip('.')
result = suffix in _SUFFIX_TO_FALLBACK_LEXER_MAP
return result | python | def has_lexer(source_path):
"""
Initial quick check if there is a lexer for ``source_path``. This removes
the need for calling :py:func:`pygments.lexers.guess_lexer_for_filename()`
which fully reads the source file.
"""
result = bool(pygments.lexers.find_lexer_class_for_filename(source_path))
if not result:
suffix = os.path.splitext(os.path.basename(source_path))[1].lstrip('.')
result = suffix in _SUFFIX_TO_FALLBACK_LEXER_MAP
return result | [
"def",
"has_lexer",
"(",
"source_path",
")",
":",
"result",
"=",
"bool",
"(",
"pygments",
".",
"lexers",
".",
"find_lexer_class_for_filename",
"(",
"source_path",
")",
")",
"if",
"not",
"result",
":",
"suffix",
"=",
"os",
".",
"path",
".",
"splitext",
"(",... | Initial quick check if there is a lexer for ``source_path``. This removes
the need for calling :py:func:`pygments.lexers.guess_lexer_for_filename()`
which fully reads the source file. | [
"Initial",
"quick",
"check",
"if",
"there",
"is",
"a",
"lexer",
"for",
"source_path",
".",
"This",
"removes",
"the",
"need",
"for",
"calling",
":",
"py",
":",
"func",
":",
"pygments",
".",
"lexers",
".",
"guess_lexer_for_filename",
"()",
"which",
"fully",
... | c2c150a534ba5be498eb39cb78fc6a531d62f145 | https://github.com/roskakori/pygount/blob/c2c150a534ba5be498eb39cb78fc6a531d62f145/pygount/analysis.py#L459-L469 | train |
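A quick check of the fast path used by `has_lexer` (assumes Pygments is installed; the exact set of registered filename patterns depends on the Pygments version):

```python
import pygments.lexers

print(pygments.lexers.find_lexer_class_for_filename('example.py') is not None)           # True
print(pygments.lexers.find_lexer_class_for_filename('example.unknown-ext') is not None)  # False
```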
roskakori/pygount | pygount/analysis.py | source_analysis | def source_analysis(
source_path, group, encoding='automatic', fallback_encoding='cp1252',
generated_regexes=pygount.common.regexes_from(DEFAULT_GENERATED_PATTERNS_TEXT),
duplicate_pool=None):
"""
Analysis for line counts in source code stored in ``source_path``.
:param source_path:
:param group: name of a logical group the sourc code belongs to, e.g. a
package.
:param encoding: encoding according to :func:`encoding_for`
:param fallback_encoding: fallback encoding according to
:func:`encoding_for`
:return: a :class:`SourceAnalysis`
"""
assert encoding is not None
assert generated_regexes is not None
result = None
lexer = None
source_code = None
source_size = os.path.getsize(source_path)
if source_size == 0:
_log.info('%s: is empty', source_path)
result = pseudo_source_analysis(source_path, group, SourceState.empty)
elif is_binary_file(source_path):
_log.info('%s: is binary', source_path)
result = pseudo_source_analysis(source_path, group, SourceState.binary)
elif not has_lexer(source_path):
_log.info('%s: unknown language', source_path)
result = pseudo_source_analysis(source_path, group, SourceState.unknown)
elif duplicate_pool is not None:
duplicate_path = duplicate_pool.duplicate_path(source_path)
if duplicate_path is not None:
_log.info('%s: is a duplicate of %s', source_path, duplicate_path)
result = pseudo_source_analysis(source_path, group, SourceState.duplicate, duplicate_path)
if result is None:
if encoding in ('automatic', 'chardet'):
encoding = encoding_for(source_path, encoding, fallback_encoding)
try:
with open(source_path, 'r', encoding=encoding) as source_file:
source_code = source_file.read()
except (LookupError, OSError, UnicodeError) as error:
_log.warning('cannot read %s using encoding %s: %s', source_path, encoding, error)
result = pseudo_source_analysis(source_path, group, SourceState.error, error)
if result is None:
lexer = guess_lexer(source_path, source_code)
assert lexer is not None
if (result is None) and (len(generated_regexes) != 0):
number_line_and_regex = matching_number_line_and_regex(
pygount.common.lines(source_code), generated_regexes
)
if number_line_and_regex is not None:
number, _, regex = number_line_and_regex
message = 'line {0} matches {1}'.format(number, regex)
_log.info('%s: is generated code because %s', source_path, message)
result = pseudo_source_analysis(source_path, group, SourceState.generated, message)
if result is None:
assert lexer is not None
assert source_code is not None
language = lexer.name
if ('xml' in language.lower()) or (language == 'Genshi'):
dialect = pygount.xmldialect.xml_dialect(source_path, source_code)
if dialect is not None:
language = dialect
_log.info('%s: analyze as %s using encoding %s', source_path, language, encoding)
mark_to_count_map = {'c': 0, 'd': 0, 'e': 0, 's': 0}
for line_parts in _line_parts(lexer, source_code):
mark_to_increment = 'e'
for mark_to_check in ('d', 's', 'c'):
if mark_to_check in line_parts:
mark_to_increment = mark_to_check
mark_to_count_map[mark_to_increment] += 1
result = SourceAnalysis(
path=source_path,
language=language,
group=group,
code=mark_to_count_map['c'],
documentation=mark_to_count_map['d'],
empty=mark_to_count_map['e'],
string=mark_to_count_map['s'],
state=SourceState.analyzed.name,
state_info=None,
)
assert result is not None
return result | python | def source_analysis(
source_path, group, encoding='automatic', fallback_encoding='cp1252',
generated_regexes=pygount.common.regexes_from(DEFAULT_GENERATED_PATTERNS_TEXT),
duplicate_pool=None):
"""
Analysis for line counts in source code stored in ``source_path``.
:param source_path:
:param group: name of a logical group the sourc code belongs to, e.g. a
package.
:param encoding: encoding according to :func:`encoding_for`
:param fallback_encoding: fallback encoding according to
:func:`encoding_for`
:return: a :class:`SourceAnalysis`
"""
assert encoding is not None
assert generated_regexes is not None
result = None
lexer = None
source_code = None
source_size = os.path.getsize(source_path)
if source_size == 0:
_log.info('%s: is empty', source_path)
result = pseudo_source_analysis(source_path, group, SourceState.empty)
elif is_binary_file(source_path):
_log.info('%s: is binary', source_path)
result = pseudo_source_analysis(source_path, group, SourceState.binary)
elif not has_lexer(source_path):
_log.info('%s: unknown language', source_path)
result = pseudo_source_analysis(source_path, group, SourceState.unknown)
elif duplicate_pool is not None:
duplicate_path = duplicate_pool.duplicate_path(source_path)
if duplicate_path is not None:
_log.info('%s: is a duplicate of %s', source_path, duplicate_path)
result = pseudo_source_analysis(source_path, group, SourceState.duplicate, duplicate_path)
if result is None:
if encoding in ('automatic', 'chardet'):
encoding = encoding_for(source_path, encoding, fallback_encoding)
try:
with open(source_path, 'r', encoding=encoding) as source_file:
source_code = source_file.read()
except (LookupError, OSError, UnicodeError) as error:
_log.warning('cannot read %s using encoding %s: %s', source_path, encoding, error)
result = pseudo_source_analysis(source_path, group, SourceState.error, error)
if result is None:
lexer = guess_lexer(source_path, source_code)
assert lexer is not None
if (result is None) and (len(generated_regexes) != 0):
number_line_and_regex = matching_number_line_and_regex(
pygount.common.lines(source_code), generated_regexes
)
if number_line_and_regex is not None:
number, _, regex = number_line_and_regex
message = 'line {0} matches {1}'.format(number, regex)
_log.info('%s: is generated code because %s', source_path, message)
result = pseudo_source_analysis(source_path, group, SourceState.generated, message)
if result is None:
assert lexer is not None
assert source_code is not None
language = lexer.name
if ('xml' in language.lower()) or (language == 'Genshi'):
dialect = pygount.xmldialect.xml_dialect(source_path, source_code)
if dialect is not None:
language = dialect
_log.info('%s: analyze as %s using encoding %s', source_path, language, encoding)
mark_to_count_map = {'c': 0, 'd': 0, 'e': 0, 's': 0}
for line_parts in _line_parts(lexer, source_code):
mark_to_increment = 'e'
for mark_to_check in ('d', 's', 'c'):
if mark_to_check in line_parts:
mark_to_increment = mark_to_check
mark_to_count_map[mark_to_increment] += 1
result = SourceAnalysis(
path=source_path,
language=language,
group=group,
code=mark_to_count_map['c'],
documentation=mark_to_count_map['d'],
empty=mark_to_count_map['e'],
string=mark_to_count_map['s'],
state=SourceState.analyzed.name,
state_info=None,
)
assert result is not None
return result | [
"def",
"source_analysis",
"(",
"source_path",
",",
"group",
",",
"encoding",
"=",
"'automatic'",
",",
"fallback_encoding",
"=",
"'cp1252'",
",",
"generated_regexes",
"=",
"pygount",
".",
"common",
".",
"regexes_from",
"(",
"DEFAULT_GENERATED_PATTERNS_TEXT",
")",
","... | Analysis for line counts in source code stored in ``source_path``.
:param source_path:
:param group: name of a logical group the sourc code belongs to, e.g. a
package.
:param encoding: encoding according to :func:`encoding_for`
:param fallback_encoding: fallback encoding according to
:func:`encoding_for`
:return: a :class:`SourceAnalysis` | [
"Analysis",
"for",
"line",
"counts",
"in",
"source",
"code",
"stored",
"in",
"source_path",
"."
] | c2c150a534ba5be498eb39cb78fc6a531d62f145 | https://github.com/roskakori/pygount/blob/c2c150a534ba5be498eb39cb78fc6a531d62f145/pygount/analysis.py#L484-L570 | train |
radjkarl/imgProcessor | DUMP/interpolationMethods.py | polynomial | def polynomial(img, mask, inplace=False, replace_all=False,
max_dev=1e-5, max_iter=20, order=2):
'''
replace all masked values
calculate flatField from 2d-polynomal fit filling
all high gradient areas within averaged fit-image
returns flatField, average background level, fitted image, valid indices mask
'''
if inplace:
out = img
else:
out = img.copy()
lastm = 0
for _ in range(max_iter):
out2 = polyfit2dGrid(out, mask, order=order, copy=not inplace,
replace_all=replace_all)
if replace_all:
out = out2
break
res = (np.abs(out2 - out)).mean()
print('residuum: ', res)
if res < max_dev:
out = out2
break
out = out2
mask = _highGrad(out)
m = mask.sum()
if m == lastm or m == img.size:
break
lastm = m
out = np.clip(out, 0, 1, out=out) # if inplace else None)
return out | python | def polynomial(img, mask, inplace=False, replace_all=False,
max_dev=1e-5, max_iter=20, order=2):
'''
replace all masked values
calculate flatField from 2d-polynomal fit filling
all high gradient areas within averaged fit-image
returns flatField, average background level, fitted image, valid indices mask
'''
if inplace:
out = img
else:
out = img.copy()
lastm = 0
for _ in range(max_iter):
out2 = polyfit2dGrid(out, mask, order=order, copy=not inplace,
replace_all=replace_all)
if replace_all:
out = out2
break
res = (np.abs(out2 - out)).mean()
print('residuum: ', res)
if res < max_dev:
out = out2
break
out = out2
mask = _highGrad(out)
m = mask.sum()
if m == lastm or m == img.size:
break
lastm = m
out = np.clip(out, 0, 1, out=out) # if inplace else None)
return out | [
"def",
"polynomial",
"(",
"img",
",",
"mask",
",",
"inplace",
"=",
"False",
",",
"replace_all",
"=",
"False",
",",
"max_dev",
"=",
"1e-5",
",",
"max_iter",
"=",
"20",
",",
"order",
"=",
"2",
")",
":",
"if",
"inplace",
":",
"out",
"=",
"img",
"else"... | replace all masked values
calculate flatField from 2d-polynomal fit filling
all high gradient areas within averaged fit-image
returns flatField, average background level, fitted image, valid indices mask | [
"replace",
"all",
"masked",
"values",
"calculate",
"flatField",
"from",
"2d",
"-",
"polynomal",
"fit",
"filling",
"all",
"high",
"gradient",
"areas",
"within",
"averaged",
"fit",
"-",
"image",
"returns",
"flatField",
"average",
"background",
"level",
"fitted",
"... | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/DUMP/interpolationMethods.py#L55-L88 | train |
radjkarl/imgProcessor | imgProcessor/uncertainty/simulateUncertDependencyOnExpTime.py | errorDist | def errorDist(scale, measExpTime, n_events_in_expTime,
event_duration, std,
points_per_time=100, n_repetitions=300):
'''
TODO
'''
ntimes = 10
s1 = measExpTime * scale * 10
# exp. time factor 1/16-->16:
p2 = np.logspace(-4, 4, 18, base=2)
t = np.linspace(0, s1, ntimes * points_per_time * s1)
err = None
for rr in range(n_repetitions):
f = _flux(t, n_events_in_expTime, event_duration, std)
e = np.array([_capture(f, t, measExpTime, pp) for pp in p2])
if err is None:
err = e
else:
err += e
err /= (rr + 1)
# normalize, so that error==1 at 1:
try:
fac = findXAt(err, p2, 1)
except:
fac = 1
err /= fac
return p2, err, t, f | python | def errorDist(scale, measExpTime, n_events_in_expTime,
event_duration, std,
points_per_time=100, n_repetitions=300):
'''
TODO
'''
ntimes = 10
s1 = measExpTime * scale * 10
# exp. time factor 1/16-->16:
p2 = np.logspace(-4, 4, 18, base=2)
t = np.linspace(0, s1, ntimes * points_per_time * s1)
err = None
for rr in range(n_repetitions):
f = _flux(t, n_events_in_expTime, event_duration, std)
e = np.array([_capture(f, t, measExpTime, pp) for pp in p2])
if err is None:
err = e
else:
err += e
err /= (rr + 1)
# normalize, so that error==1 at 1:
try:
fac = findXAt(err, p2, 1)
except:
fac = 1
err /= fac
return p2, err, t, f | [
"def",
"errorDist",
"(",
"scale",
",",
"measExpTime",
",",
"n_events_in_expTime",
",",
"event_duration",
",",
"std",
",",
"points_per_time",
"=",
"100",
",",
"n_repetitions",
"=",
"300",
")",
":",
"ntimes",
"=",
"10",
"s1",
"=",
"measExpTime",
"*",
"scale",
... | TODO | [
"TODO"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/uncertainty/simulateUncertDependencyOnExpTime.py#L15-L47 | train |
radjkarl/imgProcessor | imgProcessor/uncertainty/simulateUncertDependencyOnExpTime.py | exampleSignals | def exampleSignals(std=1, dur1=1, dur2=3, dur3=0.2,
n1=0.5, n2=0.5, n3=2):
'''
std ... standard deviation of every signal
dur1...dur3 --> event duration per second
n1...n3 --> number of events per second
'''
np.random.seed(123)
t = np.linspace(0, 10, 100)
f0 = _flux(t, n1, dur1, std, offs=0)
f1 = _flux(t, n2, dur2, std, offs=0)
f2 = _flux(t, n3, dur3, std, offs=0)
return t,f0,f1,f2 | python | def exampleSignals(std=1, dur1=1, dur2=3, dur3=0.2,
n1=0.5, n2=0.5, n3=2):
'''
std ... standard deviation of every signal
dur1...dur3 --> event duration per second
n1...n3 --> number of events per second
'''
np.random.seed(123)
t = np.linspace(0, 10, 100)
f0 = _flux(t, n1, dur1, std, offs=0)
f1 = _flux(t, n2, dur2, std, offs=0)
f2 = _flux(t, n3, dur3, std, offs=0)
return t,f0,f1,f2 | [
"def",
"exampleSignals",
"(",
"std",
"=",
"1",
",",
"dur1",
"=",
"1",
",",
"dur2",
"=",
"3",
",",
"dur3",
"=",
"0.2",
",",
"n1",
"=",
"0.5",
",",
"n2",
"=",
"0.5",
",",
"n3",
"=",
"2",
")",
":",
"np",
".",
"random",
".",
"seed",
"(",
"123",... | std ... standard deviation of every signal
dur1...dur3 --> event duration per second
n1...n3 --> number of events per second | [
"std",
"...",
"standard",
"deviation",
"of",
"every",
"signal",
"dur1",
"...",
"dur3",
"--",
">",
"event",
"duration",
"per",
"second",
"n1",
"...",
"n3",
"--",
">",
"number",
"of",
"events",
"per",
"second"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/uncertainty/simulateUncertDependencyOnExpTime.py#L50-L63 | train |
radjkarl/imgProcessor | imgProcessor/uncertainty/simulateUncertDependencyOnExpTime.py | _flux | def _flux(t, n, duration, std, offs=1):
'''
returns Gaussian shaped signal fluctuations [events]
t --> times to calculate event for
n --> numbers of events per sec
duration --> event duration per sec
std --> std of event if averaged over time
offs --> event offset
'''
duration *= len(t) / t[-1]
duration = int(max(duration, 1))
pos = []
n *= t[-1]
pp = np.arange(len(t))
valid = np.ones_like(t, dtype=bool)
for _ in range(int(round(n))):
try:
ppp = np.random.choice(pp[valid], 1)[0]
pos.append(ppp)
valid[max(0, ppp - duration - 1):ppp + duration + 1] = False
except ValueError:
break
sign = np.random.randint(0, 2, len(pos))
sign[sign == 0] = -1
out = np.zeros_like(t)
amps = np.random.normal(loc=0, scale=1, size=len(pos))
if duration > 2:
evt = gaussian(duration, duration)
evt -= evt[0]
else:
evt = np.ones(shape=duration)
for s, p, a in zip(sign, pos, amps):
pp = duration
if p + duration > len(out):
pp = len(out) - p
out[p:p + pp] = s * a * evt[:pp]
out /= out.std() / std
out += offs
return out | python | def _flux(t, n, duration, std, offs=1):
'''
returns Gaussian shaped signal fluctuations [events]
t --> times to calculate event for
n --> numbers of events per sec
duration --> event duration per sec
std --> std of event if averaged over time
offs --> event offset
'''
duration *= len(t) / t[-1]
duration = int(max(duration, 1))
pos = []
n *= t[-1]
pp = np.arange(len(t))
valid = np.ones_like(t, dtype=bool)
for _ in range(int(round(n))):
try:
ppp = np.random.choice(pp[valid], 1)[0]
pos.append(ppp)
valid[max(0, ppp - duration - 1):ppp + duration + 1] = False
except ValueError:
break
sign = np.random.randint(0, 2, len(pos))
sign[sign == 0] = -1
out = np.zeros_like(t)
amps = np.random.normal(loc=0, scale=1, size=len(pos))
if duration > 2:
evt = gaussian(duration, duration)
evt -= evt[0]
else:
evt = np.ones(shape=duration)
for s, p, a in zip(sign, pos, amps):
pp = duration
if p + duration > len(out):
pp = len(out) - p
out[p:p + pp] = s * a * evt[:pp]
out /= out.std() / std
out += offs
return out | [
"def",
"_flux",
"(",
"t",
",",
"n",
",",
"duration",
",",
"std",
",",
"offs",
"=",
"1",
")",
":",
"duration",
"*=",
"len",
"(",
"t",
")",
"/",
"t",
"[",
"-",
"1",
"]",
"duration",
"=",
"int",
"(",
"max",
"(",
"duration",
",",
"1",
")",
")",... | returns Gaussian shaped signal fluctuations [events]
t --> times to calculate event for
n --> numbers of events per sec
duration --> event duration per sec
std --> std of event if averaged over time
offs --> event offset | [
"returns",
"Gaussian",
"shaped",
"signal",
"fluctuations",
"[",
"events",
"]",
"t",
"--",
">",
"times",
"to",
"calculate",
"event",
"for",
"n",
"--",
">",
"numbers",
"of",
"events",
"per",
"sec",
"duration",
"--",
">",
"event",
"duration",
"per",
"sec",
... | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/uncertainty/simulateUncertDependencyOnExpTime.py#L66-L113 | train |
radjkarl/imgProcessor | imgProcessor/uncertainty/simulateUncertDependencyOnExpTime.py | _capture | def _capture(f, t, t0, factor):
'''
capture signal and return its standard deviation
#TODO: more detail
'''
n_per_sec = len(t) / t[-1]
# len of one split:
n = int(t0 * factor * n_per_sec)
s = len(f) // n
m = s * n
f = f[:m]
ff = np.split(f, s)
m = np.mean(ff, axis=1)
return np.std(m) | python | def _capture(f, t, t0, factor):
'''
capture signal and return its standard deviation
#TODO: more detail
'''
n_per_sec = len(t) / t[-1]
# len of one split:
n = int(t0 * factor * n_per_sec)
s = len(f) // n
m = s * n
f = f[:m]
ff = np.split(f, s)
m = np.mean(ff, axis=1)
return np.std(m) | [
"def",
"_capture",
"(",
"f",
",",
"t",
",",
"t0",
",",
"factor",
")",
":",
"n_per_sec",
"=",
"len",
"(",
"t",
")",
"/",
"t",
"[",
"-",
"1",
"]",
"# len of one split:\r",
"n",
"=",
"int",
"(",
"t0",
"*",
"factor",
"*",
"n_per_sec",
")",
"s",
"="... | capture signal and return its standard deviation
#TODO: more detail | [
"capture",
"signal",
"and",
"return",
"its",
"standard",
"deviation",
"#TODO",
":",
"more",
"detail"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/uncertainty/simulateUncertDependencyOnExpTime.py#L116-L131 | train |
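`_capture` above averages the simulated flux over consecutive exposure windows and reports the scatter of those averages. The same idea in isolation:

```python
import numpy as np

def averaged_std(signal, samples_per_exposure):
    """Std of per-exposure means, dropping any incomplete tail chunk."""
    n_chunks = len(signal) // samples_per_exposure
    trimmed = signal[:n_chunks * samples_per_exposure]
    chunk_means = trimmed.reshape(n_chunks, samples_per_exposure).mean(axis=1)
    return chunk_means.std()

rng = np.random.RandomState(0)
noisy = rng.normal(loc=1.0, scale=0.5, size=1000)
print(averaged_std(noisy, 10))    # noise shrinks roughly with sqrt(exposure length)
print(averaged_std(noisy, 100))
```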
radjkarl/imgProcessor | imgProcessor/utils/genericCameraMatrix.py | genericCameraMatrix | def genericCameraMatrix(shape, angularField=60):
'''
Return a generic camera matrix
[[fx, 0, cx],
[ 0, fy, cy],
[ 0, 0, 1]]
for a given image shape
'''
# http://nghiaho.com/?page_id=576
# assume that the optical centre is in the middle:
cy = int(shape[0] / 2)
cx = int(shape[1] / 2)
# assume that the FOV is 60 DEG (webcam)
fx = fy = cx / np.tan(angularField / 2 * np.pi /
180) # camera focal length
# see
# http://docs.opencv.org/doc/tutorials/calib3d/camera_calibration/camera_calibration.html
return np.array([[fx, 0, cx],
[0, fy, cy],
[0, 0, 1]
], dtype=np.float32) | python | def genericCameraMatrix(shape, angularField=60):
'''
Return a generic camera matrix
[[fx, 0, cx],
[ 0, fy, cy],
[ 0, 0, 1]]
for a given image shape
'''
# http://nghiaho.com/?page_id=576
# assume that the optical centre is in the middle:
cy = int(shape[0] / 2)
cx = int(shape[1] / 2)
# assume that the FOV is 60 DEG (webcam)
fx = fy = cx / np.tan(angularField / 2 * np.pi /
180) # camera focal length
# see
# http://docs.opencv.org/doc/tutorials/calib3d/camera_calibration/camera_calibration.html
return np.array([[fx, 0, cx],
[0, fy, cy],
[0, 0, 1]
], dtype=np.float32) | [
"def",
"genericCameraMatrix",
"(",
"shape",
",",
"angularField",
"=",
"60",
")",
":",
"# http://nghiaho.com/?page_id=576\r",
"# assume that the optical centre is in the middle:\r",
"cy",
"=",
"int",
"(",
"shape",
"[",
"0",
"]",
"/",
"2",
")",
"cx",
"=",
"int",
"("... | Return a generic camera matrix
[[fx, 0, cx],
[ 0, fy, cy],
[ 0, 0, 1]]
for a given image shape | [
"Return",
"a",
"generic",
"camera",
"matrix",
"[[",
"fx",
"0",
"cx",
"]",
"[",
"0",
"fy",
"cy",
"]",
"[",
"0",
"0",
"1",
"]]",
"for",
"a",
"given",
"image",
"shape"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/utils/genericCameraMatrix.py#L7-L28 | train |
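The focal length in `genericCameraMatrix` follows the pinhole relation fx = cx / tan(FOV / 2). A quick numeric check for a 640x480 image and a 60 degree field of view (illustrative numbers only):

```python
import numpy as np

shape = (480, 640)                       # rows, cols
angular_field = 60                       # degrees, as in the default above
cx, cy = shape[1] / 2, shape[0] / 2      # principal point assumed at the centre
fx = cx / np.tan(np.radians(angular_field / 2))
print(cx, cy, round(fx, 1))              # 320.0 240.0 554.3
```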
radjkarl/imgProcessor | imgProcessor/filters/standardDeviation.py | standardDeviation2d | def standardDeviation2d(img, ksize=5, blurred=None):
'''
calculate the spatial resolved standard deviation
for a given 2d array
ksize -> kernel size
blurred(optional) -> with same ksize gaussian filtered image
setting this parameter reduces processing time
'''
if ksize not in (list, tuple):
ksize = (ksize,ksize)
if blurred is None:
blurred = gaussian_filter(img, ksize)
else:
assert blurred.shape == img.shape
std = np.empty_like(img)
_calc(img, ksize[0], ksize[1], blurred, std)
return std | python | def standardDeviation2d(img, ksize=5, blurred=None):
'''
calculate the spatial resolved standard deviation
for a given 2d array
ksize -> kernel size
blurred(optional) -> with same ksize gaussian filtered image
setting this parameter reduces processing time
'''
if ksize not in (list, tuple):
ksize = (ksize,ksize)
if blurred is None:
blurred = gaussian_filter(img, ksize)
else:
assert blurred.shape == img.shape
std = np.empty_like(img)
_calc(img, ksize[0], ksize[1], blurred, std)
return std | [
"def",
"standardDeviation2d",
"(",
"img",
",",
"ksize",
"=",
"5",
",",
"blurred",
"=",
"None",
")",
":",
"if",
"ksize",
"not",
"in",
"(",
"list",
",",
"tuple",
")",
":",
"ksize",
"=",
"(",
"ksize",
",",
"ksize",
")",
"if",
"blurred",
"is",
"None",
... | calculate the spatial resolved standard deviation
for a given 2d array
ksize -> kernel size
blurred(optional) -> with same ksize gaussian filtered image
setting this parameter reduces processing time | [
"calculate",
"the",
"spatial",
"resolved",
"standard",
"deviation",
"for",
"a",
"given",
"2d",
"array",
"ksize",
"-",
">",
"kernel",
"size",
"blurred",
"(",
"optional",
")",
"-",
">",
"with",
"same",
"ksize",
"gaussian",
"filtered",
"image",
"setting",
"this... | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/standardDeviation.py#L9-L31 | train |
radjkarl/imgProcessor | imgProcessor/filters/maskedFilter.py | maskedFilter | def maskedFilter(arr, mask, ksize=30, fill_mask=True,
fn='median'):
'''
fn['mean', 'median']
fill_mask=True:
replaced masked areas with filtered results
fill_mask=False:
masked areas are ignored
'''
if fill_mask:
mask1 = mask
out = arr
else:
mask1 = ~mask
out = np.full_like(arr, fill_value=np.nan)
mask2 = ~mask
if fn == 'mean':
_calcMean(arr, mask1, mask2, out, ksize // 2)
else:
buff = np.empty(shape=(ksize * ksize), dtype=arr.dtype)
_calcMedian(arr, mask1, mask2, out, ksize // 2, buff)
return out | python | def maskedFilter(arr, mask, ksize=30, fill_mask=True,
fn='median'):
'''
fn['mean', 'median']
fill_mask=True:
replaced masked areas with filtered results
fill_mask=False:
masked areas are ignored
'''
if fill_mask:
mask1 = mask
out = arr
else:
mask1 = ~mask
out = np.full_like(arr, fill_value=np.nan)
mask2 = ~mask
if fn == 'mean':
_calcMean(arr, mask1, mask2, out, ksize // 2)
else:
buff = np.empty(shape=(ksize * ksize), dtype=arr.dtype)
_calcMedian(arr, mask1, mask2, out, ksize // 2, buff)
return out | [
"def",
"maskedFilter",
"(",
"arr",
",",
"mask",
",",
"ksize",
"=",
"30",
",",
"fill_mask",
"=",
"True",
",",
"fn",
"=",
"'median'",
")",
":",
"if",
"fill_mask",
":",
"mask1",
"=",
"mask",
"out",
"=",
"arr",
"else",
":",
"mask1",
"=",
"~",
"mask",
... | fn['mean', 'median']
fill_mask=True:
replaced masked areas with filtered results
fill_mask=False:
masked areas are ignored | [
"fn",
"[",
"mean",
"median",
"]",
"fill_mask",
"=",
"True",
":",
"replaced",
"masked",
"areas",
"with",
"filtered",
"results",
"fill_mask",
"=",
"False",
":",
"masked",
"areas",
"are",
"ignored"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/maskedFilter.py#L12-L37 | train |
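A usage sketch for `maskedFilter` above; the import path is only inferred from the row's file path, and the arrays are placeholders:

```python
import numpy as np
from imgProcessor.filters.maskedFilter import maskedFilter  # path inferred from the row

arr = np.random.rand(100, 100)
mask = np.zeros_like(arr, dtype=bool)
mask[40:60, 40:60] = True   # region to be replaced by filtered values
filled = maskedFilter(arr, mask, ksize=15, fill_mask=True, fn='median')
print(filled.shape)          # (100, 100)
```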
radjkarl/imgProcessor | imgProcessor/camera/flatField/vignettingFromDifferentObjects.py | vignettingFromDifferentObjects | def vignettingFromDifferentObjects(imgs, bg):
'''
Extract vignetting from a set of images
containing different devices
The devices spatial inhomogeneities are averaged
This method is referred to as 'Method C' in
---
K.Bedrich, M.Bokalic et al.:
ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
ADVANCED FLAT FIELD CALIBRATION,2017
---
'''
f = FlatFieldFromImgFit(imgs, bg)
return f.result, f.mask | python | def vignettingFromDifferentObjects(imgs, bg):
'''
Extract vignetting from a set of images
containing different devices
The devices spatial inhomogeneities are averaged
This method is referred to as 'Method C' in
---
K.Bedrich, M.Bokalic et al.:
ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
ADVANCED FLAT FIELD CALIBRATION,2017
---
'''
f = FlatFieldFromImgFit(imgs, bg)
return f.result, f.mask | [
"def",
"vignettingFromDifferentObjects",
"(",
"imgs",
",",
"bg",
")",
":",
"f",
"=",
"FlatFieldFromImgFit",
"(",
"imgs",
",",
"bg",
")",
"return",
"f",
".",
"result",
",",
"f",
".",
"mask"
] | Extract vignetting from a set of images
containing different devices
The devices spatial inhomogeneities are averaged
This method is referred to as 'Method C' in
---
K.Bedrich, M.Bokalic et al.:
ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
ADVANCED FLAT FIELD CALIBRATION,2017
--- | [
"Extract",
"vignetting",
"from",
"a",
"set",
"of",
"images",
"containing",
"different",
"devices",
"The",
"devices",
"spatial",
"inhomogeneities",
"are",
"averaged",
"This",
"method",
"is",
"referred",
"as",
"Method",
"C",
"in",
"---",
"K",
".",
"Bedrich",
"M"... | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/flatField/vignettingFromDifferentObjects.py#L129-L144 | train |
radjkarl/imgProcessor | imgProcessor/measure/SNR/SNR_IEC.py | SNR_IEC | def SNR_IEC(i1, i2, ibg=0, allow_color_images=False):
'''
Calculate the averaged signal-to-noise ratio SNR50
as defined by IEC NP 60904-13
needs 2 reference EL images and one background image
'''
# ensure images are type float64 (double precision):
i1 = np.asfarray(i1)
i2 = np.asfarray(i2)
if ibg is not 0:
ibg = np.asfarray(ibg)
assert i1.shape == ibg.shape, 'all input images need to have the same resolution'
assert i1.shape == i2.shape, 'all input images need to have the same resolution'
if not allow_color_images:
assert i1.ndim == 2, 'Images need to be in grayscale according to the IEC standard'
# SNR calculation as defined in 'IEC TS 60904-13':
signal = 0.5 * (i1 + i2) - ibg
noise = 0.5**0.5 * np.abs(i1 - i2) * ((2 / np.pi)**-0.5)
if signal.ndim == 3: # color
signal = np.average(signal, axis=2, weights=(0.114, 0.587, 0.299))
noise = np.average(noise, axis=2, weights=(0.114, 0.587, 0.299))
signal = signal.sum()
noise = noise.sum()
return signal / noise | python | def SNR_IEC(i1, i2, ibg=0, allow_color_images=False):
'''
Calculate the averaged signal-to-noise ratio SNR50
as defined by IEC NP 60904-13
needs 2 reference EL images and one background image
'''
# ensure images are type float64 (double precision):
i1 = np.asfarray(i1)
i2 = np.asfarray(i2)
if ibg is not 0:
ibg = np.asfarray(ibg)
assert i1.shape == ibg.shape, 'all input images need to have the same resolution'
assert i1.shape == i2.shape, 'all input images need to have the same resolution'
if not allow_color_images:
assert i1.ndim == 2, 'Images need to be in grayscale according to the IEC standard'
# SNR calculation as defined in 'IEC TS 60904-13':
signal = 0.5 * (i1 + i2) - ibg
noise = 0.5**0.5 * np.abs(i1 - i2) * ((2 / np.pi)**-0.5)
if signal.ndim == 3: # color
signal = np.average(signal, axis=2, weights=(0.114, 0.587, 0.299))
noise = np.average(noise, axis=2, weights=(0.114, 0.587, 0.299))
signal = signal.sum()
noise = noise.sum()
return signal / noise | [
"def",
"SNR_IEC",
"(",
"i1",
",",
"i2",
",",
"ibg",
"=",
"0",
",",
"allow_color_images",
"=",
"False",
")",
":",
"# ensure images are type float64 (double precision):\r",
"i1",
"=",
"np",
".",
"asfarray",
"(",
"i1",
")",
"i2",
"=",
"np",
".",
"asfarray",
"... | Calculate the averaged signal-to-noise ratio SNR50
as defined by IEC NP 60904-13
needs 2 reference EL images and one background image | [
"Calculate",
"the",
"averaged",
"signal",
"-",
"to",
"-",
"noise",
"ratio",
"SNR50",
"as",
"defined",
"by",
"IEC",
"NP",
"60904",
"-",
"13",
"needs",
"2",
"reference",
"EL",
"images",
"and",
"one",
"background",
"image"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/measure/SNR/SNR_IEC.py#L7-L33 | train |
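A worked sketch of the SNR50 formula from the `SNR_IEC` record above, on synthetic frames chosen so the expected result is roughly signal/σ = 200/5 = 40 (the background image is assumed to be zero):

```python
import numpy as np

rng = np.random.default_rng(0)
truth = np.full((50, 50), 200.0)
i1 = truth + rng.normal(0, 5, truth.shape)   # two EL frames of the same device
i2 = truth + rng.normal(0, 5, truth.shape)

signal = 0.5 * (i1 + i2)                     # background image taken as 0 here
noise = 0.5**0.5 * np.abs(i1 - i2) * (2 / np.pi)**-0.5
print(round(signal.sum() / noise.sum(), 1))  # close to 40
```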
radjkarl/imgProcessor | imgProcessor/transform/StitchImages.py | StitchImages._rotate | def _rotate(img, angle):
'''
angle [DEG]
'''
s = img.shape
if angle == 0:
return img
else:
M = cv2.getRotationMatrix2D((s[1] // 2,
s[0] // 2), angle, 1)
return cv2.warpAffine(img, M, (s[1], s[0])) | python | def _rotate(img, angle):
'''
angle [DEG]
'''
s = img.shape
if angle == 0:
return img
else:
M = cv2.getRotationMatrix2D((s[1] // 2,
s[0] // 2), angle, 1)
return cv2.warpAffine(img, M, (s[1], s[0])) | [
"def",
"_rotate",
"(",
"img",
",",
"angle",
")",
":",
"s",
"=",
"img",
".",
"shape",
"if",
"angle",
"==",
"0",
":",
"return",
"img",
"else",
":",
"M",
"=",
"cv2",
".",
"getRotationMatrix2D",
"(",
"(",
"s",
"[",
"1",
"]",
"//",
"2",
",",
"s",
... | angle [DEG] | [
"angle",
"[",
"DEG",
"]"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transform/StitchImages.py#L83-L93 | train |
radjkarl/imgProcessor | imgProcessor/transform/StitchImages.py | StitchImages._findOverlap | def _findOverlap(self, img_rgb, overlap, overlapDeviation,
rotation, rotationDeviation):
'''
return offset(x,y) which fits self._base_img best
through template matching
'''
# get gray images
if len(img_rgb.shape) != len(self.base_img_rgb.shape):
raise Exception(
'number of channels(colors) for both images different')
if overlapDeviation == 0 and rotationDeviation == 0:
return (0, overlap, rotation)
s = self.base_img_rgb.shape
ho = int(round(overlap * 0.5))
overlap = int(round(overlap))
# create two image cuts to compare:
imgcut = self.base_img_rgb[s[0] - overlapDeviation - overlap:, :]
template = img_rgb[:overlap, ho:s[1] - ho]
def fn(angle):
rotTempl = self._rotate(template, angle)
# Apply template Matching
fn.res = cv2.matchTemplate(rotTempl.astype(np.float32),
imgcut.astype(np.float32),
cv2.TM_CCORR_NORMED)
return 1 / fn.res.mean()
if rotationDeviation == 0:
angle = rotation
fn(rotation)
else:
# find best rotation angle:
angle = brent(fn, brack=(rotation - rotationDeviation,
rotation + rotationDeviation))
loc = cv2.minMaxLoc(fn.res)[-1]
offsx = int(round(loc[0] - ho))
offsy = overlapDeviation + overlap - loc[1]
return offsx, offsy, angle | python | def _findOverlap(self, img_rgb, overlap, overlapDeviation,
rotation, rotationDeviation):
'''
return offset(x,y) which fits self._base_img best
through template matching
'''
# get gray images
if len(img_rgb.shape) != len(self.base_img_rgb.shape):
raise Exception(
'number of channels(colors) for both images different')
if overlapDeviation == 0 and rotationDeviation == 0:
return (0, overlap, rotation)
s = self.base_img_rgb.shape
ho = int(round(overlap * 0.5))
overlap = int(round(overlap))
# create two image cuts to compare:
imgcut = self.base_img_rgb[s[0] - overlapDeviation - overlap:, :]
template = img_rgb[:overlap, ho:s[1] - ho]
def fn(angle):
rotTempl = self._rotate(template, angle)
# Apply template Matching
fn.res = cv2.matchTemplate(rotTempl.astype(np.float32),
imgcut.astype(np.float32),
cv2.TM_CCORR_NORMED)
return 1 / fn.res.mean()
if rotationDeviation == 0:
angle = rotation
fn(rotation)
else:
# find best rotation angle:
angle = brent(fn, brack=(rotation - rotationDeviation,
rotation + rotationDeviation))
loc = cv2.minMaxLoc(fn.res)[-1]
offsx = int(round(loc[0] - ho))
offsy = overlapDeviation + overlap - loc[1]
return offsx, offsy, angle | [
"def",
"_findOverlap",
"(",
"self",
",",
"img_rgb",
",",
"overlap",
",",
"overlapDeviation",
",",
"rotation",
",",
"rotationDeviation",
")",
":",
"# get gray images\r",
"if",
"len",
"(",
"img_rgb",
".",
"shape",
")",
"!=",
"len",
"(",
"img_rgb",
".",
"shape"... | return offset(x,y) which fit best self._base_img
through template matching | [
"return",
"offset",
"(",
"x",
"y",
")",
"which",
"fit",
"best",
"self",
".",
"_base_img",
"through",
"template",
"matching"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transform/StitchImages.py#L95-L136 | train |
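A minimal template-matching sketch illustrating the core of `_findOverlap` above, on synthetic data; `TM_CCORR_NORMED` peaks where the patch matches best:

```python
import numpy as np
import cv2

rng = np.random.default_rng(1)
base = rng.random((120, 120)).astype(np.float32)
patch = base[60:80, 30:50].copy()            # patch cut out at (y=60, x=30)

res = cv2.matchTemplate(base, patch, cv2.TM_CCORR_NORMED)
_, _, _, max_loc = cv2.minMaxLoc(res)
print(max_loc)                               # (30, 60) as (x, y)
```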
radjkarl/imgProcessor | imgProcessor/camera/NoiseLevelFunction.py | estimateFromImages | def estimateFromImages(imgs1, imgs2=None, mn_mx=None, nbins=100):
'''
estimate the noise level function as stDev over image intensity
from a set of 2 image groups
images at the same position have to show
the identical setup, so
imgs1[i] - imgs2[i] = noise
'''
if imgs2 is None:
imgs2 = [None] * len(imgs1)
else:
assert len(imgs1) == len(imgs2)
y_vals = np.empty((len(imgs1), nbins))
w_vals = np.zeros((len(imgs1), nbins))
if mn_mx is None:
print('estimating min and max image value')
mn = 1e6
mx = -1e6
# get min and max image value checking all first images:
for n, i1 in enumerate(imgs1):
print('%s/%s' % (n + 1, len(imgs1)))
i1 = imread(i1)
mmn, mmx = _getMinMax(i1)
mn = min(mn, mmn)
mx = max(mx, mmx)
print('--> min(%s), max(%s)' % (mn, mx))
else:
mn, mx = mn_mx
x = None
print('get noise level function')
for n, (i1, i2) in enumerate(zip(imgs1, imgs2)):
print('%s/%s' % (n + 1, len(imgs1)))
i1 = imread(i1)
if i2 is not None:
i2 = imread(i2)
x, y, weights, _ = calcNLF(i1, i2, mn_mx_nbins=(mn, mx, nbins), x=x)
y_vals[n] = y
w_vals[n] = weights
# filter empty places:
filledPos = np.sum(w_vals, axis=0) != 0
w_vals = w_vals[:, filledPos]
y_vals = y_vals[:, filledPos]
x = x[filledPos]
y_avg = np.average(np.nan_to_num(y_vals),
weights=w_vals,
axis=0)
w_vals = np.sum(w_vals, axis=0)
w_vals /= w_vals.sum()
fitParams, fn, i = _evaluate(x, y_avg, w_vals)
return x, fn, y_avg, y_vals, w_vals, fitParams, i | python | def estimateFromImages(imgs1, imgs2=None, mn_mx=None, nbins=100):
'''
estimate the noise level function as stDev over image intensity
from a set of 2 image groups
images at the same position have to show
the identical setup, so
imgs1[i] - imgs2[i] = noise
'''
if imgs2 is None:
imgs2 = [None] * len(imgs1)
else:
assert len(imgs1) == len(imgs2)
y_vals = np.empty((len(imgs1), nbins))
w_vals = np.zeros((len(imgs1), nbins))
if mn_mx is None:
print('estimating min and max image value')
mn = 1e6
mx = -1e6
# get min and max image value checking all first images:
for n, i1 in enumerate(imgs1):
print('%s/%s' % (n + 1, len(imgs1)))
i1 = imread(i1)
mmn, mmx = _getMinMax(i1)
mn = min(mn, mmn)
mx = max(mx, mmx)
print('--> min(%s), max(%s)' % (mn, mx))
else:
mn, mx = mn_mx
x = None
print('get noise level function')
for n, (i1, i2) in enumerate(zip(imgs1, imgs2)):
print('%s/%s' % (n + 1, len(imgs1)))
i1 = imread(i1)
if i2 is not None:
i2 = imread(i2)
x, y, weights, _ = calcNLF(i1, i2, mn_mx_nbins=(mn, mx, nbins), x=x)
y_vals[n] = y
w_vals[n] = weights
# filter empty places:
filledPos = np.sum(w_vals, axis=0) != 0
w_vals = w_vals[:, filledPos]
y_vals = y_vals[:, filledPos]
x = x[filledPos]
y_avg = np.average(np.nan_to_num(y_vals),
weights=w_vals,
axis=0)
w_vals = np.sum(w_vals, axis=0)
w_vals /= w_vals.sum()
fitParams, fn, i = _evaluate(x, y_avg, w_vals)
return x, fn, y_avg, y_vals, w_vals, fitParams, i | [
"def",
"estimateFromImages",
"(",
"imgs1",
",",
"imgs2",
"=",
"None",
",",
"mn_mx",
"=",
"None",
",",
"nbins",
"=",
"100",
")",
":",
"if",
"imgs2",
"is",
"None",
":",
"imgs2",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"imgs1",
")",
"else",
":",
"as... | estimate the noise level function as stDev over image intensity
from a set of 2 image groups
images at the same position have to show
the identical setup, so
imgs1[i] - imgs2[i] = noise | [
"estimate",
"the",
"noise",
"level",
"function",
"as",
"stDev",
"over",
"image",
"intensity",
"from",
"a",
"set",
"of",
"2",
"image",
"groups",
"images",
"at",
"the",
"same",
"position",
"have",
"to",
"show",
"the",
"identical",
"setup",
"so",
"imgs1",
"["... | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/NoiseLevelFunction.py#L11-L68 | train |
radjkarl/imgProcessor | imgProcessor/camera/NoiseLevelFunction.py | _evaluate | def _evaluate(x, y, weights):
'''
get the parameters needed by 'function'
through curve fitting
'''
i = _validI(x, y, weights)
xx = x[i]
y = y[i]
try:
fitParams = _fit(xx, y)
# bound noise fn to min defined y value:
minY = function(xx[0], *fitParams)
fitParams = np.insert(fitParams, 0, minY)
fn = lambda x, minY=minY: boundedFunction(x, *fitParams)
except RuntimeError:
print(
"couldn't fit noise function with filtered indices, use polynomial fit instead")
fitParams = None
fn = smooth(xx, y, weights[i])
return fitParams, fn, i | python | def _evaluate(x, y, weights):
'''
get the parameters needed by 'function'
through curve fitting
'''
i = _validI(x, y, weights)
xx = x[i]
y = y[i]
try:
fitParams = _fit(xx, y)
# bound noise fn to min defined y value:
minY = function(xx[0], *fitParams)
fitParams = np.insert(fitParams, 0, minY)
fn = lambda x, minY=minY: boundedFunction(x, *fitParams)
except RuntimeError:
print(
"couldn't fit noise function with filtered indices, use polynomial fit instead")
fitParams = None
fn = smooth(xx, y, weights[i])
return fitParams, fn, i | [
"def",
"_evaluate",
"(",
"x",
",",
"y",
",",
"weights",
")",
":",
"i",
"=",
"_validI",
"(",
"x",
",",
"y",
",",
"weights",
")",
"xx",
"=",
"x",
"[",
"i",
"]",
"y",
"=",
"y",
"[",
"i",
"]",
"try",
":",
"fitParams",
"=",
"_fit",
"(",
"xx",
... | get the parameters of the, needed by 'function'
through curve fitting | [
"get",
"the",
"parameters",
"of",
"the",
"needed",
"by",
"function",
"through",
"curve",
"fitting"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/NoiseLevelFunction.py#L71-L91 | train |
radjkarl/imgProcessor | imgProcessor/camera/NoiseLevelFunction.py | boundedFunction | def boundedFunction(x, minY, ax, ay):
'''
limit [function] to a minimum y value
'''
y = function(x, ax, ay)
return np.maximum(np.nan_to_num(y), minY) | python | def boundedFunction(x, minY, ax, ay):
'''
limit [function] to a minimum y value
'''
y = function(x, ax, ay)
return np.maximum(np.nan_to_num(y), minY) | [
"def",
"boundedFunction",
"(",
"x",
",",
"minY",
",",
"ax",
",",
"ay",
")",
":",
"y",
"=",
"function",
"(",
"x",
",",
"ax",
",",
"ay",
")",
"return",
"np",
".",
"maximum",
"(",
"np",
".",
"nan_to_num",
"(",
"y",
")",
",",
"minY",
")"
] | limit [function] to a minimum y value | [
"limit",
"[",
"function",
"]",
"to",
"a",
"minimum",
"y",
"value"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/NoiseLevelFunction.py#L94-L99 | train |
radjkarl/imgProcessor | imgProcessor/camera/NoiseLevelFunction.py | function | def function(x, ax, ay):
'''
general square root function
'''
with np.errstate(invalid='ignore'):
return ay * (x - ax)**0.5 | python | def function(x, ax, ay):
'''
general square root function
'''
with np.errstate(invalid='ignore'):
return ay * (x - ax)**0.5 | [
"def",
"function",
"(",
"x",
",",
"ax",
",",
"ay",
")",
":",
"with",
"np",
".",
"errstate",
"(",
"invalid",
"=",
"'ignore'",
")",
":",
"return",
"ay",
"*",
"(",
"x",
"-",
"ax",
")",
"**",
"0.5"
] | general square root function | [
"general",
"square",
"root",
"function"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/NoiseLevelFunction.py#L102-L107 | train |
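A small sketch of fitting the square-root model y = ay·√(x − ax) used by `function` above, on synthetic data; scipy's `curve_fit` stands in for the module's `_fit` helper, which is an assumption:

```python
import numpy as np
from scipy.optimize import curve_fit

def model(x, ax, ay):
    return ay * np.sqrt(np.clip(x - ax, 0, None))

x = np.linspace(10, 1000, 50)
y = model(x, 5.0, 0.8) + np.random.default_rng(2).normal(0, 0.1, x.size)
(ax, ay), _ = curve_fit(model, x, y, p0=(0.0, 1.0))
print(round(ax, 1), round(ay, 2))   # close to 5.0 and 0.8
```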
radjkarl/imgProcessor | imgProcessor/camera/NoiseLevelFunction.py | _validI | def _validI(x, y, weights):
'''
return indices that have enough data points and are not erroneous
'''
# density filter:
i = np.logical_and(np.isfinite(y), weights > np.median(weights))
# filter outliers:
try:
grad = np.abs(np.gradient(y[i]))
max_gradient = 4 * np.median(grad)
i[i][grad > max_gradient] = False
except (IndexError, ValueError):
pass
return i | python | def _validI(x, y, weights):
'''
return indices that have enough data points and are not erroneous
'''
# density filter:
i = np.logical_and(np.isfinite(y), weights > np.median(weights))
# filter outliers:
try:
grad = np.abs(np.gradient(y[i]))
max_gradient = 4 * np.median(grad)
i[i][grad > max_gradient] = False
except (IndexError, ValueError):
pass
return i | [
"def",
"_validI",
"(",
"x",
",",
"y",
",",
"weights",
")",
":",
"# density filter:\r",
"i",
"=",
"np",
".",
"logical_and",
"(",
"np",
".",
"isfinite",
"(",
"y",
")",
",",
"weights",
">",
"np",
".",
"median",
"(",
"weights",
")",
")",
"# filter outlie... | return indices that have enough data points and are not erroneous | [
"return",
"indices",
"that",
"have",
"enough",
"data",
"points",
"and",
"are",
"not",
"erroneous"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/NoiseLevelFunction.py#L110-L123 | train |
radjkarl/imgProcessor | imgProcessor/camera/NoiseLevelFunction.py | smooth | def smooth(x, y, weights):
'''
in case the NLF cannot be described by
a square root function
commit bounded polynomial interpolation
'''
# Spline hard to smooth properly, therefore solved with
# bounded polynomial interpolation
# ext=3: no extrapolation, but boundary value
# return UnivariateSpline(x, y, w=weights,
# s=len(y)*weights.max()*100, ext=3)
# return np.poly1d(np.polyfit(x,y,w=weights,deg=2))
p = np.polyfit(x, y, w=weights, deg=2)
if np.any(np.isnan(p)):
# couldn't even do polynomial fit
# as last option: assume constant noise
my = np.average(y, weights=weights)
return lambda x: my
return lambda xint: np.poly1d(p)(np.clip(xint, x[0], x[-1])) | python | def smooth(x, y, weights):
'''
in case the NLF cannot be described by
a square root function
commit bounded polynomial interpolation
'''
# Spline hard to smooth properly, therefore solved with
# bounded polynomial interpolation
# ext=3: no extrapolation, but boundary value
# return UnivariateSpline(x, y, w=weights,
# s=len(y)*weights.max()*100, ext=3)
# return np.poly1d(np.polyfit(x,y,w=weights,deg=2))
p = np.polyfit(x, y, w=weights, deg=2)
if np.any(np.isnan(p)):
# couldn't even do polynomial fit
# as last option: assume constant noise
my = np.average(y, weights=weights)
return lambda x: my
return lambda xint: np.poly1d(p)(np.clip(xint, x[0], x[-1])) | [
"def",
"smooth",
"(",
"x",
",",
"y",
",",
"weights",
")",
":",
"# Spline hard to smooth properly, therefore solfed with\r",
"# bounded polynomal interpolation\r",
"# ext=3: no extrapolation, but boundary value\r",
"# return UnivariateSpline(x, y, w=weights,\r",
"# ... | in case the NLF cannot be described by
a square root function
commit bounded polynomial interpolation | [
"in",
"case",
"the",
"NLF",
"cannot",
"be",
"described",
"by",
"a",
"square",
"root",
"function",
"commit",
"bounded",
"polynomial",
"interpolation"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/NoiseLevelFunction.py#L131-L150 | train |
radjkarl/imgProcessor | imgProcessor/camera/NoiseLevelFunction.py | oneImageNLF | def oneImageNLF(img, img2=None, signal=None):
'''
Estimate the NLF from one or two images of the same kind
'''
x, y, weights, signal = calcNLF(img, img2, signal)
_, fn, _ = _evaluate(x, y, weights)
return fn, signal | python | def oneImageNLF(img, img2=None, signal=None):
'''
Estimate the NLF from one or two images of the same kind
'''
x, y, weights, signal = calcNLF(img, img2, signal)
_, fn, _ = _evaluate(x, y, weights)
return fn, signal | [
"def",
"oneImageNLF",
"(",
"img",
",",
"img2",
"=",
"None",
",",
"signal",
"=",
"None",
")",
":",
"x",
",",
"y",
",",
"weights",
",",
"signal",
"=",
"calcNLF",
"(",
"img",
",",
"img2",
",",
"signal",
")",
"_",
",",
"fn",
",",
"_",
"=",
"_evalua... | Estimate the NLF from one or two images of the same kind | [
"Estimate",
"the",
"NLF",
"from",
"one",
"or",
"two",
"images",
"of",
"the",
"same",
"kind"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/NoiseLevelFunction.py#L153-L159 | train |
radjkarl/imgProcessor | imgProcessor/camera/NoiseLevelFunction.py | _getMinMax | def _getMinMax(img):
'''
Get the range of image intensities
that most pixels lie within
'''
av = np.mean(img)
std = np.std(img)
# define range for segmentation:
mn = av - 3 * std
mx = av + 3 * std
return max(img.min(), mn, 0), min(img.max(), mx) | python | def _getMinMax(img):
'''
Get the range of image intensities
that most pixels lie within
'''
av = np.mean(img)
std = np.std(img)
# define range for segmentation:
mn = av - 3 * std
mx = av + 3 * std
return max(img.min(), mn, 0), min(img.max(), mx) | [
"def",
"_getMinMax",
"(",
"img",
")",
":",
"av",
"=",
"np",
".",
"mean",
"(",
"img",
")",
"std",
"=",
"np",
".",
"std",
"(",
"img",
")",
"# define range for segmentation:\r",
"mn",
"=",
"av",
"-",
"3",
"*",
"std",
"mx",
"=",
"av",
"+",
"3",
"*",
... | Get the a range of image intensities
that most pixels lie within | [
"Get",
"the",
"a",
"range",
"of",
"image",
"intensities",
"that",
"most",
"pixels",
"are",
"in",
"with"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/NoiseLevelFunction.py#L162-L173 | train |
radjkarl/imgProcessor | imgProcessor/camera/NoiseLevelFunction.py | calcNLF | def calcNLF(img, img2=None, signal=None, mn_mx_nbins=None, x=None,
averageFn='AAD',
signalFromMultipleImages=False):
'''
Calculate the noise level function (NLF) as f(intensity)
using one or two images.
The approach for this work is published in JPV##########
img2 - 2nd image taken under same conditions
used to estimate noise via image difference
signalFromMultipleImages - whether the signal is an average of multiple
images and not just got from one median filtered image
'''
# CONSTANTS:
# factor root mean square to average-absolute-difference:
F_RMS2AAD = (2 / np.pi)**-0.5
F_NOISE_WITH_MEDIAN = 1 + (1 / 3**2)
N_BINS = 100
MEDIAN_KERNEL_SIZE = 3
def _averageAbsoluteDeviation(d):
return np.mean(np.abs(d)) * F_RMS2AAD
def _rootMeanSquare(d):
return (d**2).mean()**0.5
if averageFn == 'AAD':
averageFn = _averageAbsoluteDeviation
else:
averageFn = _rootMeanSquare
img = np.asfarray(img)
if img2 is None:
if signal is None:
signal = median_filter(img, MEDIAN_KERNEL_SIZE)
if signalFromMultipleImages:
diff = img - signal
else:
# difference between the filtered and original image:
diff = (img - signal) * F_NOISE_WITH_MEDIAN
else:
img2 = np.asfarray(img2)
diff = (img - img2)
# 2**0.5 because noise is subtracted by noise
# and variance of sum = sum of variance:
# var(img1-img2)~2*var(img)
# std(2*var) = 2**0.5*var**0.5
diff /= 2**0.5
if signal is None:
signal = median_filter(0.5 * (img + img2), MEDIAN_KERNEL_SIZE)
if mn_mx_nbins is not None:
mn, mx, nbins = mn_mx_nbins
min_len = 0
else:
mn, mx = _getMinMax(signal)
s = img.shape
min_len = int(s[0] * s[1] * 1e-3)
if min_len < 1:
min_len = 5
# number of bins/different intensity ranges to analyse:
nbins = N_BINS
if mx - mn < nbins:
nbins = int(mx - mn)
# bin width:
step = (mx - mn) / nbins
# empty arrays:
y = np.empty(shape=nbins)
set_x = False
if x is None:
set_x = True
x = np.empty(shape=nbins)
# give bins with more samples more weight:
weights = np.zeros(shape=nbins)
# cur step:
m = mn
for n in range(nbins):
# get indices of all pixels within a bin:
ind = np.logical_and(signal >= m, signal <= m + step)
m += step
d = diff[ind]
ld = len(d)
if ld >= min_len:
weights[n] = ld
# average absolute deviation (AAD),
# scaled to RMS:
y[n] = averageFn(d)
if set_x:
x[n] = m - 0.5 * step
return x, y, weights, signal | python | def calcNLF(img, img2=None, signal=None, mn_mx_nbins=None, x=None,
averageFn='AAD',
signalFromMultipleImages=False):
'''
Calculate the noise level function (NLF) as f(intensity)
using one or two images.
The approach for this work is published in JPV##########
img2 - 2nd image taken under same conditions
used to estimate noise via image difference
signalFromMultipleImages - whether the signal is an average of multiple
images and not just got from one median filtered image
'''
# CONSTANTS:
# factor root mean square to average-absolute-difference:
F_RMS2AAD = (2 / np.pi)**-0.5
F_NOISE_WITH_MEDIAN = 1 + (1 / 3**2)
N_BINS = 100
MEDIAN_KERNEL_SIZE = 3
def _averageAbsoluteDeviation(d):
return np.mean(np.abs(d)) * F_RMS2AAD
def _rootMeanSquare(d):
return (d**2).mean()**0.5
if averageFn == 'AAD':
averageFn = _averageAbsoluteDeviation
else:
averageFn = _rootMeanSquare
img = np.asfarray(img)
if img2 is None:
if signal is None:
signal = median_filter(img, MEDIAN_KERNEL_SIZE)
if signalFromMultipleImages:
diff = img - signal
else:
# difference between the filtered and original image:
diff = (img - signal) * F_NOISE_WITH_MEDIAN
else:
img2 = np.asfarray(img2)
diff = (img - img2)
# 2**0.5 because noise is subtracted by noise
# and variance of sum = sum of variance:
# var(img1-img2)~2*var(img)
# std(2*var) = 2**0.5*var**0.5
diff /= 2**0.5
if signal is None:
signal = median_filter(0.5 * (img + img2), MEDIAN_KERNEL_SIZE)
if mn_mx_nbins is not None:
mn, mx, nbins = mn_mx_nbins
min_len = 0
else:
mn, mx = _getMinMax(signal)
s = img.shape
min_len = int(s[0] * s[1] * 1e-3)
if min_len < 1:
min_len = 5
# number of bins/different intensity ranges to analyse:
nbins = N_BINS
if mx - mn < nbins:
nbins = int(mx - mn)
# bin width:
step = (mx - mn) / nbins
# empty arrays:
y = np.empty(shape=nbins)
set_x = False
if x is None:
set_x = True
x = np.empty(shape=nbins)
# give bins with more samples more weight:
weights = np.zeros(shape=nbins)
# cur step:
m = mn
for n in range(nbins):
# get indices of all pixels within a bin:
ind = np.logical_and(signal >= m, signal <= m + step)
m += step
d = diff[ind]
ld = len(d)
if ld >= min_len:
weights[n] = ld
# average absolute deviation (AAD),
# scaled to RMS:
y[n] = averageFn(d)
if set_x:
x[n] = m - 0.5 * step
return x, y, weights, signal | [
"def",
"calcNLF",
"(",
"img",
",",
"img2",
"=",
"None",
",",
"signal",
"=",
"None",
",",
"mn_mx_nbins",
"=",
"None",
",",
"x",
"=",
"None",
",",
"averageFn",
"=",
"'AAD'",
",",
"signalFromMultipleImages",
"=",
"False",
")",
":",
"# CONSTANTS:\r",
"# fact... | Calculate the noise level function (NLF) as f(intensity)
using one or two image.
The approach for this work is published in JPV##########
img2 - 2nd image taken under same conditions
used to estimate noise via image difference
signalFromMultipleImages - whether the signal is an average of multiple
images and not just got from one median filtered image | [
"Calculate",
"the",
"noise",
"level",
"function",
"(",
"NLF",
")",
"as",
"f",
"(",
"intensity",
")",
"using",
"one",
"or",
"two",
"image",
".",
"The",
"approach",
"for",
"this",
"work",
"is",
"published",
"in",
"JPV##########",
"img2",
"-",
"2nd",
"image... | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/NoiseLevelFunction.py#L176-L271 | train |
radjkarl/imgProcessor | imgProcessor/interpolate/polyfit2d.py | polyfit2d | def polyfit2d(x, y, z, order=3 #bounds=None
):
'''
fit unstructured data
'''
ncols = (order + 1)**2
G = np.zeros((x.size, ncols))
ij = itertools.product(list(range(order+1)), list(range(order+1)))
for k, (i,j) in enumerate(ij):
G[:,k] = x**i * y**j
m = np.linalg.lstsq(G, z)[0]
return m | python | def polyfit2d(x, y, z, order=3 #bounds=None
):
'''
fit unstructured data
'''
ncols = (order + 1)**2
G = np.zeros((x.size, ncols))
ij = itertools.product(list(range(order+1)), list(range(order+1)))
for k, (i,j) in enumerate(ij):
G[:,k] = x**i * y**j
m = np.linalg.lstsq(G, z)[0]
return m | [
"def",
"polyfit2d",
"(",
"x",
",",
"y",
",",
"z",
",",
"order",
"=",
"3",
"#bounds=None\r",
")",
":",
"ncols",
"=",
"(",
"order",
"+",
"1",
")",
"**",
"2",
"G",
"=",
"np",
".",
"zeros",
"(",
"(",
"x",
".",
"size",
",",
"ncols",
")",
")",
"i... | fit unstructured data | [
"fit",
"unstructured",
"data"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/interpolate/polyfit2d.py#L8-L19 | train |
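A worked sketch of the unstructured 2-D polynomial fit from `polyfit2d` above, on data generated from a known order-1 surface; the coefficient order follows itertools.product, i.e. 1, y, x, x·y:

```python
import itertools
import numpy as np

rng = np.random.default_rng(4)
x, y = rng.random(200), rng.random(200)
z = 1 + 2 * x + 3 * y + 4 * x * y          # known surface

order = 1
ij = itertools.product(range(order + 1), range(order + 1))
G = np.stack([x**i * y**j for i, j in ij], axis=1)   # design matrix
m = np.linalg.lstsq(G, z, rcond=None)[0]
print(np.round(m, 2))                      # ~[1. 3. 2. 4.]
```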
radjkarl/imgProcessor | imgProcessor/interpolate/polyfit2d.py | polyfit2dGrid | def polyfit2dGrid(arr, mask=None, order=3, replace_all=False,
copy=True, outgrid=None):
'''
replace all masked values with polynomial fitted ones
'''
s0,s1 = arr.shape
if mask is None:
if outgrid is None:
y,x = np.mgrid[:float(s0),:float(s1)]
p = polyfit2d(x.flatten(),y.flatten(),arr.flatten(),order)
return polyval2d(x,y, p, dtype=arr.dtype)
mask = np.zeros_like(arr, dtype=bool)
elif mask.sum() == 0 and not replace_all and outgrid is None:
return arr
valid = ~mask
y,x = np.where(valid)
z = arr[valid]
p = polyfit2d(x,y,z,order)
if outgrid is not None:
yy,xx = outgrid
else:
if replace_all:
yy,xx = np.mgrid[:float(s0),:float(s1)]
else:
yy,xx = np.where(mask)
new = polyval2d(xx,yy, p, dtype=arr.dtype)
if outgrid is not None or replace_all:
return new
if copy:
arr = arr.copy()
arr[mask] = new
return arr | python | def polyfit2dGrid(arr, mask=None, order=3, replace_all=False,
copy=True, outgrid=None):
'''
replace all masked values with polynomial fitted ones
'''
s0,s1 = arr.shape
if mask is None:
if outgrid is None:
y,x = np.mgrid[:float(s0),:float(s1)]
p = polyfit2d(x.flatten(),y.flatten(),arr.flatten(),order)
return polyval2d(x,y, p, dtype=arr.dtype)
mask = np.zeros_like(arr, dtype=bool)
elif mask.sum() == 0 and not replace_all and outgrid is None:
return arr
valid = ~mask
y,x = np.where(valid)
z = arr[valid]
p = polyfit2d(x,y,z,order)
if outgrid is not None:
yy,xx = outgrid
else:
if replace_all:
yy,xx = np.mgrid[:float(s0),:float(s1)]
else:
yy,xx = np.where(mask)
new = polyval2d(xx,yy, p, dtype=arr.dtype)
if outgrid is not None or replace_all:
return new
if copy:
arr = arr.copy()
arr[mask] = new
return arr | [
"def",
"polyfit2dGrid",
"(",
"arr",
",",
"mask",
"=",
"None",
",",
"order",
"=",
"3",
",",
"replace_all",
"=",
"False",
",",
"copy",
"=",
"True",
",",
"outgrid",
"=",
"None",
")",
":",
"s0",
",",
"s1",
"=",
"arr",
".",
"shape",
"if",
"mask",
"is"... | replace all masked values with polynomial fitted ones | [
"replace",
"all",
"masked",
"values",
"with",
"polynomial",
"fitted",
"ones"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/interpolate/polyfit2d.py#L31-L65 | train |
radjkarl/imgProcessor | imgProcessor/features/minimumLineInArray.py | minimumLineInArray | def minimumLineInArray(arr, relative=False, f=0,
refinePosition=True,
max_pos=100,
return_pos_arr=False,
# order=2
):
'''
find closest minimum position next to middle line
relative: return position relative to middle line
f: relative decrease (0...1) - setting this value close to one will
discriminate positions further away from the center
##order: 2 for cubic refinement
'''
s0, s1 = arr.shape[:2]
if max_pos >= s1:
x = np.arange(s1)
else:
# take fewer positions within 0->(s1-1)
x = np.rint(np.linspace(0, s1 - 1, min(max_pos, s1))).astype(int)
res = np.empty((s0, s0), dtype=float)
_lineSumXY(x, res, arr, f)
if return_pos_arr:
return res
# best integer index
i, j = np.unravel_index(np.nanargmin(res), res.shape)
if refinePosition:
try:
sub = res[i - 1:i + 2, j - 1:j + 2]
ii, jj = center_of_mass(sub)
if not np.isnan(ii):
i += (ii - 1)
if not np.isnan(jj):
j += (jj - 1)
except TypeError:
pass
if not relative:
return i, j
hs = (s0 - 1) / 2
return i - hs, j - hs | python | def minimumLineInArray(arr, relative=False, f=0,
refinePosition=True,
max_pos=100,
return_pos_arr=False,
# order=2
):
'''
find closest minimum position next to middle line
relative: return position relative to middle line
f: relative decrease (0...1) - setting this value close to one will
discriminate positions further away from the center
##order: 2 for cubic refinement
'''
s0, s1 = arr.shape[:2]
if max_pos >= s1:
x = np.arange(s1)
else:
# take fewer positions within 0->(s1-1)
x = np.rint(np.linspace(0, s1 - 1, min(max_pos, s1))).astype(int)
res = np.empty((s0, s0), dtype=float)
_lineSumXY(x, res, arr, f)
if return_pos_arr:
return res
# best integer index
i, j = np.unravel_index(np.nanargmin(res), res.shape)
if refinePosition:
try:
sub = res[i - 1:i + 2, j - 1:j + 2]
ii, jj = center_of_mass(sub)
if not np.isnan(ii):
i += (ii - 1)
if not np.isnan(jj):
j += (jj - 1)
except TypeError:
pass
if not relative:
return i, j
hs = (s0 - 1) / 2
return i - hs, j - hs | [
"def",
"minimumLineInArray",
"(",
"arr",
",",
"relative",
"=",
"False",
",",
"f",
"=",
"0",
",",
"refinePosition",
"=",
"True",
",",
"max_pos",
"=",
"100",
",",
"return_pos_arr",
"=",
"False",
",",
"# order=2\r",
")",
":",
"s0",
",",
"s1",
"=",
"arr",
... | find closest minimum position next to middle line
relative: return position relative to middle line
f: relative decrease (0...1) - setting this value close to one will
discriminate positions further away from the center
##order: 2 for cubic refinement | [
"find",
"closest",
"minimum",
"position",
"next",
"to",
"middle",
"line",
"relative",
":",
"return",
"position",
"relative",
"to",
"middle",
"line",
"f",
":",
"relative",
"decrease",
"(",
"0",
"...",
"1",
")",
"-",
"setting",
"this",
"value",
"close",
"to"... | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/features/minimumLineInArray.py#L27-L72 | train |
radjkarl/imgProcessor | imgProcessor/filters/FourierFilter.py | FourierFilter.highPassFilter | def highPassFilter(self, threshold):
'''
remove all low frequencies by setting a square in the middle of the
Fourier transformation of the size (2*threshold)^2 to zero
threshold = 0...1
'''
if not threshold:
return
rows, cols = self.img.shape
tx = int(cols * threshold)
ty = int(rows * threshold)
# middle:
crow, ccol = rows // 2, cols // 2
# square in the middle to zero
self.fshift[crow - tx:crow + tx, ccol - ty:ccol + ty] = 0 | python | def highPassFilter(self, threshold):
'''
remove all low frequencies by setting a square in the middle of the
Fourier transformation of the size (2*threshold)^2 to zero
threshold = 0...1
'''
if not threshold:
return
rows, cols = self.img.shape
tx = int(cols * threshold)
ty = int(rows * threshold)
# middle:
crow, ccol = rows // 2, cols // 2
# square in the middle to zero
self.fshift[crow - tx:crow + tx, ccol - ty:ccol + ty] = 0 | [
"def",
"highPassFilter",
"(",
"self",
",",
"threshold",
")",
":",
"if",
"not",
"threshold",
":",
"return",
"rows",
",",
"cols",
"=",
"self",
".",
"img",
".",
"shape",
"tx",
"=",
"int",
"(",
"cols",
"*",
"threshold",
")",
"ty",
"=",
"int",
"(",
"row... | remove all low frequencies by setting a square in the middle of the
Fourier transformation of the size (2*threshold)^2 to zero
threshold = 0...1 | [
"remove",
"all",
"low",
"frequencies",
"by",
"setting",
"a",
"square",
"in",
"the",
"middle",
"of",
"the",
"Fourier",
"transformation",
"of",
"the",
"size",
"(",
"2",
"*",
"threshold",
")",
"^2",
"to",
"zero",
"threshold",
"=",
"0",
"...",
"1"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/FourierFilter.py#L27-L41 | train |
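A minimal numpy-only sketch of the high-pass step used by `highPassFilter` above: shift the spectrum, zero a square of low frequencies around the centre, then transform back:

```python
import numpy as np

img = np.add.outer(np.linspace(0, 1, 64), np.linspace(0, 1, 64))  # smooth ramp
f = np.fft.fftshift(np.fft.fft2(img))

rows, cols = img.shape
crow, ccol = rows // 2, cols // 2
t = int(0.1 * rows)
f[crow - t:crow + t, ccol - t:ccol + t] = 0       # kill the low frequencies

filtered = np.real(np.fft.ifft2(np.fft.ifftshift(f)))
print(round(float(filtered.std()), 4))            # far below img.std(): ramp removed
```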
radjkarl/imgProcessor | imgProcessor/filters/FourierFilter.py | FourierFilter.lowPassFilter | def lowPassFilter(self, threshold):
'''
remove all high frequencies by setting the boundary around a square in the middle
of the size (2*threshold)^2 to zero
threshold = 0...1
'''
if not threshold:
return
rows, cols = self.img.shape
tx = int(cols * threshold * 0.25)
ty = int(rows * threshold * 0.25)
# upper side
self.fshift[rows - tx:rows, :] = 0
# lower side
self.fshift[0:tx, :] = 0
# left side
self.fshift[:, 0:ty] = 0
# right side
self.fshift[:, cols - ty:cols] = 0 | python | def lowPassFilter(self, threshold):
'''
remove all high frequencies by setting the boundary around a square in the middle
of the size (2*threshold)^2 to zero
threshold = 0...1
'''
if not threshold:
return
rows, cols = self.img.shape
tx = int(cols * threshold * 0.25)
ty = int(rows * threshold * 0.25)
# upper side
self.fshift[rows - tx:rows, :] = 0
# lower side
self.fshift[0:tx, :] = 0
# left side
self.fshift[:, 0:ty] = 0
# right side
self.fshift[:, cols - ty:cols] = 0 | [
"def",
"lowPassFilter",
"(",
"self",
",",
"threshold",
")",
":",
"if",
"not",
"threshold",
":",
"return",
"rows",
",",
"cols",
"=",
"self",
".",
"img",
".",
"shape",
"tx",
"=",
"int",
"(",
"cols",
"*",
"threshold",
"*",
"0.25",
")",
"ty",
"=",
"int... | remove all high frequencies by setting boundary around a quarry in the middle
of the size (2*threshold)^2 to zero
threshold = 0...1 | [
"remove",
"all",
"high",
"frequencies",
"by",
"setting",
"boundary",
"around",
"a",
"quarry",
"in",
"the",
"middle",
"of",
"the",
"size",
"(",
"2",
"*",
"threshold",
")",
"^2",
"to",
"zero",
"threshold",
"=",
"0",
"...",
"1"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/FourierFilter.py#L43-L61 | train |
radjkarl/imgProcessor | imgProcessor/filters/FourierFilter.py | FourierFilter.reconstructImage | def reconstructImage(self):
'''
do inverse Fourier transform and return result
'''
f_ishift = np.fft.ifftshift(self.fshift)
return np.real(np.fft.ifft2(f_ishift)) | python | def reconstructImage(self):
'''
do inverse Fourier transform and return result
'''
f_ishift = np.fft.ifftshift(self.fshift)
return np.real(np.fft.ifft2(f_ishift)) | [
"def",
"reconstructImage",
"(",
"self",
")",
":",
"f_ishift",
"=",
"np",
".",
"fft",
".",
"ifftshift",
"(",
"self",
".",
"fshift",
")",
"return",
"np",
".",
"real",
"(",
"np",
".",
"fft",
".",
"ifft2",
"(",
"f_ishift",
")",
")"
] | do inverse Fourier transform and return result | [
"do",
"inverse",
"Fourier",
"transform",
"and",
"return",
"result"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/FourierFilter.py#L108-L113 | train |
radjkarl/imgProcessor | imgProcessor/interpolate/interpolate2dUnstructuredIDW.py | interpolate2dUnstructuredIDW | def interpolate2dUnstructuredIDW(x, y, v, grid, power=2):
'''
x,y,v --> 1d numpy.array
grid --> 2d numpy.array
fast if number of given values is small relative to grid resolution
'''
n = len(v)
gx = grid.shape[0]
gy = grid.shape[1]
for i in range(gx):
for j in range(gy):
overPx = False # if pixel position == point position
sumWi = 0.0
value = 0.0
for k in range(n):
xx = x[k]
yy = y[k]
vv = v[k]
if xx == i and yy == j:
grid[i, j] = vv
overPx = True
break
# weight from inverse distance:
wi = 1 / ((xx - i)**2 + (yy - j)**2)**(0.5 * power)
sumWi += wi
value += wi * vv
if not overPx:
grid[i, j] = value / sumWi
return grid | python | def interpolate2dUnstructuredIDW(x, y, v, grid, power=2):
'''
x,y,v --> 1d numpy.array
grid --> 2d numpy.array
fast if number of given values is small relative to grid resolution
'''
n = len(v)
gx = grid.shape[0]
gy = grid.shape[1]
for i in range(gx):
for j in range(gy):
overPx = False # if pixel position == point position
sumWi = 0.0
value = 0.0
for k in range(n):
xx = x[k]
yy = y[k]
vv = v[k]
if xx == i and yy == j:
grid[i, j] = vv
overPx = True
break
# weight from inverse distance:
wi = 1 / ((xx - i)**2 + (yy - j)**2)**(0.5 * power)
sumWi += wi
value += wi * vv
if not overPx:
grid[i, j] = value / sumWi
return grid | [
"def",
"interpolate2dUnstructuredIDW",
"(",
"x",
",",
"y",
",",
"v",
",",
"grid",
",",
"power",
"=",
"2",
")",
":",
"n",
"=",
"len",
"(",
"v",
")",
"gx",
"=",
"grid",
".",
"shape",
"[",
"0",
"]",
"gy",
"=",
"grid",
".",
"shape",
"[",
"1",
"]"... | x,y,v --> 1d numpy.array
grid --> 2d numpy.array
fast if number of given values is small relative to grid resolution | [
"x",
"y",
"v",
"--",
">",
"1d",
"numpy",
".",
"array",
"grid",
"--",
">",
"2d",
"numpy",
".",
"array",
"fast",
"if",
"number",
"of",
"given",
"values",
"is",
"small",
"relative",
"to",
"grid",
"resolution"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/interpolate/interpolate2dUnstructuredIDW.py#L8-L38 | train |
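A naive reference of the inverse-distance weighting used by `interpolate2dUnstructuredIDW` above (power=2, so the weights are 1/d²); coordinates are in pixel units and the sample values are placeholders:

```python
import numpy as np

x = np.array([2.0, 10.0, 25.0])
y = np.array([3.0, 20.0, 28.0])
v = np.array([1.0, 5.0, 9.0])
grid = np.zeros((30, 30))

for i in range(30):
    for j in range(30):
        d2 = (x - i)**2 + (y - j)**2
        if np.any(d2 == 0):
            grid[i, j] = v[d2.argmin()]        # grid point sits on a sample
        else:
            w = 1 / d2                          # power=2 -> weights 1/d**2
            grid[i, j] = np.sum(w * v) / np.sum(w)
print(round(grid[2, 3], 2))                     # 1.0, the first sample
```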
radjkarl/imgProcessor | imgProcessor/features/hog.py | hog | def hog(image, orientations=8, ksize=(5, 5)):
'''
returns the Histogram of Oriented Gradients
:param ksize: convolution kernel size as (y,x) - needs to be odd
:param orientations: number of orientations in between rad=0 and rad=pi
similar to http://scikit-image.org/docs/dev/auto_examples/plot_hog.html
but faster and with less options
'''
s0, s1 = image.shape[:2]
# speed up the process through saving generated kernels:
try:
k = hog.kernels[str(ksize) + str(orientations)]
except KeyError:
k = _mkConvKernel(ksize, orientations)
hog.kernels[str(ksize) + str(orientations)] = k
out = np.empty(shape=(s0, s1, orientations))
image[np.isnan(image)] = 0
for i in range(orientations):
out[:, :, i] = convolve(image, k[i])
return out | python | def hog(image, orientations=8, ksize=(5, 5)):
'''
returns the Histogram of Oriented Gradients
:param ksize: convolution kernel size as (y,x) - needs to be odd
:param orientations: number of orientations in between rad=0 and rad=pi
similar to http://scikit-image.org/docs/dev/auto_examples/plot_hog.html
but faster and with less options
'''
s0, s1 = image.shape[:2]
# speed up the process through saving generated kernels:
try:
k = hog.kernels[str(ksize) + str(orientations)]
except KeyError:
k = _mkConvKernel(ksize, orientations)
hog.kernels[str(ksize) + str(orientations)] = k
out = np.empty(shape=(s0, s1, orientations))
image[np.isnan(image)] = 0
for i in range(orientations):
out[:, :, i] = convolve(image, k[i])
return out | [
"def",
"hog",
"(",
"image",
",",
"orientations",
"=",
"8",
",",
"ksize",
"=",
"(",
"5",
",",
"5",
")",
")",
":",
"s0",
",",
"s1",
"=",
"image",
".",
"shape",
"[",
":",
"2",
"]",
"# speed up the process through saving generated kernels:\r",
"try",
":",
... | returns the Histogram of Oriented Gradients
:param ksize: convolution kernel size as (y,x) - needs to be odd
:param orientations: number of orientations in between rad=0 and rad=pi
similar to http://scikit-image.org/docs/dev/auto_examples/plot_hog.html
but faster and with less options | [
"returns",
"the",
"Histogram",
"of",
"Oriented",
"Gradients",
":",
"param",
"ksize",
":",
"convolution",
"kernel",
"size",
"as",
"(",
"y",
"x",
")",
"-",
"needs",
"to",
"be",
"odd",
":",
"param",
"orientations",
":",
"number",
"of",
"orientations",
"in",
... | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/features/hog.py#L40-L64 | train |
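A usage sketch for `hog` above on a single vertical edge; the import path is inferred from the row's file path:

```python
import numpy as np
from imgProcessor.features.hog import hog   # path inferred from the row

img = np.zeros((80, 80))
img[:, 40:] = 1.0                           # one vertical edge
h = hog(img, orientations=8, ksize=(5, 5))
print(h.shape)                              # (80, 80, 8): one layer per orientation
```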
radjkarl/imgProcessor | imgProcessor/features/hog.py | visualize | def visualize(hog, grid=(10, 10), radCircle=None):
'''
visualize HOG as polynomial around cell center
for [grid] * cells
'''
s0, s1, nang = hog.shape
angles = np.linspace(0, np.pi, nang + 1)[:-1]
# center of each sub array:
cx, cy = s0 // (2 * grid[0]), s1 // (2 * grid[1])
# max. radius of polynomial around center:
rx, ry = cx, cy
# for drawing a position indicator (circle):
if radCircle is None:
radCircle = max(1, rx // 10)
# output array:
out = np.zeros((s0, s1), dtype=np.uint8)
# point of polynomial:
pts = np.empty(shape=(1, 2 * nang, 2), dtype=np.int32)
# takes grid[0]*grid[1] sample HOG values:
samplesHOG = subCell2DFnArray(hog, lambda arr: arr[cx, cy], grid)
mxHOG = samplesHOG.max()
# sub array slices:
slices = list(subCell2DSlices(out, grid))
m = 0
for m, hhh in enumerate(samplesHOG.reshape(grid[0] * grid[1], nang)):
hhmax = hhh.max()
hh = hhh / hhmax
sout = out[slices[m][2:4]]
for n, (o, a) in enumerate(zip(hh, angles)):
pts[0, n, 0] = cx + np.cos(a) * o * rx
pts[0, n, 1] = cy + np.sin(a) * o * ry
pts[0, n + nang, 0] = cx + np.cos(a + np.pi) * o * rx
pts[0, n + nang, 1] = cy + np.sin(a + np.pi) * o * ry
cv2.fillPoly(sout, pts, int(255 * hhmax / mxHOG))
cv2.circle(sout, (cx, cy), radCircle, 0, thickness=-1)
return out | python | def visualize(hog, grid=(10, 10), radCircle=None):
'''
visualize HOG as polynomial around cell center
for [grid] * cells
'''
s0, s1, nang = hog.shape
angles = np.linspace(0, np.pi, nang + 1)[:-1]
# center of each sub array:
cx, cy = s0 // (2 * grid[0]), s1 // (2 * grid[1])
# max. radius of polynomial around center:
rx, ry = cx, cy
# for drawing a position indicator (circle):
if radCircle is None:
radCircle = max(1, rx // 10)
# output array:
out = np.zeros((s0, s1), dtype=np.uint8)
# point of polynomial:
pts = np.empty(shape=(1, 2 * nang, 2), dtype=np.int32)
# takes grid[0]*grid[1] sample HOG values:
samplesHOG = subCell2DFnArray(hog, lambda arr: arr[cx, cy], grid)
mxHOG = samplesHOG.max()
# sub array slices:
slices = list(subCell2DSlices(out, grid))
m = 0
for m, hhh in enumerate(samplesHOG.reshape(grid[0] * grid[1], nang)):
hhmax = hhh.max()
hh = hhh / hhmax
sout = out[slices[m][2:4]]
for n, (o, a) in enumerate(zip(hh, angles)):
pts[0, n, 0] = cx + np.cos(a) * o * rx
pts[0, n, 1] = cy + np.sin(a) * o * ry
pts[0, n + nang, 0] = cx + np.cos(a + np.pi) * o * rx
pts[0, n + nang, 1] = cy + np.sin(a + np.pi) * o * ry
cv2.fillPoly(sout, pts, int(255 * hhmax / mxHOG))
cv2.circle(sout, (cx, cy), radCircle, 0, thickness=-1)
return out | [
"def",
"visualize",
"(",
"hog",
",",
"grid",
"=",
"(",
"10",
",",
"10",
")",
",",
"radCircle",
"=",
"None",
")",
":",
"s0",
",",
"s1",
",",
"nang",
"=",
"hog",
".",
"shape",
"angles",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"np",
".",
"pi"... | visualize HOG as polynomial around cell center
for [grid] * cells | [
"visualize",
"HOG",
"as",
"polynomial",
"around",
"cell",
"center",
"for",
"[",
"grid",
"]",
"*",
"cells"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/features/hog.py#L68-L105 | train |
radjkarl/imgProcessor | imgProcessor/camera/flatField/postProcessing.py | postProcessing | def postProcessing(arr, method='KW replace + Gauss', mask=None):
'''
Post process measured flat field [arr].
Depending on the measurement, different
post processing [method]s are beneficial.
The available methods are presented in
---
K.Bedrich, M.Bokalic et al.:
ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
ADVANCED FLAT FIELD CALIBRATION,2017
---
methods:
'POLY replace' --> replace [arr] with a 2d polynomial fit
'KW replace' --> ... a fitted Kang-Weiss function
'AoV replace' --> ... a fitted Angle-of-view function
'POLY repair' --> same as above but either replacing empty
'KW repair' areas or smoothing out high gradient
'AoV repair' variations (POLY only)
'KW repair + Gauss' --> same as 'KW replace' with additional
'KW repair + Median' Gaussian or Median filter
mask:
None/2darray(bool) --> array of same shape as [arr] indicating
invalid or empty positions
'''
assert method in ppMETHODS, \
'post processing method (%s) must be one of %s' % (method, ppMETHODS)
if method == 'POLY replace':
return polyfit2dGrid(arr, mask, order=2, replace_all=True)
elif method == 'KW replace':
return function(arr, mask, replace_all=True)
elif method == 'POLY repair':
return polynomial(arr, mask, replace_all=False)
elif method == 'KW repair':
return function(arr, mask, replace_all=False)
elif method == 'KW repair + Median':
return median_filter(function(arr, mask, replace_all=False),
min(arr.shape) // 20)
elif method == 'KW repair + Gauss':
return gaussian_filter(function(arr, mask, replace_all=False),
min(arr.shape) // 20)
elif method == 'AoV repair':
return function(arr, mask, fn=lambda XY, a:
angleOfView(XY, arr.shape, a=a), guess=(0.01),
down_scale_factor=1)
elif method == 'AoV replace':
return function(arr, mask, fn=lambda XY, a:
angleOfView(XY, arr.shape, a=a), guess=(0.01),
replace_all=True, down_scale_factor=1) | python | def postProcessing(arr, method='KW replace + Gauss', mask=None):
'''
Post process measured flat field [arr].
Depending on the measurement, different
post processing [method]s are beneficial.
The available methods are presented in
---
K.Bedrich, M.Bokalic et al.:
ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
ADVANCED FLAT FIELD CALIBRATION,2017
---
methods:
'POLY replace' --> replace [arr] with a 2d polynomial fit
'KW replace' --> ... a fitted Kang-Weiss function
'AoV replace' --> ... a fitted Angle-of-view function
'POLY repair' --> same as above but either replacing empty
'KW repair' areas or smoothing out high gradient
'AoV repair' variations (POLY only)
'KW repair + Gauss' --> same as 'KW replace' with additional
'KW repair + Median' Gaussian or Median filter
mask:
None/2darray(bool) --> array of same shape as [arr] indicating
invalid or empty positions
'''
assert method in ppMETHODS, \
'post processing method (%s) must be one of %s' % (method, ppMETHODS)
if method == 'POLY replace':
return polyfit2dGrid(arr, mask, order=2, replace_all=True)
elif method == 'KW replace':
return function(arr, mask, replace_all=True)
elif method == 'POLY repair':
return polynomial(arr, mask, replace_all=False)
elif method == 'KW repair':
return function(arr, mask, replace_all=False)
elif method == 'KW repair + Median':
return median_filter(function(arr, mask, replace_all=False),
min(arr.shape) // 20)
elif method == 'KW repair + Gauss':
return gaussian_filter(function(arr, mask, replace_all=False),
min(arr.shape) // 20)
elif method == 'AoV repair':
return function(arr, mask, fn=lambda XY, a:
angleOfView(XY, arr.shape, a=a), guess=(0.01),
down_scale_factor=1)
elif method == 'AoV replace':
return function(arr, mask, fn=lambda XY, a:
angleOfView(XY, arr.shape, a=a), guess=(0.01),
replace_all=True, down_scale_factor=1) | [
"def",
"postProcessing",
"(",
"arr",
",",
"method",
"=",
"'KW replace + Gauss'",
",",
"mask",
"=",
"None",
")",
":",
"assert",
"method",
"in",
"ppMETHODS",
",",
"'post processing method (%s) must be one of %s'",
"%",
"(",
"method",
",",
"ppMETHODS",
")",
"if",
"... | Post process measured flat field [arr].
Depending on the measurement, different
post processing [method]s are beneficial.
The available methods are presented in
---
K.Bedrich, M.Bokalic et al.:
ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
ADVANCED FLAT FIELD CALIBRATION,2017
---
methods:
'POLY replace' --> replace [arr] with a 2d polynomial fit
'KW replace' --> ... a fitted Kang-Weiss function
'AoV replace' --> ... a fitted Angle-of-view function
'POLY repair' --> same as above but either replacing empty
'KW repair' areas of smoothing out high gradient
'AoV repair' variations (POLY only)
'KW repair + Gauss' --> same as 'KW replace' with additional
'KW repair + Median' Gaussian or Median filter
mask:
None/2darray(bool) --> array of same shape ar [arr] indicating
invalid or empty positions | [
"Post",
"process",
"measured",
"flat",
"field",
"[",
"arr",
"]",
".",
"Depending",
"on",
"the",
"measurement",
"different",
"post",
"processing",
"[",
"method",
"]",
"s",
"are",
"beneficial",
".",
"The",
"available",
"methods",
"are",
"presented",
"in",
"---... | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/flatField/postProcessing.py#L16-L74 | train |
radjkarl/imgProcessor | imgProcessor/transform/rmBorder.py | rmBorder | def rmBorder(img, border=None):
'''
border [None], if images are corrected and device ends at
image border
[one number] (like 50),
if there is an equally spaced border
around the device
[two tuples] like ((50,60),(1500,900))
means ((Xfrom,Yfrom),(Xto, Yto))
[four tuples] like ((x0,y0),(x1,y1),...(x3,y3))
'''
if border is None:
pass
elif len(border) == 2:
s0 = slice(border[0][1], border[1][1])
s1 = slice(border[0][0], border[1][0])
img = img[s0, s1]
elif len(border) == 4:
# eval whether border values are orthogonal:
x = np.unique(border[:, 0])
y = np.unique(border[:, 1])
if len(x) == 2 and len(y) == 2:
s0 = slice(y[0], y[1])
s1 = slice(x[0], x[1])
img = img[s0, s1]
else:
# edges are irregular:
img = simplePerspectiveTransform(img, border)
else:
raise Exception('[border] input wrong')
return img | python | def rmBorder(img, border=None):
'''
border [None], if images are corrected and device ends at
image border
[one number] (like 50),
if there is an equally spaced border
around the device
[two tuples] like ((50,60),(1500,900))
means ((Xfrom,Yfrom),(Xto, Yto))
[four tuples] like ((x0,y0),(x1,y1),...(x3,y3))
'''
if border is None:
pass
elif len(border) == 2:
s0 = slice(border[0][1], border[1][1])
s1 = slice(border[0][0], border[1][0])
img = img[s0, s1]
elif len(border) == 4:
# eval whether border values are orthogonal:
x = np.unique(border[:, 0])
y = np.unique(border[:, 1])
if len(x) == 2 and len(y) == 2:
s0 = slice(y[0], y[1])
s1 = slice(x[0], x[1])
img = img[s0, s1]
else:
# edges are irregular:
img = simplePerspectiveTransform(img, border)
else:
raise Exception('[border] input wrong')
return img | [
"def",
"rmBorder",
"(",
"img",
",",
"border",
"=",
"None",
")",
":",
"if",
"border",
"is",
"None",
":",
"pass",
"elif",
"len",
"(",
"border",
")",
"==",
"2",
":",
"s0",
"=",
"slice",
"(",
"border",
"[",
"0",
"]",
"[",
"1",
"]",
",",
"border",
... | border [None], if images are corrected and device ends at
image border
[one number] (like 50),
if there is an equally spaced border
around the device
[two tuples] like ((50,60),(1500,900))
means ((Xfrom,Yfrom),(Xto, Yto))
[four tuples] like ((x0,y0),(x1,y1),...(x3,y3)) | [
"border",
"[",
"None",
"]",
"if",
"images",
"are",
"corrected",
"and",
"device",
"ends",
"at",
"image",
"border",
"[",
"one",
"number",
"]",
"(",
"like",
"50",
")",
"if",
"there",
"is",
"an",
"equally",
"spaced",
"border",
"around",
"the",
"device",
"[... | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transform/rmBorder.py#L6-L36 | train |
radjkarl/imgProcessor | imgProcessor/features/SingleTimeEffectDetection.py | SingleTimeEffectDetection.addImage | def addImage(self, image, mask=None):
'''
#########
mask -- optional
'''
self._last_diff = diff = image - self.noSTE
ste = diff > self.threshold
removeSinglePixels(ste)
self.mask_clean = clean = ~ste
if mask is not None:
clean = np.logical_and(mask, clean)
self.mma.update(image, clean)
if self.save_ste_indices:
self.mask_STE += ste
return self | python | def addImage(self, image, mask=None):
'''
#########
mask -- optional
'''
self._last_diff = diff = image - self.noSTE
ste = diff > self.threshold
removeSinglePixels(ste)
self.mask_clean = clean = ~ste
if mask is not None:
clean = np.logical_and(mask, clean)
self.mma.update(image, clean)
if self.save_ste_indices:
self.mask_STE += ste
return self | [
"def",
"addImage",
"(",
"self",
",",
"image",
",",
"mask",
"=",
"None",
")",
":",
"self",
".",
"_last_diff",
"=",
"diff",
"=",
"image",
"-",
"self",
".",
"noSTE",
"ste",
"=",
"diff",
">",
"self",
".",
"threshold",
"removeSinglePixels",
"(",
"ste",
")... | #########
mask -- optional | [
"#########",
"mask",
"--",
"optional"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/features/SingleTimeEffectDetection.py#L55-L75 | train |
radjkarl/imgProcessor | imgProcessor/features/SingleTimeEffectDetection.py | SingleTimeEffectDetection.relativeAreaSTE | def relativeAreaSTE(self):
'''
return STE area - relative to image area
'''
s = self.noSTE.shape
return np.sum(self.mask_STE) / (s[0] * s[1]) | python | def relativeAreaSTE(self):
'''
return STE area - relative to image area
'''
s = self.noSTE.shape
return np.sum(self.mask_STE) / (s[0] * s[1]) | [
"def",
"relativeAreaSTE",
"(",
"self",
")",
":",
"s",
"=",
"self",
".",
"noSTE",
".",
"shape",
"return",
"np",
".",
"sum",
"(",
"self",
".",
"mask_STE",
")",
"/",
"(",
"s",
"[",
"0",
"]",
"*",
"s",
"[",
"1",
"]",
")"
] | return STE area - relative to image area | [
"return",
"STE",
"area",
"-",
"relative",
"to",
"image",
"area"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/features/SingleTimeEffectDetection.py#L83-L88 | train |
radjkarl/imgProcessor | imgProcessor/features/SingleTimeEffectDetection.py | SingleTimeEffectDetection.intensityDistributionSTE | def intensityDistributionSTE(self, bins=10, range=None):
'''
return distribution of STE intensity
'''
v = np.abs(self._last_diff[self.mask_STE])
return np.histogram(v, bins, range) | python | def intensityDistributionSTE(self, bins=10, range=None):
'''
return distribution of STE intensity
'''
v = np.abs(self._last_diff[self.mask_STE])
return np.histogram(v, bins, range) | [
"def",
"intensityDistributionSTE",
"(",
"self",
",",
"bins",
"=",
"10",
",",
"range",
"=",
"None",
")",
":",
"v",
"=",
"np",
".",
"abs",
"(",
"self",
".",
"_last_diff",
"[",
"self",
".",
"mask_STE",
"]",
")",
"return",
"np",
".",
"histogram",
"(",
... | return distribution of STE intensity | [
"return",
"distribution",
"of",
"STE",
"intensity"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/features/SingleTimeEffectDetection.py#L90-L95 | train |
radjkarl/imgProcessor | imgProcessor/transformations.py | toUIntArray | def toUIntArray(img, dtype=None, cutNegative=True, cutHigh=True,
range=None, copy=True):
'''
transform a float to an unsigned integer array of a fitting dtype
adds an offset, to get rid of negative values
range = (min, max) - scale values between given range
cutNegative - all values <0 will be set to 0
cutHigh - set to False to rather scale values to fit
'''
mn, mx = None, None
if range is not None:
mn, mx = range
if dtype is None:
if mx is None:
mx = np.nanmax(img)
dtype = np.uint16 if mx > 255 else np.uint8
dtype = np.dtype(dtype)
if dtype == img.dtype:
return img
# get max px value:
b = {'uint8': 255,
'uint16': 65535,
'uint32': 4294967295,
'uint64': 18446744073709551615}[dtype.name]
if copy:
img = img.copy()
if range is not None:
img = np.asfarray(img)
img -= mn
# img[img<0]=0
# print np.nanmin(img), np.nanmax(img), mn, mx, range, b
img *= b / (mx - mn)
# print np.nanmin(img), np.nanmax(img), mn, mx, range, b
img = np.clip(img, 0, b)
else:
if cutNegative:
img[img < 0] = 0
else:
# add an offset to all values:
mn = np.min(img)
if mn < 0:
img -= mn # set minimum to 0
if cutHigh:
#ind = img > b
img[img > b] = b
else:
# scale values
mx = np.nanmax(img)
img = np.asfarray(img) * (float(b) / mx)
img = img.astype(dtype)
# if range is not None and cutHigh:
# img[ind] = b
return img | python | def toUIntArray(img, dtype=None, cutNegative=True, cutHigh=True,
range=None, copy=True):
'''
transform a float to an unsigned integer array of a fitting dtype
adds an offset, to get rid of negative values
range = (min, max) - scale values between given range
cutNegative - all values <0 will be set to 0
cutHigh - set to False to rather scale values to fit
'''
mn, mx = None, None
if range is not None:
mn, mx = range
if dtype is None:
if mx is None:
mx = np.nanmax(img)
dtype = np.uint16 if mx > 255 else np.uint8
dtype = np.dtype(dtype)
if dtype == img.dtype:
return img
# get max px value:
b = {'uint8': 255,
'uint16': 65535,
'uint32': 4294967295,
'uint64': 18446744073709551615}[dtype.name]
if copy:
img = img.copy()
if range is not None:
img = np.asfarray(img)
img -= mn
# img[img<0]=0
# print np.nanmin(img), np.nanmax(img), mn, mx, range, b
img *= b / (mx - mn)
# print np.nanmin(img), np.nanmax(img), mn, mx, range, b
img = np.clip(img, 0, b)
else:
if cutNegative:
img[img < 0] = 0
else:
# add an offset to all values:
mn = np.min(img)
if mn < 0:
img -= mn # set minimum to 0
if cutHigh:
#ind = img > b
img[img > b] = b
else:
# scale values
mx = np.nanmax(img)
img = np.asfarray(img) * (float(b) / mx)
img = img.astype(dtype)
# if range is not None and cutHigh:
# img[ind] = b
return img | [
"def",
"toUIntArray",
"(",
"img",
",",
"dtype",
"=",
"None",
",",
"cutNegative",
"=",
"True",
",",
"cutHigh",
"=",
"True",
",",
"range",
"=",
"None",
",",
"copy",
"=",
"True",
")",
":",
"mn",
",",
"mx",
"=",
"None",
",",
"None",
"if",
"range",
"i... | transform a float to an unsigned integer array of a fitting dtype
adds an offset, to get rid of negative values
range = (min, max) - scale values between given range
cutNegative - all values <0 will be set to 0
cutHigh - set to False to rather scale values to fit | [
"transform",
"a",
"float",
"to",
"an",
"unsigned",
"integer",
"array",
"of",
"a",
"fitting",
"dtype",
"adds",
"an",
"offset",
"to",
"get",
"rid",
"of",
"negative",
"values",
"range",
"=",
"(",
"min",
"max",
")",
"-",
"scale",
"values",
"between",
"given"... | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transformations.py#L11-L75 | train |
radjkarl/imgProcessor | imgProcessor/transformations.py | toFloatArray | def toFloatArray(img):
'''
transform an unsigned integer array into a
float array of the right size
'''
_D = {1: np.float32, # uint8
2: np.float32, # uint16
4: np.float64, # uint32
8: np.float64} # uint64
return img.astype(_D[img.itemsize]) | python | def toFloatArray(img):
'''
transform an unsigned integer array into a
float array of the right size
'''
_D = {1: np.float32, # uint8
2: np.float32, # uint16
4: np.float64, # uint32
8: np.float64} # uint64
return img.astype(_D[img.itemsize]) | [
"def",
"toFloatArray",
"(",
"img",
")",
":",
"_D",
"=",
"{",
"1",
":",
"np",
".",
"float32",
",",
"# uint8\r",
"2",
":",
"np",
".",
"float32",
",",
"# uint16\r",
"4",
":",
"np",
".",
"float64",
",",
"# uint32\r",
"8",
":",
"np",
".",
"float64",
"... | transform an unsigned integer array into a
float array of the right size | [
"transform",
"an",
"unsigned",
"integer",
"array",
"into",
"a",
"float",
"array",
"of",
"the",
"right",
"size"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transformations.py#L78-L87 | train |
radjkarl/imgProcessor | imgProcessor/transformations.py | toNoUintArray | def toNoUintArray(arr):
'''
cast array to the next higher integer array
if dtype=unsigned integer
'''
d = arr.dtype
if d.kind == 'u':
arr = arr.astype({1: np.int16,
2: np.int32,
4: np.int64}[d.itemsize])
return arr | python | def toNoUintArray(arr):
'''
cast array to the next higher integer array
if dtype=unsigned integer
'''
d = arr.dtype
if d.kind == 'u':
arr = arr.astype({1: np.int16,
2: np.int32,
4: np.int64}[d.itemsize])
return arr | [
"def",
"toNoUintArray",
"(",
"arr",
")",
":",
"d",
"=",
"arr",
".",
"dtype",
"if",
"d",
".",
"kind",
"==",
"'u'",
":",
"arr",
"=",
"arr",
".",
"astype",
"(",
"{",
"1",
":",
"np",
".",
"int16",
",",
"2",
":",
"np",
".",
"int32",
",",
"4",
":... | cast array to the next higher integer array
if dtype=unsigned integer | [
"cast",
"array",
"to",
"the",
"next",
"higher",
"integer",
"array",
"if",
"dtype",
"=",
"unsigned",
"integer"
] | 7c5a28718f81c01a430152c60a686ac50afbfd7c | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transformations.py#L90-L100 | train |