#!/usr/bin/env python
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://pythonhosted.org/setuptools/easy_install.html
"""
from glob import glob
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import DistutilsArgError, DistutilsOptionError, \
DistutilsError, DistutilsPlatformError
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from distutils import log, dir_util
from distutils.command.build_scripts import first_line_re
from distutils.spawn import find_executable
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import platform
import textwrap
import warnings
import site
import struct
import contextlib
import subprocess
import shlex
import io
from setuptools import Command
from setuptools.sandbox import run_setup
from setuptools.py31compat import get_path, get_config_vars
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import PackageIndex
from setuptools.package_index import URL_SCHEME
from setuptools.command import bdist_egg, egg_info
from setuptools.compat import (iteritems, maxsize, basestring, unicode,
reraise, PY2, PY3)
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
import pkg_resources
# Turn on PEP440Warnings
warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
__all__ = [
'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
'main', 'get_exe_prefixes',
]
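# Illustrative note (not part of the original source): struct.calcsize("P")
# below is the size of a C pointer in bytes, so is_64bit() reports whether
# this interpreter is a 64-bit build (8-byte pointers).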
def is_64bit():
return struct.calcsize("P") == 8
def samefile(p1, p2):
both_exist = os.path.exists(p1) and os.path.exists(p2)
use_samefile = hasattr(os.path, 'samefile') and both_exist
if use_samefile:
return os.path.samefile(p1, p2)
norm_p1 = os.path.normpath(os.path.normcase(p1))
norm_p2 = os.path.normpath(os.path.normcase(p2))
return norm_p1 == norm_p2
if PY2:
def _to_ascii(s):
return s
def isascii(s):
try:
unicode(s, 'ascii')
return True
except UnicodeError:
return False
else:
def _to_ascii(s):
return s.encode('ascii')
def isascii(s):
try:
s.encode('ascii')
return True
except UnicodeError:
return False
class easy_install(Command):
"""Manage a download/build/install process"""
description = "Find/get/install Python packages"
command_consumes_arguments = True
user_options = [
('prefix=', None, "installation prefix"),
("zip-ok", "z", "install package as a zipfile"),
("multi-version", "m", "make apps have to require() a version"),
("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
("install-dir=", "d", "install package to DIR"),
("script-dir=", "s", "install scripts to DIR"),
("exclude-scripts", "x", "Don't install scripts"),
("always-copy", "a", "Copy all needed packages to install dir"),
("index-url=", "i", "base URL of Python Package Index"),
("find-links=", "f", "additional URL(s) to search for packages"),
("build-directory=", "b",
"download/extract/build in DIR; keep the results"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('record=', None,
"filename in which to record list of installed files"),
('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
('site-dirs=', 'S', "list of directories where .pth files work"),
('editable', 'e', "Install specified packages in editable form"),
('no-deps', 'N', "don't install dependencies"),
('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
('local-snapshots-ok', 'l',
"allow building eggs from local checkouts"),
('version', None, "print version information and exit"),
('no-find-links', None,
"Don't load find-links defined in packages being installed")
]
boolean_options = [
'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
'editable',
'no-deps', 'local-snapshots-ok', 'version'
]
if site.ENABLE_USER_SITE:
help_msg = "install in user site-package '%s'" % site.USER_SITE
user_options.append(('user', None, help_msg))
boolean_options.append('user')
negative_opt = {'always-unzip': 'zip-ok'}
create_index = PackageIndex
def initialize_options(self):
# the --user option seems to be an opt-in one,
# so the default should be False.
self.user = 0
self.zip_ok = self.local_snapshots_ok = None
self.install_dir = self.script_dir = self.exclude_scripts = None
self.index_url = None
self.find_links = None
self.build_directory = None
self.args = None
self.optimize = self.record = None
self.upgrade = self.always_copy = self.multi_version = None
self.editable = self.no_deps = self.allow_hosts = None
self.root = self.prefix = self.no_report = None
self.version = None
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.install_base = None
self.install_platbase = None
if site.ENABLE_USER_SITE:
self.install_userbase = site.USER_BASE
self.install_usersite = site.USER_SITE
else:
self.install_userbase = None
self.install_usersite = None
self.no_find_links = None
# Options not specifiable via command line
self.package_index = None
self.pth_file = self.always_copy_from = None
self.site_dirs = None
self.installed_projects = {}
self.sitepy_installed = False
# Always read easy_install options, even if we are subclassed, or have
# an independent instance created. This ensures that defaults will
# always come from the standard configuration file(s)' "easy_install"
# section, even if this is a "develop" or "install" command, or some
# other embedding.
self._dry_run = None
self.verbose = self.distribution.verbose
self.distribution._set_command_options(
self, self.distribution.get_option_dict('easy_install')
)
def delete_blockers(self, blockers):
extant_blockers = (
filename for filename in blockers
if os.path.exists(filename) or os.path.islink(filename)
)
list(map(self._delete_path, extant_blockers))
def _delete_path(self, path):
log.info("Deleting %s", path)
if self.dry_run:
return
is_tree = os.path.isdir(path) and not os.path.islink(path)
remover = rmtree if is_tree else os.unlink
remover(path)
@staticmethod
def _render_version():
"""
Render the Setuptools version and installation details, then exit.
"""
ver = sys.version[:3]
dist = get_distribution('setuptools')
tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
print(tmpl.format(**locals()))
raise SystemExit()
def finalize_options(self):
self.version and self._render_version()
py_version = sys.version.split()[0]
prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {
'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': py_version[0:3],
'py_version_nodot': py_version[0] + py_version[2],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
# Only python 3.2+ has abiflags
'abiflags': getattr(sys, 'abiflags', ''),
}
if site.ENABLE_USER_SITE:
self.config_vars['userbase'] = self.install_userbase
self.config_vars['usersite'] = self.install_usersite
self._fix_install_dir_for_user_site()
self.expand_basedirs()
self.expand_dirs()
self._expand('install_dir', 'script_dir', 'build_directory',
'site_dirs')
# If a non-default installation directory was specified, default the
# script directory to match it.
if self.script_dir is None:
self.script_dir = self.install_dir
if self.no_find_links is None:
self.no_find_links = False
# Let install_dir get set by install_lib command, which in turn
# gets its info from the install command, and takes into account
# --prefix and --home and all that other crud.
self.set_undefined_options(
'install_lib', ('install_dir', 'install_dir')
)
# Likewise, set default script_dir from 'install_scripts.install_dir'
self.set_undefined_options(
'install_scripts', ('install_dir', 'script_dir')
)
if self.user and self.install_purelib:
self.install_dir = self.install_purelib
self.script_dir = self.install_scripts
# default --record from the install command
self.set_undefined_options('install', ('record', 'record'))
# Should this be moved to the if statement below? It's not used
# elsewhere
normpath = list(map(normalize_path, sys.path))  # materialized: a map object would be exhausted after one membership test on Python 3
self.all_site_dirs = get_site_dirs()
if self.site_dirs is not None:
site_dirs = [
os.path.expanduser(s.strip()) for s in
self.site_dirs.split(',')
]
for d in site_dirs:
if not os.path.isdir(d):
log.warn("%s (in --site-dirs) does not exist", d)
elif normalize_path(d) not in normpath:
raise DistutilsOptionError(
d + " (in --site-dirs) is not on sys.path"
)
else:
self.all_site_dirs.append(normalize_path(d))
if not self.editable:
self.check_site_dir()
self.index_url = self.index_url or "https://pypi.python.org/simple"
self.shadow_path = self.all_site_dirs[:]
for path_item in self.install_dir, normalize_path(self.script_dir):
if path_item not in self.shadow_path:
self.shadow_path.insert(0, path_item)
if self.allow_hosts is not None:
hosts = [s.strip() for s in self.allow_hosts.split(',')]
else:
hosts = ['*']
if self.package_index is None:
self.package_index = self.create_index(
self.index_url, search_path=self.shadow_path, hosts=hosts,
)
self.local_index = Environment(self.shadow_path + sys.path)
if self.find_links is not None:
if isinstance(self.find_links, basestring):
self.find_links = self.find_links.split()
else:
self.find_links = []
if self.local_snapshots_ok:
self.package_index.scan_egg_links(self.shadow_path + sys.path)
if not self.no_find_links:
self.package_index.add_find_links(self.find_links)
self.set_undefined_options('install_lib', ('optimize', 'optimize'))
if not isinstance(self.optimize, int):
try:
self.optimize = int(self.optimize)
if not (0 <= self.optimize <= 2):
raise ValueError
except ValueError:
raise DistutilsOptionError("--optimize must be 0, 1, or 2")
if self.editable and not self.build_directory:
raise DistutilsArgError(
"Must specify a build directory (-b) when using --editable"
)
if not self.args:
raise DistutilsArgError(
"No urls, filenames, or requirements specified (see --help)")
self.outputs = []
def _fix_install_dir_for_user_site(self):
"""
Fix the install_dir if "--user" was used.
"""
if not self.user or not site.ENABLE_USER_SITE:
return
self.create_home_path()
if self.install_userbase is None:
msg = "User base directory is not specified"
raise DistutilsPlatformError(msg)
self.install_base = self.install_platbase = self.install_userbase
scheme_name = os.name.replace('posix', 'unix') + '_user'
self.select_scheme(scheme_name)
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
self._expand_attrs(['install_purelib', 'install_platlib',
'install_lib', 'install_headers',
'install_scripts', 'install_data', ])
def run(self):
if self.verbose != self.distribution.verbose:
log.set_verbosity(self.verbose)
try:
for spec in self.args:
self.easy_install(spec, not self.no_deps)
if self.record:
outputs = self.outputs
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in range(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
from distutils import file_util
self.execute(
file_util.write_file, (self.record, outputs),
"writing list of installed files to '%s'" %
self.record
)
self.warn_deprecated_options()
finally:
log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
"""Return a pseudo-tempname base in the install directory.
This code is intentionally naive; if a malicious party can write to
the target directory you're already in deep doodoo.
"""
try:
pid = os.getpid()
except:
pid = random.randint(0, maxsize)
return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
def warn_deprecated_options(self):
pass
def check_site_dir(self):
"""Verify that self.install_dir is .pth-capable dir, if needed"""
instdir = normalize_path(self.install_dir)
pth_file = os.path.join(instdir, 'easy-install.pth')
# Is it a configured, PYTHONPATH, implicit, or explicit site dir?
is_site_dir = instdir in self.all_site_dirs
if not is_site_dir and not self.multi_version:
# No? Then directly test whether it does .pth file processing
is_site_dir = self.check_pth_processing()
else:
# make sure we can write to target dir
testfile = self.pseudo_tempname() + '.write-test'
test_exists = os.path.exists(testfile)
try:
if test_exists:
os.unlink(testfile)
open(testfile, 'w').close()
os.unlink(testfile)
except (OSError, IOError):
self.cant_write_to_target()
if not is_site_dir and not self.multi_version:
# Can't install non-multi to non-site dir
raise DistutilsError(self.no_default_version_msg())
if is_site_dir:
if self.pth_file is None:
self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
else:
self.pth_file = None
PYTHONPATH = os.environ.get('PYTHONPATH', '').split(os.pathsep)
if instdir not in map(normalize_path, filter(None, PYTHONPATH)):
# only PYTHONPATH dirs need a site.py, so pretend it's there
self.sitepy_installed = True
elif self.multi_version and not os.path.exists(pth_file):
self.sitepy_installed = True # don't need site.py in this case
self.pth_file = None # and don't create a .pth file
self.install_dir = instdir
__cant_write_msg = textwrap.dedent("""
can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
%s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
""").lstrip()
__not_exists_id = textwrap.dedent("""
This directory does not currently exist. Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
""").lstrip()
__access_msg = textwrap.dedent("""
Perhaps your account does not have write access to this directory? If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account. If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
https://pythonhosted.org/setuptools/easy_install.html
Please make the appropriate changes for your system and try again.
""").lstrip()
def cant_write_to_target(self):
msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)
if not os.path.exists(self.install_dir):
msg += '\n' + self.__not_exists_id
else:
msg += '\n' + self.__access_msg
raise DistutilsError(msg)
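# Sketch of the probe performed by check_pth_processing() below (comment
# only, derived from the code that follows): it drops a throwaway
# "test-easy-install-<pid>.pth" file into the install directory whose single
# line writes a matching ".ok" marker file when Python processes it, spawns
# a fresh interpreter with "-E -c pass", and then checks whether the marker
# appeared. If it did, the directory supports .pth processing.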
def check_pth_processing(self):
"""Empirically verify whether .pth files are supported in inst. dir"""
instdir = self.install_dir
log.info("Checking .pth file support in %s", instdir)
pth_file = self.pseudo_tempname() + ".pth"
ok_file = pth_file + '.ok'
ok_exists = os.path.exists(ok_file)
try:
if ok_exists:
os.unlink(ok_file)
dirname = os.path.dirname(ok_file)
if not os.path.exists(dirname):
os.makedirs(dirname)
f = open(pth_file, 'w')
except (OSError, IOError):
self.cant_write_to_target()
else:
try:
f.write("import os; f = open(%r, 'w'); f.write('OK'); "
"f.close()\n" % (ok_file,))
f.close()
f = None
executable = sys.executable
if os.name == 'nt':
dirname, basename = os.path.split(executable)
alt = os.path.join(dirname, 'pythonw.exe')
if (basename.lower() == 'python.exe' and
os.path.exists(alt)):
# use pythonw.exe to avoid opening a console window
executable = alt
from distutils.spawn import spawn
spawn([executable, '-E', '-c', 'pass'], 0)
if os.path.exists(ok_file):
log.info(
"TEST PASSED: %s appears to support .pth files",
instdir
)
return True
finally:
if f:
f.close()
if os.path.exists(ok_file):
os.unlink(ok_file)
if os.path.exists(pth_file):
os.unlink(pth_file)
if not self.multi_version:
log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
return False
def install_egg_scripts(self, dist):
"""Write all the scripts for `dist`, unless scripts are excluded"""
if not self.exclude_scripts and dist.metadata_isdir('scripts'):
for script_name in dist.metadata_listdir('scripts'):
if dist.metadata_isdir('scripts/' + script_name):
# The "script" is a directory, likely a Python 3
# __pycache__ directory, so skip it.
continue
self.install_script(
dist, script_name,
dist.get_metadata('scripts/' + script_name)
)
self.install_wrapper_scripts(dist)
def add_output(self, path):
if os.path.isdir(path):
for base, dirs, files in os.walk(path):
for filename in files:
self.outputs.append(os.path.join(base, filename))
else:
self.outputs.append(path)
def not_editable(self, spec):
if self.editable:
raise DistutilsArgError(
"Invalid argument %r: you can't use filenames or URLs "
"with --editable (except via the --find-links option)."
% (spec,)
)
def check_editable(self, spec):
if not self.editable:
return
if os.path.exists(os.path.join(self.build_directory, spec.key)):
raise DistutilsArgError(
"%r already exists in %s; can't do a checkout there" %
(spec.key, self.build_directory)
)
def easy_install(self, spec, deps=False):
tmpdir = tempfile.mkdtemp(prefix="easy_install-")
download = None
if not self.editable:
self.install_site_py()
try:
if not isinstance(spec, Requirement):
if URL_SCHEME(spec):
# It's a url, download it to tmpdir and process
self.not_editable(spec)
download = self.package_index.download(spec, tmpdir)
return self.install_item(None, download, tmpdir, deps,
True)
elif os.path.exists(spec):
# Existing file or directory, just process it directly
self.not_editable(spec)
return self.install_item(None, spec, tmpdir, deps, True)
else:
spec = parse_requirement_arg(spec)
self.check_editable(spec)
dist = self.package_index.fetch_distribution(
spec, tmpdir, self.upgrade, self.editable,
not self.always_copy, self.local_index
)
if dist is None:
msg = "Could not find suitable distribution for %r" % spec
if self.always_copy:
msg += " (--always-copy skips system and development eggs)"
raise DistutilsError(msg)
elif dist.precedence == DEVELOP_DIST:
# .egg-info dists don't need installing, just process deps
self.process_distribution(spec, dist, deps, "Using")
return dist
else:
return self.install_item(spec, dist.location, tmpdir, deps)
finally:
if os.path.exists(tmpdir):
rmtree(tmpdir)
def install_item(self, spec, download, tmpdir, deps, install_needed=False):
# Installation is also needed if the file is in tmpdir or is not an egg
install_needed = install_needed or self.always_copy
install_needed = install_needed or os.path.dirname(download) == tmpdir
install_needed = install_needed or not download.endswith('.egg')
install_needed = install_needed or (
self.always_copy_from is not None and
os.path.dirname(normalize_path(download)) ==
normalize_path(self.always_copy_from)
)
if spec and not install_needed:
# at this point, we know it's a local .egg, we just don't know if
# it's already installed.
for dist in self.local_index[spec.project_name]:
if dist.location == download:
break
else:
install_needed = True # it's not in the local index
log.info("Processing %s", os.path.basename(download))
if install_needed:
dists = self.install_eggs(spec, download, tmpdir)
for dist in dists:
self.process_distribution(spec, dist, deps)
else:
dists = [self.egg_distribution(download)]
self.process_distribution(spec, dists[0], deps, "Using")
if spec is not None:
for dist in dists:
if dist in spec:
return dist
def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
def process_distribution(self, requirement, dist, deps=True, *info):
self.update_pth(dist)
self.package_index.add(dist)
if dist in self.local_index[dist.key]:
self.local_index.remove(dist)
self.local_index.add(dist)
self.install_egg_scripts(dist)
self.installed_projects[dist.key] = dist
log.info(self.installation_report(requirement, dist, *info))
if (dist.has_metadata('dependency_links.txt') and
not self.no_find_links):
self.package_index.add_find_links(
dist.get_metadata_lines('dependency_links.txt')
)
if not deps and not self.always_copy:
return
elif requirement is not None and dist.key != requirement.key:
log.warn("Skipping dependencies for %s", dist)
return # XXX this is not the distribution we were looking for
elif requirement is None or dist not in requirement:
# if we wound up with a different version, resolve what we've got
distreq = dist.as_requirement()
requirement = requirement or distreq
requirement = Requirement(
distreq.project_name, distreq.specs, requirement.extras
)
log.info("Processing dependencies for %s", requirement)
try:
distros = WorkingSet([]).resolve(
[requirement], self.local_index, self.easy_install
)
except DistributionNotFound as e:
raise DistutilsError(str(e))
except VersionConflict as e:
raise DistutilsError(e.report())
if self.always_copy or self.always_copy_from:
# Force all the relevant distros to be copied or activated
for dist in distros:
if dist.key not in self.installed_projects:
self.easy_install(dist.as_requirement())
log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
if self.zip_ok is not None:
return not self.zip_ok
if dist.has_metadata('not-zip-safe'):
return True
if not dist.has_metadata('zip-safe'):
return True
return False
def maybe_move(self, spec, dist_filename, setup_base):
dst = os.path.join(self.build_directory, spec.key)
if os.path.exists(dst):
msg = ("%r already exists in %s; build directory %s will not be "
"kept")
log.warn(msg, spec.key, self.build_directory, setup_base)
return setup_base
if os.path.isdir(dist_filename):
setup_base = dist_filename
else:
if os.path.dirname(dist_filename) == setup_base:
os.unlink(dist_filename) # get it out of the tmp dir
contents = os.listdir(setup_base)
if len(contents) == 1:
dist_filename = os.path.join(setup_base, contents[0])
if os.path.isdir(dist_filename):
# if the only thing there is a directory, move it instead
setup_base = dist_filename
ensure_directory(dst)
shutil.move(setup_base, dst)
return dst
def install_wrapper_scripts(self, dist):
if self.exclude_scripts:
return
for args in ScriptWriter.best().get_args(dist):
self.write_script(*args)
def install_script(self, dist, script_name, script_text, dev_path=None):
"""Generate a legacy script wrapper and install it"""
spec = str(dist.as_requirement())
is_script = is_python_script(script_text, script_name)
if is_script:
body = self._load_template(dev_path) % locals()
script_text = ScriptWriter.get_header(script_text) + body
self.write_script(script_name, _to_ascii(script_text), 'b')
@staticmethod
def _load_template(dev_path):
"""
There are a couple of template scripts in the package. This
function loads one of them and prepares it for use.
"""
# See https://bitbucket.org/pypa/setuptools/issue/134 for info
# on script file naming and downstream issues with SVR4
name = 'script.tmpl'
if dev_path:
name = name.replace('.tmpl', ' (dev).tmpl')
raw_bytes = resource_string('setuptools', name)
return raw_bytes.decode('utf-8')
def write_script(self, script_name, contents, mode="t", blockers=()):
"""Write an executable file to the scripts directory"""
self.delete_blockers( # clean up old .py/.pyw w/o a script
[os.path.join(self.script_dir, x) for x in blockers]
)
log.info("Installing %s script to %s", script_name, self.script_dir)
target = os.path.join(self.script_dir, script_name)
self.add_output(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
if os.path.exists(target):
os.unlink(target)
with open(target, "w" + mode) as f:
f.write(contents)
chmod(target, 0o777 - mask)
def install_eggs(self, spec, dist_filename, tmpdir):
# .egg dirs or files are already built, so just return them
if dist_filename.lower().endswith('.egg'):
return [self.install_egg(dist_filename, tmpdir)]
elif dist_filename.lower().endswith('.exe'):
return [self.install_exe(dist_filename, tmpdir)]
# Anything else, try to extract and build
setup_base = tmpdir
if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
unpack_archive(dist_filename, tmpdir, self.unpack_progress)
elif os.path.isdir(dist_filename):
setup_base = os.path.abspath(dist_filename)
if (setup_base.startswith(tmpdir) # something we downloaded
and self.build_directory and spec is not None):
setup_base = self.maybe_move(spec, dist_filename, setup_base)
# Find the setup.py file
setup_script = os.path.join(setup_base, 'setup.py')
if not os.path.exists(setup_script):
setups = glob(os.path.join(setup_base, '*', 'setup.py'))
if not setups:
raise DistutilsError(
"Couldn't find a setup script in %s" %
os.path.abspath(dist_filename)
)
if len(setups) > 1:
raise DistutilsError(
"Multiple setup scripts in %s" %
os.path.abspath(dist_filename)
)
setup_script = setups[0]
# Now run it, and return the result
if self.editable:
log.info(self.report_editable(spec, setup_script))
return []
else:
return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
if os.path.isdir(egg_path):
metadata = PathMetadata(egg_path, os.path.join(egg_path,
'EGG-INFO'))
else:
metadata = EggMetadata(zipimport.zipimporter(egg_path))
return Distribution.from_filename(egg_path, metadata=metadata)
def install_egg(self, egg_path, tmpdir):
destination = os.path.join(self.install_dir,
os.path.basename(egg_path))
destination = os.path.abspath(destination)
if not self.dry_run:
ensure_directory(destination)
dist = self.egg_distribution(egg_path)
if not samefile(egg_path, destination):
if os.path.isdir(destination) and not os.path.islink(destination):
dir_util.remove_tree(destination, dry_run=self.dry_run)
elif os.path.exists(destination):
self.execute(os.unlink, (destination,), "Removing " +
destination)
try:
new_dist_is_zipped = False
if os.path.isdir(egg_path):
if egg_path.startswith(tmpdir):
f, m = shutil.move, "Moving"
else:
f, m = shutil.copytree, "Copying"
elif self.should_unzip(dist):
self.mkpath(destination)
f, m = self.unpack_and_compile, "Extracting"
else:
new_dist_is_zipped = True
if egg_path.startswith(tmpdir):
f, m = shutil.move, "Moving"
else:
f, m = shutil.copy2, "Copying"
self.execute(f, (egg_path, destination),
(m + " %s to %s") %
(os.path.basename(egg_path),
os.path.dirname(destination)))
update_dist_caches(destination,
fix_zipimporter_caches=new_dist_is_zipped)
except:
update_dist_caches(destination, fix_zipimporter_caches=False)
raise
self.add_output(destination)
return self.egg_distribution(destination)
def install_exe(self, dist_filename, tmpdir):
# See if it's valid, get data
cfg = extract_wininst_cfg(dist_filename)
if cfg is None:
raise DistutilsError(
"%s is not a valid distutils Windows .exe" % dist_filename
)
# Create a dummy distribution object until we build the real distro
dist = Distribution(
None,
project_name=cfg.get('metadata', 'name'),
version=cfg.get('metadata', 'version'), platform=get_platform(),
)
# Convert the .exe to an unpacked egg
egg_path = dist.location = os.path.join(tmpdir, dist.egg_name() +
'.egg')
egg_tmp = egg_path + '.tmp'
_egg_info = os.path.join(egg_tmp, 'EGG-INFO')
pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX
self.exe_to_egg(dist_filename, egg_tmp)
# Write EGG-INFO/PKG-INFO
if not os.path.exists(pkg_inf):
f = open(pkg_inf, 'w')
f.write('Metadata-Version: 1.0\n')
for k, v in cfg.items('metadata'):
if k != 'target_version':
f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
f.close()
script_dir = os.path.join(_egg_info, 'scripts')
# delete entry-point scripts to avoid duping
self.delete_blockers(
[os.path.join(script_dir, args[0]) for args in
ScriptWriter.get_args(dist)]
)
# Build .egg file from tmpdir
bdist_egg.make_zipfile(
egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run
)
# install the .egg
return self.install_egg(egg_path, tmpdir)
def exe_to_egg(self, dist_filename, egg_tmp):
"""Extract a bdist_wininst to the directories an egg would use"""
# Check for .pth file and set up prefix translations
prefixes = get_exe_prefixes(dist_filename)
to_compile = []
native_libs = []
top_level = {}
def process(src, dst):
s = src.lower()
for old, new in prefixes:
if s.startswith(old):
src = new + src[len(old):]
parts = src.split('/')
dst = os.path.join(egg_tmp, *parts)
dl = dst.lower()
if dl.endswith('.pyd') or dl.endswith('.dll'):
parts[-1] = bdist_egg.strip_module(parts[-1])
top_level[os.path.splitext(parts[0])[0]] = 1
native_libs.append(src)
elif dl.endswith('.py') and old != 'SCRIPTS/':
top_level[os.path.splitext(parts[0])[0]] = 1
to_compile.append(dst)
return dst
if not src.endswith('.pth'):
log.warn("WARNING: can't process %s", src)
return None
# extract, tracking .pyd/.dll->native_libs and .py -> to_compile
unpack_archive(dist_filename, egg_tmp, process)
stubs = []
for res in native_libs:
if res.lower().endswith('.pyd'): # create stubs for .pyd's
parts = res.split('/')
resource = parts[-1]
parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
pyfile = os.path.join(egg_tmp, *parts)
to_compile.append(pyfile)
stubs.append(pyfile)
bdist_egg.write_stub(resource, pyfile)
self.byte_compile(to_compile) # compile .py's
bdist_egg.write_safety_flag(
os.path.join(egg_tmp, 'EGG-INFO'),
bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
for name in 'top_level', 'native_libs':
if locals()[name]:
txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
if not os.path.exists(txt):
f = open(txt, 'w')
f.write('\n'.join(locals()[name]) + '\n')
f.close()
__mv_warning = textwrap.dedent("""
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
""").lstrip()
__id_warning = textwrap.dedent("""
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
""")
def installation_report(self, req, dist, what="Installed"):
"""Helpful installation message for display to package users"""
msg = "\n%(what)s %(eggloc)s%(extras)s"
if self.multi_version and not self.no_report:
msg += '\n' + self.__mv_warning
if self.install_dir not in map(normalize_path, sys.path):
msg += '\n' + self.__id_warning
eggloc = dist.location
name = dist.project_name
version = dist.version
extras = '' # TODO: self.report_extras(req, dist)
return msg % locals()
__editable_msg = textwrap.dedent("""
Extracted editable version of %(spec)s to %(dirname)s
If it uses setuptools in its setup script, you can activate it in
"development" mode by going to that directory and running::
%(python)s setup.py develop
See the setuptools documentation for the "develop" command for more info.
""").lstrip()
def report_editable(self, spec, setup_script):
dirname = os.path.dirname(setup_script)
python = sys.executable
return '\n' + self.__editable_msg % locals()
def run_setup(self, setup_script, setup_base, args):
sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
sys.modules.setdefault('distutils.command.egg_info', egg_info)
args = list(args)
if self.verbose > 2:
v = 'v' * (self.verbose - 1)
args.insert(0, '-' + v)
elif self.verbose < 2:
args.insert(0, '-q')
if self.dry_run:
args.insert(0, '-n')
log.info(
"Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
)
try:
run_setup(setup_script, args)
except SystemExit as v:
raise DistutilsError("Setup script exited with %s" % (v.args[0],))
def build_and_install(self, setup_script, setup_base):
args = ['bdist_egg', '--dist-dir']
dist_dir = tempfile.mkdtemp(
prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
)
try:
self._set_fetcher_options(os.path.dirname(setup_script))
args.append(dist_dir)
self.run_setup(setup_script, setup_base, args)
all_eggs = Environment([dist_dir])
eggs = []
for key in all_eggs:
for dist in all_eggs[key]:
eggs.append(self.install_egg(dist.location, setup_base))
if not eggs and not self.dry_run:
log.warn("No eggs found in %s (setup script problem?)",
dist_dir)
return eggs
finally:
rmtree(dist_dir)
log.set_verbosity(self.verbose) # restore our log verbosity
def _set_fetcher_options(self, base):
"""
When easy_install is about to run bdist_egg on a source dist, that
source dist might have 'setup_requires' directives, requiring
additional fetching. Ensure the fetcher options given to easy_install
are available to that command as well.
"""
# find the fetch options from easy_install and write them out
# to the setup.cfg file.
ei_opts = self.distribution.get_option_dict('easy_install').copy()
fetch_directives = (
'find_links', 'site_dirs', 'index_url', 'optimize',
'allow_hosts',
)
fetch_options = {}
for key, val in ei_opts.items():
if key not in fetch_directives:
continue
fetch_options[key.replace('_', '-')] = val[1]
# create a settings dictionary suitable for `edit_config`
settings = dict(easy_install=fetch_options)
cfg_filename = os.path.join(base, 'setup.cfg')
setopt.edit_config(cfg_filename, settings)
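# Illustrative example (assumed values, not from the original source): if
# easy_install was invoked with --find-links and --allow-hosts, the
# setup.cfg written by _set_fetcher_options() above would gain a section
# roughly like:
#
#   [easy_install]
#   find-links = https://example.com/packages/
#   allow-hosts = example.com
#
# so that any setup_requires fetching done by the child setup.py sees the
# same options.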
def update_pth(self, dist):
if self.pth_file is None:
return
for d in self.pth_file[dist.key]: # drop old entries
if self.multi_version or d.location != dist.location:
log.info("Removing %s from easy-install.pth file", d)
self.pth_file.remove(d)
if d.location in self.shadow_path:
self.shadow_path.remove(d.location)
if not self.multi_version:
if dist.location in self.pth_file.paths:
log.info(
"%s is already the active version in easy-install.pth",
dist
)
else:
log.info("Adding %s to easy-install.pth file", dist)
self.pth_file.add(dist) # add new entry
if dist.location not in self.shadow_path:
self.shadow_path.append(dist.location)
if not self.dry_run:
self.pth_file.save()
if dist.key == 'setuptools':
# Ensure that setuptools itself never becomes unavailable!
# XXX should this check for latest version?
filename = os.path.join(self.install_dir, 'setuptools.pth')
if os.path.islink(filename):
os.unlink(filename)
f = open(filename, 'wt')
f.write(self.pth_file.make_relative(dist.location) + '\n')
f.close()
def unpack_progress(self, src, dst):
# Progress filter for unpacking
log.debug("Unpacking %s to %s", src, dst)
return dst # only unpack-and-compile skips files for dry run
def unpack_and_compile(self, egg_path, destination):
to_compile = []
to_chmod = []
def pf(src, dst):
if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
to_compile.append(dst)
elif dst.endswith('.dll') or dst.endswith('.so'):
to_chmod.append(dst)
self.unpack_progress(src, dst)
return not self.dry_run and dst or None
unpack_archive(egg_path, destination, pf)
self.byte_compile(to_compile)
if not self.dry_run:
for f in to_chmod:
mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
chmod(f, mode)
def byte_compile(self, to_compile):
if sys.dont_write_bytecode:
self.warn('byte-compiling is disabled, skipping.')
return
from distutils.util import byte_compile
try:
# try to make the byte compile messages quieter
log.set_verbosity(self.verbose - 1)
byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
if self.optimize:
byte_compile(
to_compile, optimize=self.optimize, force=1,
dry_run=self.dry_run
)
finally:
log.set_verbosity(self.verbose) # restore original verbosity
__no_default_msg = textwrap.dedent("""
bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
https://pythonhosted.org/setuptools/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again.""").lstrip()
def no_default_version_msg(self):
template = self.__no_default_msg
return template % (self.install_dir, os.environ.get('PYTHONPATH', ''))
def install_site_py(self):
"""Make sure there's a site.py in the target dir, if needed"""
if self.sitepy_installed:
return # already did it, or don't need to
sitepy = os.path.join(self.install_dir, "site.py")
source = resource_string("setuptools", "site-patch.py")
current = ""
if os.path.exists(sitepy):
log.debug("Checking existing site.py in %s", self.install_dir)
f = open(sitepy, 'rb')
current = f.read()
# we want str, not bytes
if PY3:
current = current.decode()
f.close()
if not current.startswith('def __boot():'):
raise DistutilsError(
"%s is not a setuptools-generated site.py; please"
" remove it." % sitepy
)
if current != source:
log.info("Creating %s", sitepy)
if not self.dry_run:
ensure_directory(sitepy)
f = open(sitepy, 'wb')
f.write(source)
f.close()
self.byte_compile([sitepy])
self.sitepy_installed = True
def create_home_path(self):
"""Create directories under ~."""
if not self.user:
return
home = convert_path(os.path.expanduser("~"))
for name, path in iteritems(self.config_vars):
if path.startswith(home) and not os.path.isdir(path):
self.debug_print("os.makedirs('%s', 0o700)" % path)
os.makedirs(path, 0o700)
INSTALL_SCHEMES = dict(
posix=dict(
install_dir='$base/lib/python$py_version_short/site-packages',
script_dir='$base/bin',
),
)
DEFAULT_SCHEME = dict(
install_dir='$base/Lib/site-packages',
script_dir='$base/Scripts',
)
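# These class-level schemes feed _expand() below: when install_dir or
# script_dir are still unset, they default to the $base-relative paths here
# (the 'posix' entry on POSIX systems, DEFAULT_SCHEME otherwise), and $base
# is taken from the install command's config_vars, or from --prefix when
# one was given.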
def _expand(self, *attrs):
config_vars = self.get_finalized_command('install').config_vars
if self.prefix:
# Set default install_dir/scripts from --prefix
config_vars = config_vars.copy()
config_vars['base'] = self.prefix
scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
for attr, val in scheme.items():
if getattr(self, attr, None) is None:
setattr(self, attr, val)
from distutils.util import subst_vars
for attr in attrs:
val = getattr(self, attr)
if val is not None:
val = subst_vars(val, config_vars)
if os.name == 'posix':
val = os.path.expanduser(val)
setattr(self, attr, val)
def get_site_dirs():
# return a list of 'site' dirs
sitedirs = [_f for _f in os.environ.get('PYTHONPATH',
'').split(os.pathsep) if _f]
prefixes = [sys.prefix]
if sys.exec_prefix != sys.prefix:
prefixes.append(sys.exec_prefix)
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos'):
sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
elif os.sep == '/':
sitedirs.extend([os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python")])
else:
sitedirs.extend(
[prefix, os.path.join(prefix, "lib", "site-packages")]
)
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
lib_paths = get_path('purelib'), get_path('platlib')
for site_lib in lib_paths:
if site_lib not in sitedirs:
sitedirs.append(site_lib)
if site.ENABLE_USER_SITE:
sitedirs.append(site.USER_SITE)
sitedirs = list(map(normalize_path, sitedirs))
return sitedirs
def expand_paths(inputs):
"""Yield sys.path directories that might contain "old-style" packages"""
seen = {}
for dirname in inputs:
dirname = normalize_path(dirname)
if dirname in seen:
continue
seen[dirname] = 1
if not os.path.isdir(dirname):
continue
files = os.listdir(dirname)
yield dirname, files
for name in files:
if not name.endswith('.pth'):
# We only care about the .pth files
continue
if name in ('easy-install.pth', 'setuptools.pth'):
# Ignore .pth files that we control
continue
# Read the .pth file
f = open(os.path.join(dirname, name))
lines = list(yield_lines(f))
f.close()
# Yield existing non-dupe, non-import directory lines from it
for line in lines:
if not line.startswith("import"):
line = normalize_path(line.rstrip())
if line not in seen:
seen[line] = 1
if not os.path.isdir(line):
continue
yield line, os.listdir(line)
def extract_wininst_cfg(dist_filename):
"""Extract configuration data from a bdist_wininst .exe
Returns a configparser.RawConfigParser, or None
"""
f = open(dist_filename, 'rb')
try:
endrec = zipfile._EndRecData(f)
if endrec is None:
return None
prepended = (endrec[9] - endrec[5]) - endrec[6]
if prepended < 12: # no wininst data here
return None
f.seek(prepended - 12)
from setuptools.compat import StringIO, configparser
import struct
tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
if tag not in (0x1234567A, 0x1234567B):
return None # not a valid tag
f.seek(prepended - (12 + cfglen))
cfg = configparser.RawConfigParser(
{'version': '', 'target_version': ''})
try:
part = f.read(cfglen)
# Read up to the first null byte.
config = part.split(b'\0', 1)[0]
# Now the config is in bytes, but for RawConfigParser, it should
# be text, so decode it.
config = config.decode(sys.getfilesystemencoding())
cfg.readfp(StringIO(config))
except configparser.Error:
return None
if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
return None
return cfg
finally:
f.close()
def get_exe_prefixes(exe_filename):
"""Get exe->egg path translations for a given .exe file"""
prefixes = [
('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''),
('PLATLIB/', ''),
('SCRIPTS/', 'EGG-INFO/scripts/'),
('DATA/lib/site-packages', ''),
]
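# Comment only: each (old, new) pair rewrites archive paths when the .exe
# is unpacked into egg layout, e.g. 'PURELIB/foo/bar.py' -> 'foo/bar.py'
# and 'SCRIPTS/tool.py' -> 'EGG-INFO/scripts/tool.py' (names are
# illustrative). The loop below adds further prefixes found in bundled .pth
# files and in any '<name>.egg-info/' directory shipped inside the
# installer.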
z = zipfile.ZipFile(exe_filename)
try:
for info in z.infolist():
name = info.filename
parts = name.split('/')
if len(parts) == 3 and parts[2] == 'PKG-INFO':
if parts[1].endswith('.egg-info'):
prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
break
if len(parts) != 2 or not name.endswith('.pth'):
continue
if name.endswith('-nspkg.pth'):
continue
if parts[0].upper() in ('PURELIB', 'PLATLIB'):
contents = z.read(name)
if PY3:
contents = contents.decode()
for pth in yield_lines(contents):
pth = pth.strip().replace('\\', '/')
if not pth.startswith('import'):
prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
finally:
z.close()
prefixes = [(x.lower(), y) for x, y in prefixes]
prefixes.sort()
prefixes.reverse()
return prefixes
def parse_requirement_arg(spec):
try:
return Requirement.parse(spec)
except ValueError:
raise DistutilsError(
"Not a URL, existing file, or requirement spec: %r" % (spec,)
)
class PthDistributions(Environment):
"""A .pth file with Distribution paths in it"""
dirty = False
def __init__(self, filename, sitedirs=()):
self.filename = filename
self.sitedirs = list(map(normalize_path, sitedirs))
self.basedir = normalize_path(os.path.dirname(self.filename))
self._load()
Environment.__init__(self, [], None, None)
for path in yield_lines(self.paths):
list(map(self.add, find_distributions(path, True)))
def _load(self):
self.paths = []
saw_import = False
seen = dict.fromkeys(self.sitedirs)
if os.path.isfile(self.filename):
f = open(self.filename, 'rt')
for line in f:
if line.startswith('import'):
saw_import = True
continue
path = line.rstrip()
self.paths.append(path)
if not path.strip() or path.strip().startswith('#'):
continue
# skip non-existent paths, in case somebody deleted a package
# manually, and duplicate paths as well
path = self.paths[-1] = normalize_path(
os.path.join(self.basedir, path)
)
if not os.path.exists(path) or path in seen:
self.paths.pop() # skip it
self.dirty = True # we cleaned up, so we're dirty now :)
continue
seen[path] = 1
f.close()
if self.paths and not saw_import:
self.dirty = True # ensure anything we touch has import wrappers
while self.paths and not self.paths[-1].strip():
self.paths.pop()
def save(self):
"""Write changed .pth file back to disk"""
if not self.dirty:
return
rel_paths = list(map(self.make_relative, self.paths))
if rel_paths:
log.debug("Saving %s", self.filename)
lines = self._wrap_lines(rel_paths)
data = '\n'.join(lines) + '\n'
if os.path.islink(self.filename):
os.unlink(self.filename)
with open(self.filename, 'wt') as f:
f.write(data)
elif os.path.exists(self.filename):
log.debug("Deleting empty %s", self.filename)
os.unlink(self.filename)
self.dirty = False
@staticmethod
def _wrap_lines(lines):
return lines
def add(self, dist):
"""Add `dist` to the distribution map"""
new_path = (
dist.location not in self.paths and (
dist.location not in self.sitedirs or
# account for '.' being in PYTHONPATH
dist.location == os.getcwd()
)
)
if new_path:
self.paths.append(dist.location)
self.dirty = True
Environment.add(self, dist)
def remove(self, dist):
"""Remove `dist` from the distribution map"""
while dist.location in self.paths:
self.paths.remove(dist.location)
self.dirty = True
Environment.remove(self, dist)
def make_relative(self, path):
npath, last = os.path.split(normalize_path(path))
baselen = len(self.basedir)
parts = [last]
sep = os.altsep == '/' and '/' or os.sep
while len(npath) >= baselen:
if npath == self.basedir:
parts.append(os.curdir)
parts.reverse()
return sep.join(parts)
npath, last = os.path.split(npath)
parts.append(last)
else:
return path
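# Illustrative example of make_relative() above (hypothetical paths): with
# basedir '/srv/site-packages', a location '/srv/site-packages/Foo-1.0.egg'
# becomes './Foo-1.0.egg', while a path outside basedir is returned
# unchanged.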
class RewritePthDistributions(PthDistributions):
@classmethod
def _wrap_lines(cls, lines):
yield cls.prelude
for line in lines:
yield line
yield cls.postlude
_inline = lambda text: textwrap.dedent(text).strip().replace('\n', '; ')
prelude = _inline("""
import sys
sys.__plen = len(sys.path)
""")
postlude = _inline("""
import sys
new = sys.path[sys.__plen:]
del sys.path[sys.__plen:]
p = getattr(sys, '__egginsert', 0)
sys.path[p:p] = new
sys.__egginsert = p + len(new)
""")
if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'rewrite') == 'rewrite':
PthDistributions = RewritePthDistributions
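# Illustrative sketch (comment only, egg names hypothetical) of the
# easy-install.pth file that RewritePthDistributions.save() produces: the
# prelude records len(sys.path), each following line names an installed
# egg, and the postlude moves those freshly appended entries to position
# sys.__egginsert (the front of sys.path by default) so eggs shadow older
# installations:
#
#   import sys; sys.__plen = len(sys.path)
#   ./Foo-1.0-py2.7.egg
#   ./Bar-2.3-py2.7.egg
#   import sys; new = sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p = getattr(sys, '__egginsert', 0); sys.path[p:p] = new; sys.__egginsert = p + len(new)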
def _first_line_re():
"""
Return a regular expression based on first_line_re suitable for matching
strings.
"""
if isinstance(first_line_re.pattern, str):
return first_line_re
# first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
return re.compile(first_line_re.pattern.decode())
def auto_chmod(func, arg, exc):
if func is os.remove and os.name == 'nt':
chmod(arg, stat.S_IWRITE)
return func(arg)
et, ev, _ = sys.exc_info()
reraise(et, (ev[0], ev[1] + (" %s %s" % (func, arg))))
def update_dist_caches(dist_path, fix_zipimporter_caches):
"""
Fix any globally cached `dist_path` related data.
`dist_path` should be a path of a newly installed egg distribution (zipped
or unzipped).
sys.path_importer_cache contains finder objects that have been cached when
importing data from the original distribution. Any such finders need to be
cleared since the replacement distribution might be packaged differently,
e.g. a zipped egg distribution might get replaced with an unzipped egg
folder or vice versa. Having the old finders cached may then cause Python
to attempt loading modules from the replacement distribution using an
incorrect loader.
zipimport.zipimporter objects are Python loaders charged with importing
data packaged inside zip archives. If stale loaders referencing the
original distribution are left behind, they can fail to load modules from
the replacement distribution. E.g. if an old zipimport.zipimporter instance
is used to load data from a new zipped egg archive, it may cause the
operation to attempt to locate the requested data in the wrong location -
one indicated by the original distribution's zip archive directory
information. Such an operation may then fail outright, e.g. report having
read a 'bad local file header', or even worse, it may fail silently &
return invalid data.
zipimport._zip_directory_cache contains cached zip archive directory
information for all existing zipimport.zipimporter instances and all such
instances connected to the same archive share the same cached directory
information.
If asked, and the underlying Python implementation allows it, we can fix
all existing zipimport.zipimporter instances instead of having to track
them down and remove them one by one, by updating their shared cached zip
archive directory information. This, of course, assumes that the
replacement distribution is packaged as a zipped egg.
If not asked to fix existing zipimport.zipimporter instances, we still do
our best to clear any remaining zipimport.zipimporter related cached data
that might somehow later get used when attempting to load data from the new
distribution and thus cause such load operations to fail. Note that when
tracking down such remaining stale data, we can not catch every conceivable
usage from here, and we clear only those that we know of and have found to
cause problems if left alive. Any remaining caches should be updated by
whoever is in charge of maintaining them, i.e. they should be ready to
handle us replacing their zip archives with new distributions at runtime.
"""
# There are several other known sources of stale zipimport.zipimporter
# instances that we do not clear here, but might if ever given a reason to
# do so:
# * Global setuptools pkg_resources.working_set (a.k.a. 'master working
# set') may contain distributions which may in turn contain their
# zipimport.zipimporter loaders.
# * Several zipimport.zipimporter loaders held by local variables further
# up the function call stack when running the setuptools installation.
# * Already loaded modules may have their __loader__ attribute set to the
# exact loader instance used when importing them. Python 3.4 docs state
# that this information is intended mostly for introspection and so is
# not expected to cause us problems.
normalized_path = normalize_path(dist_path)
_uncache(normalized_path, sys.path_importer_cache)
if fix_zipimporter_caches:
_replace_zip_directory_cache_data(normalized_path)
else:
# Here, even though we do not want to fix existing and now stale
# zipimporter cache information, we still want to remove it. Related to
# Python's zip archive directory information cache, we clear each of
# its stale entries in two phases:
# 1. Clear the entry so attempting to access zip archive information
# via any existing stale zipimport.zipimporter instances fails.
# 2. Remove the entry from the cache so any newly constructed
# zipimport.zipimporter instances do not end up using old stale
# zip archive directory information.
# This whole stale data removal step does not seem strictly necessary,
# but has been left in because it was done before we started replacing
# the zip archive directory information cache content if possible, and
# there are no relevant unit tests that we can depend on to tell us if
# this is really needed.
_remove_and_clear_zip_directory_cache_data(normalized_path)
def _collect_zipimporter_cache_entries(normalized_path, cache):
"""
Return zipimporter cache entry keys related to a given normalized path.
Alternative path spellings (e.g. those using different character case or
those using alternative path separators) related to the same path are
included. Any sub-path entries are included as well, i.e. those
corresponding to zip archives embedded in other zip archives.
"""
result = []
prefix_len = len(normalized_path)
for p in cache:
np = normalize_path(p)
if (np.startswith(normalized_path) and
np[prefix_len:prefix_len + 1] in (os.sep, '')):
result.append(p)
return result
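# Illustrative example (hypothetical paths): for a normalized_path of
# '/eggs/Foo-1.0.egg', the helper above matches cache keys whose normalized
# form is '/eggs/Foo-1.0.egg' itself or a sub-path such as
# '/eggs/Foo-1.0.egg/nested.zip', but not '/eggs/Foo-1.0.egg-info', because
# the prefix must be followed by a path separator or the end of the string.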
def _update_zipimporter_cache(normalized_path, cache, updater=None):
"""
Update zipimporter cache data for a given normalized path.
Any sub-path entries are processed as well, i.e. those corresponding to zip
archives embedded in other zip archives.
The given updater is a callable taking a cache entry key and the original
entry (which has already been removed from the cache), and is expected to
update the entry and possibly return a new one to be inserted in its place.
Returning None indicates that the entry should not be replaced with a new
one. If no updater is given, the cache entries are simply removed without
any additional processing, the same as if the updater simply returned None.
"""
for p in _collect_zipimporter_cache_entries(normalized_path, cache):
# N.B. pypy's custom zipimport._zip_directory_cache implementation does
# not support the complete dict interface:
# * Does not support item assignment, so on PyPy this function can only
#   be used for removing existing cache entries, not for replacing them.
# * Does not support the dict.pop() method, forcing us to use the
# get/del patterns instead. For more detailed information see the
# following links:
# https://bitbucket.org/pypa/setuptools/issue/202/more-robust-zipimporter-cache-invalidation#comment-10495960
# https://bitbucket.org/pypy/pypy/src/dd07756a34a41f674c0cacfbc8ae1d4cc9ea2ae4/pypy/module/zipimport/interp_zipimport.py#cl-99
old_entry = cache[p]
del cache[p]
new_entry = updater and updater(p, old_entry)
if new_entry is not None:
cache[p] = new_entry
def _uncache(normalized_path, cache):
_update_zipimporter_cache(normalized_path, cache)
def _remove_and_clear_zip_directory_cache_data(normalized_path):
def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
old_entry.clear()
_update_zipimporter_cache(
normalized_path, zipimport._zip_directory_cache,
updater=clear_and_remove_cached_zip_archive_directory_data)
# PyPy Python implementation does not allow directly writing to the
# zipimport._zip_directory_cache and so prevents us from attempting to correct
# its content. The best we can do there is clear the problematic cache content
# and have PyPy repopulate it as needed. The downside is that if there are any
stale zipimport.zipimporter instances lying around, attempting to use them
will fail due to not having their zip archive directory information available
# instead of being automatically corrected to use the new correct zip archive
# directory information.
if '__pypy__' in sys.builtin_module_names:
_replace_zip_directory_cache_data = \
_remove_and_clear_zip_directory_cache_data
else:
def _replace_zip_directory_cache_data(normalized_path):
def replace_cached_zip_archive_directory_data(path, old_entry):
# N.B. In theory, we could load the zip directory information just
# once for all updated path spellings, and then copy it locally and
# update its contained path strings to contain the correct
# spelling, but that seems like a way too invasive move (this cache
# structure is not officially documented anywhere and could in
# theory change with new Python releases) for no significant
# benefit.
old_entry.clear()
zipimport.zipimporter(path)
old_entry.update(zipimport._zip_directory_cache[path])
return old_entry
_update_zipimporter_cache(
normalized_path, zipimport._zip_directory_cache,
updater=replace_cached_zip_archive_directory_data)
def is_python(text, filename='<string>'):
"Is this string a valid Python script?"
try:
compile(text, filename, 'exec')
except (SyntaxError, TypeError):
return False
else:
return True
def is_sh(executable):
"""Determine if the specified executable is a .sh (contains a #! line)"""
try:
with io.open(executable, encoding='latin-1') as fp:
magic = fp.read(2)
except (OSError, IOError):
return executable
return magic == '#!'
def nt_quote_arg(arg):
"""Quote a command line argument according to Windows parsing rules"""
return subprocess.list2cmdline([arg])
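# Illustrative example: nt_quote_arg(r'C:\Program Files\Python\python.exe')
# returns '"C:\\Program Files\\Python\\python.exe"' -- list2cmdline wraps
# arguments containing spaces in double quotes per the MS C runtime rules.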
def is_python_script(script_text, filename):
"""Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
"""
if filename.endswith('.py') or filename.endswith('.pyw'):
return True # extension says it's Python
if is_python(script_text, filename):
return True # it's syntactically valid Python
if script_text.startswith('#!'):
# It begins with a '#!' line, so check if 'python' is in it somewhere
return 'python' in script_text.splitlines()[0].lower()
return False # Not any Python I can recognize
try:
from os import chmod as _chmod
except ImportError:
# Jython compatibility
def _chmod(*args):
pass
def chmod(path, mode):
log.debug("changing mode of %s to %o", path, mode)
try:
_chmod(path, mode)
except os.error as e:
log.debug("chmod failed: %s", e)
def fix_jython_executable(executable, options):
warnings.warn("Use JythonCommandSpec", DeprecationWarning, stacklevel=2)
if not JythonCommandSpec.relevant():
return executable
cmd = CommandSpec.best().from_param(executable)
cmd.install_options(options)
return cmd.as_header().lstrip('#!').rstrip('\n')
class CommandSpec(list):
"""
A command spec for a #! header, specified as a list of arguments akin to
those passed to Popen.
"""
options = []
split_args = dict()
@classmethod
def best(cls):
"""
Choose the best CommandSpec class based on environmental conditions.
"""
return cls if not JythonCommandSpec.relevant() else JythonCommandSpec
@classmethod
def _sys_executable(cls):
_default = os.path.normpath(sys.executable)
return os.environ.get('__PYVENV_LAUNCHER__', _default)
@classmethod
def from_param(cls, param):
"""
Construct a CommandSpec from a parameter to build_scripts, which may
be None.
"""
if isinstance(param, cls):
return param
if isinstance(param, list):
return cls(param)
if param is None:
return cls.from_environment()
# otherwise, assume it's a string.
return cls.from_string(param)
@classmethod
def from_environment(cls):
return cls([cls._sys_executable()])
@classmethod
def from_string(cls, string):
"""
Construct a command spec from a simple string representing a command
line parseable by shlex.split.
"""
items = shlex.split(string, **cls.split_args)
return cls(items)
def install_options(self, script_text):
self.options = shlex.split(self._extract_options(script_text))
cmdline = subprocess.list2cmdline(self)
if not isascii(cmdline):
self.options[:0] = ['-x']
@staticmethod
def _extract_options(orig_script):
"""
Extract any options from the first line of the script.
"""
first = (orig_script + '\n').splitlines()[0]
match = _first_line_re().match(first)
options = match.group(1) or '' if match else ''
return options.strip()
def as_header(self):
return self._render(self + list(self.options))
@staticmethod
def _render(items):
cmdline = subprocess.list2cmdline(items)
return '#!' + cmdline + '\n'
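# Illustrative usage only (a sketch, not part of the public API): building a
# shebang header from a plain command string. With a script whose first line
# carries no interpreter options, install_options() leaves the option list
# empty and as_header() renders just the command:
#
#     spec = CommandSpec.from_string('/usr/bin/python -E')
#     spec.install_options('print("hello")\n')
#     spec.as_header()    # -> '#!/usr/bin/python -E\n'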
# For pbr compat; will be removed in a future version.
sys_executable = CommandSpec._sys_executable()
class WindowsCommandSpec(CommandSpec):
split_args = dict(posix=False)
class JythonCommandSpec(CommandSpec):
@classmethod
def relevant(cls):
return (
sys.platform.startswith('java')
and
__import__('java').lang.System.getProperty('os.name') != 'Linux'
)
def as_header(self):
"""
Workaround Jython's sys.executable being a .sh (an invalid
shebang line interpreter)
"""
if not is_sh(self[0]):
return super(JythonCommandSpec, self).as_header()
if self.options:
# Can't apply the workaround, leave it broken
log.warn(
"WARNING: Unable to adapt shebang line for Jython,"
" the following script is NOT executable\n"
" see http://bugs.jython.org/issue1112 for"
" more information.")
return super(JythonCommandSpec, self).as_header()
items = ['/usr/bin/env'] + self + list(self.options)
return self._render(items)
class ScriptWriter(object):
"""
Encapsulates behavior around writing entry point scripts for console and
gui apps.
"""
template = textwrap.dedent("""
# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
__requires__ = %(spec)r
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point(%(spec)r, %(group)r, %(name)r)()
)
""").lstrip()
command_spec_class = CommandSpec
@classmethod
def get_script_args(cls, dist, executable=None, wininst=False):
# for backward compatibility
warnings.warn("Use get_args", DeprecationWarning)
writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
header = cls.get_script_header("", executable, wininst)
return writer.get_args(dist, header)
@classmethod
def get_script_header(cls, script_text, executable=None, wininst=False):
# for backward compatibility
warnings.warn("Use get_header", DeprecationWarning)
if wininst:
executable = "python.exe"
cmd = cls.command_spec_class.best().from_param(executable)
cmd.install_options(script_text)
return cmd.as_header()
@classmethod
def get_args(cls, dist, header=None):
"""
Yield write_script() argument tuples for a distribution's
console_scripts and gui_scripts entry points.
"""
if header is None:
header = cls.get_header()
spec = str(dist.as_requirement())
for type_ in 'console', 'gui':
group = type_ + '_scripts'
for name, ep in dist.get_entry_map(group).items():
cls._ensure_safe_name(name)
script_text = cls.template % locals()
args = cls._get_script_args(type_, name, header, script_text)
for res in args:
yield res
@staticmethod
def _ensure_safe_name(name):
"""
Prevent paths in *_scripts entry point names.
"""
has_path_sep = re.search(r'[\\/]', name)
if has_path_sep:
raise ValueError("Path separators not allowed in script names")
@classmethod
def get_writer(cls, force_windows):
# for backward compatibility
warnings.warn("Use best", DeprecationWarning)
return WindowsScriptWriter.best() if force_windows else cls.best()
@classmethod
def best(cls):
"""
Select the best ScriptWriter for this environment.
"""
return WindowsScriptWriter.best() if sys.platform == 'win32' else cls
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
# Simply write the stub with no extension.
yield (name, header + script_text)
@classmethod
def get_header(cls, script_text="", executable=None):
"""Create a #! line, getting options (if any) from script_text"""
cmd = cls.command_spec_class.best().from_param(executable)
cmd.install_options(script_text)
return cmd.as_header()
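# Illustrative usage only (a sketch, not setuptools' own invocation): generate
# the entry-point wrapper scripts for a pkg_resources Distribution 'dist'.
# Each yielded tuple begins with (script_name, script_contents); the Windows
# writers may append a mode flag and a list of blocker filenames.
#
#     writer = ScriptWriter.best()
#     for args in writer.get_args(dist):
#         name, contents = args[:2]
#         open(name, 'w').write(contents)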
class WindowsScriptWriter(ScriptWriter):
command_spec_class = WindowsCommandSpec
@classmethod
def get_writer(cls):
# for backward compatibility
warnings.warn("Use best", DeprecationWarning)
return cls.best()
@classmethod
def best(cls):
"""
Select the best ScriptWriter suitable for Windows
"""
writer_lookup = dict(
executable=WindowsExecutableLauncherWriter,
natural=cls,
)
# for compatibility, use the executable launcher by default
launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
return writer_lookup[launcher]
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"For Windows, add a .py extension"
ext = dict(console='.pya', gui='.pyw')[type_]
if ext not in os.environ['PATHEXT'].lower().split(';'):
warnings.warn("%s not listed in PATHEXT; scripts will not be "
"recognized as executables." % ext, UserWarning)
old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
old.remove(ext)
header = cls._adjust_header(type_, header)
blockers = [name + x for x in old]
yield name + ext, header + script_text, 't', blockers
@classmethod
def _adjust_header(cls, type_, orig_header):
"""
        Make sure 'pythonw' is used for gui and 'python' is used for
console (regardless of what sys.executable is).
"""
pattern = 'pythonw.exe'
repl = 'python.exe'
if type_ == 'gui':
pattern, repl = repl, pattern
pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
new_header = pattern_ob.sub(string=orig_header, repl=repl)
return new_header if cls._use_header(new_header) else orig_header
@staticmethod
def _use_header(new_header):
"""
Should _adjust_header use the replaced header?
On non-windows systems, always use. On
Windows systems, only use the replaced header if it resolves
to an executable on the system.
"""
clean_header = new_header[2:-1].strip('"')
return sys.platform != 'win32' or find_executable(clean_header)
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"""
For Windows, add a .py extension and an .exe launcher
"""
if type_ == 'gui':
launcher_type = 'gui'
ext = '-script.pyw'
old = ['.pyw']
else:
launcher_type = 'cli'
ext = '-script.py'
old = ['.py', '.pyc', '.pyo']
hdr = cls._adjust_header(type_, header)
blockers = [name + x for x in old]
yield (name + ext, hdr + script_text, 't', blockers)
yield (
name + '.exe', get_win_launcher(launcher_type),
'b' # write in binary mode
)
if not is_64bit():
# install a manifest for the launcher to prevent Windows
# from detecting it as an installer (which it will for
# launchers like easy_install.exe). Consider only
# adding a manifest for launchers detected as installers.
# See Distribute #143 for details.
m_name = name + '.exe.manifest'
yield (m_name, load_launcher_manifest(name), 't')
# for backward-compatibility
get_script_args = ScriptWriter.get_script_args
get_script_header = ScriptWriter.get_script_header
def get_win_launcher(type):
"""
Load the Windows launcher (executable) suitable for launching a script.
`type` should be either 'cli' or 'gui'
Returns the executable as a byte string.
"""
launcher_fn = '%s.exe' % type
if platform.machine().lower() == 'arm':
launcher_fn = launcher_fn.replace(".", "-arm.")
if is_64bit():
launcher_fn = launcher_fn.replace(".", "-64.")
else:
launcher_fn = launcher_fn.replace(".", "-32.")
return resource_string('setuptools', launcher_fn)
def load_launcher_manifest(name):
manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
if PY2:
return manifest % vars()
else:
return manifest.decode('utf-8') % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
"""Recursively delete a directory tree.
This code is taken from the Python 2.4 version of 'shutil', because
the 2.3 version doesn't really work right.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
names = []
try:
names = os.listdir(path)
except os.error:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
rmtree(fullname, ignore_errors, onerror)
else:
try:
os.remove(fullname)
except os.error:
onerror(os.remove, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
def current_umask():
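    # The umask can only be read by setting it: set a throwaway value, then
    # immediately restore the original mask and return it.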
tmp = os.umask(0o022)
os.umask(tmp)
return tmp
def bootstrap():
# This function is called when setuptools*.egg is run using /bin/sh
import setuptools
argv0 = os.path.dirname(setuptools.__path__[0])
sys.argv[0] = argv0
sys.argv.append(argv0)
main()
def main(argv=None, **kw):
from setuptools import setup
from setuptools.dist import Distribution
class DistributionWithoutHelpCommands(Distribution):
common_usage = ""
def _show_help(self, *args, **kw):
with _patch_usage():
Distribution._show_help(self, *args, **kw)
if argv is None:
argv = sys.argv[1:]
with _patch_usage():
setup(
script_args=['-q', 'easy_install', '-v'] + argv,
script_name=sys.argv[0] or 'easy_install',
distclass=DistributionWithoutHelpCommands, **kw
)
@contextlib.contextmanager
def _patch_usage():
import distutils.core
USAGE = textwrap.dedent("""
usage: %(script)s [options] requirement_or_url ...
or: %(script)s --help
""").lstrip()
def gen_usage(script_name):
return USAGE % dict(
script=os.path.basename(script_name),
)
saved = distutils.core.gen_usage
distutils.core.gen_usage = gen_usage
try:
yield
finally:
distutils.core.gen_usage = saved
|
HiroIshikawa/21playground
|
visualizer/_app_boilerplate/venv/lib/python3.5/site-packages/setuptools/command/easy_install.py
|
Python
|
mit
| 87,460
|
[
"VisIt"
] |
de3e1018799a3d56ca89e29213c32f71d9f42b2c541469e06b2e181509745344
|
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Okinawa Institute of Science and Technology, Japan.
#
# This script runs on STEPS 2.x http://steps.sourceforge.net
#
# H Anwar, I Hepburn, H Nedelescu, W Chen and E De Schutter
# Stochastic calcium mechanisms cause dendritic calcium spike variability
# J Neuroscience 2013
#
# *StochasticCaburst_cluster.py : The stochastic calcium burst model with
# P-type calcium channels clustered around BK channels
#
# Script authors: Haroon Anwar and Iain Hepburn
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# USAGE
#
# $ python StochasticCaburst_cluster.py *mesh* *root* *iter_n* *clusterSize*
#
# *mesh* is the tetrahedral mesh (10um to 160um cylinder)
# *root* is the path to the location for data storage
# *iter_n* (intended to be an integer) is an identifier number for each
# simulation iteration.
# *clusterSize* is the size of the P-type channel clusters
#
# E.g:
# $ python StochasticCaburst_cluster.py Cylinder2_dia2um_L10um_outer0_3um_0.3shell_0.3size_19156tets_adaptive.inp ~/stochcasims/ 1 4
#
#
# OUTPUT
#
# In the (root)/data/StochasticCaburst_cluster/(mesh)/(iter_n+time) directory,
# 5 data files will be recorded. Each file contains one row for every
# time-point at which data is recorded, organised into the following columns:
#
# currents.dat
# Time (ms), P-type current, T-type current, BK current, SK current
# (current units are Amps/m^2)
#
# voltage.dat
# Time (ms), voltage at mesh centre (mV)
#
# calcium.dat
# Time (ms), calcium concentration in submembrane (micromolar),
# number of calcium ions in submembrane.
#
# OpenBKandCa.dat
# Time (ms), (for every BK triangle): BK_O0, BK_O1, BK_O2, BK_O3, BK_O4, number
# of calcium ions in inner tetrahedron, calcium concentration in inner tetrahedron.
#
# ChannelsDistribution.dat
# The channel distributions. Please see script for details.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
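#
# A minimal post-processing sketch (illustrative only, not part of the model;
# assumes NumPy is installed and the current directory is the output directory
# described above):
#
#     import numpy as np
#     currents = np.loadtxt('currents.dat')   # columns: t (ms), CaP, CaT, BK, SK
#     t_ms, i_cap = currents[:, 0], currents[:, 1]
#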
from __future__ import print_function
import math
import time
import random
import steps.model as smodel
import steps.geom as sgeom
import steps.rng as srng
import steps.utilities.meshio as meshio
import steps.solver as ssolver
import os
import meshes.gettets as gettets
from extra.constants import *
import extra.curr_funcs as cf
import sys
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
meshfile_ab, root, iter_n, clusterSize = sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]
cyl160 = (meshfile_ab == 'Cylinder2_dia2um_L160um_outer0_0.3shell_0.3size_279152tets_adaptive.inp')
clusterSize = int(clusterSize)
########################### BIOCHEMICAL MODEL ###############################
# Two models required: Stochastic and deterministic
mdl = smodel.Model()
# Calcium
Ca = smodel.Spec('Ca', mdl)
Ca.setValence(2)
# Pump
Pump = smodel.Spec('Pump', mdl)
# CaPump
CaPump = smodel.Spec('CaPump', mdl)
# iCBsf
iCBsf = smodel.Spec('iCBsf', mdl)
# iCBsCa
iCBsCa = smodel.Spec('iCBsCa', mdl)
# iCBCaf
iCBCaf = smodel.Spec('iCBCaf', mdl)
# iCBCaCa
iCBCaCa = smodel.Spec('iCBCaCa', mdl)
# CBsf
CBsf = smodel.Spec('CBsf', mdl)
# CBsCa
CBsCa = smodel.Spec('CBsCa', mdl)
# CBCaf
CBCaf = smodel.Spec('CBCaf', mdl)
# CBCaCa
CBCaCa = smodel.Spec('CBCaCa', mdl)
# PV
PV = smodel.Spec('PV', mdl)
# PVMg
PVMg = smodel.Spec('PVMg', mdl)
# PVCa
PVCa = smodel.Spec('PVCa', mdl)
# Mg
Mg = smodel.Spec('Mg', mdl)
# Vol/surface systems
vsys = smodel.Volsys('vsys', mdl)
ssys = smodel.Surfsys('ssys', mdl)
# Diffusions
diff_Ca = smodel.Diff('diff_Ca', vsys, Ca)
diff_Ca.setDcst(DCST)
diff_CBsf = smodel.Diff('diff_CBsf', vsys, CBsf)
diff_CBsf.setDcst(DCB)
diff_CBsCa = smodel.Diff('diff_CBsCa', vsys, CBsCa)
diff_CBsCa.setDcst(DCB)
diff_CBCaf = smodel.Diff('diff_CBCaf', vsys, CBCaf)
diff_CBCaf.setDcst(DCB)
diff_CBCaCa = smodel.Diff('diff_CBCaCa', vsys, CBCaCa)
diff_CBCaCa.setDcst(DCB)
diff_PV = smodel.Diff('diff_PV', vsys, PV)
diff_PV.setDcst(DPV)
diff_PVCa = smodel.Diff('diff_PVCa', vsys, PVCa)
diff_PVCa.setDcst(DPV)
diff_PVMg = smodel.Diff('diff_PVMg', vsys, PVMg)
diff_PVMg.setDcst(DPV)
#Pump
PumpD_f = smodel.SReac('PumpD_f', ssys, ilhs=[Ca], slhs=[Pump], srhs=[CaPump])
PumpD_f.setKcst(P_f_kcst)
PumpD_b = smodel.SReac('PumpD_b', ssys, slhs=[CaPump], irhs=[Ca], srhs=[Pump])
PumpD_b.setKcst(P_b_kcst)
PumpD_k = smodel.SReac('PumpD_k', ssys, slhs=[CaPump], srhs=[Pump])
PumpD_k.setKcst(P_k_kcst)
#iCBsf-fast
iCBsf1_f = smodel.Reac('iCBsf1_f', vsys, lhs=[Ca,iCBsf], rhs=[iCBsCa], kcst = iCBsf1_f_kcst)
iCBsf1_b = smodel.Reac('iCBsf1_b', vsys, lhs=[iCBsCa], rhs=[Ca, iCBsf], kcst = iCBsf1_b_kcst)
#iCBsCa
iCBsCa_f = smodel.Reac('iCBsCa_f', vsys, lhs=[Ca,iCBsCa], rhs=[iCBCaCa], kcst = iCBsCa_f_kcst)
iCBsCa_b = smodel.Reac('iCBsCa_b', vsys, lhs=[iCBCaCa], rhs=[Ca,iCBsCa], kcst = iCBsCa_b_kcst)
#iCBsf_slow
iCBsf2_f = smodel.Reac('iCBsf2_f', vsys, lhs=[Ca,iCBsf], rhs=[iCBCaf], kcst = iCBsf2_f_kcst)
iCBsf2_b = smodel.Reac('iCBsf2_b', vsys, lhs=[iCBCaf], rhs=[Ca,iCBsf], kcst = iCBsf2_b_kcst)
#iCBCaf
iCBCaf_f = smodel.Reac('iCBCaf_f', vsys, lhs=[Ca,iCBCaf], rhs=[iCBCaCa], kcst = iCBCaf_f_kcst)
iCBCaf_b = smodel.Reac('iCBCaf_b', vsys, lhs=[iCBCaCa], rhs=[Ca,iCBCaf], kcst = iCBCaf_b_kcst)
#CBsf-fast
CBsf1_f = smodel.Reac('CBsf1_f', vsys, lhs=[Ca,CBsf], rhs=[CBsCa], kcst = CBsf1_f_kcst)
CBsf1_b = smodel.Reac('CBsf1_b', vsys, lhs=[CBsCa], rhs=[Ca,CBsf], kcst = CBsf1_b_kcst)
#CBsCa
CBsCa_f = smodel.Reac('CBsCa_f', vsys, lhs=[Ca,CBsCa], rhs=[CBCaCa], kcst = CBsCa_f_kcst)
CBsCa_b = smodel.Reac('CBsCa_b', vsys, lhs=[CBCaCa], rhs=[Ca,CBsCa], kcst = CBsCa_b_kcst)
#CBsf_slow
CBsf2_f = smodel.Reac('CBsf2_f', vsys, lhs=[Ca,CBsf], rhs=[CBCaf], kcst = CBsf2_f_kcst)
CBsf2_b = smodel.Reac('CBsf2_b', vsys, lhs=[CBCaf], rhs=[Ca,CBsf], kcst = CBsf2_b_kcst)
#CBCaf
CBCaf_f = smodel.Reac('CBCaf_f', vsys, lhs=[Ca,CBCaf], rhs=[CBCaCa], kcst = CBCaf_f_kcst)
CBCaf_b = smodel.Reac('CBCaf_b', vsys, lhs=[CBCaCa], rhs=[Ca,CBCaf], kcst = CBCaf_b_kcst)
#PVca
PVca_f = smodel.Reac('PVca_f', vsys, lhs=[Ca,PV], rhs=[PVCa], kcst = PVca_f_kcst)
PVca_b = smodel.Reac('PVca_b', vsys, lhs=[PVCa], rhs=[Ca,PV], kcst = PVca_b_kcst)
#PVmg
PVmg_f = smodel.Reac('PVmg_f', vsys, lhs=[Mg,PV], rhs=[PVMg], kcst = PVmg_f_kcst)
PVmg_b = smodel.Reac('PVmg_b', vsys, lhs=[PVMg], rhs=[Mg,PV], kcst = PVmg_b_kcst)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # CHANNELS # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
###### CaP channel ##############
CaPchan = smodel.Chan('CaPchan', mdl)
CaP_m0 = smodel.ChanState('CaP_m0', mdl, CaPchan)
CaP_m1 = smodel.ChanState('CaP_m1', mdl, CaPchan)
CaP_m2 = smodel.ChanState('CaP_m2', mdl, CaPchan)
CaP_m3 = smodel.ChanState('CaP_m3', mdl, CaPchan)
CaPm0m1 = smodel.VDepSReac('CaPm0m1', ssys, slhs = [CaP_m0], srhs = [CaP_m1], k= lambda V: 1.0e3 *3.* alpha_cap(V*1.0e3)* Qt)
CaPm1m2 = smodel.VDepSReac('CaPm1m2', ssys, slhs = [CaP_m1], srhs = [CaP_m2], k= lambda V: 1.0e3 *2.* alpha_cap(V*1.0e3)* Qt)
CaPm2m3 = smodel.VDepSReac('CaPm2m3', ssys, slhs = [CaP_m2], srhs = [CaP_m3], k= lambda V: 1.0e3 *1.* alpha_cap(V*1.0e3)* Qt)
CaPm3m2 = smodel.VDepSReac('CaPm3m2', ssys, slhs = [CaP_m3], srhs = [CaP_m2], k= lambda V: 1.0e3 *3.* beta_cap(V*1.0e3)* Qt)
CaPm2m1 = smodel.VDepSReac('CaPm2m1', ssys, slhs = [CaP_m2], srhs = [CaP_m1], k= lambda V: 1.0e3 *2.* beta_cap(V*1.0e3)* Qt)
CaPm1m0 = smodel.VDepSReac('CaPm1m0', ssys, slhs = [CaP_m1], srhs = [CaP_m0], k= lambda V: 1.0e3 *1.* beta_cap(V*1.0e3)* Qt)
if cyl160:
OC_CaP = smodel.GHKcurr('OC_CaP', ssys, CaP_m3, Ca, virtual_oconc = Ca_oconc, computeflux = True)
else:
OC_CaP = smodel.GHKcurr('OC_CaP', ssys, CaP_m3, Ca, computeflux = True)
OC_CaP.setP(CaP_P)
######## CaT channel ##########
CaTchan = smodel.Chan('CaTchan', mdl)
CaT_m0h0 = smodel.ChanState('CaT_m0h0', mdl, CaTchan)
CaT_m0h1 = smodel.ChanState('CaT_m0h1', mdl, CaTchan)
CaT_m1h0 = smodel.ChanState('CaT_m1h0', mdl, CaTchan)
CaT_m1h1 = smodel.ChanState('CaT_m1h1', mdl, CaTchan)
CaT_m2h0 = smodel.ChanState('CaT_m2h0', mdl, CaTchan)
CaT_m2h1 = smodel.ChanState('CaT_m2h1', mdl, CaTchan)
CaTm0h0_m1h0 = smodel.VDepSReac('CaTm0h0_m1h0', ssys, slhs = [CaT_m0h0], srhs = [CaT_m1h0], k= lambda V: 1.0e3 *2.* alpham_cat(V*1.0e3))
CaTm1h0_m2h0 = smodel.VDepSReac('CaTm1h0_m2h0', ssys, slhs = [CaT_m1h0], srhs = [CaT_m2h0], k= lambda V: 1.0e3 *1.* alpham_cat(V*1.0e3))
CaTm2h0_m1h0 = smodel.VDepSReac('CaTm2h0_m1h0', ssys, slhs = [CaT_m2h0], srhs = [CaT_m1h0], k= lambda V: 1.0e3 *2.* betam_cat(V*1.0e3))
CaTm1h0_m0h0 = smodel.VDepSReac('CaTm1h0_m0h0', ssys, slhs = [CaT_m1h0], srhs = [CaT_m0h0], k= lambda V: 1.0e3 *1.* betam_cat(V*1.0e3))
CaTm0h1_m1h1 = smodel.VDepSReac('CaTm0h1_m1h1', ssys, slhs = [CaT_m0h1], srhs = [CaT_m1h1], k= lambda V: 1.0e3 *2.* alpham_cat(V*1.0e3))
CaTm1h1_m2h1 = smodel.VDepSReac('CaTm1h1_m2h1', ssys, slhs = [CaT_m1h1], srhs = [CaT_m2h1], k= lambda V: 1.0e3 *1.* alpham_cat(V*1.0e3))
CaTm2h1_m1h1 = smodel.VDepSReac('CaTm2h1_m1h1', ssys, slhs = [CaT_m2h1], srhs = [CaT_m1h1], k= lambda V: 1.0e3 *2.* betam_cat(V*1.0e3))
CaTm1h1_m0h1 = smodel.VDepSReac('CaTm1h1_m0h1', ssys, slhs = [CaT_m1h1], srhs = [CaT_m0h1], k= lambda V: 1.0e3 *1.* betam_cat(V*1.0e3))
CaTm0h0_m0h1 = smodel.VDepSReac('CaTm0h0_m0h1', ssys, slhs = [CaT_m0h0], srhs = [CaT_m0h1], k= lambda V: 1.0e3 *1.* alphah_cat(V*1.0e3))
CaTm1h0_m1h1 = smodel.VDepSReac('CaTm1h0_m1h1', ssys, slhs = [CaT_m1h0], srhs = [CaT_m1h1], k= lambda V: 1.0e3 *1.* alphah_cat(V*1.0e3))
CaTm2h0_m2h1 = smodel.VDepSReac('CaTm2h0_m2h1', ssys, slhs = [CaT_m2h0], srhs = [CaT_m2h1], k= lambda V: 1.0e3 *1.* alphah_cat(V*1.0e3))
CaTm2h1_m2h0 = smodel.VDepSReac('CaTm2h1_m2h0', ssys, slhs = [CaT_m2h1], srhs = [CaT_m2h0], k= lambda V: 1.0e3 *1.* betah_cat(V*1.0e3))
CaTm1h1_m1h0 = smodel.VDepSReac('CaTm1h1_m1h0', ssys, slhs = [CaT_m1h1], srhs = [CaT_m1h0], k= lambda V: 1.0e3 *1.* betah_cat(V*1.0e3))
CaTm0h1_m0h0 = smodel.VDepSReac('CaTm0h1_m0h0', ssys, slhs = [CaT_m0h1], srhs = [CaT_m0h0], k= lambda V: 1.0e3 *1.* betah_cat(V*1.0e3))
if cyl160:
OC_CaT = smodel.GHKcurr('OC_CaT', ssys, CaT_m2h1, Ca, virtual_oconc = Ca_oconc, computeflux = True)
else:
OC_CaT = smodel.GHKcurr('OC_CaT', ssys, CaT_m2h1, Ca, computeflux = True)
OC_CaT.setP(CaT_P)
##### BK channel ####################
BKchan = smodel.Chan('BKchan', mdl)
BK_C0 = smodel.ChanState('BK_C0', mdl, BKchan)
BK_C1 = smodel.ChanState('BK_C1', mdl, BKchan)
BK_C2 = smodel.ChanState('BK_C2', mdl, BKchan)
BK_C3 = smodel.ChanState('BK_C3', mdl, BKchan)
BK_C4 = smodel.ChanState('BK_C4', mdl, BKchan)
BK_O0 = smodel.ChanState('BK_O0', mdl, BKchan)
BK_O1 = smodel.ChanState('BK_O1', mdl, BKchan)
BK_O2 = smodel.ChanState('BK_O2', mdl, BKchan)
BK_O3 = smodel.ChanState('BK_O3', mdl, BKchan)
BK_O4 = smodel.ChanState('BK_O4', mdl, BKchan)
BKCAC0 = smodel.SReac('BKCAC0', ssys, slhs = [BK_C0], ilhs = [Ca], srhs = [BK_C1], kcst = c_01)
BKCAC1 = smodel.SReac('BKCAC1', ssys, slhs = [BK_C1], ilhs = [Ca], srhs = [BK_C2], kcst = c_12)
BKCAC2 = smodel.SReac('BKCAC2', ssys, slhs = [BK_C2], ilhs = [Ca], srhs = [BK_C3], kcst = c_23)
BKCAC3 = smodel.SReac('BKCAC3', ssys, slhs = [BK_C3], ilhs = [Ca], srhs = [BK_C4], kcst = c_34)
BKC0 = smodel.SReac('BKC0', ssys, slhs = [BK_C1], srhs = [BK_C0], irhs = [Ca], kcst = c_10)
BKC1 = smodel.SReac('BKC1', ssys, slhs = [BK_C2], srhs = [BK_C1], irhs = [Ca], kcst = c_21)
BKC2 = smodel.SReac('BKC2', ssys, slhs = [BK_C3], srhs = [BK_C2], irhs = [Ca], kcst = c_32)
BKC3 = smodel.SReac('BKC3', ssys, slhs = [BK_C4], srhs = [BK_C3], irhs = [Ca], kcst = c_43)
BKCAO0 = smodel.SReac('BKCAO0', ssys, slhs = [BK_O0], ilhs = [Ca], srhs = [BK_O1], kcst = o_01)
BKCAO1 = smodel.SReac('BKCAO1', ssys, slhs = [BK_O1], ilhs = [Ca], srhs = [BK_O2], kcst = o_12)
BKCAO2 = smodel.SReac('BKCAO2', ssys, slhs = [BK_O2], ilhs = [Ca], srhs = [BK_O3], kcst = o_23)
BKCAO3 = smodel.SReac('BKCAO3', ssys, slhs = [BK_O3], ilhs = [Ca], srhs = [BK_O4], kcst = o_34)
BKO0 = smodel.SReac('BKO0', ssys, slhs = [BK_O1], srhs = [BK_O0], irhs = [Ca], kcst = o_10)
BKO1 = smodel.SReac('BKO1', ssys, slhs = [BK_O2], srhs = [BK_O1], irhs = [Ca], kcst = o_21)
BKO2 = smodel.SReac('BKO2', ssys, slhs = [BK_O3], srhs = [BK_O2], irhs = [Ca], kcst = o_32)
BKO3 = smodel.SReac('BKO3', ssys, slhs = [BK_O4], srhs = [BK_O3], irhs = [Ca], kcst = o_43)
BKC0O0 = smodel.VDepSReac('BKC0O0', ssys, slhs = [BK_C0], srhs = [BK_O0], k=lambda V: f_0(V))
BKC1O1 = smodel.VDepSReac('BKC1O1', ssys, slhs = [BK_C1], srhs = [BK_O1], k=lambda V: f_1(V))
BKC2O2 = smodel.VDepSReac('BKC2O2', ssys, slhs = [BK_C2], srhs = [BK_O2], k=lambda V: f_2(V))
BKC3O3 = smodel.VDepSReac('BKC3O3', ssys, slhs = [BK_C3], srhs = [BK_O3], k=lambda V: f_3(V))
BKC4O4 = smodel.VDepSReac('BKC4O4', ssys, slhs = [BK_C4], srhs = [BK_O4], k=lambda V: f_4(V))
BKO0C0 = smodel.VDepSReac('BKO0C0', ssys, slhs = [BK_O0], srhs = [BK_C0], k=lambda V: b_0(V))
BKO1C1 = smodel.VDepSReac('BKO1C1', ssys, slhs = [BK_O1], srhs = [BK_C1], k=lambda V: b_1(V))
BKO2C2 = smodel.VDepSReac('BKO2C2', ssys, slhs = [BK_O2], srhs = [BK_C2], k=lambda V: b_2(V))
BKO3C3 = smodel.VDepSReac('BKO3C3', ssys, slhs = [BK_O3], srhs = [BK_C3], k=lambda V: b_3(V))
BKO4C4 = smodel.VDepSReac('BKO4C4', ssys, slhs = [BK_O4], srhs = [BK_C4], k=lambda V: b_4(V))
OC_BK0 = smodel.OhmicCurr('OC_BK0', ssys, chanstate = BK_O0, erev = BK_rev, g = BK_G )
OC_BK1 = smodel.OhmicCurr('OC_BK1', ssys, chanstate = BK_O1, erev = BK_rev, g = BK_G )
OC_BK2 = smodel.OhmicCurr('OC_BK2', ssys, chanstate = BK_O2, erev = BK_rev, g = BK_G )
OC_BK3 = smodel.OhmicCurr('OC_BK3', ssys, chanstate = BK_O3, erev = BK_rev, g = BK_G )
OC_BK4 = smodel.OhmicCurr('OC_BK4', ssys, chanstate = BK_O4, erev = BK_rev, g = BK_G )
###### SK channel ################## DETERMINISTIC
SKchan = smodel.Chan('SKchan', mdl)
SK_C1 = smodel.ChanState('SK_C1', mdl, SKchan)
SK_C2 = smodel.ChanState('SK_C2', mdl, SKchan)
SK_C3 = smodel.ChanState('SK_C3', mdl, SKchan)
SK_C4 = smodel.ChanState('SK_C4', mdl, SKchan)
SK_O1 = smodel.ChanState('SK_O1', mdl, SKchan)
SK_O2 = smodel.ChanState('SK_O2', mdl, SKchan)
SKCAC1 = smodel.SReac('SKCAC1', ssys, slhs = [SK_C1], ilhs = [Ca], srhs = [SK_C2], kcst = dirc2_t)
SKCAC2 = smodel.SReac('SKCAC2', ssys, slhs = [SK_C2], ilhs = [Ca], srhs = [SK_C3], kcst = dirc3_t)
SKCAC3 = smodel.SReac('SKCAC3', ssys, slhs = [SK_C3], ilhs = [Ca], srhs = [SK_C4], kcst = dirc4_t)
SKC1 = smodel.SReac('SKC1', ssys, slhs = [SK_C2], srhs = [SK_C1], irhs = [Ca], kcst = invc1_t)
SKC2 = smodel.SReac('SKC2', ssys, slhs = [SK_C3], srhs = [SK_C2], irhs = [Ca], kcst = invc2_t)
SKC3 = smodel.SReac('SKC3', ssys, slhs = [SK_C4], srhs = [SK_C3], irhs = [Ca], kcst = invc3_t)
SKC3O1 = smodel.SReac('SKC3O1', ssys, slhs = [SK_C3], srhs = [SK_O1], kcst = diro1_t)
SKC4O2 = smodel.SReac('SKC4O2', ssys, slhs = [SK_C4], srhs = [SK_O2], kcst = diro2_t)
SKO1C3 = smodel.SReac('SKO1C3', ssys, slhs = [SK_O1], srhs = [SK_C3], kcst = invo1_t)
SKO2C4 = smodel.SReac('SKO2C4', ssys, slhs = [SK_O2], srhs = [SK_C4], kcst = invo2_t)
OC1_SK = smodel.OhmicCurr('OC1_SK', ssys, chanstate = SK_O1, erev = SK_rev, g = SK_G )
OC2_SK = smodel.OhmicCurr('OC2_SK', ssys, chanstate = SK_O2, erev = SK_rev, g = SK_G )
###### Leak current channel #####
L = smodel.Chan('L', mdl)
Leak = smodel.ChanState('Leak', mdl, L)
OC_L = smodel.OhmicCurr('OC_L', ssys, chanstate = Leak, erev = L_rev, g = L_G)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
########### MESH & COMPARTMENTALIZATION #################
########## Import Mesh
mesh = meshio.loadMesh('./meshes/'+meshfile_ab)[0]
outer_tets = list(range(mesh.ntets))
inner_tets = gettets.getcyl(mesh, 1e-6, -200e-6, 200e-6)[0]
for i in inner_tets: outer_tets.remove(i)
assert(len(outer_tets) + len(inner_tets) == mesh.ntets)
print(len(outer_tets), " tets in outer compartment")
print(len(inner_tets), " tets in inner compartment")
# Record voltage from the central tetrahedron
cent_tet = mesh.findTetByPoint([0.0,0.0,0.0])
########## Create an intracellular compartment i.e. cytosolic compartment
cyto = sgeom.TmComp('cyto', mesh, inner_tets)
cyto.addVolsys('vsys')
if not cyl160: outer = sgeom.TmComp('outer', mesh, outer_tets)
if cyl160:
# Ensure that we use points a small distance inside the boundary:
LENGTH = mesh.getBoundMax()[2] - mesh.getBoundMin()[2]
boundminz = mesh.getBoundMin()[2] + LENGTH/mesh.ntets
boundmaxz = mesh.getBoundMax()[2] - LENGTH/mesh.ntets
memb_tris = list(mesh.getSurfTris())
minztris = []
maxztris = []
for tri in memb_tris:
zminboundtri = True
zmaxboundtri = True
tritemp = mesh.getTri(tri)
trizs = [0.0, 0.0, 0.0]
trizs[0] = mesh.getVertex(tritemp[0])[2]
trizs[1] = mesh.getVertex(tritemp[1])[2]
trizs[2] = mesh.getVertex(tritemp[2])[2]
for j in range(3):
if (trizs[j]>boundminz): zminboundtri = False
if (zminboundtri):
minztris.append(tri)
continue
for j in range(3):
if (trizs[j]< boundmaxz): zmaxboundtri = False
if (zmaxboundtri):
maxztris.append(tri)
for t in minztris: memb_tris.remove(t)
for t in maxztris: memb_tris.remove(t)
else:
print('Finding connecting triangles...')
out_tris = set()
for i in outer_tets:
tritemp = mesh.getTetTriNeighb(i)
for j in range(4): out_tris.add(tritemp[j])
in_tris = set()
for i in inner_tets:
tritemp = mesh.getTetTriNeighb(i)
for j in range(4): in_tris.add(tritemp[j])
memb_tris = out_tris.intersection(in_tris)
memb_tris = list(memb_tris)
memb_tris_bk = []
memb_tris_sk = []
memb_tris_cat = []
memb_tris_cap = []
surfarea = 0.0
for i in memb_tris:
surfarea = surfarea + mesh.getTriArea(i)
random.seed(7)
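# Channel placement: with a fixed seed for reproducibility, repeatedly draw
# random membrane triangles until the target number of triangles, given by the
# channel surface density (X_ro) times the membrane area, has been selected for
# each channel type. Note that CaP triangles are only required not to coincide
# with BK triangles, because clustered CaP channels are added to the BK
# triangles separately below.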
while (len(memb_tris_bk)<round(BK_ro*surfarea)):
ctriID = random.choice(memb_tris)
if ctriID not in memb_tris_bk:
memb_tris_bk.append(ctriID)
while (len(memb_tris_sk)<round(SK_ro*surfarea)):
ctriID = random.choice(memb_tris)
if ctriID not in memb_tris_sk:
memb_tris_sk.append(ctriID)
while (len(memb_tris_cat)<round(CaT_ro*surfarea)):
ctriID = random.choice(memb_tris)
if ctriID not in memb_tris_cat:
memb_tris_cat.append(ctriID)
while (len(memb_tris_cap)<(round(CaP_ro*surfarea)-(clusterSize*round(BK_ro*surfarea)))):
ctriID = random.choice(memb_tris)
if ctriID not in memb_tris_bk:
memb_tris_cap.append(ctriID)
########## Find the submembrane tets
memb_tet_neighb = []
for i in memb_tris:
tettemp = mesh.getTriTetNeighb(i)
for j in tettemp:
memb_tet_neighb.append(j)
submemb_tets = []
for i in memb_tet_neighb:
if i in inner_tets:
submemb_tets.append(i)
print(len(submemb_tets))
vol = 0.0
for i in submemb_tets:
vol = vol + mesh.getTetVol(i)
print('Volume of submembrane region is', vol)
submemb_tets_surftris = dict()
for m in submemb_tets:
tris = mesh.getTetTriNeighb(m)
for t in tris:
if t in memb_tris:
submemb_tets_surftris[m] = t
break
assert(len(submemb_tets_surftris.values()) == len(submemb_tets))
for i in range(len(memb_tris)):
ctri = memb_tris[i]
ctet = submemb_tets[i]
tettemp = mesh.getTriTetNeighb(ctri)
if not ctet in tettemp:
print('Tri and Tet do not correspond to each other')
border_tets = []
border_tets_vols = 0.0
for i in inner_tets:
tritemp = mesh.getTetTriNeighb(i)
for t in tritemp:
if t in memb_tris:
border_tets.append(i)
border_tets_vols+=mesh.getTetVol(i)
break
print("Border tet vols:", border_tets_vols)
########## Create a membrane as a surface mesh
if cyl160:
memb = sgeom.TmPatch('memb', mesh, memb_tris, cyto)
else:
memb = sgeom.TmPatch('memb', mesh, memb_tris, cyto, outer)
memb.addSurfsys('ssys')
# For EField calculation
print("Creating membrane..")
membrane = sgeom.Memb('membrane', mesh, [memb])
print("Membrane created.")
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # SIMULATION # # # # # # # # # # # # # # # # # # # # # #
r = srng.create_mt19937(512)
r.initialize(7)
print("Creating tetexact solver...")
sim = ssolver.Tetexact(mdl, mesh, r, True)
print("Resetting simulation objects..")
sim.reset()
print("Injecting molecules..")
sim.setTemp(TEMPERATURE+273.15)
if not cyl160:
sim.setCompConc('outer', 'Ca', Ca_oconc)
sim.setCompClamped('outer', 'Ca', True)
sim.setCompConc('cyto', 'Ca', Ca_iconc)
print("Calcium concentration is: ", sim.getCompConc('cyto', 'Ca'))
print("No. of Ca molecules is: ", sim.getCompCount('cyto', 'Ca'))
sim.setCompConc('cyto', 'Mg', Mg_conc)
surfarea = sim.getPatchArea('memb')
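# Total pump count = surface density (molecules per m^2) times patch area; the
# prefactor 6.022141e12 appears to be Avogadro's number (6.022141e23 per mol)
# scaled by a molar surface density of 1e-11 mol m^-2.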
pumpnbs = 6.022141e12*surfarea
sim.setPatchCount('memb', 'Pump', pumpnbs)
sim.setPatchCount('memb', 'CaPump', 0)
print("Injected ", sim.getPatchCount('memb', 'Pump'), "pumps")
sim.setCompConc('cyto', 'iCBsf', iCBsf_conc)
sim.setCompConc('cyto', 'iCBsCa', iCBsCa_conc)
sim.setCompConc('cyto', 'iCBCaf', iCBCaf_conc)
sim.setCompConc('cyto', 'iCBCaCa', iCBCaCa_conc)
sim.setCompConc('cyto', 'CBsf', CBsf_conc)
sim.setCompConc('cyto', 'CBsCa', CBsCa_conc)
sim.setCompConc('cyto', 'CBCaf', CBCaf_conc)
sim.setCompConc('cyto', 'CBCaCa', CBCaCa_conc)
sim.setCompConc('cyto', 'PV', PV_conc)
sim.setCompConc('cyto', 'PVCa', PVCa_conc)
sim.setCompConc('cyto', 'PVMg', PVMg_conc)
bk_c0_count = 0
bk_c1_count = 0
bk_c2_count = 0
bk_c3_count = 0
bk_c4_count = 0
bk_o0_count = 0
bk_o1_count = 0
bk_o2_count = 0
bk_o3_count = 0
bk_o4_count = 0
for i in memb_tris_bk:
if bk_c0_count<round(BK_ro*surfarea*BK_C0_p):
sim.setTriCount(i, 'BK_C0', sim.getTriCount(i, 'BK_C0') + 1)
bk_c0_count = bk_c0_count + 1
elif bk_c1_count<round(BK_ro*surfarea*BK_C1_p):
sim.setTriCount(i, 'BK_C1', sim.getTriCount(i, 'BK_C1') + 1)
bk_c1_count = bk_c1_count + 1
elif bk_c2_count<round(BK_ro*surfarea*BK_C2_p):
sim.setTriCount(i, 'BK_C2', sim.getTriCount(i, 'BK_C2') + 1)
bk_c2_count = bk_c2_count + 1
elif bk_c3_count<round(BK_ro*surfarea*BK_C3_p):
sim.setTriCount(i, 'BK_C3', sim.getTriCount(i, 'BK_C3') + 1)
bk_c3_count = bk_c3_count + 1
elif bk_c4_count<round(BK_ro*surfarea*BK_C4_p):
sim.setTriCount(i, 'BK_C4', sim.getTriCount(i, 'BK_C4') + 1)
bk_c4_count = bk_c4_count + 1
elif bk_o0_count<round(BK_ro*surfarea*BK_O0_p):
sim.setTriCount(i, 'BK_O0', sim.getTriCount(i, 'BK_O0') + 1)
bk_o0_count = bk_o0_count + 1
elif bk_o1_count<round(BK_ro*surfarea*BK_O1_p):
sim.setTriCount(i, 'BK_O1', sim.getTriCount(i, 'BK_O1') + 1)
bk_o1_count = bk_o1_count + 1
elif bk_o2_count<round(BK_ro*surfarea*BK_O2_p):
sim.setTriCount(i, 'BK_O2', sim.getTriCount(i, 'BK_O2') + 1)
bk_o2_count = bk_o2_count + 1
elif bk_o3_count<round(BK_ro*surfarea*BK_O3_p):
sim.setTriCount(i, 'BK_O3', sim.getTriCount(i, 'BK_O3') + 1)
bk_o3_count = bk_o3_count + 1
elif bk_o4_count<round(BK_ro*surfarea*BK_O4_p):
sim.setTriCount(i, 'BK_O4', sim.getTriCount(i, 'BK_O4') + 1)
bk_o4_count = bk_o4_count + 1
else:
print('More tris picked up by algorithm than the number of BK channels')
sk_c1_count = 0
sk_c2_count = 0
sk_c3_count = 0
sk_c4_count = 0
sk_o1_count = 0
sk_o2_count = 0
for i in memb_tris_sk:
if sk_c1_count<round(SK_ro*surfarea*SK_C1_p):
sim.setTriCount(i, 'SK_C1', sim.getTriCount(i, 'SK_C1') + 1)
sk_c1_count = sk_c1_count + 1
elif sk_c2_count<round(SK_ro*surfarea*SK_C2_p):
sim.setTriCount(i, 'SK_C2', sim.getTriCount(i, 'SK_C2') + 1)
sk_c2_count = sk_c2_count + 1
elif sk_c3_count<round(SK_ro*surfarea*SK_C3_p):
sim.setTriCount(i, 'SK_C3', sim.getTriCount(i, 'SK_C3') + 1)
sk_c3_count = sk_c3_count + 1
elif sk_c4_count<round(SK_ro*surfarea*SK_C4_p):
sim.setTriCount(i, 'SK_C4', sim.getTriCount(i, 'SK_C4') + 1)
sk_c4_count = sk_c4_count + 1
elif sk_o1_count<round(SK_ro*surfarea*SK_O1_p):
sim.setTriCount(i, 'SK_O1', sim.getTriCount(i, 'SK_O1') + 1)
sk_o1_count = sk_o1_count + 1
elif sk_o2_count<round(SK_ro*surfarea*SK_O2_p):
sim.setTriCount(i, 'SK_O2', sim.getTriCount(i, 'SK_O2') + 1)
sk_o2_count = sk_o2_count + 1
else:
print('More tris picked up by algorithm than the number of SK channels')
cat_m0h0_count = 0
cat_m1h0_count = 0
cat_m2h0_count = 0
cat_m0h1_count = 0
cat_m1h1_count = 0
cat_m2h1_count = 0
for i in memb_tris_cat:
if cat_m0h0_count<round(CaT_ro*surfarea*CaT_m0h0_p):
sim.setTriCount(i, 'CaT_m0h0', sim.getTriCount(i, 'CaT_m0h0') + 1)
cat_m0h0_count = cat_m0h0_count + 1
elif cat_m1h0_count<round(CaT_ro*surfarea*CaT_m1h0_p):
sim.setTriCount(i, 'CaT_m1h0', sim.getTriCount(i, 'CaT_m1h0') + 1)
cat_m1h0_count = cat_m1h0_count + 1
elif cat_m2h0_count<round(CaT_ro*surfarea*CaT_m2h0_p):
sim.setTriCount(i, 'CaT_m2h0', sim.getTriCount(i, 'CaT_m2h0') + 1)
cat_m2h0_count = cat_m2h0_count + 1
elif cat_m0h1_count<round(CaT_ro*surfarea*CaT_m0h1_p):
sim.setTriCount(i, 'CaT_m0h1', sim.getTriCount(i, 'CaT_m0h1') + 1)
cat_m0h1_count = cat_m0h1_count + 1
elif cat_m1h1_count<round(CaT_ro*surfarea*CaT_m1h1_p):
sim.setTriCount(i, 'CaT_m1h1', sim.getTriCount(i, 'CaT_m1h1') + 1)
cat_m1h1_count = cat_m1h1_count + 1
elif cat_m2h1_count<round(CaT_ro*surfarea*CaT_m2h1_p):
sim.setTriCount(i, 'CaT_m2h1', sim.getTriCount(i, 'CaT_m2h1') + 1)
cat_m2h1_count = cat_m2h1_count + 1
else:
print('More tris picked up by algorithm than the number of CaT channels')
cap_m0_count = 0
cap_m1_count = 0
cap_m2_count = 0
cap_m3_count = 0
if clusterSize>0:
for i in memb_tris_bk:
count = 0
while count<clusterSize:
if cap_m0_count<round(CaP_ro*surfarea*CaP_m0_p):
sim.setTriCount(i, 'CaP_m0', sim.getTriCount(i, 'CaP_m0') + 1)
cap_m0_count = cap_m0_count + 1
count = count +1
elif cap_m1_count<round(CaP_ro*surfarea*CaP_m1_p):
sim.setTriCount(i, 'CaP_m1', sim.getTriCount(i, 'CaP_m1') + 1)
cap_m1_count = cap_m1_count + 1
count = count +1
elif cap_m2_count<round(CaP_ro*surfarea*CaP_m2_p):
sim.setTriCount(i, 'CaP_m2', sim.getTriCount(i, 'CaP_m2') + 1)
cap_m2_count = cap_m2_count + 1
count = count +1
elif cap_m3_count<round(CaP_ro*surfarea*CaP_m3_p):
sim.setTriCount(i, 'CaP_m3', sim.getTriCount(i, 'CaP_m3') + 1)
cap_m3_count = cap_m3_count + 1
count = count +1
else:
print('Cluster size is larger than the number of CaP channels available')
for i in memb_tris_cap:
if cap_m0_count<round(CaP_ro*surfarea*CaP_m0_p):
sim.setTriCount(i, 'CaP_m0', sim.getTriCount(i, 'CaP_m0') + 1)
cap_m0_count = cap_m0_count + 1
elif cap_m1_count<round(CaP_ro*surfarea*CaP_m1_p):
sim.setTriCount(i, 'CaP_m1', sim.getTriCount(i, 'CaP_m1') + 1)
cap_m1_count = cap_m1_count + 1
elif cap_m2_count<round(CaP_ro*surfarea*CaP_m2_p):
sim.setTriCount(i, 'CaP_m2', sim.getTriCount(i, 'CaP_m2') + 1)
cap_m2_count = cap_m2_count + 1
elif cap_m3_count<round(CaP_ro*surfarea*CaP_m3_p):
sim.setTriCount(i, 'CaP_m3', sim.getTriCount(i, 'CaP_m3') + 1)
cap_m3_count = cap_m3_count + 1
else:
print('More tris picked up by the algorithm than the number of CaP channels available')
sim.setPatchCount('memb', 'Leak', int(L_ro * surfarea))
print("Injected ", int(L_ro * sim.getPatchArea('memb')), "Leak channels")
memb_triID_withBK=[]
memb_countBK_pertriID=[]
memb_tetID_withBK=[]
count = 0
for m in memb_tris:
    BKchans = sum(sim.getTriCount(m, s) for s in
                  ('BK_C0', 'BK_C1', 'BK_C2', 'BK_C3', 'BK_C4',
                   'BK_O0', 'BK_O1', 'BK_O2', 'BK_O3', 'BK_O4'))
if (BKchans>0):
memb_triID_withBK.append(m)
memb_countBK_pertriID.append(BKchans)
memb_tetID_withBK.append(submemb_tets[count])
count = count+1
sim.setEfieldDT(EF_DT)
sim.setMembPotential('membrane', init_pot)
sim.setMembVolRes('membrane', Ra)
# cm = 1.5 uF/cm2 -> 1.5e-6 F / 1e-4 m2 -> 1.5e-2 F/m2
sim.setMembCapac('membrane',memb_capac)
#### Recording #####
c=time.ctime()
dc = c.split()[1]+c.split()[2]+'_'+c.split()[3]+'_'+c.split()[4]
dc= dc.replace(':', '_')
try: os.mkdir(root+'data')
except OSError: pass
try: os.mkdir(root+'data/' + 'StochasticCaburst_cluster')
except OSError: pass
try: os.mkdir(root+'data/' + 'StochasticCaburst_cluster/'+meshfile_ab)
except OSError: pass
os.mkdir(root+'data/' + 'StochasticCaburst_cluster/'+meshfile_ab+'/'+iter_n+'__'+dc )
datfile = open(root+'data/' + 'StochasticCaburst_cluster/'+meshfile_ab+'/'+iter_n+'__'+dc + '/currents.dat', 'w')
datfile2 = open(root+'data/' + 'StochasticCaburst_cluster/'+meshfile_ab+'/'+iter_n+'__'+dc + '/voltage.dat', 'w')
datfile3 = open(root+'data/' + 'StochasticCaburst_cluster/'+meshfile_ab+'/'+iter_n+'__'+dc + '/calcium.dat', 'w')
datfile4 = open(root+'data/' + 'StochasticCaburst_cluster/'+meshfile_ab+'/'+iter_n+'__'+dc + '/OpenBKandCa.dat', 'w')
datfile5 = open(root+'data/' + 'StochasticCaburst_cluster/'+meshfile_ab+'/'+iter_n+'__'+dc + '/ChannelsDistribution.dat', 'w')
for m in memb_tris:
tri_center = mesh.getTriBarycenter(m)
cap_m0_chans = sim.getTriCount(m,'CaP_m0')
cap_m1_chans = sim.getTriCount(m,'CaP_m1')
cap_m2_chans = sim.getTriCount(m,'CaP_m2')
cap_m3_chans = sim.getTriCount(m,'CaP_m3')
cat_m0h0_chans = sim.getTriCount(m,'CaT_m0h0')
cat_m1h0_chans = sim.getTriCount(m,'CaT_m1h0')
cat_m2h0_chans = sim.getTriCount(m,'CaT_m2h0')
cat_m0h1_chans = sim.getTriCount(m,'CaT_m0h1')
cat_m1h1_chans = sim.getTriCount(m,'CaT_m1h1')
cat_m2h1_chans = sim.getTriCount(m,'CaT_m2h1')
bk_c0_chans = sim.getTriCount(m,'BK_C0')
bk_c1_chans = sim.getTriCount(m,'BK_C1')
bk_c2_chans = sim.getTriCount(m,'BK_C2')
bk_c3_chans = sim.getTriCount(m,'BK_C3')
bk_c4_chans = sim.getTriCount(m,'BK_C4')
bk_o0_chans = sim.getTriCount(m,'BK_O0')
bk_o1_chans = sim.getTriCount(m,'BK_O1')
bk_o2_chans = sim.getTriCount(m,'BK_O2')
bk_o3_chans = sim.getTriCount(m,'BK_O3')
bk_o4_chans = sim.getTriCount(m,'BK_O4')
sk_c1_chans = sim.getTriCount(m,'SK_C1')
sk_c2_chans = sim.getTriCount(m,'SK_C2')
sk_c3_chans = sim.getTriCount(m,'SK_C3')
sk_c4_chans = sim.getTriCount(m,'SK_C4')
sk_o1_chans = sim.getTriCount(m,'SK_O1')
sk_o2_chans = sim.getTriCount(m,'SK_O2')
datfile5.write('%.6g' %(tri_center[0]) + ' ')
datfile5.write('%.6g' %(tri_center[1]) + ' ')
datfile5.write('%.6g' %(tri_center[2]) + ' ')
datfile5.write('%.6g' %(cap_m0_chans) + ' ')
datfile5.write('%.6g' %(cap_m1_chans) + ' ')
datfile5.write('%.6g' %(cap_m2_chans) + ' ')
datfile5.write('%.6g' %(cap_m3_chans) + ' ')
datfile5.write('%.6g' %(cat_m0h0_chans) + ' ')
datfile5.write('%.6g' %(cat_m1h0_chans) + ' ')
datfile5.write('%.6g' %(cat_m2h0_chans) + ' ')
datfile5.write('%.6g' %(cat_m0h1_chans) + ' ')
datfile5.write('%.6g' %(cat_m1h1_chans) + ' ')
datfile5.write('%.6g' %(cat_m2h1_chans) + ' ')
datfile5.write('%.6g' %(bk_c0_chans) + ' ')
datfile5.write('%.6g' %(bk_c1_chans) + ' ')
datfile5.write('%.6g' %(bk_c2_chans) + ' ')
datfile5.write('%.6g' %(bk_c3_chans) + ' ')
datfile5.write('%.6g' %(bk_c4_chans) + ' ')
datfile5.write('%.6g' %(bk_o0_chans) + ' ')
datfile5.write('%.6g' %(bk_o1_chans) + ' ')
datfile5.write('%.6g' %(bk_o2_chans) + ' ')
datfile5.write('%.6g' %(bk_o3_chans) + ' ')
datfile5.write('%.6g' %(bk_o4_chans) + ' ')
datfile5.write('%.6g' %(sk_c1_chans) + ' ')
datfile5.write('%.6g' %(sk_c2_chans) + ' ')
datfile5.write('%.6g' %(sk_c3_chans) + ' ')
    datfile5.write('%.6g' %(sk_c4_chans) + ' ')
datfile5.write('%.6g' %(sk_o1_chans) + ' ')
datfile5.write('%.6g' %(sk_o2_chans) + ' ')
datfile5.write('\n')
r.initialize(int(time.time()%1000))
for l in range(NTIMEPOINTS):
print("Tpnt: ", l)
sim.run(TIMECONVERTER*l)
tcur_CaP = 0.0
tcur_CaT = 0.0
tcur_BK = 0.0
tcur_SK = 0.0
tca_count = 0.0
So = Ca_oconc
for m in submemb_tets:
ctriID = submemb_tets_surftris[m]
tcur_CaP = tcur_CaP + sim.getTriGHKI(ctriID,'OC_CaP')
tcur_CaT = tcur_CaT + sim.getTriGHKI(ctriID,'OC_CaT')
tcur_BK = tcur_BK + sim.getTriOhmicI(ctriID,'OC_BK0') \
+ sim.getTriOhmicI(ctriID,'OC_BK1') \
+ sim.getTriOhmicI(ctriID,'OC_BK2') \
+ sim.getTriOhmicI(ctriID,'OC_BK3') \
+ sim.getTriOhmicI(ctriID,'OC_BK4')
tcur_SK = tcur_SK + sim.getTriOhmicI(ctriID,'OC1_SK') + sim.getTriOhmicI(ctriID,'OC2_SK')
tca_count = tca_count + sim.getTetCount(m,'Ca')
datfile.write('%.6g' %(1.0e3*TIMECONVERTER*l) + ' ')
datfile.write('%.6g' %((tcur_CaP*1.0e-1)/surfarea) + ' ')
datfile.write('%.6g' %((tcur_CaT*1.0e-1)/surfarea) + ' ')
datfile.write('%.6g' %((tcur_BK*1.0e-1)/surfarea) + ' ')
datfile.write('%.6g' %((tcur_SK*1.0e-1)/surfarea) + ' ')
datfile.write('\n')
datfile2.write('%.6g' %(1.0e3*TIMECONVERTER*l) + ' ')
datfile2.write('%.6g' %(sim.getTetV(cent_tet)*1.0e3) + ' ')
datfile2.write('\n')
datfile3.write('%.6g' %(1.0e3*TIMECONVERTER*l) + ' ')
datfile3.write('%.6g' %(((tca_count/AVOGADRO)/(border_tets_vols*1.0e3))*1.0e6) +' ')
datfile3.write('%.6g' %(tca_count)+ ' ')
datfile3.write('\n')
datfile4.write('%.6g' %(1.0e3*TIMECONVERTER*l) + ' ')
for i in range(len(memb_triID_withBK)):
datfile4.write('%.6g' %sim.getTriCount(memb_triID_withBK[i], 'BK_O0') + ' ')
datfile4.write('%.6g' %sim.getTriCount(memb_triID_withBK[i], 'BK_O1') + ' ')
datfile4.write('%.6g' %sim.getTriCount(memb_triID_withBK[i], 'BK_O2') + ' ')
datfile4.write('%.6g' %sim.getTriCount(memb_triID_withBK[i], 'BK_O3') + ' ')
datfile4.write('%.6g' %sim.getTriCount(memb_triID_withBK[i], 'BK_O4') + ' ')
datfile4.write('%.6g' %sim.getTetCount(memb_tetID_withBK[i], 'Ca') + ' ')
datfile4.write('%.6g' %sim.getTetConc(memb_tetID_withBK[i], 'Ca') +' ')
datfile4.write('\n')
datfile.close()
datfile2.close()
datfile3.close()
datfile4.close()
datfile5.close()
|
CNS-OIST/STEPS_Example
|
publication_models/API_1/Anwar_J Neurosci_2013/StochasticCaburst_cluster.py
|
Python
|
gpl-2.0
| 35,247
|
[
"Avogadro"
] |
877111e3a60b9cac38e3eece856c4c439cc042fb3f269c1c75e89d25cfe1b7f5
|
#
# Copyright (C) 2002-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for the signatures
"""
import os
import unittest
from rdkit import Chem
from rdkit import RDConfig
from rdkit.Chem.Pharm2D import Gobbi_Pharm2D, Generate
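# Gobbi_Pharm2D.factory is a signature factory configured with the
# Gobbi/Poppinger pharmacophore feature definitions (HA, HD, LH, AR, RR, X,
# BG, AG); Generate.Gen2DFingerprint(mol, factory) turns a molecule into the
# corresponding 2D pharmacophore fingerprint.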
class TestCase(unittest.TestCase):
def setUp(self):
self.factory = Gobbi_Pharm2D.factory
def test1Sigs(self):
probes = [
('OCCC=O', {
'HA': (1, ((0, ), (4, ))),
'HD': (1, ((0, ), )),
'LH': (0, None),
'AR': (0, None),
'RR': (0, None),
'X': (0, None),
'BG': (0, None),
'AG': (0, None),
}),
('OCCC(=O)O', {
'HA': (1, ((0, ), (4, ))),
'HD': (1, ((0, ), (5, ))),
'LH': (0, None),
'AR': (0, None),
'RR': (0, None),
'X': (0, None),
'BG': (0, None),
'AG': (1, ((3, ), )),
}),
('CCCN', {
'HA': (1, ((3, ), )),
'HD': (1, ((3, ), )),
'LH': (0, None),
'AR': (0, None),
'RR': (0, None),
'X': (0, None),
'BG': (1, ((3, ), )),
'AG': (0, None),
}),
('CCCCC', {
'HA': (0, None),
'HD': (0, None),
'LH': (1, ((1, ), (3, ))),
'AR': (0, None),
'RR': (0, None),
'X': (0, None),
'BG': (0, None),
'AG': (0, None),
}),
('CC1CCC1', {
'HA': (0, None),
'HD': (0, None),
'LH': (1, ((1, ), (3, ))),
'AR': (0, None),
'RR': (1, ((1, ), )),
'X': (0, None),
'BG': (0, None),
'AG': (0, None),
}),
('[SiH3]C1CCC1', {
'HA': (0, None),
'HD': (0, None),
'LH': (1, ((1, ), )),
'AR': (0, None),
'RR': (1, ((1, ), )),
'X': (1, ((0, ), )),
'BG': (0, None),
'AG': (0, None),
}),
('[SiH3]c1ccccc1', {
'HA': (0, None),
'HD': (0, None),
'LH': (0, None),
'AR': (1, ((1, ), )),
'RR': (0, None),
'X': (1, ((0, ), )),
'BG': (0, None),
'AG': (0, None),
}),
]
for smi, d in probes:
mol = Chem.MolFromSmiles(smi)
feats = self.factory.featFactory.GetFeaturesForMol(mol)
for k in d.keys():
shouldMatch, mapList = d[k]
feats = self.factory.featFactory.GetFeaturesForMol(mol, includeOnly=k)
if shouldMatch:
self.assertTrue(feats)
self.assertEqual(len(feats), len(mapList))
aids = [(x.GetAtomIds()[0], ) for x in feats]
aids.sort()
self.assertEqual(tuple(aids), mapList)
def test2Sigs(self):
probes = [('O=CCC=O', (149, )),
('OCCC=O', (149, 156)),
('OCCC(=O)O', (22, 29, 149, 154, 156, 184, 28822, 30134)), ]
for smi, tgt in probes:
sig = Generate.Gen2DFingerprint(Chem.MolFromSmiles(smi), self.factory)
self.assertEqual(len(sig), 39972)
bs = tuple(sig.GetOnBits())
self.assertEqual(len(bs), len(tgt))
self.assertEqual(bs, tgt)
def testOrderBug(self):
sdFile = os.path.join(RDConfig.RDCodeDir, 'Chem', 'Pharm2D', 'test_data', 'orderBug.sdf')
suppl = Chem.SDMolSupplier(sdFile)
m1 = next(suppl)
m2 = next(suppl)
sig1 = Generate.Gen2DFingerprint(m1, self.factory)
sig2 = Generate.Gen2DFingerprint(m2, self.factory)
self.assertEqual(sig1, sig2)
def testOrderBug2(self):
from rdkit.Chem import Randomize
from rdkit import DataStructs
probes = ['Oc1nc(Oc2ncccc2)ccc1']
for smi in probes:
m1 = Chem.MolFromSmiles(smi)
# m1.Debug()
sig1 = Generate.Gen2DFingerprint(m1, self.factory)
csmi = Chem.MolToSmiles(m1)
m2 = Chem.MolFromSmiles(csmi)
# m2.Debug()
sig2 = Generate.Gen2DFingerprint(m2, self.factory)
self.assertTrue(list(sig1.GetOnBits()) == list(sig2.GetOnBits()), '%s %s' % (smi, csmi))
self.assertEqual(DataStructs.DiceSimilarity(sig1, sig2), 1.0)
self.assertEqual(sig1, sig2)
for _ in range(10):
m2 = Randomize.RandomizeMol(m1)
sig2 = Generate.Gen2DFingerprint(m2, self.factory)
if sig2 != sig1:
Generate._verbose = True
print('----------------')
sig1 = Generate.Gen2DFingerprint(m1, self.factory)
print('----------------')
sig2 = Generate.Gen2DFingerprint(m2, self.factory)
print('----------------')
print(Chem.MolToMolBlock(m1))
print('----------------')
print(Chem.MolToMolBlock(m2))
print('----------------')
s1 = set(sig1.GetOnBits())
s2 = set(sig2.GetOnBits())
print(s1.difference(s2))
self.assertEqual(sig1, sig2)
def testBitInfo(self):
m = Chem.MolFromSmiles('OCC=CC(=O)O')
bi = {}
sig = Generate.Gen2DFingerprint(m, Gobbi_Pharm2D.factory, bitInfo=bi)
self.assertEqual(sig.GetNumOnBits(), len(bi))
self.assertEqual(list(sig.GetOnBits()), sorted(bi.keys()))
self.assertEqual(sorted(bi.keys()), [23, 30, 150, 154, 157, 185, 28878, 30184])
self.assertEqual(sorted(bi[28878]), [[(0, ), (5, ), (6, )]])
self.assertEqual(sorted(bi[157]), [[(0, ), (6, )], [(5, ), (0, )]])
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
greglandrum/rdkit
|
rdkit/Chem/Pharm2D/UnitTestGobbi.py
|
Python
|
bsd-3-clause
| 6,235
|
[
"RDKit"
] |
c3942a7538666b36989ed61d961a267d479fd19ec3a6422e8fc3634d646250bb
|
"""
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import product
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
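# The tests below follow the (nose-style) test generator pattern: each test_*
# function yields (check_function, *args) tuples, and the test runner invokes
# the check function once per estimator name.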
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
"""Check consistency on dataset iris."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
"""Check consistency on dataset boston house prices."""
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
"""Regression models should not have a classes_ attribute."""
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
"""Predict probabilities."""
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importance(name, X, y):
"""Check variable importances."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = clf.transform(X, threshold="mean")
    assert_greater(X_new.shape[1], 0)
    assert_less(X_new.shape[1], X.shape[1])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
clf = ForestClassifier(n_estimators=50, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
importances = clf.feature_importances_
assert_true(np.all(importances >= 0.0))
clf = ForestClassifier(n_estimators=50, random_state=0)
clf.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = clf.feature_importances_
assert_almost_equal(importances, importances_bis)
def test_importances():
X, y = datasets.make_classification(n_samples=1000, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name in FOREST_CLASSIFIERS:
yield check_importance, name, X, y
def check_oob_score(name, X, y, n_estimators=20):
"""Check that oob prediction is a good estimation of the generalization
error."""
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
"""Check that base trees can be grid-searched."""
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
"""Check pickability."""
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
"""Check estimators on multi-output problems."""
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
"""Test that n_classes_ and classes_ have proper shape."""
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
'''
Test that the `sparse_output` parameter of RandomTreesEmbedding
works by returning a dense array.
'''
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
'''
Test that the `sparse_output` parameter of RandomTreesEmbedding
works by returning the same array for both argument
values.
'''
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
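# A plausible way to see these numbers: extra-trees draws each split threshold
# uniformly over the feature range [0, 3]. With probability 1/3 the first cut
# lands in (1, 2), after which the two remaining cuts are forced, giving the
# compact tree; otherwise the first cut isolates one extreme value (probability
# 1/3 per side) and the three-value side is then split in one of two orders with
# probability 1/2 each, giving each of the four remaining trees 1/3 * 1/2 = 1/6.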
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = rng.randint(0, 2, 1000)
X[:, 1] = rng.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
"""Test precedence of max_leaf_nodes over max_depth. """
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
"""Test if leaves contain more than leaf_count training examples"""
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_memory_layout(name, dtype):
"""Check that it works no matter the memory layout"""
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
def test_1d_input():
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_warm_start(name, random_state=42):
"""Test if fitting incrementally with warm start gives a forest of the
right size and the same results as a normal fit."""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
"""Test if fit clears state and grows a new forest when warm_start==False.
"""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
"""Test if warm start second fit with smaller n_estimators raises error."""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
"""Test if warm start with equal n_estimators does nothing and returns the
same forest and raises a warning."""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
"""Test that the warm start computes oob score when asked."""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_false(hasattr(clf_3, 'oob_score_'))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
if __name__ == "__main__":
import nose
nose.runmodule()
|
soulmachine/scikit-learn
|
sklearn/ensemble/tests/test_forest.py
|
Python
|
bsd-3-clause
| 27,885
|
[
"Brian"
] |
7c70bf77f0edd08583a001d580c38e7ef4a6d5a7bfb7b5279938562ba3171e78
|
import os
import inspect
import getpass
import subprocess
import sys
import re
import shutil
import cutlass
import anadama.pipelines
from collections import OrderedDict
from . import workflows
from . import PrepProt
def get_proteomes(preps):
def _ps():
for prep in preps:
proteome_list=list()
for proteome in prep.proteomes():
proteome_list.append(proteome)
if len(proteome_list) != 0:
yield prep, proteome_list
return map(PrepProt._make, _ps())
# Utility function to check whether the required software is installed.
def check_software_dependencies():
print('checking software dependencies')
# Check if ASCP is installed
try:
return_status=subprocess.check_output(["ascp", "-A"])
except OSError as e:
print('ASCP not installed or configured. Please install/configure ASCP.')
sys.exit(1)
# Check if 64-bit Java, version 1.8 or greater, is installed
try:
return_status=subprocess.check_output(["java", "-d64", "-version"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print('64-bit Java not installed. Please install 64-bit java.')
sys.exit(1)
version=re.findall('"([^"]*)"',return_status.splitlines()[0])[0]
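# Note: this is a plain lexicographic comparison against the quoted version
# string (e.g. "1.8.0_121" >= "1.8"), which is adequate for the legacy 1.x
# numbering scheme this check targets.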
if version < '1.8':
print("Java version 1.8 or greater required.")
sys.exit(1)
class PRIDEPipeline(anadama.pipelines.Pipeline):
""" Pipeline for submitting proteomics data from iHMP DCC's OSDF instance
to PRIDE repository.
Steps:
1. Given an OSDF id number for a Study, query the database to retrieve all the proteome
instances included in the study.
2. For each proteome instance retrieved, download the result file (in .mzid format) along
with the related peak file (in .mgf format), raw data file(s) and other files (if present).
3. Validate each set of result and related peak files using the PRIDE Converter tool.
4. Create a submission summary file (submission.px) required for submitting the data.
5. Submit the entire set of files along with the submission file created in the previous
step to the PRIDE repository
"""
name="PRIDE"
default_options = {
"collect": {
"dcc_user": None,
"dcc_pw": None,
"study_id": None,
},
"submit": {
"pride_user": None,
"pride_pw": None,
"pride_server": None,
"pride_directory": None,
}
}
workflows= {
"collect": workflows.collect,
"submit": workflows.submit,
}
project_metadata = OrderedDict([ # Field populated from
("submitter_name", None), # .anadama_pride config file
("submitter_email", None), # .anadama_pride config file
("submitter_affiliation", None), # .anadama_pride config file
("lab_head_name", None), # .anadama_pride config file
("lab_head_email", None), # .anadama_pride config file
("lab_head_affiliation", None), # .anadama_pride config file
("submitter_pride_login", None), # .anadama_pride config file
("project_title", None), # .anadama_pride config file
("project_description", None), # .anadama_pride config file
("sample_processing_protocol", None), # Field 'protocol_steps' from
# corresponding assay_prep
("data_processing_protocol", None), # Field 'data_processing_protocol' from
# corresponding proteome
("keywords", ''), # Field 'species' from
# corresponding assay_prep
("submission_type", 'COMPLETE'), # Only 'Complete' submissions are done
# That's the way of a 'Shinobi'.
("experiment_type", list()), # Field 'experiment_type' from
# corresponding assay_prep
("species", list()), # Field 'species' from
# corresponding assay_prep
("tissue", list()), # Field 'tissue' from
# corresponding assay_prep
("instrument", list()), # Field 'instrument_name' from
# proteome OSDF entry
])
def __init__(self, workflow_options=dict(), *args, **kwargs):
""""""
super(PRIDEPipeline, self).__init__(*args, **kwargs)
check_software_dependencies()
config_file=None
# Check if config file is present in the following places in order of precedence:
# 1. User's Home directory
# 2. The directory where the module code is present
# 3. Any directory on the Python path (sys.path); a roughly equivalent search loop is sketched below.
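# A rough sketch of the equivalent search (module_dir stands for the
# inspect-derived directory computed below):
#   for candidate_dir in [os.path.expanduser("~"), module_dir] + sys.path:
#       candidate = os.path.join(candidate_dir, '.anadama_pride')
#       if os.path.exists(candidate):
#           config_file = candidate
#           break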
path=os.path.expanduser("~")
file_path=os.path.join(path,'.anadama_pride')
if config_file is None and os.path.exists(file_path):
config_file=file_path
path=os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
file_path=os.path.join(path,'.anadama_pride')
if config_file is None and os.path.exists(file_path):
config_file=file_path
paths=sys.path
for path in paths:
file_path=os.path.join(path,'.anadama_pride')
if config_file is None and os.path.exists(file_path):
config_file=file_path
if config_file is None:
print('Config file not present.')
sys.exit(1)
self.options = self.default_options.copy()
# Update the options to user provided options
for k in self.options.iterkeys():
self.options[k].update(workflow_options.get(k, {}))
# Read the .anadama_pride config file
with open(config_file) as _file:
for line in _file.readlines():
tags=line.strip('\n').split('=')
# Update the DCC username and password fields
if(tags[0]=='dcc_user' or tags[0]=='dcc_pw'):
if len(tags[1]) == 0:
print('Please update the configuration file with your DCC username and password.')
sys.exit(1)
self.options['collect'][tags[0]] = tags[1]
# Update PRIDE username, password and server fields
elif(tags[0]=='pride_user' or tags[0]=='pride_pw' or tags[0]=='pride_server' or tags[0]=='pride_directory'):
# print tags[0]
if len(tags[1]) ==0:
print('Please update the configuration file with your PRIDE credentials.')
sys.exit(1)
self.options['submit'][tags[0]] = tags[1]
# Update the project metadata fields
else:
if len(tags[1]) == 0:
print('Please update the configuration file with the required metadata.')
sys.exit(1)
self.project_metadata[tags[0]] = tags[1]
if(tags[0]=='project_description' and (len(tags[1])<50 or len(tags[1])>500)):
print('The length of the Project Description field must be between 50 and 500 characters.')
sys.exit(1)
_file.close()
if not self.options['collect'].get('study_id', None):
prompt="Enter the study ID to submit: "
self.options['collect']['study_id'] = raw_input(prompt)
def metadata_from_prep(self,prepprot):
assay_prep = prepprot.prep
# There might be multiple proteomes and multiple assay_preps included in one submission.
# The assumption made here is that the protocol steps remain the same for all the
# assay_preps that belong to the study being submitted. Therefore, the metadata field
# 'sample_processing_protocol' is populated only from the first assay prep encountered.
if(self.project_metadata["sample_processing_protocol"] is None):
spp_string=assay_prep._protocol_steps
if (len(spp_string) < 50 or len(spp_string) > 500):
print('The length of the Sample Processing Protocol field must be between 50 and 500 characters.\n'
'Please update the field protocol_steps in the related assay_prep entry in OSDF.')
sys.exit(1)
self.project_metadata["sample_processing_protocol"] = spp_string
if assay_prep._experiment_type not in self.project_metadata["experiment_type"]:
self.project_metadata["experiment_type"].append(assay_prep._experiment_type)
if assay_prep._species not in self.project_metadata["keywords"]:
self.project_metadata["keywords"] = self.project_metadata["keywords"] + assay_prep._species
if assay_prep._species not in self.project_metadata["species"]:
self.project_metadata["species"].append(assay_prep._species)
if assay_prep._tissue not in self.project_metadata["tissue"]:
self.project_metadata["tissue"].append(assay_prep._tissue)
for proteome in prepprot.proteome:
if proteome._instrument_name not in self.project_metadata["instrument"]:
self.project_metadata["instrument"].append(proteome._instrument_name)
# There might be multiple proteomes included in one submission.
# The assumption made here is that the data processing protocol remains the same for all the
# proteomes that belong to the study being submitted. Therefore, the metadata field
# 'data_processing_protocol' is populated only from the first proteome encountered.
if (self.project_metadata["data_processing_protocol"] is None):
dpp_string = proteome._data_processing_protocol
if (len(dpp_string) < 50 or len(dpp_string) > 500):
print('The length of the Data Processing Protocol field must be between 50 and 500 characters.\n'
'Please update the field data_processing_protocol in the related proteome entry in OSDF.')
sys.exit(1)
self.project_metadata["data_processing_protocol"] = dpp_string
def _configure(self):
session = cutlass.iHMPSession(self.options['collect']['dcc_user'],
self.options['collect']['dcc_pw'])
# Connect to OSDF with the provided username and password
try:
session._osdf.get_info()
except ValueError as e:
print('Cannot connect to OSDF. Please check OSDF Username and Password')
sys.exit(1)
# Retrieve the study instance for the id number provided.
try:
study = cutlass.Study.load(self.options['collect']['study_id'])
except Exception as e:
print('No study found with the entered study ID')
sys.exit(1)
# The name of the folder containing the data to be submitted is the OSDF id of the study being submitted.
result_dir=os.path.join(os.getcwd(),self.options['collect']['study_id'])
if os.path.exists(result_dir):
print('Removing existing result folder')
shutil.rmtree(result_dir)
os.mkdir(result_dir, 0777)
# Retrieve each proteome derived from either a host assay prep or a microbiome assay prep
# prepared from each sample collected during each visit by each subject that participated
# in the given study.
record_proteomes=list()
for subject in study.subjects():
for visit in subject.visits():
for sample in visit.samples():
host_assay_preps=get_proteomes(sample.hostAssayPreps())
if host_assay_preps:
record_proteomes.append(host_assay_preps)
micro_assay_preps=get_proteomes(sample.microbAssayPreps())
if micro_assay_preps:
record_proteomes.append(micro_assay_preps)
# For each proteome retrieved, download and validate the data files.
for record in record_proteomes:
for prepprot in record:
self.metadata_from_prep(prepprot)
yield workflows.collect(session,prepprot,result_dir,**self.options['collect'])
# After all the proteome data included in this study is retrieved and validated,
# create a submission summary file and submit the data to the PRIDE repository.
yield workflows.submit(result_dir,self.project_metadata,**self.options['submit'])
|
ihmpdcc/anadama-pride
|
pride/pipeline.py
|
Python
|
mit
| 11,032
|
[
"VisIt"
] |
61b1fb77a247a88b3559c268ec9511a730f996189fe23343e7930778479263f5
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from main.views import *
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
url(
regex=r'^calculate-similarity/$',
view=CalculateSimilarity.as_view(),
name='list'
),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, include(admin.site.urls)),
# User management
url(r'^users/', include("document_similarity.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development; just visit
# these URLs in a browser to see how the error pages look.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception("Bad Request!")}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception("Permission Denied")}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception("Page not Found")}),
url(r'^500/$', default_views.server_error),
]
|
sujitmhj/document_similarity_based_on_bloom_filter
|
config/urls.py
|
Python
|
mit
| 1,590
|
[
"VisIt"
] |
29b514822874b39b5759b1203a0b04e8f26b516270f4bfbdbe0d13262b26cb72
|
from .base import SOLAR, EARTH, WATER, FIRE, AIR, LUNAR, Spell
from .. import effects
from .. import targetarea
from .. import enchantments
from .. import animobs
from .. import stats
from .. import invocations
# CIRCLE 1
BLESSING = Spell( "Blessing",
"Increases the physical and magic attack scores of all allies within 6 tiles by +5%. This effect lasts until the end of combat.",
effects.TargetIsAlly( on_true = (
effects.Enchant( enchantments.BlessingEn, anim=animobs.YellowSparkle )
,) ), rank=1, gems={SOLAR:1}, com_tar=targetarea.SelfCentered(),
ai_tar=invocations.TargetAllyWithoutEnchantment(enchantments.BlessingEn), mpfudge=-1 )
MINOR_CURE = Spell( "Minor Cure",
"This spell will heal one nearby ally for 1-10 damage.",
effects.HealthRestore( dice=(1,10,0) ),
rank=1, gems={SOLAR:1}, com_tar=targetarea.SingleTarget(reach=1), ai_tar=invocations.TargetWoundedAlly(),
exp_tar=targetarea.SinglePartyMember() )
# CIRCLE 2
MODERATE_CURE = Spell( "Moderate Cure",
"This spell will heal one nearby ally for 6-20 damage.",
effects.HealthRestore( dice=(2,8,4) ),
rank=2, gems={SOLAR:2}, com_tar=targetarea.SingleTarget(reach=1), ai_tar=invocations.TargetWoundedAlly(),
exp_tar=targetarea.SinglePartyMember(), mpfudge = -2 )
# CIRCLE 3
SUNRAY = Spell( "Sunray",
"This attack does 3d6 solar damage when it hits.",
effects.OpposedRoll( att_modifier=10, on_success = (
effects.HealthDamage( (3,6,0), stat_bonus=stats.INTELLIGENCE, element=stats.RESIST_SOLAR, anim=animobs.YellowExplosion )
,), on_failure = (
effects.NoEffect( anim=animobs.SmallBoom )
,) ), rank=3, gems={SOLAR:2}, com_tar=targetarea.SingleTarget(), shot_anim=animobs.YellowVortex, ai_tar=invocations.TargetEnemy() )
CURE_POISON = Spell( "Cure Poison",
"This spell will remove all poisoning from a single party member.",
effects.TidyEnchantments( enchantments.POISON, anim=animobs.YellowSparkle ),
rank=3, gems={SOLAR:1}, com_tar=targetarea.SingleTarget(reach=1),
exp_tar=targetarea.SinglePartyMember(), mpfudge = -1 )
# CIRCLE 4
MAJOR_CURE = Spell( "Major Cure",
"This spell will heal one nearby ally for 15-36 damage.",
effects.HealthRestore( dice=(3,8,12) ),
rank=4, gems={SOLAR:2}, com_tar=targetarea.SingleTarget(reach=1), ai_tar=invocations.TargetWoundedAlly(),
exp_tar=targetarea.SinglePartyMember(), mpfudge = -4 )
RESTORATION = Spell( "Restoration",
"This spell heals all stat damage that has been done to a single ally.",
effects.StatRestore( anim=animobs.HealthUp ),
rank=4, gems={SOLAR:2}, com_tar=targetarea.SingleTarget(reach=1),
exp_tar=targetarea.SinglePartyMember() )
# CIRCLE 5
MASS_CURE = Spell( "Mass Cure",
"This spell will heal all allies within 3 tiles for 4-40 damage.",
effects.TargetIsAlly( on_true = (
effects.HealthRestore( dice=(4,10,0) )
,) ), rank=5, gems={SOLAR:3}, com_tar=targetarea.SelfCentered(radius=3), ai_tar=invocations.TargetWoundedAlly(),
exp_tar=targetarea.AllPartyMembers() )
REMOVE_CURSE = Spell( "Remove Curse",
"This spell will dispel all harmful magical effects from a single party member.",
effects.TidyEnchantments( enchantments.CURSE, anim=animobs.YellowSparkle ),
rank=5, gems={SOLAR:2}, com_tar=targetarea.SingleTarget(reach=1),
exp_tar=targetarea.SinglePartyMember(), mpfudge = -5 )
# CIRCLE 6
MAXIMUM_CURE = Spell( "Maximum Cure",
"This spell will heal one nearby ally for 20-120 damage.",
effects.HealthRestore( dice=(20,6,0) ),
rank=6, gems={SOLAR:3}, com_tar=targetarea.SingleTarget(reach=1), ai_tar=invocations.TargetWoundedAlly(),
exp_tar=targetarea.SinglePartyMember() )
# Death Ward
# CIRCLE 7
SUNBURST = Spell( "Sunburst",
"Conjures an intense ball of light which does 5d10 solar damage to all enemies in a 3 tile radius.",
effects.TargetIsEnemy( on_true=(
effects.OpposedRoll( on_success = (
effects.HealthDamage( (5,10,0), stat_bonus=stats.INTELLIGENCE, element=stats.RESIST_SOLAR, anim=animobs.YellowExplosion )
,), on_failure = (
effects.HealthDamage( (2,12,0), stat_bonus=None, element=stats.RESIST_SOLAR, anim=animobs.YellowExplosion ), )
),
), on_false = (effects.NoEffect( anim=animobs.YellowExplosion ),)
), rank=7, gems={SOLAR:2}, com_tar=targetarea.Blast(radius=3,delay_from=1), shot_anim=animobs.GoldStone,
ai_tar=invocations.TargetEnemy(min_distance=4) )
RENEWAL = Spell( "Renewal",
"This spell will heal one nearby ally for 50-120 damage, retore drained stats, remove curses and cure poisoning.",
effects.TargetIsAlly( on_true = (
effects.HealthRestore( dice=(10,8,40) ),
effects.TidyEnchantments( enchantments.CURSE, anim=None ),
effects.TidyEnchantments( enchantments.POISON, anim=None ),
effects.StatRestore( anim=None ),
)),
rank=7, gems={SOLAR:4}, com_tar=targetarea.SingleTarget(reach=1), ai_tar=invocations.TargetWoundedAlly(),
exp_tar=targetarea.SinglePartyMember() )
# CIRCLE 8
# Resurrection
# CIRCLE 9
MIRACLE_CURE = Spell( "Miracle Cure",
"This spell will heal all allies within 10 tiles for 20-120 damage.",
effects.TargetIsAlly( on_true = (
effects.HealthRestore( dice=(20,6,0) )
,) ), rank=9, gems={SOLAR:4}, com_tar=targetarea.SelfCentered(radius=10), ai_tar=invocations.TargetWoundedAlly(),
exp_tar=targetarea.AllPartyMembers(), mpfudge = 25 )
|
jwvhewitt/dmeternal
|
old_game/spells/solarspells.py
|
Python
|
gpl-2.0
| 5,480
|
[
"BLAST"
] |
cd357e37daeff20b7d41d4fbd6137dd9dbe73c8ab0bef37abc4082aea12fc981
|
# Jackie's Map
# CMDR Jackie Silver, DISC
# Kay Johnston 2017 / 3303
# Thanks are due to everyone who's collected data for the various source lists, and particularly to edsm.
version = '3r2'
# Standard Python imports. Might need to change PIL to pillow on some versions of Python?
from tkinter import *
import PIL
from PIL import ImageTk, Image, ImageDraw, ImageFont
import math
# From Alot's excellent edts suite.
import pgnames
# Wants, needs, options:
############
# Sort out mousewheel zoom bindings on Linux. Should be <Button-4> and <Button-5> but ought to rework the whole event handler doodah.
# Maybe something to show the sphere of (presumed) Thargoid hyperspace interdictions. Do we even know its extent?
# Does the hyperdiction sphere intersect with the UA sphere?
# UPs supposed to be at "Ammonia planets in the Pleiades sector" and a number of convoys near Sol. I've added known examples, but can't verify yet.
# Add in proc-gen nebulae - at least the major ones like Colonia. Serious effort needed to run all them down though. Can I get this from edsm?
# Can we increase speed by drawing only those objects which are inside the canvas area?
# Add some kind of scale? And maybe a compass pointer towards the Core? Can lump this into Misc.
# Add rough indicator for hyperdiction sphere extent.
# Check Imperial Bubble extent and centrepoint. Perhaps add something to show Agri (terraformed ELW) and High Tech radii.
# Add category indicators for tourist POI.
# An "approximate density" function; would need to plug in the spiral approximation for the galaxy's shape and do a bunch of other stuff.
# Continue to update the various data files.
# Some better way of displaying the details when there are many POI in the same system. Scrolling display box?
# Need to read shipyard-present status for POIs and add an indicator for that (possibly, red circle outline?)
# Want to separate out drawing of asteroid bases, megaships, alien stuff?
# Player factions list should include the new Colonia factions.
# Other
# I should probably use setattr, and I could certainly lump all the different classes together into one uberclass.
# Could farm the cross-drawing bits out to separate method like the hats.
# Changelog
############
# 3i changes:
# Added scaling buttons, sector name check with Alot's edts, display of galmap style coordinates.
# Added display of body and latitude + longitude for POI.
# 3j changes:
# Added permit-locked HA stars to POI list and appropriate display. Added distance display. Added rare goods. Minor tweaks.
# Added some known UP locations. Corrected error with NGC 752 / IC 1848.
# 3k changes: (released version)
# Added pulsars. Updated tourist file. Changed mouseover to take account of scaling and only return objects which are drawn.
# Moved display of rare goods distance and tonnage into mouseover. Removed redundant indicator text and associated button.
# 3l changes:
# Added player factions. Updated tourist file, POI file.
# 3m changes:
# General tidying up, some UI changes. Updated data. Added toggle for Reorte-Riedquat line. Finished first pass of checking player factions.
# Changed player factions sheet to include a validity option, so details for some are loaded but not used.
# Reworked to show central nebulae and stars of ha sectors; the list of central stars is incomplete, and needs work.
# 3n changes:
# Moved RR line and UA sphere into a single Misc category. Added rough indicator of the Bubble's size into the same category.
# Added PRE logistics and Colonia stations to POIs. Added possible boundary lines for Guardian sites towards Regor. Added EAFOTS box to misc.
# Added distance indicators for tourist destinations. Improved drawing of sector fills.
# 3o changes: (released version)
# Adding non-sector HA star clusters gleaned from edsm (as the galmap search is a little strange);
# Changed handling of sectors from original list which don't exist as sectors. Introduced category for sectors which are nominally clusters but aren't.
# Added option to display the full list of individual stars known to edsm. Very interesting to see.
# 3p changes:
# Added search and highlight/filter functions. Added suppression corridor boundaries to misc toggle. Enabled PG sector name finding in search.
# Enabled filtering by sector including PG sectors. Added output .csv of filtered stars. Draws PG sector boundaries if searched.
# ...and disabled suppression corridor boundaries again.
# 3q / 3q2 changes:
# Improved name matching on HA sector filtering. Cleaned up line drawing stuff a bit. Added many Grauniad sites to POI list. Updated tourist sites.
# Updated edsm star list. Added new Colonia systems to POI list.
# 3r changes:
# Added asteroid bases, megaships into POI list.
# 3r2 changes:
# Added barnacles and many more POI. Removed EAFOTS box (obsolete post-Zurara), Guardians lines (toggling POI gives a good enough idea.)
# Removed Bright Star progress line. Left RR line in for nostalgia purposes. Added shipyard data for asteroid bases and landmarks (incomplete.)
# That still needs work, it just draws them in a slightly different colour; should be more obvious.
class App():
def __init__(self,master):
# Create a frame for the controls.
self.control_frame = Frame(master)
self.control_frame.pack()
# Defaults for offsets and scaling.
self.x_offset = 0
self.y_offset = 0
self.z_offset = 0
self.scaling = 2
# Search defaults.
self.search_x = 0
self.search_y = 0
self.search_performed = False
self.search_target = ''
self.highlight_target = ''
self.search_is_pg_sector = False
self.search_is_pg_x = 0
self.search_is_pg_y = 0
self.search_is_pg_z = 0 # Won't be needed, I suppose.
# Filtering default.
self.deferred = [] # Holds all stars that match the filter *and* match filtering by sector.
self.deferred_alpha = [] # Holds all stars that match the filter. (This is the first pass done, hence alpha. Go with it.)
# Create entry boxes for the controls.
self.x_co_box = Entry_Box(self.control_frame,'X:',str(self.x_offset),2,9)
self.y_co_box = Entry_Box(self.control_frame,'Y:',str(self.y_offset),2,9)
self.z_co_box = Entry_Box(self.control_frame,'Z:',str(self.z_offset),2,9)
self.scaling_box = Entry_Box(self.control_frame,'Scaling:',str(self.scaling),7,5)
# Bind the control entry boxes to the automatic update.
self.x_co_box.entry.bind('<Return>', self.auto_calculate)
self.y_co_box.entry.bind('<Return>', self.auto_calculate)
self.z_co_box.entry.bind('<Return>', self.auto_calculate)
self.scaling_box.entry.bind('<Return>', self.auto_calculate)
# Create a "save png" button.
self.save_button = Button(self.control_frame, text = 'Output', command = self.save, padx = 1)
self.save_button.pack(side = LEFT)
# Create buttons for moving z levels.
self.z_up_button = Button(self.control_frame, text = 'Z+', command = self.z_up, padx = 1)
self.z_up_button.pack(side = LEFT)
self.z_down_button = Button(self.control_frame, text = 'Z-', command = self.z_down, padx = 1)
self.z_down_button.pack(side = LEFT)
# Create buttons for changing scaling.
self.s_up_button = Button(self.control_frame, text = 'Zm Out', command = self.s_up, padx = 1)
self.s_up_button.pack(side = LEFT)
self.s_down_button = Button(self.control_frame, text = 'Zm In', command = self.s_down, padx = 1)
self.s_down_button.pack(side = LEFT)
# Create a frame to hold toggle buttons.
self.toggle_frame = Frame(master)
self.toggle_frame.pack()
# Create toggle buttons.
self.draw_crosses = IntVar()
self.draw_crosses.set(0)
self.toggle_crosses = Checkbutton(self.toggle_frame, text = 'Crosses', variable = self.draw_crosses, command = self.update_image)
self.toggle_crosses.pack(side = LEFT)
self.draw_fills = IntVar()
self.toggle_fills = Checkbutton(self.toggle_frame, text = 'Fills', variable = self.draw_fills, command = self.update_image)
self.toggle_fills.pack(side = LEFT)
self.draw_names = IntVar()
self.draw_names.set(1)
self.toggle_names = Checkbutton(self.toggle_frame, text = 'Names', variable = self.draw_names, command = self.update_image)
self.toggle_names.pack(side = LEFT)
self.draw_indicators = IntVar()
self.draw_indicators.set(1)
self.toggle_indicators = Checkbutton(self.toggle_frame, text = 'Indics', variable = self.draw_indicators, command = self.update_image)
self.toggle_indicators.pack(side = LEFT)
self.draw_poi = IntVar()
self.draw_poi.set(0)
self.toggle_poi = Checkbutton(self.toggle_frame, text = 'POI', variable = self.draw_poi, command = self.update_image)
self.toggle_poi.pack(side = LEFT)
self.draw_tourist = IntVar()
self.draw_tourist.set(0)
self.toggle_tourist = Checkbutton(self.toggle_frame, text = 'Tourist', variable = self.draw_tourist, command = self.update_image)
self.toggle_tourist.pack(side = LEFT)
self.draw_rares = IntVar()
self.draw_rares.set(0)
self.toggle_rares = Checkbutton(self.toggle_frame, text = 'Rares', variable = self.draw_rares, command = self.update_image)
self.toggle_rares.pack(side = LEFT)
self.draw_pulsars = IntVar()
self.draw_pulsars.set(0)
self.toggle_pulsars = Checkbutton(self.toggle_frame, text = 'PSR', variable = self.draw_pulsars, command = self.update_image)
self.toggle_pulsars.pack(side = LEFT)
self.draw_player = IntVar()
self.draw_player.set(0)
self.toggle_players = Checkbutton(self.toggle_frame, text = 'Plyr', variable = self.draw_player, command = self.update_image)
self.toggle_players.pack(side = LEFT)
self.draw_misc = IntVar()
self.draw_misc.set(0)
self.toggle_misc = Checkbutton(self.toggle_frame, text = 'Misc', variable = self.draw_misc, command = self.update_image)
self.toggle_misc.pack(side = LEFT)
self.draw_findiv = IntVar()
self.draw_findiv.set(0)
self.toggle_findiv = Checkbutton(self.toggle_frame, text = 'F!', variable = self.draw_findiv, command = self.update_image)
self.toggle_findiv.pack(side = LEFT)
# Create a frame to hold search and highlight controls.
self.search_frame = Frame(master)
self.search_frame.pack()
# Create highlight - well, filter - and search boxes.
self.highlight_box = Entry_Box(self.search_frame,'Filter','',6,10)
self.filter_by_box = Entry_Box(self.search_frame,'by Sector','',8,10)
self.search_box = Entry_Box(self.search_frame,'Search','',6,10)
# Create a label to show the search result.
self.search_result = StringVar()
self.search_result.set('')
self.search_result_label = Label(self.search_frame, textvariable = self.search_result,width = 32)
self.search_result_label.pack()
# Bind highlight and search to update functions.
self.highlight_box.entry.bind('<Return>', self.auto_calculate)
self.filter_by_box.entry.bind('<Return>', self.auto_calculate)
self.search_box.entry.bind('<Return>', self.update_search_target)
# Create a frame to display data.
self.data_frame = Frame(master)
self.data_frame.pack()
# Create a label for mouse coordinates.
self.data_mouse = StringVar()
mousetext = 'X: --- ly, Y: --- ly, Z: --- ly.'
self.data_mouse.set(mousetext)
self.data_mouse_label = Label(self.data_frame, textvariable = self.data_mouse)
self.data_mouse_label.pack()
# Create a frame to display current sectors.
self.current_sector_frame = Frame(master)
self.current_sector_frame.pack()
# Create a label to display current sectors.
self.current_sectors = StringVar()
self.current_sectors.set('')
self.current_sectors_label = Label(self.current_sector_frame, textvariable = self.current_sectors, width = 82)
self.current_sectors_label.pack()
# Create a frame to display current tourist destinations. (Holds current POI as well to save UI space.)
self.current_tourist_frame = Frame(master)
self.current_tourist_frame.pack()
# Create a label to display current tourist destinations.
self.current_tourists = StringVar()
self.current_tourists.set('')
self.current_tourists_label = Label(self.current_tourist_frame, textvariable = self.current_tourists, width = 82)
self.current_tourists_label.pack()
# Create a frame to show the map.
self.map_frame = Frame(master)
self.map_frame.pack()
# Load in a font.
self.fnt = ImageFont.truetype('Quicksand-Regular.otf', FONTSIZE)
# Create a canvas to show the map image.
self.map_canvas = Canvas(self.map_frame, width = XDIM, height = YDIM)
self.map_canvas.pack()
self.map_canvas_mx = 0
self.map_canvas_my = 0
# Bind mouse actions to the canvas.
self.map_canvas.bind('<Motion>',self.motion)
self.map_canvas.bind('<Button-1>',self.click)
self.map_canvas.bind_all('<MouseWheel>',self.mousewheel_zoom)
# Once everything else is done, call a function to update the display.
self.update_image()
def motion(self,event):
self.map_canvas_mx, self.map_canvas_my = event.x, event.y
# Arcane manoeuvres to convert mouse (pixel) position to map (ly) position: the view is centred on (x_offset, y_offset), one pixel spans 'scaling' ly, and screen y is inverted.
mx_min = self.x_offset - (XDIM / 2 * self.scaling)
my_max = self.y_offset + (YDIM / 2 * self.scaling)
mx_calc = mx_min + (self.map_canvas_mx * self.scaling)
my_calc = my_max - (self.map_canvas_my * self.scaling)
mx_calc = round(mx_calc,1)
my_calc = round(my_calc,1)
# Display the calculated position.
mousetext = 'X: ' + str(mx_calc) + ' ly, Y: ' + str(my_calc)
mousetext += ' ly, Z: ' + str(self.z_offset) + ' ly.'
mousetext += ' (Galmap: ' + str(mx_calc) + ', ' + str(self.z_offset) + ', ' + str(my_calc) + ' )'
# Calculate distance from Sol for the display.
d_from_sol = ((mx_calc ** 2) + (my_calc ** 2) + (self.z_offset ** 2)) ** 0.5
if d_from_sol < 1000:
d_text = str(int(d_from_sol)) + ' ly from Sol.'
else:
d_text = str(round(d_from_sol / 1000,1)) + ' Kylies from Sol.'
mousetext += ' ' + d_text
self.data_mouse.set(mousetext)
# Clear search box.
self.search_result.set('')
# Reworked section; find the single primary ha sector at the current position.
current = single_member_of(mx_calc, my_calc, self.z_offset)
# Use edts to get the sector name at the current position.
vector_alot = pgnames.vector3.Vector3(mx_calc, self.z_offset, my_calc)
# If the coordinates are too far out this can start to return odd values or fail, hence try-except.
try:
sector_alot = pgnames.get_sector_name(vector_alot) # as (x,z,y)
sector_alot = str(sector_alot).upper()
except:
sector_alot = ''
# We have a list of known ha sectors. If edts would give an ha sector, ignore it.
# Ideally I'd like one proc-gen name and all HA names, sorted in order.
if sector_alot not in known_ha_secs:
builttext = sector_alot
else:
builttext = ''
builttext += current
# Clunky.
for sector in ha_sec_list:
if sector.name == builttext:
if sector.a_nebula != '':
builttext += ' - Nebula: ' + sector.a_nebula
if sector.a_star != '':
builttext += ' - Search: ' + sector.a_star
self.current_sectors.set(builttext)
# Work out which tourist POI are at the current position. (2d only)
d_lr = self.draw_poi.get()
d_pr = self.draw_pulsars.get()
d_ra = self.draw_rares.get()
d_to = self.draw_tourist.get()
d_pf = self.draw_player.get()
d_fi = self.draw_findiv.get()
ht = self.highlight_target
current = current_tourist(mx_calc, my_calc, self.scaling, d_lr, d_pr, d_ra, d_to, d_pf, d_fi,ht,self.deferred)
# For goodness sake move this inside the class!
builttext = ''
for destination in current:
if destination != '':
builttext += destination
builttext += ', '
builttext = builttext.rstrip(', ')
self.current_tourists.set(builttext[:110])
def mousewheel_zoom(self,event):
# Check that this works under Linux (&Mac OS if possible)
# At the moment, this is zooming in or out by one level each time.
# Could change it to take account of the full delta given.
if event.delta > 0:
self.scaling = self.scaling / ZOOMSPEED
else:
self.scaling = self.scaling * ZOOMSPEED
# Update the scaling box to show the new value.
self.scaling_box.entry.delete(0,END)
self.scaling_box.entry.insert(0,self.scaling)
self.update_image()
# Move down z levels when the button is pressed.
def z_down(self):
self.z_offset -= Z_MOVE_RATE
self.z_co_box.entry.delete(0,END)
self.z_co_box.entry.insert(0,self.z_offset)
self.update_image()
# Move up z levels when the button is pressed.
def z_up(self):
self.z_offset += Z_MOVE_RATE
self.z_co_box.entry.delete(0,END)
self.z_co_box.entry.insert(0,self.z_offset)
self.update_image()
# Increase scaling factor (zoom out) when the button is pressed.
def s_up(self):
self.scaling *= S_MOVE_RATE
# Update the scaling box to show the new value.
self.scaling_box.entry.delete(0,END)
self.scaling_box.entry.insert(0,self.scaling)
self.update_image()
# Decrease scaling factor (zoom in) when the button is pressed.
def s_down(self):
self.scaling /= S_MOVE_RATE
# Update the scaling box to show the new value.
self.scaling_box.entry.delete(0,END)
self.scaling_box.entry.insert(0,self.scaling)
self.update_image()
def click(self,event):
self.map_canvas_mx, self.map_canvas_my = event.x, event.y
# Arcane manoeuvres to convert mouse position to map position.
mx_min = self.x_offset - (XDIM / 2 * self.scaling)
my_max = self.y_offset + (YDIM / 2 * self.scaling)
mx_calc = mx_min + (self.map_canvas_mx * self.scaling)
my_calc = my_max - (self.map_canvas_my * self.scaling)
# In this case, we are moving to the new position.
# So I'm rounding to 1 dp in the interests of common sense.
mx_calc = round(mx_calc,1)
my_calc = round(my_calc,1)
self.x_offset = mx_calc
self.y_offset = my_calc
self.x_co_box.entry.delete(0,END)
self.x_co_box.entry.insert(0,mx_calc)
self.y_co_box.entry.delete(0,END)
self.y_co_box.entry.insert(0,my_calc)
mousetext = 'X: ' + str(mx_calc) + ' ly, Y: ' + str(my_calc)
mousetext += ' ly, Z: ' + str(self.z_offset) + ' ly.'
self.data_mouse.set(mousetext)
self.update_image()
def update_search_target(self,A):
self.search_is_pg_sector = False
self.search_is_pg_x = 0
self.search_is_pg_y = 0
self.search_is_pg_z = 0
self.search_target = str(self.search_box.entry.get())
stu = self.search_target.upper()
found_rough = False
found_exact = False
rx = 0
ry = 0
rz = 0
rn = ''
ex = 0
ey = 0
ez = 0
en = ''
cx = 0
cy = 0
cz = 0
# A list of lists - we will search through each of these in turn looking for a match.
search_lists = [[findiv_list,'Sys'],[pulsar_list,'Psr'],[tourist_list,'Trst'],[player_list,'Plyr'],[rares_list,'RG'],[poi_list,'POI'],[ha_sec_list,'Sct']]
for sl in search_lists:
# Search through this list.
for f in sl[0]:
if stu == f.name.upper():
ex = f.x
ey = f.y
ez = f.z
en = 'Found: ' + f.name + ' (' + sl[1] + ')'
found_exact = True
elif stu in f.name.upper():
rx = f.x
ry = f.y
rz = f.z
rn = 'Try: ' + f.name + ' (' + sl[1] + ')'
found_rough = True
# If we have an exact match, update the entry boxes.
if found_exact == True:
self.x_co_box.entry.delete(0,END)
self.x_co_box.entry.insert(0,ex)
self.y_co_box.entry.delete(0,END)
self.y_co_box.entry.insert(0,ey)
self.z_co_box.entry.delete(0,END)
self.z_co_box.entry.insert(0,ez)
cx = ex
cy = ey
cz = ez
self.search_result.set(en)
self.search_x = cx
self.search_y = cy
self.search_performed = True
elif found_rough == True:
self.x_co_box.entry.delete(0,END)
self.x_co_box.entry.insert(0,rx)
self.y_co_box.entry.delete(0,END)
self.y_co_box.entry.insert(0,ry)
self.z_co_box.entry.delete(0,END)
self.z_co_box.entry.insert(0,rz)
self.search_result.set(rn)
self.search_x = rx
self.search_y = ry
self.search_performed = True
cx = rx
cy = ry
cz = rz
else:
# Might want to move this to an earlier point, so that rough matches in other names don't take precedence.
try:
pg_sector = pgnames.get_sector(stu,False)
# Offsets as the pg sectors ain't centred on Sol.
wx = (pg_sector.x * 1280) - 65
wy = (pg_sector.z * 1280) - 1065
wz = (pg_sector.y * 1280) - 25
wx += 640
wy += 640
wz += 640
self.x_co_box.entry.delete(0,END)
self.x_co_box.entry.insert(0,wx)
self.y_co_box.entry.delete(0,END)
self.y_co_box.entry.insert(0,wy)
self.z_co_box.entry.delete(0,END)
self.z_co_box.entry.insert(0,wz)
self.search_result.set('Found: ' + pg_sector.name + ' (PG)')
self.search_x = wx
self.search_y = wy
self.search_performed = True
self.search_is_pg_sector = True
self.search_is_pg_x = wx - 640
self.search_is_pg_y = wy + 640
self.search_is_pg_z = wz - 640
cx = wx
cy = wy
cz = wz
except:
self.search_result.set('No match found.')
cx = round(cx,1)
cy = round(cy,1)
cz = round(cz,1)
# Clunky bit, as we need to update the position shown to reflect the new coordinates.
# Display the calculated position.
mousetext = 'X: ' + str(cx) + ' ly, Y: ' + str(cy)
mousetext += ' ly, Z: ' + str(cz) + ' ly.'
mousetext += ' (Galmap: ' + str(cx) + ', ' + str(cz) + ', ' + str(cy) + ' )'
# Calculate distance from Sol for the display.
d_from_sol = ((cx ** 2) + (cy ** 2) + (cz ** 2)) ** 0.5
if d_from_sol < 1000:
d_text = str(int(d_from_sol)) + ' ly from Sol.'
else:
d_text = str(round(d_from_sol / 1000,1)) + ' Kylies from Sol.'
mousetext += ' ' + d_text
self.data_mouse.set(mousetext)
self.auto_calculate(A)
def auto_calculate(self,A):
self.x_offset = float(self.x_co_box.entry.get())
self.y_offset = float(self.y_co_box.entry.get())
self.z_offset = float(self.z_co_box.entry.get())
self.scaling = float(self.scaling_box.entry.get())
self.search_target = str(self.search_box.entry.get()) # Redundant now?
self.highlight_target = str(self.highlight_box.entry.get())
self.filter_by_target = str(self.filter_by_box.entry.get())
# Check to see which stars fall within the highlight and filtering parameters.
dp = self.draw_pulsars.get() # Moved here for speed.
self.deferred_alpha = []
self.deferred = []
# First we refine the list to only those stars whose name fits the filter.
for f in findiv_list:
if self.highlight_target != '':
if self.highlight_target.upper() in f.name.upper():
self.deferred_alpha.append(f)
elif self.highlight_target == '*':
self.deferred_alpha.append(f)
# Look to see if we have a proc-gen sector.
found_pg = False
wname = ''
try:
pg_sector = pgnames.get_sector(self.filter_by_target,False)
# Offsets as the pg sectors ain't centred on Sol.
wname = pg_sector.name
# Gets the south-west-down corner.
wx_swd = (pg_sector.x * 1280) - 65
wy_swd = (pg_sector.z * 1280) - 1065
wz_swd = (pg_sector.y * 1280) - 25
found_pg = True
# Get the north-east-up corner. Or possibly NEU!
wx_neu = wx_swd + 1280
wy_neu = wy_swd + 1280
wz_neu = wz_swd + 1280
except:
found_pg = False
if found_pg == False:
# Now refine by sector. This only checks through HA sectors.
for d in self.deferred_alpha:
if self.filter_by_target != '':
d_is_in = single_member_of(d.x,d.y,d.z)
if self.filter_by_target.upper() in d_is_in.upper():
self.deferred.append(d)
else:
self.deferred.append(d)
else:
# This checks if we are in the boundaries of the given PG sector.
# Need to make sure that the stars are not in an HA sector instead.
for d in self.deferred_alpha:
if d.x >= wx_swd and d.x <= wx_neu:
if d.y >= wy_swd and d.y <= wy_neu:
if d.z >= wz_swd and d.z <= wz_neu:
# Use edts to get the sector name at the current position.
vector_alot = pgnames.vector3.Vector3(d.x, d.z, d.y)
# If the coordinates are too far out this can start to return odd values or fail, hence try-except.
try:
sector_alot = pgnames.get_sector_name(vector_alot) # as (x,z,y)
except:
sector_alot = ''
if sector_alot.upper() == wname.upper():
self.deferred.append(d)
if self.highlight_target != '':
self.draw_findiv.set(1)
self.update_image()
def update_image(self):
# Create a new image in PIL.
self.pil_image = Image.new('RGBA',(XDIM,YDIM),'white')
self.draw = ImageDraw.Draw(self.pil_image)
# Use galmap image as background? - could do, but confusing tbh. Make a toggle?
# Want to add axis lines for x or y = 0
x_axis = self.x_offset / self.scaling
y_axis = self.y_offset / self.scaling
self.draw.line(((XDIM/2 - x_axis,0),(XDIM/2 - x_axis,YDIM)), fill = 'gray', width = 1)
self.draw.line(((0,YDIM/2 + y_axis),(XDIM,YDIM/2 + y_axis)), fill = 'gray', width = 1)
# Want to draw the UA sphere.
if self.draw_misc.get() == 1:
cp_x = -78.6 - self.x_offset
cp_y = -340.5 - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
# Need to get "r on this z level"; draw inner boundary at 130 ly (?) - needs rechecking
r_z = radius_on_plane(-149.6,130,self.z_offset)
if r_z > 0:
adj_r = r_z / self.scaling
self.draw.ellipse(((adj_x-adj_r,adj_y-adj_r),(adj_x+adj_r,adj_y+adj_r)), outline = (255,0,255,255))
# Need to get "r on this z level"; draw outer boundary at 150 ly (?) - needs rechecking
r_z = radius_on_plane(-149,150,self.z_offset)
if r_z > 0:
adj_r = r_z / self.scaling
self.draw.ellipse(((adj_x-adj_r,adj_y-adj_r),(adj_x+adj_r,adj_y+adj_r)), outline = (255,0,255,255))
# Want to draw the Bubble extent.
if self.draw_misc.get() == 1:
cp_x = 0 - self.x_offset
cp_y = 0 - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
r_z = radius_on_plane(0,200,self.z_offset)
if r_z > 0:
adj_r = r_z / self.scaling
self.draw.ellipse(((adj_x-adj_r,adj_y-adj_r),(adj_x+adj_r,adj_y+adj_r)), outline = (0,0,255,255))
# And let's add one around Achenar, see if that works...
cp_x = 67.5 - self.x_offset
cp_y = 24.8 - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
r_z = radius_on_plane(-119.5,100,self.z_offset)
if r_z > 0:
adj_r = r_z / self.scaling
self.draw.ellipse(((adj_x-adj_r,adj_y-adj_r),(adj_x+adj_r,adj_y+adj_r)), outline = (0,0,255,255))
# And a little one for Colonia. If I need many more of these, should do them with a list.
cp_x = -9530.5 - self.x_offset
cp_y = 19808.1 - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
r_z = radius_on_plane(-910.3,40,self.z_offset)
if r_z > 0:
adj_r = r_z / self.scaling
self.draw.ellipse(((adj_x-adj_r,adj_y-adj_r),(adj_x+adj_r,adj_y+adj_r)), outline = (0,0,255,255))
# Draw the Reorte-Riedquat line.
if self.draw_misc.get() == 1:
# Riedquat (68.84375, 69.75, 48.75)
# Reorte (75.75, 75.15625, 48.75)
# Get the midpoint between Reorte and Riedquat.
midpoint_x = 72.296875
midpoint_y = 72.453125
# Get the slope of the line between Reorte and Riedquat.
x_diff = 75.75 - 68.84375
y_diff = 75.15625 - 69.75
line_start_x = midpoint_x - (RR_LENGTH * x_diff)
line_start_y = midpoint_y - (RR_LENGTH * y_diff)
line_end_x = midpoint_x + (RR_LENGTH * x_diff)
line_end_y = midpoint_y + (RR_LENGTH * y_diff)
line_start_x -= self.x_offset
line_start_y -= self.y_offset
line_end_x -= self.x_offset
line_end_y -= self.y_offset
ri_x = line_start_x
ri_y = line_start_y
re_x = line_end_x
re_y = line_end_y
adj_ls_x = XDIM/2 + (line_start_x / self.scaling)
adj_ls_y = YDIM/2 - (line_start_y / self.scaling)
adj_le_x = XDIM/2 + (line_end_x / self.scaling)
adj_le_y = YDIM/2 - (line_end_y / self.scaling)
self.draw.line(((adj_ls_x,adj_ls_y),(adj_le_x,adj_le_y)), fill = (0,0,255,255))
## # Draw (possible!) Guardians lines to Regor.
## if self.draw_misc.get() == 1:
## # Regor north about (1100,-30,-150), Regor south about (1100,-150,-150)
## self.doline(290,-7.9,1100,-30,(255,0,255,255))
## self.doline(290,-62.2,1100,-236,(255,0,255,255))
## # Draw current progress of Bright Star survey project.
## if self.draw_misc.get() == 1:
## self.doline(0,0,-8000,10000,(255,0,0,255))
# Draw Suppression corridor boundaries. (~x,y +/- 1100 ly Sol relative) Possibly add "Neutron field" rough extent markers?
# Disabled for the moment - need a better grasp on the shape.
## if self.draw_misc.get() == 1:
## x_axis_l = -380 - self.x_offset # This narrow boundary is roughly the distance from Sadge you need to go to see stellar remnants.
## x_axis_r = 410 - self.x_offset
#### y_axis_l = -1100 + self.y_offset
#### y_axis_r = 1100 + self.y_offset
##
## adj_x_l = XDIM/2 + (x_axis_l / self.scaling)
## adj_x_r = XDIM/2 + (x_axis_r / self.scaling)
#### adj_y_l = YDIM/2 + (y_axis_l / self.scaling)
#### adj_y_r = YDIM/2 + (y_axis_r / self.scaling)
##
## self.draw.line(((adj_x_l,0),(adj_x_l,YDIM)), fill = 'gray', width = 1)
## self.draw.line(((adj_x_r,0),(adj_x_r,YDIM)), fill = 'gray', width = 1)
##
#### self.draw.line(((0,adj_y_l),(XDIM,adj_y_l)), fill = 'gray', width = 1)
#### self.draw.line(((0,adj_y_r),(XDIM,adj_y_r)), fill = 'gray', width = 1)
## # Draw EAFOTS box.
## if self.draw_misc.get() == 1:
## # Southwest (-6466,-6186), northeast (-5186,-4906)
## ne_x = -5186 - self.x_offset
## ne_y = -4906 - self.y_offset
##
## sl = 1280
##
## sw_x = ne_x - sl
## sw_y = ne_y - sl
##
## adj_ne_x = XDIM/2 + (ne_x / self.scaling)
## adj_ne_y = YDIM/2 - (ne_y / self.scaling)
##
## adj_sw_x = XDIM/2 + (sw_x / self.scaling)
## adj_sw_y = YDIM/2 - (sw_y / self.scaling)
##
## box = ((adj_ne_x,adj_ne_y), (adj_sw_x,adj_sw_y))
## self.draw.rectangle(box, outline = (255,0,255,255))
# Iterates through drawing known pulsars.
if self.draw_pulsars.get() == 1:
for psr in pulsar_list:
cp_x = psr.x - self.x_offset
cp_y = psr.y - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
nametext = psr.name
if psr.status == 'Invisible':
psr_colour = (200,100,100,255)
elif psr.status == 'Permit-locked':
psr_colour = (255,0,0,255)
else:
psr_colour = (10,140,190,255)
star_colour = (160,160,160,255)
if abs(self.z_offset - psr.z) < PSR_Z_RANGE:
self.draw.ellipse(((adj_x-PSRSIZE,adj_y-PSRSIZE),(adj_x+PSRSIZE,adj_y+PSRSIZE)), fill = psr_colour)
self.draw.line(((adj_x - 2,adj_y - 2),(adj_x + 2,adj_y + 2)), fill = star_colour, width = 1)
self.draw.line(((adj_x - 2,adj_y + 2),(adj_x + 2,adj_y - 2)), fill = star_colour, width = 1)
self.draw.line(((adj_x,adj_y - 3),(adj_x,adj_y + 3)), fill = star_colour, width = 1)
self.draw.line(((adj_x - 3,adj_y),(adj_x + 3,adj_y)), fill = star_colour, width = 1)
if self.draw_names.get() == 1: # Could control this with a separate button.
self.draw.text((adj_x + FONTSIZE/2,adj_y - FONTSIZE/2),nametext,font = self.fnt,fill = psr_colour)
else:
self.draw.ellipse(((adj_x-PSRSIZE,adj_y-PSRSIZE),(adj_x+PSRSIZE,adj_y+PSRSIZE)), fill = psr_colour)
self.draw.line(((adj_x - 2,adj_y - 2),(adj_x + 2,adj_y + 2)), fill = star_colour, width = 1)
self.draw.line(((adj_x - 2,adj_y + 2),(adj_x + 2,adj_y - 2)), fill = star_colour, width = 1)
self.draw.line(((adj_x,adj_y - 3),(adj_x,adj_y + 3)), fill = star_colour, width = 1)
self.draw.line(((adj_x - 3,adj_y),(adj_x + 3,adj_y)), fill = star_colour, width = 1)
self.draw_hat(psr.z,adj_x,adj_y,psr_colour)
# Reworked bit for drawing filtered stars.
dp = self.draw_pulsars.get()
if self.draw_findiv.get() == 1:
if self.highlight_target != '':
for d in self.deferred:
cp_x = d.x - self.x_offset
cp_y = d.y - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
fc = (0,200,0,255)
if 'PSR' in d.name:
if dp == 0:
self.draw.line(((adj_x,adj_y - CROSSSIZE),(adj_x,adj_y + CROSSSIZE)),fill = fc)
self.draw.line(((adj_x - CROSSSIZE,adj_y),(adj_x + CROSSSIZE,adj_y)),fill = fc)
else:
self.draw.line(((adj_x,adj_y - CROSSSIZE),(adj_x,adj_y + CROSSSIZE)),fill = fc)
self.draw.line(((adj_x - CROSSSIZE,adj_y),(adj_x + CROSSSIZE,adj_y)),fill = fc)
# If no highlight target is set, draw all stars from the full individual list.
else:
for f in findiv_list:
cp_x = f.x - self.x_offset
cp_y = f.y - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
fc = (180,180,0,255)
if 'PSR' in f.name:
if dp == 0:
self.draw.line(((adj_x,adj_y - CROSSSIZE),(adj_x,adj_y + CROSSSIZE)),fill = fc)
self.draw.line(((adj_x - CROSSSIZE,adj_y),(adj_x + CROSSSIZE,adj_y)),fill = fc)
else:
self.draw.line(((adj_x,adj_y - CROSSSIZE),(adj_x,adj_y + CROSSSIZE)),fill = fc)
self.draw.line(((adj_x - CROSSSIZE,adj_y),(adj_x + CROSSSIZE,adj_y)),fill = fc)
# Iterates through drawing known POI.
if self.draw_poi.get() == 1:
for landmark in poi_list:
cp_x = landmark.x - self.x_offset
cp_y = landmark.y - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
nametext = landmark.name
if landmark.poi_type == 'Powerplay':
lm_colour = (150,20,230,255)
elif landmark.poi_type == 'Landmark':
if landmark.shipyard == 'Shipyard.':
lm_colour = (100,50,220,255)
else:
lm_colour = (50,50,220,255)
elif landmark.poi_type == 'Alien' or landmark.poi_type == 'Fungal':
lm_colour = (190,20,180,255)
elif landmark.poi_type == 'Permit':
lm_colour = (255,0,0,255)
elif landmark.poi_type == 'Asteroid Base':
if landmark.shipyard == 'Shipyard.':
lm_colour = (200,50,0,255)
else:
lm_colour = (150,50,0,255)
elif landmark.poi_type == 'Megaship':
lm_colour = (0,200,0,255)
else:
lm_colour = (45,180,225,255)
if abs(self.z_offset - landmark.z) < POI_Z_RANGE:
self.draw.ellipse(((adj_x-POISIZE,adj_y-POISIZE),(adj_x+POISIZE,adj_y+POISIZE)), fill = lm_colour)
if self.draw_names.get() == 1: # Could control this with a separate button.
self.draw.text((adj_x + FONTSIZE/2,adj_y - FONTSIZE/2),nametext,font = self.fnt,fill = lm_colour)
else:
self.draw.ellipse(((adj_x-POISIZE,adj_y-POISIZE),(adj_x+POISIZE,adj_y+POISIZE)), fill = lm_colour)
self.draw_hat(landmark.z,adj_x,adj_y,lm_colour)
# Iterates through drawing known player factions.
if self.draw_player.get() == 1:
for pf in player_list:
if pf.valid == 'Yes':
cp_x = pf.x - self.x_offset
cp_y = pf.y - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
nametext = str(pf.name)
t_colour = (130,160,40,255)
if abs(self.z_offset - pf.z) < PF_Z_RANGE:
self.draw.line(((adj_x,adj_y - CROSSSIZE),(adj_x,adj_y + CROSSSIZE)),fill = t_colour)
self.draw.line(((adj_x - CROSSSIZE,adj_y),(adj_x + CROSSSIZE,adj_y)),fill = t_colour)
if self.draw_names.get() == 1:
self.draw.text((adj_x + FONTSIZE/4,adj_y - FONTSIZE/2),nametext,font = self.fnt,fill = t_colour)
else:
self.draw.line(((adj_x,adj_y - CROSSSIZE),(adj_x,adj_y + CROSSSIZE)),fill = t_colour)
self.draw.line(((adj_x - CROSSSIZE,adj_y),(adj_x + CROSSSIZE,adj_y)),fill = t_colour)
self.draw_hat(pf.z,adj_x,adj_y,t_colour)
# Iterates through drawing known tourist locations.
if self.draw_tourist.get() == 1:
for destination in tourist_list:
cp_x = destination.x - self.x_offset
cp_y = destination.y - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
nametext = str(destination.number)
if nametext == '0':
nametext = '?'
t_colour = (10,110,10,255)
if abs(self.z_offset - destination.z) < TOURIST_Z_RANGE:
self.draw.ellipse(((adj_x-TOURISTSIZE,adj_y-TOURISTSIZE),(adj_x+TOURISTSIZE,adj_y+TOURISTSIZE)), fill = t_colour)
if self.draw_names.get() == 1: # This is slow.
self.draw.text((adj_x + FONTSIZE/4,adj_y - FONTSIZE/2),nametext,font = self.fnt,fill = t_colour)
else:
self.draw.ellipse(((adj_x-TOURISTSIZE,adj_y-TOURISTSIZE),(adj_x+TOURISTSIZE,adj_y+TOURISTSIZE)), fill = t_colour)
self.draw_hat(destination.z,adj_x,adj_y,t_colour)
# Iterates through drawing known rare goods.
if self.draw_rares.get() == 1:
for rare in rares_list:
cp_x = rare.x - self.x_offset
cp_y = rare.y - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
nametext = str(rare.name)
if rare.distance < RARE_MAX_DISTANCE:
t_colour = (240,90,30,255)
else:
t_colour = (100,100,100,255)
if abs(self.z_offset - rare.z) < RARE_Z_RANGE:
self.draw.ellipse(((adj_x-RARESIZE,adj_y-RARESIZE),(adj_x+RARESIZE,adj_y+RARESIZE)), fill = t_colour)
if self.draw_names.get() == 1: # This is slow.
self.draw.text((adj_x + FONTSIZE/4,adj_y - FONTSIZE/2),nametext,font = self.fnt,fill = t_colour)
else:
self.draw.ellipse(((adj_x-RARESIZE,adj_y-RARESIZE),(adj_x+RARESIZE,adj_y+RARESIZE)), fill = t_colour)
self.draw_hat(rare.z,adj_x,adj_y,t_colour)
# Iterate through drawing sector fills first.
if self.draw_fills.get() == 1:
for sector in ha_sec_list:
if sector.state == 'Open':
fc = (0,0,0,255)
elif sector.state == 'Permit-locked.':
fc = (255,0,0,255)
else:
fc = (130,80,60,255) # Don't really need this but whatever.
cp_x = sector.x - self.x_offset
cp_y = sector.y - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
# Need to get "r on this z level"
r_z = radius_on_plane(sector.z,sector.r,self.z_offset)
if r_z > 0:
adj_r = r_z / self.scaling
if sector.state != 'Not found':
if sector.state == 'Open':
self.draw.ellipse(((adj_x-adj_r,adj_y-adj_r),(adj_x+adj_r,adj_y+adj_r)), fill = (255,255,255,255))
else:
self.draw.ellipse(((adj_x-adj_r,adj_y-adj_r),(adj_x+adj_r,adj_y+adj_r)), fill = fc)
# Iterates through drawing known sectors.
for sector in ha_sec_list:
if sector.state == 'Open':
fc = (0,0,0,255)
elif sector.state == 'Permit-locked.':
fc = (255,0,0,255)
else:
fc = (130,80,60,255) # Don't really need this but whatever.
cp_x = sector.x - self.x_offset
cp_y = sector.y - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
# Need to get "r on this z level"
r_z = radius_on_plane(sector.z,sector.r,self.z_offset)
if r_z > 0:
adj_r = r_z / self.scaling
if sector.state != 'Not found':
self.draw.ellipse(((adj_x-adj_r,adj_y-adj_r),(adj_x+adj_r,adj_y+adj_r)), outline = fc)
# Placeholder indicators for object types.
# Not drawn are LM (Landmark) and OS (Open Cluster that is sparse or non-existent on the map)
if self.draw_indicators.get() == 1:
if sector.sec_type == 'NB': # Ordinary emission nebula.
self.draw.ellipse(((adj_x-NEBSIZE,adj_y-NEBSIZE),(adj_x+NEBSIZE,adj_y+NEBSIZE)), fill = (230,170,50,255))
elif sector.sec_type == 'NX': # Ordinary emission nebula known to host barnacles.
self.draw.ellipse(((adj_x-NEBSIZE,adj_y-NEBSIZE),(adj_x+NEBSIZE,adj_y+NEBSIZE)), fill = (230,170,50,255), outline = (190,20,180,255))
elif sector.sec_type == 'PN': # Planetary nebula.
self.draw.ellipse(((adj_x-NEBSIZE,adj_y-NEBSIZE),(adj_x+NEBSIZE,adj_y+NEBSIZE)), fill = (70,240,240,255))
self.draw.line(((adj_x - 2,adj_y - 2),(adj_x + 2,adj_y + 2)), fill = (30,180,190,255), width = 1)
self.draw.line(((adj_x - 2,adj_y + 2),(adj_x + 2,adj_y - 2)), fill = (30,180,190,255), width = 1)
self.draw.line(((adj_x,adj_y - 3),(adj_x,adj_y + 3)), fill = (30,180,190,255), width = 1)
self.draw.line(((adj_x - 3,adj_y),(adj_x + 3,adj_y)), fill = (30,180,190,255), width = 1)
elif sector.sec_type == 'DN': # Dark nebula.
self.draw.ellipse(((adj_x-NEBSIZE,adj_y-NEBSIZE),(adj_x+NEBSIZE,adj_y+NEBSIZE)), fill = (35,30,0,255))
elif sector.sec_type == 'OC': # Open Cluster of stars.
self.draw.line(((adj_x - 2,adj_y - 2),(adj_x + 2,adj_y + 2)), fill = 'black', width = 1)
self.draw.line(((adj_x - 2,adj_y + 2),(adj_x + 2,adj_y - 2)), fill = 'black', width = 1)
self.draw.line(((adj_x,adj_y - 3),(adj_x,adj_y + 3)), fill = 'black', width = 1)
self.draw.line(((adj_x - 3,adj_y),(adj_x + 3,adj_y)), fill = 'black', width = 1)
# Draw an indicator if we have a 'sector' which contains only a number of named stars.
r_solo = radius_on_plane(sector.z,SOLO_ASSUMED_RADIUS,self.z_offset)
if r_solo > 0:
adj_r = r_solo / self.scaling
if self.draw_indicators.get() == 1:
# Should I use a different indicator here, to avoid confusion with OC sectors? Or not?
if sector.sec_type == 'ST':
self.draw.line(((adj_x - 2,adj_y - 2),(adj_x + 2,adj_y + 2)), fill = 'black', width = 1)
self.draw.line(((adj_x - 2,adj_y + 2),(adj_x + 2,adj_y - 2)), fill = 'black', width = 1)
self.draw.line(((adj_x,adj_y - 3),(adj_x,adj_y + 3)), fill = 'black', width = 1)
self.draw.line(((adj_x - 3,adj_y),(adj_x + 3,adj_y)), fill = 'black', width = 1)
for sector in ha_sec_list:
if sector.state == 'Open':
fc = (0,0,0,255)
elif sector.state == 'Permit-locked.':
fc = (255,0,0,255)
else:
fc = (130,80,60,255)
cp_x = sector.x - self.x_offset
cp_y = sector.y - self.y_offset
adj_x = XDIM/2 + (cp_x / self.scaling)
adj_y = YDIM/2 - (cp_y / self.scaling)
# Only draw text for sectors which are present on this z level.
r_z = radius_on_plane(sector.z,sector.r,self.z_offset)
nametext = sector.name
if r_z > 0:
if self.draw_names.get() == 1:
self.draw.text((adj_x + FONTSIZE/2,adj_y - FONTSIZE/2),nametext,font = self.fnt,fill = fc)
if self.draw_crosses.get() == 1 and sector.state == 'Not found':
self.draw.line(((adj_x - CROSSSIZE,adj_y - CROSSSIZE),(adj_x + CROSSSIZE,adj_y + CROSSSIZE)), fill = fc, width = CROSSWIDTH)
self.draw.line(((adj_x - CROSSSIZE,adj_y + CROSSSIZE),(adj_x + CROSSSIZE,adj_y - CROSSSIZE)), fill = fc, width = CROSSWIDTH)
self.draw_hat(sector.z,adj_x,adj_y,fc)
else:
if self.draw_crosses.get() == 1:
self.draw.line(((adj_x - CROSSSIZE,adj_y - CROSSSIZE),(adj_x + CROSSSIZE,adj_y + CROSSSIZE)), fill = fc, width = CROSSWIDTH)
self.draw.line(((adj_x - CROSSSIZE,adj_y + CROSSSIZE),(adj_x + CROSSSIZE,adj_y - CROSSSIZE)), fill = fc, width = CROSSWIDTH)
if sector.sec_type != 'ST':
self.draw_hat(sector.z,adj_x,adj_y,fc)
# Draw text if we have a 'sector' which contains only a number of named stars.
r_solo = radius_on_plane(sector.z,SOLO_ASSUMED_RADIUS,self.z_offset)
if r_solo > 0:
if self.draw_names.get() == 1:
if sector.sec_type == 'ST':
self.draw.text((adj_x + FONTSIZE/2,adj_y - FONTSIZE/2),nametext,font = self.fnt,fill = fc)
else:
if self.draw_crosses.get() == 1:
if sector.sec_type == 'ST':
self.draw.line(((adj_x - CROSSSIZE,adj_y - CROSSSIZE),(adj_x + CROSSSIZE,adj_y + CROSSSIZE)), fill = fc, width = CROSSWIDTH)
self.draw.line(((adj_x - CROSSSIZE,adj_y + CROSSSIZE),(adj_x + CROSSSIZE,adj_y - CROSSSIZE)), fill = fc, width = CROSSWIDTH)
self.draw_hat(sector.z,adj_x,adj_y,fc)
# Draw a marker at the latest search location.
if self.search_performed:
s_x = self.search_x - self.x_offset
s_y = self.search_y - self.y_offset
adj_x = XDIM/2 + (s_x / self.scaling)
adj_y = YDIM/2 - (s_y / self.scaling)
s_col = (150,0,0,255)
self.draw.ellipse(((adj_x-SEARCH_SIZE_I,adj_y-SEARCH_SIZE_I),(adj_x+SEARCH_SIZE_I,adj_y+SEARCH_SIZE_I)), outline = s_col)
self.draw.ellipse(((adj_x-SEARCH_SIZE_O,adj_y-SEARCH_SIZE_O),(adj_x+SEARCH_SIZE_O,adj_y+SEARCH_SIZE_O)), outline = s_col)
self.draw.line(((adj_x,adj_y-SEARCH_SIZE_I),(adj_x,adj_y-SEARCH_SIZE_I-S_S_EXT)),fill = s_col,width = 2)
self.draw.line(((adj_x,adj_y+SEARCH_SIZE_I),(adj_x,adj_y+SEARCH_SIZE_I+S_S_EXT)),fill = s_col,width = 2)
self.draw.line(((adj_x-SEARCH_SIZE_I,adj_y),(adj_x-SEARCH_SIZE_I-S_S_EXT,adj_y)),fill = s_col,width = 2)
self.draw.line(((adj_x+SEARCH_SIZE_I,adj_y),(adj_x+SEARCH_SIZE_I+S_S_EXT,adj_y)),fill = s_col,width = 2)
# If we have a pg sector, draw a box showing its outlines.
if self.search_is_pg_sector:
nw_x = self.search_is_pg_x - self.x_offset
nw_y = self.search_is_pg_y - self.y_offset
sl = 1280
se_x = nw_x + sl
se_y = nw_y - sl
adj_nw_x = XDIM/2 + (nw_x / self.scaling)
adj_nw_y = YDIM/2 - (nw_y / self.scaling)
adj_se_x = XDIM/2 + (se_x / self.scaling)
adj_se_y = YDIM/2 - (se_y / self.scaling)
box = ((adj_nw_x,adj_nw_y), (adj_se_x,adj_se_y))
self.draw.rectangle(box, outline = (150,0,0,255))
# Convert the image to one that tkinter can use, and draw it to the canvas.
self.working_image = ImageTk.PhotoImage(self.pil_image)
self.image_on_canvas = self.map_canvas.create_image(0, 0, anchor = NW, image = self.working_image)
# Draws a line from one pair of coordinates to another (adjusting for offsets.)
def doline(self,x1,y1,x2,y2,colour):
s_x = x1 - self.x_offset
s_y = y1 - self.y_offset
e_x = x2 - self.x_offset
e_y = y2 - self.y_offset
adj_s_x = XDIM/2 + (s_x / self.scaling)
adj_s_y = YDIM/2 - (s_y / self.scaling)
adj_e_x = XDIM/2 + (e_x / self.scaling)
adj_e_y = YDIM/2 - (e_y / self.scaling)
self.draw.line(((adj_s_x,adj_s_y),(adj_e_x,adj_e_y)), fill = colour)
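# Illustrative note (not part of the original code): the world-to-canvas transform
# used throughout is adj_x = XDIM/2 + (x - x_offset)/scaling and
# adj_y = YDIM/2 - (y - y_offset)/scaling. For example, with the made-up values
# x_offset = y_offset = 0 and scaling = 1, the point (x, y) = (100, 50) ly maps to
# canvas pixel (290 + 100, 290 - 50) = (390, 240) on the 580x580 canvas.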
def draw_hat(self,working_z,adj_x,adj_y,fc):
if working_z > self.z_offset:
self.draw.line(((adj_x - CROSSSIZE,adj_y - (2 * CROSSSIZE)),(adj_x,adj_y - (3 * CROSSSIZE))), fill = fc, width = CROSSWIDTH)
self.draw.line(((adj_x,adj_y - (3 * CROSSSIZE)),(adj_x + CROSSSIZE,adj_y - (2 * CROSSSIZE))), fill = fc, width = CROSSWIDTH)
else:
self.draw.line(((adj_x - CROSSSIZE,adj_y + (2 * CROSSSIZE)),(adj_x,adj_y + (3 * CROSSSIZE))), fill = fc, width = CROSSWIDTH)
self.draw.line(((adj_x,adj_y + (3 * CROSSSIZE)),(adj_x + CROSSSIZE,adj_y + (2 * CROSSSIZE))), fill = fc, width = CROSSWIDTH)
def save(self):
# Save a .png of the current canvas.
filename = 'output.png'
self.pil_image.save(filename)
# Save a .csv file with stars in the current filter list.
filename = 'output.csv'
with open(filename, 'w') as opened:
opened.write('System,X,Y,Z,GalmapX,GalmapY,GalmapZ\n')
if self.filter_by_target != '':
for f in self.deferred:
opened.write(f.name + ',')
opened.write(str(f.x) + ',')
opened.write(str(f.y) + ',')
opened.write(str(f.z) + ',')
opened.write(str(f.x) + ',')
opened.write(str(f.z) + ',')
opened.write(str(f.y))
opened.write('\n')
else:
for f in self.deferred_alpha:
opened.write(f.name + ',')
opened.write(str(f.x) + ',')
opened.write(str(f.y) + ',')
opened.write(str(f.z) + ',')
opened.write(str(f.x) + ',')
opened.write(str(f.z) + ',')
opened.write(str(f.y))
opened.write('\n')
# Entry boxes with an attached label.
class Entry_Box():
def __init__(self,master,nametext,default,w1,w2):
# Create a frame for this entry box.
self.frame = Frame(master, padx = 6)
self.frame.pack(side = LEFT)
# Create a label.
self.label = Label(self.frame,text = nametext,width = w1)
self.label.pack(side = LEFT)
# Create an entry box.
self.entry = Entry(self.frame, width = w2)
self.entry.pack(side = LEFT)
self.entry.insert(0,default)
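# Illustrative usage (hypothetical widget names, not from the original file):
#
# x_co_box = Entry_Box(toolbar_frame, 'X', '0', 2, 8)
# value = float(x_co_box.entry.get())
#
# i.e. a label 'X' of width 2 next to an entry of width 8, pre-filled with '0'.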
# Class to hold details for the hand-authored sectors.
class ha_sec():
def __init__(self,name,x,y,z,r,state,sec_type,priority,a_nebula,a_star):
self.name = name
self.x = x
self.y = y
self.z = z
self.r = r
self.state = state
self.sec_type = sec_type
self.priority = priority
self.a_nebula = a_nebula
self.a_star = a_star
# Class to hold details for POI.
class poi():
def __init__(self,name,x,y,z,poi_type,star_system,body,lat,lon,notes,shipyard):
self.name = name
self.x = x
self.y = y
self.z = z
self.poi_type = poi_type
self.star_system = star_system
self.body = body
self.lat = lat
self.lon = lon
self.notes = notes
self.shipyard = shipyard
# setattr is for wimps and the competent.
# Class to hold details for tourist locations.
class tourist():
def __init__(self,number,name,system,x,y,z,description,body,location,distance):
self.number = number
self.name = name
self.system = system
self.x = x
self.y = y
self.z = z
self.description = description
self.body = body # Body the POI is near or on.
self.location = location # Whether the POI is in orbit or on the surface.
self.distance = distance # Distance from jump-in point.
class rare():
def __init__(self,system,station,name,quantity,x,y,z,distance):
self.system = system
self.station = station
self.name = name
self.quantity = quantity
self.x = x
self.y = y
self.z = z
self.distance = distance
class pulsar():
def __init__(self,system,x,y,z,status):
self.name = system
self.x = x
self.y = y
self.z = z
self.status = status
self.distance = ((x ** 2) + (y ** 2) + (z ** 2)) ** 0.5 # Is this needed for anything?
class player_faction():
def __init__(self,name,superpower,government,system,x,y,z,state,valid):
self.name = name
self.superpower = superpower
self.government = government
self.system = system
self.x = x
self.y = y
self.z = z
self.state = state
self.valid = valid
class findiv():
def __init__(self,name,x,y,z,distance):
self.name = name
self.x = x
self.y = y
self.z = z
self.distance = distance
def read_sectors_file(filename):
ha_sec_list = []
with open(filename,'r') as opened:
readtext = opened.read()
lines = readtext.split('\n')
for line in lines:
values = line.split(',')
try:
name = str(values[0])
x = float(values[1])
y = float(values[2])
z = float(values[3])
r = float(values[4])
state = str(values[5]) # Reads whether the sector is open, locked, or one of the erroneous sectors from the original dataset.
sec_type = str(values[6]) # Reads the type of sector - if it's an open cluster or nebula or what have you.
priority = int(values[8])
a_nebula = str(values[10])
a_star = str(values[11])
new_ha_sec = ha_sec(name,x,y,z,r,state,sec_type,priority,a_nebula,a_star)
ha_sec_list.append(new_ha_sec)
except Exception:
pass # Skip blank or otherwise unparsable lines.
return ha_sec_list
def read_poi_file(filename):
poi_list = []
with open(filename,'r') as opened:
readtext = opened.read()
lines = readtext.split('\n')
for line in lines:
values = line.split(',')
try:
name = str(values[0])
x = float(values[1])
y = float(values[2])
z = float(values[3])
poi_type = str(values[4])
star_system = str(values[5])
body = str(values[6])
try:
lat = float(values[7])
lon = float(values[8])
except:
lat = 0
lon = 0
notes = str(values[9])
shipyard = str(values[10])
new_poi = poi(name,x,y,z,poi_type,star_system,body,lat,lon,notes,shipyard)
poi_list.append(new_poi)
except Exception:
pass # Skip blank or otherwise unparsable lines.
return poi_list
def read_tourist_file(filename):
tourist_list = []
with open(filename,'r') as opened:
readtext = opened.read()
lines = readtext.split('\n')
for line in lines:
values = line.split(',')
try:
number = int(values[0])
name = str(values[1])
system = str(values[2])
x = float(values[3])
y = float(values[4])
z = float(values[5])
description = str(values[6])
body = str(values[8])
location = str(values[9])
distance = str(values[10])
new_tourist = tourist(number,name,system,x,y,z,description,body,location,distance)
tourist_list.append(new_tourist)
except Exception:
pass # Skip blank or otherwise unparsable lines.
return tourist_list
def read_rares_file(filename):
rares_list = []
with open(filename,'r') as opened:
readtext = opened.read()
lines = readtext.split('\n')
for line in lines:
values = line.split(',')
try:
system = str(values[0])
station = str(values[1])
name = str(values[2])
quantity = str(values[3])
x = float(values[4])
y = float(values[5])
z = float(values[6])
distance = int(values[7])
new_rare = rare(system,station,name,quantity,x,y,z,distance)
rares_list.append(new_rare)
except Exception:
pass # Skip blank or otherwise unparsable lines.
return rares_list
def read_pulsars_file(filename):
pulsar_list = []
with open(filename, 'r') as opened:
readtext = opened.read()
lines = readtext.split('\n')
for line in lines:
values = line.split(',')
try:
system = str(values[0])
x = float(values[1])
y = float(values[2])
z = float(values[3])
status = str(values[4])
new_pulsar = pulsar(system,x,y,z,status)
pulsar_list.append(new_pulsar)
except Exception:
pass # Skip blank or otherwise unparsable lines.
return pulsar_list
def read_players_file(filename):
player_list = []
with open(filename, 'r') as opened:
readtext = opened.read()
lines = readtext.split('\n')
for line in lines:
values = line.split(',')
try:
name = str(values[0])
superpower = str(values[1])
government = str(values[2])
system = str(values[3])
x = float(values[4])
y = float(values[5])
z = float(values[6])
state = str(values[7])
valid = str(values[10])
new_player_faction = player_faction(name,superpower,government,system,x,y,z,state,valid)
player_list.append(new_player_faction)
except Exception:
pass # Skip blank or otherwise unparsable lines.
return player_list
def read_findiv_file(filename):
findiv_list = []
with open(filename, 'r') as opened:
readtext = opened.read()
lines = readtext.split('\n')
for line in lines:
values = line.split(',')
try:
name = str(values[0])
x = float(values[1])
y = float(values[2])
z = float(values[3])
distance = float(values[4])
new_findiv = findiv(name,x,y,z,distance)
findiv_list.append(new_findiv)
except Exception:
pass # Skip blank or otherwise unparsable lines.
return findiv_list
# Find the radius of a given sector on a given z plane.
def radius_on_plane(z,r,z_target):
d = z - z_target
right = (r ** 2) - (d ** 2)
r_target = right ** 0.5
# If the slice misses the sphere entirely, right is negative and r_target comes
# back as a complex number, so anything that isn't a plain int/float means "no circle".
if isinstance(r_target, (int, float)):
r_return = round(r_target,1)
else:
r_return = 0
return r_return
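# Illustrative example (made-up numbers): a sector centred at z = 0 with radius 100,
# viewed on the z_target = 60 slice, appears as a circle of radius
# sqrt(100**2 - 60**2) = 80.0, so radius_on_plane(0, 100, 60) returns 80.0.
# On the z_target = 150 slice the sphere is missed entirely and 0 is returned.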
# Find which sectors are present at a given position.
def current_member_of(x,y,z):
current = []
for sector in ha_sec_list:
sx = sector.x
sy = sector.y
sz = sector.z
sr = sector.r
if (((sx-x) ** 2) + ((sy-y) ** 2) + ((sz-z) ** 2)) < (sr ** 2):
current.append(sector.name)
# Reverse the list, to give the highest priority in real terms (lowest number) first.
current.reverse()
return current
# Find the single primary sector present at a given position.
def single_member_of(x,y,z):
current = []
for sector in ha_sec_list:
sx = sector.x
sy = sector.y
sz = sector.z
sr = sector.r
if (((sx-x) ** 2) + ((sy-y) ** 2) + ((sz-z) ** 2)) < (sr ** 2):
current.append(sector.name)
# Reverse the list, to give the highest priority in real terms (lowest number) first.
current.reverse()
try:
result = current[0]
except:
result = ''
return result
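# Illustrative sketch (coordinates made up): both helpers use the same containment
# test, (sx-x)**2 + (sy-y)**2 + (sz-z)**2 < sr**2. For a point at Sol,
# current_member_of(0, 0, 0) returns every sector whose sphere contains Sol with the
# highest-priority name first, while single_member_of(0, 0, 0) returns only that
# first name, or '' when no sector matches.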
# Find which tourist destinations and POI are present at a given position. (2d only) Should maybe move this inside the main App class?
def current_tourist(x,y,scaling,d_lm,d_pr,d_ra,d_to,d_pf,d_fi,highlight_target,deferred):
current = []
# Might as well catch POI here as well.
if d_lm == 1:
for landmark in poi_list:
lx = landmark.x
ly = landmark.y
lr = POIACC * scaling
if (((lx-x) ** 2) + ((ly-y) ** 2)) < (lr ** 2):
# This is a bit clunky. Depending on the amount of information available on the POI, draw its system, body and lat/lon.
if landmark.star_system != '':
if landmark.lon != 0 and landmark.lat != 0:
landmark_text = landmark.name + ' (' + landmark.star_system + ' ' + landmark.body + ' at ' + str(landmark.lat) + ',' + str(landmark.lon) + ')'
else:
landmark_text = landmark.name + ' (' + landmark.star_system + ' ' + landmark.body + ')'
else:
landmark_text = landmark.name
current.append(landmark_text)
# And pulsars.
if d_pr == 1:
for psr in pulsar_list:
px = psr.x
py = psr.y
pr = PSRACC * scaling
if (((px-x) ** 2) + ((py-y) ** 2)) < (pr ** 2):
if psr.name != '':
psr_text = psr.name
current.append(psr_text)
# And player factions.
if d_pf == 1:
for pf in player_list:
pfx = pf.x
pfy = pf.y
pfr = PFACC * scaling
if (((pfx-x) ** 2) + ((pfy-y) ** 2) < (pfr ** 2)):
if pf.name != '' and pf.valid == 'Yes':
pf_text = pf.name
pf_text += ' (' + pf.system + ')'
current.append(pf_text)
# And might as well catch rare goods here.
if d_ra == 1:
for rare in rares_list:
rx = rare.x
ry = rare.y
rr = RAREACC * scaling
if (((rx-x) ** 2) + ((ry-y) ** 2)) < (rr ** 2):
if rare.name != '':
rare_text = rare.name + ' (' + rare.system + ','
available = ' ' + rare.quantity
rare_text += available
distance = ' @ ' + str(rare.distance) + ' ls)'
rare_text += distance
current.append(rare_text)
# Now go through the tourist destinations. Should maybe add the bodies to this list.
if d_to == 1:
for destination in tourist_list:
dx = destination.x
dy = destination.y
dr = TOURISTACC * scaling
if (((dx-x) ** 2) + ((dy-y) ** 2)) < (dr ** 2):
tourist_text = destination.name
if destination.number != 0:
tourist_text += ' (#' + str(destination.number) + ', ' + destination.system
else:
tourist_text += ' (#???, ' + destination.system
if destination.body != '':
tourist_text += ' ' + destination.body + ' ' + destination.location
if destination.distance != '':
tourist_text += ', ' + destination.distance + ' ls'
tourist_text += ')'
current.append(tourist_text)
# Let's try adding from the full list of individual stars; this could be slow.
# Need to change this to pull only from the filtered lists.
if d_fi == 1:
fr = FINDIVACC * scaling
if highlight_target == '':
for f in findiv_list:
if (((f.x - x) ** 2) + ((f.y - y) ** 2)) < (fr ** 2):
findiv_text = f.name
current.append(findiv_text)
else:
for f in deferred:
if highlight_target == '*':
if (((f.x - x) ** 2) + ((f.y - y) ** 2)) < (fr ** 2):
findiv_text = f.name
current.append(findiv_text)
elif highlight_target.upper() in f.name.upper():
if (((f.x - x) ** 2) + ((f.y - y) ** 2)) < (fr ** 2):
findiv_text = f.name
current.append(findiv_text)
return current
# Finds the nearest tourist POI that hasn't got a number yet. Just for gathering data.
def find_nearest_unchecked(t_list,x,y,z):
bestfit = ''
previous = ''
bestdistance = 1000000
previousbest = 1000000
for possible in t_list:
newdistance = ((x-possible.x)**2) + ((y-possible.y)**2) + ((z-possible.z)**2)
newdistance = newdistance ** 0.5
if newdistance < bestdistance:
if possible.number == 0:
previous = bestfit
previousbest = bestdistance
bestdistance = newdistance
bestfit = possible.name
return bestfit, bestdistance, previous, previousbest
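# Illustrative usage (reference position made up): pass the loaded tourist_list and
# a position to get the nearest still-unnumbered beacon plus a runner-up:
#
# best, dist, runner_up, runner_dist = find_nearest_unchecked(tourist_list, 0, 0, 0)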
# Global variables for controlling the display.
XDIM = 580
YDIM = 580
FONTSIZE = 10
CROSSSIZE = 2 # Size of cross markers.
CROSSWIDTH = 1 # Width of line for crosses - doesn't look very good if set higher than 1, though.
NEBSIZE = 3 # Size of nebulae.
POISIZE = 2 # Size of POI markers.
POI_Z_RANGE = 52 # Z range in which a POI marker will be drawn without a hat.
PSRSIZE = 1 # Size of pulsar markers.
PSR_Z_RANGE = 52 # Z range in which a Pulsar marker will be drawn without a hat. Could make this much larger than the others?
TOURISTSIZE = 1 # Size of Tourist markers.
TOURIST_Z_RANGE = 52 # Z range in which a Tourist marker will be drawn without a hat.
RARESIZE = 1 # Size of Rare Goods markers
RARE_Z_RANGE = 52 # Z range in which a Rare Goods marker will be drawn without a hat.
ZOOMSPEED = 2
RARE_MAX_DISTANCE = 55000 # Maximum distance that a rare good will be considered as practical.
PF_Z_RANGE = 52
RR_LENGTH = 2000 # Length of RR line to draw.
SOLO_ASSUMED_RADIUS = 110 # Effective radius of a "sector" which contains only individual named stars.
SEARCH_SIZE_I = 5 # Radius of inner search circle icon.
SEARCH_SIZE_O = 8 # Radius of outer search circle icon.
S_S_EXT = 5 # Length of search circle lines.
# Global variables for controlling the base accuracy of the mouseover searches. Can maybe do away with these now the scaling works properly.
PSRACC = 6
POIACC = 6
RAREACC = 6
TOURISTACC = 6
PFACC = 6
FINDIVACC = 6
# Variables that control the z +/- and scaling when the buttons are pressed.
Z_MOVE_RATE = 100 # Z axis change. Could change this to a "z-slice-size" and adjust the various XXX_Z_RANGE appropriately to half the slice size.
S_MOVE_RATE = 2 # Scaling change.
# Read sectors file.
filename = 'seclist_ra.csv'
ha_sec_list = read_sectors_file(filename)
ha_sec_list.sort(key = lambda sector:sector.priority, reverse = True)
# Compile a list of known ha sector names.
known_ha_secs = []
for sector in ha_sec_list:
known_ha_secs.append(sector.name)
# Read poi file.
filename = 'poilist.csv'
poi_list = read_poi_file(filename)
# Read tourist file.
filename = 'tourist_3.csv'
tourist_list = read_tourist_file(filename)
# Read rare goods file.
filename = 'rares.csv'
rares_list = read_rares_file(filename)
# Read pulsars file.
filename = 'pulsars.csv'
pulsar_list = read_pulsars_file(filename)
# Read player factions file.
filename = 'pfac.csv'
player_list = read_players_file(filename)
# Read full individual stars file.
filename = 'findiv.csv'
findiv_list = read_findiv_file(filename)
# Main loop.
root = Tk()
root.title('Jackie\'s Map (v.' + version + ')')
mainapp = App(root)
root.mainloop()
|
KayJohnston/jackies-map
|
jmap3r2.py
|
Python
|
bsd-3-clause
| 76,060
|
[
"Galaxy"
] |
48ecaaa316f1be6f2a21bb20aaafc800873d5989b65ee72ef0c4f1aad947b9c1
|
#!/usr/bin/env python
""" Functions and Constants common to all the executables."""
from __future__ import print_function
import redhawk
import redhawk.utils.util as U
import logging
import os
import string
import sys
import tempfile
import webbrowser
PYGRAPHVIZ_NOT_FOUND = """This feature requires pygraphviz, which does not
seem to be installed.
It can be obtained using easy_install from http://pypi.python.org/pypi/pygraphviz
or from the distro's native package manager. pygraphviz goes by the name
python-pygraphviz on debian/ubuntu."""
# The standard Redhawk Database file.
DB_NAME = redhawk.GetDBName()
# The program name
PROGRAM_NAME = sys.argv[0]
# Default string passed at the end of help in optparse
OPTIONS_DEFAULT_STRING = "[default: %default]"
def ShowImage(filename, eog=False):
if eog:
os.system('eog %s'%(filename))
return
try:
import Image
except ImportError as e:
print("Cannot find the Image module. Opening in your web-browser.")
webbrowser.open(filename)
return
im = Image.open(filename)
im.show()
return
def ShowASTAsImage(ast, eog=False):
try:
import redhawk.common.writers.dot_writer as D
except ImportError as e:
ExitWithError(PYGRAPHVIZ_NOT_FOUND + "\n\nError: " + str(e))
temp_name = tempfile.mktemp(suffix='.png')
D.WriteToImage(ast, filename=temp_name)
ShowImage(temp_name, eog)
return
def MakeStringFromTemplate(template):
""" Convert the template into a string, by substituting for $prog and
$db."""
return string.Template(template).safe_substitute(
prog=PROGRAM_NAME,
db=DB_NAME)
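# Illustrative example (placeholder values): if PROGRAM_NAME were 'redhawk' and
# DB_NAME were '.redhawk_db', then MakeStringFromTemplate("Usage: $prog add $db")
# would return "Usage: redhawk add .redhawk_db"; safe_substitute leaves any unknown
# placeholders untouched.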
def ExitWithError(s, error_code = 1):
"""A quieter version of redhawk.utils.util's ExitWithError"""
sys.stderr.write(s+"\n")
sys.exit(error_code)
def IsFileSupported(filepath):
""" Returns True if the file at `filepath` is supported by Redhawk. Else,
returns False"""
try:
U.GuessLanguage(filepath)
except KeyError as e:
return False
except ValueError as e:
return False
return True
def GetSupportedFiles(paths):
""" Return a list of files, (after walking through directories), that are
supported by redhawk."""
assert(type(paths) is list)
for p in paths:
if os.path.isdir(p):
for root, dirs, files in os.walk(p):
if '.git' in dirs: dirs.remove('.git') # don't visit git directories
for f in files:
path = os.path.join(root, f)
if IsFileSupported(path):
yield path
if os.path.isfile(p):
if IsFileSupported(p):
yield p
def GetDatabase():
""" Return the location to the redhawk database if any."""
try:
return U.FindFileInDirectoryOrAncestors(DB_NAME, os.curdir)
except IOError as e:
logging.warning("""The redhawk database exists, but does not have read & write
permissions. Fix this to prevent re-parsing. Carrying on..""")
return None
def GetKey(filepath, database):
""" Return the key corresponding to the given filepath."""
if not database:
return None
return os.path.relpath(filepath, os.path.dirname(database))
|
JordanMilne/Redhawk
|
redhawk/scripts/script_util.py
|
Python
|
bsd-2-clause
| 3,082
|
[
"VisIt"
] |
d89e28213d18965074182471b2ff76a621c4f7866c46df1876dd3f8f4ad5f5d9
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010, Almar Klein, Ant1, Marius van Voorden
#
# This code is subject to the (new) BSD license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Module images2gif
Provides functionality for reading and writing animated GIF images.
Use writeGif to write a series of numpy arrays or PIL images as an
animated GIF. Use readGif to read an animated gif as a series of numpy
arrays.
Acknowledgements
----------------
Many thanks to Ant1 for:
* noting the use of "palette=PIL.Image.ADAPTIVE", which significantly
improves the results.
* the modifications to save each image with its own palette, or optionally
the global palette (if its the same).
Many thanks to Marius van Voorden for porting the NeuQuant quantization
algorithm of Anthony Dekker to Python (See the NeuQuant class for its
license).
This code is based on gifmaker (in the scripts folder of the source
distribution of PIL).
Some implementation details are based on the GIF file structure as described
on Wikipedia.
"""
import os
try:
import PIL
from PIL import Image, ImageChops
from PIL.GifImagePlugin import getheader, getdata
except ImportError:
PIL = None
try:
import numpy as np
except ImportError:
np = None
try:
from scipy.spatial import cKDTree
except ImportError:
cKDTree = None
# getheader gives a 87a header and a color palette (two elements in a list).
# getdata()[0] gives the Image Descriptor up to (including) "LZW min code size".
# getdata()[1:] is the image data itself in chunks of 256 bytes (well,
# technically the first byte says how many bytes follow, after which that
# amount (max 255) follows).
def checkImages(images):
""" checkImages(images)
Check numpy images and correct intensity range etc.
The same for all movie formats.
"""
# Init results
images2 = []
for im in images:
if PIL and isinstance(im, PIL.Image.Image):
# We assume PIL images are all right
images2.append(im)
elif np and isinstance(im, np.ndarray):
# Check and convert dtype
if im.dtype == np.uint8:
images2.append(im) # Ok
elif im.dtype in [np.float32, np.float64]:
im = im.copy()
im[im<0] = 0
im[im>1] = 1
im *= 255
images2.append( im.astype(np.uint8) )
else:
im = im.astype(np.uint8)
images2.append(im)
# Check size
if im.ndim == 2:
pass # ok
elif im.ndim == 3:
if im.shape[2] not in [3,4]:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('Invalid image type: ' + str(type(im)))
# Done
return images2
def intToBin(i):
""" Integer to two bytes """
# divide into two parts (bytes)
i1 = i % 256
i2 = int( i/256)
# make string (little endian)
return chr(i1) + chr(i2)
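# Illustrative example: intToBin(300) returns chr(44) + chr(1), i.e. the two-byte
# little-endian encoding '\x2c\x01', since 300 = 44 + 1*256.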
def getheaderAnim(im):
""" Animation header. To replace the getheader()[0] """
bb = "GIF89a"
bb += intToBin(im.size[0])
bb += intToBin(im.size[1])
bb += "\x87\x00\x00"
return bb
def getImageDescriptor(im):
""" Used for the local color table properties per image.
Otherwise global color table applies to all frames irrespective of
wether additional colours comes in play that require a redefined palette
Still a maximum of 256 color per frame, obviously.
Written by Ant1 on 2010-08-22
"""
bb = '\x2C' # Image separator,
bb += intToBin( 0 ) # Left position
bb += intToBin( 0 ) # Top position
bb += intToBin( im.size[0] ) # image width
bb += intToBin( im.size[1] ) # image height
bb += '\x87' # packed field : local color table flag1, interlace0, sorted table0, reserved00, lct size111=7=2^(7+1)=256.
# LZW minimum size code now comes later, at the beginning of the [image data] blocks
return bb
#def getAppExt(loops=float('inf')):
#compile error commented by zcwang
def getAppExt(loops=float(0)):
""" Application extention. Part that specifies amount of loops.
If loops is inf, it goes on infinitely.
"""
if loops == 0:
loops = 2**16-1
#bb = "" # application extension should not be used
# (the extension interprets zero loops
# to mean an infinite number of loops)
# Mmm, does not seem to work
if True:
bb = "\x21\xFF\x0B" # application extension
bb += "NETSCAPE2.0"
bb += "\x03\x01"
# if loops == float('inf'):
if loops == float(0):
loops = 2**16-1
bb += intToBin(loops)
bb += '\x00' # end
return bb
def getGraphicsControlExt(duration=0.1):
""" Graphics Control Extension. A sort of header at the start of
each image. Specifies transparency and duration. """
bb = '\x21\xF9\x04'
bb += '\x08' # no transparency
bb += intToBin( int(duration*100) ) # in 100th of seconds
bb += '\x00' # no transparent color
bb += '\x00' # end
return bb
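# Illustrative example: getGraphicsControlExt(0.5) encodes a per-frame delay of
# int(0.5*100) = 50 hundredths of a second, so the duration field is
# intToBin(50) = '\x32\x00'.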
def _writeGifToFile(fp, images, durations, loops):
""" Given a set of images writes the bytes to the specified stream.
"""
# Obtain palette for all images and count each occurrence
palettes, occur = [], []
for im in images:
# FIX: Line changed thanks to StackOverflow: http://stackoverflow.com/questions/19149643/error-in-images2gif-py-with-globalpalette
palettes.append( im.palette.getdata()[1]) #getheader(im)[1] )
for palette in palettes:
occur.append( palettes.count( palette ) )
# Select most-used palette as the global one (or first in case no max)
globalPalette = palettes[ occur.index(max(occur)) ]
# Init
frames = 0
firstFrame = True
for im, palette in zip(images, palettes):
if firstFrame:
# Write header
# Gather info
header = getheaderAnim(im)
appext = getAppExt(loops)
# Write
fp.write(header)
fp.write(globalPalette)
fp.write(appext)
# Next frame is not the first
firstFrame = False
if True:
# Write palette and image data
# Gather info
data = getdata(im)
imdes, data = data[0], data[1:]
graphext = getGraphicsControlExt(durations[frames])
# Make image descriptor suitable for using 256 local color palette
lid = getImageDescriptor(im)
# Write local header
if palette != globalPalette:
# Use local color palette
fp.write(graphext)
fp.write(lid) # write suitable image descriptor
fp.write(palette) # write local color table
fp.write('\x08') # LZW minimum size code
else:
# Use global color palette
fp.write(graphext)
fp.write(imdes) # write suitable image descriptor
# Write image data
for d in data:
fp.write(d)
# Prepare for next round
frames = frames + 1
fp.write(";") # end gif
return frames
## Exposed functions
def writeGif(filename, images, duration=0.1, repeat=True, dither=False, nq=0):
""" writeGif(filename, images, duration=0.1, repeat=True, dither=False)
Write an animated gif from the specified images.
Parameters
----------
filename : string
The name of the file to write the image to.
images : list
Should be a list consisting of PIL images or numpy arrays.
The latter should be between 0 and 255 for integer types, and
between 0 and 1 for float types.
duration : scalar or list of scalars
The duration for all frames, or (if a list) for each frame.
repeat : bool or integer
The number of loops. If True, loops infinitely.
dither : bool
Whether to apply dithering
nq : integer
If nonzero, applies the NeuQuant quantization algorithm to create
the color palette. This algorithm is superior, but slower than
the standard PIL algorithm. The value of nq is the quality
parameter. 1 represents the best quality. 10 is in general a
good tradeoff between quality and speed.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to write animated gif files.")
# Check images
images = checkImages(images)
# Check loops
if repeat is False:
loops = 1
elif repeat is True:
loops = 0 # zero means infinite
else:
loops = int(repeat)
# Convert to PIL images
images2 = []
for im in images:
if isinstance(im, Image.Image):
images2.append(im)
elif np and isinstance(im, np.ndarray):
if im.ndim==3 and im.shape[2]==3:
im = Image.fromarray(im,'RGB')
elif im.ndim==2:
im = Image.fromarray(im,'L')
images2.append(im)
# Convert to paletted PIL images
images, images2 = images2, []
if nq >= 1:
# NeuQuant algorithm
for im in images:
im = im.convert("RGBA") # NQ assumes RGBA
nq = NeuQuant(im, int(nq)) # Learn colors from image
if dither:
im = im.convert("RGB").quantize(palette=nq.paletteImage())
else:
im = nq.quantize(im) # Use to quantize the image itself
images2.append(im)
else:
# Adaptive PIL algorithm
AD = Image.ADAPTIVE
for im in images:
im = im.convert('P', palette=AD, dither=dither)
images2.append(im)
# Check duration
if hasattr(duration, '__len__'):
if len(duration) == len(images2):
durations = [d for d in duration]
else:
raise ValueError("len(duration) doesn't match amount of images.")
else:
durations = [duration for im in images2]
# Open file
fp = open(filename, 'wb')
# Write
try:
n = _writeGifToFile(fp, images2, durations, loops)
finally:
fp.close()
def readGif(filename, asNumpy=True):
""" readGif(filename, asNumpy=True)
Read images from an animated GIF file. Returns a list of numpy
arrays, or, if asNumpy is false, a list of PIL images.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to read animated gif files.")
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy to read animated gif files.")
# Check whether it exists
if not os.path.isfile(filename):
raise IOError('File not found: '+str(filename))
# Load file using PIL
pilIm = PIL.Image.open(filename)
pilIm.seek(0)
# Read all images inside
images = []
try:
while True:
# Get image as numpy array
tmp = pilIm.convert() # Make without palette
a = np.asarray(tmp)
if len(a.shape)==0:
raise MemoryError("Too little memory to convert PIL image to array")
# Store, and next
images.append(a)
pilIm.seek(pilIm.tell()+1)
except EOFError:
pass
# Convert to normal PIL images if needed
if not asNumpy:
images2 = images
images = []
for im in images2:
images.append( PIL.Image.fromarray(im) )
# Done
return images
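# Illustrative usage (filename made up): frames = readGif('spinner.gif') gives a list
# of numpy arrays, one per frame; readGif('spinner.gif', asNumpy=False) gives PIL
# images instead.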
class NeuQuant:
""" NeuQuant(image, samplefac=10, colors=256)
samplefac should be an integer number of 1 or higher, 1
being the highest quality, but the slowest performance.
With a value of 10, one tenth of all pixels are used during
training. This value seems a nice tradeoff between speed
and quality.
colors is the amount of colors to reduce the image to. This
should best be a power of two.
See also:
http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
License of the NeuQuant Neural-Net Quantization Algorithm
---------------------------------------------------------
Copyright (c) 1994 Anthony Dekker
Ported to python by Marius van Voorden in 2010
NEUQUANT Neural-Net quantization algorithm by Anthony Dekker, 1994.
See "Kohonen neural networks for optimal colour quantization"
in "network: Computation in Neural Systems" Vol. 5 (1994) pp 351-367.
for a discussion of the algorithm.
See also http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
Any party obtaining a copy of these files from the author, directly or
indirectly, is granted, free of charge, a full and unrestricted irrevocable,
world-wide, paid up, royalty-free, nonexclusive right and license to deal
in this software and documentation files (the "Software"), including without
limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons who receive
copies from any such party to do so, with the only requirement being
that this copyright notice remain intact.
"""
NCYCLES = None # Number of learning cycles
NETSIZE = None # Number of colours used
SPECIALS = None # Number of reserved colours used
BGCOLOR = None # Reserved background colour
CUTNETSIZE = None
MAXNETPOS = None
INITRAD = None # For 256 colours, radius starts at 32
RADIUSBIASSHIFT = None
RADIUSBIAS = None
INITBIASRADIUS = None
RADIUSDEC = None # Factor of 1/30 each cycle
ALPHABIASSHIFT = None
INITALPHA = None # biased by 10 bits
GAMMA = None
BETA = None
BETAGAMMA = None
network = None # The network itself
colormap = None # The network itself
netindex = None # For network lookup - really 256
bias = None # Bias and freq arrays for learning
freq = None
pimage = None
# Four primes near 500 - assume no image has a length so large
# that it is divisible by all four primes
PRIME1 = 499
PRIME2 = 491
PRIME3 = 487
PRIME4 = 503
MAXPRIME = PRIME4
pixels = None
samplefac = None
a_s = None
def setconstants(self, samplefac, colors):
self.NCYCLES = 100 # Number of learning cycles
self.NETSIZE = colors # Number of colours used
self.SPECIALS = 3 # Number of reserved colours used
self.BGCOLOR = self.SPECIALS-1 # Reserved background colour
self.CUTNETSIZE = self.NETSIZE - self.SPECIALS
self.MAXNETPOS = self.NETSIZE - 1
self.INITRAD = self.NETSIZE/8 # For 256 colours, radius starts at 32
self.RADIUSBIASSHIFT = 6
self.RADIUSBIAS = 1 << self.RADIUSBIASSHIFT
self.INITBIASRADIUS = self.INITRAD * self.RADIUSBIAS
self.RADIUSDEC = 30 # Factor of 1/30 each cycle
self.ALPHABIASSHIFT = 10 # Alpha starts at 1
self.INITALPHA = 1 << self.ALPHABIASSHIFT # biased by 10 bits
self.GAMMA = 1024.0
self.BETA = 1.0/1024.0
self.BETAGAMMA = self.BETA * self.GAMMA
self.network = np.empty((self.NETSIZE, 3), dtype='float64') # The network itself
self.colormap = np.empty((self.NETSIZE, 4), dtype='int32') # The network itself
self.netindex = np.empty(256, dtype='int32') # For network lookup - really 256
self.bias = np.empty(self.NETSIZE, dtype='float64') # Bias and freq arrays for learning
self.freq = np.empty(self.NETSIZE, dtype='float64')
self.pixels = None
self.samplefac = samplefac
self.a_s = {}
def __init__(self, image, samplefac=10, colors=256):
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy for the NeuQuant algorithm.")
# Check image
if image.size[0] * image.size[1] < NeuQuant.MAXPRIME:
raise IOError("Image is too small")
assert image.mode == "RGBA"
# Initialize
self.setconstants(samplefac, colors)
self.pixels = np.fromstring(image.tostring(), np.uint32)
self.setUpArrays()
self.learn()
self.fix()
self.inxbuild()
def writeColourMap(self, rgb, outstream):
for i in range(self.NETSIZE):
bb = self.colormap[i,0]
gg = self.colormap[i,1]
rr = self.colormap[i,2]
outstream.write(rr if rgb else bb)
outstream.write(gg)
outstream.write(bb if rgb else rr)
return self.NETSIZE
def setUpArrays(self):
self.network[0,0] = 0.0 # Black
self.network[0,1] = 0.0
self.network[0,2] = 0.0
self.network[1,0] = 255.0 # White
self.network[1,1] = 255.0
self.network[1,2] = 255.0
# RESERVED self.BGCOLOR # Background
for i in range(self.SPECIALS):
self.freq[i] = 1.0 / self.NETSIZE
self.bias[i] = 0.0
for i in range(self.SPECIALS, self.NETSIZE):
p = self.network[i]
p[:] = (255.0 * (i-self.SPECIALS)) / self.CUTNETSIZE
self.freq[i] = 1.0 / self.NETSIZE
self.bias[i] = 0.0
# Omitted: setPixels
def altersingle(self, alpha, i, b, g, r):
"""Move neuron i towards biased (b,g,r) by factor alpha"""
n = self.network[i] # Alter hit neuron
n[0] -= (alpha*(n[0] - b))
n[1] -= (alpha*(n[1] - g))
n[2] -= (alpha*(n[2] - r))
def geta(self, alpha, rad):
try:
return self.a_s[(alpha, rad)]
except KeyError:
length = rad*2-1
mid = length/2
q = np.array(range(mid-1,-1,-1)+range(-1,mid))
a = alpha*(rad*rad - q*q)/(rad*rad)
a[mid] = 0
self.a_s[(alpha, rad)] = a
return a
def alterneigh(self, alpha, rad, i, b, g, r):
if i-rad >= self.SPECIALS-1:
lo = i-rad
start = 0
else:
lo = self.SPECIALS-1
start = (self.SPECIALS-1 - (i-rad))
if i+rad <= self.NETSIZE:
hi = i+rad
end = rad*2-1
else:
hi = self.NETSIZE
end = (self.NETSIZE - (i+rad))
a = self.geta(alpha, rad)[start:end]
p = self.network[lo+1:hi]
p -= np.transpose(np.transpose(p - np.array([b, g, r])) * a)
#def contest(self, b, g, r):
# """ Search for biased BGR values
# Finds closest neuron (min dist) and updates self.freq
# finds best neuron (min dist-self.bias) and returns position
# for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
# self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
#
# i, j = self.SPECIALS, self.NETSIZE
# dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
# bestpos = i + np.argmin(dists)
# biasdists = dists - self.bias[i:j]
# bestbiaspos = i + np.argmin(biasdists)
# self.freq[i:j] -= self.BETA * self.freq[i:j]
# self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
# self.freq[bestpos] += self.BETA
# self.bias[bestpos] -= self.BETAGAMMA
# return bestbiaspos
def contest(self, b, g, r):
""" Search for biased BGR values
Finds closest neuron (min dist) and updates self.freq
finds best neuron (min dist-self.bias) and returns position
for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
i, j = self.SPECIALS, self.NETSIZE
dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
bestpos = i + np.argmin(dists)
biasdists = dists - self.bias[i:j]
bestbiaspos = i + np.argmin(biasdists)
self.freq[i:j] *= (1-self.BETA)
self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
self.freq[bestpos] += self.BETA
self.bias[bestpos] -= self.BETAGAMMA
return bestbiaspos
def specialFind(self, b, g, r):
for i in range(self.SPECIALS):
n = self.network[i]
if n[0] == b and n[1] == g and n[2] == r:
return i
return -1
def learn(self):
biasRadius = self.INITBIASRADIUS
alphadec = 30 + ((self.samplefac-1)/3)
lengthcount = self.pixels.size
samplepixels = lengthcount / self.samplefac
delta = samplepixels / self.NCYCLES
alpha = self.INITALPHA
i = 0;
rad = biasRadius >> self.RADIUSBIASSHIFT
if rad <= 1:
rad = 0
print "Beginning 1D learning: samplepixels =",samplepixels," rad =", rad
step = 0
pos = 0
if lengthcount%NeuQuant.PRIME1 != 0:
step = NeuQuant.PRIME1
elif lengthcount%NeuQuant.PRIME2 != 0:
step = NeuQuant.PRIME2
elif lengthcount%NeuQuant.PRIME3 != 0:
step = NeuQuant.PRIME3
else:
step = NeuQuant.PRIME4
i = 0
printed_string = ''
while i < samplepixels:
if i%100 == 99:
tmp = '\b'*len(printed_string)
printed_string = str((i+1)*100/samplepixels)+"%\n"
print tmp + printed_string,
p = self.pixels[pos]
r = (p >> 16) & 0xff
g = (p >> 8) & 0xff
b = (p ) & 0xff
if i == 0: # Remember background colour
self.network[self.BGCOLOR] = [b, g, r]
j = self.specialFind(b, g, r)
if j < 0:
j = self.contest(b, g, r)
if j >= self.SPECIALS: # Don't learn for specials
a = (1.0 * alpha) / self.INITALPHA
self.altersingle(a, j, b, g, r)
if rad > 0:
self.alterneigh(a, rad, j, b, g, r)
pos = (pos+step)%lengthcount
i += 1
if i%delta == 0:
alpha -= alpha / alphadec
biasRadius -= biasRadius / self.RADIUSDEC
rad = biasRadius >> self.RADIUSBIASSHIFT
if rad <= 1:
rad = 0
print "Finished 1D learning: final alpha =",(1.0*alpha)/self.INITALPHA,"!"
def fix(self):
for i in range(self.NETSIZE):
for j in range(3):
x = int(0.5 + self.network[i,j])
x = max(0, x)
x = min(255, x)
self.colormap[i,j] = x
self.colormap[i,3] = i
def inxbuild(self):
previouscol = 0
startpos = 0
for i in range(self.NETSIZE):
p = self.colormap[i]
q = None
smallpos = i
smallval = p[1] # Index on g
# Find smallest in i..self.NETSIZE-1
for j in range(i+1, self.NETSIZE):
q = self.colormap[j]
if q[1] < smallval: # Index on g
smallpos = j
smallval = q[1] # Index on g
q = self.colormap[smallpos]
# Swap p (i) and q (smallpos) entries
if i != smallpos:
p[:],q[:] = q, p.copy()
# smallval entry is now in position i
if smallval != previouscol:
self.netindex[previouscol] = (startpos+i) >> 1
for j in range(previouscol+1, smallval):
self.netindex[j] = i
previouscol = smallval
startpos = i
self.netindex[previouscol] = (startpos+self.MAXNETPOS) >> 1
for j in range(previouscol+1, 256): # Really 256
self.netindex[j] = self.MAXNETPOS
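# inxbuild above selection-sorts the colormap on its green channel and fills
# netindex so that, for a given green value g, netindex[g] is a reasonable
# starting row for a nearest-colour scan of the sorted colormap. Note that
# inxsearch further down takes the simpler route of a brute-force argmin over
# all entries instead of using this index.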
def paletteImage(self):
""" PIL weird interface for making a paletted image: create an image which
already has the palette, and use that in Image.quantize. This function
returns this palette image. """
if self.pimage is None:
palette = []
for i in range(self.NETSIZE):
palette.extend(self.colormap[i][:3])
palette.extend([0]*(256-self.NETSIZE)*3)
# a palette image to use for quant
self.pimage = Image.new("P", (1, 1), 0)
self.pimage.putpalette(palette)
return self.pimage
def quantize(self, image):
""" Use a kdtree to quickly find the closest palette colors for the pixels """
if cKDTree:
return self.quantize_with_scipy(image)
else:
print 'Scipy not available, falling back to slower version.'
return self.quantize_without_scipy(image)
def quantize_with_scipy(self, image):
w,h = image.size
px = np.asarray(image).copy()
px2 = px[:,:,:3].reshape((w*h,3))
kdtree = cKDTree(self.colormap[:,:3],leafsize=10)
result = kdtree.query(px2)
colorindex = result[1]
print "Distance:", (result[0].sum()/(w*h))
px2[:] = self.colormap[colorindex,:3]
return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
def quantize_without_scipy(self, image):
"""" This function can be used if no scipy is availabe.
It's 7 times slower though.
"""
w,h = image.size
px = np.asarray(image).copy()
memo = {}
for j in range(w):
for i in range(h):
key = (px[i,j,0],px[i,j,1],px[i,j,2])
try:
val = memo[key]
except KeyError:
val = self.convert(key)
memo[key] = val
px[i,j,0],px[i,j,1],px[i,j,2] = val
return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
def convert(self, (r, g, b)):
i = self.inxsearch(r, g, b)
return self.colormap[i,:3]
def inxsearch(self, r, g, b):
"""Search for BGR values 0..255 and return colour index"""
dists = (self.colormap[:,:3] - np.array([r,g,b]))
a = np.argmin((dists*dists).sum(1))
return a
if __name__ == '__main__':
im = np.zeros((200,200), dtype=np.uint8)
im[10:30,:] = 100
im[:,80:120] = 255
im[-50:-40,:] = 50
images = [im*1.0, im*0.8, im*0.6, im*0.4, im*0]
writeGif('lala3.gif',images, duration=0.5, dither=0)
|
jrwilliams/gif-hook
|
images2gif.py
|
Python
|
mit
| 29,501
|
[
"NEURON"
] |
c0efbb397c7bf948229ec48084d80416a766141f2e6cbda064ebbc45983bb4b1
|
from __future__ import absolute_import
from typing import Any, Dict, List, Set, Tuple, TypeVar, Text, \
Union, Optional, Sequence, AbstractSet, Pattern, AnyStr
from typing.re import Match
from zerver.lib.str_utils import NonBinaryStr
from django.db import models
from django.db.models.query import QuerySet
from django.db.models import Manager
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, UserManager, \
PermissionsMixin
import django.contrib.auth
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
from django.dispatch import receiver
from zerver.lib.cache import cache_with_key, flush_user_profile, flush_realm, \
user_profile_by_id_cache_key, user_profile_by_email_cache_key, \
generic_bulk_cached_fetch, cache_set, flush_stream, \
display_recipient_cache_key, cache_delete, \
get_stream_cache_key, active_user_dicts_in_realm_cache_key, \
active_bot_dicts_in_realm_cache_key, active_user_dict_fields, \
active_bot_dict_fields, flush_message
from zerver.lib.utils import make_safe_digest, generate_random_token
from zerver.lib.str_utils import ModelReprMixin
from django.db import transaction
from zerver.lib.camo import get_camo_url
from django.utils import timezone
from django.contrib.sessions.models import Session
from zerver.lib.timestamp import datetime_to_timestamp
from django.db.models.signals import pre_save, post_save, post_delete
from django.core.validators import MinLengthValidator, RegexValidator
from django.utils.translation import ugettext_lazy as _
from zerver.lib import cache
from bitfield import BitField
from bitfield.types import BitHandler
from collections import defaultdict
from datetime import timedelta
import pylibmc
import re
import logging
import sre_constants
import time
import datetime
MAX_SUBJECT_LENGTH = 60
MAX_MESSAGE_LENGTH = 10000
MAX_LANGUAGE_ID_LENGTH = 50 # type: int
STREAM_NAMES = TypeVar('STREAM_NAMES', Sequence[Text], AbstractSet[Text])
# Doing 1000 remote cache requests to get_display_recipient is quite slow,
# so add a local per-request cache in addition to the remote cache.
per_request_display_recipient_cache = {} # type: Dict[int, List[Dict[str, Any]]]
def get_display_recipient_by_id(recipient_id, recipient_type, recipient_type_id):
# type: (int, int, int) -> Union[Text, List[Dict[str, Any]]]
if recipient_id not in per_request_display_recipient_cache:
result = get_display_recipient_remote_cache(recipient_id, recipient_type, recipient_type_id)
per_request_display_recipient_cache[recipient_id] = result
return per_request_display_recipient_cache[recipient_id]
def get_display_recipient(recipient):
# type: (Recipient) -> Union[Text, List[Dict[str, Any]]]
return get_display_recipient_by_id(
recipient.id,
recipient.type,
recipient.type_id
)
def flush_per_request_caches():
# type: () -> None
global per_request_display_recipient_cache
per_request_display_recipient_cache = {}
global per_request_realm_filters_cache
per_request_realm_filters_cache = {}
@cache_with_key(lambda *args: display_recipient_cache_key(args[0]),
timeout=3600*24*7)
def get_display_recipient_remote_cache(recipient_id, recipient_type, recipient_type_id):
# type: (int, int, int) -> Union[Text, List[Dict[str, Any]]]
"""
returns: an appropriate object describing the recipient. For a
stream this will be the stream name as a string. For a huddle or
personal, it will be an array of dicts about each recipient.
"""
if recipient_type == Recipient.STREAM:
stream = Stream.objects.get(id=recipient_type_id)
return stream.name
# We don't really care what the ordering is, just that it's deterministic.
user_profile_list = (UserProfile.objects.filter(subscription__recipient_id=recipient_id)
.select_related()
.order_by('email'))
return [{'email': user_profile.email,
'domain': user_profile.realm.domain,
'full_name': user_profile.full_name,
'short_name': user_profile.short_name,
'id': user_profile.id,
'is_mirror_dummy': user_profile.is_mirror_dummy} for user_profile in user_profile_list]
def get_realm_emoji_cache_key(realm):
# type: (Realm) -> Text
return u'realm_emoji:%s' % (realm.id,)
class Realm(ModelReprMixin, models.Model):
# domain is a domain in the Internet sense. It must be structured like a
# valid email domain. We use it to restrict access, identify bots, etc.
domain = models.CharField(max_length=40, db_index=True, unique=True) # type: Text
# name is the user-visible identifier for the realm. It has no required
# structure.
AUTHENTICATION_FLAGS = [u'Google', u'Email', u'GitHub', u'LDAP', u'Dev', u'RemoteUser']
name = models.CharField(max_length=40, null=True) # type: Optional[Text]
string_id = models.CharField(max_length=40, unique=True) # type: Text
restricted_to_domain = models.BooleanField(default=False) # type: bool
invite_required = models.BooleanField(default=True) # type: bool
invite_by_admins_only = models.BooleanField(default=False) # type: bool
create_stream_by_admins_only = models.BooleanField(default=False) # type: bool
add_emoji_by_admins_only = models.BooleanField(default=False) # type: bool
mandatory_topics = models.BooleanField(default=False) # type: bool
show_digest_email = models.BooleanField(default=True) # type: bool
name_changes_disabled = models.BooleanField(default=False) # type: bool
allow_message_editing = models.BooleanField(default=True) # type: bool
DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS = 600 # if changed, also change in admin.js
message_content_edit_limit_seconds = models.IntegerField(default=DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS) # type: int
message_retention_days = models.IntegerField(null=True) # type: Optional[int]
# Valid org_types are {CORPORATE, COMMUNITY}
CORPORATE = 1
COMMUNITY = 2
org_type = models.PositiveSmallIntegerField(default=COMMUNITY) # type: int
date_created = models.DateTimeField(default=timezone.now) # type: datetime.datetime
notifications_stream = models.ForeignKey('Stream', related_name='+', null=True, blank=True) # type: Optional[Stream]
deactivated = models.BooleanField(default=False) # type: bool
default_language = models.CharField(default=u'en', max_length=MAX_LANGUAGE_ID_LENGTH) # type: Text
authentication_methods = BitField(flags=AUTHENTICATION_FLAGS,
default=2**31 - 1) # type: BitHandler
waiting_period_threshold = models.PositiveIntegerField(default=0) # type: int
DEFAULT_NOTIFICATION_STREAM_NAME = u'announce'
def authentication_methods_dict(self):
# type: () -> Dict[Text, bool]
"""Returns the a mapping from authentication flags to their status,
showing only those authentication flags that are supported on
the current server (i.e. if EmailAuthBackend is not configured
on the server, this will not return an entry for "Email")."""
# This mapping needs to be imported from here due to the cyclic
# dependency.
from zproject.backends import AUTH_BACKEND_NAME_MAP
ret = {} # type: Dict[Text, bool]
supported_backends = {backend.__class__ for backend in django.contrib.auth.get_backends()}
for k, v in self.authentication_methods.iteritems():
backend = AUTH_BACKEND_NAME_MAP[k]
if backend in supported_backends:
ret[k] = v
return ret
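# The resulting dict is keyed by the AUTHENTICATION_FLAGS names, e.g. (with a
# hypothetical server configuration) {u'Email': True, u'Google': False}; flags
# whose backend is not enabled on this server are simply omitted.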
def __unicode__(self):
# type: () -> Text
return u"<Realm: %s %s>" % (self.domain, self.id)
@cache_with_key(get_realm_emoji_cache_key, timeout=3600*24*7)
def get_emoji(self):
# type: () -> Dict[Text, Dict[str, Text]]
return get_realm_emoji_uncached(self)
@property
def deployment(self):
# type: () -> Any # returns a Deployment from zilencer.models
# see https://github.com/zulip/zulip/issues/1845 before you
# attempt to add test coverage for this method, as we may
# be revisiting the deployments model soon
try:
return self._deployments.all()[0]
except IndexError:
return None
@deployment.setter # type: ignore # https://github.com/python/mypy/issues/220
def set_deployments(self, value):
# type: (Any) -> None
self._deployments = [value] # type: Any
def get_admin_users(self):
# type: () -> Sequence[UserProfile]
# TODO: Change return type to QuerySet[UserProfile]
return UserProfile.objects.filter(realm=self, is_realm_admin=True,
is_active=True).select_related()
def get_active_users(self):
# type: () -> Sequence[UserProfile]
# TODO: Change return type to QuerySet[UserProfile]
return UserProfile.objects.filter(realm=self, is_active=True).select_related()
@property
def subdomain(self):
# type: () -> Optional[Text]
if settings.REALMS_HAVE_SUBDOMAINS:
return self.string_id
return None
@property
def uri(self):
# type: () -> str
if settings.REALMS_HAVE_SUBDOMAINS and self.subdomain is not None:
return '%s%s.%s' % (settings.EXTERNAL_URI_SCHEME,
self.subdomain, settings.EXTERNAL_HOST)
return settings.SERVER_URI
@property
def host(self):
# type: () -> str
if settings.REALMS_HAVE_SUBDOMAINS and self.subdomain is not None:
return "%s.%s" % (self.subdomain, settings.EXTERNAL_HOST)
return settings.EXTERNAL_HOST
@property
def is_zephyr_mirror_realm(self):
# type: () -> bool
return self.domain == "mit.edu"
@property
def webathena_enabled(self):
# type: () -> bool
return self.is_zephyr_mirror_realm
@property
def presence_disabled(self):
# type: () -> bool
return self.is_zephyr_mirror_realm
class Meta(object):
permissions = (
('administer', "Administer a realm"),
('api_super_user', "Can send messages as other users for mirroring"),
)
post_save.connect(flush_realm, sender=Realm)
def get_realm(string_id):
# type: (Text) -> Optional[Realm]
if not string_id:
return None
try:
return Realm.objects.get(string_id=string_id)
except Realm.DoesNotExist:
return None
def completely_open(realm):
# type: (Realm) -> bool
# This realm is completely open to everyone on the internet to
# join. E-mail addresses do not need to match a realmalias and
# an invite from an existing user is not required.
if not realm:
return False
return not realm.invite_required and not realm.restricted_to_domain
def get_unique_open_realm():
# type: () -> Optional[Realm]
"""We only return a realm if there is a unique non-system-only realm,
it is completely open, and there are no subdomains."""
if settings.REALMS_HAVE_SUBDOMAINS:
return None
realms = Realm.objects.filter(deactivated=False)
# On production installations, the (usually "zulip.com") system
# realm is an empty realm just used for system bots, so don't
# include it in this accounting.
realms = realms.exclude(domain__in=settings.SYSTEM_ONLY_REALMS)
if len(realms) != 1:
return None
realm = realms[0]
if realm.invite_required or realm.restricted_to_domain:
return None
return realm
def name_changes_disabled(realm):
# type: (Optional[Realm]) -> bool
if realm is None:
return settings.NAME_CHANGES_DISABLED
return settings.NAME_CHANGES_DISABLED or realm.name_changes_disabled
class RealmAlias(models.Model):
realm = models.ForeignKey(Realm, null=True) # type: Optional[Realm]
# should always be stored lowercase
domain = models.CharField(max_length=80, db_index=True) # type: Text
def can_add_alias(domain):
# type: (Text) -> bool
if settings.REALMS_HAVE_SUBDOMAINS:
return True
if RealmAlias.objects.filter(domain=domain).exists():
return False
return True
# These functions should only be used on email addresses that have
# been validated via django.core.validators.validate_email
#
# Note that we need to use some care, since you can have multiple @-signs; e.g.
# "tabbott@test"@zulip.com
# is a valid email address.
def email_to_username(email):
# type: (Text) -> Text
return "@".join(email.split("@")[:-1]).lower()
# Returns the raw domain portion of the desired email address
def email_to_domain(email):
# type: (Text) -> Text
return email.split("@")[-1].lower()
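# For illustration, using the quoted address mentioned above:
#   email_to_username('"tabbott@test"@zulip.com') == '"tabbott@test"'
#   email_to_domain('"tabbott@test"@zulip.com') == 'zulip.com'
# i.e. only the final @ separates the username from the domain.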
class GetRealmByDomainException(Exception):
pass
def get_realm_by_email_domain(email):
# type: (Text) -> Optional[Realm]
if settings.REALMS_HAVE_SUBDOMAINS:
raise GetRealmByDomainException(
"Cannot get realm from email domain when settings.REALMS_HAVE_SUBDOMAINS = True")
try:
alias = RealmAlias.objects.select_related('realm').get(domain = email_to_domain(email))
return alias.realm
except RealmAlias.DoesNotExist:
return None
# Is a user with the given email address allowed to be in the given realm?
# (This function does not check whether the user has been invited to the realm.
# So for invite-only realms, this is the test for whether a user can be invited,
# not whether the user can sign up currently.)
def email_allowed_for_realm(email, realm):
# type: (Text, Realm) -> bool
if not realm.restricted_to_domain:
return True
domain = email_to_domain(email)
return RealmAlias.objects.filter(realm = realm, domain = domain).exists()
def list_of_domains_for_realm(realm):
# type: (Realm) -> List[Text]
return list(RealmAlias.objects.filter(realm = realm).values_list('domain', flat=True))
class RealmEmoji(ModelReprMixin, models.Model):
author = models.ForeignKey('UserProfile', blank=True, null=True)
realm = models.ForeignKey(Realm) # type: Realm
# Second part of the regex (negative lookbehind) disallows names ending with one of the punctuation characters
name = models.TextField(validators=[MinLengthValidator(1),
RegexValidator(regex=r'^[0-9a-zA-Z.\-_]+(?<![.\-_])$',
message=_("Invalid characters in Emoji name"))]) # type: Text
# URLs start running into browser compatibility problems beyond roughly 2000
# characters, so 1000 seems like a safe limit.
img_url = models.URLField(max_length=1000) # type: Text
class Meta(object):
unique_together = ("realm", "name")
def __unicode__(self):
# type: () -> Text
return u"<RealmEmoji(%s): %s %s>" % (self.realm.domain, self.name, self.img_url)
def get_realm_emoji_uncached(realm):
# type: (Realm) -> Dict[Text, Dict[str, Text]]
d = {}
for row in RealmEmoji.objects.filter(realm=realm).select_related('author'):
if row.author:
author = {
'id': row.author.id,
'email': row.author.email,
'full_name': row.author.full_name}
else:
author = None
d[row.name] = dict(source_url=row.img_url,
display_url=get_camo_url(row.img_url),
author=author)
return d
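# Shape of the result, with made-up values:
#   {u'custom_smile': {'source_url': u'https://example.com/smile.png',
#                      'display_url': u'<camo proxy URL for the same image>',
#                      'author': {'id': 7, 'email': u'user@example.com',
#                                 'full_name': u'Some User'}}}
# 'author' is None for emoji whose uploader is unknown.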
def flush_realm_emoji(sender, **kwargs):
# type: (Any, **Any) -> None
realm = kwargs['instance'].realm
cache_set(get_realm_emoji_cache_key(realm),
get_realm_emoji_uncached(realm),
timeout=3600*24*7)
post_save.connect(flush_realm_emoji, sender=RealmEmoji)
post_delete.connect(flush_realm_emoji, sender=RealmEmoji)
def filter_pattern_validator(value):
# type: (Text) -> None
regex = re.compile(r'(?:[\w\-#]*)(\(\?P<\w+>.+\))')
error_msg = 'Invalid filter pattern, you must use the following format OPTIONAL_PREFIX(?P<id>.+)'
if not regex.match(str(value)):
raise ValidationError(error_msg)
try:
re.compile(value)
except sre_constants.error:
# Regex is invalid
raise ValidationError(error_msg)
def filter_format_validator(value):
# type: (str) -> None
regex = re.compile(r'^[\.\/:a-zA-Z0-9_-]+%\(([a-zA-Z0-9_-]+)\)s[a-zA-Z0-9_-]*$')
if not regex.match(value):
raise ValidationError('URL format string must be in the following format: `https://example.com/%(\w+)s`')
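# Illustrative example (hypothetical filter, not shipped with the codebase):
# a RealmFilter with pattern '#(?P<id>[0-9]+)' and url_format_string
# 'https://github.com/zulip/zulip/issues/%(id)s' passes both validators above
# and would turn "#1234" in a message into a link to issue 1234.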
class RealmFilter(models.Model):
realm = models.ForeignKey(Realm) # type: Realm
pattern = models.TextField(validators=[filter_pattern_validator]) # type: Text
url_format_string = models.TextField(validators=[URLValidator, filter_format_validator]) # type: Text
class Meta(object):
unique_together = ("realm", "pattern")
def __unicode__(self):
# type: () -> Text
return u"<RealmFilter(%s): %s %s>" % (self.realm.domain, self.pattern, self.url_format_string)
def get_realm_filters_cache_key(realm_id):
# type: (int) -> Text
return u'all_realm_filters:%s' % (realm_id,)
# We have a per-process cache to avoid doing 1000 remote cache queries during page load
per_request_realm_filters_cache = {} # type: Dict[int, List[Tuple[Text, Text, int]]]
def realm_in_local_realm_filters_cache(realm_id):
# type: (int) -> bool
return realm_id in per_request_realm_filters_cache
def realm_filters_for_realm(realm_id):
# type: (int) -> List[Tuple[Text, Text, int]]
if not realm_in_local_realm_filters_cache(realm_id):
per_request_realm_filters_cache[realm_id] = realm_filters_for_realm_remote_cache(realm_id)
return per_request_realm_filters_cache[realm_id]
@cache_with_key(get_realm_filters_cache_key, timeout=3600*24*7)
def realm_filters_for_realm_remote_cache(realm_id):
# type: (int) -> List[Tuple[Text, Text, int]]
filters = []
for realm_filter in RealmFilter.objects.filter(realm_id=realm_id):
filters.append((realm_filter.pattern, realm_filter.url_format_string, realm_filter.id))
return filters
def all_realm_filters():
# type: () -> Dict[int, List[Tuple[Text, Text, int]]]
filters = defaultdict(list) # type: Dict[int, List[Tuple[Text, Text, int]]]
for realm_filter in RealmFilter.objects.all():
filters[realm_filter.realm_id].append((realm_filter.pattern, realm_filter.url_format_string, realm_filter.id))
return filters
def flush_realm_filter(sender, **kwargs):
# type: (Any, **Any) -> None
realm_id = kwargs['instance'].realm_id
cache_delete(get_realm_filters_cache_key(realm_id))
try:
per_request_realm_filters_cache.pop(realm_id)
except KeyError:
pass
post_save.connect(flush_realm_filter, sender=RealmFilter)
post_delete.connect(flush_realm_filter, sender=RealmFilter)
class UserProfile(ModelReprMixin, AbstractBaseUser, PermissionsMixin):
DEFAULT_BOT = 1
"""
Incoming webhook bots are limited to only sending messages via webhooks.
Thus, it is less of a security risk to expose their API keys to third-party services,
since they can't be used to read messages.
"""
INCOMING_WEBHOOK_BOT = 2
# Fields from models.AbstractUser minus last_name and first_name,
# which we don't use; email is modified to make it indexed and unique.
email = models.EmailField(blank=False, db_index=True, unique=True) # type: Text
is_staff = models.BooleanField(default=False) # type: bool
is_active = models.BooleanField(default=True, db_index=True) # type: bool
is_realm_admin = models.BooleanField(default=False, db_index=True) # type: bool
is_bot = models.BooleanField(default=False, db_index=True) # type: bool
bot_type = models.PositiveSmallIntegerField(null=True, db_index=True) # type: Optional[int]
is_api_super_user = models.BooleanField(default=False, db_index=True) # type: bool
date_joined = models.DateTimeField(default=timezone.now) # type: datetime.datetime
is_mirror_dummy = models.BooleanField(default=False) # type: bool
bot_owner = models.ForeignKey('self', null=True, on_delete=models.SET_NULL) # type: Optional[UserProfile]
USERNAME_FIELD = 'email'
MAX_NAME_LENGTH = 100
# Our custom site-specific fields
full_name = models.CharField(max_length=MAX_NAME_LENGTH) # type: Text
short_name = models.CharField(max_length=MAX_NAME_LENGTH) # type: Text
# pointer points to Message.id, NOT UserMessage.id.
pointer = models.IntegerField() # type: int
last_pointer_updater = models.CharField(max_length=64) # type: Text
realm = models.ForeignKey(Realm) # type: Realm
api_key = models.CharField(max_length=32) # type: Text
tos_version = models.CharField(null=True, max_length=10) # type: Text
### Notifications settings. ###
# Stream notifications.
enable_stream_desktop_notifications = models.BooleanField(default=False) # type: bool
enable_stream_sounds = models.BooleanField(default=False) # type: bool
# PM + @-mention notifications.
enable_desktop_notifications = models.BooleanField(default=True) # type: bool
pm_content_in_desktop_notifications = models.BooleanField(default=True) # type: bool
enable_sounds = models.BooleanField(default=True) # type: bool
enable_offline_email_notifications = models.BooleanField(default=True) # type: bool
enable_offline_push_notifications = models.BooleanField(default=True) # type: bool
enable_online_push_notifications = models.BooleanField(default=False) # type: bool
enable_digest_emails = models.BooleanField(default=True) # type: bool
# Old notification field superseded by existence of stream notification
# settings.
default_desktop_notifications = models.BooleanField(default=True) # type: bool
###
last_reminder = models.DateTimeField(default=timezone.now, null=True) # type: Optional[datetime.datetime]
rate_limits = models.CharField(default=u"", max_length=100) # type: Text # comma-separated list of range:max pairs
# Default streams
default_sending_stream = models.ForeignKey('zerver.Stream', null=True, related_name='+') # type: Optional[Stream]
default_events_register_stream = models.ForeignKey('zerver.Stream', null=True, related_name='+') # type: Optional[Stream]
default_all_public_streams = models.BooleanField(default=False) # type: bool
# UI vars
enter_sends = models.NullBooleanField(default=False) # type: Optional[bool]
autoscroll_forever = models.BooleanField(default=False) # type: bool
left_side_userlist = models.BooleanField(default=False) # type: bool
# display settings
twenty_four_hour_time = models.BooleanField(default=False) # type: bool
default_language = models.CharField(default=u'en', max_length=MAX_LANGUAGE_ID_LENGTH) # type: Text
# Hours to wait before sending another email to a user
EMAIL_REMINDER_WAITPERIOD = 24
# Minutes to wait before warning a bot owner that her bot sent a message
# to a nonexistent stream
BOT_OWNER_STREAM_ALERT_WAITPERIOD = 1
AVATAR_FROM_GRAVATAR = u'G'
AVATAR_FROM_USER = u'U'
AVATAR_SOURCES = (
(AVATAR_FROM_GRAVATAR, 'Hosted by Gravatar'),
(AVATAR_FROM_USER, 'Uploaded by user'),
)
avatar_source = models.CharField(default=AVATAR_FROM_GRAVATAR, choices=AVATAR_SOURCES, max_length=1) # type: Text
TUTORIAL_WAITING = u'W'
TUTORIAL_STARTED = u'S'
TUTORIAL_FINISHED = u'F'
TUTORIAL_STATES = ((TUTORIAL_WAITING, "Waiting"),
(TUTORIAL_STARTED, "Started"),
(TUTORIAL_FINISHED, "Finished"))
tutorial_status = models.CharField(default=TUTORIAL_WAITING, choices=TUTORIAL_STATES, max_length=1) # type: Text
# Contains serialized JSON of the form:
# [("step 1", true), ("step 2", false)]
# where the second element of each tuple is if the step has been
# completed.
onboarding_steps = models.TextField(default=u'[]') # type: Text
invites_granted = models.IntegerField(default=0) # type: int
invites_used = models.IntegerField(default=0) # type: int
alert_words = models.TextField(default=u'[]') # type: Text # json-serialized list of strings
# Contains serialized JSON of the form:
# [["social", "mit"], ["devel", "ios"]]
muted_topics = models.TextField(default=u'[]') # type: Text
objects = UserManager() # type: UserManager
def can_admin_user(self, target_user):
# type: (UserProfile) -> bool
"""Returns whether this user has permission to modify target_user"""
if target_user.bot_owner == self:
return True
elif self.is_realm_admin and self.realm == target_user.realm:
return True
else:
return False
def __unicode__(self):
# type: () -> Text
return u"<UserProfile: %s %s>" % (self.email, self.realm)
@property
def is_incoming_webhook(self):
# type: () -> bool
return self.bot_type == UserProfile.INCOMING_WEBHOOK_BOT
@staticmethod
def emails_from_ids(user_ids):
# type: (Sequence[int]) -> Dict[int, Text]
rows = UserProfile.objects.filter(id__in=user_ids).values('id', 'email')
return {row['id']: row['email'] for row in rows}
def can_create_streams(self):
# type: () -> bool
diff = (timezone.now() - self.date_joined).days
if self.is_realm_admin:
return True
elif self.realm.create_stream_by_admins_only:
return False
if diff >= self.realm.waiting_period_threshold:
return True
return False
def major_tos_version(self):
# type: () -> int
if self.tos_version is not None:
return int(self.tos_version.split('.')[0])
else:
return -1
def receives_offline_notifications(user_profile):
# type: (UserProfile) -> bool
return ((user_profile.enable_offline_email_notifications or
user_profile.enable_offline_push_notifications) and
not user_profile.is_bot)
def receives_online_notifications(user_profile):
# type: (UserProfile) -> bool
return (user_profile.enable_online_push_notifications and
not user_profile.is_bot)
def remote_user_to_email(remote_user):
# type: (Text) -> Text
if settings.SSO_APPEND_DOMAIN is not None:
remote_user += "@" + settings.SSO_APPEND_DOMAIN
return remote_user
# Make sure we flush the UserProfile object from our remote cache
# whenever we save it.
post_save.connect(flush_user_profile, sender=UserProfile)
class PreregistrationUser(models.Model):
email = models.EmailField() # type: Text
referred_by = models.ForeignKey(UserProfile, null=True) # type: Optional[UserProfile]
streams = models.ManyToManyField('Stream') # type: Manager
invited_at = models.DateTimeField(auto_now=True) # type: datetime.datetime
realm_creation = models.BooleanField(default=False)
# status: whether an object has been confirmed.
# if confirmed, set to confirmation.settings.STATUS_ACTIVE
status = models.IntegerField(default=0) # type: int
realm = models.ForeignKey(Realm, null=True) # type: Optional[Realm]
class PushDeviceToken(models.Model):
APNS = 1
GCM = 2
KINDS = (
(APNS, 'apns'),
(GCM, 'gcm'),
)
kind = models.PositiveSmallIntegerField(choices=KINDS) # type: int
# The token is a unique device-specific token that is
# sent to us from each device:
# - APNS token if kind == APNS
# - GCM registration id if kind == GCM
token = models.CharField(max_length=4096, unique=True) # type: Text
last_updated = models.DateTimeField(auto_now=True) # type: datetime.datetime
# The user whose device this is
user = models.ForeignKey(UserProfile, db_index=True) # type: UserProfile
# [optional] Contains the app id of the device if it is an iOS device
ios_app_id = models.TextField(null=True) # type: Optional[Text]
def generate_email_token_for_stream():
# type: () -> Text
return generate_random_token(32)
class Stream(ModelReprMixin, models.Model):
MAX_NAME_LENGTH = 60
name = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True) # type: Text
realm = models.ForeignKey(Realm, db_index=True) # type: Realm
invite_only = models.NullBooleanField(default=False) # type: Optional[bool]
# Used by the e-mail forwarder. The e-mail RFC specifies a maximum
# e-mail length of 254, and our max stream name length is 60, so we
# have plenty of room for the token.
email_token = models.CharField(
max_length=32, default=generate_email_token_for_stream) # type: Text
description = models.CharField(max_length=1024, default=u'') # type: Text
date_created = models.DateTimeField(default=timezone.now) # type: datetime.datetime
deactivated = models.BooleanField(default=False) # type: bool
def __unicode__(self):
# type: () -> Text
return u"<Stream: %s>" % (self.name,)
def is_public(self):
# type: () -> bool
# All streams are private in Zephyr mirroring realms.
return not self.invite_only and not self.realm.is_zephyr_mirror_realm
class Meta(object):
unique_together = ("name", "realm")
def num_subscribers(self):
# type: () -> int
return Subscription.objects.filter(
recipient__type=Recipient.STREAM,
recipient__type_id=self.id,
user_profile__is_active=True,
active=True
).count()
# This is stream information that is sent to clients
def to_dict(self):
# type: () -> Dict[str, Any]
return dict(name=self.name,
stream_id=self.id,
description=self.description,
invite_only=self.invite_only)
post_save.connect(flush_stream, sender=Stream)
post_delete.connect(flush_stream, sender=Stream)
def valid_stream_name(name):
# type: (Text) -> bool
return name != ""
# The Recipient table is used to map Messages to the set of users who
# received the message. It is implemented as a set of triples (id,
# type_id, type). We have 3 types of recipients: Huddles (for group
# private messages), UserProfiles (for 1:1 private messages), and
# Streams. The recipient table maps a globally unique recipient id
# (used by the Message table) to the type-specific unique id (the
# stream id, user_profile id, or huddle id).
class Recipient(ModelReprMixin, models.Model):
type_id = models.IntegerField(db_index=True) # type: int
type = models.PositiveSmallIntegerField(db_index=True) # type: int
# Valid types are {personal, stream, huddle}
PERSONAL = 1
STREAM = 2
HUDDLE = 3
class Meta(object):
unique_together = ("type", "type_id")
# N.B. If we used Django's choice=... we would get this for free (kinda)
_type_names = {
PERSONAL: 'personal',
STREAM: 'stream',
HUDDLE: 'huddle'}
def type_name(self):
# type: () -> str
# Raises KeyError if invalid
return self._type_names[self.type]
def __unicode__(self):
# type: () -> Text
display_recipient = get_display_recipient(self)
return u"<Recipient: %s (%d, %s)>" % (display_recipient, self.type_id, self.type)
class Client(ModelReprMixin, models.Model):
name = models.CharField(max_length=30, db_index=True, unique=True) # type: Text
def __unicode__(self):
# type: () -> Text
return u"<Client: %s>" % (self.name,)
get_client_cache = {} # type: Dict[Text, Client]
def get_client(name):
# type: (Text) -> Client
# Accessing KEY_PREFIX through the module is necessary
# because we need the updated value of the variable.
cache_name = cache.KEY_PREFIX + name
if cache_name not in get_client_cache:
result = get_client_remote_cache(name)
get_client_cache[cache_name] = result
return get_client_cache[cache_name]
def get_client_cache_key(name):
# type: (Text) -> Text
return u'get_client:%s' % (make_safe_digest(name),)
@cache_with_key(get_client_cache_key, timeout=3600*24*7)
def get_client_remote_cache(name):
# type: (Text) -> Client
(client, _) = Client.objects.get_or_create(name=name)
return client
# get_stream_backend takes a stream name and a Realm object
@cache_with_key(get_stream_cache_key, timeout=3600*24*7)
def get_stream_backend(stream_name, realm):
# type: (Text, Realm) -> Stream
return Stream.objects.select_related("realm").get(
name__iexact=stream_name.strip(), realm_id=realm.id)
def get_active_streams(realm):
# type: (Realm) -> QuerySet
"""
Return all streams (including invite-only streams) that have not been deactivated.
"""
return Stream.objects.filter(realm=realm, deactivated=False)
def get_stream(stream_name, realm):
# type: (Text, Realm) -> Optional[Stream]
try:
return get_stream_backend(stream_name, realm)
except Stream.DoesNotExist:
return None
def bulk_get_streams(realm, stream_names):
# type: (Realm, STREAM_NAMES) -> Dict[Text, Any]
def fetch_streams_by_name(stream_names):
# type: (List[Text]) -> Sequence[Stream]
#
# This should be just
#
# Stream.objects.select_related("realm").filter(name__iexact__in=stream_names,
# realm_id=realm_id)
#
# But chaining __in and __iexact doesn't work with Django's
# ORM, so we have the following hack to construct the relevant where clause
if len(stream_names) == 0:
return []
upper_list = ", ".join(["UPPER(%s)"] * len(stream_names))
where_clause = "UPPER(zerver_stream.name::text) IN (%s)" % (upper_list,)
return get_active_streams(realm.id).select_related("realm").extra(
where=[where_clause],
params=stream_names)
return generic_bulk_cached_fetch(lambda stream_name: get_stream_cache_key(stream_name, realm),
fetch_streams_by_name,
[stream_name.lower() for stream_name in stream_names],
id_fetcher=lambda stream: stream.name.lower())
def get_recipient_cache_key(type, type_id):
# type: (int, int) -> Text
return u"get_recipient:%s:%s" % (type, type_id,)
@cache_with_key(get_recipient_cache_key, timeout=3600*24*7)
def get_recipient(type, type_id):
# type: (int, int) -> Recipient
return Recipient.objects.get(type_id=type_id, type=type)
def bulk_get_recipients(type, type_ids):
# type: (int, List[int]) -> Dict[int, Any]
def cache_key_function(type_id):
# type: (int) -> Text
return get_recipient_cache_key(type, type_id)
def query_function(type_ids):
# type: (List[int]) -> Sequence[Recipient]
# TODO: Change return type to QuerySet[Recipient]
return Recipient.objects.filter(type=type, type_id__in=type_ids)
return generic_bulk_cached_fetch(cache_key_function, query_function, type_ids,
id_fetcher=lambda recipient: recipient.type_id)
def sew_messages_and_reactions(messages, reactions):
# type: (List[Dict[str, Any]], List[Dict[str, Any]]) -> List[Dict[str, Any]]
"""Given a iterable of messages and reactions stitch reactions
into messages.
"""
# Add all messages with empty reaction item
for message in messages:
message['reactions'] = []
# Convert list of messages into dictionary to make reaction stitching easy
converted_messages = {message['id']: message for message in messages}
for reaction in reactions:
converted_messages[reaction['message_id']]['reactions'].append(
reaction)
return list(converted_messages.values())
class Message(ModelReprMixin, models.Model):
sender = models.ForeignKey(UserProfile) # type: UserProfile
recipient = models.ForeignKey(Recipient) # type: Recipient
subject = models.CharField(max_length=MAX_SUBJECT_LENGTH, db_index=True) # type: Text
content = models.TextField() # type: Text
rendered_content = models.TextField(null=True) # type: Optional[Text]
rendered_content_version = models.IntegerField(null=True) # type: Optional[int]
pub_date = models.DateTimeField('date published', db_index=True) # type: datetime.datetime
sending_client = models.ForeignKey(Client) # type: Client
last_edit_time = models.DateTimeField(null=True) # type: Optional[datetime.datetime]
edit_history = models.TextField(null=True) # type: Optional[Text]
has_attachment = models.BooleanField(default=False, db_index=True) # type: bool
has_image = models.BooleanField(default=False, db_index=True) # type: bool
has_link = models.BooleanField(default=False, db_index=True) # type: bool
def topic_name(self):
# type: () -> Text
"""
Please start using this helper to facilitate an
eventual switch over to a separate topic table.
"""
return self.subject
def __unicode__(self):
# type: () -> Text
display_recipient = get_display_recipient(self.recipient)
return u"<Message: %s / %s / %r>" % (display_recipient, self.subject, self.sender)
def get_realm(self):
# type: () -> Realm
return self.sender.realm
def save_rendered_content(self):
# type: () -> None
self.save(update_fields=["rendered_content", "rendered_content_version"])
@staticmethod
def need_to_render_content(rendered_content, rendered_content_version, bugdown_version):
# type: (Optional[Text], int, int) -> bool
return rendered_content is None or rendered_content_version < bugdown_version
def to_log_dict(self):
# type: () -> Dict[str, Any]
return dict(
id = self.id,
sender_id = self.sender.id,
sender_email = self.sender.email,
sender_domain = self.sender.realm.domain,
sender_full_name = self.sender.full_name,
sender_short_name = self.sender.short_name,
sending_client = self.sending_client.name,
type = self.recipient.type_name(),
recipient = get_display_recipient(self.recipient),
subject = self.topic_name(),
content = self.content,
timestamp = datetime_to_timestamp(self.pub_date))
@staticmethod
def get_raw_db_rows(needed_ids):
# type: (List[int]) -> List[Dict[str, Any]]
# This is a special purpose function optimized for
# callers like get_old_messages_backend().
fields = [
'id',
'subject',
'pub_date',
'last_edit_time',
'edit_history',
'content',
'rendered_content',
'rendered_content_version',
'recipient_id',
'recipient__type',
'recipient__type_id',
'sender_id',
'sending_client__name',
'sender__email',
'sender__full_name',
'sender__short_name',
'sender__realm__id',
'sender__realm__domain',
'sender__avatar_source',
'sender__is_mirror_dummy',
]
messages = Message.objects.filter(id__in=needed_ids).values(*fields)
"""Adding one-many or Many-Many relationship in values results in N X
results.
Link: https://docs.djangoproject.com/en/1.8/ref/models/querysets/#values
"""
reactions = Reaction.get_raw_db_rows(needed_ids)
return sew_messages_and_reactions(messages, reactions)
def sent_by_human(self):
# type: () -> bool
sending_client = self.sending_client.name.lower()
return (sending_client in ('zulipandroid', 'zulipios', 'zulipdesktop',
'website', 'ios', 'android')) or (
'desktop app' in sending_client)
@staticmethod
def content_has_attachment(content):
# type: (Text) -> Match
return re.search(r'[/\-]user[\-_]uploads[/\.-]', content)
@staticmethod
def content_has_image(content):
# type: (Text) -> bool
return bool(re.search(r'[/\-]user[\-_]uploads[/\.-]\S+\.(bmp|gif|jpg|jpeg|png|webp)', content, re.IGNORECASE))
@staticmethod
def content_has_link(content):
# type: (Text) -> bool
return ('http://' in content or
'https://' in content or
'/user_uploads' in content or
(settings.ENABLE_FILE_LINKS and 'file:///' in content))
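# For example, a message containing
#   'see https://example.com/user_uploads/1/ab/pic.png'
# would (per the checks above) register has_link, has_attachment and
# has_image, while plain text with no URL registers none of them.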
@staticmethod
def is_status_message(content, rendered_content):
# type: (Text, Text) -> bool
"""
Returns True if content and rendered_content are from 'me_message'
"""
if content.startswith('/me ') and '\n' not in content:
if rendered_content.startswith('<p>') and rendered_content.endswith('</p>'):
return True
return False
def update_calculated_fields(self):
# type: () -> None
# TODO: rendered_content could also be considered a calculated field
content = self.content
self.has_attachment = bool(Message.content_has_attachment(content))
self.has_image = bool(Message.content_has_image(content))
self.has_link = bool(Message.content_has_link(content))
@receiver(pre_save, sender=Message)
def pre_save_message(sender, **kwargs):
# type: (Any, **Any) -> None
if kwargs['update_fields'] is None or "content" in kwargs['update_fields']:
message = kwargs['instance']
message.update_calculated_fields()
def get_context_for_message(message):
# type: (Message) -> Sequence[Message]
# TODO: Change return type to QuerySet[Message]
return Message.objects.filter(
recipient_id=message.recipient_id,
subject=message.subject,
id__lt=message.id,
pub_date__gt=message.pub_date - timedelta(minutes=15),
).order_by('-id')[:10]
post_save.connect(flush_message, sender=Message)
class Reaction(ModelReprMixin, models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
message = models.ForeignKey(Message) # type: Message
emoji_name = models.TextField() # type: Text
class Meta(object):
unique_together = ("user_profile", "message", "emoji_name")
@staticmethod
def get_raw_db_rows(needed_ids):
# type: (List[int]) -> List[Dict[str, Any]]
fields = ['message_id', 'emoji_name', 'user_profile__email',
'user_profile__id', 'user_profile__full_name']
return Reaction.objects.filter(message_id__in=needed_ids).values(*fields)
# Whenever a message is sent, for each user currently subscribed to the
# corresponding Recipient object, we add a row to the UserMessage
# table, which has columns (id, user profile id, message id,
# flags) indicating which messages each user has received. This table
# allows us to quickly query any user's last 1000 messages to generate
# the home view.
#
# Additionally, the flags field stores metadata like whether the user
# has read the message, starred it, collapsed it, or was
# mentioned in the message, etc.
#
# UserMessage is the largest table in a Zulip installation, even
# though each row is only 4 integers.
class UserMessage(ModelReprMixin, models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
message = models.ForeignKey(Message) # type: Message
# We're not using the archived field for now, but create it anyway
# since this table will be an unpleasant one to do schema changes
# on later
ALL_FLAGS = ['read', 'starred', 'collapsed', 'mentioned', 'wildcard_mentioned',
'summarize_in_home', 'summarize_in_stream', 'force_expand', 'force_collapse',
'has_alert_word', "historical", 'is_me_message']
flags = BitField(flags=ALL_FLAGS, default=0) # type: BitHandler
class Meta(object):
unique_together = ("user_profile", "message")
def __unicode__(self):
# type: () -> Text
display_recipient = get_display_recipient(self.message.recipient)
return u"<UserMessage: %s / %s (%s)>" % (display_recipient, self.user_profile.email, self.flags_list())
def flags_list(self):
# type: () -> List[str]
return [flag for flag in self.flags.keys() if getattr(self.flags, flag).is_set]
def parse_usermessage_flags(val):
# type: (int) -> List[str]
flags = []
mask = 1
for flag in UserMessage.ALL_FLAGS:
if val & mask:
flags.append(flag)
mask <<= 1
return flags
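# Worked example: parse_usermessage_flags(5) walks the bitmask 0b101 against
# ALL_FLAGS in order, so bit 0 ('read') and bit 2 ('collapsed') are set and it
# returns ['read', 'collapsed'].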
class Attachment(ModelReprMixin, models.Model):
file_name = models.TextField(db_index=True) # type: Text
# path_id is a storage location agnostic representation of the path of the file.
# If the path of a file is http://localhost:9991/user_uploads/a/b/abc/temp_file.py
# then its path_id will be a/b/abc/temp_file.py.
path_id = models.TextField(db_index=True) # type: Text
owner = models.ForeignKey(UserProfile) # type: UserProfile
realm = models.ForeignKey(Realm, blank=True, null=True) # type: Realm
is_realm_public = models.BooleanField(default=False) # type: bool
messages = models.ManyToManyField(Message) # type: Manager
create_time = models.DateTimeField(default=timezone.now, db_index=True) # type: datetime.datetime
def __unicode__(self):
# type: () -> Text
return u"<Attachment: %s>" % (self.file_name,)
def is_claimed(self):
# type: () -> bool
return self.messages.count() > 0
def get_old_unclaimed_attachments(weeks_ago):
# type: (int) -> Sequence[Attachment]
# TODO: Change return type to QuerySet[Attachment]
delta_weeks_ago = timezone.now() - datetime.timedelta(weeks=weeks_ago)
old_attachments = Attachment.objects.filter(messages=None, create_time__lt=delta_weeks_ago)
return old_attachments
class Subscription(ModelReprMixin, models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
recipient = models.ForeignKey(Recipient) # type: Recipient
active = models.BooleanField(default=True) # type: bool
in_home_view = models.NullBooleanField(default=True) # type: Optional[bool]
DEFAULT_STREAM_COLOR = u"#c2c2c2"
color = models.CharField(max_length=10, default=DEFAULT_STREAM_COLOR) # type: Text
pin_to_top = models.BooleanField(default=False) # type: bool
desktop_notifications = models.BooleanField(default=True) # type: bool
audible_notifications = models.BooleanField(default=True) # type: bool
# Combination desktop + audible notifications superseded by the
# above.
notifications = models.BooleanField(default=False) # type: bool
class Meta(object):
unique_together = ("user_profile", "recipient")
def __unicode__(self):
# type: () -> Text
return u"<Subscription: %r -> %s>" % (self.user_profile, self.recipient)
@cache_with_key(user_profile_by_id_cache_key, timeout=3600*24*7)
def get_user_profile_by_id(uid):
# type: (int) -> UserProfile
return UserProfile.objects.select_related().get(id=uid)
@cache_with_key(user_profile_by_email_cache_key, timeout=3600*24*7)
def get_user_profile_by_email(email):
# type: (Text) -> UserProfile
return UserProfile.objects.select_related().get(email__iexact=email.strip())
@cache_with_key(active_user_dicts_in_realm_cache_key, timeout=3600*24*7)
def get_active_user_dicts_in_realm(realm):
# type: (Realm) -> List[Dict[str, Any]]
return UserProfile.objects.filter(realm=realm, is_active=True) \
.values(*active_user_dict_fields)
@cache_with_key(active_bot_dicts_in_realm_cache_key, timeout=3600*24*7)
def get_active_bot_dicts_in_realm(realm):
# type: (Realm) -> List[Dict[str, Any]]
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=True) \
.values(*active_bot_dict_fields)
def get_owned_bot_dicts(user_profile, include_all_realm_bots_if_admin=True):
# type: (UserProfile, bool) -> List[Dict[str, Any]]
if user_profile.is_realm_admin and include_all_realm_bots_if_admin:
result = get_active_bot_dicts_in_realm(user_profile.realm)
else:
result = UserProfile.objects.filter(realm=user_profile.realm, is_active=True, is_bot=True,
bot_owner=user_profile).values(*active_bot_dict_fields)
# TODO: Remove this import cycle
from zerver.lib.avatar import get_avatar_url
return [{'email': botdict['email'],
'user_id': botdict['id'],
'full_name': botdict['full_name'],
'api_key': botdict['api_key'],
'default_sending_stream': botdict['default_sending_stream__name'],
'default_events_register_stream': botdict['default_events_register_stream__name'],
'default_all_public_streams': botdict['default_all_public_streams'],
'owner': botdict['bot_owner__email'],
'avatar_url': get_avatar_url(botdict['avatar_source'], botdict['email']),
}
for botdict in result]
def get_prereg_user_by_email(email):
# type: (Text) -> PreregistrationUser
# A user can be invited many times, so only return the result of the latest
# invite.
return PreregistrationUser.objects.filter(email__iexact=email.strip()).latest("invited_at")
def get_cross_realm_emails():
# type: () -> Set[Text]
return set(settings.CROSS_REALM_BOT_EMAILS)
# The Huddle class represents a group of individuals who have had a
# Group Private Message conversation together. The actual membership
# of the Huddle is stored in the Subscription table just like with
# Streams, and a hash of that list is stored in the huddle_hash field
# below, to support efficiently mapping from a set of users to the
# corresponding Huddle object.
class Huddle(models.Model):
# TODO: We should consider whether using
# CommaSeparatedIntegerField would be better.
huddle_hash = models.CharField(max_length=40, db_index=True, unique=True) # type: Text
def get_huddle_hash(id_list):
# type: (List[int]) -> Text
id_list = sorted(set(id_list))
hash_key = ",".join(str(x) for x in id_list)
return make_safe_digest(hash_key)
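# For example, get_huddle_hash([9, 3, 3, 7]) deduplicates and sorts the ids to
# the canonical key "3,7,9" before hashing, so any ordering of the same set of
# users maps to the same huddle_hash.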
def huddle_hash_cache_key(huddle_hash):
# type: (Text) -> Text
return u"huddle_by_hash:%s" % (huddle_hash,)
def get_huddle(id_list):
# type: (List[int]) -> Huddle
huddle_hash = get_huddle_hash(id_list)
return get_huddle_backend(huddle_hash, id_list)
@cache_with_key(lambda huddle_hash, id_list: huddle_hash_cache_key(huddle_hash), timeout=3600*24*7)
def get_huddle_backend(huddle_hash, id_list):
# type: (Text, List[int]) -> Huddle
(huddle, created) = Huddle.objects.get_or_create(huddle_hash=huddle_hash)
if created:
with transaction.atomic():
recipient = Recipient.objects.create(type_id=huddle.id,
type=Recipient.HUDDLE)
subs_to_create = [Subscription(recipient=recipient,
user_profile=get_user_profile_by_id(user_profile_id))
for user_profile_id in id_list]
Subscription.objects.bulk_create(subs_to_create)
return huddle
def clear_database():
# type: () -> None
pylibmc.Client(['127.0.0.1']).flush_all()
model = None # type: Any
for model in [Message, Stream, UserProfile, Recipient,
Realm, Subscription, Huddle, UserMessage, Client,
DefaultStream]:
model.objects.all().delete()
Session.objects.all().delete()
class UserActivity(models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
client = models.ForeignKey(Client) # type: Client
query = models.CharField(max_length=50, db_index=True) # type: Text
count = models.IntegerField() # type: int
last_visit = models.DateTimeField('last visit') # type: datetime.datetime
class Meta(object):
unique_together = ("user_profile", "client", "query")
class UserActivityInterval(models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
start = models.DateTimeField('start time', db_index=True) # type: datetime.datetime
end = models.DateTimeField('end time', db_index=True) # type: datetime.datetime
class UserPresence(models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
client = models.ForeignKey(Client) # type: Client
# Valid statuses
ACTIVE = 1
IDLE = 2
timestamp = models.DateTimeField('presence changed') # type: datetime.datetime
status = models.PositiveSmallIntegerField(default=ACTIVE) # type: int
@staticmethod
def status_to_string(status):
# type: (int) -> str
if status == UserPresence.ACTIVE:
return 'active'
elif status == UserPresence.IDLE:
return 'idle'
@staticmethod
def get_status_dict_by_realm(realm_id):
# type: (int) -> defaultdict[Any, Dict[Any, Any]]
user_statuses = defaultdict(dict) # type: defaultdict[Any, Dict[Any, Any]]
query = UserPresence.objects.filter(
user_profile__realm_id=realm_id,
user_profile__is_active=True,
user_profile__is_bot=False
).values(
'client__name',
'status',
'timestamp',
'user_profile__email',
'user_profile__id',
'user_profile__enable_offline_push_notifications',
'user_profile__is_mirror_dummy',
)
mobile_user_ids = [row['user'] for row in PushDeviceToken.objects.filter(
user__realm_id=realm_id,
user__is_active=True,
user__is_bot=False,
).distinct("user").values("user")]
for row in query:
info = UserPresence.to_presence_dict(
client_name=row['client__name'],
status=row['status'],
dt=row['timestamp'],
push_enabled=row['user_profile__enable_offline_push_notifications'],
has_push_devices=row['user_profile__id'] in mobile_user_ids,
is_mirror_dummy=row['user_profile__is_mirror_dummy'],
)
user_statuses[row['user_profile__email']][row['client__name']] = info
return user_statuses
@staticmethod
def to_presence_dict(client_name=None, status=None, dt=None, push_enabled=None,
has_push_devices=None, is_mirror_dummy=None):
# type: (Optional[Text], Optional[int], Optional[datetime.datetime], Optional[bool], Optional[bool], Optional[bool]) -> Dict[str, Any]
presence_val = UserPresence.status_to_string(status)
timestamp = datetime_to_timestamp(dt)
return dict(
client=client_name,
status=presence_val,
timestamp=timestamp,
pushable=(push_enabled and has_push_devices),
)
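# Example of the dict produced (hypothetical values):
#   {'client': u'website', 'status': 'active', 'timestamp': 1480000000,
#    'pushable': False}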
def to_dict(self):
# type: () -> Dict[str, Any]
return UserPresence.to_presence_dict(
client_name=self.client.name,
status=self.status,
dt=self.timestamp
)
@staticmethod
def status_from_string(status):
# type: (NonBinaryStr) -> Optional[int]
if status == 'active':
status_val = UserPresence.ACTIVE
elif status == 'idle':
status_val = UserPresence.IDLE
else:
status_val = None
return status_val
class Meta(object):
unique_together = ("user_profile", "client")
class DefaultStream(models.Model):
realm = models.ForeignKey(Realm) # type: Realm
stream = models.ForeignKey(Stream) # type: Stream
class Meta(object):
unique_together = ("realm", "stream")
class Referral(models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
email = models.EmailField(blank=False, null=False) # type: Text
timestamp = models.DateTimeField(auto_now_add=True, null=False) # type: datetime.datetime
# This table only gets used on Zulip Voyager instances
# For reasons of deliverability (and sending from multiple email addresses),
# we will still send from mandrill when we send things from the (staging.)zulip.com install
class ScheduledJob(models.Model):
scheduled_timestamp = models.DateTimeField(auto_now_add=False, null=False) # type: datetime.datetime
type = models.PositiveSmallIntegerField() # type: int
# Valid types are {email}
# for EMAIL, filter_string is recipient_email
EMAIL = 1
# JSON representation of the job's data. Be careful, as we are not relying on Django to do validation
data = models.TextField() # type: Text
# Kind of like a ForeignKey, but the table is determined by type.
filter_id = models.IntegerField(null=True) # type: Optional[int]
filter_string = models.CharField(max_length=100) # type: Text
|
niftynei/zulip
|
zerver/models.py
|
Python
|
apache-2.0
| 57,696
|
[
"VisIt"
] |
fbd96b63aca1547771bf617e6e09125ff7662601a48a6448bf8d196d2eb37f20
|
"""
Course Outline page in Studio.
"""
import datetime
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from .course_page import CoursePage
from .container import ContainerPage
from .utils import set_input_value_and_save, set_input_value, click_css, confirm_prompt
class CourseOutlineItem(object):
"""
A mixin class for any :class:`PageObject` shown in a course outline.
"""
BODY_SELECTOR = None
EDIT_BUTTON_SELECTOR = '.xblock-field-value-edit'
NAME_SELECTOR = '.item-title'
NAME_INPUT_SELECTOR = '.xblock-field-input'
NAME_FIELD_WRAPPER_SELECTOR = '.xblock-title .wrapper-xblock-field'
STATUS_MESSAGE_SELECTOR = '> div[class$="status"] .status-message'
CONFIGURATION_BUTTON_SELECTOR = '.action-item .configure-button'
def __repr__(self):
# CourseOutlineItem is also used as a mixin for CourseOutlinePage, which doesn't have a locator
# Check for the existence of a locator so that errors when navigating to the course outline page don't show up
# as errors in the repr method instead.
try:
return "{}(<browser>, {!r})".format(self.__class__.__name__, self.locator)
except AttributeError:
return "{}(<browser>)".format(self.__class__.__name__)
def _bounded_selector(self, selector):
"""
Returns `selector`, but limited to this particular `CourseOutlineItem` context
"""
# If the item doesn't have a body selector or locator, then it can't be bounded
# This happens in the context of the CourseOutlinePage
if self.BODY_SELECTOR and hasattr(self, 'locator'):
return '{}[data-locator="{}"] {}'.format(
self.BODY_SELECTOR,
self.locator,
selector
)
else:
return selector
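# For a concrete (hypothetical) subclass with BODY_SELECTOR = '.outline-section'
# and locator 'block-v1:Org+Course+Run+type@chapter+block@123',
# _bounded_selector('.item-title') yields
# '.outline-section[data-locator="block-v1:Org+Course+Run+type@chapter+block@123"] .item-title'.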
@property
def name(self):
"""
Returns the display name of this object.
"""
name_element = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).first
if name_element:
return name_element.text[0]
else:
return None
@property
def has_status_message(self):
"""
Returns True if the item has a status message, False otherwise.
"""
return self.q(css=self._bounded_selector(self.STATUS_MESSAGE_SELECTOR)).first.visible
@property
def status_message(self):
"""
Returns the status message of this item.
"""
return self.q(css=self._bounded_selector(self.STATUS_MESSAGE_SELECTOR)).text[0]
@property
def has_staff_lock_warning(self):
""" Returns True if the 'Contains staff only content' message is visible """
return self.status_message == 'Contains staff only content' if self.has_status_message else False
@property
def is_staff_only(self):
""" Returns True if the visiblity state of this item is staff only (has a black sidebar) """
return "is-staff-only" in self.q(css=self._bounded_selector(''))[0].get_attribute("class")
def edit_name(self):
"""
Puts the item's name into editable form.
"""
self.q(css=self._bounded_selector(self.EDIT_BUTTON_SELECTOR)).first.click()
def enter_name(self, new_name):
"""
Enters new_name as the item's display name.
"""
set_input_value(self, self._bounded_selector(self.NAME_INPUT_SELECTOR), new_name)
def change_name(self, new_name):
"""
Changes the container's name.
"""
self.edit_name()
set_input_value_and_save(self, self._bounded_selector(self.NAME_INPUT_SELECTOR), new_name)
self.wait_for_ajax()
def finalize_name(self):
"""
Presses ENTER, saving the value of the display name for this item.
"""
self.q(css=self._bounded_selector(self.NAME_INPUT_SELECTOR)).results[0].send_keys(Keys.ENTER)
self.wait_for_ajax()
def set_staff_lock(self, is_locked):
"""
Sets the explicit staff lock of item on the container page to is_locked.
"""
modal = self.edit()
modal.is_explicitly_locked = is_locked
modal.save()
def in_editable_form(self):
"""
Return whether this outline item's display name is in its editable form.
"""
return "is-editing" in self.q(
css=self._bounded_selector(self.NAME_FIELD_WRAPPER_SELECTOR)
)[0].get_attribute("class")
def edit(self):
self.q(css=self._bounded_selector(self.CONFIGURATION_BUTTON_SELECTOR)).first.click()
modal = CourseOutlineModal(self)
EmptyPromise(lambda: modal.is_shown(), 'Modal is shown.').fulfill()
return modal
@property
def release_date(self):
element = self.q(css=self._bounded_selector(".status-release-value"))
return element.first.text[0] if element.present else None
@property
def due_date(self):
element = self.q(css=self._bounded_selector(".status-grading-date"))
return element.first.text[0] if element.present else None
@property
def policy(self):
element = self.q(css=self._bounded_selector(".status-grading-value"))
return element.first.text[0] if element.present else None
def publish(self):
"""
Publish the unit.
"""
click_css(self, self._bounded_selector('.action-publish'), require_notification=False)
modal = CourseOutlineModal(self)
EmptyPromise(lambda: modal.is_shown(), 'Modal is shown.').fulfill()
modal.publish()
@property
def publish_action(self):
"""
Returns the link for publishing a unit.
"""
return self.q(css=self._bounded_selector('.action-publish')).first
class CourseOutlineContainer(CourseOutlineItem):
"""
A mixin to a CourseOutline page object that adds the ability to load
a child page object by title or by index.
CHILD_CLASS must be a :class:`CourseOutlineChild` subclass.
"""
CHILD_CLASS = None
ADD_BUTTON_SELECTOR = '> .outline-content > .add-item a.button-new'
def child(self, title, child_class=None):
"""
Returns the child of class `child_class` (or CHILD_CLASS, if not given) with the display name `title`.
"""
if not child_class:
child_class = self.CHILD_CLASS
return child_class(
self.browser,
self.q(css=child_class.BODY_SELECTOR).filter(
lambda el: title in [inner.text for inner in
el.find_elements_by_css_selector(child_class.NAME_SELECTOR)]
).attrs('data-locator')[0]
)
def children(self, child_class=None):
"""
Returns all the children page objects of class child_class.
"""
if not child_class:
child_class = self.CHILD_CLASS
return self.q(css=self._bounded_selector(child_class.BODY_SELECTOR)).map(
lambda el: child_class(self.browser, el.get_attribute('data-locator'))).results
def child_at(self, index, child_class=None):
"""
Returns the child at the specified index.
"""
if not child_class:
child_class = self.CHILD_CLASS
return self.children(child_class)[index]
def add_child(self, require_notification=True):
"""
Adds a child to this xblock, waiting for notifications.
"""
click_css(
self,
self._bounded_selector(self.ADD_BUTTON_SELECTOR),
require_notification=require_notification,
)
def toggle_expand(self):
"""
Toggle the expansion of this subsection.
"""
self.browser.execute_script("jQuery.fx.off = true;")
def subsection_expanded():
add_button = self.q(css=self._bounded_selector(self.ADD_BUTTON_SELECTOR)).first.results
return add_button and add_button[0].is_displayed()
currently_expanded = subsection_expanded()
self.q(css=self._bounded_selector('.ui-toggle-expansion i')).first.click()
EmptyPromise(
lambda: subsection_expanded() != currently_expanded,
"Check that the container {} has been toggled".format(self.locator)
).fulfill()
return self
@property
def is_collapsed(self):
"""
Return whether this outline item is currently collapsed.
"""
return "is-collapsed" in self.q(css=self._bounded_selector('')).first.attrs("class")[0]
class CourseOutlineChild(PageObject, CourseOutlineItem):
"""
A page object that will be used as a child of :class:`CourseOutlineContainer`.
"""
url = None
BODY_SELECTOR = '.outline-item'
def __init__(self, browser, locator):
super(CourseOutlineChild, self).__init__(browser)
self.locator = locator
def is_browser_on_page(self):
return self.q(css='{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)).present
def delete(self, cancel=False):
"""
Clicks the delete button, then cancels at the confirmation prompt if cancel is True.
"""
click_css(self, self._bounded_selector('.delete-button'), require_notification=False)
confirm_prompt(self, cancel)
def _bounded_selector(self, selector):
"""
Return `selector`, but limited to this particular `CourseOutlineChild` context
"""
return '{}[data-locator="{}"] {}'.format(
self.BODY_SELECTOR,
self.locator,
selector
)
@property
def name(self):
titles = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).text
if titles:
return titles[0]
else:
return None
@property
def children(self):
"""
Will return any first-generation descendant items of this item.
"""
descendants = self.q(css=self._bounded_selector(self.BODY_SELECTOR)).map(
lambda el: CourseOutlineChild(self.browser, el.get_attribute('data-locator'))).results
# Now remove any non-direct descendants.
grandkids = []
for descendant in descendants:
grandkids.extend(descendant.children)
grand_locators = [grandkid.locator for grandkid in grandkids]
return [descendant for descendant in descendants if not descendant.locator in grand_locators]
class CourseOutlineUnit(CourseOutlineChild):
"""
PageObject that wraps a unit link on the Studio Course Outline page.
"""
url = None
BODY_SELECTOR = '.outline-unit'
NAME_SELECTOR = '.unit-title a'
def go_to(self):
"""
Open the container page linked to by this unit link, and return
an initialized :class:`.ContainerPage` for that unit.
"""
return ContainerPage(self.browser, self.locator).visit()
def is_browser_on_page(self):
return self.q(css=self.BODY_SELECTOR).present
def children(self):
return self.q(css=self._bounded_selector(self.BODY_SELECTOR)).map(
lambda el: CourseOutlineUnit(self.browser, el.get_attribute('data-locator'))).results
class CourseOutlineSubsection(CourseOutlineContainer, CourseOutlineChild):
"""
:class`.PageObject` that wraps a subsection block on the Studio Course Outline page.
"""
url = None
BODY_SELECTOR = '.outline-subsection'
NAME_SELECTOR = '.subsection-title'
NAME_FIELD_WRAPPER_SELECTOR = '.subsection-header .wrapper-xblock-field'
CHILD_CLASS = CourseOutlineUnit
def unit(self, title):
"""
Return the :class:`.CourseOutlineUnit` with the title `title`.
"""
return self.child(title)
def units(self):
"""
Returns the units in this subsection.
"""
return self.children()
def unit_at(self, index):
"""
Returns the CourseOutlineUnit at the specified index.
"""
return self.child_at(index)
def add_unit(self):
"""
Adds a unit to this subsection
"""
self.q(css=self._bounded_selector(self.ADD_BUTTON_SELECTOR)).click()
class CourseOutlineSection(CourseOutlineContainer, CourseOutlineChild):
"""
:class`.PageObject` that wraps a section block on the Studio Course Outline page.
"""
url = None
BODY_SELECTOR = '.outline-section'
NAME_SELECTOR = '.section-title'
NAME_FIELD_WRAPPER_SELECTOR = '.section-header .wrapper-xblock-field'
CHILD_CLASS = CourseOutlineSubsection
def subsection(self, title):
"""
Return the :class:`.CourseOutlineSubsection` with the title `title`.
"""
return self.child(title)
def subsections(self):
"""
Returns a list of the CourseOutlineSubsections of this section
"""
return self.children()
def subsection_at(self, index):
"""
Returns the CourseOutlineSubsection at the specified index.
"""
return self.child_at(index)
def add_subsection(self):
"""
Adds a subsection to this section
"""
self.add_child()
class ExpandCollapseLinkState:
"""
Represents the three states that the expand/collapse link can be in
"""
MISSING = 0
COLLAPSE = 1
EXPAND = 2
class CourseOutlinePage(CoursePage, CourseOutlineContainer):
"""
Course Outline page in Studio.
"""
url_path = "course"
CHILD_CLASS = CourseOutlineSection
EXPAND_COLLAPSE_CSS = '.button-toggle-expand-collapse'
BOTTOM_ADD_SECTION_BUTTON = '.outline > .add-section .button-new'
def is_browser_on_page(self):
return self.q(css='body.view-outline').present and self.q(css='div.ui-loading.is-hidden').present
def view_live(self):
"""
Clicks the "View Live" link and switches to the new tab
"""
click_css(self, '.view-live-button', require_notification=False)
self.browser.switch_to_window(self.browser.window_handles[-1])
def section(self, title):
"""
Return the :class:`.CourseOutlineSection` with the title `title`.
"""
return self.child(title)
def section_at(self, index):
"""
Returns the :class:`.CourseOutlineSection` at the specified index.
"""
return self.child_at(index)
def click_section_name(self, parent_css=''):
"""
Find and click on first section name in course outline
"""
self.q(css='{} .section-name'.format(parent_css)).first.click()
def get_section_name(self, parent_css='', page_refresh=False):
"""
Get the list of names of all sections present
"""
if page_refresh:
self.browser.refresh()
return self.q(css='{} .section-name'.format(parent_css)).text
def section_name_edit_form_present(self, parent_css=''):
"""
Check that section name edit form present
"""
return self.q(css='{} .section-name input'.format(parent_css)).present
def change_section_name(self, new_name, parent_css=''):
"""
Change section name of first section present in course outline
"""
self.click_section_name(parent_css)
self.q(css='{} .section-name input'.format(parent_css)).first.fill(new_name)
self.q(css='{} .section-name .save-button'.format(parent_css)).first.click()
self.wait_for_ajax()
def click_release_date(self):
"""
Open release date edit modal of first section in course outline
"""
self.q(css='div.section-published-date a.edit-release-date').first.click()
def sections(self):
"""
Returns the sections of this course outline page.
"""
return self.children()
def add_section_from_top_button(self):
"""
Clicks the button for adding a section which resides at the top of the screen.
"""
click_css(self, '.wrapper-mast nav.nav-actions .button-new')
def add_section_from_bottom_button(self, click_child_icon=False):
"""
Clicks the button for adding a section which resides at the bottom of the screen.
"""
element_css = self.BOTTOM_ADD_SECTION_BUTTON
if click_child_icon:
element_css += " .icon-plus"
click_css(self, element_css)
def toggle_expand_collapse(self):
"""
Toggles whether all sections are expanded or collapsed
"""
self.q(css=self.EXPAND_COLLAPSE_CSS).click()
@property
def bottom_add_section_button(self):
"""
Returns the query representing the bottom add section button.
"""
return self.q(css=self.BOTTOM_ADD_SECTION_BUTTON).first
@property
def has_no_content_message(self):
"""
Returns true if a message informing the user that the course has no content is visible
"""
return self.q(css='.outline .no-content').is_present()
@property
def has_rerun_notification(self):
"""
Returns true iff the rerun notification is present on the page.
"""
return self.q(css='.wrapper-alert.is-shown').is_present()
def dismiss_rerun_notification(self):
"""
Clicks the dismiss button in the rerun notification.
"""
self.q(css='.dismiss-button').click()
@property
def expand_collapse_link_state(self):
"""
Returns the current state of the expand/collapse link
"""
link = self.q(css=self.EXPAND_COLLAPSE_CSS)[0]
if not link.is_displayed():
return ExpandCollapseLinkState.MISSING
elif "collapse-all" in link.get_attribute("class"):
return ExpandCollapseLinkState.COLLAPSE
else:
return ExpandCollapseLinkState.EXPAND
def expand_all_subsections(self):
"""
Expands all the subsections in this course.
"""
for section in self.sections():
if section.is_collapsed:
section.toggle_expand()
for subsection in section.subsections():
if subsection.is_collapsed:
subsection.toggle_expand()
@property
def xblocks(self):
"""
Return a list of xblocks loaded on the outline page.
"""
return self.children(CourseOutlineChild)
class CourseOutlineModal(object):
MODAL_SELECTOR = ".wrapper-modal-window"
def __init__(self, page):
self.page = page
def _bounded_selector(self, selector):
"""
Returns `selector`, but limited to this particular `CourseOutlineModal` context.
"""
return " ".join([self.MODAL_SELECTOR, selector])
def is_shown(self):
return self.page.q(css=self.MODAL_SELECTOR).present
def find_css(self, selector):
return self.page.q(css=self._bounded_selector(selector))
def click(self, selector, index=0):
self.find_css(selector).nth(index).click()
def save(self):
self.click(".action-save")
self.page.wait_for_ajax()
def publish(self):
self.click(".action-publish")
self.page.wait_for_ajax()
def cancel(self):
self.click(".action-cancel")
def has_release_date(self):
return self.find_css("#start_date").present
def has_due_date(self):
return self.find_css("#due_date").present
def has_policy(self):
return self.find_css("#grading_type").present
def set_date(self, property_name, input_selector, date):
"""
Set `date` value to input pointed by `selector` and `property_name`.
"""
month, day, year = map(int, date.split('/'))
self.click(input_selector)
if getattr(self, property_name):
current_month, current_year = map(int, getattr(self, property_name).split('/')[1:])
else: # Use default timepicker values, which are current month and year.
current_month, current_year = datetime.datetime.today().month, datetime.datetime.today().year
date_diff = 12 * (year - current_year) + month - current_month
selector = "a.ui-datepicker-{}".format('next' if date_diff > 0 else 'prev')
for i in xrange(abs(date_diff)):
self.page.q(css=selector).click()
self.page.q(css="a.ui-state-default").nth(day - 1).click() # set day
self.page.wait_for_element_invisibility("#ui-datepicker-div", "datepicker should be closed")
EmptyPromise(
lambda: getattr(self, property_name) == u'{m}/{d}/{y}'.format(m=month, d=day, y=year),
"{} is updated in modal.".format(property_name)
).fulfill()
@property
def release_date(self):
return self.find_css("#start_date").first.attrs('value')[0]
@release_date.setter
def release_date(self, date):
"""
Date is "mm/dd/yyyy" string.
"""
self.set_date('release_date', "#start_date", date)
@property
def due_date(self):
return self.find_css("#due_date").first.attrs('value')[0]
@due_date.setter
def due_date(self, date):
"""
Date is "mm/dd/yyyy" string.
"""
self.set_date('due_date', "#due_date", date)
@property
def policy(self):
"""
Return the text of the currently selected grading format.
"""
element = self.find_css('#grading_type')[0]
return self.get_selected_option_text(element)
@policy.setter
def policy(self, grading_label):
"""
Select the grading format with `value` in the drop-down list.
"""
element = self.find_css('#grading_type')[0]
select = Select(element)
select.select_by_visible_text(grading_label)
EmptyPromise(
lambda: self.policy == grading_label,
"Grading label is updated.",
).fulfill()
@property
def is_explicitly_locked(self):
"""
Returns true if the explict staff lock checkbox is checked, false otherwise.
"""
return self.find_css('#staff_lock')[0].is_selected()
@is_explicitly_locked.setter
def is_explicitly_locked(self, value):
"""
Checks the explicit staff lock box if value is true, otherwise unchecks the box.
"""
if value != self.is_explicitly_locked:
self.find_css('label[for="staff_lock"]').click()
EmptyPromise(lambda: value == self.is_explicitly_locked, "Explicit staff lock is updated").fulfill()
def shows_staff_lock_warning(self):
"""
Returns true iff the staff lock warning is visible.
"""
return self.find_css('.staff-lock .tip-warning').visible
def get_selected_option_text(self, element):
"""
Returns the text of the first selected option for the element.
"""
if element:
select = Select(element)
return select.first_selected_option.text
else:
return None
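# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original page objects): how a bok-choy
# acceptance test might drive the outline page. The `browser` fixture and the
# course identifiers are assumed to come from the surrounding test case.
#
#   outline = CourseOutlinePage(browser, course_org, course_num, course_run).visit()
#   outline.add_section_from_top_button()
#   section = outline.section_at(0)
#   section.change_name("Week 1")
#   modal = section.edit()
#   modal.release_date = "01/01/2017"
#   modal.save()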
|
wwj718/ANALYSE
|
common/test/acceptance/pages/studio/overview.py
|
Python
|
agpl-3.0
| 23,238
|
[
"VisIt"
] |
bf0929ce4e361d1f4b45c6747b2040d698385b11ef3c4c0adc487501286978bd
|
# Copyright (c) 2015-2016 Claudiu Popa <pcmanticore@gmail.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Micro reports objects.
A micro report is a tree of layout and content objects.
"""
from six import string_types
class VNode(object):
def __init__(self, nid=None):
self.id = nid
# navigation
self.parent = None
self.children = []
def __iter__(self):
return iter(self.children)
def append(self, child):
"""add a node to children"""
self.children.append(child)
child.parent = self
def insert(self, index, child):
"""insert a child node"""
self.children.insert(index, child)
child.parent = self
def _get_visit_name(self):
"""
return the visit name for the mixed class. When calling 'accept', the
method <'visit_' + name returned by this method> will be called on the
visitor
"""
try:
return self.TYPE.replace('-', '_')
except Exception:
return self.__class__.__name__.lower()
def accept(self, visitor, *args, **kwargs):
func = getattr(visitor, 'visit_%s' % self._get_visit_name())
return func(self, *args, **kwargs)
def leave(self, visitor, *args, **kwargs):
func = getattr(visitor, 'leave_%s' % self._get_visit_name())
return func(self, *args, **kwargs)
class BaseLayout(VNode):
"""base container node
attributes
* children : components in this table (i.e. the table's cells)
"""
def __init__(self, children=(), **kwargs):
super(BaseLayout, self).__init__(**kwargs)
for child in children:
if isinstance(child, VNode):
self.append(child)
else:
self.add_text(child)
def append(self, child):
"""overridden to detect problems easily"""
assert child not in self.parents()
VNode.append(self, child)
def parents(self):
"""return the ancestor nodes"""
assert self.parent is not self
if self.parent is None:
return []
return [self.parent] + self.parent.parents()
def add_text(self, text):
"""shortcut to add text data"""
self.children.append(Text(text))
# non container nodes #########################################################
class Text(VNode):
"""a text portion
attributes :
* data : the text value as an encoded or unicode string
"""
def __init__(self, data, escaped=True, **kwargs):
super(Text, self).__init__(**kwargs)
#if isinstance(data, unicode):
# data = data.encode('ascii')
assert isinstance(data, string_types), data.__class__
self.escaped = escaped
self.data = data
class VerbatimText(Text):
"""a verbatim text, display the raw data
attributes :
* data : the text value as an encoded or unicode string
"""
# container nodes #############################################################
class Section(BaseLayout):
"""a section
attributes :
* BaseLayout attributes
a title may also be given to the constructor; it will be added
as the first element
a description may also be given to the constructor; it will be added
as the first paragraph
"""
def __init__(self, title=None, description=None, **kwargs):
super(Section, self).__init__(**kwargs)
if description:
self.insert(0, Paragraph([Text(description)]))
if title:
self.insert(0, Title(children=(title,)))
class Title(BaseLayout):
"""a title
attributes :
* BaseLayout attributes
A title must not contain a section or a paragraph!
"""
class Paragraph(BaseLayout):
"""a simple text paragraph
attributes :
* BaseLayout attributes
A paragraph must not contain a section!
"""
class Table(BaseLayout):
"""some tabular data
attributes :
* BaseLayout attributes
* cols : the number of columns of the table (REQUIRED)
* rheaders : the first row's elements are the table's header
* cheaders : the first col's elements are the table's header
* title : the table's optional title
"""
def __init__(self, cols, title=None,
rheaders=0, cheaders=0,
**kwargs):
super(Table, self).__init__(**kwargs)
assert isinstance(cols, int)
self.cols = cols
self.title = title
self.rheaders = rheaders
self.cheaders = cheaders
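# ----------------------------------------------------------------------------
# Hedged sketch (not part of the original module): building a small report tree
# from the nodes above and walking it via accept(). The counting visitor below
# is a stand-in for illustration, not pylint's own writer classes.
#
#   section = Section(title="Messages", description="Summary of findings")
#   section.append(Table(cols=2, rheaders=1,
#                        children=("module", "errors", "mymod.py", "3")))
#
#   class CountingVisitor(object):
#       def visit_section(self, node):
#           return sum(child.accept(self) for child in node)
#       visit_title = visit_paragraph = visit_table = visit_section
#       def visit_text(self, node):
#           return 1
#
#   section.accept(CountingVisitor())  # -> 6 Text leaves (title, description, 4 cells)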
|
axbaretto/beam
|
sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/reporters/ureports/nodes.py
|
Python
|
apache-2.0
| 4,643
|
[
"VisIt"
] |
9e38310354ebfd5a43d2f79c129dddfcd5655fb8e905a2b608698ed0f9584f35
|
# https://www.hackerrank.com/challenges/ctci-connected-cell-in-a-grid
def DFS(grid, i, j):
# If coordinates exceed bounds, return 0.
if i >= len(grid) or j >= len(grid[0]) or i < 0 or j < 0:
return 0
# If the cell is empty or already visited, return 0.
if grid[i][j] == -1 or grid[i][j] == 0:
return 0
# If we get here, it must be a 1, so count it and mark it visited.
count, grid[i][j] = 1, -1
# Now visit all neighbors recursively and add found areas.
for x in [-1, 0, 1]:
for y in [-1, 0, 1]:
count += DFS(grid, i + x, j + y)
return count
# n, m: dimensions of the grid.
n, m = int(raw_input()), int(raw_input())
# Read grid line by line.
grid = [map(int, raw_input().split()) for _ in range(n)]
# res will hold the max area value.
res = -1
for i in range(n):
for j in range(m):
# Do a DFS from each starting point; cells that are empty
# or already visited are skipped.
if grid[i][j] == 1:
# Store the max area value found.
res = max(res, DFS(grid, i, j))
print res
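# Hedged self-check (not part of the original submission): the largest
# 8-connected region of 1s in this hard-coded grid has area 5. The row-wise
# copy keeps the check from mutating the demo grid in place.
_demo_grid = [[1, 1, 0, 0],
              [0, 1, 1, 0],
              [0, 0, 1, 0],
              [1, 0, 0, 0]]
assert DFS([row[:] for row in _demo_grid], 0, 0) == 5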
|
zubie7a/Algorithms
|
HackerRank/Cracking_The_Coding_Interview/Algorithms/05_DFS_Connected_Cells_In_A_Grid.py
|
Python
|
mit
| 1,102
|
[
"VisIt"
] |
897ffea74daf8322b74039b120a85698451d8af08ac9555f11713227252229fa
|
from sfepy.base.testing import TestCommon
import numpy as nm
from sfepy import data_dir
def tetravolume(cells, vertices):
vol = 0.0
c1 = nm.ones((4,4), dtype=nm.float64)
mul = 1.0 / 6.0
for ic in cells:
c1[:,:3] = vertices[ic,:]
vol += mul * nm.linalg.det(c1)
return -vol
expected_volumes = (1.22460186e-4, 1.46950423e-4, 1.22460186e-4)
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_spbox(self):
"""
Check volume change of the mesh which is deformed using
the SplineBox functions.
"""
from sfepy.discrete.fem import Mesh
from sfepy.mesh.splinebox import SplineBox
mesh = Mesh.from_file(data_dir + '/meshes/3d/cylinder.vtk')
conn = mesh.get_conn('3_4')
vol0 = tetravolume(conn, mesh.coors)
bbox = nm.array(mesh.get_bounding_box()).T
spbox = SplineBox(bbox, mesh.coors)
cpoints0 = spbox.get_control_points(init=True)
for ii in range(4):
for jj in range(4):
spbox.change_shape((0, ii, jj), [-0.02, 0, 0])
coors = spbox.evaluate()
vol1 = tetravolume(conn, coors)
mesh.coors[:] = coors
spbox.set_control_points(cpoints0)
coors = spbox.evaluate()
vol2 = tetravolume(conn, coors)
ok = True
actual_volumes = (vol0, vol1, vol2)
for ii in range(3):
relerr = abs(actual_volumes[ii] - expected_volumes[ii])\
/ expected_volumes[ii]
ok = ok and (relerr < 1e-6)
if not ok:
self.report('expected volumes:')
self.report(expected_volumes)
self.report('actual volumes:')
self.report(actual_volumes)
return ok
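# ----------------------------------------------------------------------------
# Hedged worked example (not part of the original test): tetravolume() appends
# a column of ones, takes det/6 per cell, and returns the *negative* of the
# signed sum, so the reference tetrahedron listed in this order yields +1/6:
#
#   verts = nm.array([[0., 0., 0.],
#                     [1., 0., 0.],
#                     [0., 1., 0.],
#                     [0., 0., 1.]])
#   tetravolume([[0, 1, 2, 3]], verts)  # -> 0.16666... == 1/6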
|
RexFuzzle/sfepy
|
tests/test_splinebox.py
|
Python
|
bsd-3-clause
| 1,843
|
[
"VTK"
] |
e67feca950b3ee601f451468d8021f91277fbadaeb520c3325d4e10fb47ae37b
|
"""
An estimator for modelling data from a mixture of Gaussians,
using an objective function based on minimum message length.
"""
__all__ = [
"GaussianMixture",
"kullback_leibler_for_multivariate_normals",
"responsibility_matrix",
"split_component", "merge_component", "delete_component",
]
import logging
import numpy as np
import scipy
import scipy.stats as stats
import scipy.optimize as op
from collections import defaultdict
from sklearn.cluster import k_means_ as kmeans
logger = logging.getLogger(__name__)
def _total_parameters(K, D, covariance_type):
r"""
Return the total number of model parameters :math:`Q`, if a full
covariance matrix structure is assumed.
.. math:
Q = \frac{K}{2}\left[D(D+3) + 2\right] - 1
:param K:
The number of Gaussian mixtures.
:param D:
The dimensionality of the data.
:returns:
The total number of model parameters, :math:`Q`.
"""
return (0.5 * D * (D + 3) * K) + (K - 1)
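# Hedged worked example (not part of the original source): for K = 3 mixtures
# in D = 2 dimensions with full covariance matrices,
#   Q = K/2 * (D*(D + 3) + 2) - 1 = 3/2 * (2*5 + 2) - 1 = 17,
# which is what _total_parameters(3, 2, "full") evaluates to.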
def _responsibility_matrix(y, mean, covariance, weight, covariance_type):
r"""
Return the responsibility matrix,
.. math::
r_{ij} = \frac{w_{j}f\left(x_i;\theta_j\right)}{\sum_{k=1}^{K}{w_k}f\left(x_i;\theta_k\right)}
where :math:`r_{ij}` denotes the conditional probability of a datum
:math:`x_i` belonging to the :math:`j`-th component. The effective
membership associated with each component is then given by
.. math::
n_j = \sum_{i=1}^{N}r_{ij}
\textrm{and}
\sum_{j=1}^{M}n_{j} = N
where :math:`N` is the total number of observations.
:param y:
The data values, :math:`y`.
:param mean:
The mean values of the :math:`K` multivariate normal distributions.
:param covariance:
The covariance matrices of the :math:`K` multivariate normal
distributions. The shape of this array will depend on the
``covariance_type``.
:param weight:
The current estimates of the relative mixing weights.
:param covariance_type:
The structure of the covariance matrices (e.g., ``full`` or ``diag``).
:returns:
A two-length tuple containing the responsibility matrix and the
log likelihood, evaluated per observation.
"""
precision = _compute_precision_cholesky(covariance, covariance_type)
weighted_log_prob = np.log(weight) + \
_estimate_log_gaussian_prob(y, mean, precision, covariance_type)
log_likelihood = scipy.misc.logsumexp(weighted_log_prob, axis=1)
with np.errstate(under="ignore"):
log_responsibility = weighted_log_prob - log_likelihood[:, np.newaxis]
responsibility = np.exp(log_responsibility).T
return (responsibility, log_likelihood)
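# Note added for clarity (not in the original source): the responsibility
# matrix is returned with shape (K, N) -- one row per component -- while the
# per-observation log likelihood has shape (N,).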
class BaseGaussianMixture(object):
r"""
Model data from (potentially) many multivariate Gaussian distributions,
using minimum message length (MML) as the objective function.
:param covariance_type: [optional]
The structure of the covariance matrix for individual components.
The available options are: `full` for a free covariance matrix, or
`diag` for a diagonal covariance matrix (default: ``full``).
:param covariance_regularization: [optional]
Regularization strength to add to the diagonal of covariance matrices
(default: ``0``).
:param threshold: [optional]
The relative improvement in message length required before stopping an
expectation-maximization step (default: ``1e-3``).
:param max_em_iterations: [optional]
The maximum number of iterations to run per expectation-maximization
loop (default: ``10000``).
"""
parameter_names = ("mean", "covariance", "weight")
def __init__(self, covariance_type="full", covariance_regularization=0,
mixture_probability=1e-3, percent_scatter=1, predict_mixtures=3,
threshold=1e-3, max_em_iterations=10000, **kwargs):
available = ("full", "diag", )
covariance_type = covariance_type.strip().lower()
if covariance_type not in available:
raise ValueError("covariance type '{}' is invalid. "\
"Must be one of: {}".format(
covariance_type, ", ".join(available)))
if 0 > covariance_regularization:
raise ValueError(
"covariance_regularization must be a non-negative float")
if 0 >= threshold:
raise ValueError("threshold must be a positive value")
if 1 > max_em_iterations:
raise ValueError("max_em_iterations must be a positive integer")
self._threshold = threshold
self._mixture_probability = mixture_probability
self._percent_scatter = percent_scatter
self._predict_mixtures = predict_mixtures
self._max_em_iterations = max_em_iterations
self._covariance_type = covariance_type
self._covariance_regularization = covariance_regularization
return None
@property
def mean(self):
r""" Return the multivariate means of the Gaussian mixtures. """
return self._mean
@property
def covariance(self):
r""" Return the covariance matrices of the Gaussian mixtures. """
return self._covariance
@property
def weight(self):
r""" Return the relative weights of the Gaussian mixtures. """
return self._weight
@property
def covariance_type(self):
r""" Return the type of covariance stucture assumed. """
return self._covariance_type
@property
def covariance_regularization(self):
r"""
Return the regularization applied to diagonals of covariance matrices.
"""
return self._covariance_regularization
@property
def threshold(self):
r""" Return the threshold improvement required in message length. """
return self._threshold
@property
def max_em_iterations(self):
r""" Return the maximum number of expectation-maximization steps. """
return self._max_em_iterations
def _expectation(self, y, **kwargs):
r"""
Perform the expectation step of the expectation-maximization algorithm.
:param y:
The data values, :math:`y`.
:returns:
A three-length tuple containing the responsibility matrix,
the log likelihood, and the change in message length.
"""
responsibility, log_likelihood = _responsibility_matrix(
y, self.mean, self.covariance, self.weight, self.covariance_type)
ll = np.sum(log_likelihood)
I = _message_length(y, self.mean, self.covariance, self.weight,
responsibility, -ll, self.covariance_type,
**kwargs)
return (responsibility, log_likelihood, I)
def _maximization(self, y, responsibility, parent_responsibility=1,
**kwargs):
r"""
Perform the maximization step of the expectation-maximization
algorithm.
:param y:
The data values, :math:`y`.
# TODO
:param responsibility:
The responsibility matrix for all :math:`N` observations being
partially assigned to each :math:`K` component.
# TODO
"""
K = self.weight.size
N, D = y.shape
# Update the weights.
effective_membership = np.sum(responsibility, axis=1)
weight = (effective_membership + 0.5)/(N + K/2.0)
w_responsibility = parent_responsibility * responsibility
w_effective_membership = np.sum(w_responsibility, axis=1)
mean = np.empty(self.mean.shape)
for k, (R, Nk) in enumerate(zip(w_responsibility, w_effective_membership)):
mean[k] = np.sum(R * y.T, axis=1) / Nk
# TODO: Use parent responsibility when initializing?
covariance = _estimate_covariance_matrix(y, responsibility, mean,
self.covariance_type, self.covariance_regularization)
# TODO: callback?
return self.set_parameters(
weight=weight, mean=mean, covariance=covariance)
def _expectation_maximization(self, y, responsibility=None, **kwargs):
r"""
Run the expectation-maximization algorithm on the current set of
multivariate Gaussian mixtures.
:param y:
A :math:`N\times{}D` array of the observations :math:`y`,
where :math:`N` is the number of observations, and :math:`D` is the
number of dimensions per observation.
:param responsibility: [optional]
The responsibility matrix for all :math:`N` observations being
partially assigned to each :math:`K` component. If ``None`` is given
then the responsibility matrix will be calculated in the first
expectation step.
"""
# Calculate log-likelihood and initial expectation step.
__init_responsibility, ll, dl = self._expectation(y, **kwargs)
if responsibility is None:
responsibility = __init_responsibility
ll_dl = [(ll.sum(), dl)]
meta = dict(warnflag=False)
for iteration in range(self.max_em_iterations):
# M-step.
self._maximization(y, responsibility, **kwargs)
# E-step.
responsibility, ll, dl = self._expectation(y, **kwargs)
# Check for convergence.
lls = ll.sum()
prev_ll, prev_dl = ll_dl[-1]
change = (lls - prev_ll)/prev_ll
ll_dl.append([lls, dl])
#print("E-M", iteration, change, self.threshold)
if abs(change) <= self.threshold:
break
else:
meta.update(warnflag=True)
logger.warn("Maximum number of E-M iterations reached ({})"\
.format(self.max_em_iterations))
meta.update(log_likelihood=lls, message_length=dl)
return (responsibility, meta)
@property
def parameters(self):
return dict([(k, getattr(self, k, None)) for k in self.parameter_names])
def set_parameters(self, **kwargs):
r"""
Set specific parameters.
"""
invalid_params = set(self.parameter_names).difference(kwargs.keys())
if invalid_params:
raise ValueError(
"unknown parameters: {}".format(", ".join(invalid_params)))
for parameter_name, value in kwargs.items():
setattr(self, "_{}".format(parameter_name), value)
return kwargs
class GaussianMixture(BaseGaussianMixture):
def __init__(self, **kwargs):
super(GaussianMixture, self).__init__(**kwargs)
# For predictions.
self._proposed_mixtures = []
# Store the following summary pieces of information about mixtures.
# (1) Sum of the log of the determinant of the covariance matrices.
# (2) The sum of the log-likelihood.
# (3) The sum of the log of the weights.
# Do we want this from each E-M step, or all steps?
# (K, sum_log_weights, sum_log_likelihood, sum_log_det_covariances)
self._mixture_predictors = []
def _optimize_split_mixture(self, y, responsibility, component_index):
r"""
Split a component from the current mixture, and run partial
expectation-maximization algorithm on the split component.
"""
U, S, V = _svd(self.covariance[component_index], self.covariance_type)
split_mean = self.mean[component_index] \
+ np.vstack([+V[0], -V[0]]) * S[0]**0.5
# Responsibilities are initialized by allocating the data points to
# the closest of the two means.
distance = np.sum((y[:, :, None] - split_mean.T)**2, axis=1).T
N, D = y.shape
split_responsibility = np.zeros((2, N))
split_responsibility[np.argmin(distance, axis=0), np.arange(N)] = 1.0
# Calculate the child covariance matrices.
split_covariance = _estimate_covariance_matrix(
y, split_responsibility, split_mean,
self.covariance_type, self.covariance_regularization)
split_effective_membership = np.sum(split_responsibility, axis=1)
split_weight = split_effective_membership.T \
/ np.sum(split_effective_membership)
# Integrate the split components with the existing mixture.
parent_weight = self.weight[component_index]
parent_responsibility = responsibility[component_index]
mixture = self.__class__(
threshold=self.threshold,
covariance_type=self.covariance_type,
max_em_iterations=self.max_em_iterations,
covariance_regularization=self.covariance_regularization)
# Initialize it.
mixture.set_parameters(mean=split_mean, weight=split_weight,
covariance=split_covariance)
# Run E-M on the partial mixture.
R, meta = mixture._expectation_maximization(
y, parent_responsibility=responsibility[component_index])
if self.weight.size > 1:
# Integrate the partial mixture with the full mixture.
weight = np.hstack([self.weight,
[parent_weight * mixture.weight[1]]])
weight[component_index] = parent_weight * mixture.weight[0]
mean = np.vstack([self.mean, [mixture.mean[1]]])
mean[component_index] = mixture.mean[0]
covariance = np.vstack([self.covariance, [mixture.covariance[1]]])
covariance[component_index] = mixture.covariance[0]
responsibility = np.vstack([responsibility,
[parent_responsibility * R[1]]])
responsibility[component_index] = parent_responsibility * R[0]
mixture.set_parameters(
mean=mean, covariance=covariance, weight=weight)
R, meta = mixture._expectation_maximization(
y, responsibility=responsibility)
# Store the mixture.
slogdet = np.sum(_slogdet(mixture.covariance, mixture.covariance_type))
self._proposed_mixtures.append(mixture)
self._mixture_predictors.append([
mixture.weight.size,
np.sum(np.log(mixture.weight)),
meta["log_likelihood"],
slogdet,
-meta["log_likelihood"] + (D+2)/2.0 * slogdet
])
# TODO: Remove predictors that we don't use.
#self._slogs.append(np.linalg.det(mixture.covariance))
return (len(self._proposed_mixtures) - 1, R, meta)
# Run
kwds = dict(
threshold=self._threshold,
max_em_iterations=self._max_em_iterations,
covariance_type=self._covariance_type,
covariance_regularization=self._covariance_regularization)
# Run E-M on the split mixture, keeping all else fixed.
#(dict(mean=mu, covariance=cov, weight=weight), responsibility, meta, dl)
params, R, meta, dl = _expectation_maximization(y, split_mean, split_covariance,
split_weight, responsibility=split_responsibility,
parent_responsibility=parent_responsibility,
**kwds)
if self.weight.size > 1:
# Integrate the child mixtures back.
weight = np.hstack([self.weight, [parent_weight * params["weight"][1]]])
weight[component_index] = parent_weight * params["weight"][0]
mean = np.vstack([self.mean, [params["mean"][1]]])
mean[component_index] = params["mean"][0]
covariance = np.vstack([self.covariance, [params["covariance"][1]]])
covariance[component_index] = params["covariance"][0]
responsibility = np.vstack([responsibility,
[parent_responsibility * R[1]]])
responsibility[component_index] \
= parent_responsibility * R[0]
return _expectation_maximization(y, mean, covariance, weight,
responsibility=responsibility, **kwds)
else:
return (params, R, meta, dl)
def _initialize_parameters(self, y, **kwargs):
r"""
Return initial estimates of the parameters.
:param y:
The data values, :math:`y`.
# TODO COMMON DOCS
"""
# If you *really* know what you're doing, then you can give your own.
if kwargs.get("__initialize", None) is not None:
logger.warn("Using specified initialization point.")
return self.set_parameters(**kwargs.pop("__initialize"))
weight = np.ones(1)
mean = np.mean(y, axis=0).reshape((1, -1))
N, D = y.shape
covariance = _estimate_covariance_matrix(y, np.ones((1, N)), mean,
self.covariance_type, self.covariance_regularization)
# Set parameters.
return self.set_parameters(
weight=weight, mean=mean, covariance=covariance)
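# Note added for clarity (not in the original source): initialization always
# starts from a single component (K = 1) whose mean and covariance are the
# sample mean and covariance of the data; the search below only grows the
# mixture from there by splitting (or jumping), unless an explicit
# `__initialize` point is supplied.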
def _predict_message_length_change(self, K, N, lower_bound_sigma=5):
r"""
Predict the minimum message length of a target mixture of K Gaussian
distributions, where K is an integer larger than the current mixture.
:param K:
The target number of Gaussian distributions. This must be an
integer value larger than the current number of Gaussian mixtures.
:returns:
The expected change in message length, the scatter on that prediction,
and a lower bound on the change (element-wise for each target ``K``).
"""
current_K, D = self.mean.shape
#K = current_K + 1 if K is None else int(K)
K = np.atleast_1d(K)
if np.any(current_K >= K):
raise ValueError(
"the target K mixture must contain more Gaussians than the "\
"current mixture ({} > {})".format(K, current_K))
predictors = np.array(self._mixture_predictors)
kwds = dict(target_K=K, predictors=predictors)
dK = K - current_K
slw_expectation, slw_variance, slw_upper \
= self._approximate_sum_log_weights(**kwds)
# Now approximate the sum of the negative log-likelihood, minus the
# sum of the log of the determinant of the covariance matrices.
nll_mslogdetcov_expectation, nll_mslogdetcov_variance \
= self._approximate_nllpslogdetcov(**kwds)
# Calculate the change in message length.
current_ll = np.max(predictors.T[2][predictors.T[0] == current_K])
slogdet = _slogdet(self.covariance, self.covariance_type)
dI_expectation = dK * (
(1 - D/2.0)*np.log(2) + 0.25 * (D*(D+3) + 2)*np.log(N/(2*np.pi))) \
+ 0.5 * (D*(D+3)/2.0 - 1) * (slw_expectation - np.sum(np.log(self.weight))) \
- np.array([np.sum(np.log(current_K + np.arange(_))) for _ in dK])\
+ 0.5 * np.log(_total_parameters(K, D, self.covariance_type)/float(_total_parameters(current_K, D, self.covariance_type))) \
- (D + 2)/2.0 * (np.sum(slogdet)) \
+ current_ll + nll_mslogdetcov_expectation
dI_scatter = nll_mslogdetcov_variance**0.5
dI_lower_bound = dK * (
(1 - D/2.0)*np.log(2) + 0.25 * (D*(D+3) + 2)*np.log(N/(2*np.pi))) \
+ 0.5 * (D*(D+3)/2.0 - 1) * (slw_upper - np.sum(np.log(self.weight))) \
- np.array([np.sum(np.log(current_K + np.arange(_))) for _ in dK])\
+ 0.5 * np.log(_total_parameters(K, D, self.covariance_type)/float(_total_parameters(current_K, D, self.covariance_type))) \
- (D + 2)/2.0 * (np.sum(slogdet)) \
+ current_ll + nll_mslogdetcov_expectation \
- lower_bound_sigma * dI_scatter
result = (dI_expectation, dI_scatter, dI_lower_bound)
return result if np.array(dK).size > 1 \
else tuple([_[0] for _ in result])
def _approximate_sum_log_weights(self, target_K, predictors=None):
r"""
Return an approximate expectation of the function:
.. math:
\sum_{k=1}^{K}\log{w_k}
Where :math:`K` is the number of mixtures, and :math:`w` is a multinomial
distribution. The approximating function is:
.. math:
\sum_{k=1}^{K}\log{w_k} \approx -K\log{K}
:param target_K:
The number of target Gaussian mixtures.
"""
if predictors is None:
predictors = np.array(self._mixture_predictors)
k, slw = (predictors.T[0], predictors.T[1])
# Upper bound.
upper_bound = lambda k, c=0: -k * np.log(k) + c
#upper = -target_K * np.log(target_K)
# Some expectation value.
if 2 > len(k):
# Don't provide an expectation value.
expectation = upper_bound(target_K)
variance \
= np.abs(upper_bound(target_K**2) - upper_bound(target_K)**2)
else:
lower_values = [[k[0], slw[0]]]
for k_, slw_ in zip(k[1:], slw[1:]):
if k_ == lower_values[-1][0] and slw_ < lower_values[-1][1]:
lower_values[-1][1] = slw_
elif k_ > lower_values[-1][0]:
lower_values.append([k_, slw_])
lower_values = np.array(lower_values)
function = lambda x, *p: -x * p[0] * np.log(x) + p[1]
# Expectation, from the best that can be done.
exp_params, exp_cov = op.curve_fit(
function, lower_values.T[0], lower_values.T[1], p0=[1, 0])
expectation = function(target_K, *exp_params)
#exp_params, exp_cov = op.curve_fit(function, k, slw, p0=[1, 0])
#expectation = function(target_K, *exp_params)
variance = 0.0
return (expectation, variance, upper_bound(target_K))
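# Hedged worked note (not part of the original source): the bound used above is
# tight for uniform weights, since w_k = 1/K gives sum_k log(w_k) = -K*log(K)
# exactly, and any non-uniform weighting can only lower the sum.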
def _approximate_nllpslogdetcov(self, target_K, predictors=None,
draws=100):
r"""
Approximate the function:
.. math:
-\sum_{n=1}^{N}\log\sum_{k=1}^{K+\Delta{}K}w_{k}f_{k}(y_{n}|\mu_k,C_k) + \frac{(D + 2)}{2}\sum_{k=1}^{(K + \Delta{}K)}\log{|C_k|^{(K+\Delta{}K)}}
"""
if predictors is None:
predictors = np.array(self._mixture_predictors)
k, y = (predictors.T[0], predictors.T[-1])
k = np.unique(predictors.T[0])
y = np.empty(k.shape)
yerr = np.empty(k.shape)
for i, k_ in enumerate(k):
match = (predictors.T[0] == k_)
values = np.log(predictors.T[-1][match])
y[i] = np.median(values)
yerr[i] = np.std(values)
# The zero-th entry of yerr corresponds to K = 2, where we only have one
# estimate of y, so its standard deviation is zero.
#yerr[0] = yerr[1]
yerr[yerr==0] = np.max(yerr)
f = lambda x, *p: np.polyval(p, x)
p0 = np.zeros(2)
#p0 = np.array([-1, y[0]])
#f = lambda x, *p: np.polyval(p, 1.0/x)
#p0 = np.hstack([1, np.zeros(min(k.size - 2, 3))])
op_params, op_cov = op.curve_fit(f, k, y,
p0=p0, sigma=yerr, absolute_sigma=True)
"""
if target_K[0] >= 16:
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.scatter(k, y)
ax.fill_between(k, y - yerr, y + yerr, alpha=0.5, zorder=-1)
op_params, op_cov = op.curve_fit(
f, k, y, p0=p0, sigma=yerr, absolute_sigma=True)
ax.plot(k, f(k, *op_params))
for i in range(4, k.size + 1):
op_params, op_cov = op.curve_fit(
f, k[:i], y[:i], p0=p0, sigma=yerr[:i], absolute_sigma=True)
ax.plot(k[i:] + 1, [f(_, *op_params) for _ in k[i:] + 1], c="g")
v = np.array([f(_, *op_params) for _ in k[i:] + 1])
stds = np.array([np.std(f(_, *(np.random.multivariate_normal(op_params, op_cov, size=100).T))) for _ in k[i:] + 1])
assert np.all(np.isfinite(stds))
ax.fill_between(k[i:] + 1, v - stds, v + stds, facecolor="g",
alpha=0.5)
#log_y = np.empty(k.shape)
#log_yerr = np.empty(k.shape)
#for i, k_ in enumerate(k):
# match = (predictors.T[0] == k_)
# values = np.log(predictors.T[-1][match])
# y[i] = np.median(values)
# yerr[i] = np.std(values)
#fig, ax = plt.subplots()
#ax.scatter(k, y)
#ax.scatter(k, y + yerr, facecolor="g")
raise a
"""
exp_f = lambda x, *p: np.exp(f(x, *p))
target_K = np.atleast_1d(target_K)
expectation = np.array([exp_f(tk, *op_params) for tk in target_K])
if not np.all(np.isfinite(op_cov)):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
x = k
ax.scatter(x, y)
ax.scatter(x, y + yerr, facecolor="g")
ax.plot(x, f(x, *op_params), c='r')
fig, ax = plt.subplots()
ax.scatter(x, np.exp(y))
ax.scatter(x, np.exp(y + yerr), facecolor='g')
ax.plot(target_K, expectation, c='r')
ax.plot(x, exp_f(x, *op_params), c='m')
variance = np.array([np.var(exp_f(tk,
*(np.random.multivariate_normal(op_params, op_cov, size=draws).T)))
for tk in target_K])
ax.fill_between(target_K, expectation - variance**0.5, expectation + variance**0.5, facecolor='r', alpha=0.5)
raise a
variance = np.array([np.var(exp_f(tk,
*(np.random.multivariate_normal(op_params, op_cov, size=draws).T)))
for tk in target_K])
"""
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
x = k
ax.scatter(x, np.exp(y))
ax.scatter(x, np.exp(y + yerr), facecolor='g')
ax.plot(target_K, expectation, c='r')
ax.fill_between(target_K, expectation - variance**0.5, expectation + variance**0.5, facecolor='r', alpha=0.5)
raise a
"""
return (expectation, variance)
def _ftl_jump(self, y, K, **kwargs):
r"""
Jump to a completely new mixture with K Gaussian components.
"""
logger.debug("Re-initializing with K-means++ at K = {}".format(K))
# Initialize new centroids by k-means++
mixtures = []
mls = []
for z in range(30):
mean = kmeans._k_init(y, K, kmeans.row_norms(y, squared=True),
kmeans.check_random_state(None))
# Calculate weights by L2 distances to closest centers.
distance = np.sum((y[:, :, None] - mean.T)**2, axis=1).T
N, D = y.shape
responsibility = np.zeros((K, N))
responsibility[np.argmin(distance, axis=0), np.arange(N)] = 1.0
weight = responsibility.sum(axis=1)/N
covariance = _estimate_covariance_matrix(y, responsibility, mean,
self.covariance_type, self.covariance_regularization)
mixture = self.__class__(
threshold=self.threshold,
covariance_type=self.covariance_type,
max_em_iterations=self.max_em_iterations,
covariance_regularization=self.covariance_regularization)
# Initialize it.
mixture.set_parameters(mean=mean, weight=weight, covariance=covariance)
# Run E-M on the partial mixture.
R, meta = mixture._expectation_maximization(
y, parent_responsibility=responsibility)
raise UnsureError
mixtures.append(mixture)
mls.append(meta["message_length"])
print(np.std(mls))
index = np.argmin(mls)
mixture = mixtures[index]
#slogdet = np.sum(np.log(np.linalg.det(mixture.covariance)))
slogdet = np.sum(_slogdet(mixture.covariance, mixture.covariance_type))
self._proposed_mixtures.append(mixture)
self._mixture_predictors.append([
mixture.weight.size,
np.sum(np.log(mixture.weight)),
meta["log_likelihood"],
slogdet,
-meta["log_likelihood"] + (D+2)/2.0 * slogdet
])
# TODO: Remove predictors that we don't use.
#self._slogs.append(np.linalg.det(mixture.covariance))
return mixture, R, meta #(len(self._proposed_mixtures) - 1, R, meta)
raise a
#self.set_parameters(
# weight=weight, mean=mean, covariance=covariance)
#return responsibility
def _merge_component_with_closest_component(self, y, responsibility, index, **kwargs):
# Find the component most similar to `index` so that the two can be merged.
index_b = _index_of_most_similar_component(y, self.mean, self.covariance, index)
R, meta, mixture = _merge_components(y, self.mean, self.covariance, self.weight,
responsibility, index, index_b, **kwargs)
return mixture, R, meta
def _component_kl_distances(self):
r"""
Calculate the K-L distances for all current components.
"""
K = self.weight.size
if K == 1: return ([])
kl = np.inf * np.ones((K, K))
for i in range(K):
for j in range(i + 1, K):
kl[i, j] = kullback_leibler_for_multivariate_normals(
self.mean[i], self.covariance[i],
self.mean[j], self.covariance[j])
kl[j, i] = kullback_leibler_for_multivariate_normals(
self.mean[j], self.covariance[j],
self.mean[i], self.covariance[i])
# Best for each *from*.
indices = list(zip(*(np.arange(K), np.argsort(kl, axis=1).T[0])))
_ = np.array(indices).T
sorted_indices = np.argsort(kl[_[0], _[1]])
return tuple([indices[_] for _ in sorted_indices if indices[_][0] != indices[_][1]])
return foo
def _optimize_merge_mixture(self, y, responsibility, a_index):
b_index = _index_of_most_similar_component(y,
self.mean, self.covariance, a_index)
# Initialize.
weight_k = np.sum(self.weight[[a_index, b_index]])
responsibility_k = np.sum(responsibility[[a_index, b_index]], axis=0)
effective_membership_k = np.sum(responsibility_k)
mean_k = np.sum(responsibility_k * y.T, axis=1) / effective_membership_k
covariance_k = _estimate_covariance_matrix(
y, np.atleast_2d(responsibility_k), np.atleast_2d(mean_k),
self.covariance_type, self.covariance_regularization)
# Delete the b-th component.
del_index = np.max([a_index, b_index])
keep_index = np.min([a_index, b_index])
new_mean = np.delete(self.mean, del_index, axis=0)
new_covariance = np.delete(self.covariance, del_index, axis=0)
new_weight = np.delete(self.weight, del_index, axis=0)
new_responsibility = np.delete(responsibility, del_index, axis=0)
new_mean[keep_index] = mean_k
new_covariance[keep_index] = covariance_k
new_weight[keep_index] = weight_k
new_responsibility[keep_index] = responsibility_k
mixture = self.__class__(
threshold=1e-3,  # MAGIC: should arguably be self.threshold
covariance_type=self.covariance_type,
max_em_iterations=self.max_em_iterations,
covariance_regularization=self.covariance_regularization)
mixture.set_parameters(mean=new_mean, weight=new_weight,
covariance=new_covariance)
R, meta = mixture._expectation_maximization(
y, responsibility=new_responsibility)
#R, ll, I = mixture._expectation(y)
#meta = {"log_likelihood": ll.sum(), "message_length": I}
N, D = y.shape
# Store the mixture.
#slogdet = np.sum(np.log(np.linalg.det(mixture.covariance)))
slogdet = np.sum(_slogdet(mixture.covariance, mixture.covariance_type))
self._proposed_mixtures.append(mixture)
self._mixture_predictors.append([
mixture.weight.size,
np.sum(np.log(mixture.weight)),
meta["log_likelihood"],
slogdet,
-meta["log_likelihood"] + (D+2)/2.0 * slogdet
])
# TODO: Remove predictors that we don't use.
#self._slogs.append(np.linalg.det(mixture.covariance))
return (len(self._proposed_mixtures) - 1, R, meta)
raise a
def _consider_merging_components(self, y, responsibility, current_I):
for i, j in self._component_kl_distances():
# Initialize the merge.
weight_k = np.sum(self.weight[[i, j]])
responsibility_k = np.sum(responsibility[[i, j]], axis=0)
effective_membership_k = np.sum(responsibility_k)
mean_k = np.sum(responsibility_k * y.T, axis=1) / effective_membership_k
covariance_k = _estimate_covariance_matrix(
y, np.atleast_2d(responsibility_k), np.atleast_2d(mean_k),
self.covariance_type, self.covariance_regularization)
del_index = np.max([i, j])
keep_index = np.min([i, j])
new_mean = np.delete(self.mean, del_index, axis=0)
new_covariance = np.delete(self.covariance, del_index, axis=0)
new_weight = np.delete(self.weight, del_index, axis=0)
new_responsibility = np.delete(responsibility, del_index, axis=0)
new_mean[keep_index] = mean_k
new_covariance[keep_index] = covariance_k
new_weight[keep_index] = weight_k
new_responsibility[keep_index] = responsibility_k
mixture = self.__class__(
threshold=1e-3,  # MAGIC: should arguably be self.threshold
covariance_type=self.covariance_type,
max_em_iterations=self.max_em_iterations,
covariance_regularization=self.covariance_regularization)
mixture.set_parameters(mean=new_mean, weight=new_weight,
covariance=new_covariance)
# Calculate message length.
R, ll, I = mixture._expectation(y)
logger.debug("Considered merging {} {} --> {}".format(i, j, I))
if I < current_I:
logger.debug("omg this is better! ({} < {})".format(
I, current_I))
# Run E-M on this.
R, meta = mixture._expectation_maximization(
y, responsibility=R)
N, D = y.shape
# Store the mixture.
#slogdet = np.sum(np.log(np.linalg.det(mixture.covariance)))
slogdet = np.sum(_slogdet(mixture.covariance, mixture.covariance_type))
self._proposed_mixtures.append(mixture)
self._mixture_predictors.append([
mixture.weight.size,
np.sum(np.log(mixture.weight)),
meta["log_likelihood"],
slogdet,
-meta["log_likelihood"] + (D+2)/2.0 * slogdet
])
return mixture
# (len(self._proposed_mixtures) - 1, R, meta)
else:
logger.debug(
"Considered merging, but nothing immediately looked great")
return None
def lognormal_search(self, y, **kwargs):
N, D = y.shape
dist = scipy.stats.lognorm(1, loc=0, scale=(N/2.0)**0.5)
initial_draws = 10
initial_k = []
while initial_draws > len(initial_k):
K = int(np.round(dist.rvs()))
if K not in initial_k:
initial_k.append(K)
# Instead should we just keep drawing until we have a good prediction?
# TODO
# Propose some mixtures.
row_norms = kmeans.row_norms(y, squared=True)
initial_k = np.repeat(np.arange(1, 21), 10)
initial_k = np.arange(1, 200)
print("INITIALS", initial_k)
mixtures = []
mls = []
for K in initial_k:
logger.debug("Trialling k = {}".format(K))
mean = kmeans._k_init(y, K, row_norms,
kmeans.check_random_state(None))
# Calculate weights by L2 distances to closest centers.
distance = np.sum((y[:, :, None] - mean.T)**2, axis=1).T
N, D = y.shape
responsibility = np.zeros((K, N))
responsibility[np.argmin(distance, axis=0), np.arange(N)] = 1.0
weight = responsibility.sum(axis=1)/N
try:
covariance = _estimate_covariance_matrix(y, responsibility, mean,
self.covariance_type, self.covariance_regularization)
mixture = self.__class__(
threshold=self.threshold,
covariance_type=self.covariance_type,
max_em_iterations=self.max_em_iterations,
covariance_regularization=self.covariance_regularization)
# Initialize it.
mixture.set_parameters(mean=mean, weight=weight, covariance=covariance)
# Run E-M on the partial mixture.
R, meta = mixture._expectation_maximization(y)
except:
print("FAILED")
mls.append(np.nan)
mixtures.append(None)
else:
# y, responsibility=responsibility)
# Record the message length.
mls.append(meta["message_length"])
mixtures.append(mixture)
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.scatter(initial_k, mls)
raise a
def search(self, y, **kwargs):
r"""
Simultaneously perform model selection and parameter estimation for an
unknown number of multivariate Gaussian distributions.
:param y:
A :math:`N\times{}D` array of the observations :math:`y`,
where :math:`N` is the number of observations, and :math:`D` is
the number of dimensions per observation.
"""
# Initialize.
# --> Start on "splitting_mode"
# --> If we hyperjump, then try merging mode.
N, D = y.shape
# Initialize the mixture.
self._initialize_parameters(y, **kwargs)
R, ll, I = self._expectation(y, **kwargs)
converged, just_jumped = (False, False)
while not converged:
while True:
K = self.weight.size
logger.debug("State: {} {}".format(K, I))
# Do a very fast scan of component merging.
mixture = self._consider_merging_components(y, R, I)
if mixture is not None:
logger.debug("ACCEPTED A FAST MERGE")
self.set_parameters(**mixture.parameters)
R, ll, I = self._expectation(y, **kwargs)
break
if just_jumped or K > 1:
# Try to merge components.
best_merge = []
for k in range(K):
try:
idx, _, meta = self._optimize_merge_mixture(y, R, k)
except:
continue
logger.debug("Merging: {} {} {}".format(K, k, meta))
if k == 0 \
or best_merge[-1] > meta["message_length"]:
best_merge = [idx, meta["message_length"]]
# TODO: Run E-M each time?
if best_merge[-1] < I:
idx, I = best_merge
mixture = self._proposed_mixtures[idx]
self.set_parameters(**mixture.parameters)
R, ll, I = self._expectation(y, **kwargs)
# TODO: Consider hyperjump?
continue
else:
just_jumped = False
else:
# Split all components.
best_split = []
for k in range(K):
idx, _, meta = self._optimize_split_mixture(y, R, k)
logger.debug("Splitting: {} {} {}".format(K, k, meta))
if k == 0 \
or best_split[-1] > meta["message_length"]:
best_split = [idx, meta["message_length"]]
if best_split[-1] < I:
idx, I = best_split
mixture = self._proposed_mixtures[idx]
self.set_parameters(**mixture.parameters)
R, ll, I = self._expectation(y, **kwargs)
else:
converged = True
break
# Consider hyperjump.
if self.weight.size > 2:
K = self.weight.size
K_dK = K + np.arange(1, self._predict_mixtures)
dI, pI_scatter, dI_lower \
= self._predict_message_length_change(K_dK, N)
pI = I + dI
logger.debug("Actual: {}".format(I))
logger.debug("Prediction for next mixture: {}".format(I + dI[0]))
logger.debug("Predicted lower bound for next mixture: {}".format(I + dI_lower[0]))
logger.debug("Predicted delta for next mixture: {} {}".format(dI[0], pI_scatter[0]))
logger.debug("K = {}".format(self.weight.size))
ommp = 1 - self._mixture_probability
acceptable_jump \
= (abs(100 * pI_scatter/pI) < self._percent_scatter) \
* (stats.norm(dI, pI_scatter).cdf(0) > self._mixture_probability)
if any(acceptable_jump):
K_jump = K_dK[np.where(acceptable_jump)[0]]
# If the jumps are noisy, be conservative.
idx = np.where(np.diff(K_jump) > 1)[0]
idx = idx[0] if len(idx) > 0 else -1
K_jump = K_jump[idx]
if K_jump - K > 1:
logger.debug(
"We should JUMP to K = {}!".format(K_jump))
mixture, _, meta = self._ftl_jump(y, K_jump)
logger.debug("New meta: {}".format(meta))
if meta["message_length"] < I:
# Set the current mixture.
self.set_parameters(**mixture.parameters)
R, ll, I = self._expectation(y, **kwargs)
just_jumped = True
else:
# This is a bad jump, so don't accept it.
pass
# I think we are converged.
elif best_split[-1] > I:
converged = True
break
import matplotlib.pyplot as plt
logger.debug("Ended at K = {}".format(self.weight.size))
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
fig, ax = plt.subplots()
ax.scatter(y.T[0], y.T[1], facecolor="#666666", alpha=0.5)
K = self.weight.size
for k in range(K):
mean = self.mean[k][:2]
cov = self.covariance[k]
vals, vecs = np.linalg.eigh(cov[:2, :2])
order = vals.argsort()[::-1]
vals = vals[order]
vecs = vecs[:,order]
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
width, height = 2 * 1 * np.sqrt(vals)
ellip = Ellipse(xy=mean, width=width, height=height, angle=theta,
facecolor="r", alpha=0.5)
ax.add_artist(ellip)
ax.scatter([mean[0]], [mean[1]], facecolor="r")
fig, ax = plt.subplots()
K = self.weight.size
K_dK = K + np.arange(1, self._predict_mixtures)
dI, pI_scatter, dI_lower \
= self._predict_message_length_change(K_dK, N)
pI = I + dI
ax.scatter(K_dK, pI)
ax.scatter(K_dK, pI + dI_lower, facecolor="r")
raise a
fig, axes = plt.subplots(2)
axes[0].scatter(y.T[0], y.T[1])
axes[1].scatter(y.T[0], y.T[2])
raise a
"""
# Delete all components.
K = self.weight.size
best_merge = []
if K > 2:
# TODO: Some heuristic just to say only try merge if we
# hyperjumped?
for k in range(K):
idx, _, meta = self._optimize_merge_mixture(y, R, k)
print("k", k, meta)
if k == 0 \
or best_merge[-1] > meta["message_length"]:
best_merge = [idx, meta["message_length"]]
# Split all components, and run partial E-M on each.
K = self.weight.size
best_perturbation = []
hyperjump = False
for k in range(K):
# Split the mixture, run partial E-M then full E-M.
idx, _, meta = self._optimize_split_mixture(y, R, k)
logger.debug(
"partial EM {} {} {} {}".format(K, k, idx, meta))
# FTL jump!
if k > 0 and self.weight.size > 2:
K = self.weight.size
K_dK = K + np.arange(1, self._predict_mixtures)
dI, pI_scatter, dI_lower \
= self._predict_message_length_change(K_dK, N)
pI = I + dI
logger.debug("Actual: {}".format(I))
logger.debug("Prediction for next mixture: {}".format(I + dI[0]))
logger.debug("Predicted lower bound for next mixture: {}".format(I + dI_lower[0]))
logger.debug("Predicted delta for next mixture: {} {}".format(dI[0], pI_scatter[0]))
logger.debug("K = {}".format(self.weight.size))
ommp = 1 - self._mixture_probability
acceptable_jump \
= (abs(100 * pI_scatter/pI) < self._percent_scatter) \
* (stats.norm(dI, pI_scatter).cdf(0) < ommp)
#= (stats.norm(pI, pI_scatter).cdf(I) < ommp) \
if any(acceptable_jump):
K_jump = K_dK[np.where(acceptable_jump)[0]]
# If the jumps are noisy, be conservative.
idx = np.where(np.diff(K_jump) > 1)[0]
idx = idx[0] if idx else -1
K_jump = K_jump[idx]
raise a
if K_jump - K > 1:
logger.debug(
"We should JUMP to K = {}!".format(K_jump))
mixture, R, meta = self._ftl_jump(y, K_jump)
logger.debug("New meta: {}".format(meta))
# Set the current mixture.
self.set_parameters(**mixture.parameters)
R, ll, I = self._expectation(y, **kwargs)
hyperjump = True
break
if k == 0 \
or best_perturbation[-1] > meta["message_length"]:
best_perturbation = [idx, meta["message_length"]]
if hyperjump:
print("Hyperjump EARLY!")
continue
# Is the best perturbation better than the current mixture?
if best_perturbation[-1] < I and (len(best_merge) == 0 or best_perturbation[-1] < best_merge[-1]):
idx, I = best_perturbation
mixture = self._proposed_mixtures[idx]
self.set_parameters(**mixture.parameters)
elif len(best_merge) > 0 and best_merge[-1] < I and best_merge[-1] < best_perturbation[-1]:
idx, I = best_merge
mixture = self._proposed_mixtures[idx]
self.set_parameters(**mixture.parameters)
else:
# All split perturbations had longer message lengths.
converged = True
logger.debug(
"All split perturbations had longer message lengths.")
break
# To update message length, max log likelihood tec
# TODO refactor
R, ll, I = self._expectation(y, **kwargs)
# Only start making predictions when we have some data.
if self.weight.size > 2:
K = self.weight.size
K_dK = K + np.arange(1, self._predict_mixtures)
dI, pI_scatter, dI_lower \
= self._predict_message_length_change(K_dK, N)
pI = I + dI
logger.debug("Actual: {}".format(I))
logger.debug("Prediction for next mixture: {}".format(I + dI[0]))
logger.debug("Predicted lower bound for next mixture: {}".format(I + dI_lower[0]))
logger.debug("Predicted delta for next mixture: {} {}".format(dI[0], pI_scatter[0]))
logger.debug("K = {}".format(self.weight.size))
ommp = 1 - self._mixture_probability
acceptable_jump \
= (abs(100 * pI_scatter/pI) < self._percent_scatter) \
* (stats.norm(dI, pI_scatter).cdf(0) < ommp)
#= (stats.norm(pI, pI_scatter).cdf(I) < ommp) \
if any(acceptable_jump):
K_jump = K_dK[np.where(acceptable_jump)[0]]
# If the jumps are noisy, be conservative.
idx = np.where(np.diff(K_jump) > 1)[0]
idx = idx[0] if idx else -1
K_jump = K_jump[idx]
if K_jump - K > 1:
logger.debug(
"We should JUMP to K = {}!".format(K_jump))
mixture, R, meta = self._ftl_jump(y, K_jump)
logger.debug("New meta: {}".format(meta))
# Set the current mixture.
self.set_parameters(**mixture.parameters)
R, ll, I = self._expectation(y, **kwargs)
else:
# Just split to K+1
continue
if converged:
logger.debug("Skipping final sweep")
break
logger.debug("Doing final sweep")
# Do a final sweep to be sure.
K = self.weight.size
best_perturbation = []
for k in range(K):
perturbation = self._propose_split_mixtures(y, R, k)
if k == 0 \
or best_perturbation[-1] > perturbation[-1]:
best_perturbation = [k] + list(perturbation)
logger.debug("Actual: {}".format(best_perturbation[-1]))
if best_perturbation[-1] < I:
k, params, _R, _meta, I = best_perturbation
self.set_parameters(**params)
R, ll, I = self._expectation(y, **kwargs)
# Make a prediction for the next one either way.
pdf = self._predict_message_length_change(K + 1, N)
logger.debug("Prediction for next mixture: {}".format(pdf))
else:
# Converged.
converged = True
"""
logger.debug("Ended at K = {}".format(self.weight.size))
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
fig, ax = plt.subplots()
ax.scatter(y.T[0], y.T[1], facecolor="#666666", alpha=0.5)
K = self.weight.size
for k in range(K):
mean = self.mean[k][:2]
cov = self.covariance[k]
vals, vecs = np.linalg.eigh(cov[:2, :2])
order = vals.argsort()[::-1]
vals = vals[order]
vecs = vecs[:,order]
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
width, height = 2 * 1 * np.sqrt(vals)
ellip = Ellipse(xy=mean, width=width, height=height, angle=theta,
facecolor="r", alpha=0.5)
ax.add_artist(ellip)
ax.scatter([mean[0]], [mean[1]], facecolor="r")
fig, ax = plt.subplots()
foo = np.array(self._mixture_predictors)
ax.scatter(foo.T[0], -foo.T[2] - foo.T[3])
raise RuntimeError("debug stop")  # intentional halt while inspecting the search
def kullback_leibler_for_multivariate_normals(mu_a, cov_a, mu_b, cov_b):
r"""
Return the Kullback-Leibler distance from one multivariate normal
distribution with mean :math:`\mu_a` and covariance :math:`\Sigma_a`,
to another multivariate normal distribution with mean :math:`\mu_b` and
covariance matrix :math:`\Sigma_b`. The two distributions are assumed to
have the same number of dimensions, such that the Kullback-Leibler
distance is
.. math::
D_{\mathrm{KL}}\left(\mathcal{N}_{a}||\mathcal{N}_{b}\right) =
\frac{1}{2}\left(\mathrm{Tr}\left(\Sigma_{b}^{-1}\Sigma_{a}\right) + \left(\mu_{b}-\mu_{a}\right)^\top\Sigma_{b}^{-1}\left(\mu_{b} - \mu_{a}\right) - k + \ln{\left(\frac{\det{\Sigma_{b}}}{\det{\Sigma_{a}}}\right)}\right)
where :math:`k` is the number of dimensions and the resulting distance is
given in units of nats.
.. warning::
It is important to remember that
:math:`D_{\mathrm{KL}}\left(\mathcal{N}_{a}||\mathcal{N}_{b}\right) \neq D_{\mathrm{KL}}\left(\mathcal{N}_{b}||\mathcal{N}_{a}\right)`.
:param mu_a:
The mean of the first multivariate normal distribution.
:param cov_a:
The covariance matrix of the first multivariate normal distribution.
:param mu_b:
The mean of the second multivariate normal distribution.
:param cov_b:
The covariance matrix of the second multivariate normal distribution.
:returns:
The Kullback-Leibler distance from distribution :math:`a` to :math:`b`
in units of nats. Dividing the result by :math:`\log_{e}2` will give
the distance in units of bits.
"""
if len(cov_a.shape) == 1:
cov_a = cov_a * np.eye(cov_a.size)
if len(cov_b.shape) == 1:
cov_b = cov_b * np.eye(cov_b.size)
U, S, V = np.linalg.svd(cov_a)
Ca_inv = np.dot(np.dot(V.T, np.linalg.inv(np.diag(S))), U.T)
U, S, V = np.linalg.svd(cov_b)
Cb_inv = np.dot(np.dot(V.T, np.linalg.inv(np.diag(S))), U.T)
k = mu_a.size
offset = mu_b - mu_a
return 0.5 * np.sum([
np.trace(np.dot(Cb_inv, cov_a)),
np.dot(offset.T, np.dot(Cb_inv, offset)),
-k,
np.log(np.linalg.det(cov_b)/np.linalg.det(cov_a))
])
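# Illustrative usage (a minimal sketch; the inputs below are made up, and numpy
# is assumed to be imported as np, as elsewhere in this module):
#
#     mu_a, cov_a = np.zeros(2), np.eye(2)
#     mu_b, cov_b = np.ones(2), 2 * np.eye(2)
#     d = kullback_leibler_for_multivariate_normals(mu_a, cov_a, mu_b, cov_b)
#     # d is given in nats; divide by np.log(2) to get bits.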
def _parameters_per_mixture(D, covariance_type):
r"""
Return the number of parameters per Gaussian component, given the number
of observed dimensions and the covariance type.
:param D:
The number of dimensions per data point.
:param covariance_type:
The structure of the covariance matrix for individual components.
The available options are: `full` for a free covariance matrix, or
`diag` for a diagonal covariance matrix.
:returns:
The number of parameters required to fully specify the multivariate
mean and covariance matrix of a :math:`D`-dimensional Gaussian.
"""
if covariance_type == "full":
return int(D + D*(D + 1)/2.0)
elif covariance_type == "diag":
return 2 * D
else:
raise ValueError("unknown covariance type '{}'".format(covariance_type))
def log_kappa(D):
cd = -0.5 * D * np.log(2 * np.pi) + 0.5 * np.log(D * np.pi)
return -1 + 2 * cd/D
def _message_length(y, mu, cov, weight, responsibility, nll,
covariance_type, eps=0.10, dofail=False, full_output=False, **kwargs):
# THIS IS SO BAD
N, D = y.shape
M = weight.size
# I(M) = M\log{2} + constant
I_m = M # [bits]
# I(w) = \frac{(M - 1)}{2}\log{N} - \frac{1}{2}\sum_{j=1}^{M}\log{w_j} - (M - 1)!
I_w = (M - 1) / 2.0 * np.log(N) \
- 0.5 * np.sum(np.log(weight)) \
- scipy.special.gammaln(M)
# TODO: why gammaln(M) ~= log(K-1)! or (K-1)!
#- np.math.factorial(M - 1) \
#+ 1
I_w = I_w/np.log(2) # [bits]
if D == 1:
log_F_m = np.log(2) + (2 * np.log(N)) - 4 * np.log(cov.flatten()[0]**0.5)
raise NotImplementedError("the one-dimensional (D == 1) case is not implemented")
else:
if covariance_type == "diag":
cov_ = np.array([_ * np.eye(D) for _ in cov])
else:
# full
cov_ = cov
log_det_cov = np.log(np.linalg.det(cov_))
# TODO: What about for diag.
log_F_m = 0.5 * D * (D + 3) * np.log(np.sum(responsibility, axis=1))
log_F_m += -log_det_cov
log_F_m += -(D * np.log(2) + (D + 1) * log_det_cov)
# TODO: No prior on h(theta).. thus -\sum_{j=1}^{M}\log{h\left(\theta_j\right)} = 0
# TODO: bother about including this? -N * D * np.log(eps)
AOM = 0.001 # MAGIC
Il = nll - (D * N * np.log(AOM))
Il = Il/np.log(2) # [bits]
"""
if D == 1:log_likelihood
# R1
R1 = 10 # MAGIC
R2 = 2 # MAGIC
log_prior = D * np.log(R1) # mu
log_prior += np.log(R2)
log_prior += np.log(cov.flatten()[0]**0.5)
else:
R1 = 10
log_prior = D * np.log(R1) + 0.5 * (D + 1) * log_det_cov
"""
log_prior = 0
I_t = (log_prior + 0.5 * log_F_m)/np.log(2)
sum_It = np.sum(I_t)
K = M
Q = _total_parameters(K, D, covariance_type)
lattice = 0.5 * Q * log_kappa(Q) / np.log(2)
part1 = I_m + I_w + np.sum(I_t) + lattice
part2 = Il + (0.5 * Q)/np.log(2)
I = part1 + part2
assert I > 0
if full_output:
return (I, dict(I_m=I_m, I_w=I_w, log_F_m=log_F_m, nll=nll, I_l=Il, I_t=I_t,
lattice=lattice, part1=part1, part2=part2))
return I
def _index_of_most_similar_component(y, mean, covariance, index):
r"""
Find the index of the most similar component, as judged by K-L divergence.
"""
K, D = mean.shape
D_kl = np.inf * np.ones(K)
for k in range(K):
if k == index: continue
D_kl[k] = kullback_leibler_for_multivariate_normals(
mean[index], covariance[index], mean[k], covariance[k])
return np.nanargmin(D_kl)
def _merge_component_with_closest_component(y, mean, covariance, weight,
responsibility, index, **kwargs):
index_b = _index_of_most_similar_component(y, mean, covariance, index)
return _merge_components(
y, mean, covariance, weight, responsibility, index, index_b, **kwargs)
def _merge_components(y, mean, covariance, weight, responsibility, index_a,
index_b, **kwargs):
r"""
Merge two specified components of the mixture into a single component.
:param y:
A :math:`N\times{}D` array of the observations :math:`y`,
where :math:`N` is the number of observations, and :math:`D` is the
number of dimensions per observation.
"""
logger.debug("Merging component {} (of {}) with {}".format(
a_index, weight.size, b_index))
# Initialize.
weight_k = np.sum(weight[[a_index, b_index]])
responsibility_k = np.sum(responsibility[[a_index, b_index]], axis=0)
effective_membership_k = np.sum(responsibility_k)
mean_k = np.sum(responsibility_k * y.T, axis=1) / effective_membership_k
covariance_k = _estimate_covariance_matrix(
y, np.atleast_2d(responsibility_k), np.atleast_2d(mean_k),
kwargs["covariance_type"], kwargs["covariance_regularization"])
# Delete the b-th component.
del_index = np.max([a_index, b_index])
keep_index = np.min([a_index, b_index])
new_mean = np.delete(mu, del_index, axis=0)
new_covariance = np.delete(cov, del_index, axis=0)
new_weight = np.delete(weight, del_index, axis=0)
new_responsibility = np.delete(responsibility, del_index, axis=0)
new_mean[keep_index] = mean_k
new_covariance[keep_index] = covariance_k
new_weight[keep_index] = weight_k
new_responsibility[keep_index] = responsibility_k
# Calculate log-likelihood.
# Generate a mixture.
mixture = GaussianMixture()
raise NotImplementedError("component merging is not finished in this version")
#return _expectation_maximization(y, new_mean, new_covariance, new_weight,
# responsibility=new_responsibility, **kwargs)
def _compute_precision_cholesky(covariances, covariance_type):
r"""
Compute the Cholesky decomposition of the precision of the covariance
matrices provided.
:param covariances:
An array of covariance matrices.
:param covariance_type:
The structure of the covariance matrix for individual components.
The available options are: `full` for a free covariance matrix, or
`diag` for a diagonal covariance matrix.
"""
singular_matrix_error = "Failed to do Cholesky decomposition"
if covariance_type in "full":
M, D, _ = covariances.shape
cholesky_precision = np.empty((M, D, D))
for m, covariance in enumerate(covariances):
try:
cholesky_cov = scipy.linalg.cholesky(covariance, lower=True)
except scipy.linalg.LinAlgError:
raise ValueError(singular_matrix_error)
cholesky_precision[m] = scipy.linalg.solve_triangular(
cholesky_cov, np.eye(D), lower=True).T
elif covariance_type in "diag":
if np.any(np.less_equal(covariances, 0.0)):
raise ValueError(singular_matrix_error)
cholesky_precision = covariances**(-0.5)
else:
raise NotImplementedError("nope")
return cholesky_precision
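# Minimal sketch of the expected input/output shapes (illustrative values):
#
#     covs = np.array([np.eye(2), 2.0 * np.eye(2)])      # (M, D, D) for "full"
#     prec_chol = _compute_precision_cholesky(covs, "full")
#     # prec_chol[m] is the upper-triangular factor such that
#     # prec_chol[m] @ prec_chol[m].T recovers the m-th precision matrix.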
def _slogdet(covariance, covariance_type):
if covariance_type == "full":
sign, slogdet = np.linalg.slogdet(covariance)
assert np.all(sign == 1)
return slogdet
elif covariance_type == "diag":
K, D = covariance.shape
cov = np.array([_ * np.eye(D) for _ in covariance])
sign, slogdet = np.linalg.slogdet(cov)
assert np.all(sign == 1)
return slogdet
def _estimate_covariance_matrix_full(y, responsibility, mean,
covariance_regularization=0):
N, D = y.shape
M, N = responsibility.shape
membership = np.sum(responsibility, axis=1)
I = np.eye(D)
cov = np.empty((M, D, D))
for m, (mu, rm, nm) in enumerate(zip(mean, responsibility, membership)):
diff = y - mu
denominator = nm - 1 if nm > 1 else nm
cov[m] = np.dot(rm * diff.T, diff) / denominator \
+ covariance_regularization * I
return cov
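# Illustrative call (made-up data): with a single component that owns every
# point, this reduces to the usual sample covariance of y.
#
#     y = np.random.randn(100, 3)
#     responsibility = np.ones((1, 100))
#     mean = np.atleast_2d(y.mean(axis=0))
#     cov = _estimate_covariance_matrix_full(y, responsibility, mean)
#     # cov.shape == (1, 3, 3)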
def _estimate_covariance_matrix(y, responsibility, mean, covariance_type,
covariance_regularization):
available = {
"full": _estimate_covariance_matrix_full,
"diag": _estimate_covariance_matrix_diag
}
try:
function = available[covariance_type]
except KeyError:
raise ValueError("unknown covariance type")
return function(y, responsibility, mean, covariance_regularization)
def _estimate_covariance_matrix_diag(y, responsibility, mean,
covariance_regularization=0):
N, D = y.shape
M, N = responsibility.shape
denominator = np.sum(responsibility, axis=1)
denominator[denominator > 1] = denominator[denominator > 1] - 1
membership = np.sum(responsibility, axis=1)
I = np.eye(D)
cov = np.empty((M, D))
for m, (mu, rm, nm) in enumerate(zip(mean, responsibility, membership)):
diff = y - mu
denominator = nm - 1 if nm > 1 else nm
cov[m] = np.dot(rm, diff**2) / denominator + covariance_regularization
#cov[m] = rm * diff**2 / denominator + covariance_regularization
return cov
#avg_X2 = np.dot(responsibility, y * y) / denominator
#avg_means2 = mean**2
#avg_X_means = mean * np.dot(responsibility, y) / denominator
#return avg_X2 - 2 * avg_X_means + avg_means2 + covariance_regularization
def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features):
"""Compute the log-det of the cholesky decomposition of matrices.
Parameters
----------
matrix_chol : array-like,
Cholesky decompositions of the matrices.
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : {'full', 'tied', 'diag', 'spherical'}
n_features : int
Number of features.
Returns
-------
log_det_precision_chol : array-like, shape (n_components,)
The determinant of the precision matrix for each component.
"""
if covariance_type == 'full':
n_components, _, _ = matrix_chol.shape
log_det_chol = (np.sum(np.log(
matrix_chol.reshape(
n_components, -1)[:, ::n_features + 1]), 1))
elif covariance_type == 'tied':
log_det_chol = (np.sum(np.log(np.diag(matrix_chol))))
elif covariance_type == 'diag':
log_det_chol = (np.sum(np.log(matrix_chol), axis=1))
else:
log_det_chol = n_features * (np.log(matrix_chol))
return log_det_chol
def _estimate_log_gaussian_prob(X, means, precision_cholesky, covariance_type):
n_samples, n_features = X.shape
n_components, _ = means.shape
# det(precision_chol) is half of det(precision)
log_det = _compute_log_det_cholesky(
precision_cholesky, covariance_type, n_features)
if covariance_type == 'full':
log_prob = np.empty((n_samples, n_components))
for k, (mu, prec_chol) in enumerate(zip(means, precision_cholesky)):
y = np.dot(X, prec_chol) - np.dot(mu, prec_chol)
log_prob[:, k] = np.sum(np.square(y), axis=1)
elif covariance_type == 'diag':
precisions = precision_cholesky**2
log_prob = (np.sum((means ** 2 * precisions), 1) - 2.0 * np.dot(X, (means * precisions).T) + np.dot(X**2, precisions.T))
return -0.5 * (n_features * np.log(2 * np.pi) + log_prob) + log_det
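# Minimal sketch (illustrative values) tying this to _compute_precision_cholesky:
#
#     X = np.random.randn(5, 2)
#     means = np.array([[0.0, 0.0], [3.0, 3.0]])
#     covs = np.array([np.eye(2), 2.0 * np.eye(2)])
#     prec_chol = _compute_precision_cholesky(covs, "full")
#     log_prob = _estimate_log_gaussian_prob(X, means, prec_chol, "full")
#     # log_prob.shape == (5, 2): one log density per sample and per component.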
def _svd(covariance, covariance_type):
if covariance_type == "full":
return np.linalg.svd(covariance)
elif covariance_type == "diag":
return np.linalg.svd(covariance * np.eye(covariance.size))
else:
raise ValueError("unknown covariance type")
|
andycasey/snob
|
snob/nips_search.py
|
Python
|
mit
| 67,954
|
[
"Gaussian"
] |
003cdd5571d90a2412475250750b20e30ba1abdcc40a14bf8bece7d677b6e40d
|
from os.path import join, dirname, abspath
from os import makedirs
''' dir variables'''
PATH_ROOT = dirname(dirname(abspath(__file__)))
PATH_TEST = join(PATH_ROOT, 'test_data')
PATH_SCRIPTS = join(PATH_ROOT, 'scripts')
PATH_DATABASES = join(PATH_ROOT, 'databases')
PATH_DATABASE_LOGS = join(PATH_DATABASES, 'log_files')
PATH_ASSEMBLIES = join(PATH_ROOT, 'assemblies')
PATH_TOOLS = join(PATH_ROOT, 'external_tools')
PATH_UTIL = join(PATH_SCRIPTS, 'util')
''' db variables '''
PATH_UNIPROT_SPROT_DIR = join(PATH_DATABASES, 'uniprot_sprot')
PATH_UNIPROT_SPROT = join(PATH_UNIPROT_SPROT_DIR, 'uniprot_sprot.fasta')
PATH_UNIREF90_DIR = join(PATH_DATABASES, 'uniref90')
PATH_UNIREF90 = join(PATH_UNIREF90_DIR, 'uniref90.fasta')
PATH_NR_DIR = join(PATH_DATABASES, 'nr')
PATH_NR = join(PATH_NR_DIR, 'nr.fasta')
PATH_BUSCO_REFERENCE_DIR = join(PATH_DATABASES, 'busco')
PATH_BUSCO_METAZOA = join(PATH_BUSCO_REFERENCE_DIR, 'metazoa_buscos')
PATH_BUSCO_ANTHROPODA = join(PATH_BUSCO_REFERENCE_DIR, 'arthropoda_buscos')
PATH_BUSCO_VERTEBRATA = join(PATH_BUSCO_REFERENCE_DIR, 'vertebrata_buscos')
PATH_BUSCO_EUKARYOTA = join(PATH_BUSCO_REFERENCE_DIR, 'eukaryota_buscos')
PATH_BUSCO_FUNGI = join(PATH_BUSCO_REFERENCE_DIR, 'fungi_buscos')
PATH_BUSCO_BACTERIA = join(PATH_BUSCO_REFERENCE_DIR, 'bacteria_buscos')
PATH_BUSCO_PLANT = join(PATH_BUSCO_REFERENCE_DIR, 'plant_buscos')
PATH_PFAM_DIR = join(PATH_DATABASES, 'pfam')
PATH_PFAM_DATABASE = join(PATH_PFAM_DIR, 'Pfam-A.hmm')
PATH_NOG_CATEGORIES = join(PATH_DATABASES, 'nog_categories')
PATH_NOG_FUNCTIONS = join(PATH_DATABASES, 'NOG.annotations.tsv')
PATH_GO_PATHWAY = join(PATH_DATABASES, 'go_pathway.txt')
PATH_SWISS_ENZYME = join(PATH_DATABASES, 'swiss_enzyme.list')
PATH_PFAM_ENZYME = join(PATH_DATABASES, 'pfam_enzyme.list')
PATH_ID_MAPPING_DIR = join(PATH_DATABASES, 'id_mapping')
PATH_ID_MAPPING = join(PATH_ID_MAPPING_DIR, 'idmapping.dat')
PATH_ID_MAPPING_BIOCYC = PATH_ID_MAPPING+'.biocyc'
PATH_ID_MAPPING_EGGNOG = PATH_ID_MAPPING+'.eggNOG'
PATH_ID_MAPPING_KO = PATH_ID_MAPPING+'.KO'
PATH_ID_MAPPING_ORTHODB = PATH_ID_MAPPING+'.orthodb'
PATH_UNIPROT_SPROT_MAP = join(PATH_DATABASES, 'uniprot_sprot.dat')
PATH_KOG_FUNCTIONAL = join(PATH_DATABASES, 'allKOG_functional_info.txt')
PATH_SLIM_GENERIC = join(PATH_DATABASES, 'goslim_generic.obo')
PATH_ID_MAPPING_SELECTED = join(PATH_DATABASES, 'idmapping_selected.tab')
PATH_DATABASE_LOG = join(PATH_DATABASES, '.database_supervisor_log ')
PATH_DB_CONFIG_FILE = join(PATH_ROOT, 'db_config.json')
PATH_SWISS_ENZYME = join(PATH_DATABASES, 'swiss_enzyme.list')
PATH_ENZYME_PATHWAY = join(PATH_DATABASES, 'enzyme_pathway.list')
PATH_ORTHOLOGY_PATHWAY = join(PATH_DATABASES, 'orthology_pathway.list')
''' url variables '''
#URL_BUSCO_BACTERIA = 'http://busco.ezlab.org/files/bacteria_buscos.tar.gz'
URL_BUSCO_BACTERIA = 'http://busco.ezlab.org/datasets/bacteria_odb9.tar.gz'
#URL_BUSCO_FUNGI = 'http://busco.ezlab.org/files/fungi_buscos.tar.gz'
URL_BUSCO_FUNGI = 'http://busco.ezlab.org/datasets/fungi_odb9.tar.gz'
#URL_BUSCO_EUKARYOTA = 'http://busco.ezlab.org/files/eukaryota_buscos.tar.gz'
URL_BUSCO_EUKARYOTA = 'http://busco.ezlab.org/datasets/eukaryota_odb9.tar.gz'
#URL_BUSCO_VERTEBRATA = 'http://busco.ezlab.org/files/vertebrata_buscos.tar.gz'
URL_BUSCO_VERTEBRATA = 'http://busco.ezlab.org/datasets/vertebrata_odb9.tar.gz'
#URL_BUSCO_ANTHROPODA = 'http://busco.ezlab.org/files/arthropoda_buscos.tar.gz'
URL_BUSCO_ANTHROPODA = 'http://busco.ezlab.org/datasets/arthropoda_odb9.tar.gz'
#URL_BUSCO_METAZOA = 'http://busco.ezlab.org/files/metazoa_buscos.tar.gz'
URL_BUSCO_METAZOA = 'http://busco.ezlab.org/datasets/metazoa_odb9.tar.gz'
#URL_BUSCO_PLANT = 'http://buscos.ezlab.org/files/plant_early_release.tar.gz'
URL_BUSCO_PLANT = 'http://busco.ezlab.org/datasets/embryophyta_odb9.tar.gz'
URL_UNIPROT_SPROT = 'ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.fasta.gz'
URL_UNIREF90 = 'ftp://ftp.uniprot.org/pub/databases/uniprot/uniref/uniref90/uniref90.fasta.gz'
URL_NR = 'ftp://ftp.ncbi.nih.gov/blast/db/FASTA/nr.gz'
URL_GO_PATHWAY = 'http://rest.genome.jp/link/go/pathway'
URL_PFAM_ENZYME = 'http://rest.genome.jp/link/enzyme/pfam'
URL_ID_MAPPING = 'ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/idmapping/idmapping.dat.gz'
URL_UNIPROT_SPROT_MAP = 'ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.dat.gz'
URL_KOG_FUNCTIONAL = 'http://eggnogdb.embl.de/download/eggnog_4.1/data/NOG/NOG.annotations.tsv.gz'
URL_SLIM_GENERIC = 'http://www.geneontology.org/ontology/subsets/goslim_generic.obo'
URL_PFAM_DATABASE = 'ftp://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam29.0/Pfam-A.hmm.gz'
URL_ID_MAPPING_SELECTED = 'ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/idmapping/idmapping_selected.tab.gz'
URL_SWISS_ENZYME = 'http://rest.genome.jp/link/enzyme/swissprot'
URL_ORTHOLOGY_PATHWAY = None
URL_ENZYME_PATHWAY = None
URL_NOG_CATEGORIES = None
URL_NOG_FUNCTIONS = 'http://eggnogdb.embl.de/download/eggnog_4.5/data/NOG/NOG.annotations.tsv.gz'
class Output_Path_Vars:
def __init__(self, basename, out_dir=PATH_ASSEMBLIES):
''' A simple class that simplifies generating names during creation of
tasks. The only two inputs are "basename", which should be the name
of the output dir you wish to create and have paths for, and an
optional "out_dir", which specifies the directory in which the
out_dir should be created. The paths are accessible as member
variables. The variables are listed below...
self.assembly_name : basename for output
self.path_dir : path to output dir
self.path_assembly_files : path to assembly files
self.path_quality_files : path to quality files
self.path_annotation_files : path to annotation files
self.path_expression_files : path to expression files
self.path_filter_files : path to filtered_assemblies dir
self.path_logs : path to log file containing dir
self.path_assembly : path to the fasta assembly
self.path_gene_trans_map : path to .gene_trans_map output
self.path_transdecoder_dir : path to transdecoder dir
self.path_transrate_dir : path to transrate dir
self.path_pep : path to transdecoder pep file
self.path_annot_table : path to annotation table
self.path_history : path to history.json
'''
self.assembly_name = basename
self.path_dir = join(out_dir, basename)
self.path_assembly_files = join(self.path_dir, 'assembly_files')
self.path_quality_files = join(self.path_dir, 'quality_files')
self.path_annotation_files = join(self.path_dir, 'annotation_files')
self.path_expression_files = join(self.path_dir, 'expression_files')
self.path_filter_files = join(self.path_dir, 'filtered_assemblies')
self.path_logs = join(self.path_dir, 'log_files')
self.path_assembly = join(self.path_dir, basename + '.fasta')
self.path_gene_trans_map = join(self.path_assembly_files, basename + '.gene_trans_map')
self.path_transdecoder_dir = join(self.path_annotation_files, 'transdecoder')
self.path_transrate_dir = join(self.path_quality_files, 'transrate')
self.path_pep = join(self.path_transdecoder_dir, basename + '.fasta.transdecoder.pep')
self.path_annot_table = join(self.path_dir, basename + '_annotation.txt')
self.path_history = join(self.path_logs, 'history.json')
def build(self):
dirs = [self.path_dir, self.path_assembly_files, self.path_quality_files,
self.path_annotation_files, self.path_expression_files, self.path_filter_files,
self.path_logs, self.path_transdecoder_dir, self.path_transrate_dir]
for d in dirs:
try:
makedirs(d)
except OSError:
pass
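# Illustrative usage (the basename below is hypothetical): build the standard
# directory layout for a new assembly under PATH_ASSEMBLIES.
#
#     out = Output_Path_Vars('my_assembly')
#     out.build()                    # creates the directory tree, ignoring dirs that already exist
#     print(out.path_annot_table)    # .../assemblies/my_assembly/my_assembly_annotation.txt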
|
bluegenes/MakeMyTranscriptome
|
scripts/mmt_defaults.py
|
Python
|
bsd-3-clause
| 8,123
|
[
"BLAST"
] |
b920beac1cabea2ef2b37c098cc1b4233ef7044edce9d73cebdc6eb835d60078
|
'''
txBOM lets you integrate asynchronous weather forecast and
observations retrieval from the Australian Bureau of Meteorology
into your Twisted application.
Data definitions extracted from http://www.bom.gov.au/inside/itb/dm/idcodes/struc.shtml
'''
version = (0, 0, 2)
# General form of the ID Code
#
# The general form of the identification code is:
#
# IDcxxxxx.ext
#
# where:
# ID = a constant which identifies this string as an ID code;
# c = category of product
# xxxxx = individual product identifier whose form depends on the category, c;
# ext = optional file extension, indicating file type
#
# Category of product (idCxxxxx) may have the following values:
#
# B = Bundled products, eg IDBxxxxx
# C = Climate, eg IDCxxxxx
# D = NT, eg IDDxxxxx
# E = Satellite products, eg IDExxxxx
# G = Graphical Weather Packages, eg IDGxxxxx
# N = NSW/ACT, eg IDNxxxxx
# Q = Qld, eg IDQxxxxx
# R = Radar, eg IDRxxxxx
# S = SA, eg IDSxxxxx
# T = Tasmanian products, eg IDTxxxxx
# V = Victoria, eg IDVxxxxx
# W = WA, eg IDWxxxxx
# X = Digital Fax, eg IDXxxxxx
# Y = National Meteorological Operations Centre, eg IDYxxxxx
#
ACT = "N" # e.g. IDNxxxxx
Bundled = "B"
Climate = "C"
NT = "D" # e.g. IDDxxxxx
Satellite = "E"
Graphical = "G"
NSW = "N" # e.g. IDNxxxxx
QLD = "Q" # e.g. IDQxxxxx
Radar = "R"
SA = "S" # e.g. IDSxxxxx
TAS = "T" # e.g. IDTxxxxx
VIC = "V" # e.g. IDVxxxxx
WA = "W" # e.g. IDWxxxxx
Digital_Fax = "X"
National_Operations_Centre = "Y"
Categories = [ACT,
Bundled,
Climate,
NT,
Satellite,
Graphical,
NSW,
QLD,
Radar,
SA,
TAS,
VIC,
WA,
Digital_Fax,
National_Operations_Centre]
StateCategories = [ACT,
NSW,
NT,
QLD,
SA,
TAS,
VIC,
WA]
# Individual product identifier (idcXXXXX)
#
# The identifier field varies in length and composition, depending upon the category of product - c.
# For full details, refer to the Product Identification Code Listing.
#
# Optional file extension (idcxxxxx.EXT)
#
# The file extension is optional. When required it indicates the product's file type or format as follows:
#
# .au = voice file
# .axf = AIFS Exchange Format file
# .cat = concatenated voice file
# .gif = gif image file
# .htm = html/shtml file
# .jpg = jpeg image file
# .mpg = mpeg image file
# .nc = NetCDF file
# .ps = postscript
# .txt = text file
# .wav = voice file
# Convert wind direction acronym to words
WindDirections = {"N": "northerly",
"NNE": "north north easterly",
"NE": "north easterly",
"ENE": "east north easterly",
"E": "easterly",
"ESE": "east south easterly",
"SE": "south easterly",
"SSE": "south south easterly",
"S": "southerly",
"SSW": "south south westerly",
"SW": "south westerly",
"WSW": "west south westerly",
"W": "westerly",
"WNW": "west north westerly",
"NW": "north westerly",
"NNW": "north north westerly",
"CALM": "calm"}
WindDirectionsToBearing = {"N": 90.0,
"NNE": 67.5,
"NE": 45.0,
"ENE": 22.5,
"E": 0.0,
"ESE": 337.5,
"SE": 315,
"SSE": 292.5,
"S": 270.0,
"SSW": 247.5,
"SW": 225.0,
"WSW": 202.5,
"W": 180.0,
"WNW": 157.5,
"NW": 135.0,
"NNW": 112.5,
"CALM": None}
|
claws/txBOM
|
txbom/__init__.py
|
Python
|
mit
| 4,089
|
[
"NetCDF"
] |
8047506b3312ccd89a10d4be4eb3f5684c9b2cd4d2a79897daaabb8f967f5bee
|
from moose import *
SIMDT = 1e-5
PLOTDT = 1e-4
GLDT = 1e-2
RUNTIME = 0.05
context = PyMooseBase.getContext()
container = Neutral("/test")
proto_file_name = "../../../DEMOS/gbar/myelin2.p"
context.readCell(proto_file_name, "/test/axon")
gl0 = GLview("GLview", container)
gl0.vizpath = '/test/axon/##[CLASS=Compartment]'
gl0.host = 'localhost'
gl0.port = '9999'
gl0.bgcolor = '050050050'
gl0.value1 = 'Vm'
gl0.value1min = -0.1
gl0.value1max = 0.05
gl0.morph_val = 1
gl0.color_val = 1
gl0.sync = 'off'
gl0.grid = 'off'
context.setClock(0, SIMDT, 0)
context.setClock(1, PLOTDT, 0)
context.setClock(4, GLDT, 0)
context.useClock(4, "/#[TYPE=GLview]")
context.reset()
context.step(RUNTIME)
|
BhallaLab/moose-thalamocortical
|
pymoose/tests/gl/testGLview.py
|
Python
|
lgpl-2.1
| 688
|
[
"MOOSE"
] |
33efea074711257dc288791d2cb18b53af0f666e4d7c0429e0250aef033db83d
|
#!/usr/bin/env python
import sys
from setuptools import find_packages, setup
import versioneer
DISTNAME = 'xarray'
LICENSE = 'Apache'
AUTHOR = 'xarray Developers'
AUTHOR_EMAIL = 'xarray@googlegroups.com'
URL = 'https://github.com/pydata/xarray'
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
]
INSTALL_REQUIRES = ['numpy >= 1.11', 'pandas >= 0.18.0']
TESTS_REQUIRE = ['pytest >= 2.7.1']
if sys.version_info[0] < 3:
TESTS_REQUIRE.append('mock')
DESCRIPTION = "N-D labeled arrays and datasets in Python"
LONG_DESCRIPTION = """
**xarray** (formerly **xray**) is an open source project and Python package
that aims to bring the labeled data power of pandas_ to the physical sciences,
by providing N-dimensional variants of the core pandas data structures.
Our goal is to provide a pandas-like and pandas-compatible toolkit for
analytics on multi-dimensional arrays, rather than the tabular data for which
pandas excels. Our approach adopts the `Common Data Model`_ for self-
describing scientific data in widespread use in the Earth sciences:
``xarray.Dataset`` is an in-memory representation of a netCDF file.
.. _pandas: http://pandas.pydata.org
.. _Common Data Model: http://www.unidata.ucar.edu/software/thredds/current/netcdf-java/CDM
.. _netCDF: http://www.unidata.ucar.edu/software/netcdf
.. _OPeNDAP: http://www.opendap.org/
Important links
---------------
- HTML documentation: http://xarray.pydata.org
- Issue tracker: http://github.com/pydata/xarray/issues
- Source code: http://github.com/pydata/xarray
- SciPy2015 talk: https://www.youtube.com/watch?v=X0pAhJgySxk
""" # noqa
setup(name=DISTNAME,
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license=LICENSE,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
classifiers=CLASSIFIERS,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
install_requires=INSTALL_REQUIRES,
tests_require=TESTS_REQUIRE,
url=URL,
packages=find_packages(),
package_data={'xarray': ['tests/data/*', 'plot/default_colormap.csv']})
|
jcmgray/xarray
|
setup.py
|
Python
|
apache-2.0
| 2,580
|
[
"NetCDF"
] |
e467e7bc6853e219ee0cfffed85961c6c2a315084f15e11642739380ab7323f2
|
import pytest
import os
from fontTools.ttLib import TTFont
from fontbakery.checkrunner import (DEBUG, INFO, WARN, ERROR,
SKIP, PASS, FAIL, ENDCHECK)
from fontbakery.codetesting import (assert_results_contain,
assert_PASS,
assert_SKIP,
portable_path,
TEST_FILE,
GLYPHSAPP_TEST_FILE,
CheckTester)
from fontbakery.configuration import Configuration
from fontbakery.constants import (NameID,
PlatformID,
WindowsEncodingID,
WindowsLanguageID,
MacintoshEncodingID,
MacintoshLanguageID)
from fontbakery.profiles import googlefonts as googlefonts_profile
import math
check_statuses = (ERROR, FAIL, SKIP, PASS, WARN, INFO, DEBUG)
mada_fonts = [
TEST_FILE("mada/Mada-Black.ttf"),
TEST_FILE("mada/Mada-ExtraLight.ttf"),
TEST_FILE("mada/Mada-Medium.ttf"),
TEST_FILE("mada/Mada-SemiBold.ttf"),
TEST_FILE("mada/Mada-Bold.ttf"),
TEST_FILE("mada/Mada-Light.ttf"),
TEST_FILE("mada/Mada-Regular.ttf"),
]
@pytest.fixture
def mada_ttFonts():
return [TTFont(path) for path in mada_fonts]
cabin_fonts = [
TEST_FILE("cabin/Cabin-BoldItalic.ttf"),
TEST_FILE("cabin/Cabin-Bold.ttf"),
TEST_FILE("cabin/Cabin-Italic.ttf"),
TEST_FILE("cabin/Cabin-MediumItalic.ttf"),
TEST_FILE("cabin/Cabin-Medium.ttf"),
TEST_FILE("cabin/Cabin-Regular.ttf"),
TEST_FILE("cabin/Cabin-SemiBoldItalic.ttf"),
TEST_FILE("cabin/Cabin-SemiBold.ttf")
]
cabin_condensed_fonts = [
TEST_FILE("cabincondensed/CabinCondensed-Regular.ttf"),
TEST_FILE("cabincondensed/CabinCondensed-Medium.ttf"),
TEST_FILE("cabincondensed/CabinCondensed-Bold.ttf"),
TEST_FILE("cabincondensed/CabinCondensed-SemiBold.ttf")
]
montserrat_fonts = [
TEST_FILE("montserrat/Montserrat-Black.ttf"),
TEST_FILE("montserrat/Montserrat-BlackItalic.ttf"),
TEST_FILE("montserrat/Montserrat-Bold.ttf"),
TEST_FILE("montserrat/Montserrat-BoldItalic.ttf"),
TEST_FILE("montserrat/Montserrat-ExtraBold.ttf"),
TEST_FILE("montserrat/Montserrat-ExtraBoldItalic.ttf"),
TEST_FILE("montserrat/Montserrat-ExtraLight.ttf"),
TEST_FILE("montserrat/Montserrat-ExtraLightItalic.ttf"),
TEST_FILE("montserrat/Montserrat-Italic.ttf"),
TEST_FILE("montserrat/Montserrat-Light.ttf"),
TEST_FILE("montserrat/Montserrat-LightItalic.ttf"),
TEST_FILE("montserrat/Montserrat-Medium.ttf"),
TEST_FILE("montserrat/Montserrat-MediumItalic.ttf"),
TEST_FILE("montserrat/Montserrat-Regular.ttf"),
TEST_FILE("montserrat/Montserrat-SemiBold.ttf"),
TEST_FILE("montserrat/Montserrat-SemiBoldItalic.ttf"),
TEST_FILE("montserrat/Montserrat-Thin.ttf"),
TEST_FILE("montserrat/Montserrat-ThinItalic.ttf")
]
cjk_font = TEST_FILE("cjk/SourceHanSans-Regular.otf")
@pytest.fixture
def montserrat_ttFonts():
return [TTFont(path) for path in montserrat_fonts]
@pytest.fixture
def cabin_ttFonts():
return [TTFont(path) for path in cabin_fonts]
@pytest.fixture
def vf_ttFont():
path = TEST_FILE("varfont/Oswald-VF.ttf")
return TTFont(path)
def change_name_table_id(ttFont, nameID, newEntryString, platEncID=0):
for i, nameRecord in enumerate(ttFont['name'].names):
if nameRecord.nameID == nameID and nameRecord.platEncID == platEncID:
ttFont['name'].names[i].string = newEntryString
def delete_name_table_id(ttFont, nameID):
delete = []
for i, nameRecord in enumerate(ttFont['name'].names):
if nameRecord.nameID == nameID:
delete.append(i)
for i in sorted(delete, reverse=True):
del(ttFont['name'].names[i])
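# Illustrative use of the helpers above (the target nameID is just an example):
#
#     ttFont = TTFont(TEST_FILE("mada/Mada-Regular.ttf"))
#     delete_name_table_id(ttFont, NameID.FONT_SUBFAMILY_NAME)
#     # all subfamily-name records are now gone from ttFont["name"].names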
@pytest.fixture
def cabin_regular_path():
return portable_path('data/test/cabin/Cabin-Regular.ttf')
def test_example_checkrunner_based(cabin_regular_path):
""" This is just an example test. We'll probably need something like
this setup in a checkrunner_test.py testsuite.
Leave it here for the moment until we implement a real case.
This test is run via the CheckRunner and demonstrates how to get
(mutable) objects from the conditions cache and change them.
NOTE: the actual fontbakery checks of conditions should never
change a condition object.
"""
from fontbakery.checkrunner import CheckRunner
from fontbakery.profiles.googlefonts import profile
values = dict(fonts=[cabin_regular_path])
runner = CheckRunner(profile, values, Configuration(explicit_checks=['com.google.fonts/check/vendor_id']))
# we could also reuse the `iterargs` that was assigned in the previous
# for loop, but this here is more explicit
iterargs = (('font', 0),)
ttFont = runner.get('ttFont', iterargs)
print('Test PASS ...')
# prepare
ttFont['OS/2'].achVendID = "APPL"
# run
for status, message, _ in runner.run():
if status in check_statuses:
last_check_message = message
if status == ENDCHECK:
assert message == PASS
break
print('Test WARN ...')
# prepare
ttFont['OS/2'].achVendID = "????"
# run
for status, message, _ in runner.run():
if status in check_statuses:
last_check_message = message
if status == ENDCHECK:
assert message == WARN and last_check_message.code == 'unknown'
break
def test_check_canonical_filename():
""" Files are named canonically. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/canonical_filename")
static_canonical_names = [
TEST_FILE("montserrat/Montserrat-Thin.ttf"),
TEST_FILE("montserrat/Montserrat-ExtraLight.ttf"),
TEST_FILE("montserrat/Montserrat-Light.ttf"),
TEST_FILE("montserrat/Montserrat-Regular.ttf"),
TEST_FILE("montserrat/Montserrat-Medium.ttf"),
TEST_FILE("montserrat/Montserrat-SemiBold.ttf"),
TEST_FILE("montserrat/Montserrat-Bold.ttf"),
TEST_FILE("montserrat/Montserrat-ExtraBold.ttf"),
TEST_FILE("montserrat/Montserrat-Black.ttf"),
TEST_FILE("montserrat/Montserrat-ThinItalic.ttf"),
TEST_FILE("montserrat/Montserrat-ExtraLightItalic.ttf"),
TEST_FILE("montserrat/Montserrat-LightItalic.ttf"),
TEST_FILE("montserrat/Montserrat-Italic.ttf"),
TEST_FILE("montserrat/Montserrat-MediumItalic.ttf"),
TEST_FILE("montserrat/Montserrat-SemiBoldItalic.ttf"),
TEST_FILE("montserrat/Montserrat-BoldItalic.ttf"),
TEST_FILE("montserrat/Montserrat-ExtraBoldItalic.ttf"),
TEST_FILE("montserrat/Montserrat-BlackItalic.ttf"),
]
varfont_canonical_names = [
TEST_FILE("cabinvfbeta/CabinVFBeta-Italic[wght].ttf"),
TEST_FILE("cabinvfbeta/CabinVFBeta[wdth,wght].ttf"), # axis tags are sorted
]
non_canonical_names = [
TEST_FILE("cabinvfbeta/CabinVFBeta.ttf"),
TEST_FILE("cabinvfbeta/Cabin-Italic.ttf"),
TEST_FILE("cabinvfbeta/Cabin-Roman.ttf"),
TEST_FILE("cabinvfbeta/Cabin-Italic-VF.ttf"),
TEST_FILE("cabinvfbeta/Cabin-Roman-VF.ttf"),
TEST_FILE("cabinvfbeta/Cabin-VF.ttf"),
TEST_FILE("cabinvfbeta/CabinVFBeta[wght,wdth].ttf"), # axis tags are NOT sorted here
]
for canonical in static_canonical_names + varfont_canonical_names:
assert_PASS(check(canonical),
f'with "{canonical}" ...')
for non_canonical in non_canonical_names:
assert_results_contain(check(non_canonical),
FAIL, 'bad-varfont-filename',
f'with "{non_canonical}" ...')
assert_results_contain(check(TEST_FILE("Bad_Name.ttf")),
FAIL, 'invalid-char',
'with filename containing an underscore...')
assert_results_contain(check(TEST_FILE("mutatorsans-vf/MutatorSans-VF.ttf")),
FAIL, 'unknown-name',
'with a variable font that lacks some important name table entries...')
# TODO: FAIL, 'bad-static-filename'
# TODO: FAIL, 'varfont-with-static-filename'
def test_check_description_broken_links():
""" Does DESCRIPTION file contain broken links ? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/description/broken_links")
font = TEST_FILE("cabin/Cabin-Regular.ttf")
assert_PASS(check(font),
'with description file that has no links...')
good_desc = check['description']
good_desc += ("<a href='http://example.com'>Good Link</a>"
"<a href='http://fonts.google.com'>Another Good One</a>")
assert_PASS(check(font, {"description": good_desc}),
'with description file that has good links...')
good_desc += "<a href='mailto:juca@members.fsf.org'>An example mailto link</a>"
assert_results_contain(check(font, {"description": good_desc}),
INFO, "email",
'with a description file containing "mailto" links...')
assert_PASS(check(font, {"description": good_desc}),
'with a description file containing "mailto" links...')
bad_desc = good_desc + "<a href='http://thisisanexampleofabrokenurl.com/'>This is a Bad Link</a>"
assert_results_contain(check(font, {"description": bad_desc}),
FAIL, 'broken-links',
'with a description file containing a known-bad URL...')
#TODO: WARN, 'timeout'
def test_check_description_git_url():
""" Does DESCRIPTION file contain an upstream Git repo URL? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/description/git_url")
# TODO: test INFO 'url-found'
font = TEST_FILE("cabin/Cabin-Regular.ttf")
assert_results_contain(check(font),
FAIL, 'lacks-git-url',
'with description file that has no git repo URLs...')
good_desc = ("<a href='https://github.com/uswds/public-sans'>Good URL</a>"
"<a href='https://gitlab.com/smc/fonts/uroob'>Another Good One</a>")
assert_PASS(check(font, {"description": good_desc}),
'with description file that has good links...')
bad_desc = "<a href='https://v2.designsystem.digital.gov'>Bad URL</a>"
assert_results_contain(check(font, {"description": bad_desc}),
FAIL, 'lacks-git-url',
'with description file that has false git in URL...')
def test_check_description_valid_html():
""" DESCRIPTION file is a propper HTML snippet ? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/description/valid_html")
font = TEST_FILE("nunito/Nunito-Regular.ttf")
assert_PASS(check(font),
'with description file that contains a good HTML snippet...')
bad_desc = open(TEST_FILE("cabin/FONTLOG.txt"), "r").read() # :-)
assert_results_contain(check(font, {"description": bad_desc}),
FAIL, 'lacks-paragraph',
'with a known-bad file (without HTML paragraph tags)...')
bad_desc = "<html>foo</html>"
assert_results_contain(check(font, {"description": bad_desc}),
FAIL, 'html-tag',
'with description file that contains the <html> tag...')
bad_desc = ("<p>This example has the & caracter,"
" but does not escape it with an HTML entity code."
" It should use & instead."
"</p>")
assert_results_contain(check(font, {"description": bad_desc}),
FAIL, 'malformed-snippet',
'with a known-bad file (not using HTML entity syntax)...')
def test_check_description_min_length():
""" DESCRIPTION.en_us.html must have more than 200 bytes. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/description/min_length")
font = TEST_FILE("nunito/Nunito-Regular.ttf")
bad_length = 'a' * 199
assert_results_contain(check(font, {"description": bad_length}),
FAIL, 'too-short',
'with 199-byte buffer...')
bad_length = 'a' * 200
assert_results_contain(check(font, {"description": bad_length}),
FAIL, 'too-short',
'with 200-byte buffer...')
good_length = 'a' * 201
assert_PASS(check(font, {"description": good_length}),
'with 201-byte buffer...')
def test_check_description_max_length():
""" DESCRIPTION.en_us.html must have less than 2000 bytes. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/description/max_length")
font = TEST_FILE("nunito/Nunito-Regular.ttf")
bad_length = 'a' * 2001
assert_results_contain(check(font, {"description": bad_length}),
FAIL, "too-long",
'with 2001-byte buffer...')
bad_length = 'a' * 2000
assert_results_contain(check(font, {"description": bad_length}),
FAIL, "too-long",
'with 2000-byte buffer...')
good_length = 'a' * 1999
assert_PASS(check(font, {"description": good_length}),
'with 1999-byte buffer...')
def test_check_description_eof_linebreak():
""" DESCRIPTION.en_us.html should end in a linebreak. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/description/eof_linebreak")
font = TEST_FILE("nunito/Nunito-Regular.ttf")
bad = ("We want to avoid description files\n"
"without an end-of-file linebreak\n"
"like this one.")
assert_results_contain(check(font, {"description": bad}),
WARN, "missing-eof-linebreak",
'when we lack an end-of-file linebreak...')
good = ("On the other hand, this one\n"
"is good enough.\n")
assert_PASS(check(font, {"description": good}),
'when we add one...')
def test_check_name_family_and_style_max_length():
""" Combined length of family and style must not exceed 27 characters. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/family_and_style_max_length")
# Our reference Cabin Regular is known to be good
ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))
# So it must PASS the check:
assert_PASS(check(ttFont),
'with a good font...')
# Then we emit a WARNing with long family/style names
# Originally these were based on the example in the Glyphs tutorial
# (at https://glyphsapp.com/tutorials/multiple-masters-part-3-setting-up-instances)
# but we later increased the maximum allowed length a bit.
# First we expect a WARN with a bad FAMILY NAME
for index, name in enumerate(ttFont["name"].names):
if name.nameID == NameID.FONT_FAMILY_NAME:
# This has 28 chars, while the max currently allowed is 27.
bad = "AnAbsurdlyLongFamilyNameFont"
assert len(bad) == 28
ttFont["name"].names[index].string = bad.encode(name.getEncoding())
break
assert_results_contain(check(ttFont),
WARN, 'too-long',
'with a bad font...')
# Now let's restore the good Cabin Regular...
ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))
# ...and break the check again with a bad SUBFAMILY NAME:
for index, name in enumerate(ttFont["name"].names):
if name.nameID == NameID.FONT_SUBFAMILY_NAME:
bad = "WithAVeryLongAndBadStyleName"
assert len(bad) == 28
ttFont["name"].names[index].string = bad.encode(name.getEncoding())
break
assert_results_contain(check(ttFont),
WARN, 'too-long',
'with a bad font...')
def test_check_glyphs_file_name_family_and_style_max_length():
""" Combined length of family and style must not exceed 27 characters. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/glyphs_file/name/family_and_style_max_length")
# Our reference Comfortaa.glyphs is known to be good
glyphsFile = GLYPHSAPP_TEST_FILE("Comfortaa.glyphs")
# So it must PASS the check:
assert_PASS(check(glyphsFile),
'with a good font...')
# Then we emit a WARNing with long family/style names
# Originally these were based on the example in the Glyphs tutorial
# (at https://glyphsapp.com/tutorials/multiple-masters-part-3-setting-up-instances)
# but we later increased the maximum allowed length a bit.
# First we expect a WARN with a bad FAMILY NAME
# This has 28 chars, while the max currently allowed is 27.
bad = "AnAbsurdlyLongFamilyNameFont"
assert len(bad) == 28
glyphsFile.familyName = bad
assert_results_contain(check(glyphsFile),
WARN, 'too-long',
'with a too long font familyname...')
for i in range(len(glyphsFile.instances)):
# Restore the good glyphs file...
glyphsFile = GLYPHSAPP_TEST_FILE("Comfortaa.glyphs")
# ...and break the check again with a long SUBFAMILY NAME
# on one of its instances:
bad_stylename = "WithAVeryLongAndBadStyleName"
assert len(bad_stylename) == 28
glyphsFile.instances[i].fullName = f"{glyphsFile.familyName} {bad_stylename}"
assert_results_contain(check(glyphsFile),
WARN, 'too-long',
'with a too long stylename...')
def test_check_name_line_breaks():
""" Name table entries should not contain line-breaks. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/line_breaks")
# Our reference Mada Regular font is good here:
ttFont = TTFont(TEST_FILE("mada/Mada-Regular.ttf"))
# So it must PASS the check:
assert_PASS(check(ttFont),
'with a good font...')
num_entries = len(ttFont["name"].names)
for i in range(num_entries):
ttFont = TTFont(TEST_FILE("mada/Mada-Regular.ttf"))
encoding = ttFont["name"].names[i].getEncoding()
ttFont["name"].names[i].string = "bad\nstring".encode(encoding)
assert_results_contain(check(ttFont),
FAIL, 'line-break',
f'with name entries containing a linebreak ({i}/{num_entries})...')
def test_check_name_rfn():
""" Name table strings must not contain 'Reserved Font Name'. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/rfn")
ttFont = TTFont(TEST_FILE("nunito/Nunito-Regular.ttf"))
assert_PASS(check(ttFont))
ttFont["name"].setName("Bla Reserved Font Name", 5, 3, 1, 0x409)
assert_results_contain(check(ttFont),
FAIL, 'rfn',
'with "Reserved Font Name" on a name table entry...')
def test_check_metadata_parses():
""" Check METADATA.pb parse correctly. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/parses")
good = TEST_FILE("merriweather/Merriweather-Regular.ttf")
assert_PASS(check(good),
'with a good METADATA.pb file...')
skip = TEST_FILE("slabo/Slabo-Regular.ttf")
assert_results_contain(check(skip),
SKIP, 'file-not-found',
'with a missing METADATA.pb file...')
bad = TEST_FILE("broken_metadata/foo.ttf")
assert_results_contain(check(bad),
FAIL, 'parsing-error',
'with a bad METADATA.pb file...')
def test_check_metadata_unknown_designer():
""" Font designer field in METADATA.pb must not be 'unknown'. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/unknown_designer")
font = TEST_FILE("merriweather/Merriweather.ttf")
assert_PASS(check(font),
'with a good METADATA.pb file...')
md = check["family_metadata"]
md.designer = "unknown"
assert_results_contain(check(font, {"family_metadata": md}),
FAIL, 'unknown-designer',
'with a bad METADATA.pb file...')
def test_check_metadata_designer_values():
""" Multiple values in font designer field in
METADATA.pb must be separated by commas. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/designer_values")
font = TEST_FILE("merriweather/Merriweather.ttf")
assert_PASS(check(font),
'with a good METADATA.pb file...')
md = check["family_metadata"]
md.designer = "Pentagram, MCKL"
assert_PASS(check(font, {"family_metadata": md}),
'with a good multiple-designers string...')
md.designer = "Pentagram / MCKL" # This actually happened on an
# early version of the Red Hat Text family
assert_results_contain(check(font, {"family_metadata": md}),
FAIL, 'slash',
'with a bad multiple-designers string'
' (names separated by a slash char)...')
def test_check_metadata_broken_links():
""" Does DESCRIPTION file contain broken links? """
#check = CheckTester(googlefonts_profile,
# "com.google.fonts/check/metadata/broken_links")
# TODO: Implement-me!
# INFO, "email"
# WARN, "timeout"
# FAIL, "broken-links"
def test_check_metadata_undeclared_fonts():
""" Ensure METADATA.pb lists all font binaries. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/undeclared_fonts")
# Our reference Nunito family is known to be good here.
font = TEST_FILE("nunito/Nunito-Regular.ttf")
assert_PASS(check(font))
# Our reference Cabin family has files that are not declared in its METADATA.pb:
# - CabinCondensed-Medium.ttf
# - CabinCondensed-SemiBold.ttf
# - CabinCondensed-Regular.ttf
# - CabinCondensed-Bold.ttf
font = TEST_FILE("cabin/Cabin-Regular.ttf")
assert_results_contain(check(font),
FAIL, 'file-not-declared')
# We placed an additional file on a subdirectory of our reference
# OverpassMono family with the name "another_directory/ThisShouldNotBeHere.otf"
font = TEST_FILE("overpassmono/OverpassMono-Regular.ttf")
assert_results_contain(check(font),
WARN, 'font-on-subdir')
# We do accept statics folder though!
# Jura is an example:
font = TEST_FILE("varfont/jura/Jura.ttf")
assert_PASS(check(font))
@pytest.mark.skip(reason="re-enable after addressing issue #1998")
def test_check_family_equal_numbers_of_glyphs(mada_ttFonts, cabin_ttFonts):
""" Fonts have equal numbers of glyphs? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/family/equal_numbers_of_glyphs")
# our reference Cabin family is known to be good here.
assert_PASS(check(cabin_ttFonts),
'with a good family.')
# our reference Mada family is bad here with 407 glyphs on most font files
# except the Black and the Medium, that both have 408 glyphs.
assert_results_contain(check(mada_ttFonts),
FAIL, 'glyph-count-diverges',
'with fonts that diverge on number of glyphs.')
@pytest.mark.skip(reason="re-enable after addressing issue #1998")
def test_check_family_equal_glyph_names(mada_ttFonts, cabin_ttFonts):
""" Fonts have equal glyph names? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/family/equal_glyph_names")
# our reference Cabin family is known to be good here.
assert_PASS(check(cabin_ttFonts),
'with a good family.')
# our reference Mada family is bad here with 407 glyphs on most font files
# except the Black and the Medium, that both have 408 glyphs (that extra glyph
# causes the check to fail).
assert_results_contain(check(mada_ttFonts),
FAIL, 'missing-glyph',
'with fonts that diverge on number of glyphs.')
def test_check_fstype():
""" Checking OS/2 fsType """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/fstype")
# our reference Cabin family is known to be good here.
ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))
assert_PASS(check(ttFont),
'with a good font without DRM.')
# modify the OS/2 fsType value to something different than zero:
ttFont['OS/2'].fsType = 1
assert_results_contain(check(ttFont),
FAIL, 'drm',
'with fonts that enable DRM restrictions via non-zero fsType bits.')
def test_condition__registered_vendor_ids():
""" Get a list of vendor IDs from Microsoft's website. """
from fontbakery.profiles.googlefonts import registered_vendor_ids
registered_ids = registered_vendor_ids()
print('As of July 2018, "MLAG": "Michael LaGattuta" must show up in the list...')
assert "MLAG" in registered_ids # Michael LaGattuta
print('As of December 2020, "GOOG": "Google" must show up in the list...')
assert "GOOG" in registered_ids # Google
print('"CFA ": "Computer Fonts Australia" is a good vendor id, lacking a URL')
assert "CFA " in registered_ids # Computer Fonts Australia
print('"GNU ": "Free Software Foundation, Inc." is a good vendor id with 3 letters and a space.')
assert "GNU " in registered_ids # Free Software Foundation, Inc. / http://www.gnu.org/
print('"GNU" without the right-padding space must not be on the list!')
assert "GNU" not in registered_ids # All vendor ids must be 4 chars long!
print('"ADBE": "Adobe" is a good 4-letter vendor id.')
assert "ADBE" in registered_ids # Adobe
print('"B&H ": "Bigelow & Holmes" is a valid vendor id that contains an ampersand.')
assert "B&H " in registered_ids # Bigelow & Holmes
print('"MS ": "Microsoft Corp." is a good vendor id with 2 letters and padded with spaces.')
assert "MS " in registered_ids # Microsoft Corp.
print('"TT\0\0": we also accept vendor-IDs containing NULL-padding.')
assert "TT\0\0" in registered_ids # constains NULL bytes
print('All vendor ids must be 4 chars long!')
assert "GNU" not in registered_ids # 3 chars long is bad
assert "MS" not in registered_ids # 2 chars long is bad
assert "H" not in registered_ids # 1 char long is bad
print('"H ": "Hurme Design" is a good vendor id with a single letter padded with spaces.')
assert "H " in registered_ids # Hurme Design
print('" H": But not padded on the left, please!')
assert " H" not in registered_ids # a bad vendor id (presumably for "Hurme Design"
# but with a vendor id parsing bug)
print('"????" is an unknown vendor id.')
assert "????" not in registered_ids
def test_check_vendor_id():
""" Checking OS/2 achVendID """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/vendor_id")
# Let's start with our reference Merriweather Regular
ttFont = TTFont(TEST_FILE("merriweather/Merriweather-Regular.ttf"))
bad_vids = ['UKWN', 'ukwn', 'PfEd']
for bad_vid in bad_vids:
ttFont['OS/2'].achVendID = bad_vid
assert_results_contain(check(ttFont),
WARN, 'bad',
f'with bad vid "{bad_vid}".')
ttFont['OS/2'].achVendID = None
assert_results_contain(check(ttFont),
WARN, 'not-set',
'with font missing vendor id info.')
ttFont['OS/2'].achVendID = "????"
assert_results_contain(check(ttFont),
WARN, 'unknown',
'with unknown vendor id.')
# we now change the fields into a known good vendor id:
ttFont['OS/2'].achVendID = "APPL"
assert_PASS(check(ttFont),
'with a good font.')
# And let's also make sure it works here:
ttFont['OS/2'].achVendID = "GOOG"
assert_PASS(check(ttFont),
'with a good font.')
def NOT_IMPLEMENTED__test_check_glyph_coverage():
""" Check glyph coverage. """
#check = CheckTester(googlefonts_profile,
# "com.google.fonts/check/glyph_coverage")
#TODO: Implement-me!
## Our reference Mada Regular is known to be bad here.
#ttFont = TTFont(TEST_FILE("mada/Mada-Regular.ttf"))
#assert_results_contain(check(ttFont),
# FAIL, 'missing-codepoints',
# 'with a bad font...')
## Our reference Cabin Regular is known to be good here.
#ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))
#assert_PASS(check(ttFont),
# 'with a good font...')
def test_check_name_unwanted_chars():
""" Substitute copyright, registered and trademark
symbols in name table entries. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/unwanted_chars")
# Our reference Mada Regular is known to be bad here.
font = TEST_FILE("mada/Mada-Regular.ttf")
assert_results_contain(check(font),
FAIL, 'unwanted-chars',
'with a bad font...')
# Our reference Cabin Regular is known to be good here.
font = TEST_FILE("cabin/Cabin-Regular.ttf")
assert_PASS(check(font),
'with a good font...')
def test_check_usweightclass():
""" Checking OS/2 usWeightClass. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/usweightclass")
# Our reference Mada Regular is known to be bad here.
font = TEST_FILE("mada/Mada-Regular.ttf")
ttFont = TTFont(font)
assert_results_contain(check(ttFont),
FAIL, 'bad-value',
f'with bad font "{font}" ...')
# All fonts in our reference Cabin family are known to be good here.
for font in cabin_fonts:
ttFont = TTFont(font)
assert_PASS(check(ttFont),
f'with good font "{font}"...')
# Check otf Thin == 250 and ExtraLight == 275
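# (Google Fonts expects 250/275 for CFF Thin/ExtraLight; reportedly, lower
# usWeightClass values could trigger faux-bolding in some legacy renderers.)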
font = TEST_FILE("rokkitt/Rokkitt-Thin.otf")
ttFont = TTFont(font)
assert_results_contain(check(ttFont),
FAIL, "bad-value",
f'with bad font "{font}"...')
ttFont['OS/2'].usWeightClass = 250
assert_PASS(check(ttFont),
f'with good font "{font}" (usWeightClass = 250) ...')
font = TEST_FILE("rokkitt/Rokkitt-ExtraLight.otf")
ttFont = TTFont(font)
assert_results_contain(check(ttFont),
FAIL, "bad-value",
f'with bad font "{font}" ...')
ttFont['OS/2'].usWeightClass = 275
assert_PASS(check(ttFont),
f'with good font "{font}" (usWeightClass = 275) ...')
# TODO: test italic variants to ensure we do not get regressions of
# this bug: https://github.com/googlefonts/fontbakery/issues/2650
def test_family_directory_condition():
from fontbakery.profiles.shared_conditions import family_directory
assert family_directory("some_directory/Foo.ttf") == "some_directory"
assert family_directory("some_directory/subdir/Foo.ttf") == "some_directory/subdir"
assert family_directory("Foo.ttf") == "." # This is meant to ensure license files
# are correctly detected on the current
# working directory.
def test_check_family_has_license():
""" Check font project has a license. """
from fontbakery.profiles.googlefonts import (com_google_fonts_check_family_has_license as check,
licenses)
# The lines marked with 'hack' below are meant to
# keep fontbakery's own license file from
# interfering with this code test.
detected_licenses = licenses(portable_path("data/test/028/multiple"))
detected_licenses.pop(-1) # hack
assert_results_contain(check(detected_licenses),
FAIL, 'multiple',
'with multiple licenses...')
detected_licenses = licenses(portable_path("data/test/028/none"))
detected_licenses.pop(-1) # hack
assert_results_contain(check(detected_licenses),
FAIL, 'no-license',
'with no license...')
detected_licenses = licenses(portable_path("data/test/028/pass_ofl"))
detected_licenses.pop(-1) # hack
assert_PASS(check(detected_licenses),
'with a single OFL license...')
detected_licenses = licenses(portable_path("data/test/028/pass_apache"))
detected_licenses.pop(-1) # hack
assert_PASS(check(detected_licenses),
'with a single Apache license...')
def test_check_license_ofl_body_text():
"""Check OFL.txt contains correct body text."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/license/OFL_body_text")
# Our reference Montserrat family is known to have
# a proper OFL.txt license file.
# NOTE: This is currently considered good
# even though it uses an "http://" URL
font = TEST_FILE("montserrat/Montserrat-Regular.ttf")
ttFont = TTFont(font)
assert_PASS(check(ttFont),
'with a good OFL.txt license with "http://" url.')
# using "https://" is also considered good:
good_license = check["license_contents"].replace("http://", "https://")
assert_PASS(check(ttFont, {'license_contents': good_license}),
'with a good OFL.txt license with "https://" url.')
# modify a tiny bit of the license text, to trigger the FAIL:
bad_license = check["license_contents"].replace("SIL OPEN FONT LICENSE Version 1.1",
"SOMETHING ELSE :-P Version Foo")
assert_results_contain(check(ttFont, {'license_contents': bad_license}),
FAIL, "incorrect-ofl-body-text",
"with incorrect ofl body text")
def test_check_name_license(mada_ttFonts):
""" Check copyright namerecords match license file. """
from fontbakery.profiles.googlefonts import com_google_fonts_check_name_license as check
# Our reference Mada family has its copyright name records properly set
# identifying it as being licensed under the Open Font License
license = 'OFL.txt'
wrong_license = 'LICENSE.txt' # Apache
for ttFont in mada_ttFonts:
assert_PASS(check(ttFont, license),
'with good fonts ...')
for ttFont in mada_ttFonts:
assert_results_contain(check(ttFont, wrong_license),
FAIL, 'wrong',
'with wrong entry values ...')
for ttFont in mada_ttFonts:
delete_name_table_id(ttFont, NameID.LICENSE_DESCRIPTION)
assert_results_contain(check(ttFont, wrong_license),
FAIL, 'missing',
'with missing copyright namerecords ...')
# TODO:
# WARN, "http" / "http-in-description"
def NOT_IMPLEMENTED_test_check_name_license_url():
""" License URL matches License text on name table? """
# check = CheckTester(googlefonts_profile,
# "com.google.fonts/check/name/license_url")
# TODO: Implement-me!
#
# code-paths:
# - FAIL, code="ufl"
# - FAIL, code="licensing-inconsistency"
# - FAIL, code="no-license-found"
# - FAIL, code="bad-entries"
# - WARN, code="http-in-description"
# - WARN, code="http"
# - PASS
def test_check_name_description_max_length():
""" Description strings in the name table
must not exceed 200 characters.
"""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/description_max_length")
# Our reference Mada Regular is known to be good here.
ttFont = TTFont(TEST_FILE("mada/Mada-Regular.ttf"))
assert_PASS(check(ttFont),
'with a good font...')
# Here we add strings to NameID.DESCRIPTION with exactly 200 chars,
# so it should still PASS:
for i, name in enumerate(ttFont['name'].names):
if name.nameID == NameID.DESCRIPTION:
ttFont['name'].names[i].string = ('a' * 200).encode(name.getEncoding())
assert_PASS(check(ttFont),
'with a 200 char string...')
# And here we make the strings longer than 200 chars
# in order to make the check emit a WARN:
for i, name in enumerate(ttFont['name'].names):
if name.nameID == NameID.DESCRIPTION:
ttFont['name'].names[i].string = ('a' * 201).encode(name.getEncoding())
assert_results_contain(check(ttFont),
WARN, 'too-long',
'with a too long description string...')
def test_check_hinting_impact():
""" Show hinting filesize impact. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/hinting_impact")
font = TEST_FILE("mada/Mada-Regular.ttf")
assert_results_contain(check(font),
INFO, 'size-impact',
'this check always emits an INFO result...')
# TODO: test the CFF code-path
def test_check_file_size():
"""Ensure files are not too large."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/file_size")
assert_PASS(check(TEST_FILE("mada/Mada-Regular.ttf")))
assert_results_contain(check(TEST_FILE("varfont/inter/Inter[slnt,wght].ttf")),
WARN, 'large-font',
'with quite a big font...')
assert_results_contain(check(TEST_FILE("cjk/SourceHanSans-Regular.otf")),
FAIL, 'massive-font',
'with a very big font...')
def test_check_name_version_format():
""" Version format is correct in 'name' table ? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/version_format")
# Our reference Mada Regular font is good here:
ttFont = TTFont(TEST_FILE("mada/Mada-Regular.ttf"))
# So it must PASS the check:
assert_PASS(check(ttFont),
'with a good font...')
# then we introduce bad strings in all version-string entries:
for i, name in enumerate(ttFont["name"].names):
if name.nameID == NameID.VERSION_STRING:
invalid = "invalid-version-string".encode(name.getEncoding())
ttFont["name"].names[i].string = invalid
assert_results_contain(check(ttFont),
FAIL, 'bad-version-strings',
'with bad version format in name table...')
# and finally we remove all version-string entries:
for i, name in enumerate(ttFont["name"].names):
if name.nameID == NameID.VERSION_STRING:
del ttFont["name"].names[i]
assert_results_contain(check(ttFont),
FAIL, 'no-version-string',
'with font lacking version string entries in name table...')
def NOT_IMPLEMENTED_test_check_old_ttfautohint():
""" Font has old ttfautohint applied? """
# check = CheckTester(googlefonts_profile,
# "com.google.fonts/check/old_ttfautohint")
# TODO: Implement-me!
#
# code-paths:
# - FAIL, code="lacks-version-strings"
# - INFO, code="version-not-detected" "Could not detect which version of ttfautohint
# was used in this font."
# - WARN, code="old-ttfa" "detected an old ttfa version"
# - PASS
# - FAIL, code="parse-error"
@pytest.mark.parametrize("expected_status,expected_keyword,reason,font",[
(FAIL, "lacks-ttfa-params",
'with a font lacking ttfautohint params on its version strings on the name table.',
TEST_FILE("coveredbyyourgrace/CoveredByYourGrace.ttf")),
(SKIP, "not-hinted",
'with a font which appears to our heuristic as not hinted using ttfautohint.',
TEST_FILE("mada/Mada-Regular.ttf")),
(PASS, "ok",
'with a font that has ttfautohint params'
' (-l 6 -r 36 -G 0 -x 10 -H 350 -D latn -f cyrl -w "" -X "")',
TEST_FILE("merriweather/Merriweather-Regular.ttf"))
])
def test_check_has_ttfautohint_params(expected_status, expected_keyword, reason, font):
""" Font has ttfautohint params? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/has_ttfautohint_params")
assert_results_contain(check(font),
expected_status, expected_keyword,
reason)
def test_check_epar():
""" EPAR table present in font? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/epar")
# Our reference Mada Regular lacks an EPAR table:
ttFont = TTFont(TEST_FILE("mada/Mada-Regular.ttf"))
# So it must emit an INFO message inviting the designers
# to learn more about it:
assert_results_contain(check(ttFont),
INFO, 'lacks-EPAR',
'with a font lacking an EPAR table...')
# add a fake EPAR table to validate the PASS code-path:
ttFont["EPAR"] = "foo"
assert_PASS(check(ttFont),
'with a good font...')
def NOT_IMPLEMENTED_test_check_gasp():
""" Is GASP table correctly set? """
# check = CheckTester(googlefonts_profile,
# "com.google.fonts/check/gasp")
# TODO: Implement-me!
#
# code-paths:
# - FAIL, "lacks-gasp" "Font is missing the gasp table."
# - FAIL, "empty" "The gasp table has no values."
# - FAIL, "lacks-ffff-range" "The gasp table does not have a 0xFFFF gasp range."
# - INFO, "ranges" "These are the ppm ranges declared on the gasp table:"
# - WARN, "non-ffff-range" "The gasp table has a range that may be unneccessary."
# - WARN, "unset-flags" "All flags in gasp range 0xFFFF (i.e. all font sizes) must be set to 1"
# - PASS "The gasp table is correctly set."
def test_check_name_familyname_first_char():
""" Make sure family name does not begin with a digit. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/familyname_first_char")
# Our reference Mada Regular is known to be good
ttFont = TTFont(TEST_FILE("mada/Mada-Regular.ttf"))
# So it must PASS the check:
assert_PASS(check(ttFont),
'with a good font...')
# alter the family-name prepending a digit:
for i, name in enumerate(ttFont["name"].names):
if name.nameID == NameID.FONT_FAMILY_NAME:
ttFont["name"].names[i].string = "1badname".encode(name.getEncoding())
# and make sure the check FAILs:
assert_results_contain(check(ttFont),
FAIL, 'begins-with-digit',
'with a font in which the family name begins with a digit...')
def test_check_name_ascii_only_entries():
""" Are there non-ASCII characters in ASCII-only NAME table entries? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/ascii_only_entries")
# Our reference Merriweather Regular is known to be good
ttFont = TTFont(TEST_FILE("merriweather/Merriweather-Regular.ttf"))
# So it must PASS the check:
assert_PASS(check(ttFont),
'with a good font...')
# The OpenType spec requires ASCII for the POSTSCRIPT_NAME (nameID 6).
# For COPYRIGHT_NOTICE (nameID 0) ASCII is required because that
# string should be the same in CFF fonts which also have this
# requirement in the OpenType spec.
# Let's check detection of both. First nameId 6:
for i, name in enumerate(ttFont['name'].names):
if name.nameID == NameID.POSTSCRIPT_NAME:
ttFont['name'].names[i].string = "Infração".encode(encoding="utf-8")
assert_results_contain(check(ttFont),
FAIL, 'bad-string',
'with non-ascii on nameID 6 entry (Postscript name)...')
assert_results_contain(check(ttFont),
FAIL, 'non-ascii-strings',
'with non-ascii on nameID 6 entry (Postscript name)...')
# Then reload the good font
ttFont = TTFont(TEST_FILE("merriweather/Merriweather-Regular.ttf"))
# And check detection of a problem on nameId 0:
for i, name in enumerate(ttFont['name'].names):
if name.nameID == NameID.COPYRIGHT_NOTICE:
ttFont['name'].names[i].string = "Infração".encode(encoding="utf-8")
assert_results_contain(check(ttFont),
FAIL, 'bad-string',
'with non-ascii on nameID 0 entry (Copyright notice)...')
assert_results_contain(check(ttFont),
FAIL, 'non-ascii-strings',
'with non-ascii on nameID 0 entry (Copyright notice)...')
# Reload the good font once more:
ttFont = TTFont(TEST_FILE("merriweather/Merriweather-Regular.ttf"))
# Note:
# A common place where we find non-ASCII strings is on name table
# entries with NameID > 18, which are expressly for localising
# the ASCII-only IDs into Hindi / Arabic / etc.
# Let's check a good case of a non-ascii on the name table then!
# Choose an arbitrary name entry to mess with:
index = 5
ttFont['name'].names[index].nameID = 19
ttFont['name'].names[index].string = "Fantástico!".encode(encoding="utf-8")
assert_PASS(check(ttFont),
'with non-ascii on entries with nameId > 18...')
def test_split_camel_case_condition():
from fontbakery.utils import split_camel_case
assert split_camel_case("Lobster") == "Lobster"
assert split_camel_case("LibreCaslonText") == "Libre Caslon Text"
def test_check_metadata_listed_on_gfonts():
""" METADATA.pb: Fontfamily is listed on Google Fonts API? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/listed_on_gfonts")
font = TEST_FILE("familysans/FamilySans-Regular.ttf")
# Our reference FamilySans family is just a generic example
# and thus is not really hosted (nor will ever be hosted) at Google Fonts servers:
assert_results_contain(check(font),
WARN, 'not-found',
f'with "{font}", from a family that\'s'
f' not listed on GFonts...')
font = TEST_FILE("merriweather/Merriweather-Regular.ttf")
# Our reference Merriweather family is available on the Google Fonts collection:
assert_PASS(check(font),
f'with "{font}", from a family that is'
f' listed on Google Fonts API...')
font = TEST_FILE("abeezee/ABeeZee-Regular.ttf")
# This is to ensure the code handles well camel-cased familynames.
assert_PASS(check(font),
f'with "{font}", listed and with a camel-cased name...')
font = TEST_FILE("librecaslontext/LibreCaslonText[wght].ttf")
# And the check should also properly handle space-separated multi-word familynames.
assert_PASS(check(font),
f'with "{font}", available and with a space-separated family name...')
def test_check_metadata_unique_full_name_values():
""" METADATA.pb: check if fonts field only has unique "full_name" values. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/unique_full_name_values")
# Our reference FamilySans family is good:
font = TEST_FILE("familysans/FamilySans-Regular.ttf")
assert_PASS(check(font),
'with a good family...')
# then duplicate a full_name entry to make it FAIL:
md = check["family_metadata"]
md.fonts[0].full_name = md.fonts[1].full_name
assert_results_contain(check(font, {"family_metadata": md}),
FAIL, 'duplicated',
'with a duplicated full_name entry.')
def test_check_metadata_unique_weight_style_pairs():
""" METADATA.pb: check if fonts field only contains unique style:weight pairs. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/unique_weight_style_pairs")
# Our reference FamilySans family is good:
font = TEST_FILE("familysans/FamilySans-Regular.ttf")
assert_PASS(check(font),
'with a good family...')
# then duplicate a pair of style & weight entries to make it FAIL:
md = check["family_metadata"]
md.fonts[0].style = md.fonts[1].style
md.fonts[0].weight = md.fonts[1].weight
assert_results_contain(check(font, {"family_metadata": md}),
FAIL, 'duplicated',
'with a duplicated pair of style & weight entries')
def test_check_metadata_license():
""" METADATA.pb license is "APACHE2", "UFL" or "OFL"? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/license")
# Let's start with our reference FamilySans family:
font = TEST_FILE("familysans/FamilySans-Regular.ttf")
good_licenses = ["APACHE2", "UFL", "OFL"]
some_bad_values = ["APACHE", "Apache", "Ufl", "Ofl", "Open Font License"]
check(font)
md = check["family_metadata"]
for good in good_licenses:
md.license = good
assert_PASS(check(font, {"family_metadata": md}),
f': {good}')
for bad in some_bad_values:
md.license = bad
assert_results_contain(check(font, {"family_metadata": md}),
FAIL, 'bad-license',
f': {bad}')
def test_check_metadata_menu_and_latin():
""" METADATA.pb should contain at least "menu" and "latin" subsets. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/menu_and_latin")
# Let's start with our reference FamilySans family:
fonts = TEST_FILE("familysans/FamilySans-Regular.ttf")
good_cases = [
["menu", "latin"],
["menu", "cyrillic", "latin"],
]
bad_cases = [
["menu"],
["latin"],
[""],
["latin", "cyrillyc"],
["khmer"]
]
check(fonts)
md = check["family_metadata"]
for good in good_cases:
del md.subsets[:]
md.subsets.extend(good)
assert_PASS(check(fonts, {"family_metadata": md}),
f'with subsets = {good}')
for bad in bad_cases:
del md.subsets[:]
md.subsets.extend(bad)
assert_results_contain(check(fonts, {"family_metadata": md}),
FAIL, 'missing',
f'with subsets = {bad}')
def test_check_metadata_subsets_order():
""" METADATA.pb subsets should be alphabetically ordered. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/subsets_order")
# Let's start with our reference FamilySans family:
fonts = TEST_FILE("familysans/FamilySans-Regular.ttf")
good_cases = [
["latin", "menu"],
["cyrillic", "latin", "menu"],
["cyrillic", "khmer", "latin", "menu"],
]
bad_cases = [
["menu", "latin"],
["latin", "cyrillic", "menu"],
["cyrillic", "menu", "khmer", "latin"],
]
check(fonts)
md = check["family_metadata"]
for good in good_cases:
del md.subsets[:]
md.subsets.extend(good)
assert_PASS(check(fonts, {"family_metadata": md}),
f'with subsets = {good}')
md = check["family_metadata"]
for bad in bad_cases:
del md.subsets[:]
md.subsets.extend(bad)
assert_results_contain(check(fonts, {"family_metadata": md}),
FAIL, 'not-sorted',
f'with subsets = {bad}')
def test_check_metadata_includes_production_subsets():
"""Check METADATA.pb has production subsets."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/includes_production_subsets")
# We need to use a family that is already in production
# Our reference Cabin is known to be good
fonts = cabin_fonts
# So it must PASS the check:
assert_PASS(check(fonts),
"with a good METADATA.pb for this family...")
# Then we induce the problem by removing a subset:
md = check["family_metadata"]
md.subsets.pop()
assert_results_contain(check(fonts, {"family_metadata": md}),
FAIL, 'missing-subsets',
'with a bad METADATA.pb (last subset has been removed)...')
def test_check_metadata_copyright():
""" METADATA.pb: Copyright notice is the same in all fonts? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/copyright")
# Let's start with our reference FamilySans family:
font = TEST_FILE("familysans/FamilySans-Regular.ttf")
# We know its copyright notices are consistent:
assert_PASS(check(font),
'with consistent copyright notices on FamilySans...')
# Now we make them diverge:
md = check["family_metadata"]
md.fonts[1].copyright = md.fonts[0].copyright + " arbitrary suffix!" # to make it different
# To ensure the problem is detected:
assert_results_contain(check(font, {"family_metadata": md}),
FAIL, 'inconsistency',
'with diverging copyright notice strings...')
def test_check_metadata_familyname():
""" Check that METADATA.pb family values are all the same. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/familyname")
# Let's start with our reference FamilySans family:
font = TEST_FILE("familysans/FamilySans-Regular.ttf")
# We know its family name entries on METADATA.pb are consistent:
assert_PASS(check(font),
'with consistent family name...')
# Now we make them diverge:
md = check["family_metadata"]
md.fonts[1].name = md.fonts[0].name + " arbitrary suffix!" # to make it different
# To ensure the problem is detected:
assert_results_contain(check(font, {"family_metadata": md}),
FAIL, 'inconsistency',
'with diverging family name metadata entries...')
def test_check_metadata_has_regular():
""" METADATA.pb: According Google Fonts standards, families should have a Regular style. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/has_regular")
# Let's start with our reference FamilySans family:
font = TEST_FILE("familysans/FamilySans-Regular.ttf")
# We know that Family Sans has a Regular style declared in its METADATA.pb file:
assert_PASS(check(font),
'with Family Sans, a family with a regular style...')
# We remove the regular:
md = check["family_metadata"]
for i in range(len(md.fonts)):
if md.fonts[i].filename == "FamilySans-Regular.ttf":
del md.fonts[i]
break
# and make sure the check now FAILs:
assert_results_contain(check(font, {"family_metadata": md}),
FAIL, 'lacks-regular',
'with a METADATA.pb file without a regular...')
def test_check_metadata_regular_is_400():
""" METADATA.pb: Regular should be 400. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/regular_is_400")
# Let's start with the METADATA.pb file from our reference FamilySans family:
font = TEST_FILE("familysans/FamilySans-Regular.ttf")
# We know that Family Sans' Regular has a weight value equal to 400, so the check should PASS:
assert_PASS(check(font),
'with Family Sans, a family with regular=400...')
md = check["family_metadata"]
# Then we change the weight value of its Regular style:
for i in range(len(md.fonts)):
if md.fonts[i].filename == "FamilySans-Regular.ttf":
md.fonts[i].weight = 500
# and make sure the check now FAILs:
assert_results_contain(check(font, {"family_metadata": md}),
FAIL, 'not-400',
'with METADATA.pb with regular=500...')
def test_check_metadata_nameid_family_name():
""" Checks METADATA.pb font.name field matches
family name declared on the name table. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/nameid/family_name")
# Let's start with the METADATA.pb file from our reference FamilySans family:
font = TEST_FILE("familysans/FamilySans-Regular.ttf")
# We know that Family Sans Regular is good here:
assert_PASS(check(font))
# Then cause it to fail:
md = check["font_metadata"]
md.name = "Foo"
assert_results_contain(check(font, {"font_metadata": md}),
FAIL, "mismatch")
# TODO: the failure-mode below seems more generic than the scope
# of this individual check. This could become a check by itself!
#
# code-paths:
# - FAIL code="missing", "Font lacks a FONT_FAMILY_NAME entry"
def test_check_metadata_nameid_post_script_name():
""" Checks METADATA.pb font.post_script_name matches
postscript name declared on the name table. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/nameid/post_script_name")
# Let's start with the METADATA.pb file from our reference FamilySans family:
font = TEST_FILE("familysans/FamilySans-Regular.ttf")
# We know that Family Sans Regular is good here:
assert_PASS(check(font))
# Then cause it to fail:
md = check["font_metadata"]
md.post_script_name = "Foo"
assert_results_contain(check(font, {'font_metadata': md}),
FAIL, 'mismatch')
# TODO: the failure-mode below seems more generic than the scope
# of this individual check. This could become a check by itself!
#
# code-paths:
# - FAIL code="missing", "Font lacks a POSTSCRIPT_NAME"
def test_check_metadata_nameid_full_name():
""" METADATA.pb font.fullname value matches fullname declared on the name table ? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/nameid/full_name")
font = TEST_FILE("merriweather/Merriweather-Regular.ttf")
assert_PASS(check(font),
'with a good font...')
# here we change the font.fullname on the METADATA.pb
# to introduce a "mismatch" error condition:
font_metadata = check['font_metadata']
good = font_metadata.full_name
font_metadata.full_name = good + "bad-suffix"
assert_results_contain(check(font, {"font_metadata": font_metadata}),
FAIL, 'mismatch',
'with mismatching fullname values...')
# and restore the good value prior to the next test case:
font_metadata.full_name = good
# And here we remove all FULL_FONT_NAME entries
# in order to get a "lacks-entry" error condition:
ttFont = check['ttFont']
for i, name in enumerate(ttFont["name"].names):
if name.nameID == NameID.FULL_FONT_NAME:
del ttFont["name"].names[i]
assert_results_contain(check(ttFont),
FAIL, 'lacks-entry',
'when a font lacks FULL_FONT_NAME entries in its name table...')
def test_check_metadata_nameid_font_name():
""" METADATA.pb font.name value should be same as the family name declared on the name table. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/nameid/font_name")
# Our reference Merriweather-Regular is known to have good fullname metadata
font = TEST_FILE("merriweather/Merriweather-Regular.ttf")
ttFont = TTFont(font)
assert_PASS(check(ttFont),
'with a good font...')
for i, name in enumerate(ttFont["name"].names):
if name.nameID == NameID.FONT_FAMILY_NAME:
good = name.string.decode(name.getEncoding()) # keep a copy of the good value
ttFont["name"].names[i].string = (good + "bad-suffix").encode(name.getEncoding())
assert_results_contain(check(ttFont),
FAIL, 'mismatch',
f'with a bad FONT_FAMILY_NAME entry ({i})...')
ttFont["name"].names[i].string = good # restore good value
# TODO:
# FAIL, "lacks-entry"
def test_check_metadata_match_fullname_postscript():
""" METADATA.pb family.full_name and family.post_script_name
fields have equivalent values ? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/match_fullname_postscript")
regular_font = TEST_FILE("merriweather/Merriweather-Regular.ttf")
lightitalic_font = TEST_FILE("merriweather/Merriweather-LightItalic.ttf")
assert_PASS(check(lightitalic_font),
'with good entries (Merriweather-LightItalic)...')
# post_script_name: "Merriweather-LightItalic"
# full_name: "Merriweather Light Italic"
# TODO: Verify why/whether "Regular" cannot be omited on font.full_name
# There's some relevant info at:
# https://github.com/googlefonts/fontbakery/issues/1517
#
# FIXME: com.google.fonts/check/metadata/nameid/full_name
# ties the full_name values from the METADATA.pb file and the
# internal name table entry (FULL_FONT_NAME)
# to be strictly identical. So it seems that the test below is
# actually wrong (as well as the current implementation):
#
assert_results_contain(check(regular_font),
FAIL, 'mismatch',
'with bad entries (Merriweather-Regular)...')
# post_script_name: "Merriweather-Regular"
# full_name: "Merriweather"
# fix the regular metadata:
md = check['font_metadata']
md.full_name = "Merriweather Regular"
assert_PASS(check(regular_font, {"font_metadata": md}),
'with good entries (Merriweather-Regular after full_name fix)...')
# post_script_name: "Merriweather-Regular"
# full_name: "Merriweather Regular"
# introduce an error in the metadata:
md.full_name = "MistakenFont Regular"
assert_results_contain(check(regular_font, {"font_metadata": md}),
FAIL, 'mismatch',
'with a mismatch...')
# post_script_name: "Merriweather-Regular"
# full_name: "MistakenFont Regular"
def NOT_IMPLEMENTED_test_check_match_filename_postscript():
""" METADATA.pb family.filename and family.post_script_name
fields have equivalent values? """
# check = CheckTester(googlefonts_profile,
# "com.google.fonts/check/match_filename_postscript")
# TODO: Implement-me!
#
# code-paths:
# - FAIL, "mismatch" "METADATA.pb filename does not match post_script_name"
# - PASS
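# A note on naming: "RIBBI" refers to the four basic styles
# Regular, Italic, Bold and Bold Italic; everything else is "NON-RIBBI".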
MONTSERRAT_RIBBI = [
TEST_FILE("montserrat/Montserrat-Regular.ttf"),
TEST_FILE("montserrat/Montserrat-Italic.ttf"),
TEST_FILE("montserrat/Montserrat-Bold.ttf"),
TEST_FILE("montserrat/Montserrat-BoldItalic.ttf")
]
MONTSERRAT_NON_RIBBI = [
TEST_FILE("montserrat/Montserrat-BlackItalic.ttf"),
TEST_FILE("montserrat/Montserrat-Black.ttf"),
TEST_FILE("montserrat/Montserrat-ExtraBoldItalic.ttf"),
TEST_FILE("montserrat/Montserrat-ExtraBold.ttf"),
TEST_FILE("montserrat/Montserrat-ExtraLightItalic.ttf"),
TEST_FILE("montserrat/Montserrat-ExtraLight.ttf"),
TEST_FILE("montserrat/Montserrat-LightItalic.ttf"),
TEST_FILE("montserrat/Montserrat-Light.ttf"),
TEST_FILE("montserrat/Montserrat-MediumItalic.ttf"),
TEST_FILE("montserrat/Montserrat-Medium.ttf"),
TEST_FILE("montserrat/Montserrat-SemiBoldItalic.ttf"),
TEST_FILE("montserrat/Montserrat-SemiBold.ttf"),
TEST_FILE("montserrat/Montserrat-ThinItalic.ttf"),
TEST_FILE("montserrat/Montserrat-Thin.ttf")
]
def test_check_metadata_valid_name_values():
""" METADATA.pb font.name field contains font name in right format? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/valid_name_values")
# Our reference Montserrat family is a good 18-styles family:
for font in MONTSERRAT_RIBBI:
# So it must PASS the check:
assert_PASS(check(font),
f'with a good RIBBI font ({font})...')
# And fail if it finds a bad font_familyname:
assert_results_contain(check(font, {"font_familynames": ["WrongFamilyName"]}),
FAIL, 'mismatch',
f'with a bad RIBBI font ({font})...')
# We do the same for NON-RIBBI styles:
for font in MONTSERRAT_NON_RIBBI:
# So it must PASS the check:
assert_PASS(check(font),
f'with a good NON-RIBBI font ({font})...')
# And fail if it finds a bad font_familyname:
assert_results_contain(check(font, {"typographic_familynames": ["WrongFamilyName"]}),
FAIL, 'mismatch',
f'with a bad NON-RIBBI font ({font})...')
def test_check_metadata_valid_full_name_values():
""" METADATA.pb font.full_name field contains font name in right format? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/valid_full_name_values")
# Our reference Montserrat family is a good 18-styles family:
# properly described in its METADATA.pb file:
for font in MONTSERRAT_RIBBI:
# So it must PASS the check:
assert_PASS(check(font),
f'with a good RIBBI font ({font})...')
# And fail if the full familyname in METADATA.pb diverges
# from the name inferred from the name table:
assert_results_contain(check(font, {"font_familynames": ["WrongFamilyName"]}),
FAIL, 'mismatch',
f'with a bad RIBBI font ({font})...')
# We do the same for NON-RIBBI styles:
for font in MONTSERRAT_NON_RIBBI:
# So it must PASS the check:
assert_PASS(check(font),
f'with a good NON-RIBBI font ({font})...')
# Unless when not matching typographic familyname from the name table:
assert_results_contain(check(font, {"typographic_familynames": ["WrongFamilyName"]}),
FAIL, 'mismatch',
f'with a bad NON-RIBBI font ({font})...')
def test_check_metadata_valid_filename_values():
""" METADATA.pb font.filename field contains font name in right format? """
# FIXME: CheckTester
from fontbakery.profiles.googlefonts \
import (com_google_fonts_check_metadata_valid_filename_values as check,
metadata_file,
family_metadata)
# Our reference Montserrat family is a good 18-styles family:
for fontfile in MONTSERRAT_RIBBI + MONTSERRAT_NON_RIBBI:
family_directory = os.path.dirname(fontfile)
meta = family_metadata(metadata_file(family_directory))
# So it must PASS the check:
assert_PASS(check(fontfile, meta),
f"with a good font ({fontfile})...")
# And fail if it finds a bad filename:
for font in meta.fonts:
font.filename = "WrongFileName"
assert_results_contain(check(fontfile, meta),
FAIL, 'bad-field',
f'with a bad font ({fontfile})...')
def test_check_metadata_valid_post_script_name_values():
""" METADATA.pb font.post_script_name field contains font name in right format? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/valid_post_script_name_values")
# Our reference Montserrat family is a good 18-styles family:
for fontfile in MONTSERRAT_RIBBI + MONTSERRAT_NON_RIBBI:
# So it must PASS the check:
assert_PASS(check(fontfile),
f"with a good font ({fontfile})...")
# And fail if it finds a bad filename:
md = check["font_metadata"]
md.post_script_name = "WrongPSName"
assert_results_contain(check(fontfile, {"font_metadata": md}),
FAIL, 'mismatch',
f'with a bad font ({fontfile})...')
def test_check_metadata_valid_copyright():
""" Copyright notice on METADATA.pb matches canonical pattern ? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/valid_copyright")
# Our reference Cabin Regular is known to be bad
# Since it provides an email instead of a git URL:
font = TEST_FILE("cabin/Cabin-Regular.ttf")
assert_results_contain(check(font),
FAIL, 'bad-notice-format',
'with a bad copyright notice string...')
# Then we change it into a good string (example extracted from Archivo Black):
# note: the check does not actually verify that the project name is correct.
# It only focuses on the string format.
good_string = ("Copyright 2017 The Archivo Black Project Authors"
" (https://github.com/Omnibus-Type/ArchivoBlack)")
md = check["font_metadata"]
md.copyright = good_string
assert_PASS(check(font, {"font_metadata": md}),
'with a good copyright notice string...')
# We also ignore case, so these should also PASS:
md.copyright = good_string.upper()
assert_PASS(check(font, {"font_metadata": md}),
'with all uppercase...')
md.copyright = good_string.lower()
assert_PASS(check(font, {"font_metadata": md}),
'with all lowercase...')
def test_check_font_copyright():
"""Copyright notices match canonical pattern in fonts"""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/font_copyright")
# Our reference Cabin Regular is known to be bad
# Since it provides an email instead of a git URL:
ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))
assert_results_contain(check(ttFont),
FAIL, 'bad-notice-format',
'with a bad copyright notice string...')
# We change it into a good string (example extracted from Archivo Black):
# note: the check does not actually verify that the project name is correct.
# It only focuses on the string format.
good_string = ("Copyright 2017 The Archivo Black Project Authors"
" (https://github.com/Omnibus-Type/ArchivoBlack)")
for i, entry in enumerate(ttFont['name'].names):
if entry.nameID == NameID.COPYRIGHT_NOTICE:
ttFont['name'].names[i].string = good_string.encode(entry.getEncoding())
assert_PASS(check(ttFont),
'with good strings...')
def test_check_glyphs_file_font_copyright():
"""Copyright notices match canonical pattern in fonts"""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/glyphs_file/font_copyright")
glyphsFile = GLYPHSAPP_TEST_FILE("Comfortaa.glyphs")
# note: the check does not actually verify that the project name is correct.
# It only focuses on the string format.
# Use an email instead of a git URL:
bad_string = ("Copyright 2017 The Archivo Black Project Authors"
" (contact-us@fake-address.com)")
glyphsFile.copyright = bad_string
assert_results_contain(check(glyphsFile),
FAIL, 'bad-notice-format',
'with a bad copyright notice string...')
# We change it into a good string (example extracted from Archivo Black):
good_string = ("Copyright 2017 The Archivo Black Project Authors"
" (https://github.com/Omnibus-Type/ArchivoBlack)")
glyphsFile.copyright = good_string
assert_PASS(check(glyphsFile),
'with a good copyright string...')
def test_check_metadata_reserved_font_name():
""" Copyright notice on METADATA.pb should not contain Reserved Font Name. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/reserved_font_name")
font = TEST_FILE("cabin/Cabin-Regular.ttf")
assert_PASS(check(font),
'with a good copyright notice string...')
# Then we make it bad:
md = check["font_metadata"]
md.copyright += "Reserved Font Name"
assert_results_contain(check(font, {"font_metadata": md}),
WARN, 'rfn',
'with a notice containing "Reserved Font Name"...')
def test_check_metadata_copyright_max_length():
""" METADATA.pb: Copyright notice shouldn't exceed 500 chars. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/copyright_max_length")
font = TEST_FILE("cabin/Cabin-Regular.ttf")
check(font)
md = check["font_metadata"]
md.copyright = 500 * "x"
assert_PASS(check(font, {"font_metadata": md}),
'with a 500-char copyright notice string...')
md.copyright = 501 * "x"
assert_results_contain(check(font, {"font_metadata": md}),
FAIL, 'max-length',
'with a 501-char copyright notice string...')
def test_check_metadata_filenames():
""" METADATA.pb: Font filenames match font.filename entries? """
from fontbakery.profiles.googlefonts import (com_google_fonts_check_metadata_filenames as check,
metadata_file,
family_metadata)
family_dir = portable_path('data/test/montserrat/')
family_meta = family_metadata(metadata_file(family_dir))
# test PASS:
fonts = montserrat_fonts
assert_PASS(check(fonts, family_dir, family_meta),
'with matching list of font files...')
# make sure missing files are detected by the check:
fonts = montserrat_fonts
original_name = fonts[0]
os.rename(original_name, "font.tmp") # rename one font file in order to trigger the FAIL
assert_results_contain(check(fonts, family_dir, family_meta),
FAIL, 'file-not-found',
'with missing font files...')
os.rename("font.tmp", original_name) # restore filename
family_dir = portable_path('data/test/cabin/')
family_meta = family_metadata(metadata_file(family_dir))
# From all TTFs in Cabin's directory, the condensed ones are not
# listed on METADATA.pb, so the check must FAIL, even if we do not
# explicitly include them in the set of files to be checked:
assert_results_contain(check(cabin_fonts,
family_dir,
family_meta),
FAIL, 'file-not-declared',
'with some font files not declared...')
def test_check_metadata_italic_style():
""" METADATA.pb font.style "italic" matches font internals ? """
from fontbakery.constants import MacStyle
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/italic_style")
# Our reference Merriweather Italic is known to be good
ttFont = TTFont(TEST_FILE("merriweather/Merriweather-Italic.ttf"))
assert_PASS(check(ttFont),
'with a good font...')
# now let's introduce issues on the FULL_FONT_NAME entries
# to test the "bad-fullfont-name" codepath:
for i, name in enumerate(ttFont['name'].names):
if name.nameID == NameID.FULL_FONT_NAME:
backup = name.string
ttFont['name'].names[i].string = "BAD VALUE".encode(name.getEncoding())
assert_results_contain(check(ttFont),
FAIL, 'bad-fullfont-name',
'with a bad NameID.FULL_FONT_NAME entry...')
# and restore the good value:
ttFont['name'].names[i].string = backup
# And, finally, let's flip off that italic bit
# and get a "bad-macstyle" FAIL (so much fun!):
ttFont['head'].macStyle &= ~MacStyle.ITALIC
assert_results_contain(check(ttFont),
FAIL, 'bad-macstyle',
'with bad macstyle bit value...')
def test_check_metadata_normal_style():
""" METADATA.pb font.style "normal" matches font internals ? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/normal_style")
from fontbakery.constants import MacStyle
# This one is pretty similar to check/metadata/italic_style
# You may want to take a quick look above...
# Our reference Merriweather Regular is known to be good here.
ttFont = TTFont(TEST_FILE("merriweather/Merriweather-Regular.ttf"))
assert_PASS(check(ttFont),
'with a good font...')
# now we sadistically insert brokenness into
# each occurrence of the FONT_FAMILY_NAME nameid:
for i, name in enumerate(ttFont['name'].names):
if name.nameID == NameID.FONT_FAMILY_NAME:
backup = name.string
ttFont['name'].names[i].string = "Merriweather-Italic".encode(name.getEncoding())
assert_results_contain(check(ttFont),
FAIL, 'familyname-italic',
'with a non-italic font that has a "-Italic" in FONT_FAMILY_NAME...')
# and restore the good value:
ttFont['name'].names[i].string = backup
# now let's do the same with
# occurrences of the FULL_FONT_NAME nameid:
for i, name in enumerate(ttFont['name'].names):
if name.nameID == NameID.FULL_FONT_NAME:
backup = name.string
ttFont['name'].names[i].string = "Merriweather-Italic".encode(name.getEncoding())
assert_results_contain(check(ttFont),
FAIL, 'fullfont-italic',
'with a non-italic font that has a "-Italic" in FULL_FONT_NAME...')
# and restore the good value:
ttFont['name'].names[i].string = backup
# And, finally, again, we flip a bit and...
#
# Note: This time the boolean logic is the quite opposite in comparison
# to the test for com.google.fonts/check/metadata/italic_style above.
# Here we have to set the bit back to 1 to get a wrongful "this font is an italic" setting:
ttFont['head'].macStyle |= MacStyle.ITALIC
assert_results_contain(check(ttFont),
FAIL, 'bad-macstyle',
'with bad macstyle bit value...')
def test_check_metadata_nameid_family_and_full_names():
""" METADATA.pb font.name and font.full_name fields match the values declared on the name table? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/nameid/family_and_full_names")
# Our reference Merriweather Regular is known to be good here.
ttFont = TTFont(TEST_FILE("merriweather/Merriweather-Regular.ttf"))
assert_PASS(check(ttFont),
'with a good font...')
# There we go again!
# Breaking FULL_FONT_NAME entries one by one:
for i, name in enumerate(ttFont['name'].names):
if name.nameID == NameID.FULL_FONT_NAME:
backup = name.string
ttFont['name'].names[i].string = "This is utterly wrong!".encode(name.getEncoding())
assert_results_contain(check(ttFont),
FAIL, 'fullname-mismatch',
'with a METADATA.pb / FULL_FONT_NAME mismatch...')
# and restore the good value:
ttFont['name'].names[i].string = backup
# And then we do the same with FONT_FAMILY_NAME entries:
for i, name in enumerate(ttFont['name'].names):
if name.nameID == NameID.FONT_FAMILY_NAME:
backup = name.string
ttFont['name'].names[i].string = ("I'm listening to"
" The Players with Hiromasa Suzuki - Galaxy (1979)").encode(name.getEncoding())
assert_results_contain(check(ttFont),
FAIL, 'familyname-mismatch',
'with a METADATA.pb / FONT_FAMILY_NAME mismatch...')
# and restore the good value:
ttFont['name'].names[i].string = backup
def test_check_metadata_fontname_not_camel_cased():
""" METADATA.pb: Check if fontname is not camel cased. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/fontname_not_camel_cased")
# Our reference Cabin Regular is known to be good
font = TEST_FILE("cabin/Cabin-Regular.ttf")
assert_PASS(check(font),
'with a good font...')
# Then we FAIL with a CamelCased name:
md = check["font_metadata"]
md.name = "GollyGhost"
assert_results_contain(check(font, {"font_metadata": md}),
FAIL, 'camelcase',
'with a bad font name (CamelCased)...')
# And we also make sure the check PASSes with a few known good names:
for good_name in ["VT323",
"PT Sans",
"Amatic SC"]:
md.name = good_name
assert_PASS(check(font, {"font_metadata": md}),
f'with a good font name "{good_name}"...')
def test_check_metadata_match_name_familyname():
""" METADATA.pb: Check font name is the same as family name. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/match_name_familyname")
# Our reference Cabin Regular is known to be good
font = TEST_FILE("cabin/Cabin-Regular.ttf")
assert_PASS(check(font),
'with a good font...')
# Then we FAIL with mismatching names:
family_md = check["family_metadata"]
font_md = check["font_metadata"]
family_md.name = "Some Fontname"
font_md.name = "Something Else"
assert_results_contain(check(font, {"family_metadata": family_md,
"font_metadata": font_md}),
FAIL, 'mismatch',
'with bad font/family name metadata...')
def test_check_metadata_canonical_weight_value():
""" METADATA.pb: Check that font weight has a canonical value. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/canonical_weight_value")
font = TEST_FILE("cabin/Cabin-Regular.ttf")
check(font)
md = check["font_metadata"]
for w in [100, 200, 300, 400, 500, 600, 700, 800, 900]:
md.weight = w
assert_PASS(check(font, {"font_metadata": md}),
f'with a good weight value ({w})...')
for w in [150, 250, 350, 450, 550, 650, 750, 850]:
md.weight = w
assert_results_contain(check(font, {"font_metadata": md}),
FAIL, 'bad-weight',
f'with a bad weight value ({w})...')
def test_check_metadata_os2_weightclass():
""" Checking OS/2 usWeightClass matches weight specified at METADATA.pb """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/os2_weightclass")
# === test cases for Variable Fonts ===
# Our reference Jura is known to be good
ttFont = TTFont(TEST_FILE("varfont/jura/Jura[wght].ttf"))
assert_PASS(check(ttFont),
'with good metadata...')
# Should report if a bad weight value is found though:
md = check["font_metadata"]
good_value = md.weight
bad_value = good_value + 100
md.weight = bad_value
assert_results_contain(check(ttFont, {"font_metadata": md}),
FAIL, 'mismatch',
'with bad metadata...')
# === test cases for Static Fonts ===
# Our reference Montserrat family is a good 18-styles family:
for fontfile in MONTSERRAT_RIBBI + MONTSERRAT_NON_RIBBI:
ttFont = TTFont(fontfile)
assert_PASS(check(ttFont),
f'with a good font ({fontfile})...')
# but should report bad weight values:
md = check["font_metadata"]
good_value = md.weight
bad_value = good_value + 50
md.weight = bad_value
assert_results_contain(check(ttFont, {"font_metadata": md}),
FAIL, 'mismatch',
f'with bad metadata for {fontfile}...')
# If font is Thin or ExtraLight, ensure that this check can
# accept both 100, 250 for Thin and 200, 275 for ExtraLight
if "Thin" in fontfile:
ttFont["OS/2"].usWeightClass = 100
assert_PASS(check(ttFont),
f'with weightclass 100 on ({fontfile})...')
ttFont["OS/2"].usWeightClass = 250
assert_PASS(check(ttFont),
f'with weightclass 250 on ({fontfile})...')
if "ExtraLight" in fontfile:
ttFont["OS/2"].usWeightClass = 200
assert_PASS(check(ttFont),
f'with weightClass 200 on ({fontfile})...')
ttFont["OS/2"].usWeightClass = 275
assert_PASS(check(ttFont),
f'with weightClass 275 on ({fontfile})...')
def NOT_IMPLEMENTED_test_check_metadata_match_weight_postscript():
""" METADATA.pb: Metadata weight matches postScriptName. """
# check = CheckTester(googlefonts_profile,
# "com.google.fonts/check/metadata/match_weight_postscript")
# TODO: Implement-me!
#
# code-paths:
# - FAIL, "METADATA.pb: Font weight value is invalid."
# - FAIL, "METADATA.pb: Mismatch between postScriptName and weight value."
# - PASS
def NOT_IMPLEMENTED_test_check_metadata_canonical_style_names():
""" METADATA.pb: Font styles are named canonically? """
# check = CheckTester(googlefonts_profile,
# "com.google.fonts/check/metadata/canonical_style_names")
# TODO: Implement-me!
#
# code-paths:
# - SKIP "Applicable only to font styles declared as 'italic' or 'normal' on METADATA.pb."
# - FAIL, "italic" "Font style should be italic."
# - FAIL, "normal" "Font style should be normal."
# - PASS "Font styles are named canonically."
def test_check_unitsperem_strict():
""" Stricter unitsPerEm criteria for Google Fonts. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/unitsperem_strict")
ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))
PASS_VALUES = [16, 32, 64, 128, 256, 512, 1024] # Good for better performance on legacy renderers
PASS_VALUES.extend([500, 1000]) # or common typical values
PASS_VALUES.extend([2000, 2048]) # not so common, but still ok
WARN_LARGE_VALUES = [2500, 4000, 4096] # uncommon and large,
# but we've seen legitimate cases such as the
# Big Shoulders Family which uses 4000 since
# it needs more details.
# and finally the bad ones, including:
FAIL_VALUES = [0, 1, 2, 4, 8, 15, 16385] # simply invalid
FAIL_VALUES.extend([100, 1500, 5000]) # suboptimal (uncommon and not power of two)
FAIL_VALUES.extend([8192, 16384]) # and valid ones suggested by the opentype spec,
# but too large, causing undesirable filesize bloat.
for pass_value in PASS_VALUES:
ttFont["head"].unitsPerEm = pass_value
assert_PASS(check(ttFont),
f'with unitsPerEm = {pass_value}...')
for warn_value in WARN_LARGE_VALUES:
ttFont["head"].unitsPerEm = warn_value
assert_results_contain(check(ttFont),
WARN, 'large-value',
f'with unitsPerEm = {warn_value}...')
for fail_value in FAIL_VALUES:
ttFont["head"].unitsPerEm = fail_value
assert_results_contain(check(ttFont),
FAIL, 'bad-value',
f'with unitsPerEm = {fail_value}...')
def NOT_IMPLEMENTED_test_check_version_bump():
""" Version number has increased since previous release on Google Fonts? """
# check = CheckTester(googlefonts_profile,
# "com.google.fonts/check/version_bump")
# TODO: Implement-me!
#
# code-paths:
# - FAIL, "Version number is equal to version on Google Fonts."
# - FAIL, "Version number is less than version on Google Fonts."
# - FAIL, "Version number is equal to version on Google Fonts GitHub repo."
# - FAIL, "Version number is less than version on Google Fonts GitHub repo."
# - PASS
def NOT_IMPLEMENTED_test_check_production_glyphs_similarity():
""" Glyphs are similiar to Google Fonts version? """
# check = CheckTester(googlefonts_profile,
# "com.google.fonts/check/production_glyphs_similarity")
# TODO: Implement-me!
#
# code-paths:
# - WARN, "Following glyphs differ greatly from Google Fonts version"
# - PASS, "Glyphs are similar"
def NOT_IMPLEMENTED_test_check_fsselection():
""" Checking OS/2 fsSelection value. """
# check = CheckTester(googlefonts_profile,
# "com.google.fonts/check/fsselection")
# TODO: Implement-me!
#
# code-paths:
# ...
def test_check_italic_angle():
""" Checking post.italicAngle value. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/italic_angle")
ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))
# italic_angle, style, expected_result, expected_keyword
test_cases = [
[1, "Italic", FAIL, "positive"],
[0, "Regular", PASS, None], # This must PASS as it is a non-italic
[-21, "ThinItalic", WARN, "over-minus20-degrees"],
[-30, "ThinItalic", WARN, "over-minus20-degrees"],
[-31, "ThinItalic", FAIL, "over-minus30-degrees"],
[0, "Italic", FAIL, "zero-italic"],
[-1,"ExtraBold", FAIL, "non-zero-normal"]
]
for value, style, expected_result, expected_msg in test_cases:
ttFont["post"].italicAngle = value
if expected_result != PASS:
assert_results_contain(check(ttFont, {"style": style}),
expected_result,
expected_msg,
f"with italic-angle:{value} style:{style}...")
else:
assert_PASS(check(ttFont, {"style": style}),
f'with italic-angle:{value} style:{style}...')
def test_check_mac_style():
""" Checking head.macStyle value. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/mac_style")
from fontbakery.constants import MacStyle
ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))
# macStyle-value, style, expected
test_cases = [
[0, "Thin", PASS],
[0, "Bold", "bad-BOLD"],
[0, "Italic", "bad-ITALIC"],
[MacStyle.ITALIC, "Italic", PASS],
[MacStyle.ITALIC, "Thin", "bad-ITALIC"],
[MacStyle.BOLD, "Bold", PASS],
[MacStyle.BOLD, "Thin", "bad-BOLD"],
[MacStyle.BOLD | MacStyle.ITALIC, "BoldItalic", PASS]
]
for macStyle_value, style, expected in test_cases:
ttFont["head"].macStyle = macStyle_value
if expected == PASS:
assert_PASS(check(ttFont, {"style": style}),
f'with macStyle:{macStyle_value} style:{style}...')
else:
assert_results_contain(check(ttFont, {"style": style}),
FAIL, expected,
f"with macStyle:{macStyle_value} style:{style}...")
def test_check_contour_count(montserrat_ttFonts):
"""Check glyphs contain the recommended contour count"""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/contour_count")
# TODO: FAIL, "lacks-cmap"
for ttFont in montserrat_ttFonts:
assert_PASS(check(ttFont),
'Montserrat which was used to assemble the glyph data...')
# Let's swap the glyf entry for 'a' (2 contours) with that of 'c' (1 contour)
for ttFont in montserrat_ttFonts:
ttFont['glyf']['a'] = ttFont['glyf']['c']
assert_results_contain(check(ttFont),
WARN, 'contour-count')
# FIXME!
# GFonts hosted Cabin files seem to have changed in ways
# that break some of the assumptions in the code-test below.
# More info at https://github.com/googlefonts/fontbakery/issues/2581
@pytest.mark.xfail(strict=True)
def test_check_production_encoded_glyphs(cabin_ttFonts):
"""Check glyphs are not missing when compared to version on fonts.google.com"""
# FIXME:
# check = CheckTester(googlefonts_profile,
# "com.google.fonts/check/production_encoded_glyphs")
from fontbakery.profiles.shared_conditions import family_directory
from fontbakery.profiles.googlefonts \
import (com_google_fonts_check_production_encoded_glyphs as check,
api_gfonts_ttFont,
style,
remote_styles,
metadata_file,
family_metadata)
family_meta = family_metadata(metadata_file(family_directory(cabin_fonts[0])))
remote = remote_styles(family_meta.name)
if remote:
for font in cabin_fonts:
ttFont = TTFont(font)
gfont = api_gfonts_ttFont(style(font), remote)
# Cabin font hosted on fonts.google.com contains
# all the glyphs for the font in data/test/cabin
assert_PASS(check(ttFont, gfont),
f"with '{font}'")
# Take A glyph out of font
ttFont['cmap'].getcmap(3, 1).cmap.pop(ord('A'))
ttFont['glyf'].glyphs.pop('A')
assert_results_contain(check(ttFont, gfont),
FAIL, 'lost-glyphs')
else:
print (f"Warning: Seems to have failed to download remote font files: {cabin_ttFonts}.")
def test_check_transformed_components():
"""Ensure component transforms do not perform scaling or rotation."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/transformed_components")
ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))
assert_PASS(check(ttFont),
"with a good font...")
# DM Sans v1.100 had some transformed components
ttFont = TTFont(TEST_FILE("dm-sans-v1.100/DMSans-Regular.ttf"))
assert_results_contain(check(ttFont),
FAIL, 'transformed-components')
def test_check_metadata_nameid_copyright():
""" Copyright field for this font on METADATA.pb matches
all copyright notice entries on the name table? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/nameid/copyright")
from fontbakery.utils import get_name_entry_strings
# Our reference Cabin Regular is known to be good
ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))
assert_PASS(check(ttFont),
"with a good METADATA.pb for this font...")
# But the check must report when mismatching names are found:
good_value = get_name_entry_strings(ttFont, NameID.COPYRIGHT_NOTICE)[0]
md = check["font_metadata"]
md.copyright = good_value + "something bad"
assert_results_contain(check(ttFont, {"font_metadata": md}),
FAIL, 'mismatch',
'with a bad METADATA.pb (with a copyright string not matching this font)...')
def test_check_metadata_category():
""" Category field for this font on METADATA.pb is valid? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/category")
# Our reference Cabin family...
font = TEST_FILE("cabin/Cabin-Regular.ttf")
check(font)
md = check["family_metadata"]
assert md.category == "SANS_SERIF" # ...is known to be good ;-)
assert_PASS(check(font),
"with a good METADATA.pb...")
# We then report a problem with this sample of bad values:
for bad_value in ["SAN_SERIF",
"MONO_SPACE",
"sans_serif",
"monospace"]:
md.category = bad_value
assert_results_contain(check(font, {"family_metadata": md}),
FAIL, 'bad-value',
f'with a bad category "{bad_value}"...')
# And we accept the good ones:
for good_value in ["MONOSPACE",
"SANS_SERIF",
"SERIF",
"DISPLAY",
"HANDWRITING"]:
md.category = good_value
assert_PASS(check(font, {"family_metadata": md}),
f'with "{good_value}"...')
def test_check_name_mandatory_entries():
""" Font has all mandatory 'name' table entries ? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/mandatory_entries")
# We'll check both RIBBI and non-RIBBI fonts
# so that we cover both cases for FAIL/PASS scenarios
# === First with a RIBBI font: ===
# Our reference Cabin Regular is known to be good
ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))
assert_PASS(check(ttFont),
"with a good RIBBI font...")
mandatory_entries = [NameID.FONT_FAMILY_NAME,
NameID.FONT_SUBFAMILY_NAME,
NameID.FULL_FONT_NAME,
NameID.POSTSCRIPT_NAME]
# then we "remove" each mandatory entry one by one:
for mandatory in mandatory_entries:
ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))
for i, name in enumerate(ttFont['name'].names):
if name.nameID == mandatory:
ttFont['name'].names[i].nameID = 0 # not really removing it, but replacing it
# by something else completely irrelevant
# for the purposes of this specific check
assert_results_contain(check(ttFont),
FAIL, 'missing-entry',
f'with a missing mandatory (RIBBI) name entry (id={mandatory})...')
# === And now a non-RIBBI font: ===
# Our reference Merriweather Black is known to be good
ttFont = TTFont(TEST_FILE("merriweather/Merriweather-Black.ttf"))
assert_PASS(check(ttFont),
"with a good non-RIBBI font...")
mandatory_entries = [NameID.FONT_FAMILY_NAME,
NameID.FONT_SUBFAMILY_NAME,
NameID.FULL_FONT_NAME,
NameID.POSTSCRIPT_NAME,
NameID.TYPOGRAPHIC_FAMILY_NAME,
NameID.TYPOGRAPHIC_SUBFAMILY_NAME]
# then we (again) "remove" each mandatory entry one by one:
for mandatory in mandatory_entries:
ttFont = TTFont(TEST_FILE("merriweather/Merriweather-Black.ttf"))
for i, name in enumerate(ttFont['name'].names):
if name.nameID == mandatory:
ttFont['name'].names[i].nameID = 0 # not really removing it, but replacing it
# by something else completely irrelevant
# for the purposes of this specific check
assert_results_contain(check(ttFont),
FAIL, 'missing-entry',
f'with a missing mandatory (non-RIBBI) name entry (id={mandatory})...')
def test_condition_familyname_with_spaces():
from fontbakery.profiles.googlefonts_conditions import familyname_with_spaces
assert familyname_with_spaces("OverpassMono") == "Overpass Mono"
assert familyname_with_spaces("BodoniModa11") == "Bodoni Moda 11"
def test_check_name_familyname():
""" Check name table: FONT_FAMILY_NAME entries. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/familyname")
# TODO: FAIL, "lacks-name"
test_cases = [
#expect filename mac_value win_value
(PASS, "ok", TEST_FILE("cabin/Cabin-Regular.ttf"), "Cabin", "Cabin"),
(FAIL, "mismatch", TEST_FILE("cabin/Cabin-Regular.ttf"), "Wrong", "Cabin"),
(PASS, "ok", TEST_FILE("overpassmono/OverpassMono-Regular.ttf"), "Overpass Mono", "Overpass Mono"),
(PASS, "ok", TEST_FILE("overpassmono/OverpassMono-Bold.ttf"), "Overpass Mono", "Overpass Mono"),
(FAIL, "mismatch", TEST_FILE("overpassmono/OverpassMono-Regular.ttf"), "Overpass Mono", "Foo"),
(PASS, "ok", TEST_FILE("merriweather/Merriweather-Black.ttf"), "Merriweather", "Merriweather Black"),
(PASS, "ok", TEST_FILE("merriweather/Merriweather-LightItalic.ttf"), "Merriweather", "Merriweather Light"),
(FAIL, "mismatch", TEST_FILE("merriweather/Merriweather-LightItalic.ttf"), "Merriweather", "Merriweather Light Italic")
]
for expected, keyword, filename, mac_value, win_value in test_cases:
ttFont = TTFont(filename)
for i, name in enumerate(ttFont['name'].names):
if name.platformID == PlatformID.MACINTOSH:
value = mac_value
if name.platformID == PlatformID.WINDOWS:
value = win_value
assert value
if name.nameID == NameID.FONT_FAMILY_NAME:
ttFont['name'].names[i].string = value.encode(name.getEncoding())
assert_results_contain(check(ttFont),
expected, keyword,
f'with filename="{filename}",'
f' value="{value}", style="{check["style"]}"...')
def test_check_name_subfamilyname():
""" Check name table: FONT_SUBFAMILY_NAME entries. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/subfamilyname")
PASS_test_cases = [
# filename mac_value win_value
(TEST_FILE("overpassmono/OverpassMono-Regular.ttf"), "Regular", "Regular"),
(TEST_FILE("overpassmono/OverpassMono-Bold.ttf"), "Bold", "Bold"),
(TEST_FILE("merriweather/Merriweather-Black.ttf"), "Black", "Regular"),
(TEST_FILE("merriweather/Merriweather-LightItalic.ttf"), "Light Italic", "Italic"),
(TEST_FILE("montserrat/Montserrat-BlackItalic.ttf"), "Black Italic", "Italic"),
(TEST_FILE("montserrat/Montserrat-Black.ttf"), "Black", "Regular"),
(TEST_FILE("montserrat/Montserrat-BoldItalic.ttf"), "Bold Italic", "Bold Italic"),
(TEST_FILE("montserrat/Montserrat-Bold.ttf"), "Bold", "Bold"),
(TEST_FILE("montserrat/Montserrat-ExtraBoldItalic.ttf"), "ExtraBold Italic", "Italic"),
(TEST_FILE("montserrat/Montserrat-ExtraBold.ttf"), "ExtraBold", "Regular"),
(TEST_FILE("montserrat/Montserrat-ExtraLightItalic.ttf"), "ExtraLight Italic", "Italic"),
(TEST_FILE("montserrat/Montserrat-ExtraLight.ttf"), "ExtraLight", "Regular"),
(TEST_FILE("montserrat/Montserrat-Italic.ttf"), "Italic", "Italic"),
(TEST_FILE("montserrat/Montserrat-LightItalic.ttf"), "Light Italic", "Italic"),
(TEST_FILE("montserrat/Montserrat-Light.ttf"), "Light", "Regular"),
(TEST_FILE("montserrat/Montserrat-MediumItalic.ttf"), "Medium Italic", "Italic"),
(TEST_FILE("montserrat/Montserrat-Medium.ttf"), "Medium", "Regular"),
(TEST_FILE("montserrat/Montserrat-Regular.ttf"), "Regular", "Regular"),
(TEST_FILE("montserrat/Montserrat-SemiBoldItalic.ttf"), "SemiBold Italic", "Italic"),
(TEST_FILE("montserrat/Montserrat-SemiBold.ttf"), "SemiBold", "Regular"),
(TEST_FILE("montserrat/Montserrat-ThinItalic.ttf"), "Thin Italic", "Italic"),
(TEST_FILE("montserrat/Montserrat-Thin.ttf"), "Thin", "Regular")
]
for filename, mac_value, win_value in PASS_test_cases:
ttFont = TTFont(filename)
for i, name in enumerate(ttFont['name'].names):
if name.platformID == PlatformID.MACINTOSH:
value = mac_value
if name.platformID == PlatformID.WINDOWS:
value = win_value
assert value
if name.nameID == NameID.FONT_SUBFAMILY_NAME:
ttFont['name'].names[i].string = value.encode(name.getEncoding())
results = check(ttFont)
style = check["expected_style"]
assert_PASS(results,
f"with filename='{filename}', value='{value}', "
f"style_win='{style.win_style_name}', "
f"style_mac='{style.mac_style_name}'...")
# - FAIL, "bad-familyname" - "Bad familyname value on a FONT_SUBFAMILY_NAME entry."
filename = TEST_FILE("montserrat/Montserrat-ThinItalic.ttf")
ttFont = TTFont(filename)
# We setup a bad entry:
ttFont["name"].setName("Not a proper style",
NameID.FONT_SUBFAMILY_NAME,
PlatformID.MACINTOSH,
MacintoshEncodingID.ROMAN,
MacintoshLanguageID.ENGLISH)
# And this should now FAIL:
assert_results_contain(check(ttFont),
FAIL, 'bad-familyname')
# Repeat this for a Win subfamily name
ttFont = TTFont(filename)
ttFont["name"].setName("Not a proper style",
NameID.FONT_SUBFAMILY_NAME,
PlatformID.WINDOWS,
WindowsEncodingID.UNICODE_BMP,
WindowsLanguageID.ENGLISH_USA)
assert_results_contain(check(ttFont),
FAIL, 'bad-familyname')
def test_check_name_fullfontname():
""" Check name table: FULL_FONT_NAME entries. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/fullfontname")
# Our reference Cabin Regular is known to be good
ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))
assert_PASS(check(ttFont),
"with a good Regular font...")
# Let's now test the Regular exception
# ('Regular' can optionally be omitted on the FULL_FONT_NAME entry):
for index, name in enumerate(ttFont["name"].names):
if name.nameID == NameID.FULL_FONT_NAME:
backup = name.string
ttFont["name"].names[index].string = "Cabin".encode(name.getEncoding())
assert_results_contain(check(ttFont),
WARN, 'lacks-regular',
'with a good Regular font that omits "Regular" on FULL_FONT_NAME...')
# restore it:
ttFont["name"].names[index].string = backup
# Let's also make sure our good reference Cabin BoldItalic PASSes the check.
# This also tests the splitting of the filename-inferred style with a space char
ttFont = TTFont(TEST_FILE("cabin/Cabin-BoldItalic.ttf"))
assert_PASS(check(ttFont),
"with a good Bold Italic font...")
# And here we test the FAIL codepath:
for index, name in enumerate(ttFont["name"].names):
if name.nameID == NameID.FULL_FONT_NAME:
backup = name.string
ttFont["name"].names[index].string = "MAKE IT FAIL".encode(name.getEncoding())
assert_results_contain(check(ttFont),
FAIL, 'bad-entry',
'with a bad FULL_FONT_NAME entry...')
# restore it:
ttFont["name"].names[index].string = backup
def NOT_IMPLEMENTED_test_check_name_postscriptname():
""" Check name table: POSTSCRIPT_NAME entries. """
# check = CheckTester(googlefonts_profile,
# "com.google.fonts/check/name/postscriptname")
# TODO: Implement-me!
#
# code-paths:
# - FAIL, "bad-entry"
# - PASS
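# A hedged sketch of how this code-test could eventually look, mirroring the
# patterns used elsewhere in this file. It assumes the check is registered
# under the id mentioned above and that it emits FAIL "bad-entry" for a
# malformed POSTSCRIPT_NAME record (both are assumptions, hence the SKETCH_
# prefix so pytest does not collect it yet):
def SKETCH_test_check_name_postscriptname():
    check = CheckTester(googlefonts_profile,
                        "com.google.fonts/check/name/postscriptname")
    # Our reference Cabin Regular would be expected to be good:
    ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))
    assert_PASS(check(ttFont),
                "with a good font...")
    # Declare a PostScript name that clearly breaks the naming rules
    # (spaces are not allowed in PostScript names):
    ttFont["name"].setName("Not a proper PostScript name",
                           NameID.POSTSCRIPT_NAME,
                           PlatformID.WINDOWS,
                           WindowsEncodingID.UNICODE_BMP,
                           WindowsLanguageID.ENGLISH_USA)
    assert_results_contain(check(ttFont),
                           FAIL, "bad-entry",
                           "with a bad POSTSCRIPT_NAME entry...")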
def test_check_name_typographicfamilyname():
""" Check name table: TYPOGRAPHIC_FAMILY_NAME entries. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/typographicfamilyname")
# RIBBI fonts must not have a TYPOGRAPHIC_FAMILY_NAME entry
ttFont = TTFont(TEST_FILE("montserrat/Montserrat-BoldItalic.ttf"))
assert_PASS(check(ttFont),
f"with a RIBBI without nameid={NameID.TYPOGRAPHIC_FAMILY_NAME} entry...")
# so we add one and make sure the check reports the problem:
ttFont['name'].names[5].nameID = NameID.TYPOGRAPHIC_FAMILY_NAME # 5 is arbitrary here
assert_results_contain(check(ttFont),
FAIL, 'ribbi',
f'with a RIBBI that has got a nameid={NameID.TYPOGRAPHIC_FAMILY_NAME} entry...')
# non-RIBBI fonts must have a TYPOGRAPHIC_FAMILY_NAME entry
ttFont = TTFont(TEST_FILE("montserrat/Montserrat-ExtraLight.ttf"))
assert_PASS(check(ttFont),
f"with a non-RIBBI containing a nameid={NameID.TYPOGRAPHIC_FAMILY_NAME} entry...")
# set bad values on all TYPOGRAPHIC_FAMILY_NAME entries:
for i, name in enumerate(ttFont['name'].names):
if name.nameID == NameID.TYPOGRAPHIC_FAMILY_NAME:
ttFont['name'].names[i].string = "foo".encode(name.getEncoding())
assert_results_contain(check(ttFont),
FAIL, 'non-ribbi-bad-value',
f'with a non-RIBBI with bad nameid={NameID.TYPOGRAPHIC_FAMILY_NAME} entries...')
# remove all TYPOGRAPHIC_FAMILY_NAME entries
# by changing their nameid to something else:
for i, name in enumerate(ttFont['name'].names):
if name.nameID == NameID.TYPOGRAPHIC_FAMILY_NAME:
ttFont['name'].names[i].nameID = 255 # blah! :-)
assert_results_contain(check(ttFont),
FAIL, 'non-ribbi-lacks-entry',
f'with a non-RIBBI lacking a nameid={NameID.TYPOGRAPHIC_FAMILY_NAME} entry...')
def test_check_name_typographicsubfamilyname():
""" Check name table: TYPOGRAPHIC_SUBFAMILY_NAME entries. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/typographicsubfamilyname")
RIBBI = "montserrat/Montserrat-BoldItalic.ttf"
NON_RIBBI = "montserrat/Montserrat-ExtraLight.ttf"
# Add incorrect TYPOGRAPHIC_SUBFAMILY_NAME entries to a RIBBI font
ttFont = TTFont(TEST_FILE(RIBBI))
ttFont['name'].setName("FOO",
NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
PlatformID.WINDOWS,
WindowsEncodingID.UNICODE_BMP,
WindowsLanguageID.ENGLISH_USA)
ttFont['name'].setName("BAR",
NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
PlatformID.MACINTOSH,
MacintoshEncodingID.ROMAN,
MacintoshLanguageID.ENGLISH)
assert_results_contain(check(ttFont),
FAIL, 'mismatch',
f'with a RIBBI that has got incorrect'
f' nameid={NameID.TYPOGRAPHIC_SUBFAMILY_NAME} entries...')
assert_results_contain(check(ttFont),
FAIL, 'bad-win-name')
assert_results_contain(check(ttFont),
FAIL, 'bad-mac-name')
# non-RIBBI fonts must have a TYPOGRAPHIC_SUBFAMILY_NAME entry
ttFont = TTFont(TEST_FILE(NON_RIBBI))
assert_PASS(check(ttFont),
f'with a non-RIBBI containing a nameid={NameID.TYPOGRAPHIC_SUBFAMILY_NAME} entry...')
# set bad values on the win TYPOGRAPHIC_SUBFAMILY_NAME entry:
ttFont = TTFont(TEST_FILE(NON_RIBBI))
ttFont['name'].setName("Generic subfamily name",
NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
PlatformID.WINDOWS,
WindowsEncodingID.UNICODE_BMP,
WindowsLanguageID.ENGLISH_USA)
assert_results_contain(check(ttFont),
FAIL, 'bad-typo-win',
f'with a non-RIBBI with bad nameid={NameID.TYPOGRAPHIC_SUBFAMILY_NAME} entries...')
# set bad values on the mac TYPOGRAPHIC_SUBFAMILY_NAME entry:
ttFont = TTFont(TEST_FILE(NON_RIBBI))
ttFont['name'].setName("Generic subfamily name",
NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
PlatformID.MACINTOSH,
MacintoshEncodingID.ROMAN,
MacintoshLanguageID.ENGLISH)
assert_results_contain(check(ttFont),
FAIL, 'bad-typo-mac',
f'with a non-RIBBI with bad nameid={NameID.TYPOGRAPHIC_SUBFAMILY_NAME} entries...')
# remove all TYPOGRAPHIC_SUBFAMILY_NAME entries
ttFont = TTFont(TEST_FILE(NON_RIBBI))
win_name = ttFont['name'].getName(NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
PlatformID.WINDOWS,
WindowsEncodingID.UNICODE_BMP,
WindowsLanguageID.ENGLISH_USA)
mac_name = ttFont['name'].getName(NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
PlatformID.MACINTOSH,
MacintoshEncodingID.ROMAN,
MacintoshLanguageID.ENGLISH)
win_name.nameID = 254
if mac_name:
mac_name.nameID = 255
assert_results_contain(check(ttFont),
FAIL, 'missing-typo-win',
f'with a non-RIBBI lacking a nameid={NameID.TYPOGRAPHIC_SUBFAMILY_NAME} entry...')
# note: the check must not complain
# about the lack of a mac entry!
def test_check_name_copyright_length():
""" Length of copyright notice must not exceed 500 characters. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/copyright_length")
ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))
good_entry = 'a' * 499
for i, entry in enumerate(ttFont['name'].names):
if entry.nameID == NameID.COPYRIGHT_NOTICE:
ttFont['name'].names[i].string = good_entry.encode(entry.getEncoding())
assert_PASS(check(ttFont),
'with 499-byte copyright notice string...')
good_entry = 'a' * 500
for i, entry in enumerate(ttFont['name'].names):
if entry.nameID == NameID.COPYRIGHT_NOTICE:
ttFont['name'].names[i].string = good_entry.encode(entry.getEncoding())
assert_PASS(check(ttFont),
'with 500-byte copyright notice string...')
bad_entry = 'a' * 501
for i, entry in enumerate(ttFont['name'].names):
if entry.nameID == NameID.COPYRIGHT_NOTICE:
ttFont['name'].names[i].string = bad_entry.encode(entry.getEncoding())
assert_results_contain(check(ttFont),
FAIL, 'too-long',
'with 501-byte copyright notice string...')
# TODO: Maybe skip this code-test if the service is offline?
# We could use pytest.mark.skipif here together with a helper that
# verifies whether or not the namecheck.fontdata.com website is online at the
# moment (see the hedged sketch right after this test).
def test_check_fontdata_namecheck():
""" Familyname is unique according to namecheck.fontdata.com """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/fontdata_namecheck")
TIMEOUT_MSG = ("Sometimes namecheck.fontdata.com times out"
" and we don't want to stop running all the other"
" code tests. Unless you touched this portion of"
" the code, it is generaly safe to ignore this glitch.")
# We don't FAIL because this is meant as a merely informative check
# There may be frequent cases when fonts are being updated and thus
# already have a public family name registered on the
# namecheck.fontdata.com database.
font = TEST_FILE("cabin/Cabin-Regular.ttf")
assert_results_contain(check(font),
INFO, 'name-collision',
'with an already used name...',
ignore_error=TIMEOUT_MSG)
# Here we know that FamilySans has not been (and will not be)
# registered as a real family.
font = TEST_FILE("familysans/FamilySans-Regular.ttf")
assert_PASS(check(font),
'with a unique family name...',
ignore_error=TIMEOUT_MSG)
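# Hedged sketch of the conditional skip suggested in the comment right above
# the namecheck code-test: probe the service and skip the test when it is
# unreachable. The exact URL and timeout values are assumptions.
def is_namecheck_service_online(url="http://namecheck.fontdata.com", timeout=5):
    import requests
    try:
        return requests.get(url, timeout=timeout).status_code == 200
    except requests.RequestException:
        return False
# It could then be attached to the test above with something like:
# @pytest.mark.skipif(not is_namecheck_service_online(),
#                     reason="namecheck.fontdata.com appears to be offline")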
def test_check_fontv():
""" Check for font-v versioning """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/fontv")
ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))
assert_results_contain(check(ttFont),
INFO, 'bad-format',
'with a font that does not follow'
' the suggested font-v versioning scheme ...')
from fontv.libfv import FontVersion
fv = FontVersion(ttFont)
fv.set_state_git_commit_sha1(development=True)
version_string = fv.get_name_id5_version_string()
for record in ttFont['name'].names:
if record.nameID == NameID.VERSION_STRING:
record.string = version_string
assert_PASS(check(ttFont),
'with one that follows the suggested scheme ...')
def test_check_glyf_nested_components():
"""Check glyphs do not have nested components."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/glyf_nested_components")
ttFont = TTFont(TEST_FILE("nunito/Nunito-Regular.ttf"))
assert_PASS(check(ttFont))
# We need to create a nested component. "second" has components, so setting
# one of "quotedbl"'s components to "second" should do it.
ttFont['glyf']['quotedbl'].components[0].glyphName = "second"
assert_results_contain(check(ttFont),
FAIL, 'found-nested-components')
# Temporarily disabling this code-test since check/negative_advance_width itself
# is disabled waiting for an implementation targeting the
# actual root cause of the issue.
#
# See also comments at googlefonts.py as well as at
# https://github.com/googlefonts/fontbakery/issues/1727
def disabled_test_check_negative_advance_width():
""" Check that advance widths cannot be inferred as negative. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/negative_advance_width")
# Our reference Cabin Regular is good
ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))
# So it must PASS
assert_PASS(check(ttFont),
'with a good font...')
# We then change values in an arbitrary glyph
# in the glyf table in order to cause the problem:
glyphName = "J"
coords = ttFont["glyf"].glyphs[glyphName].coordinates
# FIXME:
# Note: I thought this was the proper way to induce the
# issue, but now I think I'll need to look more
# carefully at sample files provided by MarcFoley
# to see what's really at play here and how the relevant
# data is encoded into the affected OpenType files.
rightSideX = coords[-3][0]
# leftSideX: (make right minus left a negative number)
coords[-4][0] = rightSideX + 1
ttFont["glyf"].glyphs[glyphName].coordinates = coords
# and now this should FAIL:
assert_results_contain(check(ttFont),
FAIL, 'bad-coordinates',
'with bad coordinates on the glyf table...')
def test_check_varfont_generate_static():
""" Check a static ttf can be generated from a variable font. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/varfont/generate_static")
ttFont = TTFont(TEST_FILE("cabinvfbeta/CabinVFBeta.ttf"))
assert_PASS(check(ttFont))
# Removing a table to deliberately break variable font
del ttFont['fvar']
assert_results_contain(check(ttFont),
FAIL, 'varlib-mutator')
def test_check_varfont_has_HVAR():
""" Check that variable fonts have an HVAR table. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/varfont/has_HVAR")
# Our reference Cabin Variable Font contains an HVAR table.
ttFont = TTFont(TEST_FILE("cabinvfbeta/CabinVFBeta.ttf"))
# So the check must PASS.
assert_PASS(check(ttFont))
# Introduce the problem by removing the HVAR table:
del ttFont['HVAR']
assert_results_contain(check(ttFont),
FAIL, 'lacks-HVAR')
def test_check_smart_dropout():
""" Font enables smart dropout control in "prep" table instructions? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/smart_dropout")
ttFont = TTFont(TEST_FILE("nunito/Nunito-Regular.ttf"))
# "Program at 'prep' table contains
# instructions enabling smart dropout control."
assert_PASS(check(ttFont))
# "Font does not contain TrueType instructions enabling
# smart dropout control in the 'prep' table program."
import array
ttFont["prep"].program.bytecode = array.array('B', [0])
assert_results_contain(check(ttFont),
FAIL, 'lacks-smart-dropout')
def test_check_vttclean():
""" There must not be VTT Talk sources in the font. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/vttclean")
good_font = TEST_FILE("mada/Mada-Regular.ttf")
assert_PASS(check(good_font))
bad_font = TEST_FILE("hinting/Roboto-VF.ttf")
assert_results_contain(check(bad_font),
FAIL, 'has-vtt-sources')
def test_check_aat():
""" Are there unwanted Apple tables ? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/aat")
unwanted_tables = [
'EBSC', 'Zaph', 'acnt', 'ankr', 'bdat', 'bhed', 'bloc',
'bmap', 'bsln', 'fdsc', 'feat', 'fond', 'gcid', 'just',
'kerx', 'lcar', 'ltag', 'mort', 'morx', 'opbd', 'prop',
'trak', 'xref'
]
# Our reference Mada Regular font is good here:
ttFont = TTFont(TEST_FILE("mada/Mada-Regular.ttf"))
# So it must PASS the check:
assert_PASS(check(ttFont),
'with a good font...')
# We now add unwanted tables one-by-one to validate the FAIL code-path:
for unwanted in unwanted_tables:
ttFont = TTFont(TEST_FILE("mada/Mada-Regular.ttf"))
ttFont.reader.tables[unwanted] = "foo"
assert_results_contain(check(ttFont),
FAIL, 'has-unwanted-tables',
f'with unwanted table {unwanted} ...')
def test_check_fvar_name_entries():
""" All name entries referenced by fvar instances exist on the name table? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/fvar_name_entries")
# This broken version of the Expletus variable font was where this kind of problem was first observed:
ttFont = TTFont(TEST_FILE("broken_expletus_vf/ExpletusSansBeta-VF.ttf"))
# So it must FAIL the check:
assert_results_contain(check(ttFont),
FAIL, 'missing-name',
'with a bad font...')
# If we add the name entry with id=265 (which was the one missing)
# then the check must now PASS:
from fontTools.ttLib.tables._n_a_m_e import makeName
ttFont["name"].names.append(makeName("Foo", 265, 1, 0, 0))
assert_PASS(check(ttFont),
'with a good font...')
def test_check_varfont_has_instances():
""" A variable font must have named instances. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/varfont_has_instances")
# ExpletusVF does have instances.
# Note: The "broken" in the path name refers to something else.
# (See test_check_fvar_name_entries)
ttFont = TTFont(TEST_FILE("broken_expletus_vf/ExpletusSansBeta-VF.ttf"))
# So it must PASS the check:
assert_PASS(check(ttFont),
'with a good font...')
# If we delete all instances, then it must FAIL:
while len(ttFont["fvar"].instances):
del ttFont["fvar"].instances[0]
assert_results_contain(check(ttFont),
FAIL, 'lacks-named-instances',
'with a bad font...')
def test_check_varfont_weight_instances():
""" Variable font weight coordinates must be multiples of 100. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/varfont_weight_instances")
# This copy of Markazi Text has an instance with
# a 491 'wght' coordinate instead of 500.
ttFont = TTFont(TEST_FILE("broken_markazitext/MarkaziText-VF.ttf"))
# So it must FAIL the check:
assert_results_contain(check(ttFont),
FAIL, 'bad-coordinate',
'with a bad font...')
# Let's then change the weight coordinates to make it PASS the check:
for i, instance in enumerate(ttFont["fvar"].instances):
ttFont["fvar"].instances[i].coordinates['wght'] -= instance.coordinates['wght'] % 100
assert_PASS(check(ttFont),
'with a good font...')
def NOT_IMPLEMENTED_test_check_family_tnum_horizontal_metrics():
""" All tabular figures must have the same width across the RIBBI-family. """
# check = CheckTester(googlefonts_profile,
# "com.google.fonts/check/family/tnum_horizontal_metrics")
# TODO: Implement-me!
#
# code-paths:
# - FAIL, "inconsistent-widths"
# - PASS
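# Hedged sketch of the idea behind the planned check above (this is not
# fontbakery's actual implementation): collect the advance width of every
# tabular-figure glyph so that the RIBBI siblings can be compared against
# each other. The ".tnum" glyph-name suffix is an assumption about how
# tabular figures are commonly named.
def tabular_figure_widths(ttFont):
    hmtx = ttFont["hmtx"]  # each entry is an (advance_width, left_side_bearing) pair
    return {glyph_name: hmtx[glyph_name][0]
            for glyph_name in ttFont.getGlyphOrder()
            if glyph_name.endswith(".tnum")}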
def test_check_integer_ppem_if_hinted():
""" PPEM must be an integer on hinted fonts. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/integer_ppem_if_hinted")
# Our reference Merriweather Regular is hinted, but does not set
# the "rounded PPEM" flag (bit 3 on the head table flags) as
# described at https://docs.microsoft.com/en-us/typography/opentype/spec/head
ttFont = TTFont(TEST_FILE("merriweather/Merriweather-Regular.ttf"))
# So it must FAIL the check:
assert_results_contain(check(ttFont),
FAIL, 'bad-flags',
'with a bad font...')
# hotfixing it should make it PASS:
ttFont["head"].flags |= (1 << 3)
assert_PASS(check(ttFont),
'with a good font...')
def test_check_ligature_carets():
""" Is there a caret position declared for every ligature? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/ligature_carets")
# Our reference Mada Medium is known to be bad
ttFont = TTFont(TEST_FILE("mada/Mada-Medium.ttf"))
assert_results_contain(check(ttFont),
WARN, 'lacks-caret-pos',
'with a bad font...')
# And FamilySans Regular is also bad
ttFont = TTFont("data/test/familysans/FamilySans-Regular.ttf")
assert_results_contain(check(ttFont),
WARN, 'GDEF-missing',
'with a bad font...')
# TODO: test the following code-paths:
# - WARN "incomplete-caret-pos-data"
# - FAIL "malformed"
# - PASS (We currently lack a reference family that PASSes this check!)
def test_check_kerning_for_non_ligated_sequences():
""" Is there kerning info for non-ligated sequences ? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/kerning_for_non_ligated_sequences")
# Our reference Mada Medium is known to be good
ttFont = TTFont(TEST_FILE("mada/Mada-Medium.ttf"))
assert_PASS(check(ttFont),
'with a good font...')
# And Merriweather Regular is known to be bad
ttFont = TTFont(TEST_FILE("merriweather/Merriweather-Regular.ttf"))
assert_results_contain(check(ttFont),
WARN, 'lacks-kern-info',
'with a bad font...')
def test_check_family_control_chars():
"""Are any unacceptable control characters present in font files?"""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/family/control_chars")
good_font = TEST_FILE("bad_character_set/control_chars/"
"FontbakeryTesterCCGood-Regular.ttf")
onebad_cc_font = TEST_FILE("bad_character_set/control_chars/"
"FontbakeryTesterCCOneBad-Regular.ttf")
multibad_cc_font = TEST_FILE("bad_character_set/control_chars/"
"FontbakeryTesterCCMultiBad-Regular.ttf")
# No unacceptable control characters should pass with one file
fonts = [good_font]
assert_PASS(check(fonts),
'with one good font...')
# No unacceptable control characters should pass with multiple good files
fonts = [good_font,
good_font]
assert_PASS(check(fonts),
'with multiple good fonts...')
# Unacceptable control chars should fail with one file x one bad char in font
fonts = [onebad_cc_font]
assert_results_contain(check(fonts),
FAIL, 'unacceptable',
'with one bad font that has one bad char...')
# Unacceptable control chars should fail with one file x multiple bad char in font
fonts = [multibad_cc_font]
assert_results_contain(check(fonts),
FAIL, 'unacceptable',
'with one bad font that has multiple bad char...')
# Unacceptable control chars should fail with multiple files x multiple bad chars in fonts
fonts = [onebad_cc_font,
multibad_cc_font]
assert_results_contain(check(fonts),
FAIL, 'unacceptable',
'with multiple bad fonts that have multiple bad chars...')
def test_check_family_italics_have_roman_counterparts():
"""Ensure Italic styles have Roman counterparts."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/family/italics_have_roman_counterparts")
# The path used here, "some-crazy.path/", is meant to ensure
# that the parsing code does not get lost when trying to
# extract the style of a font file.
fonts = ['some-crazy.path/merriweather/Merriweather-BlackItalic.ttf',
'some-crazy.path/merriweather/Merriweather-Black.ttf',
'some-crazy.path/merriweather/Merriweather-BoldItalic.ttf',
'some-crazy.path/merriweather/Merriweather-Bold.ttf',
'some-crazy.path/merriweather/Merriweather-Italic.ttf',
'some-crazy.path/merriweather/Merriweather-LightItalic.ttf',
'some-crazy.path/merriweather/Merriweather-Light.ttf',
'some-crazy.path/merriweather/Merriweather-Regular.ttf']
assert_PASS(check(fonts),
'with a good family...')
fonts.pop(-1) # remove the last one, which is the Regular
assert 'some-crazy.path/merriweather/Merriweather-Regular.ttf' not in fonts
assert 'some-crazy.path/merriweather/Merriweather-Italic.ttf' in fonts
assert_results_contain(check(fonts),
FAIL, 'missing-roman',
'with a family that has an Italic but lacks a Regular.')
fonts.append('some-crazy.path/merriweather/MerriweatherItalic.ttf')
assert_results_contain(check(fonts),
WARN, 'bad-filename',
'with a family that has a non-canonical italic filename.')
# This check must also be able to deal with variable fonts!
fonts = ["cabinvfbeta/CabinVFBeta-Italic[wdth,wght].ttf",
"cabinvfbeta/CabinVFBeta[wdth,wght].ttf"]
assert_PASS(check(fonts),
'with a good set of varfonts...')
fonts = ["cabinvfbeta/CabinVFBeta-Italic[wdth,wght].ttf"]
assert_results_contain(check(fonts),
FAIL, 'missing-roman',
'with an Italic varfont that lacks a Roman counterpart.')
def NOT_IMPLEMENTED__test_com_google_fonts_check_repo_dirname_match_nameid_1():
"""Are any unacceptable control characters present in font files?"""
# check = CheckTester(googlefonts_profile,
# "com.google.fonts/check/repo_dirname_match_nameid_1")
# TODO: Implement-me!
#
# PASS
# FAIL, "lacks-regular"
# FAIL, "mismatch"
#
# passing_file = TEST_FILE(".../.ttf")
# fonts = [passing_file]
# assert_PASS(check(fonts),
# 'with one good font...')
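# Hedged sketch of the naming convention this check would likely enforce,
# inferred only from the check id (not from an actual implementation):
# the family directory name should be the FONT_FAMILY_NAME (nameid 1) of
# the Regular, lowercased and with spaces removed
# (e.g. "Overpass Mono" -> "overpassmono").
def expected_family_dirname(ttFont):
    family_name = ttFont["name"].getDebugName(NameID.FONT_FAMILY_NAME)
    return family_name.replace(" ", "").lower()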
def test_check_repo_vf_has_static_fonts():
"""Check VF family dirs in google/fonts contain static fonts"""
from fontbakery.profiles.googlefonts import com_google_fonts_check_repo_vf_has_static_fonts as check
import tempfile
import shutil
# in order for this check to work, we need to
# mimic the folder structure of the Google Fonts repository
with tempfile.TemporaryDirectory() as tmp_gf_dir:
family_dir = portable_path(tmp_gf_dir + "/ofl/testfamily")
src_family = portable_path("data/test/varfont")
shutil.copytree(src_family, family_dir)
assert_results_contain(check(family_dir),
WARN, 'missing',
'for a VF family which does not have a static dir.')
static_dir = portable_path(family_dir + "/static")
os.mkdir(static_dir)
assert_results_contain(check(family_dir),
FAIL, 'empty',
'for a VF family which has a static dir but no fonts in the static dir.')
static_fonts = portable_path("data/test/cabin")
shutil.rmtree(static_dir)
shutil.copytree(static_fonts, static_dir)
assert_PASS(check(family_dir),
'for a VF family which has a static dir and static fonts')
def test_check_repo_upstream_yaml_has_required_fields():
"""Check upstream.yaml has all required fields"""
from fontbakery.profiles.googlefonts import com_google_fonts_check_repo_upstream_yaml_has_required_fields as check
upstream_yaml = {
"branch": "main",
"repository_url": "https://www.github.com/googlefonts/testFamily",
"files": {"TestFamily-Regular.ttf": "TestFamily-Regular.ttf"}
}
# Pass if upstream.yaml file contains all fields
assert_PASS(check(upstream_yaml),
'for an upstream.yaml which contains all fields')
# Fail if it doesn't
upstream_yaml.pop("repository_url")
assert_results_contain(check(upstream_yaml),
FAIL, "missing-fields",
"for an upsream.yaml which doesn't contain all fields")
def test_check_repo_fb_report():
""" A font repository should not include fontbakery report files """
from fontbakery.profiles.googlefonts import com_google_fonts_check_repo_fb_report as check
import tempfile
import shutil
with tempfile.TemporaryDirectory() as tmp_dir:
family_dir = portable_path(tmp_dir)
src_family = portable_path("data/test/varfont")
shutil.copytree(src_family, family_dir)
assert_PASS(check(family_dir),
'for a repo without Font Bakery report files.')
# Add a json file that is not a FB report
open(os.path.join(family_dir, "something_else.json"), "w+").write("this is not a FB report")
assert_PASS(check(family_dir),
'with a json file that is not a Font Bakery report.')
FB_REPORT_SNIPPET = """
{
"result": {
"INFO": 8,
"PASS": 81,
"SKIP": 74,
"WARN": 4
},
"sections": [
"""
# Report files must be detected even if placed on subdirectories
# and the check code should not rely only on filename (such as "Jura-Regular.fb-report.json")
# but should instead inspect the contents of the file:
open(os.path.join(family_dir,
"jura",
"static",
"my_fontfamily_name.json"), "w+").write(FB_REPORT_SNIPPET)
assert_results_contain(check(family_dir),
WARN, 'fb-report',
'with an actual snippet of a report.')
def test_check_repo_zip_files():
""" A font repository should not include ZIP files """
from fontbakery.profiles.googlefonts import com_google_fonts_check_repo_zip_files as check
import tempfile
import shutil
with tempfile.TemporaryDirectory() as tmp_dir:
family_dir = portable_path(tmp_dir)
src_family = portable_path("data/test/varfont")
shutil.copytree(src_family, family_dir)
assert_PASS(check(family_dir),
'for a repo without ZIP files.')
for ext in ["zip", "rar", "7z"]:
# ZIP files must be detected even if placed on subdirectories:
filepath = os.path.join(family_dir,
f"jura",
f"static",
f"fonts-release.{ext}")
#create an empty file. The check won't care about the contents:
open(filepath, "w+")
assert_results_contain(check(family_dir),
FAIL, 'zip-files',
f"when a {ext} file is found.")
# remove the file before testing the next one ;-)
os.remove(filepath)
def test_check_vertical_metrics_regressions(cabin_ttFonts):
from fontbakery.profiles.shared_conditions import family_directory
from fontbakery.profiles.googlefonts \
import (com_google_fonts_check_vertical_metrics_regressions as check,
api_gfonts_ttFont,
style,
remote_styles,
regular_ttFont,
regular_remote_style,
metadata_file,
family_metadata)
from copy import deepcopy
remote = regular_ttFont([TTFont(f) for f in cabin_fonts])
ttFont = regular_ttFont([TTFont(f) for f in cabin_fonts])
# Cabin test family should match by default
assert_PASS(check(ttFont, remote),
'with a good family...')
# FAIL with a changed vertical metric values
remote2 = deepcopy(remote)
ttFont2 = deepcopy(ttFont)
ttFont2['OS/2'].sTypoAscender = 0
assert_results_contain(check(ttFont2, remote2),
FAIL, 'bad-typo-ascender',
'with a family which has an incorrect typoAscender...')
ttFont2['OS/2'].sTypoDescender = 0
assert_results_contain(check(ttFont2, remote2),
FAIL, 'bad-typo-descender',
'with a family which has an incorrect typoDescender...')
ttFont2['hhea'].ascent = 0
assert_results_contain(check(ttFont2, remote2),
FAIL, 'bad-hhea-ascender',
'with a family which has an incorrect hhea ascender...')
ttFont2['hhea'].descent = 0
assert_results_contain(check(ttFont2, remote2),
FAIL, 'bad-hhea-descender',
'with a family which has an incorrect hhea descender...')
# Fail if family on Google Fonts has fsSelection bit 7 enabled but checked fonts don't
remote3 = deepcopy(remote)
ttFont3 = deepcopy(ttFont)
ttFont3["OS/2"].fsSelection &= ~(1 << 7)
assert_results_contain(check(ttFont3, remote3),
FAIL, "bad-fsselection-bit7",
"with a remote family which has typo metrics "
"enabled and the fonts being checked don't.")
# Pass if family on Google Fonts doesn't have fsSelection bit 7 enabled but checked
# fonts has taken this into consideration
remote4 = deepcopy(remote)
ttFont4 = deepcopy(ttFont)
remote4["OS/2"].fsSelection &= ~(1 << 7)
ttFont4["OS/2"].sTypoAscender = remote4["OS/2"].usWinAscent
ttFont4["OS/2"].sTypoDescender = -remote4["OS/2"].usWinDescent
ttFont4["hhea"].ascent = remote4["OS/2"].usWinAscent
ttFont4["hhea"].descent = -remote4["OS/2"].usWinDescent
assert_PASS(check(ttFont4, remote4),
'with a remote family which does not have typo metrics'
' enabled but the checked fonts vertical metrics have been'
' set so its typo and hhea metrics match the remote'
' fonts win metrics.')
# Same as previous check but using a remote font which has a different upm
remote5 = deepcopy(remote)
ttFont5 = deepcopy(ttFont)
remote5["OS/2"].fsSelection &= ~(1 << 7)
remote5["head"].unitsPerEm = 2000
# divide by 2 since we've doubled the upm
ttFont5["OS/2"].sTypoAscender = math.ceil(remote5["OS/2"].usWinAscent / 2)
ttFont5["OS/2"].sTypoDescender = math.ceil(-remote5["OS/2"].usWinDescent / 2)
ttFont5["hhea"].ascent = math.ceil(remote5["OS/2"].usWinAscent / 2)
ttFont5["hhea"].descent = math.ceil(-remote5["OS/2"].usWinDescent / 2)
assert_PASS(check(ttFont5, remote5),
'with a remote family which does not have typo metrics '
'enabled but the checked fonts vertical metrics have been '
'set so its typo and hhea metrics match the remote '
'fonts win metrics.')
remote6 = deepcopy(remote)
ttFont6 = deepcopy(ttFont)
ttFont6['OS/2'].fsSelection &= ~(1 << 7)
assert_results_contain(check(ttFont6, remote6),
FAIL, "bad-fsselection-bit7",
'OS/2 fsSelection bit 7 must be enabled.')
# Disable bit 7 in both fonts but change win metrics of ttFont
ttFont7 = deepcopy(remote)
remote7 = deepcopy(remote)
remote7["OS/2"].fsSelection &= ~(1 << 7)
ttFont7["OS/2"].fsSelection &= ~(1 << 7)
ttFont7["OS/2"].usWinAscent = 2500
assert_results_contain(check(ttFont7, remote7),
FAIL, "bad-fsselection-bit7",
'OS/2 fsSelection bit 7 must be enabled.')
#
#else:
# TODO: There should be a warning message here
def test_check_cjk_vertical_metrics():
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/cjk_vertical_metrics")
ttFont = TTFont(cjk_font)
assert_PASS(check(ttFont),
'for Source Han Sans')
ttFont = TTFont(cjk_font)
ttFont['OS/2'].fsSelection |= (1 << 7)
assert_results_contain(check(ttFont),
FAIL, 'bad-fselection-bit7',
'for font where OS/2 fsSelection bit 7 is enabled')
ttFont = TTFont(cjk_font)
ttFont['OS/2'].sTypoAscender = float('inf')
assert_results_contain(check(ttFont),
FAIL, 'bad-OS/2.sTypoAscender',
'for font with bad OS/2.sTypoAscender')
ttFont = TTFont(cjk_font)
ttFont['OS/2'].sTypoDescender = float('inf')
assert_results_contain(check(ttFont),
FAIL, 'bad-OS/2.sTypoDescender',
'for font with bad OS/2.sTypoDescender')
ttFont = TTFont(cjk_font)
ttFont['OS/2'].sTypoLineGap = float('inf')
assert_results_contain(check(ttFont),
FAIL, 'bad-OS/2.sTypoLineGap',
'for font where linegaps have been set (OS/2 table)')
ttFont = TTFont(cjk_font)
ttFont['hhea'].lineGap = float('inf')
assert_results_contain(check(ttFont),
FAIL, 'bad-hhea.lineGap',
'for font where linegaps have been set (hhea table)')
ttFont = TTFont(cjk_font)
ttFont['OS/2'].usWinAscent = float('inf')
assert_results_contain(check(ttFont),
FAIL, 'ascent-mismatch',
'for a font where typo ascender != 0.88 * upm')
ttFont = TTFont(cjk_font)
ttFont['OS/2'].usWinDescent = -float('inf')
assert_results_contain(check(ttFont),
FAIL, 'descent-mismatch',
'for a font where typo descender != 0.12 * upm')
ttFont = TTFont(cjk_font)
ttFont['OS/2'].usWinAscent = float('inf')
ttFont['hhea'].ascent = float('inf')
assert_results_contain(check(ttFont),
WARN, 'bad-hhea-range',
'if font hhea and win metrics are greater than 1.5 * upm')
def test_check_cjk_vertical_metrics_regressions():
from copy import deepcopy
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/cjk_vertical_metrics_regressions")
ttFont = TTFont(cjk_font)
regular_remote_style = deepcopy(ttFont)
# Check on duplicate
regular_remote_style = deepcopy(ttFont)
assert_PASS(check(ttFont, {"regular_remote_style": regular_remote_style}),
'for Source Han Sans')
# Change a single metric
ttFont2 = deepcopy(ttFont)
ttFont2['hhea'].ascent = 0
assert_results_contain(check(ttFont2, {"regular_remote_style": regular_remote_style}),
FAIL, "cjk-metric-regression",
'hhea ascent is 0 when it should be 880')
# Change upm of font being checked
ttFont3 = deepcopy(ttFont)
ttFont3['head'].unitsPerEm = 2000
assert_results_contain(check(ttFont3, {"regular_remote_style": regular_remote_style}),
FAIL, "cjk-metric-regression",
'upm is 2000 and vert metrics values are not updated')
# Change upm of checked font and update vert metrics
ttFont4 = deepcopy(ttFont)
ttFont4['head'].unitsPerEm = 2000
for tbl, attrib in [
("OS/2", "sTypoAscender"),
("OS/2", "sTypoDescender"),
("OS/2", "sTypoLineGap"),
("OS/2", "usWinAscent"),
("OS/2", "usWinDescent"),
("hhea", "ascent"),
("hhea", "descent"),
("hhea", "lineGap"),
]:
current_val = getattr(ttFont4[tbl], attrib)
setattr(ttFont4[tbl], attrib, current_val * 2)
assert_PASS(check(ttFont4, {"regular_remote_style": regular_remote_style}),
'for Source Han Sans with doubled upm and doubled vert metrics')
def test_check_cjk_not_enough_glyphs():
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/cjk_not_enough_glyphs")
ttFont = TTFont(cjk_font)
assert_PASS(check(ttFont),
'for Source Han Sans')
ttFont = TTFont(TEST_FILE("montserrat/Montserrat-Regular.ttf"))
assert_PASS(check(ttFont),
'for Montserrat')
# Let's modify Montserrat's cmap so there's a cjk glyph
cmap = ttFont['cmap'].getcmap(3,1)
# Add first character of the CJK unified Ideographs
cmap.cmap[0x4E00] = "A"
assert_results_contain(check(ttFont),
WARN, "cjk-not-enough-glyphs",
"There is only 1 CJK glyphs")
# Add second character of the CJK unified Ideographs
cmap.cmap[0x4E01] = "B"
assert_results_contain(check(ttFont),
WARN, "cjk-not-enough-glyphs",
"There are only 2 CJK glyphs")
def test_check_varfont_instance_coordinates(vf_ttFont):
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/varfont_instance_coordinates")
# OpenSans-Roman-VF is correct
assert_PASS(check(vf_ttFont),
'with a variable font which has correct instance coordinates.')
from copy import copy
vf_ttFont2 = copy(vf_ttFont)
for instance in vf_ttFont2['fvar'].instances:
for axis in instance.coordinates.keys():
instance.coordinates[axis] = 0
assert_results_contain(check(vf_ttFont2),
FAIL, "bad-coordinate",
'with a variable font which does not have'
' correct instance coordinates.')
def test_check_varfont_instance_names(vf_ttFont):
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/varfont_instance_names")
assert_PASS(check(vf_ttFont),
'with a variable font which has correct instance names.')
from copy import copy
vf_ttFont2 = copy(vf_ttFont)
for instance in vf_ttFont2['fvar'].instances:
instance.subfamilyNameID = 300
broken_name ="ExtraBlack Condensed 300pt"
vf_ttFont2['name'].setName(broken_name,
300,
PlatformID.MACINTOSH,
MacintoshEncodingID.ROMAN,
MacintoshLanguageID.ENGLISH)
vf_ttFont2['name'].setName(broken_name,
300,
PlatformID.WINDOWS,
WindowsEncodingID.UNICODE_BMP,
WindowsLanguageID.ENGLISH_USA)
assert_results_contain(check(vf_ttFont2),
FAIL, 'bad-instance-names',
'with a variable font which does not have correct instance names.')
def test_check_varfont_duplicate_instance_names(vf_ttFont):
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/varfont_duplicate_instance_names")
assert_PASS(check(vf_ttFont),
'with a variable font which has unique instance names.')
from copy import copy
vf_ttFont2 = copy(vf_ttFont)
duplicate_instance_name = vf_ttFont2['name'].getName(
vf_ttFont2['fvar'].instances[0].subfamilyNameID,
PlatformID.WINDOWS,
WindowsEncodingID.UNICODE_BMP,
WindowsLanguageID.ENGLISH_USA
).toUnicode()
vf_ttFont2['name'].setName(string=duplicate_instance_name,
nameID=vf_ttFont2['fvar'].instances[1].subfamilyNameID,
platformID=PlatformID.WINDOWS,
platEncID=WindowsEncodingID.UNICODE_BMP,
langID=WindowsLanguageID.ENGLISH_USA)
assert_results_contain(check(vf_ttFont2),
FAIL, 'duplicate-instance-names')
def test_check_varfont_unsupported_axes():
"""Ensure VFs do not contain opsz or ital axes."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/varfont/unsupported_axes")
# Our reference varfont, CabinVFBeta.ttf, lacks 'ital' and 'slnt' variation axes.
# So it should PASS the check:
ttFont = TTFont(TEST_FILE("cabinvfbeta/CabinVFBeta.ttf"))
assert_PASS(check(ttFont))
# If we add 'ital' it must FAIL:
from fontTools.ttLib.tables._f_v_a_r import Axis
new_axis = Axis()
new_axis.axisTag = "ital"
ttFont["fvar"].axes.append(new_axis)
assert_results_contain(check(ttFont),
FAIL, 'unsupported-ital')
# Then we reload the font and add 'slnt'
# so it must also FAIL:
ttFont = TTFont("data/test/cabinvfbeta/CabinVFBeta.ttf")
new_axis = Axis()
new_axis.axisTag = "slnt"
ttFont["fvar"].axes.append(new_axis)
assert_results_contain(check(ttFont),
FAIL, 'unsupported-slnt')
def test_check_varfont_grade_reflow():
""" Ensure VFs with the GRAD axis do not vary horizontal advance. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/varfont/grade_reflow")
ttFont = TTFont(TEST_FILE("BadGrades/BadGrades-VF.ttf"))
assert_results_contain(check(ttFont),
FAIL, 'grad-causes-reflow')
# Zero out the horizontal advances
gvar = ttFont["gvar"]
for glyph, deltas in gvar.variations.items():
for delta in deltas:
if "GRAD" not in delta.axes:
continue
if delta.coordinates:
delta.coordinates = delta.coordinates[:-4] + [(0,0)] * 4
# But the kern rules should still be a problem
assert_results_contain(check(ttFont),
FAIL, 'grad-kern-causes-reflow')
ttFont["GPOS"].table.LookupList.Lookup = []
assert_PASS(check(ttFont))
def test_check_gfaxisregistry_bounds():
"""Validate METADATA.pb axes values are within gf-axisregistry bounds."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/gf-axisregistry_bounds")
# Our reference varfont, CabinVF, has good axes bounds:
ttFont = TTFont(TEST_FILE("cabinvf/Cabin[wdth,wght].ttf"))
assert_PASS(check(ttFont))
# The first axis declared in this family is 'wdth' (Width)
# And the GF Axis Registry expects this axis to have a range
# not broader than min: 25 / max: 200
# So...
md = check["family_metadata"]
md.axes[0].min_value = 20
assert_results_contain(check(ttFont, {"family_metadata": md}),
FAIL, "bad-axis-range")
md.axes[0].min_value = 25
md.axes[0].max_value = 250
assert_results_contain(check(ttFont, {"family_metadata": md}),
FAIL, "bad-axis-range")
def test_check_gf_axisregistry_valid_tags():
"""Validate METADATA.pb axes tags are defined in gf-axisregistry."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/gf-axisregistry_valid_tags")
# The axis tags in our reference varfont, CabinVF,
# are properly defined in the registry:
ttFont = TTFont(TEST_FILE("cabinvf/Cabin[wdth,wght].ttf"))
assert_PASS(check(ttFont))
md = check["family_metadata"]
md.axes[0].tag = "crap" # I'm pretty sure this one wont ever be included in the registry
assert_results_contain(check(ttFont, {"family_metadata": md}),
FAIL, "bad-axis-tag")
def test_check_gf_axisregistry_fvar_axis_defaults():
"""Validate METADATA.pb axes tags are defined in gf-axisregistry."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/gf-axisregistry/fvar_axis_defaults")
# The default value for the axes in this reference varfont
# are properly registered in the registry:
ttFont = TTFont(TEST_FILE("cabinvf/Cabin[wdth,wght].ttf"))
assert_PASS(check(ttFont))
# And this value surely doesn't map to a fallback name in the registry
ttFont['fvar'].axes[0].defaultValue = 123
assert_results_contain(check(ttFont),
FAIL, "not-registered")
def test_check_STAT_gf_axisregistry():
"""Validate STAT particle names and values match the fallback names in GFAxisRegistry."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/STAT/gf-axisregistry")
# Our reference varfont, CabinVF,
# has "Regular", instead of "Roman" in its 'ital' axis on the STAT table:
ttFont = TTFont(TEST_FILE("cabinvf/Cabin[wdth,wght].ttf"))
assert_results_contain(check(ttFont),
FAIL, "invalid-name")
# LibreCaslonText is good though:
ttFont = TTFont(TEST_FILE("librecaslontext/LibreCaslonText[wght].ttf"))
assert_PASS(check(ttFont))
# Let's break it by setting an invalid coordinate for "Bold":
assert ttFont['STAT'].table.AxisValueArray.AxisValue[3].ValueNameID == ttFont['name'].names[4].nameID
assert ttFont['name'].names[4].toUnicode() == "Bold"
ttFont['STAT'].table.AxisValueArray.AxisValue[3].Value = 800 # instead of the expected 700
# Note: I know it is AxisValue[3] and names[4] because I inspected the font using ttx.
assert_results_contain(check(ttFont),
FAIL, "bad-coordinate")
# Let's remove all Axis Values. This will fail since Google Fonts
# requires them.
ttFont['STAT'].table.AxisValueArray = None
assert_results_contain(check(ttFont),
FAIL, "missing-axis-values")
def test_check_metadata_consistent_axis_enumeration():
"""Validate VF axes match the ones declared on METADATA.pb."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/consistent_axis_enumeration")
# The axis tags of CabinVF,
# are properly declared on its METADATA.pb:
ttFont = TTFont(TEST_FILE("cabinvf/Cabin[wdth,wght].ttf"))
assert_PASS(check(ttFont))
md = check["family_metadata"]
md.axes[1].tag = "wdth" # this effectively removes the "wght" axis while not adding an extra one
assert_results_contain(check(ttFont, {"family_metadata": md}),
FAIL, "missing-axes")
md.axes[1].tag = "ouch" # and this is an unwanted extra axis
assert_results_contain(check(ttFont, {"family_metadata": md}),
FAIL, "extra-axes")
def test_check_STAT_axis_order():
"""Check axis ordering on the STAT table."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/STAT/axis_order")
fonts = [TEST_FILE("cabinvf/Cabin[wdth,wght].ttf")]
assert_results_contain(check(fonts),
INFO, "summary")
fonts = [TEST_FILE("merriweather/Merriweather-Regular.ttf")]
assert_results_contain(check(fonts),
SKIP, "missing-STAT")
# A real-world case here would be a corrupted TTF file.
# This clearly is not a TTF, but is good enough for testing:
fonts = [TEST_FILE("merriweather/METADATA.pb")]
assert_results_contain(check(fonts),
ERROR, "bad-font")
def test_check_metadata_escaped_strings():
"""Ensure METADATA.pb does not use escaped strings."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/escaped_strings")
good = TEST_FILE("issue_2932/good/SomeFont-Regular.ttf")
assert_PASS(check(good))
bad = TEST_FILE("issue_2932/bad/SomeFont-Regular.ttf")
assert_results_contain(check(bad),
FAIL, "escaped-strings")
def test_check_metadata_designer_profiles():
"""METADATA.pb: Designer is listed with the correct name on
the Google Fonts catalog of designers?"""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/designer_profiles")
# Delve Withrington is still not listed on the designers catalog.
# Note: Once it is listed, this code-test will start failing and will need to be updated.
font = TEST_FILE("overpassmono/OverpassMono-Regular.ttf")
assert_results_contain(check(font),
WARN, "profile-not-found")
# Cousine lists designers: "Multiple Designers"
font = TEST_FILE("cousine/Cousine-Regular.ttf")
assert_results_contain(check(font),
FAIL, "multiple-designers")
# This reference Merriweather font family lists "Sorkin Type" in its METADATA.pb file.
# And this foundry has a good profile on the catalog.
font = TEST_FILE("merriweather/Merriweather-Regular.ttf")
assert_PASS(check(font))
# TODO: FAIL, "mismatch"
# TODO: FAIL, "link-field"
# TODO: FAIL, "missing-avatar"
# TODO: FAIL, "bad-avatar-filename"
def test_check_mandatory_avar_table():
"""Ensure variable fonts include an avar table."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/mandatory_avar_table")
ttFont = TTFont(TEST_FILE("ibmplexsans-vf/IBMPlexSansVar-Roman.ttf"))
assert_PASS(check(ttFont))
del ttFont["avar"]
assert_results_contain(check(ttFont),
FAIL, "missing-avar")
def test_check_description_family_update():
"""On a family update, the DESCRIPTION.en_us.html file should ideally also be updated."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/description/family_update")
font = TEST_FILE("abeezee/ABeeZee-Regular.ttf")
ABEEZEE_DESC = ('https://raw.githubusercontent.com/google/fonts/'
'main/ofl/abeezee/DESCRIPTION.en_us.html')
import requests
desc = requests.get(ABEEZEE_DESC).text
assert_results_contain(check(font, {'description': desc}),
FAIL, "description-not-updated")
assert_PASS(check(font, {'description': desc + '\nSomething else...'}))
def test_check_os2_use_typo_metrics():
"""All non-CJK fonts checked with the googlefonts profile
should have OS/2.fsSelection bit 7 (USE TYPO METRICS) set."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/os2/use_typo_metrics")
ttFont = TTFont(TEST_FILE("abeezee/ABeeZee-Regular.ttf"))
fsel = ttFont["OS/2"].fsSelection
# set bit 7
ttFont["OS/2"].fsSelection = fsel | (1 << 7)
assert_PASS(check(ttFont))
# clear bit 7
ttFont["OS/2"].fsSelection = fsel & ~(1 << 7)
assert_results_contain(check(ttFont),
FAIL, 'missing-os2-fsselection-bit7')
# TODO: If I recall correctly, there was something wrong with
# code-tests that try to ensure a check skips.
# I will have to review this one at some point to verify
# if that's the reason for this test not working properly.
#
# -- Felipe Sanches (May 31, 2021)
def TODO_test_check_os2_use_typo_metrics_with_cjk():
"""All CJK fonts checked with the googlefonts profile should skip this check"""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/os2/use_typo_metrics")
tt_pass_clear = TTFont(TEST_FILE("cjk/SourceHanSans-Regular.otf"))
tt_pass_set = TTFont(TEST_FILE("cjk/SourceHanSans-Regular.otf"))
fs_selection = 0
# test skip with font that contains cleared bit
tt_pass_clear["OS/2"].fsSelection = fs_selection
# test skip with font that contains set bit
tt_pass_set["OS/2"].fsSelection = fs_selection | (1 << 7)
assert_SKIP(check(tt_pass_clear))
assert_SKIP(check(tt_pass_set))
def test_check_missing_small_caps_glyphs():
"""Check small caps glyphs are available."""
#check = CheckTester(googlefonts_profile,
# "com.google.fonts/check/missing_small_caps_glyphs")
# TODO: Implement-me!
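# Hedged sketch of the intent of this check (not fontbakery's code): a font
# that declares the 'smcp' or 'c2sc' OpenType features should actually ship
# the small-caps glyphs those features substitute to. This helper only lists
# which of the two features are declared in the GSUB table.
def declared_smallcaps_features(ttFont):
    if "GSUB" not in ttFont or ttFont["GSUB"].table.FeatureList is None:
        return set()
    return {record.FeatureTag
            for record in ttFont["GSUB"].table.FeatureList.FeatureRecord
            if record.FeatureTag in ("smcp", "c2sc")}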
def test_check_stylisticset_description():
"""Ensure Stylistic Sets have description."""
#check = CheckTester(googlefonts_profile,
# "com.google.fonts/check/stylisticset_description")
# TODO: Implement-me!
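# Hedged sketch of what this check would look at (not fontbakery's code):
# each ssXX feature is expected to carry a FeatureParams record whose UINameID
# points to a name-table string that describes the stylistic set.
def stylistic_sets_lacking_description(ttFont):
    missing = []
    if "GSUB" not in ttFont or ttFont["GSUB"].table.FeatureList is None:
        return missing
    for record in ttFont["GSUB"].table.FeatureList.FeatureRecord:
        tag = record.FeatureTag
        if tag.startswith("ss") and tag[2:].isdigit():
            params = record.Feature.FeatureParams
            if params is None or not ttFont["name"].getDebugName(params.UINameID):
                missing.append(tag)
    return missing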
def test_check_meta_script_lang_tags():
"""Ensure font has ScriptLangTags in the 'meta' table."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/meta/script_lang_tags")
# This sample font from the Noto project declares
# the script/lang tags in the meta table correctly:
ttFont = TTFont(TEST_FILE("meta_tag/NotoSansPhagsPa-Regular-with-meta.ttf"))
assert_results_contain(check(ttFont), INFO, 'dlng-tag')
assert_results_contain(check(ttFont), INFO, 'slng-tag')
del ttFont["meta"].data['dlng']
assert_results_contain(check(ttFont),
FAIL, 'missing-dlng-tag')
del ttFont["meta"].data['slng']
assert_results_contain(check(ttFont),
FAIL, 'missing-slng-tag')
del ttFont["meta"]
assert_results_contain(check(ttFont),
WARN, 'lacks-meta-table')
def test_check_no_debugging_tables():
"""Ensure fonts do not contain any preproduction tables."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/no_debugging_tables")
ttFont = TTFont(TEST_FILE("overpassmono/OverpassMono-Regular.ttf"))
assert_results_contain(check(ttFont),
WARN, 'has-debugging-tables')
del ttFont["FFTM"]
assert_PASS(check(ttFont))
def test_check_metadata_family_directory_name():
"""Check family directory name."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/family_directory_name")
ttFont = TEST_FILE("overpassmono/OverpassMono-Regular.ttf")
assert_PASS(check(ttFont))
# Note:
# Here I explicitly pass 'family_metadata' to avoid it being recomputed
# after I make the family_directory wrong:
assert_results_contain(check(ttFont, {'family_metadata': check['family_metadata'],
'family_directory': 'overpass'}),
FAIL, 'bad-directory-name')
def test_check_render_own_name():
"""Check family directory name."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/render_own_name")
ttFont = TEST_FILE("overpassmono/OverpassMono-Regular.ttf")
assert_PASS(check(ttFont))
ttFont = TEST_FILE("noto_sans_tamil_supplement/NotoSansTamilSupplement-Regular.ttf")
assert_results_contain(check(ttFont),
FAIL, 'render-own-name')
@pytest.mark.debug
def test_check_repo_sample_image():
"""Check README.md has a sample image."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/repo/sample_image")
# That's what we'd like to see:
# README.md including a sample image and highlighting it in the
# upper portion of the document (no more than 10 lines from the top).
readme = TEST_FILE("issue_2898/good/README.md")
assert_PASS(check(readme))
# This one is still good, but places the sample image too late in the page:
readme = TEST_FILE("issue_2898/not-ideal-placement/README.md")
assert_results_contain(check(readme),
WARN, 'not-ideal-placement')
# Here's a README.md in a project completely lacking such sample image.
# This will likely become a FAIL in the future:
readme = TEST_FILE("issue_2898/no-sample/README.md")
assert_results_contain(check(readme),
WARN, 'no-sample') # FIXME: Make this a FAIL!
# This is really broken, as it references an image that is not available:
readme = TEST_FILE("issue_2898/image-missing/README.md")
assert_results_contain(check(readme),
FAIL, 'image-missing')
# And here is a README.md that does not include any sample image,
# while an image file can be found within the project's directory tree.
# This image could potentially be a font sample, so we let the user know
# that it might be the case:
readme = TEST_FILE("issue_2898/image-not-displayed/README.md")
assert_results_contain(check(readme),
WARN, 'image-not-displayed')
|
moyogo/fontbakery
|
tests/profiles/googlefonts_test.py
|
Python
|
apache-2.0
| 170,205
|
[
"Galaxy"
] |
f3fbf814f906bbfb09ee94e8545368f3113136931c3de2297e4b1bc6066f8f9a
|
from tao.models import Simulation, StellarModel, DustModel, BandPassFilter
from tao.settings import PROJECT_DIR
from tao.tests.integration_tests.helper import LiveServerTest
from tao.tests.support.factories import UserFactory, SimulationFactory, GalaxyModelFactory, DataSetFactory, JobFactory, DataSetPropertyFactory, DustModelFactory, StellarModelFactory, BandPassFilterFactory, GlobalParameterFactory, SnapshotFactory, SurveyPresetFactory
import os.path
class JobTypeFormTests(LiveServerTest):
def setUp(self):
super(JobTypeFormTests, self).setUp()
GlobalParameterFactory.create(parameter_name='maximum-random-light-cones', parameter_value='10')
box_sim = SimulationFactory.create(box_size=500, name='simulation_000')
lc_sim = SimulationFactory.create(box_size=60, name='simulation_001')
self.params_path = os.path.join(PROJECT_DIR, 'test_data', 'params.xml')
params_string = open(self.params_path).read()
for i in range(3):
g = GalaxyModelFactory.create(name='galaxy_model_%03d' % i)
ds = DataSetFactory.create(simulation=box_sim, galaxy_model=g, max_job_box_count=25)
for j in range(10):
SnapshotFactory.create(dataset=ds, redshift=str(j)+".0")
for j in range(3):
dsp = DataSetPropertyFactory.create(dataset=ds, label='parameter_%03d label' % j, name='name_%03d' % j, description='description_%03d' % j)
ds.default_filter_field = dsp
ds.save()
for i in range(4,8):
g = GalaxyModelFactory.create(name='galaxy_model_%03d' % i)
ds = DataSetFactory.create(simulation=lc_sim, galaxy_model=g, max_job_box_count=25, id=i)
for j in range(10):
SnapshotFactory.create(dataset=ds, redshift=str(j)+".0")
for j in range(4,7):
dsp = DataSetPropertyFactory.create(dataset=ds, label='parameter_%03d label' % j, name='name_%03d' % j, description='description_%03d' % j)
ds.default_filter_field = dsp
ds.save()
for i in range(3):
StellarModelFactory.create(label='stellar_label_%03d' % i,
name='model{0}/sspm.dat'.format(i),
description='<p>Description %d </p>' % i)
BandPassFilterFactory.create(label='Band pass filter %03d' % i, filter_id='%d' % i)
DustModelFactory.create(name='Dust_model_%03d.dat' % i, label='Dust model %03d' % i, details='<p>Detail %d </p>' % i)
SurveyPresetFactory.create(name='Preset %d' % i, parameters=params_string)
username = "person"
password = "funnyfish"
self.user = UserFactory.create(username=username, password=password, is_staff=True, is_active=True, is_superuser=True)
self.login(username, password)
self.visit('mock_galaxy_factory')
self.click('tao-tabs-job_type')
def tearDown(self):
super(JobTypeFormTests, self).tearDown()
def test_light_cone_params(self):
self.upload_params_file()
lc_geometry = self.get_selected_option_text(self.lc_id('catalogue_geometry'))
self.assertEqual('Light-Cone', lc_geometry)
lc_sim = self.get_selected_option_text(self.lc_id('dark_matter_simulation'))
self.assertEqual('simulation_001', lc_sim)
lc_galaxy = self.get_selected_option_text(self.lc_id('galaxy_model'))
self.assertEqual('galaxy_model_006', lc_galaxy)
lc_expected = {
self.lc_id('ra_opening_angle'): '1',
self.lc_id('dec_opening_angle'): '2',
self.lc_id('redshift_min'): '3',
self.lc_id('redshift_max'): '4',
self.lc_id('number_of_light_cones'): '3'
}
self.assert_attribute_equals('value', lc_expected)
self.assert_is_checked(self.lc_id('light_cone_type_1'))
self.assert_multi_selected_text_equals(self.lc_id('output_properties-right'), ['parameter_005 label'])
rng_seeds_expected = [111111, 222222, 333333]
rng_seeds_actual = self.selenium.execute_script('return catalogue.vm.light_cone.rng_seeds()')
self.assertEqual(rng_seeds_expected, rng_seeds_actual)
def test_sed_params(self):
self.upload_params_file()
self.click('tao-tabs-sed')
sed_pop = self.get_selected_option_text(self.sed_id('single_stellar_population_model'))
self.assertEqual('stellar_label_001', sed_pop)
self.assert_multi_selected_text_equals(self.sed_id('band_pass_filters-right'), ['Band pass filter 000 (Apparent)','Band pass filter 002 (Apparent)'])
def test_mock_image_params(self):
self.upload_params_file()
self.click('tao-tabs-mock_image')
self.assertEqual([u'ALL', 0, 1, 2], self.get_ko_array('catalogue.vm.mock_image.sub_cone_options()', 'value'))
self.assertEqual([u'1_apparent', u'3_apparent'],
self.get_ko_array('catalogue.modules.mock_image.vm.image_settings()[0].mag_field_options()', 'pk'))
self.assertEqual([u'FITS'], self.get_ko_array('catalogue.vm.mock_image.format_options', 'value'))
self.assertEqual(1, self.get_image_setting_ko_field(0,'sub_cone'))
self.assertEqual('3_apparent', self.get_image_setting_ko_field(0, 'mag_field', field='pk'))
self.assertEqual('7', self.get_image_setting_ko_value(0, 'min_mag'))
self.assertEqual('12', self.get_image_setting_ko_value(0, 'max_mag'))
self.assertEqual('3', self.get_image_setting_ko_value(0, 'z_min'))
self.assertEqual('4', self.get_image_setting_ko_value(0, 'z_max'))
self.assertEqual('0.5', self.get_image_setting_ko_value(0, 'origin_ra'))
self.assertEqual('1', self.get_image_setting_ko_value(0, 'origin_dec'))
self.assertEqual('1', self.get_image_setting_ko_value(0, 'fov_ra'))
self.assertEqual('2', self.get_image_setting_ko_value(0, 'fov_dec'))
self.assertEqual('667', self.get_image_setting_ko_value(0, 'width'))
self.assertEqual('666', self.get_image_setting_ko_value(0, 'height'))
self.assertEqual(2, self.get_image_setting_ko_field(1,'sub_cone'))
self.assertEqual('3_apparent', self.get_image_setting_ko_field(1, 'mag_field', field='pk'))
self.assertEqual('', self.get_image_setting_ko_value(1, 'min_mag'))
self.assertEqual('11', self.get_image_setting_ko_value(1, 'max_mag'))
self.assertEqual('3', self.get_image_setting_ko_value(1, 'z_min'))
self.assertEqual('4', self.get_image_setting_ko_value(1, 'z_max'))
self.assertEqual('0.5', self.get_image_setting_ko_value(1, 'origin_ra'))
self.assertEqual('1', self.get_image_setting_ko_value(1, 'origin_dec'))
self.assertEqual('1', self.get_image_setting_ko_value(1, 'fov_ra'))
self.assertEqual('2', self.get_image_setting_ko_value(1, 'fov_dec'))
self.assertEqual('778', self.get_image_setting_ko_value(1, 'width'))
self.assertEqual('777', self.get_image_setting_ko_value(1, 'height'))
def test_rf_params(self):
self.upload_params_file()
self.click('tao-tabs-record_filter')
rf_filter = self.get_selected_option_text(self.rf_id('filter'))
self.assertEqual('parameter_005 label', rf_filter)
rf_expected = {
self.rf_id('min'): '',
self.rf_id('max'): '12'
}
self.assert_attribute_equals('value', rf_expected)
def test_output_params(self):
self.upload_params_file()
self.click('tao-tabs-output_format')
out_format = self.get_selected_option_text('#id_output_format-supported_formats')
self.assertEqual('FITS', out_format)
def test_summary_params(self):
self.upload_params_file()
self.click('tao-tabs-summary_submit')
self.assert_summary_field_correctly_shown('Light-Cone', 'light_cone', 'geometry_type')
self.assert_summary_field_correctly_shown('simulation_001', 'light_cone', 'simulation')
self.assert_summary_field_correctly_shown('galaxy_model_006', 'light_cone', 'galaxy_model')
self.assert_summary_field_correctly_shown('1 properties selected', 'light_cone', 'output_properties')
self.assert_summary_field_correctly_shown('stellar_label_001', 'sed', 'single_stellar_population_model')
self.assert_summary_field_correctly_shown('2 properties selected', 'sed', 'band_pass_filters')
self.assert_summary_field_correctly_shown('Not selected', 'sed', 'apply_dust')
self.assert_summary_field_correctly_shown('2 images', 'mock_image', 'select_mock_image')
self.assert_summary_field_correctly_shown(u'parameter_005 label \u2264 12.0', 'record_filter', 'record_filter')
self.assert_summary_field_correctly_shown('FITS', 'output', 'output_format')
def test_load_preset(self):
self.click('presets_button')
self.click('load_survey_preset_button')
self.assert_page_has_content("Survey Preset 'Preset 0' loaded successfully.")
def test_handles_malformed_xml(self):
from selenium.webdriver.support.wait import WebDriverWait
timeout = 2
json_path = os.path.join(PROJECT_DIR, 'test_data', 'test_data.json')
self.selenium.find_element_by_id('id_job_type-params_file').send_keys(json_path)
WebDriverWait(self.selenium, timeout).until(lambda driver: driver.find_element_by_css_selector('.alert-error'))
self.assert_page_has_content("Failed to process parameter file: 'test_data.json'.")
def get_ko_array(self, vm_ko_array, field):
js = 'return $.map(' + vm_ko_array + ', function(v, i) { return v.' + field + '; });'
return self.selenium.execute_script(js)
def get_image_setting_ko_field(self, index, setting, field='value'):
js = 'return catalogue.modules.mock_image.vm.image_settings()[%d].%s().%s' % (index, setting, field)
return self.selenium.execute_script(js)
def get_image_setting_ko_value(self, index, setting):
js = 'return catalogue.modules.mock_image.vm.image_settings()[%d].%s()' % (index, setting)
return self.selenium.execute_script(js)
def upload_params_file(self):
self.selenium.find_element_by_id('id_job_type-params_file').send_keys(self.params_path)
|
IntersectAustralia/asvo-tao
|
web/tao/tests/forms/job_type_form_tests.py
|
Python
|
gpl-3.0
| 10,308
|
[
"VisIt"
] |
bc8f26d5355a130c69828130b36a1ad8cf6c36db1dbef2bcde9d24c1bdd96a06
|
from pylab import *
from math import exp, sqrt
import numpy
import Image
# return the energy measure of the transformation from image I to J.
# Works for d-dimensional images I and J
def transformation_energy(I, J):
s = image_similarity(I, J)
r = image_regularity(J)
return (s, r, s + r)
# return the similarity between image I and J. Works for d-dimensional images
def image_similarity(I, J):
return ((I - J) ** 2).mean()
# return the regularity of image I. Works for d-dimensional images
def image_regularity(I):
G = gradient(I)
sum_of_G = sum([g ** 2 for g in G], 0)
return (sum_of_G).mean()
def image_regularity3(I):
gx, gy, gz = gradient(I)
return (gx ** 2 + gy ** 2 + gz ** 2).mean()
# add gaussian white noise to image I with random values taken from
# a gaussian distribution with mean 0 and sigma standard deviation
def add_gaussian_noise(I, sigma):
random = randn(*I.shape) * sigma
result = I + random
result = rescale_grayscale_image(result)
return result
# rescale an image to the 0..255 interval, and convert it to int8
def rescale_grayscale_image(i):
i = i - i.min()
scale = 255.0 / i.max()
return (i * scale).astype(uint8)
# save image i with filename and extension
def save_image(i, filename, extension):
rescaled = rescale_grayscale_image(i)
im = Image.fromarray(rescaled)
im.save(filename + '.' + extension)
def save_image_png(i, filename):
save_image(i, filename, 'png')
def rgb2gray_color_preserving(rgb):
r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
def rgb2gray(rgb):
r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
gray = (r + g + b) / 3
return gray
def saturate_values(image, min_value, max_value):
image[image < min_value] = min_value
image[image > max_value] = max_value
def circle_image2(w, h, radius):
x, y = mgrid[-w / 2:w / 2, -h / 2:h / 2]
return array(((x ** 2 + y ** 2) <= radius ** 2).astype(int))
def circle_image3(w, h, d, radius):
x, y, z = mgrid[-w / 2:w / 2, -h / 2:h / 2, -d / 2:d / 2]
return ((x ** 2 + y ** 2 + z ** 2) <= radius ** 2).astype(int)
|
facundoq/ipim
|
tp1/py/image.py
|
Python
|
gpl-3.0
| 2,196
|
[
"Gaussian"
] |
296919795fa00687f4de59825d9e5021edc2abc6f48d4dbd085868377f606ea9
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import print_function, absolute_import
from collections import defaultdict
from functools import partial
import logging
from textcode import analysis
from textcode.analysis import Token
logger = logging.getLogger(__name__)
# import sys
# logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
# logger.setLevel(logging.DEBUG)
DEBUG = False
DEBUG_CANDIDATES = False
DEBUG_ALIGN = False
if DEBUG or DEBUG_CANDIDATES or DEBUG_ALIGN:
from pprint import pformat
def posting_list():
"""
Per doc postings mapping a docid to a list of positions.
"""
return defaultdict(list)
def build_empty_indexes(ngram_len):
"""
Build and return the nested indexes structure.
The resulting index structure can be visualized this way::
1. The unigrams index is in indexes[1] with this structure:
{1:
{
u1: {index_docid1: [posting_list1], index_docid2: [posting_list2]},
u2: {index_docid1: [posting_list3], index_docid3: [posting_list4]}
}
}
2. The bigrams index is in indexes[2] with this structure:
{2:
{
u3, u4: {index_docid1: [posting_list7], index_docid2: [posting_list6]},
u5, u6: {index_docid1: [posting_list5], index_docid3: [posting_list8]}
}
}
and so on, until ngram_len
"""
indexes = {}
for i in range(1, ngram_len + 1):
indexes[i] = defaultdict(posting_list)
return indexes
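# Hedged illustration (the docid and token values are made up, not from the
# indexes themselves): for ngram_len=2 the call below returns {1: ..., 2: ...}
# where each entry maps an ngram value to a per-docid posting list of Tokens:
#
#   indexes = build_empty_indexes(2)
#   indexes[2][(u'name', u'is')][u'docid1'].append(some_token)  # some_token: a Token position
#   # indexes[2][(u'name', u'is')] is now {u'docid1': [some_token]}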
def tokenizers(ngram_len=analysis.DEFAULT_NGRAM_LEN):
"""
Return a tuple of specialized tokenizers given an `ngram_len` for each
of: (plain text, template text, query text).
"""
text = partial(analysis.ngram_tokenizer, template=False, ngram_len=ngram_len)
template = partial(analysis.ngram_tokenizer, template=True, ngram_len=ngram_len)
query = partial(analysis.multigram_tokenizer, ngram_len=ngram_len)
return text, template, query
class Index(object):
"""
An index is used to index reference documents and then match query documents
against these reference documents.
Terms used here:
- index_doc: indexed document
- index_docid: indexed document ID
- query_doc: query document
- query_docid: query document ID
We use several inverted indexes, each mapping a Token value to a list of
Token positions per indexed document ID (index_docid): there is one
index for every ngram length from one up to ngram_len.
These multiple indexes handle cases where a query document text to detect
cannot be matched with a given ngram length, for instance when there are
regions of text with fewer tokens than the ngram length, such as with very
short query documents or very short indexed documents. This approach ensures
that we can detect (very) short texts such as GPL_v2, which is only two
tokens once tokenized and could not be detected with an ngram length of
three.
Typically indexes for smaller ngrams length are rather small and contain
short (but important) documents.
Templated indexed documents (i.e. with gaps) are supported for all ngram
lengths.
These cases are supported:
- small index_doc or query_doc with fewer tokens than ngram length.
- small regions of text between two template regions with fewer tokens
than an ngram length.
- small regions of text at the beginning of an index_doc just before a
template region and with fewer tokens than an ngram length.
- small regions of text at the end of an index_doc and just after a template
region and with fewer tokens than an ngram length.
"""
def __init__(self, ngram_len=analysis.DEFAULT_NGRAM_LEN):
self.ngram_len = ngram_len
self.text_tknzr, self.template_tknzr, self.query_tknzr = tokenizers(ngram_len)
# the nested indexes structure
self.indexes = build_empty_indexes(ngram_len)
# a mapping of docid to a count of Tokens in an index_doc
self.tokens_count_per_index_doc = {}
def get_tokens_count(self, index_docid):
return self.tokens_count_per_index_doc[index_docid]
def set_tokens_count(self, index_docid, val):
self.tokens_count_per_index_doc[index_docid] = val
def index_one(self, docid, doc, template=False):
"""
Index one `doc` document where `docid` is a document identifier and
`doc` is an iterable of unicode text lines. Use the template tokenizer
if template is True.
"""
if template:
tokenizer = self.template_tknzr
else:
tokenizer = self.text_tknzr
self.index_one_from_tokens(docid, tokenizer(doc))
def index_one_from_tokens(self, docid, tokens):
"""
Index one document where `docid` is a document identifier and `tokens`
is an iterable of tokens.
"""
for token in tokens:
# token.value is a tuple of words, hence len(token.value) gets us
# the index to populate for a certain ngram length
token_ngramlen = len(token.value)
idx_for_ngramlen = self.indexes[token_ngramlen]
idx_for_ngramlen[token.value][docid].append(token)
# FIXME: this will repeatedly reset the tokens count
# and this is likely incorrect or wasteful
# set the tokens count for a doc to the end of the last token
# the token start/end is zero-based. So we increment the count by one
self.set_tokens_count(docid, token.end + 1)
def _index_many(self, docs, template=False):
"""
Index a `docs` iterable of (docid, doc) tuples where `docid` is a
document identifier and `doc` is an iterable of unicode text lines.
Use a template tokenizer if template is True.
"""
for docid, doc in docs:
self.index_one(docid, doc, template)
def match(self, query_doc, perfect=True):
"""
Return matches as a mapping of matched index docid to a list of tuples
(matched index doc pos, matched query doc pos).
Match `query_doc` against the index where `query_doc` is an iterable
of unicode text lines.
Only check for perfect, exact matches if `perfect` is True.
"""
if not query_doc:
return {}
# get candidates sharing at least one ngram
candidate_matches = self.candidates(query_doc)
if not candidate_matches:
return {}
all_results = defaultdict(list)
by_index_position_start = lambda x: x[0].start
# first find contiguous matches
for docid, matches in candidate_matches.items():
for idx, match in enumerate(sorted(matches, key=by_index_position_start)):
index_position, query_position = match
# perfect contiguous matches must start at index_position 0
if index_position.start != 0:
break
else:
# TODO: "if not perfect " if we are not starting at 0
# collect partial matches
pass
# start of a possible full match at index_position 0
subset = matches[idx + 1:]
if DEBUG:
lsub = len(subset) + 1
print(' Index.match: about to align %(lsub)r '
'candidate matches for %(docid)r:\n'
'index_position: %(index_position)r\nquery_position: %(query_position)r\n'
% locals())
matched_positions = self.align_matches(index_position, query_position, subset)
if DEBUG:
lmp = len(matched_positions)
print(' Index.match: aligned %(lmp)r matches for '
'%(docid)r. Now merging' % locals())
merged = merge_aligned_positions(matched_positions)
if DEBUG:
lmrg = len(merged)
print(' Index.match: merged %(lmp)r aligned '
'matches in %(lmrg)r positions for %(docid)r'
% locals())
print(' Index.match: merged positions are: '
'\n%s\n' % pformat(merged))
if merged:
all_results[docid].append(merged)
filtered = self.filter_matches(all_results, perfect)
return filtered
def align_matches(self, cur_index_position, cur_query_position, matches):
"""
Given a first match and subsequent potential matches, try to find a
longer match skipping eventual gaps to yield the best alignment.
This is how ngrams are handled with an ngram_len of 3:
-----------------------------------------------
With this index_doc and this query_doc:
index_doc: name is joker, name is joker
ngrams: name is joker, is joker name, joker name is, name is joker
0 1 2 3
query_doc: Hi my name is joker, name is joker yes.
ngrams: hi my name, my name is, name is joker, is joker name, joker name is, name is joker, is joker yes
0 1 2 3 4 5 6
will yield these candidates:
i0, q2
i0, q5 ==> this should be skipped because q5 does not follow q2
i1, q3
i2, q4
i3, q2 ==> this should be skipped because q2 does not follow q4
i3, q5
And this is how gaps are handled:
------------------------------
With this index_doc and this query_doc::
index_doc: my name is {{2 Joe}} the joker
i0 i1 i2-g2 i3 i4
query_doc: Yet, my name is Jane Heinz the joker.
q0 q1 q2 q3 q4 q5 q6 q7
will yield these candidates:
i0, q1
i1, q2
i2-g2, q3
i3, q6 : here q6 <= q3 + 1 + g2
i4, q7
With the same index_doc and this query_doc:
query_doc: Yet, my name is Jane the joker.
q0 q1 q2 q3 q4 q5 q6
will yield these candidates:
i0, q1
i1, q2
i2-g2, q3
i3, q5 : here q5 <= q3 + 1 + g2
i4, q7
"""
# add first match
matched = [(cur_index_position, cur_query_position,)]
cumulative_gap = 0
if DEBUG_ALIGN:
print()
for match in iter(matches):
prev_index_position, prev_query_position = matched[-1]
cumulative_gap += prev_index_position.gap
cur_index_position, cur_query_position = match
if DEBUG_ALIGN:
print(''.join(['Index.aligned match: positions \n',
' prev_index_position: %(start)r %(end)r %(value)r\n'
% prev_index_position._asdict(),
' cur_index_position : %(start)r %(end)r %(value)r\n'
% cur_index_position._asdict(),
' prev_query_position: %(start)r %(end)r %(value)r\n'
% prev_query_position._asdict(),
' cur_query_position : %(start)r %(end)r %(value)r'
% cur_query_position._asdict(),
]))
print('Index.aligned match: prev_index_position.start:%d < '
'cur_index_position.start:%d <= prev_index_position.end + 1:%d'
% (prev_index_position.start, cur_index_position.start,
prev_index_position.end + 1,))
if prev_index_position.start < cur_index_position.start <= prev_index_position.end + 1:
if DEBUG_ALIGN:
print('Index.aligned match: possible contiguous tokens')
# we are contiguous in index_position: are we contiguous in query_position?
if prev_query_position.start + 1 == cur_query_position.start:
if DEBUG_ALIGN:
print('Index.aligned match: Keeping contiguous '
'tokens: prev_query_position.start + 1 '
'== cur_query_position.start\n')
matched.append((cur_index_position, cur_query_position,))
continue
else:
# we are not contiguous, but could we be when gaps are
# considered?
if DEBUG_ALIGN:
print('Index.aligned match: '
'prev_query_position.start:%d < cur_query_position.start:%d '
'<= prev_query_position.start + 1 + cumulative_gap '
'+ self.ngram_len: %d' %
(prev_query_position.start, cur_query_position.start,
prev_query_position.start + cumulative_gap
+ self.ngram_len,))
if (prev_query_position.start < cur_query_position.start and
cur_query_position.start <= (prev_query_position.start + cumulative_gap + self.ngram_len)):
# we are contiguous gap-wise, keep this match
if DEBUG_ALIGN:
print('Index.aligned match: '
'Keeping gap-wise contiguous tokens\n')
matched.append((cur_index_position, cur_query_position,))
continue
else:
if DEBUG_ALIGN:
print('Index.aligned match: Skipping tokens\n')
continue
return matched
def candidates(self, query_doc):
"""
Find candidate matches for query_doc against the index, where query_doc is
an iterable of unicode text lines. Return candidate matches as a
mapping of:
matched index docid -> sorted set of tuples (matched index doc pos,
matched query doc pos).
"""
if DEBUG_CANDIDATES:
print()
print('=>Index.candidates: entering')
query_doc = list(query_doc)
print(' Index.candidates: Query doc has %d lines.'
% len(query_doc))
print(u''.join(query_doc))
print()
query_doc = iter(query_doc)
# map index_docid -> sorted set of tuples (index_position, query_position)
candidate_matches = defaultdict(list)
# iterate over query_doc tokens using query_tknzr
for qtoken in self.query_tknzr(query_doc):
if DEBUG_CANDIDATES:
print(' Index.candidates: processing\n %(qtoken)r' % locals())
# query the proper inverted index for the value len, aka the ngram length
matches = self.indexes[len(qtoken.value)].get(qtoken.value)
if not matches:
continue
# accumulate matches for each docid
for docid, postings in matches.items():
for itoken in postings:
if DEBUG_CANDIDATES:
print(' Index.candidates: %(docid)r matched '
'from:\n %(itoken)r\n %(qtoken)r'
% locals())
candidate_matches[docid].append((itoken, qtoken))
return candidate_matches
def filter_matches(self, all_matches, perfect=True):
"""
Filter matches such as non-perfect or overlapping matches. If perfect
is True, return only perfect matches.
"""
if DEBUG:
print('=>Index.filter_matches entering with perfect %r' % perfect)
if not perfect:
# TODO: implement me
return all_matches
else:
# keep only perfect matches
kept_results = defaultdict(list)
for docid, matches in all_matches.iteritems():
tok_cnt = self.get_tokens_count(docid)
for index_position, query_position in matches:
# perfect matches length must match the index_doc token count
# the token count is 1-based, the end is zero-based
if tok_cnt == index_position.end + 1:
kept_results[docid].append((index_position, query_position))
return kept_results
def merge_aligned_positions(positions):
"""
Given a sequence of tuples of (index_doc, query_doc) Token positions, return a single
tuple of new (index_doc, query_doc) Token positions representing the merged positions
from every index_position and every query_position.
"""
index_docs, query_docs = zip(*positions)
return merge_positions(index_docs), merge_positions(query_docs)
def merge_positions(positions):
"""
Given an iterable of Token positions, return a new merged Token position
computed from the first and last positions (do not keep gap and token
values). Does not check if positions are contiguous or overlapping.
"""
positions = sorted(positions)
first = positions[0]
last = positions[-1]
return Token(start=first.start, end=last.end,
start_line=first.start_line, start_char=first.start_char,
end_line=last.end_line, end_char=last.end_char)
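# Hedged usage sketch (paths and docids are illustrative; both documents are
# expected to be iterables of unicode text lines):
#
#   idx = Index(ngram_len=3)
#   with open('gpl-2.0.LICENSE') as license_lines:
#       idx.index_one('gpl-2.0', license_lines, template=False)
#   with open('queried_source_file.c') as query_lines:
#       matches = idx.match(query_lines, perfect=True)
#   # matches maps each matched index docid to merged (index position, query position) pairs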
|
pierrelapointe/scancode-toolkit
|
src/licensedcode/index.py
|
Python
|
apache-2.0
| 18,939
|
[
"VisIt"
] |
0e8c6fd6e5d543cb68238287ee8ec82ec19b64c709e63d0d923b6d7e60037867
|
from django.contrib.auth.models import User
from lettuce import step, world
from notification_prefs import NOTIFICATION_PREF_KEY
from user_api.models import UserPreference
USERNAME = "robot"
UNSUB_TOKEN = "av9E-14sAP1bVBRCPbrTHQ=="
@step(u"I have notifications enabled")
def enable_notifications(step_):
user = User.objects.get(username=USERNAME)
UserPreference.objects.create(user=user, key=NOTIFICATION_PREF_KEY, value=UNSUB_TOKEN)
@step(u"I access my unsubscribe url")
def access_unsubscribe_url(step_):
world.visit("/notification_prefs/unsubscribe/{0}/".format(UNSUB_TOKEN))
@step(u"my notifications should be disabled")
def notifications_should_be_disabled(step_):
user = User.objects.get(username=USERNAME)
assert not UserPreference.objects.filter(user=user, key=NOTIFICATION_PREF_KEY).exists()
|
yokose-ks/edx-platform
|
lms/djangoapps/notification_prefs/features/unsubscribe.py
|
Python
|
agpl-3.0
| 829
|
[
"VisIt"
] |
fc50ccea07f53d5af3d3f2336111d1b7f60a2c709cdf6c9e2e871cb48001c1bb
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
import numpy as np
tutorial, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
"@TUTORIALS_DIR@/ferrofluid/ferrofluid_part2.py",
equil_steps=200, equil_rounds=10, loops=500, alphas=[0.5])
@skipIfMissingFeatures
class Tutorial(ut.TestCase):
system = tutorial.system
def test(self):
langevin_magnetization_curve = tutorial.L(np.array(tutorial.alphas))
self.assertGreater(
tutorial.magnetization_para[0],
tutorial.magnetization_perp[0])
self.assertGreater(
tutorial.magnetization_para[0] / tutorial.N_PART,
langevin_magnetization_curve)
self.assertLess(
tutorial.magnetization_perp[0] / tutorial.N_PART,
langevin_magnetization_curve)
if __name__ == "__main__":
ut.main()
|
espressomd/espresso
|
testsuite/scripts/tutorials/test_ferrofluid_2.py
|
Python
|
gpl-3.0
| 1,566
|
[
"ESPResSo"
] |
21738e2d291d71df2ee3cd9028a517a451631b575a0b110d4d82f64e60fb9fc2
|
"""Provides utilities for creating and working with Databases in ESPEI
"""
import logging
from typing import Dict, Union
from pycalphad import Database, variables as v
import espei.refdata
from espei.utils import extract_aliases
_log = logging.getLogger(__name__)
def _get_ser_data(element, ref_state, fallback_ref_state="SGTE91") -> Dict[str, Union[str, float]]:
"""Return a dictionary of the stable element reference (SER) data.
If no SER data is found, returns an empty dictionary.
"""
ser_ref_state = ref_state + "SER"
# Return an empty dict for backwards compatibility, since the SER data may not exist
ser_dict = getattr(espei.refdata, ser_ref_state, {})
fallback_ser_ref_state = fallback_ref_state + "SER"
fallback_ser_dict = getattr(espei.refdata, fallback_ser_ref_state)
el_ser_data = ser_dict.get(element)
if el_ser_data is None and ref_state == fallback_ref_state:
# No data found, no fallback alternative
_log.warning("%s has no entry in the %s reference data. Fitting formation energies will not be possible.", element, ser_ref_state)
elif el_ser_data is None:
# No data found, try the fallback
el_ser_data = fallback_ser_dict.get(element)
if el_ser_data is None:
# No data found in the fallback
_log.warning("%s has no entry in the %s reference data nor in the %s fallback reference data. Fitting formation energies will not be possible.", element, ser_ref_state + "SER", fallback_ser_ref_state)
return {}
else:
# Data found in the fallback
_log.trace("%s has no entry in the %s reference data, but was available in the %s fallback reference data.", element, ser_ref_state + "SER", fallback_ser_ref_state)
if el_ser_data is not None:
return el_ser_data
else:
return {}
def initialize_database(phase_models, ref_state, dbf=None, fallback_ref_state="SGTE91"):
"""Return a Database boostraped with elements, species, phases and unary lattice stabilities.
Parameters
----------
phase_models : Dict[str, Any]
Dictionary of components and phases to fit.
ref_state : str
String of the reference data to use, e.g. 'SGTE91' or 'SR2016'
dbf : Optional[Database]
Initial pycalphad Database that can have parameters that would not be fit by ESPEI
fallback_ref_state : str
String of the reference data to use for SER data, defaults to 'SGTE91'
Returns
-------
Database
A new pycalphad Database object, or a modified one if it was given.
"""
if dbf is None:
dbf = Database()
lattice_stabilities = getattr(espei.refdata, ref_state)
ser_stability = getattr(espei.refdata, ref_state + "Stable")
aliases = extract_aliases(phase_models)
phases = sorted({ph.upper() for ph in phase_models["phases"].keys()})
elements = {el.upper() for el in phase_models["components"]}
dbf.elements.update(elements)
dbf.species.update({v.Species(el, {el: 1}, 0) for el in elements})
# Add SER reference data for this element
for element in dbf.elements:
if element in dbf.refstates:
continue # Do not clobber user reference states
el_ser_data = _get_ser_data(element, ref_state, fallback_ref_state=fallback_ref_state)
# Try to look up the alias that we are using in this fitting
el_ser_data["phase"] = aliases.get(el_ser_data["phase"], el_ser_data["phase"])
# Don't warn if the element is a species with no atoms because per-atom
# formation energies are not possible (e.g. VA (VACUUM) or /- (ELECTRON_GAS))
if el_ser_data["phase"] not in phases and v.Species(element).number_of_atoms != 0:
# We have the Gibbs energy expression that we need in the reference
# data, but this phase is not a candidate in the phase models. The
# phase won't be added to the database, so looking up the phases's
# energy won't work.
_log.warning(
"The reference phase for %s, %s, is not in the supplied phase models "
"and won't be added to the Database phases. Fitting formation "
"energies will not be possible.", element, el_ser_data["phase"]
)
dbf.refstates[element] = el_ser_data
# Add the phases
for phase_name, phase_data in phase_models['phases'].items():
if phase_name not in dbf.phases.keys(): # Do not clobber user phases
# TODO: Need to support model hints for: magnetic, order-disorder, etc.
site_ratios = phase_data['sublattice_site_ratios']
subl_model = phase_data['sublattice_model']
# Only generate the sublattice model for active components
subl_model = [sorted(set(subl).intersection(dbf.elements)) for subl in subl_model]
if all(len(subl) > 0 for subl in subl_model):
dbf.add_phase(phase_name, dict(), site_ratios)
dbf.add_phase_constituents(phase_name, subl_model)
# Add the GHSER functions to the Database
for element in dbf.elements:
# Use setdefault here to not clobber user-provided functions
if element == "VA":
dbf.symbols.setdefault("GHSERVA", 0)
else:
# note that `c.upper()*2)[:2]` returns "AL" for "Al" and "BB" for "B"
# Using this ensures that GHSER functions will be unique, e.g.
# GHSERC would be an abbreviation for GHSERCA.
sym_name = "GHSER" + (element.upper()*2)[:2]
dbf.symbols.setdefault(sym_name, ser_stability[element])
return dbf
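# Hedged usage sketch (the phase_models dict is illustrative, not taken from ESPEI's docs):
#
#   phase_models = {
#       "components": ["CU", "MG", "VA"],
#       "phases": {
#           "FCC_A1": {"sublattice_model": [["CU", "MG"], ["VA"]],
#                      "sublattice_site_ratios": [1, 1]},
#       },
#   }
#   dbf = initialize_database(phase_models, "SGTE91")
#   # dbf now holds the elements, species, the FCC_A1 phase, SER reference states,
#   # and GHSER* symbols needed before parameter fitting.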
|
PhasesResearchLab/ESPEI
|
espei/database_utils.py
|
Python
|
mit
| 5,678
|
[
"pycalphad"
] |
54b66c18e2afce300dbaa951daa3f8998c5fcfd78e86b6004f76ae1ecac68230
|
#
# This file is part of GNU Enterprise.
#
# GNU Enterprise is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2, or (at your option) any later version.
#
# GNU Enterprise is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with program; see the file COPYING. If not,
# write to the Free Software Foundation, Inc., 59 Temple Place
# - Suite 330, Boston, MA 02111-1307, USA.
#
# Copyright 2002-2007 Free Software Foundation
#
# FILE:
# GMimeTypes.py
#
# DESCRIPTION:
# Contains a mapping of Mime-Types to their extensions
#
# NOTES:
#
import string
#
# Return a default extension for a mimetype...
# will try to build one if we have no clue.
#
def getExtension(mimetype):
try:
fileExt = _mimetypes[mimetype][0]
except:
fileExt = string.split(mimetype,'/')[-1]
if fileExt[:2] == 'x-':
fileExt = fileExt[2:]
return fileExt
#
# Return a tuple of valid/common extensions for a mimetype
#
def getExtensions(mimetype):
try:
return _mimetypes[mimetype]
except:
return ()
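# Illustrative examples (results follow from the table below):
#
#   getExtension('application/pdf') -> 'pdf'
#   getExtension('application/x-unknown') -> 'unknown' (no entry, built from the subtype)
#   getExtensions('image/jpeg') -> ('jpg', 'jpeg', 'jpe')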
#
# Originally ripped from the Debian mime-support package.
#
_mimetypes = {
'application/activemessage': (),
'application/andrew-inset': ('ez',),
'application/applefile': (),
'application/atomicmail': (),
'application/batch-SMTP': (),
'application/beep+xml': (),
'application/cals-1840': (),
'application/commonground': (),
'application/cu-seeme': ('csm','cu'),
'application/cybercash': (),
'application/dca-rft': (),
'application/dec-dx': (),
'application/dsptype': ('tsp',),
'application/dvcs': (),
'application/EDI-Consent': (),
'application/EDIFACT': (),
'application/EDI-X12': (),
'application/eshop': (),
'application/font-tdpfr': (),
'application/futuresplash': ('spl',),
'application/ghostview': (),
'application/http': (),
'application/hyperstudio': (),
'application/iges': (),
'application/index': (),
'application/index.cmd': (),
'application/index.obj': (),
'application/index.response': (),
'application/index.vnd': (),
'application/iotp': (),
'application/ipp': (),
'application/isup': (),
'application/mac-compactpro': ('cpt',),
'application/marc': (),
'application/mac-binhex40': ('hqx',),
'application/macwriteii': (),
'application/mathematica': ('nb',),
'application/mathematica-old': (),
'application/msaccess': ('mdb',),
'application/msword': ('doc','dot'),
'application/news-message-id': (),
'application/news-transmission': (),
'application/octet-stream': ('bin',),
'application/ocsp-request': (),
'application/ocsp-response': (),
'application/oda': ('oda',),
'application/parityfec': (),
'application/pgp-encrypted': (),
'application/pgp-keys': (),
'application/pdf': ('pdf',),
'application/pgp-signature': ('pgp',),
'application/pkcs10': (),
'application/pkcs7-mime': (),
'application/pkcs7-signature': (),
'application/pkix-cert': (),
'application/pkixcmp': (),
'application/pkix-crl': (),
'application/postscript': ('ps','ai','eps'),
'application/prs.alvestrand.titrax-sheet': (),
'application/prs.cww': (),
'application/prs.nprend': (),
'application/qsig': (),
'application/riscos': (),
'application/remote-printing': (),
'application/rtf': ('rtf',),
'application/sdp': (),
'application/set-payment': (),
'application/set-payment-initiation': (),
'application/set-registration': (),
'application/set-registration-initiation': (),
'application/sgml': (),
'application/sgml-open-catalog': (),
'application/sieve': (),
'application/slate': (),
'application/smil': ('smi','smil'),
'application/timestamp-query': (),
'application/timestamp-reply': (),
'application/vemmi': (),
'application/vnd.3M.Post-it-Notes': (),
'application/vnd.accpac.simply.aso': (),
'application/vnd.accpac.simply.imp': (),
'application/vnd.acucobol': (),
'application/vnd.aether.imp': (),
'application/vnd.anser-web-certificate-issue-initiation': (),
'application/vnd.anser-web-funds-transfer-initiation': (),
'application/vnd.audiograph': (),
'application/vnd.bmi': (),
'application/vnd.businessobjects': (),
'application/vnd.canon-cpdl': (),
'application/vnd.canon-lips': (),
'application/vnd.claymore': (),
'application/vnd.commerce-battelle': (),
'application/vnd.commonspace': (),
'application/vnd.comsocaller': (),
'application/vnd.contact.cmsg': (),
'application/vnd.cosmocaller': (),
'application/vnd.ctc-posml': (),
'application/vnd.cups-postscript': (),
'application/vnd.cups-raster': (),
'application/vnd.cups-raw': (),
'application/vnd.cybank': (),
'application/vnd.dna': (),
'application/vnd.dpgraph': (),
'application/vnd.dxr': (),
'application/vnd.ecdis-update': (),
'application/vnd.ecowin.chart': (),
'application/vnd.ecowin.filerequest': (),
'application/vnd.ecowin.fileupdate': (),
'application/vnd.ecowin.series': (),
'application/vnd.ecowin.seriesrequest': (),
'application/vnd.ecowin.seriesupdate': (),
'application/vnd.enliven': (),
'application/vnd.epson.esf': (),
'application/vnd.epson.msf': (),
'application/vnd.epson.quickanime': (),
'application/vnd.epson.salt': (),
'application/vnd.epson.ssf': (),
'application/vnd.ericsson.quickcall': (),
'application/vnd.eudora.data': (),
'application/vnd.fdf': (),
'application/vnd.ffsns': (),
'application/vnd.FloGraphIt': (),
'application/vnd.framemaker': (),
'application/vnd.fsc.weblaunch': (),
'application/vnd.fujitsu.oasys': (),
'application/vnd.fujitsu.oasys2': (),
'application/vnd.fujitsu.oasys3': (),
'application/vnd.fujitsu.oasysgp': (),
'application/vnd.fujitsu.oasysprs': (),
'application/vnd.fujixerox.ddd': (),
'application/vnd.fujixerox.docuworks': (),
'application/vnd.fujixerox.docuworks.binder': (),
'application/vnd.fut-misnet': (),
'application/vnd.grafeq': (),
'application/vnd.groove-account': (),
'application/vnd.groove-identity-message': (),
'application/vnd.groove-injector': (),
'application/vnd.groove-tool-message': (),
'application/vnd.groove-tool-template': (),
'application/vnd.groove-vcard': (),
'application/vnd.hhe.lesson-player': (),
'application/vnd.hp-HPGL': (),
'application/vnd.hp-hpid': (),
'application/vnd.hp-hps': (),
'application/vnd.hp-PCL': (),
'application/vnd.hp-PCLXL': (),
'application/vnd.httphone': (),
'application/vnd.hzn-3d-crossword': (),
'application/vnd.ibm.afplinedata': (),
'application/vnd.ibm.MiniPay': (),
'application/vnd.ibm.modcap': (),
'application/vnd.informix-visionary': (),
'application/vnd.intercon.formnet': (),
'application/vnd.intertrust.digibox': (),
'application/vnd.intertrust.nncp': (),
'application/vnd.intu.qbo': (),
'application/vnd.intu.qfx': (),
'application/vnd.irepository.package+xml': (),
'application/vnd.is-xpr': (),
'application/vnd.japannet-directory-service': (),
'application/vnd.japannet-jpnstore-wakeup': (),
'application/vnd.japannet-payment-wakeup': (),
'application/vnd.japannet-registration': (),
'application/vnd.japannet-registration-wakeup': (),
'application/vnd.japannet-setstore-wakeup': (),
'application/vnd.japannet-verification': (),
'application/vnd.japannet-verification-wakeup': (),
'application/vnd.koan': (),
'application/vnd.lotus-1-2-3': (),
'application/vnd.lotus-approach': (),
'application/vnd.lotus-freelance': (),
'application/vnd.lotus-notes': (),
'application/vnd.lotus-organizer': (),
'application/vnd.lotus-screencam': (),
'application/vnd.lotus-wordpro': (),
'application/vnd.mcd': (),
'application/vnd.mediastation.cdkey': (),
'application/vnd.meridian-slingshot': (),
'application/vnd.mif': ('mif',),
'application/vnd.minisoft-hp3000-save': (),
'application/vnd.mitsubishi.misty-guard.trustweb': (),
'application/vnd.mobius.daf': (),
'application/vnd.mobius.dis': (),
'application/vnd.mobius.msl': (),
'application/vnd.mobius.plc': (),
'application/vnd.mobius.txf': (),
'application/vnd.motorola.flexsuite': (),
'application/vnd.motorola.flexsuite.adsi': (),
'application/vnd.motorola.flexsuite.fis': (),
'application/vnd.motorola.flexsuite.gotap': (),
'application/vnd.motorola.flexsuite.kmr': (),
'application/vnd.motorola.flexsuite.ttc': (),
'application/vnd.motorola.flexsuite.wem': (),
'application/vnd.mozilla.xul+xml': (),
'application/vnd.ms-artgalry': (),
'application/vnd.ms-asf': (),
'application/vnd.mseq': (),
'application/vnd.ms-excel': ('xls','xlb'),
'application/vnd.msign': (),
'application/vnd.ms-lrm': (),
'application/vnd.ms-powerpoint': ('ppt','pps','pot'),
'application/vnd.ms-project': (),
'application/vnd.ms-tnef': (),
'application/vnd.ms-works': (),
'application/vnd.musician': (),
'application/vnd.music-niff': (),
'application/vnd.netfpx': (),
'application/vnd.noblenet-directory': (),
'application/vnd.noblenet-sealer': (),
'application/vnd.noblenet-web': (),
'application/vnd.novadigm.EDM': (),
'application/vnd.novadigm.EDX': (),
'application/vnd.novadigm.EXT': (),
'application/vnd.osa.netdeploy': (),
'application/vnd.palm': (),
'application/vnd.pg.format': (),
'application/vnd.pg.osasli': (),
'application/vnd.powerbuilder6': (),
'application/vnd.powerbuilder6-s': (),
'application/vnd.powerbuilder7': (),
'application/vnd.powerbuilder75': (),
'application/vnd.powerbuilder75-s': (),
'application/vnd.powerbuilder7-s': (),
'application/vnd.previewsystems.box': (),
'application/vnd.publishare-delta-tree': (),
'application/vnd.pvi.ptid1': (),
'application/vnd.pwg-xhtml-print+xml': (),
'application/vnd.rapid': (),
'application/vnd.s3sms': (),
'application/vnd.seemail': (),
'application/vnd.shana.informed.formdata': (),
'application/vnd.shana.informed.formtemplate': (),
'application/vnd.shana.informed.interchange': (),
'application/vnd.shana.informed.package': (),
'application/vnd.sss-cod': (),
'application/vnd.sss-dtf': (),
'application/vnd.sss-ntf': (),
'application/vnd.stardivision.writer': ('sdw','vor'),
'application/vnd.stardivision.writer-global': ('sgl',),
'application/vnd.stardivision.calc': ('sdc',),
'application/vnd.stardivision.draw': ('sda',),
'application/vnd.stardivision.impress': ('sdd',),
'application/vnd.stardivision.impress-packed': ('sdp',),
'application/vnd.stardivision.math': ('smf',),
'application/vnd.stardivision.chart': ('sds',),
'application/vnd.stardivision.mail': ('smd',),
'application/vnd.street-stream': (),
'application/vnd.svd': (),
'application/vnd.swiftview-ics': (),
'application/vnd.triscape.mxs': (),
'application/vnd.trueapp': (),
'application/vnd.truedoc': (),
'application/vnd.tve-trigger': (),
'application/vnd.ufdl': (),
'application/vnd.uplanet.alert': (),
'application/vnd.uplanet.alert-wbxml': (),
'application/vnd.uplanet.bearer-choice': (),
'application/vnd.uplanet.bearer-choice-wbxml': (),
'application/vnd.uplanet.cacheop': (),
'application/vnd.uplanet.cacheop-wbxml': (),
'application/vnd.uplanet.channel': (),
'application/vnd.uplanet.channel-wbxml': (),
'application/vnd.uplanet.list': (),
'application/vnd.uplanet.listcmd': (),
'application/vnd.uplanet.listcmd-wbxml': (),
'application/vnd.uplanet.list-wbxml': (),
'application/vnd.uplanet.signal': (),
'application/vnd.vcx': (),
'application/vnd.vectorworks': (),
'application/vnd.vidsoft.vidconference': (),
'application/vnd.visio': (),
'application/vnd.vividence.scriptfile': (),
'application/vnd.wap.sic': (),
'application/vnd.wap.slc': (),
'application/vnd.wap.wbxml': ('wbxml',),
'application/vnd.wap.wmlc': ('wmlc',),
'application/vnd.wap.wmlscriptc': ('wmlsc',),
'application/vnd.webturbo': (),
'application/vnd.wrq-hp3000-labelled': (),
'application/vnd.wt.stf': (),
'application/vnd.xara': (),
'application/vnd.xfdl': (),
'application/vnd.yellowriver-custom-menu': (),
'application/whoispp-query': (),
'application/whoispp-response': (),
'application/wita': (),
'application/wordperfect5.1': ('wp5',),
'application/zip': ('zip',),
'application/x-123': ('wk',),
'application/x400-bp': (),
'application/x-bcpio': ('bcpio',),
'application/x-cdlink': ('vcd',),
'application/x-chess-pgn': ('pgn',),
'application/x-core': (),
'application/x-cpio': ('cpio',),
'application/x-csh': ('csh',),
'application/x-debian-package': ('deb',),
'application/x-director': ('dcr','dir','dxr'),
'application/x-doom': ('wad',),
'application/x-dms': ('dms',),
'application/x-dvi': ('dvi',),
'application/x-executable': (),
'application/x-font': ('pfa','pfb','gsf','pcf','pcf.Z'),
'application/x-futuresplash': ('spl',),
'application/x-gnumeric': ('gnumeric',),
'application/x-gtar': ('gtar','tgz','taz'),
'application/x-hdf': ('hdf',),
'application/x-httpd-php': ('phtml','pht','php'),
'application/x-httpd-php-source': ('phps',),
'application/x-httpd-php3': ('php3',),
'application/x-httpd-php3-preprocessed': ('php3p',),
'application/x-httpd-php4': ('php4',),
'application/x-ica': ('ica',),
'application/x-java-applet': (),
'application/x-java-archive': ('jar',),
'application/x-java-bean': (),
'application/x-java-jnlp-file': ('jnlp',),
'application/x-java-serialized-object': ('ser',),
'application/x-java-vm': ('class',),
'application/x-javascript': ('js',),
'application/x-kdelnk': (),
'application/x-kchart': ('chrt',),
'application/x-killustrator': ('kil',),
'application/x-kontour': ('kil',),
'application/x-kpresenter': ('kpr','kpt'),
'application/x-koan': ('skp','skd','skt','skm'),
'application/x-kspread': ('ksp',),
'application/x-kword': ('kwd','kwt'),
'application/x-latex': ('latex',),
'application/x-lha': ('lha',),
'application/x-lzh': ('lzh',),
'application/x-lzx': ('lzx',),
'application/x-maker': ('frm','maker','frame','fm','fb','book','fbdoc'),
'application/x-mif': ('mif',),
'application/xml': (),
'application/xml-dtd': (),
'application/xml-external-parsed-entity': (),
'application/x-msdos-program': ('com','exe','bat','dll'),
'application/x-msi': ('msi',),
'application/x-netcdf': ('nc','cdf'),
'application/x-ns-proxy-autoconfig': ('pac',),
'application/x-object': ('o',),
'application/x-ogg': ('ogg',),
'application/x-oz-application': ('oza',),
'application/x-perl': ('pl','pm'),
'application/x-pkcs7-crl': ('crl',),
'application/x-redhat-package-manager': ('rpm',),
'application/x-rx': (),
'application/x-shar': ('shar',),
'application/x-shellscript': (),
'application/x-shockwave-flash': ('swf','swfl'),
'application/x-sh': ('sh',),
'application/x-stuffit': ('sit',),
'application/x-sv4cpio': ('sv4cpio',),
'application/x-sv4crc': ('sv4crc',),
'application/x-tar': ('tar',),
'application/x-tcl': ('tcl',),
'application/x-tex': ('tex',),
'application/x-tex-gf': ('gf',),
'application/x-tex-pk': ('pk',),
'application/x-texinfo': ('texinfo','texi'),
'application/x-trash': ('~','%','bak','old','sik'),
'application/x-troff': ('t','tr','roff'),
'application/x-troff-man': ('man',),
'application/x-troff-me': ('me',),
'application/x-troff-ms': ('ms',),
'application/x-ustar': ('ustar',),
'application/x-wais-source': ('src',),
'application/x-wingz': ('wz',),
'application/x-x509-ca-cert': ('crt',),
'application/x-xfig': ('fig',),
'audio/32kadpcm': (),
'audio/basic': ('au','snd'),
'audio/g.722.1': (),
'audio/l16': (),
'audio/midi': ('mid','midi','kar'),
'audio/mp4a-latm': (),
'audio/mpa-robust': (),
'audio/mpeg': ('mpga','mpega','mp2','mp3'),
'audio/mpegurl': ('m3u',),
'audio/parityfec': (),
'audio/prs.sid': ('sid',),
'audio/telephone-event': (),
'audio/tone': (),
'audio/vnd.cisco.nse': (),
'audio/vnd.cns.anp1': (),
'audio/vnd.cns.inf1': (),
'audio/vnd.digital-winds': (),
'audio/vnd.everad.plj': (),
'audio/vnd.lucent.voice': (),
'audio/vnd.nortel.vbk': (),
'audio/vnd.nuera.ecelp4800': (),
'audio/vnd.nuera.ecelp7470': (),
'audio/vnd.nuera.ecelp9600': (),
'audio/vnd.octel.sbc': (),
'audio/vnd.qcelp': (),
'audio/vnd.rhetorex.32kadpcm': (),
'audio/vnd.vmx.cvsd': (),
'audio/x-aiff': ('aif','aiff','aifc'),
'audio/x-gsm': ('gsm',),
'audio/x-mpegurl': ('m3u',),
'audio/x-pn-realaudio-plugin': ('rpm',),
'audio/x-pn-realaudio': ('ra','rm','ram'),
'audio/x-realaudio': ('ra',),
'audio/x-scpls': ('pls',),
'audio/x-wav': ('wav',),
'chemical/x-pdb': ('pdb',),
'chemical/x-xyz': ('xyz',),
'image/bmp': ('bmp',),
'image/cgm': (),
'image/g3fax': (),
'image/gif': ('gif',),
'image/ief': ('ief',),
'image/jpeg': ('jpg','jpeg','jpe'),
'image/naplps': (),
'image/pcx': ('pcx',),
'image/png': ('png',),
'image/prs.btif': (),
'image/prs.pti': (),
'image/svg+xml': ('svg','svgz'),
'image/tiff': ('tif','tiff'),
'image/vnd.cns.inf2': (),
'image/vnd.dwg': (),
'image/vnd.dxf': (),
'image/vnd.fastbidsheet': (),
'image/vnd.fpx': (),
'image/vnd.fst': (),
'image/vnd.fujixerox.edmics-mmr': (),
'image/vnd.fujixerox.edmics-rlc': (),
'image/vnd.mix': (),
'image/vnd.net-fpx': (),
'image/vnd.svf': (),
'image/vnd.wap.wbmp': ('wbmp',),
'image/vnd.xiff': (),
'image/x-cmu-raster': ('ras',),
'image/x-coreldraw': ('cdr',),
'image/x-coreldrawpattern': ('pat',),
'image/x-coreldrawtemplate': ('cdt',),
'image/x-corelphotopaint': ('cpt',),
'image/x-djvu': ('djvu','djv'),
'image/x-jng': ('jng',),
'image/x-ms-bmp': ('bmp',),
'image/x-portable-anymap': ('pnm',),
'image/x-portable-bitmap': ('pbm',),
'image/x-portable-graymap': ('pgm',),
'image/x-portable-pixmap': ('ppm',),
'image/x-rgb': ('rgb',),
'image/x-xbitmap': ('xbm',),
'image/x-xpixmap': ('xpm',),
'image/x-xwindowdump': ('xwd',),
'inode/chardevice': (),
'inode/blockdevice': (),
'inode/directory-locked': (),
'inode/directory': (),
'inode/fifo': (),
'inode/socket': (),
'message/delivery-status': (),
'message/disposition-notification': (),
'message/external-body': (),
'message/http': (),
'message/s-http': (),
'message/news': (),
'message/partial': (),
'message/rfc822': (),
'model/iges': ('igs','iges'),
'model/mesh': ('msh','mesh','silo'),
'model/vnd.dwf': (),
'model/vnd.flatland.3dml': (),
'model/vnd.gdl': (),
'model/vnd.gs-gdl': (),
'model/vnd.gtw': (),
'model/vnd.mts': (),
'model/vnd.vtu': (),
'model/vrml': ('wrl','vrml'),
'multipart/alternative': (),
'multipart/appledouble': (),
'multipart/byteranges': (),
'multipart/digest': (),
'multipart/encrypted': (),
'multipart/form-data': (),
'multipart/header-set': (),
'multipart/mixed': (),
'multipart/parallel': (),
'multipart/related': (),
'multipart/report': (),
'multipart/signed': (),
'multipart/voice-message': (),
'text/calendar': (),
'text/comma-separated-values': ('csv',),
'text/css': ('css',),
'text/directory': (),
'text/english': (),
'text/enriched': (),
'text/html': ('html','htm','xhtml'),
'text/mathml': ('mml',),
'text/parityfec': (),
'text/plain': ('txt','asc','text','diff'),
'text/prs.lines.tag': (),
'text/rfc822-headers': (),
'text/richtext': ('rtx',),
'text/rtf': ('rtf',),
'text/t140': (),
'text/tab-separated-values': ('tsv',),
'text/uri-list': (),
'text/vnd.abc': (),
'text/vnd.curl': (),
'text/vnd.DMClientScript': (),
'text/vnd.flatland.3dml': (),
'text/vnd.fly': (),
'text/vnd.fmi.flexstor': (),
'text/vnd.in3d.3dml': (),
'text/vnd.in3d.spot': (),
'text/vnd.IPTC.NewsML': (),
'text/vnd.IPTC.NITF': (),
'text/vnd.latex-z': (),
'text/vnd.motorola.reflex': (),
'text/vnd.ms-mediapackage': (),
'text/vnd.wap.si': (),
'text/vnd.wap.sl': (),
'text/vnd.wap.wml': ('wml',),
'text/vnd.wap.wmlscript': ('wmls',),
'text/xml': ('xml','xsl'),
'text/x-c++hdr': ('h++','hpp','hxx','hh'),
'text/x-c++src': ('c++','cpp','cxx','cc'),
'text/x-chdr': ('h',),
'text/x-crontab': (),
'text/x-csh': ('csh',),
'text/x-csrc': ('c',),
'text/x-java': ('java',),
'text/x-makefile': (),
'text/xml-external-parsed-entity': (),
'text/x-moc': ('moc',),
'text/x-pascal': ('p','pas'),
'text/x-setext': ('etx',),
'text/x-sh': ('sh',),
'text/x-tcl': ('tcl','tk'),
'text/x-tex': ('tex','ltx','sty','cls'),
'text/x-vcalendar': ('vcs',),
'text/x-vcard': ('vcf',),
'video/dl': ('dl',),
'video/fli': ('fli',),
'video/gl': ('gl',),
'video/mpeg': ('mpeg','mpg','mpe'),
'video/quicktime': ('qt','mov'),
'video/mp4v-es': (),
'video/parityfec': (),
'video/pointer': (),
'video/vnd.fvt': (),
'video/vnd.motorola.video': (),
'video/vnd.motorola.videop': (),
'video/vnd.mpegurl': ('mxu',),
'video/vnd.mts': (),
'video/vnd.nokia.interleaved-multimedia': (),
'video/vnd.vivo': (),
'video/x-mng': ('mng',),
'video/x-ms-asf': ('asf','asx'),
'video/x-msvideo': ('avi',),
'video/x-sgi-movie': ('movie',),
'x-conference/x-cooltalk': ('ice',),
'x-world/x-vrml': ('vrm','vrml','wrl'),
}
|
HarmonyEnterpriseSolutions/harmony-platform
|
src/gnue/common/utils/GMimeTypes.py
|
Python
|
gpl-2.0
| 30,650
|
[
"NetCDF"
] |
77f09e36e378eeed6ecfe701bf56e9dd9ceeea266e85bb749d628c21b00534e2
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
SYNOPSIS
.
DESCRIPTION
EXAMPLES
python inverse_document.py
EXIT STATUS
0 program exit normal
1 program had problem on execution
AUTHOR
Theofilis George <theofilis.g@gmail.com>
LICENSE
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
VERSION
1
"""
import matplotlib.pyplot as plt
import numpy as np
import math
class InvertedDocument:
def __init__(self, D):
self.data = {}
self.N = 0
self.normalize = {}
for i in D:
D[i] = D[i].split()
self.N += 1
for i in D:
for token in D[i]:
if token not in self.data:
self.data[token] = { i : 1}
else:
if i in self.data[token]:
self.data[token][i] += 1
else:
self.data[token][i] = 1
d = []
for i in range(self.N):
for j, token in enumerate(self.data.keys()):
d += [self.w(token, i)]
self.normalize[i] = np.linalg.norm(d, 2)
d = []
def tf(self, t, d):
if d in self.data[t]:
return self.data[t][d]
else:
return 0
def df(self, t):
return len(self.data[t])
def idf(self, t):
def log2(x):
return math.log(float(x)) / math.log(2.0)
return log2(self.N) - log2(self.df(t))
def w(self, t, d):
return self.tf(t, d) * self.idf(t)
def dictionary(self):
return self.data.keys()
def norm(self, d):
return self.normalize[d]
def sim(self, q):
q = q.split()
Q = {}
for token in q:
if token in Q:
Q[token] += 1
else:
Q[token] = 1
n = np.linalg.norm(Q.values(), 2)
for t in Q:
Q[t] = Q[t] * self.idf(t) / n
sim = {}
for i in range(self.N):
sim[i] = 0
for t in Q:
sim[i] += Q[t] * self.w(t, i) / self.norm(i)
import operator
sim = sorted(sim.iteritems(), key=operator.itemgetter(1), reverse=True)
return sim
def sim_vectors(self, d, q):
s = np.dot(q, d)
n = np.linalg.norm(q, 2) * np.linalg.norm(d, 2)
return s[0] / n
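# The weighting implemented above is plain tf-idf with a base-2 idf:
#   w(t, d) = tf(t, d) * idf(t),  where idf(t) = log2(N) - log2(df(t)) = log2(N / df(t))
# and sim(q) scores each document d by sum_t Q[t] * w(t, d) / norm(d),
# with Q[t] the idf-weighted, L2-normalized query term counts.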
def main ():
# Documents
D = {
0 : "I am here on Twitter because my family is on Facebook",
1 : "Missing Mexico youths identified http://bbc.in/18PTkHR",
2 : "Mercantilism is one of the great whipping boys in the history of economics http://econ.st/14ph2Lp",
3 : "On a runway at JFK waiting for a gate My daughter is playing with a Tinker Bell helium balloon http://4sq.com/18PSBXf"
}
index = InvertedDocument(D)
print index.sim('JFK')
if __name__ == '__main__':
main()
|
theofilis/tutorial-information-retrieval
|
inverse_document.py
|
Python
|
gpl-3.0
| 3,502
|
[
"TINKER"
] |
d3759458ed2c20a1b6212f37c60baab07158b7e7a20c9af6f47fed060f5031c1
|
ITEMS = [
"An unfavorable wind blowing the ship towards shore. Sailing into the wind means tacking, which is hard and exhausting work for the crew and navigator.",
"A Blue sea dragon demands tribute, Gold, Valuables, or Blood",
"A Crate with a cures Crown made of blue scales and golden thorns",
"A Floating bottle with a Treasure map inside! (DC 18 to spot)",
"A Floating Treasure chest filled with GOLD! PAYDAY! (Dex DC 17 to catch) (STR DC 15 to lift it out)",
"A large sea creature surfaces nearby",
"A Legendary Party boat floats by, almost like a floating city and they try to get the party to join!",
"A marshy isthmus blocks the way ahead, the crew may have to get out and pull the ship across",
"A mass of bubbles and cavitation beneath the ship cause it sink suddenly, before the bubbles cease and the now swamped vessel and crew must struggle to the surface.",
"A Merchant ship who is willing to trade goods!",
"A pre-existing naval battle between two opposing fleets. Dare they try to navigate around them, or will they push forwards and into the crossfire?",
"A tear in the Material Plane leading to the Elemental Plane of Water, causing more and more water to violently pour into the sea along with whatever creatures may follow… These could be the roughest waves seen to date!",
"Ahead of the ship you can see a massive storm event. The distance to it and its size are indiscernible, but it looks like a hurricane, and the sudden choppy wake only confirms it.",
"An Aquamarine Sea Dragon appears, and, with cooperation, blesses the crew",
"An unexpected desert island (Very small) In the distance, with an NPC trying to flag the ship (DC 18 perception)",
"Beautiful sunset, no clouds, decent winds but not very fast",
"Castaways on a lifeboat needing help,",
"Choppy grey waters that elude to a storm despite a mostly clear sky",
"Choppy seas on a sunny day. Tritons and Kuo-ta wage battle just beneath the surface.",
"Clear skies wind against",
"Clear sunny skies no wind",
"Cloudy and Rainy with capsizable waves",
"Cloudy and Rainy with Favorable winds",
"Cloudy and the wind is running away with the ship",
"Cloudy with choppy seas, struggle to maintain direction",
"Cold air, travel waters become half ocean half ice sludge; impedes travel to a slow pace",
"Crimson floods the sky and the moon seems closer than usual. The water seems to be behaving… for now.",
"Dark night, not a star in the sky – making night time navigation difficult",
"Dark, cloudy sky and cold air. Dark shadows swim beneath what at first glance appears to be an empty ocean",
"Doldrums – scorching heat, clear sky, no wind whatsoever. Better have packed fresh water.",
"Foggy and no wind to guide direction",
"Frenzy of flying fish feeding in fog upon which the frequency and size of the fish increases over time eventually overwhelming even the largest ships as the fattest whale sized flying fish slap down onto the decks",
"glowing woman appears in front of the ship. (Ghost Encounter)",
"Green waters that move unnaturally, it’s unclear if it really is water or not",
"Heavy winds creating a waterspout",
"In the middle of the deep the water becomes still; Lilly pads grow on the surface and the water is rich with vegetation and life.. the water is sweet and pleasant to drink",
"Incredibly cold and slightly snowy, poor visibility",
"It is overcast but otherwise calm, but the air is filled with static and occassionally lightning strikes in the distance.",
"Jagged rocky spires suddenly jutting out of the ocean, corpses of boats litter them – requires expert and slow movement",
"lear skies and favorable wind",
"Lightning strikes the water up ahead.",
"Mega waves sweeping across the deck of the boat",
"Patches of the sea start bubbling, causing boats to sink",
"Progressively larger icebergs float by, nearly hitting your ship, Mementos from each characters past bob up to the surface, yet when reached for, they reveal themselves as mirages. DC 12 DEX check to avoid falling in.",
"Sargasaum. No wind or current, seaweed and garbage everywhere",
"Scorching hot and stale winds",
"Scratching sounds coming from outside the hull, heard belowdecks",
"Shallow reefs or rocks make travel dangerous.",
"Stormy the winds rip the sails",
"Stormy, un-tamable winds",
"Suddenly a meteor strikes the ocean not even 200 ft ahead of the ship. The massive energy from the impact suddenly creates a massive tidal wave like effect and heats the water so it burns to the touch.",
"Sunny with light winds moving into sandbars",
"Sunny, but with a light rain and a cold breeze",
"The air is thick and humid, so much so that you can see the misty vapors as the light wind moves it.",
"The boat sails past jetsam from a carnival boat",
"The boat sails past thousands of dead fish",
"The boatswain spies a spectral ship off the port bow!",
"The party is visited by a god of chaos, and is willing to play a game, for a fee",
"The sea and air taste sweet, not salty",
"The sea is unusually calm, flat as far as the eye can see, makes a perfect reflection of the sky",
"The seas are brimming with fish and life, dolphins accompany the boat as it glides though the water",
"The water Begins to smell of rotten eggs (Con save)",
"The water exudes a greenish mist, but is otherwise calm as one would expect from good weather at sea.",
"The water is abnormally glassy, like an untouched lake.",
"The water is abnormally salty and the ship seems unstably buoyant.",
"The water seems normal. Fish are jumping out of the water in delight of the clear skies.",
"The water seems to be accelerating towards port side. With careful inspection through a spyglass, you can see a large whirlpool in the distance.",
"The water seems to be… boiling?",
"The water suddenly goes silent and stops moving as the boat slides through the wake",
"There are a suspicious amount of fish in these waters, almost as if they’re fleeing something",
"There is a mirage of boats floating upside down beneath the water",
"They are visited by a god one of the party members worship and is blessed.",
"Unfavorable winds or currents requiring ‘kedging’ (using rowboats, ropes, pulleys, poles, and anchors to move the ship manually).",
"You find a Derelict ship that’s better than theirs, with no crew on board.",
"You hear arrhythmic soft thumps against the hull of the ship. Looking overboard is a sea of floating lifeless bodies.",
"“What’s that singing I hear off in the distance?”",
"A twenty-meter tall wave crashes into the hull of the ship, dealing 2d10 bludgeoning damage to the ship and players. (Players can make a DC 18 Dexterity saving throw to half the damage to themselves).",
"The water in a 100-meter radius of the boat turns into magic ice which can be broken with either a DC 22 Strength check or the use of a damaging 1st-level spell or higher.",
"Gale-force winds push you to your location at four times the traveling speed of your vessel for one hour. Each hour, any player on the top of the deck has to make a DC 10 Dexterity saving throw in order to no be thrown off the ship.",
"An airship (see Unearthed Arcana: Of Ships and the Sea for star block) flies overhead and shoots a ballista (3d10 piercing damage) at your vessel.",
"Strange magnetic ore in the coral reefs causes the compass to spin in circles.",
"Several large ships seem to be chasing a smaller ship.",
"Water in the area starts freezing, creating chunks of ice.",
"You spot a ship circling a giant whirlpool. The ship cant quite seem to get enough speed to break free from the whirlpools grip.",
"A forest of giant kelp starts brushing against the bottom of the ship.",
"Discover a strong ocean current. Bonus points to speed if traveling in the same direction.",
"Some titanic sized creature surfaces underneath the ship, lifting the ship out of the water. The creature is large enough to not have even noticed the ship or it’s crew.",
"A sea monster leaps from the water and lands on the deck. It keeps pacing along the deck and stares over the rails while shivering.",
"Hundreds of sea birds start landing on the deck.",
"You spy a derelict ship. Upon closer inspection, it looks like the entire crew was massacred by something.",
"You spy a large tower in the middle of the sea. As you get closer, you see several docks around it with several ships already docked. The tower is the entrance to an underwater city.",
"You see a group of Sauhaugin riding towards you atop a pack of sharks. You’re under attack!",
"You here a mysterious scratching from below deck. Further investigation reveals that it’s comg from underneath the ship.",
"It is raining cats and dogs. All the meowing and barking is going to get annoying very quickly. The cats and dogs that land on deck seem ready to go to war.",
"A water elemental emerges from the sea, attacking you and screaming about pollution.",
"A group of sea elves swims up beside your ship, inviting you to visit an underwater city.",
"A school of killer whales use their collective strength to push your ship off course. It seems like they want you to follow them.",
"You are sailing into a lot of large rocks and many wrecked ships. This is a ship graveyard.",
"A mesmerizing song on the wind beckons to you. Sailors must save or jump overboard and swim toward what each of them imagines is the source.",
"Perfect weather. All clear.",
]
|
d2emon/generator-pack
|
src/sample_data/dndspeak/sailing_conditions.py
|
Python
|
gpl-3.0
| 9,755
|
[
"VisIt"
] |
fbfc7b6c9b7b11ccff45b9e1a34cef6350cd8c78ae960c8cfee5cf8f54a93002
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
""" IPython related configuration. """
is_interactive = False
""" Whether we are in an IPython shell or not.
Tries and converts exceptions to simply printing error messages.
"""
jobfolder_glob = ['*.dict']
""" Globs (unix path wildcards) to identify jobfolders. """
|
pylada/pylada-light
|
src/pylada/config/ipython.py
|
Python
|
gpl-3.0
| 1,425
|
[
"CRYSTAL",
"VASP"
] |
26ef81844f0676dcbb6530c6baafeb000737708637e45c0f6dc4419948499662
|
# Licensed under GPL version 3 - see LICENSE.rst
'''Add additional scattering to processed photons.
Classes in this file add additional scattering in a statistical sense.
The classes in this module do not trace the photon to a specific location, they
just add the scatter at the point of the last interaction. For example,
reflection from a (flat) mirror is implemented as a perfect reflection, but in
practice there is some roughness to the mirror that adds a small Gaussian blur
to the reflection. To represent this, the point of origin of the ray remains
unchanged, but a small random change is added to the direction vector.
'''
import numpy as np
from warnings import warn
import astropy.units as u
from ..math.utils import e2h, h2e, norm_vector
from ..math.rotations import axangle2mat
from ..math.polarization import parallel_transport
from .base import FlatOpticalElement
class RadialMirrorScatter(FlatOpticalElement):
'''Add scatter to any sort of radial mirror.
Scatter is added in the plane of reflection, which is defined here
as the plane which contains (i) the current direction of the ray and (ii) the
vector connecting the center of the `RadialMirrorScatter` element and the
point of last interaction for the ray.
Scatter can also be added perpendicular to the plane-of-reflection.
Parameters
----------
inplanescatter : `astropy.Quantity`
sigma of Gaussian for in-plane scatter
perpplanescatter : `astropy.Quantity`
sigma of Gaussian for scatter perpendicular to the plane of reflection
(default = 0)
'''
inplanescattercol = 'inplanescatter'
perpplanescattercol = 'perpplanescatter'
def __init__(self, **kwargs):
self.inplanescatter = kwargs.pop('inplanescatter').to(u.rad).value
self.perpplanescatter = kwargs.pop('perpplanescatter', 0. * u.rad).to(u.rad).value
super().__init__(**kwargs)
def specific_process_photons(self, photons, intersect, interpos, intercoos):
n = intersect.sum()
center = self.pos4d[:-1, -1]
radial = h2e(photons['pos'][intersect].data) - center
perpplane = np.cross(h2e(photons['dir'][intersect].data), radial)
# np.random.normal does not work with scale=0
# so special case that here.
if self.inplanescatter != 0:
inplaneangle = np.random.normal(loc=0., scale=self.inplanescatter, size=n)
rot = axangle2mat(perpplane, inplaneangle)
outdir = e2h(np.einsum('...ij,...i->...j', rot, h2e(photons['dir'][intersect])), 0)
else:
inplaneangle = np.zeros(n)
outdir = photons['dir'][intersect]
if self.perpplanescatter != 0:
perpangle = np.random.normal(loc=0., scale=self.perpplanescatter, size=n)
rot = axangle2mat(radial, perpangle)
outdir = e2h(np.einsum('...ij,...i->...j', rot, h2e(outdir)), 0)
else:
perpangle = np.zeros_like(inplaneangle)
pol = parallel_transport(photons['dir'].data[intersect, :], outdir,
photons['polarization'].data[intersect, :])
return {'dir': outdir, 'polarization': pol,
self.inplanescattercol: inplaneangle,
self.perpplanescattercol: perpangle}
class RandomGaussianScatter(FlatOpticalElement):
'''Add random Gaussian scatter to the direction of rays.
This element scatters rays by a small angle, drawn from a Gaussian
distribution. The direction of the scatter is random.
Parameters
----------
scatter : `astropy.Quantity` or callable
If this is a number, scattering angles will be drawn from a Gaussian
with the given sigma. For a variable scatter, this can be a
function with the following call signature: ``angle = func(photons,
intersect, interpos, intercoos)``. The function should return an
`astropy.Quantity` array, containing one angle for each intersecting
photon. A function passed in for this parameter can make the
scattering time-, location-, or energy-dependent.
'''
scattername = 'scatter'
def __init__(self, **kwargs):
if 'scatter' in kwargs:
if hasattr(self, 'scatter'):
warn('Overriding class level "scatter" definition.')
self.scatter = kwargs.pop('scatter') # in rad
else:
if not hasattr(self, 'scatter'):
raise ValueError('Keyword "scatter" missing.')
super().__init__(**kwargs)
def specific_process_photons(self, photons, intersect, interpos, intercoos):
n = intersect.sum()
# np.random.normal does not work with scale=0
# so special case that here.
with u.set_enabled_equivalencies(u.dimensionless_angles()):
scatterzero = self.scatter == 0
if scatterzero:
angle = np.zeros(n)
out = {}
else:
pdir = norm_vector(h2e(photons['dir'][intersect].data))
# Now, find a direction that is perpendicular to the photon direction
# Any perpendicular direction will do
# Start by making a set of vectors that at least are not parallel
# to the photon direction
guessvec = np.zeros_like(pdir)
ind = np.abs(pdir[:, 0]) < 0.99999
guessvec[ind, 0] = 1
guessvec[~ind, 1] = 1
perpvec = np.cross(pdir, guessvec)
if callable(self.scatter):
angle = self.scatter(photons, intersect,
interpos, intercoos).to(u.rad).value
else:
angle = np.random.normal(loc=0.,
scale=self.scatter.to(u.rad).value,
size=n)
rot = axangle2mat(perpvec, angle)
outdir = np.einsum('...ij,...i->...j', rot, pdir)
# Now rotate result by up to 2 pi to randomize direction
angle2 = np.random.uniform(size=n) * 2 * np.pi
rot = axangle2mat(pdir, angle2)
outdir = e2h(np.einsum('...ij,...i->...j', rot, outdir), 0)
pol = parallel_transport(photons['dir'].data[intersect, :], outdir,
photons['polarization'].data[intersect, :])
out = {'dir': outdir, 'polarization': pol, self.scattername: angle}
return out
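# A minimal sketch of the callable form of ``scatter`` accepted by
# RandomGaussianScatter above (hypothetical helper, not part of the library API):
# the callable receives (photons, intersect, interpos, intercoos) and must
# return one angle per intersecting photon as an `astropy.Quantity`, e.g.
#
#     def energy_dependent_scatter(photons, intersect, interpos, intercoos):
#         # purely illustrative: blur inversely proportional to photon energy,
#         # assuming the photon table carries a plain 'energy' column
#         return (5. / photons['energy'][intersect].data) * u.arcsec
#
# It would then be passed as RandomGaussianScatter(scatter=energy_dependent_scatter),
# together with whatever geometry keywords the element normally takes.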
|
Chandra-MARX/marxs
|
marxs/optics/scatter.py
|
Python
|
gpl-3.0
| 6,432
|
[
"Gaussian"
] |
13b5fb8968806c866046b777c62e9d35c53e2d6b82b41492fbd71ab6cf63e82c
|
'''
CMEMS module
Builds copernicus formatted daily netCDFs from a single pandas dataframe/csv.
Originally written by Steve Jones.
Modified to better interact with export routine
Maren K. Karlsen 2020.10.29
'''
import sys, os
import tempfile
import datetime
import pandas as pd
import numpy as np
import numpy.ma as ma
import csv, json
import toml
from math import isnan
from io import BytesIO
from zipfile import ZipFile
from netCDF4 import Dataset
from re import match
import logging
TIME_BASE = datetime.datetime(1950, 1, 1, 0, 0, 0)
QC_LONG_NAME = "quality flag"
QC_CONVENTIONS = "Copernicus Marine In Situ reference table 2"
QC_VALID_MIN = 0
QC_VALID_MAX = 9
QC_FILL_VALUE = -127
QC_FLAG_VALUES = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
QC_FLAG_MEANINGS = "no_qc_performed good_data probably_good_data " \
+ "bad_data_that_are_potentially_correctable bad_data value_changed " \
+ "not_used nominal_value interpolated_value missing_value"
PLATFORM_CODES = {
"31" : "research vessel",
"32" : "vessel of opportunity",
"41" : "moored surface buoy",
"3B" : "autonomous surface water vehicle"
}
def buildnetcdfs(datasetname, fieldconfig, filedata, platform, CP_pid):
''' Construct CMEMS compliant netCDF files from filedata'''
logging.info(f'Constructing netcdf-files based on {datasetname} to send to CMEMS')
result = []
currentline = 0
currentdate = None
daystartline = currentline
dayendline = None
while currentline < filedata.shape[0]:
linedate = getlinedate(filedata.iloc[[currentline]])
if linedate != currentdate:
if currentdate:
result.append(
makenetcdf(datasetname, fieldconfig, platform, \
filedata[daystartline:dayendline + 1],CP_pid))
currentdate = linedate
daystartline = currentline
dayendline = currentline
currentline = currentline + 1
# Make the last netCDF
if dayendline:
result.append(
makenetcdf(datasetname, fieldconfig, platform, \
filedata[daystartline:dayendline+1],CP_pid))
return result
def makenetcdf(datasetname, fieldconfig, platform, records,CP_pid):
filedate = getlinedate(records.iloc[[0]])
ncbytes = None
platform_code = getplatformcode(datasetname)
filenameroot = "GL_TS_TS_" + platform[platform_code]['call_sign'] + "_" + str(filedate)
# Open a new netCDF file
ncpath = tempfile.gettempdir() + "/" + filenameroot + ".nc"
nc = Dataset(ncpath, format="NETCDF4_CLASSIC", mode="w")
# The DEPTH dimension is singular. Assume 5m for ships
depthdim = nc.createDimension("DEPTH", 1)
# Time, lat and lon dimensions are created per record
timedim = nc.createDimension("TIME", records.shape[0])
timevar = nc.createVariable("TIME", "d", ("TIME"))
timevar.long_name = "Time"
timevar.standard_name = "time"
timevar.units = "days since 1950-01-01T00:00:00Z"
timevar.valid_min = -90000
timevar.valid_max = 90000
timevar.uncertainty = " "
timevar.comment = ""
timevar.axis = "T"
timevar.ancillary_variables = "TIME_QC"
timevar.calendar = "standard"
latdim = nc.createDimension("LATITUDE", records.shape[0])
latvar = nc.createVariable("LATITUDE", "f", ("LATITUDE"))
latvar.long_name = "Latitude of each location"
latvar.standard_name = "latitude"
latvar.units = "degree_north"
latvar.valid_min = -90
latvar.valid_max = 90
latvar.uncertainty = " "
latvar.comment = ""
latvar.axis = "Y"
latvar.ancillary_variables = "POSITION_QC"
londim = nc.createDimension("LONGITUDE", records.shape[0])
lonvar = nc.createVariable("LONGITUDE", "f", ("LONGITUDE"))
lonvar.long_name = "Longitude of each location"
lonvar.standard_name = "longitude"
lonvar.units = "degree_east"
lonvar.valid_min = -180
lonvar.valid_max = 180
lonvar.uncertainty = " "
lonvar.comment = ""
lonvar.axis = "X"
lonvar.ancillary_variables = "POSITION_QC"
positiondim = nc.createDimension("POSITION", records.shape[0])
# Fill in dimension variables
timevar[:] = records['Timestamp'].apply(maketimefield).to_numpy()
latvar[:] = records['ALATGP01'].to_numpy()
lonvar[:] = records['ALONGP01'].to_numpy()
# QC flags for dimension variables. Assume all are good
timeqcvar = nc.createVariable("TIME_QC", "b", ("TIME"), \
fill_value = QC_FILL_VALUE)
assignqcvarattributes(timeqcvar)
timeqcvar[:] = 1
positionqcvar = nc.createVariable("POSITION_QC", "b", ("POSITION"), \
fill_value = QC_FILL_VALUE)
assignqcvarattributes(positionqcvar)
positionqcvar[:] = 1
positionvar = nc.createVariable("POSITIONING_SYSTEM", "c", ("TIME", "DEPTH"),\
fill_value = " ")
positionvar.longname = "Positioning system"
positionvar.flag_values = "A, G, L, N, U"
positionvar.flag_meanings = "Argos, GPS, Loran, Nominal, Unknown"
positions = np.empty([records.shape[0], 1], dtype="object")
positions[:,0] = "G"
positionvar[:,:] = positions
# DM values
dms = np.empty([records.shape[0], 1], dtype="object")
dms[:,0] = "R"
# Fields
for index, field in fieldconfig.iterrows():
var = nc.createVariable(field['netCDF Name'], field['Data Type'], \
("TIME", "DEPTH"), fill_value=field['FillValue'])
attributes = json.loads(field['Attributes'])
for key, value in attributes.items():
var.setncattr(key, value)
# Read the data values
datavalues = records[field['Export Column']].to_numpy()
# Calculate QC values
qc_values = np.empty([records.shape[0], 1])
if field['QC'] == "Data":
qc_values[:,0] = makeqcvalues(datavalues, records[field['Export Column'] + '_QC'].to_numpy())
else:
qc_values[:,0] = field['QC']
if field['add_scale_factor']:
var.setncattr('add_offset', 0)
var.setncattr('scale_factor', field['scale_factor'])
# The netCDF library detects FillValues *after* it's done the packing step.
# So we replace the NaN values so that the packing will get the correct FillValue
datavalues[np.isnan(datavalues)] = float(field['FillValue']) * float(field['scale_factor'])
varvalues = np.empty([records.shape[0], 1])
varvalues[:,0] = datavalues
var[:,:] = varvalues
# # DM variable
# dmvar = nc.createVariable(field['netCDF Name'] + '_DM', 'c', ("TIME", "DEPTH"), fill_value=' ')
# assigndmvarattributes(dmvar)
# dmvar[:,:] = dms
qcvar = nc.createVariable(field['netCDF Name'] + '_QC', "b", ("TIME", "DEPTH"), \
fill_value = QC_FILL_VALUE)
assignqcvarattributes(qcvar)
qcvar[:,:] = qc_values
# Global attributes
nc.id = filenameroot
nc.data_type = "OceanSITES trajectory data"
nc.netcdf_version = "netCDF-4 classic model"
nc.format_version = "1.4"
nc.Conventions = " CF-1.6 Copernicus-InSituTAC-FormatManual-1.4 Copernicus-InSituTAC-SRD-1.41 Copernicus-InSituTAC-ParametersList-3.2.0"
nc.cdm_data_type = "trajectory"
nc.data_mode = "R"
nc.area = "Global Ocean"
nc.bottom_depth = " ";
nc.geospatial_lat_min = str(np.nanmin(latvar))
nc.geospatial_lat_max = str(np.nanmax(latvar))
nc.geospatial_lon_min = str(np.nanmin(lonvar))
nc.geospatial_lon_max = str(np.nanmax(lonvar))
nc.geospatial_vertical_min = str(min(records['ADEPZZ01'].to_numpy()))
nc.geospatial_vertical_max = str(max(records['ADEPZZ01'].to_numpy()))
nc.last_latitude_observation = str(records['ALATGP01'].iloc[[-1]].to_numpy()[0])
nc.last_longitude_observation = str(records['ALONGP01'].iloc[[-1]].to_numpy()[0])
nc.last_date_observation = records['Timestamp'].iloc[[-1]].to_numpy()[0]
nc.time_coverage_start = records['Timestamp'].iloc[[0]].to_numpy()[0]
nc.time_coverage_end = records['Timestamp'].iloc[[-1]].to_numpy()[0]
nc.update_interval = "P1D"
nc.data_assembly_center = "University of Bergen"
nc.institution = "University of Bergen / Geophysical Institute"
nc.institution_edmo_code = "4595"
nc.institution_references = " "
nc.contact = "bcdc@uib.no cmems-service@ifremer.fr"
nc.title = "Global Ocean - In Situ near-real time carbon observation"
nc.author = "cmems-service"
nc.naming_authority = "Copernicus Marine In Situ"
nc.pi_name = platform[platform_code]['author_list']
nc.qc_manual = ""
nc.wmo_inst_type = ""
nc.wmo_platform_code = ""
nc.ices_platform_code = str(platform_code)
nc.doi = CP_pid
nc.platform_code = platform[platform_code]['call_sign']
nc.site_code = platform[platform_code]['call_sign']
# For buoys -> Mooring observation.
platform_category_code = platform[platform_code]['category_code']
nc.platform_name = platform[platform_code]['name']
nc.source_platform_category_code = platform_category_code
nc.source = PLATFORM_CODES[platform_category_code]
nc.quality_control_indicator = "6" # "Not used"
nc.quality_index = "0"
nc.comment = " "
nc.summary = " "
nc.references = "http://marine.copernicus.eu http://www.marineinsitu.eu https://www.icos-cp.eu"
#nc.citation = "These data were collected and made freely available by the " \
# + "Copernicus project and the programs that contribute to it."
nc.citation = (
"These data were collected and made freely available by the Copernicus project and the programs that contribute to it."
+ platform[platform_code]['author_list']
+ "(" + str( datetime.datetime.now().year)
+ "): NRT data from " + platform[platform_code]['name'] + ". "
+ CP_pid
+ " Made available through the Copernicus project.")
nc.distribution_statement = ("These data follow Copernicus standards; they "
+ "are public and free of charge. User assumes all risk for use of data. "
+ "User must display citation in any publication or product using data. "
+ "User must contact PI prior to any commercial use of data.")
# Write the netCDF
nc.close()
# Read the netCDF file into memory
with open(ncpath, "rb") as ncfile:
ncbytes = ncfile.read()
# Delete the temp netCDF file
os.remove(ncpath)
return [filenameroot, ncbytes]
def makeqcvalues(values, qc):
result = np.empty(len(values))
for i in range(0, len(values)):
if isnan(values[i]):
result[i] = 9
else:
result[i] = makeqcvalue(qc[i])
return result
# NOTE: the DM_* constants referenced below are not defined in this module; this
# helper is only reachable from the commented-out '_DM' variable block above.
def assigndmvarattributes(dmvar):
dmvar.long_name = DM_LONG_NAME
dmvar.conventions = DM_CONVENTIONS
dmvar.flag_values = DM_FLAG_VALUES
dmvar.flag_meanings = DM_FLAG_MEANINGS
def assignqcvarattributes(qcvar):
qcvar.long_name = QC_LONG_NAME
qcvar.conventions = QC_CONVENTIONS
qcvar.valid_min = QC_VALID_MIN
qcvar.valid_max = QC_VALID_MAX
qcvar.flag_values = QC_FLAG_VALUES
qcvar.flag_meanings = QC_FLAG_MEANINGS
def maketimefield(timestr):
timeobj = maketimeobject(timestr)
diff = timeobj - TIME_BASE
return diff.days + diff.seconds / 86400
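# e.g. maketimefield("2020-10-29 12:00:00") -> 25869.5 days since 1950-01-01T00:00:00Z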
def maketimeobject(timestr):
timeobj = datetime.datetime(int(timestr[0:4]), int(timestr[5:7]), \
int(timestr[8:10]), int(timestr[11:13]), int(timestr[14:16]), \
int(timestr[17:19]))
return timeobj
def makeqcvalue(flag):
result = 9 # Missing
if flag == 2:
result = 2
elif flag == 3:
result = 2
elif flag == 4:
result = 4
else:
raise ValueError("Unrecognised flag value " + str(flag))
return result
def getlinedate(line):
return pd.to_datetime(line.Timestamp).iloc[0].date().strftime('%Y%m%d')
def getplatformcode(datasetname):
platform_code = None
# NRT data sets
# Named as NRT[Platform][milliseconds]
# Milliseconds is currently a 13 digit number. At the time of writing it
# will be ~316 years before that changes.
matched = match("^NRT(.....*)[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]$",
datasetname)
if matched is None:
# Normal data sets - standard EXPO codes
matched = match("^(.....*)[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]$",
datasetname)
if matched is None:
raise ValueError("Cannot parse dataset name")
else:
platform_code = matched.group(1)
return platform_code
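# e.g. a (hypothetical) dataset name "26AB20201029" (EXPO-style platform code plus
# a date) yields "26AB"; NRT dataset names carry an "NRT" prefix and end in a
# 13-digit millisecond timestamp instead, as described in the comments above.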
def main():
fieldconfig = pd.read_csv('fields.csv', delimiter=',', quotechar='\'')
zipfile = sys.argv[1]
datasetname = os.path.splitext(zipfile)[0]
datasetpath = datasetname + "/dataset/Copernicus/" + datasetname + ".csv"
platform_lookup_file = 'platforms.toml'
with open(platform_lookup_file) as f: platform = toml.load(f)
filedata = None
with ZipFile(zipfile, "r") as unzip:
filedata = pd.read_csv(BytesIO(unzip.read(datasetpath)), delimiter=',')
filedata['Timestamp'] = filedata['Timestamp'].astype(str)
# buildnetcdfs also needs a CP_pid (the persistent identifier written to nc.doi);
# no value is supplied on the command line here, so pass an empty placeholder.
netcdfs = buildnetcdfs(datasetname, fieldconfig, filedata, platform, CP_pid='')
for i in range(0, len(netcdfs)):
with open(netcdfs[i][0] + ".nc", "wb") as outchan:
outchan.write(netcdfs[i][1])
if __name__ == '__main__':
main()
|
BjerknesClimateDataCentre/QuinCe
|
external_scripts/export/modules/CMEMS/Export_CMEMS_netCDF_builder.py
|
Python
|
gpl-3.0
| 12,637
|
[
"NetCDF"
] |
5240050eb9d5193a9e22e7587fc2a670b815a47ee65659cbc05d83d6cf6841e9
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# This converts data from the individual 4 Key files outputted by avogadro
import datetime, sys, csv, os.path
try:
inputPath = sys.argv[1]
outputPath = sys.argv[2]
except IndexError:
print ("Usage: python %s path/to/input/ path/to/output/" % sys.argv[0])
exit()
if not os.path.exists(outputPath):
os.makedirs(outputPath)
countInputFilePath = inputPath + "KeyCount.csv"
ddInputFilePath = inputPath + "KeyDownDown.csv"
udInputFilePath = inputPath + "KeyUpDown.csv"
holdInputFilePath = inputPath + "KeyHold.csv"
countOutputFilePath = outputPath + "Count.csv"
ddOutputFilePath = outputPath + "DownDown.csv"
udOutputFilePath = outputPath + "UpDown.csv"
holdOutputFilePath = outputPath + "Hold.csv"
totalData = []
# Open all the input files
with open(countInputFilePath, "rb") as countInputFile, \
open(ddInputFilePath, "rb") as ddInputFile, \
open(udInputFilePath, "rb") as udInputFile,\
open(holdInputFilePath, "rb") as holdInputFile:
countReader = csv.reader(countInputFile)
ddReader = csv.reader(ddInputFile)
udReader = csv.reader(udInputFile)
holdReader = csv.reader(holdInputFile)
# Get total number of rows
numRows = sum(1 for row in countReader)
# Reset the file pointer (there should be a better way to do this...)
countInputFile.close()
countInputFile = open(countInputFilePath, "rb")
countReader = csv.reader(countInputFile)
for _ in xrange(numRows):
countRow = countReader.next()
ddRow = ddReader.next()
udRow = udReader.next()
holdRow = holdReader.next()
# Timestamp, count, dd, ud, hold
totalData.append([countRow[2], countRow[1], ddRow[1], udRow[1], holdRow[1]])
# Now we have all the data in one place
# Write it to separate files in the formatted directory for processing by
# process_keys.py
lastTS = None
with open(countOutputFilePath, "wb") as countFile, \
open(ddOutputFilePath, "wb") as ddFile, \
open(udOutputFilePath, "wb") as udFile,\
open(holdOutputFilePath, "wb") as holdFile:
countWriter = csv.writer(countFile)
ddWriter = csv.writer(ddFile)
udWriter = csv.writer(udFile)
holdWriter = csv.writer(holdFile)
headerRow = ["name", "value", "dttm"]
countWriter.writerow(headerRow)
ddWriter.writerow(headerRow)
udWriter.writerow(headerRow)
holdWriter.writerow(headerRow)
for row in totalData:
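# Floor each timestamp to a 300-second (5-minute) bucket; under Python 2 the
# "/" on the next line is integer division, which is what makes this a floor.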
ts = int(float(row[0])) / 300 * 300
readableTS = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
count = row[1]
dd = row[2]
ud = row[3]
hold = row[4]
# Bugs caused by lid closing: skip rows with implausibly long hold or key-gap times
if float(hold) > 1.0:
continue
elif float(dd) > 300 or float(ud) > 300:
continue
if float(dd) > float(ud):
countWriter.writerow(["KeyCount", count, readableTS])
ddWriter.writerow(["KeyDownDown", "nan", readableTS])
udWriter.writerow(["KeyUpDown", "nan", readableTS])
holdWriter.writerow(["KeyHold", hold, readableTS])
lastTS = ts
continue
if lastTS is None:
countWriter.writerow(["KeyCount", count, readableTS])
ddWriter.writerow(["KeyDownDown", dd, readableTS])
udWriter.writerow(["KeyUpDown", ud, readableTS])
holdWriter.writerow(["KeyHold", hold, readableTS])
lastTS = ts
continue
if (ts - lastTS) > 300:
while (ts - lastTS > 300):
lastTS += 300
readableLastTS = datetime.datetime.fromtimestamp(lastTS).strftime('%Y-%m-%d %H:%M:%S')
countWriter.writerow(["KeyCount", "nan", readableLastTS])
ddWriter.writerow(["KeyDownDown", "nan", readableLastTS])
udWriter.writerow(["KeyUpDown", "nan", readableLastTS])
holdWriter.writerow(["KeyHold", "nan", readableLastTS])
if (ts - lastTS) == 300:
countWriter.writerow(["KeyCount", count, readableTS])
ddWriter.writerow(["KeyDownDown", dd, readableTS])
udWriter.writerow(["KeyUpDown", ud, readableTS])
holdWriter.writerow(["KeyHold", hold, readableTS])
lastTS = ts
|
neuroidss/nupic.rogue
|
tools/convert_key_data.py
|
Python
|
agpl-3.0
| 4,972
|
[
"Avogadro"
] |
15de39dbd795ab633d21f7767a805630ed526d37a77e3076fa8fa1040ac965f8
|
#!/usr/bin/env python
# Copyright (c) 2014, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Author: Brian Torres-Gil <btorres-gil@paloaltonetworks.com>
"""
userid.py
=========
Update User-ID by adding or removing a user-to-ip mapping on the firewall
**Usage**::
userid.py [-h] [-v] [-q] hostname username password action user ip
**Examples**:
Send a User-ID login event to a firewall at 10.0.0.1::
$ python userid.py 10.0.0.1 admin password login exampledomain/user1 4.4.4.4
Send a User-ID logout event to a firewall at 172.16.4.4::
$ python userid.py 172.16.4.4 admin password logout user2 5.1.2.2
"""
__author__ = 'btorres-gil'
import sys
import os
import argparse
import logging
curpath = os.path.dirname(os.path.abspath(__file__))
sys.path[:0] = [os.path.join(curpath, os.pardir)]
from pandevice.base import PanDevice
from pandevice.panorama import Panorama
def main():
# Get command line arguments
parser = argparse.ArgumentParser(description="Update User-ID by adding or removing a user-to-ip mapping")
parser.add_argument('-v', '--verbose', action='count', help="Verbose (-vv for extra verbose)")
parser.add_argument('-q', '--quiet', action='store_true', help="No output")
# Palo Alto Networks related arguments
fw_group = parser.add_argument_group('Palo Alto Networks Device')
fw_group.add_argument('hostname', help="Hostname of Firewall")
fw_group.add_argument('username', help="Username for Firewall")
fw_group.add_argument('password', help="Password for Firewall")
fw_group.add_argument('action', help="The action of the user. Must be 'login' or 'logout'.")
fw_group.add_argument('user', help="The username of the user")
fw_group.add_argument('ip', help="The IP address of the user")
args = parser.parse_args()
### Set up logger
# Logging Levels
# WARNING is 30
# INFO is 20
# DEBUG is 10
if args.verbose is None:
args.verbose = 0
if not args.quiet:
logging_level = 20 - (args.verbose * 10)
if logging_level <= logging.DEBUG:
logging_format = '%(levelname)s:%(name)s:%(message)s'
else:
logging_format = '%(message)s'
logging.basicConfig(format=logging_format, level=logging_level)
# Connect to the device and determine its type (Firewall or Panorama).
device = PanDevice.create_from_device(args.hostname,
args.username,
args.password,
)
logging.debug("Detecting type of device")
# Panorama does not have a userid API, so exit.
# You can use the userid API on a firewall with the Panorama 'target'
# parameter by creating a Panorama object first, then create a
# Firewall object with the 'panorama' and 'serial' variables populated.
if issubclass(type(device), Panorama):
logging.error("Connected to a Panorama, but user-id API is not possible on Panorama. Exiting.")
sys.exit(1)
if args.action == "login":
logging.debug("Login user %s at IP %s" % (args.user, args.ip))
device.userid.login(args.user, args.ip)
elif args.action == "logout":
logging.debug("Logout user %s at IP %s" % (args.user, args.ip))
device.userid.logout(args.user, args.ip)
else:
raise ValueError("Unknown action: %s. Must be 'login' or 'logout'." % args.action)
logging.debug("Done")
# Call the main() function to begin the program if not
# loaded as a module.
if __name__ == '__main__':
main()
|
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks
|
SplunkforPaloAltoNetworks/bin/lib/pandevice/examples/userid.py
|
Python
|
isc
| 4,289
|
[
"Brian"
] |
58133d3218914bc676d63a26aab7c2eed0d7f243cd201f1d19f402f67dbb8785
|
# (c) 2012-2016, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
from galaxy.main.celerytasks.tasks import import_role
from galaxy.main.models import Role, ImportTask
User = get_user_model()
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('role_id', nargs='+', type=int)
def handle(self, *args, **options):
if not options.get('role_id'):
raise Exception("Please provide a role ID.")
role_id = options.get('role_id')[0]
role = Role.objects.get(id=role_id)
last_task = ImportTask.objects.filter(role=role, state='SUCCESS').order_by('-id').first()
task = ImportTask.objects.create(
github_user=role.github_user,
github_repo=role.github_repo,
github_reference=role.github_branch,
alternate_role_name=last_task.alternate_role_name,
role=role,
owner=last_task.owner,
state='PENDING'
)
import_role.delay(task.id)
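# A sketch of how this management command is typically invoked (the role id 42
# is hypothetical):
#
#     python manage.py reimport_role 42
#
# Note that it assumes at least one previous ImportTask with state='SUCCESS'
# exists for the role; otherwise `last_task` above is None and the attribute
# lookups on it will fail.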
|
chouseknecht/galaxy
|
galaxy/main/management/commands/reimport_role.py
|
Python
|
apache-2.0
| 1,725
|
[
"Galaxy"
] |
a2a57563b537d000fd43ad482704897ef045d8ce59feadf127ff0fc3bc0d6aa3
|
# -*- coding: utf-8 -*-
{
'!langcode!': 'nl',
'!langname!': 'Nederlands',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%(nrows)s records found': '%(nrows)s records gevonden',
'%d days ago': '%d dagen geleden',
'%d weeks ago': '%d weken gelden',
'%s %%{row} deleted': '%s rijen verwijderd',
'%s %%{row} updated': '%s rijen geupdate',
'%s selected': '%s geselecteerd',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(something like "it-it")': '(zoiets als "nl-nl")',
'1 day ago': '1 dag geleden',
'1 week ago': '1 week gelden',
'<': '<',
'<=': '<=',
'=': '=',
'>': '>',
'>=': '>=',
'@markmin\x01An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'A new version of web2py is available': 'Een nieuwe versie van web2py is beschikbaar',
'A new version of web2py is available: %s': 'Een nieuwe versie van web2py is beschikbaar: %s',
'About': 'Over',
'about': 'over',
'About application': 'Over applicatie',
'Access Control': 'Toegangscontrole',
'Add': 'Toevoegen',
'additional code for your application': 'additionele code voor je applicatie',
'admin disabled because no admin password': 'admin is uitgezet omdat er geen admin wachtwoord is',
'admin disabled because not supported on google app engine': 'admin is uitgezet omdat dit niet ondersteund wordt op google app engine',
'admin disabled because unable to access password file': 'admin is uitgezet omdat het wachtwoordbestand niet geopend kan worden',
'Admin is disabled because insecure channel': 'Admin is uitgezet om het kanaal onveilig is',
'Admin is disabled because unsecure channel': 'Admin is uitgezet om het kanaal onveilig is',
'Administration': 'Administratie',
'Administrative Interface': 'Administratieve Interface',
'Administrator Password:': 'Administrator Wachtwoord',
'Ajax Recipes': 'Ajax Recepten',
'And': 'En',
'and rename it (required):': 'en hernoem deze (vereist)',
'and rename it:': 'en hernoem:',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'appadmin is uitgezet vanwege een onveilig kanaal',
'application "%s" uninstalled': 'applicatie "%s" gedeïnstalleerd',
'application compiled': 'applicatie gecompileerd',
'application is compiled and cannot be designed': 'applicatie is gecompileerd en kan niet worden ontworpen',
'Are you sure you want to delete file "%s"?': 'Weet je zeker dat je bestand "%s" wilt verwijderen?',
'Are you sure you want to delete this object?': 'Weet je zeker dat je dit object wilt verwijderen?',
'Are you sure you want to uninstall application "%s"?': 'Weet je zeker dat je applicatie "%s" wilt deïnstalleren?',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'LET OP: Login vereist een beveiligde (HTTPS) connectie of moet draaien op localhost.',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'LET OP: TESTEN IS NIET THREAD SAFE, PROBEER NIET GELIJKTIJDIG MEERDERE TESTS TE DOEN.',
'ATTENTION: you cannot edit the running application!': 'LET OP: je kan de applicatie die nu draait niet editen!',
'Authentication': 'Authenticatie',
'Available Databases and Tables': 'Beschikbare databases en tabellen',
'Back': 'Terug',
'Buy this book': 'Koop dit boek',
'Cache': 'Cache',
'cache': 'cache',
'Cache Keys': 'Cache Keys',
'cache, errors and sessions cleaned': 'cache, errors en sessies geleegd',
'Cannot be empty': 'Mag niet leeg zijn',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': 'Kan niet compileren: er bevinden zich fouten in je app. Debug, corrigeer de fouten en probeer opnieuw.',
'cannot create file': 'kan bestand niet maken',
'cannot upload file "%(filename)s"': 'kan bestand "%(filename)s" niet uploaden',
'Change Password': 'Wijzig wachtwoord',
'Change password': 'Wijzig Wachtwoord',
'change password': 'wijzig wachtwoord',
'check all': 'vink alles aan',
'Check to delete': 'Vink aan om te verwijderen',
'clean': 'leeg',
'Clear': 'Leeg',
'Clear CACHE?': 'Leeg CACHE?',
'Clear DISK': 'Leeg DISK',
'Clear RAM': 'Clear RAM',
'click to check for upgrades': 'Klik om voor upgrades te controleren',
'Client IP': 'Client IP',
'Community': 'Community',
'compile': 'compileren',
'compiled application removed': 'gecompileerde applicatie verwijderd',
'Components and Plugins': 'Components en Plugins',
'contains': 'bevat',
'Controller': 'Controller',
'Controllers': 'Controllers',
'controllers': 'controllers',
'Copyright': 'Copyright',
'create file with filename:': 'maak bestand met de naam:',
'Create new application': 'Maak nieuwe applicatie:',
'create new application:': 'maak nieuwe applicatie',
'Created By': 'Gemaakt Door',
'Created On': 'Gemaakt Op',
'crontab': 'crontab',
'Current request': 'Huidige request',
'Current response': 'Huidige response',
'Current session': 'Huidige sessie',
'currently saved or': 'op het moment opgeslagen of',
'customize me!': 'pas me aan!',
'data uploaded': 'data geupload',
'Database': 'Database',
'Database %s select': 'Database %s select',
'database administration': 'database administratie',
'Date and Time': 'Datum en Tijd',
'db': 'db',
'DB Model': 'DB Model',
'defines tables': 'definieer tabellen',
'Delete': 'Verwijder',
'delete': 'verwijder',
'delete all checked': 'verwijder alle aangevinkten',
'Delete:': 'Verwijder:',
'Demo': 'Demo',
'Deploy on Google App Engine': 'Deploy op Google App Engine',
'Deployment Recipes': 'Deployment Recepten',
'Description': 'Beschrijving',
'design': 'design',
'DESIGN': 'DESIGN',
'Design for': 'Design voor',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Geleegd',
'Documentation': 'Documentatie',
"Don't know what to do?": 'Weet je niet wat je moet doen?',
'done!': 'gereed!',
'Download': 'Download',
'E-mail': 'E-mail',
'E-mail invalid': 'E-mail ongeldig',
'edit': 'bewerk',
'EDIT': 'BEWERK',
'Edit': 'Bewerk',
'Edit application': 'Bewerk applicatie',
'edit controller': 'bewerk controller',
'Edit current record': 'Bewerk huidig record',
'Edit Profile': 'Bewerk Profiel',
'edit profile': 'bewerk profiel',
'Edit This App': 'Bewerk Deze App',
'Editing file': 'Bewerk bestand',
'Editing file "%s"': 'Bewerk bestand "%s"',
'Email and SMS': 'E-mail en SMS',
'enter a number between %(min)g and %(max)g': 'geef een getal tussen %(min)g en %(max)g',
'Enter an integer between %(min)g and %(max)g': 'Enter an integer between %(min)g and %(max)g',
'enter an integer between %(min)g and %(max)g': 'geef een integer tussen %(min)g en %(max)g',
'Error logs for "%(app)s"': 'Error logs voor "%(app)s"',
'errors': 'errors',
'Errors': 'Errors',
'Export': 'Export',
'export as csv file': 'exporteer als csv-bestand',
'exposes': 'stelt bloot',
'extends': 'extends',
'failed to reload module': 'niet gelukt om module te herladen',
'False': 'Onwaar',
'FAQ': 'FAQ',
'file "%(filename)s" created': 'bestand "%(filename)s" gemaakt',
'file "%(filename)s" deleted': 'bestand "%(filename)s" verwijderd',
'file "%(filename)s" uploaded': 'bestand "%(filename)s" geupload',
'file "%(filename)s" was not deleted': 'bestand "%(filename)s" was niet verwijderd',
'file "%s" of %s restored': 'bestand "%s" van %s hersteld',
'file changed on disk': 'bestand aangepast op schijf',
'file does not exist': 'bestand bestaat niet',
'file saved on %(time)s': 'bestand bewaard op %(time)s',
'file saved on %s': 'bestand bewaard op %s',
'First name': 'Voornaam',
'Forbidden': 'Verboden',
'Forms and Validators': 'Formulieren en Validators',
'Free Applications': 'Gratis Applicaties',
'Functions with no doctests will result in [passed] tests.': 'Functies zonder doctests zullen resulteren in [passed] tests.',
'Group %(group_id)s created': 'Groep %(group_id)s gemaakt',
'Group ID': 'Groep ID',
'Group uniquely assigned to user %(id)s': 'Groep is uniek toegekend aan gebruiker %(id)s',
'Groups': 'Groepen',
'Hello World': 'Hallo Wereld',
'help': 'help',
'Home': 'Home',
'How did you get here?': 'Hoe ben je hier gekomen?',
'htmledit': 'Bewerk HTML',
'import': 'import',
'Import/Export': 'Import/Export',
'includes': 'includes',
'Index': 'Index',
'insert new': 'voeg nieuwe',
'insert new %s': 'voeg nieuwe %s',
'Installed applications': 'Geïnstalleerde applicaties',
'internal error': 'interne error',
'Internal State': 'Interne State',
'Introduction': 'Introductie',
'Invalid action': 'Ongeldige actie',
'Invalid email': 'Ongeldig emailadres',
'invalid password': 'ongeldig wachtwoord',
'Invalid password': 'Ongeldig wachtwoord',
'Invalid Query': 'Ongeldige Query',
'invalid request': 'ongeldige request',
'invalid ticket': 'ongeldige ticket',
'Is Active': 'Is Actief',
'Key': 'Key',
'language file "%(filename)s" created/updated': 'taalbestand "%(filename)s" gemaakt/geupdate',
'Language files (static strings) updated': 'Taalbestanden (statische strings) geupdate',
'languages': 'talen',
'Languages': 'Talen',
'languages updated': 'talen geupdate',
'Last name': 'Achternaam',
'Last saved on:': 'Laatst bewaard op:',
'Layout': 'Layout',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'License for': 'Licentie voor',
'Live Chat': 'Live Chat',
'loading...': 'laden...',
'Log In': 'Log In',
'Logged in': 'Ingelogd',
'Logged out': 'Uitgelogd',
'Login': 'Login',
'login': 'login',
'Login to the Administrative Interface': 'Inloggen op de Administratieve Interface',
'logout': 'logout',
'Logout': 'Logout',
'Lost Password': 'Wachtwoord Kwijt',
'Lost password?': 'Wachtwoord kwijt?',
'Main Menu': 'Hoofdmenu',
'Manage Cache': 'Beheer Cache',
'Menu Model': 'Menu Model',
'merge': 'samenvoegen',
'Models': 'Modellen',
'models': 'modellen',
'Modified By': 'Aangepast Door',
'Modified On': 'Aangepast Op',
'Modules': 'Modules',
'modules': 'modules',
'My Sites': 'Mijn Sites',
'Name': 'Naam',
'New': 'Nieuw',
'new application "%s" created': 'nieuwe applicatie "%s" gemaakt',
'New password': 'Nieuw wachtwoord',
'New Record': 'Nieuw Record',
'new record inserted': 'nieuw record ingevoegd',
'next 100 rows': 'volgende 100 rijen',
'NO': 'NEE',
'No databases in this application': 'Geen database in deze applicatie',
'Object or table name': 'Object of tabelnaam',
'Old password': 'Oude wachtwoord',
'Online examples': 'Online voorbeelden',
'Or': 'Of',
'or import from csv file': 'of importeer van csv-bestand',
'or provide application url:': 'of geef een applicatie url:',
'Origin': 'Bron',
'Original/Translation': 'Oorspronkelijk/Vertaling',
'Other Plugins': 'Andere Plugins',
'Other Recipes': 'Andere Recepten',
'Overview': 'Overzicht',
'pack all': 'pack all',
'pack compiled': 'pack compiled',
'Password': 'Wachtwoord',
"Password fields don't match": 'Wachtwoordvelden komen niet overeen',
'Peeking at file': 'Naar bestand aan het gluren',
'please input your password again': 'geef alstublieft nogmaals uw wachtwoord',
'Plugins': 'Plugins',
'Powered by': 'Powered by',
'Preface': 'Inleiding',
'previous 100 rows': 'vorige 100 rijen',
'Profile': 'Profiel',
'Python': 'Python',
'Query': 'Query',
'Query:': 'Query:',
'Quick Examples': 'Snelle Voorbeelden',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Geleegd',
'Recipes': 'Recepten',
'Record': 'Record',
'record does not exist': 'record bestaat niet',
'Record ID': 'Record ID',
'Record id': 'Record id',
'register': 'registreer',
'Register': 'Registreer',
'Registration identifier': 'Registratie identifier',
'Registration key': 'Registratie sleutel',
'Registration successful': 'Registratie succesvol',
'Remember me (for 30 days)': 'Onthoudt mij (voor 30 dagen)',
'remove compiled': 'verwijder gecompileerde',
'Request reset password': 'Vraag een wachtwoord reset aan',
'Reset Password key': 'Reset Wachtwoord sleutel',
'Resolve Conflict file': 'Los Conflictbestand op',
'restore': 'herstel',
'revert': 'herstel',
'Role': 'Rol',
'Rows in Table': 'Rijen in tabel',
'Rows selected': 'Rijen geselecteerd',
's přispěním': 's přispěním',
'save': 'bewaar',
'Save profile': 'Bewaar profiel',
'Saved file hash:': 'Opgeslagen file hash:',
'Search': 'Zoek',
'Semantic': 'Semantisch',
'Services': 'Services',
'session expired': 'sessie verlopen',
'shell': 'shell',
'site': 'site',
'Size of cache:': 'Grootte van cache:',
'some files could not be removed': 'sommige bestanden konden niet worden verwijderd',
'starts with': 'begint met',
'state': 'state',
'static': 'statisch',
'Static files': 'Statische bestanden',
'Statistics': 'Statistieken',
'Stylesheet': 'Stylesheet',
'Submit': 'Submit',
'submit': 'submit',
'Support': 'Support',
'Sure you want to delete this object?': 'Weet je zeker dat je dit object wilt verwijderen?',
'Table': 'Tabel',
'Table name': 'Tabelnaam',
'test': 'test',
'Testing application': 'Applicatie testen',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'De "query" is een conditie zoals "db.tabel1.veld1==\'waarde\'". Zoiets als "db.tabel1.veld1==db.tabel2.veld2" resulteert in een SQL JOIN.',
'the application logic, each URL path is mapped in one exposed function in the controller': 'the applicatie logica, elk URL pad is gemapped in een blootgestelde functie in de controller',
'The Core': 'De Core',
'the data representation, define database tables and sets': 'de data representatie, definieert database tabellen en sets',
'The output of the file is a dictionary that was rendered by the view %s': 'De output van het bestand is een dictionary die gerenderd werd door de view %s',
'the presentations layer, views are also known as templates': 'de presentatie laag, views zijn ook bekend als templates',
'The Views': 'De Views',
'There are no controllers': 'Er zijn geen controllers',
'There are no models': 'Er zijn geen modellen',
'There are no modules': 'Er zijn geen modules',
'There are no static files': 'Er zijn geen statische bestanden',
'There are no translators, only default language is supported': 'Er zijn geen vertalingen, alleen de standaard taal wordt ondersteund.',
'There are no views': 'Er zijn geen views',
'these files are served without processing, your images go here': 'Deze bestanden worden geserveerd zonder verdere verwerking, je afbeeldingen horen hier',
'This App': 'Deze App',
'This is a copy of the scaffolding application': 'Dit is een kopie van de steiger-applicatie',
'This is the %(filename)s template': 'Dit is de %(filename)s template',
'Ticket': 'Ticket',
'Time in Cache (h:m:s)': 'Tijd in Cache (h:m:s)',
'Timestamp': 'Timestamp (timestamp)',
'to previous version.': 'naar vorige versie.',
'too short': 'te kort',
'translation strings for the application': 'vertaalstrings voor de applicatie',
'True': 'Waar',
'try': 'probeer',
'try something like': 'probeer zoiets als',
'Twitter': 'Twitter',
'Unable to check for upgrades': 'Niet mogelijk om te controleren voor upgrades',
'unable to create application "%s"': 'niet mogelijk om applicatie "%s" te maken',
'unable to delete file "%(filename)s"': 'niet mogelijk om bestand "%(filename)s" te verwijderen',
'Unable to download': 'Niet mogelijk om te downloaden',
'Unable to download app': 'Niet mogelijk om app te downloaden',
'unable to parse csv file': 'niet mogelijk om csv-bestand te parsen',
'unable to uninstall "%s"': 'niet mogelijk om "%s" te deïnstalleren',
'uncheck all': 'vink alles uit',
'uninstall': ' deïnstalleer',
'update': 'update',
'update all languages': 'update alle talen',
'Update:': 'Update:',
'upload application:': 'upload applicatie:',
'Upload existing application': 'Upload bestaande applicatie',
'upload file:': 'upload bestand',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Gebruik (...)&(...) voor AND, (...)|(...) voor OR, en ~(...) voor NOT om meer complexe queries te maken.',
'User %(id)s Logged-in': 'Gebruiker %(id)s Logged-in',
'User %(id)s Logged-out': 'Gebruiker %(id)s Logged-out',
'User %(id)s Password changed': 'Wachtwoord van gebruiker %(id)s is veranderd',
'User %(id)s Password reset': 'Wachtwoord van gebruiker %(id)s is gereset',
'User %(id)s Profile updated': 'Profiel van Gebruiker %(id)s geupdate',
'User %(id)s Registered': 'Gebruiker %(id)s Geregistreerd',
'User ID': 'User ID',
'value already in database or empty': 'waarde al in database of leeg',
'Verify Password': 'Verifieer Wachtwoord',
'versioning': 'versionering',
'Videos': 'Videos',
'View': 'View',
'view': 'view',
'Views': 'Views',
'views': 'views',
'web2py is up to date': 'web2py is up to date',
'web2py Recent Tweets': 'web2py Recente Tweets',
'Welcome': 'Welkom',
'Welcome %s': 'Welkom %s',
'Welcome to web2py': 'Welkom bij web2py',
'Welcome to web2py!': 'Welkom bij web2py!',
'Which called the function %s located in the file %s': 'Die functie %s aanriep en zich bevindt in het bestand %s',
'Working...': 'Working...',
'YES': 'JA',
'You are successfully running web2py': 'Je draait web2py succesvol',
'You can modify this application and adapt it to your needs': 'Je kan deze applicatie aanpassen naar je eigen behoeften',
'You visited the url %s': 'Je bezocht de url %s',
}
|
zvolsky/zv
|
languages/nl.py
|
Python
|
agpl-3.0
| 17,128
|
[
"Elk"
] |
3c48c44678a1e1faa5095d1a5d4eede43fbbc0361ed68d1c1f197c8792ac912d
|
import gen_utils
from module_base import ModuleBase
from module_mixins import NoConfigModuleMixin
import module_utils
import vtk
class transformPolyData(NoConfigModuleMixin, ModuleBase):
def __init__(self, module_manager):
# initialise our base class
ModuleBase.__init__(self, module_manager)
self._transformPolyData = vtk.vtkTransformPolyDataFilter()
NoConfigModuleMixin.__init__(
self, {'vtkTransformPolyDataFilter' : self._transformPolyData})
module_utils.setup_vtk_object_progress(self, self._transformPolyData,
'Transforming geometry')
self.sync_module_logic_with_config()
def close(self):
# we play it safe... (the graph_editor/module_manager should have
# disconnected us by now)
for input_idx in range(len(self.get_input_descriptions())):
self.set_input(input_idx, None)
# this will take care of all display thingies
NoConfigModuleMixin.close(self)
# get rid of our reference
del self._transformPolyData
def get_input_descriptions(self):
return ('vtkPolyData', 'vtkTransform')
def set_input(self, idx, inputStream):
if idx == 0:
self._transformPolyData.SetInput(inputStream)
else:
self._transformPolyData.SetTransform(inputStream)
def get_output_descriptions(self):
return (self._transformPolyData.GetOutput().GetClassName(), )
def get_output(self, idx):
return self._transformPolyData.GetOutput()
def logic_to_config(self):
pass
def config_to_logic(self):
pass
def view_to_config(self):
pass
def config_to_view(self):
pass
def execute_module(self):
self._transformPolyData.Update()
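# Usage note (based on the input/output descriptions above): in a DeVIDE network
# this module expects the polydata to transform on input 0 and a vtkTransform on
# input 1; execute_module() then updates the filter and the transformed polydata
# is exposed as the single output.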
|
nagyistoce/devide
|
modules/filters/transformPolyData.py
|
Python
|
bsd-3-clause
| 1,855
|
[
"VTK"
] |
4f604ab61ebe8b895856799ce344729ce35abf949da0ec1a27f6ae39ed66323f
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example shows how to use validateOnly SOAP header.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CampaignService.mutate
Api: AdWordsOnly
"""
__author__ = ('api.kwinter@gmail.com (Kevin Winter)'
'Joseph DiLallo')
import suds
from googleads import adwords
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def main(client, ad_group_id):
# Initialize appropriate service with validate only flag enabled.
client.validate_only = True
ad_group_ad_service = client.GetService('AdGroupAdService', version='v201502')
# Construct operations to add a text ad.
operations = [{
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroupAd',
'adGroupId': ad_group_id,
'ad': {
'xsi_type': 'TextAd',
'finalUrls': {
'urls': ['http://www.example.com']
},
'displayUrl': 'example.com',
'description1': 'Visit the Red Planet in style.',
'description2': 'Low-gravity fun for everyone!',
'headline': 'Luxury Cruise to Mars'
}
}
}]
ad_group_ad_service.mutate(operations)
# No error means the request is valid.
# Now let's check an invalid ad using a very long line to trigger an error.
operations = [{
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroupAd',
'adGroupId': ad_group_id,
'ad': {
'xsi_type': 'TextAd',
'url': 'http://www.example.com',
'displayUrl': 'example.com',
'description1': 'Visit the Red Planet in style.',
'description2': 'Low-gravity fun for all astronauts in orbit',
'headline': 'Luxury Cruise to Mars'
}
}
}]
try:
ad_group_ad_service.mutate(operations)
except suds.WebFault, e:
print 'Validation correctly failed with \'%s\'.' % str(e)
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID)
|
coxmediagroup/googleads-python-lib
|
examples/adwords/v201502/campaign_management/validate_text_ad.py
|
Python
|
apache-2.0
| 2,880
|
[
"VisIt"
] |
237f8e73c493cb9fded6d4409e2855c0eb1bf4dc66fb9048cf18a981866bdc68
|
from copy import deepcopy
from decimal import Decimal
from collections import OrderedDict
from collections import Iterable
from enum import Enum
from my_visitor import NodeVisitor
from my_ast import FuncDecl
from my_ast import VarDecl
from my_ast import Else
from my_grammar import *
# The Interpreter has been abandoned for now. Maybe I'll get back to it if I see a use for it
class Interpreter(NodeVisitor):
def __init__(self, file_name=None):
super().__init__()
self.file_name = file_name
def visit_program(self, node):
self.visit(node.block)
def visit_compound(self, node):
for child in node.children:
temp = self.visit(child)
if temp is not None:
return temp
def visit_typedeclaration(self, node):
pass
def visit_vardecl(self, node):
self.define(node.var_node.value, None)
def visit_type(self, node):
return self.search_scopes(node.value)
def visit_noop(self, node):
pass
def visit_if(self, node):
for x, comp in enumerate(node.comps):
c = self.visit(comp)
if c == TRUE:
return self.visit(node.blocks[x])
elif isinstance(comp, Else):
return self.visit(node.blocks[x])
def visit_else(self, node):
pass
def visit_while(self, node):
while self.visit(node.comp) == TRUE:
if self.visit(node.block) == BREAK:
break
def visit_for(self, node):
iterator = self.visit(node.iterator)
for x in iterator:
if isinstance(x, Iterable) and not isinstance(x, str):
if len(x) != len(node.elements):
raise SyntaxError('file={} line={}: Unpacking to wrong number of elements. elements: {}, container length: {}'.format(self.file_name, node.line_num, len(node.elements), len(x)))
for y, element in enumerate(node.elements):
self.define(element.value, x[y])
else:
self.define(node.elements[0].value, x)
self.visit(node.block)
def visit_loopblock(self, node):
for child in node.children:
temp = self.visit(child)
if temp == CONTINUE or temp == BREAK:
return temp
def visit_switch(self, node):
switch_var = self.visit(node.value)
cases = OrderedDict() # TODO: see if a list will work instead
for case in node.cases:
if case.value == DEFAULT:
cases[DEFAULT] = case.block
else:
cases[self.visit(case.value)] = case.block
if switch_var not in cases:
switch_var = DEFAULT
index = list(cases.keys()).index(switch_var)
c = list(cases.values())
result = None
while result != BREAK and index < len(c):
result = self.visit(c[index])
index += 1
def visit_case(self, node):
pass
@staticmethod
def visit_break(_):
return BREAK
@staticmethod
def visit_continue(_):
return CONTINUE
@staticmethod
def visit_pass(_):
return
def visit_binop(self, node):
op = node.op.value
left = self.visit(node.left)
right = self.visit(node.right)
if op == PLUS:
return left + right
elif op == MINUS:
return left - right
elif op == MUL:
return left * right
elif op == FLOORDIV:
return left // right
elif op == DIV:
return Decimal(left / right)
elif op == MOD:
return left % right
elif op == POWER:
return left ** right
elif op == AND:
return left and right
elif op == OR:
return left or right
elif op == EQUALS:
return TRUE if left == right else FALSE
elif op == NOT_EQUALS:
return TRUE if left != right else FALSE
elif op == LESS_THAN:
return TRUE if left < right else FALSE
elif op == LESS_THAN_OR_EQUAL_TO:
return TRUE if left <= right else FALSE
elif op == GREATER_THAN:
return TRUE if left > right else FALSE
elif op == GREATER_THAN_OR_EQUAL_TO:
return TRUE if left >= right else FALSE
elif op == CAST:
cast_type = node.right.value
if cast_type == INT:
return int(left)
elif cast_type == DEC:
return Decimal(left)
elif cast_type == FLOAT:
return float(left)
elif cast_type == COMPLEX:
return complex(left)
elif cast_type == STR:
return str(left)
elif cast_type == BOOL:
return bool(left)
elif cast_type == BYTES:
return bytes(left)
elif cast_type == LIST:
return list(left)
elif cast_type == DICT:
return dict(left)
elif cast_type == ENUM:
return Enum(left.value)
elif cast_type in (ANY, FUNC):
raise TypeError('file={} line={}: Cannot cast to type {}'.format(self.file_name, node.line_num, cast_type))
def visit_unaryop(self, node):
op = node.op.value
if op == PLUS:
return +self.visit(node.expr)
elif op == MINUS:
return -self.visit(node.expr)
elif op == NOT:
return FALSE if self.visit(node.expr) == TRUE else TRUE
def visit_range(self, node):
left = self.visit(node.left)
right = self.visit(node.right)
return range(left, right)
def visit_assign(self, node):
if isinstance(node.left, VarDecl):
var_name = node.left.value.value
if node.left.type.value == FLOAT:
node.right.value = float(node.right.value)
else:
var_name = node.left.value
var_value = self.top_scope.get(var_name)
if var_value and isinstance(var_value, float):
node.right.value = float(node.right.value)
self.define(var_name, self.visit(node.right))
def visit_opassign(self, node):
var_name = node.left.value
op = node.op.value
right = self.visit(node.right)
if op == PLUS_ASSIGN:
self.top_scope[var_name] += right
elif op == MINUS_ASSIGN:
self.top_scope[var_name] -= right
elif op == MUL_ASSIGN:
self.top_scope[var_name] *= right
elif op == FLOORDIV_ASSIGN:
self.top_scope[var_name] //= right
elif op == DIV_ASSIGN:
self.top_scope[var_name] /= right
elif op == MOD_ASSIGN:
self.top_scope[var_name] %= right
elif op == POWER_ASSIGN:
self.top_scope[var_name] **= right
def visit_var(self, node):
return self.search_scopes(node.value)
def visit_funcdecl(self, node):
self.define(node.name.value, node)
@staticmethod
def visit_anonymousfunc(node):
return node
def visit_funccall(self, node):
if node.name.value in BUILTIN_FUNCTIONS:
args = []
for arg in node.arguments:
args.append(self.visit(arg))
self.search_scopes(node.name.value)(*args)
else:
func = deepcopy(self.search_scopes(node.name.value))
func.args = node.arguments
self.new_scope()
if hasattr(func, '_scope'):
self.top_scope.update(func._scope)
for x, key in enumerate(func.parameters.keys()):
self.define(key, self.visit(node.arguments[x]))
return_var = self.visit(func.body)
if isinstance(return_var, FuncDecl):
scope = self.top_scope
if return_var.name.value in scope:
del scope[return_var.name.value]
return_var._scope = scope
if return_var is None and func.return_type.value != VOID:
raise TypeError('file={} line={}'.format(self.file_name, node.line_num))
self.drop_top_scope()
return return_var
def visit_return(self, node):
return self.visit(node.value)
def visit_constant(self, node):
if node.value == TRUE:
return TRUE
elif node.value == FALSE:
return FALSE
elif node.value == NAN:
return Decimal(NAN)
elif node.value == INF:
return Decimal(INF)
elif node.value == NEGATIVE_INF:
return Decimal(NEGATIVE_INF)
else:
raise NotImplementedError('file={} line={}'.format(self.file_name, node.line_num))
@staticmethod
def visit_num(node):
return node.value
@staticmethod
def visit_str(node):
return node.value
def visit_collection(self, node):
items = []
for item in node.items:
items.append(self.visit(item))
if node.read_only:
return tuple(items)
return items
def visit_hashmap(self, node):
types = {}
for key, val in node.items.items():
types[key] = self.visit(val)
return types
def visit_collectionaccess(self, node):
collection = self.search_scopes(node.collection.value)
key = self.visit(node.key)
if not key:
key = node.key.value
return collection[key]
def interpret(self, tree):
return self.visit(tree)
def visit_print(self, node):
print(self.visit(node.value))
if __name__ == '__main__':
from my_lexer import Lexer
from my_parser import Parser
from my_preprocessor import Preprocessor
file = 'test.my'
code = open(file).read()
lexer = Lexer(code, file)
parser = Parser(lexer)
t = parser.parse()
symtab_builder = Preprocessor(parser.file_name)
symtab_builder.check(t)
if not symtab_builder.warnings:
interpreter = Interpreter(parser.file_name)
interpreter.interpret(t)
else:
print('Did not run')
|
Ayehavgunne/Mythril
|
my_interpreter.py
|
Python
|
unlicense
| 8,348
|
[
"VisIt"
] |
e8dcc9ce4cfd0bac1728add808217265452f61f7128c1427fc92cb62b28892ca
|
# -*- coding: utf-8 -*-
#
# test_refractory.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import unittest
import numpy as np
import nest
"""
Assert that all neuronal models that have a refractory period implement it
correctly (except for Hodgkin-Huxley models which cannot be tested).
Details
-------
Submit the neuron to a constant excitatory current so that it spikes in the
interval [0, 50] ms.
A ``spike_recorder`` is used to detect the time at which the neuron spikes and
a ``voltmeter`` is then used to make sure the voltage is clamped to ``V_reset``
during exactly ``t_ref``.
For neurons that do not clamp the potential, use a very large current to
trigger immediate spiking.
For untested models please see the ignore_model list.
"""
# --------------------------------------------------------------------------- #
# Models, specific parameters
# --------------------------------------------------------------------------- #
# Neurons that must be tested through a high current to spike immediately
# (t_ref = interspike)
neurons_interspike = [
"amat2_psc_exp",
"ht_neuron",
"mat2_psc_exp",
]
neurons_interspike_ps = [
"iaf_psc_alpha_canon",
"iaf_psc_alpha_ps",
"iaf_psc_delta_ps",
"iaf_psc_exp_ps",
]
# Models that first clamp the membrane potential at a higher value
neurons_with_clamping = [
"aeif_psc_delta_clopath",
]
# Multi-compartment models
mc_models = [
"iaf_cond_alpha_mc",
]
# Models that cannot be tested
ignore_model = [
"gif_pop_psc_exp", # This one commits spikes at same time
"hh_cond_exp_traub", # This one does not support V_reset
"hh_cond_beta_gap_traub", # This one does not support V_reset
"hh_psc_alpha", # This one does not support V_reset
"hh_psc_alpha_clopath", # This one does not support V_reset
"hh_psc_alpha_gap", # This one does not support V_reset
"pp_cond_exp_mc_urbanczik", # This one does not support V_reset
"iaf_psc_exp_ps_lossless", # This one use presice times
"siegert_neuron", # This one does not connect to voltmeter
"step_rate_generator" # No regular neuron model
]
tested_models = [m for m in nest.node_models
if nest.GetDefaults(m, "element_type") == "neuron"
and m not in ignore_model]
# Additional parameters for the connector
add_connect_param = {
"iaf_cond_alpha_mc": {"receptor_type": 7},
}
# --------------------------------------------------------------------------- #
# Simulation time and refractory time limits
# --------------------------------------------------------------------------- #
simtime = 100
resolution = 0.1
# --------------------------------------------------------------------------- #
# Test class
# --------------------------------------------------------------------------- #
class TestRefractoryCase(unittest.TestCase):
"""
Check the correct implementation of refractory time in all neuronal models.
"""
def reset(self):
nest.ResetKernel()
nest.resolution = resolution
nest.rng_seed = 123456
def compute_reftime(self, model, sr, vm, neuron):
'''
Compute the refractory time of the neuron.
Parameters
----------
model : str
Name of the neuronal model.
sr : tuple
node ID of the spike recorder.
vm : tuple
node ID of the voltmeter.
neuron : tuple
node ID of the recorded neuron.
Returns
-------
t_ref_sim : double
Value of the simulated refractory period.
'''
spike_times = nest.GetStatus(sr, "events")[0]["times"]
if model in neurons_interspike:
            # Spike emitted at the next timestep, so subtract the resolution
return spike_times[1]-spike_times[0]-resolution
elif model in neurons_interspike_ps:
return spike_times[1]-spike_times[0]
else:
Vr = nest.GetStatus(neuron, "V_reset")[0]
times = nest.GetStatus(vm, "events")[0]["times"]
# Index of the 2nd spike
idx_max = np.argwhere(times == spike_times[1])[0][0]
name_Vm = "V_m.s" if model in mc_models else "V_m"
Vs = nest.GetStatus(vm, "events")[0][name_Vm]
            # Get the index at which the spike occurred
idx_spike = np.argwhere(times == spike_times[0])[0][0]
# Find end of refractory period between 1st and 2nd spike
idx_end = np.where(
np.isclose(Vs[idx_spike:idx_max], Vr, 1e-6))[0][-1]
t_ref_sim = idx_end * resolution
return t_ref_sim
def test_refractory_time(self):
'''
Check that refractory time implementation is correct.
'''
for model in tested_models:
self.reset()
if "t_ref" not in nest.GetDefaults(model):
continue
            # Use a fixed, non-default refractory period
t_ref = 1.7
# Create the neuron and devices
nparams = {"t_ref": t_ref}
neuron = nest.Create(model, params=nparams)
name_Vm = "V_m.s" if model in mc_models else "V_m"
vm_params = {"interval": resolution, "record_from": [name_Vm]}
vm = nest.Create("voltmeter", params=vm_params)
sr = nest.Create("spike_recorder")
cg = nest.Create("dc_generator", params={"amplitude": 1200.})
# For models that do not clamp V_m, use very large current to
# trigger almost immediate spiking => t_ref almost equals
# interspike
if model in neurons_interspike_ps:
nest.SetStatus(cg, "amplitude", 10000000.)
elif model == 'ht_neuron':
                # ht_neuron takes too long with a very large amplitude
nest.SetStatus(cg, "amplitude", 2000.)
elif model in neurons_interspike:
nest.SetStatus(cg, "amplitude", 15000.)
# Connect them and simulate
nest.Connect(vm, neuron)
nest.Connect(cg, neuron, syn_spec=add_connect_param.get(model, {}))
nest.Connect(neuron, sr)
nest.Simulate(simtime)
# Get and compare t_ref
t_ref_sim = self.compute_reftime(model, sr, vm, neuron)
if model in neurons_with_clamping:
t_ref_sim = t_ref_sim - nest.GetStatus(neuron, "t_clamp")[0]
# Approximate result for precise spikes (interpolation error)
if model in neurons_interspike_ps:
self.assertAlmostEqual(t_ref, t_ref_sim, places=3,
msg='''Error in model {}:
{} != {}'''.format(
model, t_ref, t_ref_sim))
else:
self.assertAlmostEqual(t_ref, t_ref_sim,
msg='''Error in model {}:
{} != {}'''.format(
model, t_ref, t_ref_sim))
# --------------------------------------------------------------------------- #
# Run the comparisons
# --------------------------------------------------------------------------- #
def suite():
return unittest.makeSuite(TestRefractoryCase, "test")
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == '__main__':
run()
|
heplesser/nest-simulator
|
testsuite/pytests/test_refractory.py
|
Python
|
gpl-2.0
| 8,170
|
[
"NEURON"
] |
ea8005a9133cc06faeb0ad3ee35410650ee6b19095989f38cb1749309f9e1f91
|
# Example for: topology.make(), topology.write()
# This creates a topology library for heavy atoms from the
# CHARMM all-atom topology library:
from modeller import *
env = environ()
tpl = env.libs.topology
# Read CHARMM all-atom topology library:
tpl.read(file='${LIB}/top.lib')
# Keep only heavy atoms (TOPOLOGY_MODEL = 3)
tpl.make(submodel=3)
# Write the resulting topology library to a new file:
tpl.write(file='top_heav.lib')
|
bjornwallner/proq2-server
|
apps/modeller9v8/examples/commands/make_topology_model.py
|
Python
|
gpl-3.0
| 437
|
[
"CHARMM"
] |
23a3e3345709095b2e87a7ecc50f3bd2ef4c282f0d37e1fbb3099f381d6e1dce
|
# -*- coding: utf-8 -*-
#
# structural_plasticity.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Structural Plasticity example
-----------------------------
This example shows a simple network of two populations where structural
plasticity is used. The network has 1000 neurons, 80% excitatory and
20% inhibitory. The simulation starts without any connectivity. A set of
homeostatic rules are defined, according to which structural plasticity will
create and delete synapses dynamically during the simulation until a desired
level of electrical activity is reached. The model of structural plasticity
used here corresponds to the formulation presented in [1]_.
At the end of the simulation, a plot of the evolution of the connectivity
in the network and the average calcium concentration in the neurons is created.
References
~~~~~~~~~~
.. [1] Butz, M., and van Ooyen, A. (2013). A simple rule for dendritic spine and axonal bouton formation can
account for cortical reorganization after focal retinal lesions. PLoS Comput. Biol. 9 (10), e1003259.
"""
####################################################################################
# First, we have to import all necessary modules.
import nest
import numpy
import matplotlib.pyplot as plt
import sys
####################################################################################
# We define general simulation parameters
class StructralPlasticityExample:
def __init__(self):
# simulated time (ms)
self.t_sim = 200000.0
# simulation step (ms).
self.dt = 0.1
self.number_excitatory_neurons = 800
self.number_inhibitory_neurons = 200
# Structural_plasticity properties
self.update_interval = 10000.0
self.record_interval = 1000.0
# rate of background Poisson input
self.bg_rate = 10000.0
self.neuron_model = 'iaf_psc_exp'
####################################################################################
# In this implementation of structural plasticity, neurons grow
# connection points called synaptic elements. Synapses can be created
# between compatible synaptic elements. The growth of these elements is
# guided by homeostatic rules, defined as growth curves.
# Here we specify the growth curves for synaptic elements of excitatory
# and inhibitory neurons.
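        # As a rough sketch (following Butz & van Ooyen, 2013 [1]; this formula
        # is an assumption and not taken from the example itself), the
        # 'gaussian' growth curve changes a neuron's element count z as
        #   dz/dt = growth_rate * (2 * exp(-((Ca - xi) / zeta)**2) - 1)
        # with xi = (eta + eps) / 2 and zeta = (eps - eta) / (2 * sqrt(ln(2))),
        # so elements grow while the calcium trace Ca lies between eta and eps
        # and retract outside that window.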
# Excitatory synaptic elements of excitatory neurons
self.growth_curve_e_e = {
'growth_curve': "gaussian",
'growth_rate': 0.0001, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': 0.05, # Ca2+
}
# Inhibitory synaptic elements of excitatory neurons
self.growth_curve_e_i = {
'growth_curve': "gaussian",
'growth_rate': 0.0001, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': self.growth_curve_e_e['eps'], # Ca2+
}
# Excitatory synaptic elements of inhibitory neurons
self.growth_curve_i_e = {
'growth_curve': "gaussian",
'growth_rate': 0.0004, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': 0.2, # Ca2+
}
# Inhibitory synaptic elements of inhibitory neurons
self.growth_curve_i_i = {
'growth_curve': "gaussian",
'growth_rate': 0.0001, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': self.growth_curve_i_e['eps'] # Ca2+
}
# Now we specify the neuron model.
self.model_params = {'tau_m': 10.0, # membrane time constant (ms)
# excitatory synaptic time constant (ms)
'tau_syn_ex': 0.5,
# inhibitory synaptic time constant (ms)
'tau_syn_in': 0.5,
't_ref': 2.0, # absolute refractory period (ms)
'E_L': -65.0, # resting membrane potential (mV)
'V_th': -50.0, # spike threshold (mV)
'C_m': 250.0, # membrane capacitance (pF)
'V_reset': -65.0 # reset potential (mV)
}
self.nodes_e = None
self.nodes_i = None
self.mean_ca_e = []
self.mean_ca_i = []
self.total_connections_e = []
self.total_connections_i = []
####################################################################################
# We initialize variables for the postsynaptic currents of the
# excitatory, inhibitory, and external synapses. These values were
# calculated from a PSP amplitude of 1 for excitatory synapses,
# -1 for inhibitory synapses and 0.11 for external synapses.
self.psc_e = 585.0
self.psc_i = -585.0
self.psc_ext = 6.2
def prepare_simulation(self):
nest.ResetKernel()
nest.set_verbosity('M_ERROR')
####################################################################################
# We set global kernel parameters. Here we define the resolution
# for the simulation, which is also the time resolution for the update
# of the synaptic elements.
nest.SetKernelStatus(
{
'resolution': self.dt
}
)
####################################################################################
# Set Structural Plasticity synaptic update interval which is how often
# the connectivity will be updated inside the network. It is important
# to notice that synaptic elements and connections change on different
# time scales.
nest.SetKernelStatus({
'structural_plasticity_update_interval': self.update_interval,
})
####################################################################################
# Now we define Structural Plasticity synapses. In this example we create
# two synapse models, one for excitatory and one for inhibitory synapses.
# Then we define that excitatory synapses can only be created between a
# pre-synaptic element called `Axon_ex` and a postsynaptic element
# called `Den_ex`. In a similar manner, synaptic elements for inhibitory
# synapses are defined.
nest.CopyModel('static_synapse', 'synapse_ex')
nest.SetDefaults('synapse_ex', {'weight': self.psc_e, 'delay': 1.0})
nest.CopyModel('static_synapse', 'synapse_in')
nest.SetDefaults('synapse_in', {'weight': self.psc_i, 'delay': 1.0})
nest.SetKernelStatus({
'structural_plasticity_synapses': {
'synapse_ex': {
'synapse_model': 'synapse_ex',
'post_synaptic_element': 'Den_ex',
'pre_synaptic_element': 'Axon_ex',
},
'synapse_in': {
'synapse_model': 'synapse_in',
'post_synaptic_element': 'Den_in',
'pre_synaptic_element': 'Axon_in',
},
}
})
def create_nodes(self):
"""
Assign growth curves to synaptic elements
"""
synaptic_elements = {
'Den_ex': self.growth_curve_e_e,
'Den_in': self.growth_curve_e_i,
'Axon_ex': self.growth_curve_e_e,
}
synaptic_elements_i = {
'Den_ex': self.growth_curve_i_e,
'Den_in': self.growth_curve_i_i,
'Axon_in': self.growth_curve_i_i,
}
####################################################################################
        # Then it is time to create one population of excitatory neurons with
        # 80% of the total network size and another population of inhibitory
        # neurons with the remaining 20%.
self.nodes_e = nest.Create('iaf_psc_alpha',
self.number_excitatory_neurons,
{'synaptic_elements': synaptic_elements})
self.nodes_i = nest.Create('iaf_psc_alpha',
self.number_inhibitory_neurons,
{'synaptic_elements': synaptic_elements_i})
self.nodes_e.synaptic_elements = synaptic_elements
self.nodes_i.synaptic_elements = synaptic_elements_i
def connect_external_input(self):
"""
We create and connect the Poisson generator for external input
"""
noise = nest.Create('poisson_generator')
noise.rate = self.bg_rate
nest.Connect(noise, self.nodes_e, 'all_to_all',
{'weight': self.psc_ext, 'delay': 1.0})
nest.Connect(noise, self.nodes_i, 'all_to_all',
{'weight': self.psc_ext, 'delay': 1.0})
####################################################################################
    # In order to save the average calcium concentration in each
    # population over time, we create the function ``record_ca``. Here we use
# the value of `Ca` for every neuron in the network and then
# store the average.
def record_ca(self):
ca_e = self.nodes_e.Ca, # Calcium concentration
self.mean_ca_e.append(numpy.mean(ca_e))
ca_i = self.nodes_i.Ca, # Calcium concentration
self.mean_ca_i.append(numpy.mean(ca_i))
####################################################################################
# In order to save the state of the connectivity in the network through time
# we create the function ``record_connectivity``. Here we retrieve the number
# of connected pre-synaptic elements of each neuron. The total amount of
# excitatory connections is equal to the total amount of connected excitatory
# pre-synaptic elements. The same applies for inhibitory connections.
def record_connectivity(self):
syn_elems_e = self.nodes_e.synaptic_elements
syn_elems_i = self.nodes_i.synaptic_elements
self.total_connections_e.append(sum(neuron['Axon_ex']['z_connected']
for neuron in syn_elems_e))
self.total_connections_i.append(sum(neuron['Axon_in']['z_connected']
for neuron in syn_elems_i))
####################################################################################
# We define a function to plot the recorded values
# at the end of the simulation.
def plot_data(self):
fig, ax1 = plt.subplots()
ax1.axhline(self.growth_curve_e_e['eps'],
linewidth=4.0, color='#9999FF')
ax1.plot(self.mean_ca_e, 'b',
label='Ca Concentration Excitatory Neurons', linewidth=2.0)
ax1.axhline(self.growth_curve_i_e['eps'],
linewidth=4.0, color='#FF9999')
ax1.plot(self.mean_ca_i, 'r',
label='Ca Concentration Inhibitory Neurons', linewidth=2.0)
ax1.set_ylim([0, 0.275])
ax1.set_xlabel("Time in [s]")
ax1.set_ylabel("Ca concentration")
ax2 = ax1.twinx()
ax2.plot(self.total_connections_e, 'm',
label='Excitatory connections', linewidth=2.0, linestyle='--')
ax2.plot(self.total_connections_i, 'k',
label='Inhibitory connections', linewidth=2.0, linestyle='--')
ax2.set_ylim([0, 2500])
ax2.set_ylabel("Connections")
ax1.legend(loc=1)
ax2.legend(loc=4)
plt.savefig('StructuralPlasticityExample.eps', format='eps')
####################################################################################
# It is time to specify how we want to perform the simulation. In this
# function we first enable structural plasticity in the network and then we
# simulate in steps. On each step we record the calcium concentration and the
# connectivity. At the end of the simulation, the plot of connections and
# calcium concentration through time is generated.
def simulate(self):
if nest.NumProcesses() > 1:
sys.exit("For simplicity, this example only works " +
"for a single process.")
nest.EnableStructuralPlasticity()
print("Starting simulation")
sim_steps = numpy.arange(0, self.t_sim, self.record_interval)
for i, step in enumerate(sim_steps):
nest.Simulate(self.record_interval)
self.record_ca()
self.record_connectivity()
if i % 20 == 0:
print("Progress: " + str(i / 2) + "%")
print("Simulation finished successfully")
####################################################################################
# Finally we take all the functions that we have defined and create the sequence
# for our example. We prepare the simulation, create the nodes for the network,
# connect the external input and then simulate. Please note that as we are
# simulating 200 biological seconds in this example, it will take a few minutes
# to complete.
if __name__ == '__main__':
example = StructralPlasticityExample()
# Prepare simulation
example.prepare_simulation()
example.create_nodes()
example.connect_external_input()
# Start simulation
example.simulate()
example.plot_data()
|
lekshmideepu/nest-simulator
|
pynest/examples/structural_plasticity.py
|
Python
|
gpl-2.0
| 13,940
|
[
"Gaussian",
"NEURON"
] |
adba6929d5fccb010f36ad9c5f09b53a837b2ec5929fdb02a3976acea309a6ca
|
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
##############################################################################
# Changes
#
# 20130827 (by Remi)
# CombineRGBA can now also accept RGB
# Factored code common between Convert and CombineRGBA
# Removes 'geometry' and related ports from most modules, as they were
# *already ignored* except by the Scale module
#
# 20090521 (by Emanuele)
# Added path configuration option so imagemagick does not to be in the path
# Removed ImageMagick presence check
#
# 20081002:
# Added CombineRGBA to create image from channels.
# Moved quiet to configuration
# Fixed bug with GaussianBlur
from __future__ import division
from vistrails.core import debug
import vistrails.core.modules.basic_modules as basic
import vistrails.core.modules.module_registry
from vistrails.core.modules.vistrails_module import Module, ModuleError, \
new_module, IncompleteImplementation
from vistrails.core.system import list2cmdline
import os
################################################################################
class ImageMagick(Module):
"""ImageMagick is the base Module for all Modules in the ImageMagick
package. It simply defines some helper methods for subclasses.
"""
def compute(self):
raise IncompleteImplementation
def input_file_description(self):
"""Returns a fully described name in the ImageMagick format.
For example, a file stored in PNG format may be described by:
- 'graphic.png' indicates the filename 'graphic.png', using
the PNG file format.
- 'png:graphic' indicates the filename 'graphic', still using
the PNG file format.
"""
i = self.get_input("input")
if self.has_input('inputFormat'):
return self.get_input('inputFormat') + ':' + i.name
else:
return i.name
def create_output_file(self):
"""Creates a File with the output format given by the outputFormat
port.
"""
if self.has_input('outputFormat'):
s = '.' + self.get_input('outputFormat')
return self.interpreter.filePool.create_file(suffix=s)
else:
return self.interpreter.filePool.create_file(suffix='.png')
def run(self, *args):
"""run(*args), runs ImageMagick's 'convert' on a shell, passing all
arguments to the program.
"""
path = None
if configuration.check('path'):
path = configuration.path
if path:
cmd = os.path.join(path,'convert')
else:
cmd = 'convert'
cmd = [cmd] + list(args)
cmdline = list2cmdline(cmd)
if not configuration.quiet:
debug.log(cmdline)
r = os.system(cmdline)
if r != 0:
raise ModuleError(self, "system call failed: %r" % cmdline)
class Convert(ImageMagick):
"""Convert is the base Module for VisTrails Modules in the ImageMagick
package that transform a single input into a single output. Each subclass has
a descriptive name of the operation it implements."""
def compute(self):
o = self.create_output_file()
i = self.input_file_description()
self.run(i, o.name)
self.set_output("output", o)
class CombineRGBA(ImageMagick):
"""Combines channels as separate images into a single RGBA file."""
def compute(self):
o = self.create_output_file()
r = self.get_input("r")
g = self.get_input("g")
b = self.get_input("b")
a = self.force_get_input("a")
if a is not None:
self.run(r.name, g.name, b.name, a.name,
'-channel', 'RGBA',
'-combine', o.name)
else:
self.run(r.name, g.name, b.name,
'-channel', 'RGB',
'-combine', o.name)
self.set_output("output", o)
class Scale(Convert):
"""Scale rescales the input image to the given geometry
description.
"""
def geometry_description(self):
"""returns a string with the description of the geometry as
indicated by the appropriate ports (geometry or width and height)"""
# if complete geometry is available, ignore rest
if self.has_input("geometry"):
return self.get_input("geometry")
elif self.has_input("width"):
w = self.get_input("width")
h = self.get_input("height")
return "'%sx%s'" % (w, h)
else:
raise ModuleError(self, "Needs geometry or width/height")
def compute(self):
o = self.create_output_file()
self.run(self.input_file_description(),
"-scale",
self.geometry_description(),
o.name)
self.set_output("output", o)
class GaussianBlur(Convert):
"""GaussianBlur convolves the image with a Gaussian filter of given
radius and standard deviation.
"""
def compute(self):
(radius, sigma) = self.get_input('radiusSigma')
o = self.create_output_file()
self.run(self.input_file_description(),
"-blur",
"%sx%s" % (radius, sigma),
o.name)
self.set_output("output", o)
no_param_options = [("Negate", "-negate",
"""Negate performs the two's complement negation of the image."""
),
("EqualizeHistogram", "-equalize", None),
("Enhance", "-enhance", None),
("VerticalFlip", "-flip", None),
("HorizontalFlip", "-flop", None),
("FloydSteinbergDither", "-dither", None),
("IncreaseContrast", "-contrast", None),
("Despeckle", "-despeckle", None),
("Normalize", "-normalize", None)]
def no_param_options_method_dict(optionName):
"""Creates a method dictionary for a module that takes no extra
parameters. This dictionary will be used to dynamically create a
VisTrails module.
"""
def compute(self):
o = self.create_output_file()
i = self.input_file_description()
self.run(i, optionName, o.name)
self.set_output("output", o)
return {'compute': compute}
float_param_options = [("DetectEdges", "-edge", "radius", "filter radius"),
("Emboss", "-emboss", "radius", "filter radius"),
("GammaCorrect", "-gamma", "gamma", "gamma correction factor"),
("MedianFilter", "-median", "radius", "filter radius")]
def float_param_options_method_dict(optionName, portName):
"""Creates a method dictionary for a module that has one port taking a
floating-point value. This dictionary will be used to dynamically
create a VisTrails module.
"""
def compute(self):
o = self.create_output_file()
optionValue = self.get_input(portName)
i = self.input_file_description()
self.run(i, optionName, str(optionValue), o.name)
self.set_output("output", o)
return {'compute': compute}
################################################################################
def initialize():
def parse_error_if_not_equal(s, expected):
if s != expected:
err = "Parse error on version line. Was expecting '%s', got '%s'"
raise RuntimeError(err % (s, expected))
reg = vistrails.core.modules.module_registry.get_module_registry()
reg.add_module(ImageMagick, abstract=True)
reg.add_module(Convert)
reg.add_input_port(Convert, "input", (basic.File, 'the input file'))
reg.add_input_port(Convert, "inputFormat", (basic.String, 'coerce interpretation of file to this format'))
reg.add_output_port(Convert, "output", (basic.File, 'the output file'))
reg.add_input_port(Convert, "outputFormat", (basic.String, 'Force output to be of this format'))
for (name, opt, doc_string) in no_param_options:
m = new_module(Convert, name, no_param_options_method_dict(opt),
docstring=doc_string)
reg.add_module(m)
for (name, opt, paramName, paramComment) in float_param_options:
m = new_module(Convert, name, float_param_options_method_dict(opt, paramName))
reg.add_module(m)
reg.add_input_port(m, paramName, (basic.Float, paramComment))
reg.add_module(GaussianBlur)
reg.add_input_port(GaussianBlur, "radiusSigma", [(basic.Float, 'radius'), (basic.Float, 'sigma')])
reg.add_module(Scale)
reg.add_input_port(Scale, "geometry", (basic.String, 'ImageMagick geometry'))
reg.add_input_port(Scale, "width", (basic.String, 'width of the geometry for operation'))
reg.add_input_port(Scale, "height", (basic.String, 'height of the geometry for operation'))
reg.add_module(CombineRGBA)
reg.add_input_port(CombineRGBA, "r", basic.File)
reg.add_input_port(CombineRGBA, "g", basic.File)
reg.add_input_port(CombineRGBA, "b", basic.File)
reg.add_input_port(CombineRGBA, "a", basic.File, optional=True)
reg.add_input_port(CombineRGBA, "outputFormat", basic.String)
reg.add_output_port(CombineRGBA, "output", basic.File)
################################################################################
|
hjanime/VisTrails
|
vistrails/packages/ImageMagick/init.py
|
Python
|
bsd-3-clause
| 11,223
|
[
"Gaussian"
] |
0c6703938f44bd908b6bb92cb81e362d3e5de527c59a0bb17262b2ba5a58762b
|
# -*- coding: utf-8 -*-
import logging
from pybel.utils import ensure_quotes
def write_metabolites_proteins_bel(manager, file=None):
"""Writes the metabolite-protein association relations found in HMDB into a BEL document.
:param bio2bel_hmdb.Manager manager: Manager object connected to the local HMDB database
:param file file: A writeable file or file like. Defaults to stdout
"""
interactions = manager.get_metabolite_protein_interactions()
for interaction in interactions:
print('SET Citation = {"Human Metabolite Database"}', file=file)
print('SET Evidence = "Database Entry"', file=file)
print('SET Confidence = "Axiomatic"', file=file)
protein = interaction.protein.uniprot_id
metabolite = interaction.metabolite.accession
write_bel_association('a', 'HMDB', metabolite, 'path', 'UP', protein, file)
print('UNSET ALL', file=file)
def get_journal(interaction):
"""Gets the journal name from the in HMDB provided reference strings.
:param interaction: interaction_table object (e.g. MetaboliteProteins)
:rtype: str
"""
return interaction.reference.reference_text.split(".")[1]
def write_metabolites_diseases_bel(manager, file=None):
"""Writes the metabolite-disease association relations found in HMDB into a BEL document.
:param bio2bel_hmdb.Manager manager: Manager object connected to the local HMDB database
:param file file: A writeable file or file like. Defaults to stdout
"""
interactions = manager.get_metabolite_disease_interactions()
for interaction in interactions:
if interaction.disease.dion is not None:
disease_name = interaction.disease.dion
dis_namespace = 'DOID'
elif interaction.disease.hpo is not None:
disease_name = interaction.disease.hpo
dis_namespace = 'HP'
elif interaction.disease.mesh_diseases is not None:
disease_name = interaction.disease.mesh_diseases
dis_namespace = 'MESH'
else:
            logging.warning('HMDB disease name not found in the disease ontologies; using the HMDB name.')
dis_namespace = 'HMDB_D'
disease_name = interaction.disease.name
accession = interaction.metabolite.accession
if interaction.reference.pubmed_id is None:
citation = interaction.reference.reference_text
print('SET Citation = {{"{}"}}'.format(citation), file=file)
else:
pubmed = interaction.reference.pubmed_id
journal = get_journal(interaction)
print('SET Citation = {{"PubMed", "{}", "{}"}}'.format(journal, pubmed))
print('SET Evidence = "Database Entry"', file=file)
print('SET Confidence = "Axiomatic"', file=file)
        print('SET Species = "9606"', file=file)
        print('SET Disease = "{}"'.format(disease_name), file=file)
write_bel_association('a', 'HMDB', accession, 'path', dis_namespace, disease_name, file)
print('UNSET ALL', file=file)
def write_bel_association(abundance1, namespace1, accession1, abundance2, namespace2, accession2, file=None):
"""Prints a BEL association.
:param str abundance1: Abundance of the subject
:param str namespace1: Namespace of the subject
:param str accession1: Identifier of the subject
:param str abundance2: Abundance of the object
:param str namespace2: Namespace of the object
:param str accession2: Identifier of the object
:param file file: A writeable file or file like. Defaults to stdout
"""
print(
'{}({}:{}) -- {}({}:{})'.format(
ensure_quotes(abundance1),
ensure_quotes(namespace1),
ensure_quotes(accession1),
ensure_quotes(abundance2),
ensure_quotes(namespace2),
ensure_quotes(accession2)
),
file=file
)
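# A minimal usage sketch (hypothetical identifiers, not part of the original
# module): a single metabolite-disease association written to stdout would look
# roughly like
#
#     write_bel_association('a', 'HMDB', 'HMDB00001', 'path', 'DOID', 'my disease')
#
# which is expected to print a line of the form
#
#     a(HMDB:HMDB00001) -- path(DOID:"my disease")
#
# where ensure_quotes adds quotation marks only around values that need them.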
|
bio2bel/hmdb
|
src/bio2bel_hmdb/to_bel.py
|
Python
|
mit
| 3,904
|
[
"Pybel"
] |
d9ec025efbd10a651fcfeb94155cf3bf863298ccf3e241b25ef7831338c95703
|
"""
This file contains all tests for the GalaxyConnector class.
These tests need a Galaxy instance to be executed.
"""
import os
import time
import unittest
from shutil import copyfile
from bioblend import galaxy
from pyramid.paster import get_appsettings
from pyramid import testing
from askomics.libaskomics.ParamManager import ParamManager
from askomics.libaskomics.GalaxyConnector import GalaxyConnector
from interface_galaxy import InterfaceGalaxy
from nose.plugins.attrib import attr
@attr('galaxy')
class GalaxyConnectorTests(unittest.TestCase):
"""
Set up settings and request before testing GalaxyConnector
    Also deletes any old testing history in Galaxy and creates a new one (with test datasets)
"""
def setUp(self):
"""Set up the settings and the session"""
self.settings = get_appsettings('configs/test.virtuoso.ini', name='main')
self.settings['askomics.upload_user_data_method'] = 'insert'
self.request = testing.DummyRequest()
self.request.session['username'] = 'jdoe'
self.request.session['group'] = 'base'
self.request.session['admin'] = False
self.request.session['blocked'] = True
# Files
# Create the user dir if not exist
self.temp_directory = self.settings['askomics.files_dir'] + '/upload/' + self.request.session['username']
if not os.path.isdir(self.temp_directory):
os.makedirs(self.temp_directory)
# Set the upload dir
self.request.session['upload_directory'] = self.temp_directory
# Copy files if directory is empty
if not os.listdir(self.temp_directory):
files = ['people.tsv', 'instruments.tsv', 'play_instrument.tsv', 'transcript.tsv', 'qtl.tsv', 'small_data.gff3', 'turtle_data.ttl', 'bed_example.bed']
for file in files:
src = os.path.join(os.path.dirname(__file__), "..", "test-data") + '/' + file
dst = self.request.session['upload_directory'] + '/' + file
copyfile(src, dst)
# Galaxy
self.interface_galaxy = InterfaceGalaxy(self.settings, self.request)
self.galaxy = self.interface_galaxy.get_galaxy_credentials()
self.interface_galaxy.delete_testing_histories()
self.history_id = self.interface_galaxy.create_testing_history()
self.interface_galaxy.upload_file_into_history('people.tsv')
self.interface_galaxy.upload_file_into_history('instruments.tsv')
self.interface_galaxy.upload_string_into_history('hello_world.txt', 'hello world')
self.interface_galaxy.wait_until_datasets_ready()
self.datasets = self.interface_galaxy.get_datasets_id()
def test_check_galaxy_instance(self):
"""Test the check_galaxy_instance method"""
galaxy_connector = GalaxyConnector(self.settings, self.request.session, self.galaxy['url'], self.galaxy['key'])
assert galaxy_connector.check_galaxy_instance() is True
#FIXME: Don't raise the ConnectionError
# with self.assertRaises(ConnectionError):
# GalaxyConnector(self.settings, self.request.session, self.galaxy['url'], 'fake_api_key')
def test_get_datasets_and_histories(self):
"""Test the get_datasets_and_histories method"""
galaxy_connector = GalaxyConnector(self.settings, self.request.session, self.galaxy['url'], self.galaxy['key'])
# Test with history id
result = galaxy_connector.get_datasets_and_histories(['tabular'], history_id=self.history_id)
created_history = {
'name': 'askomics_test',
'id': self.history_id,
'selected': True
}
assert isinstance(result, dict)
assert len(result) == 2
assert 'datasets' in result
assert 'histories' in result
assert created_history in result['histories']
# Test without history id
result = galaxy_connector.get_datasets_and_histories(['tabular'])
created_history = {
'name': 'askomics_test',
'id': self.history_id,
'selected': True
}
assert isinstance(result, dict)
assert len(result) == 2
assert 'datasets' in result
assert 'histories' in result
assert created_history in result['histories']
def test_upload_files(self):
"""Test upload_files method"""
galaxy_connector = GalaxyConnector(self.settings, self.request.session, self.galaxy['url'], self.galaxy['key'])
galaxy_connector.upload_files([self.datasets['hello']['dataset_id']])
assert self.interface_galaxy.check_uploaded_files(self.temp_directory) is True
def test_get_file_content(self):
"""Test get_file_content method"""
galaxy_connector = GalaxyConnector(self.settings, self.request.session, self.galaxy['url'], self.galaxy['key'])
content = galaxy_connector.get_file_content(self.datasets['hello']['dataset_id'])
expected_content = 'hello world\n'
assert content == expected_content
def test_send_to_history(self):
"""Test the send_to_history method"""
galaxy_connector = GalaxyConnector(self.settings, self.request.session, self.galaxy['url'], self.galaxy['key'])
param_manager = ParamManager(self.settings, self.request.session)
src_file = param_manager.get_upload_directory()
filepath = src_file + 'play_instrument.tsv'
galaxy_connector.send_to_history(filepath, 'play_instrument.tsv', 'tabular')
assert self.interface_galaxy.check_dataset_presence('play_instrument.tsv') is True
def test_send_json_to_history(self):
"""Test the send_json_to_history method"""
galaxy_connector = GalaxyConnector(self.settings, self.request.session, self.galaxy['url'], self.galaxy['key'])
galaxy_connector.send_json_to_history('hello world')
assert self.interface_galaxy.check_dataset_presence('askomics_query_', start_with=True) is True
|
ofilangi/askomics
|
askomics/test/GalaxyConnector_test.py
|
Python
|
agpl-3.0
| 6,011
|
[
"Galaxy"
] |
bd7349c2bad5cccd1c82e6b1b2c2ace3b721d6e0dda1742667e3d28662c636d4
|
import ase.db
from ase.lattice import bulk
from gpaw import GPAW, PW
from gpaw.response.g0w0 import G0W0
data = {
'C': ['diamond', 3.553],
'Si': ['diamond', 5.421],
'Ge': ['diamond', 5.644],
'SiC': ['zincblende', 4.346],
'AlN': ['zincblende', 4.368],
'AlP': ['zincblende', 5.451],
'AlAs': ['zincblende', 5.649],
'GaN': ['zincblende', 4.520],
'GaP': ['zincblende', 5.439],
'GaAs': ['zincblende', 5.640],
'InP': ['zincblende', 5.858],
'InAs': ['zincblende', 6.047],
'InSb': ['zincblende', 6.468]}
c = ase.db.connect('gw.db')
for name in data:
id = c.reserve(name=name)
if id is None:
continue
x, a = data[name]
atoms = bulk(name, x, a=a)
atoms.calc = GPAW(mode=PW(400),
kpts={'size': (6, 6, 6), 'gamma': True},
txt='%s.txt' % name)
atoms.get_potential_energy()
atoms.calc.diagonalize_full_hamiltonian(nbands=100)
atoms.calc.write(name, mode='all')
n = int(atoms.calc.get_number_of_electrons()) // 2
gw = G0W0(name, 'gw-' + name,
nbands=100,
kpts=[(0, 0, 0), (0.5, 0.5, 0.5), (0.5, 0.5, 0)],
ecut=150,
hilbert=True,
fast=True,
domega0=0.1,
eta=0.2,
bands=(n - 1, n + 1))
results = gw.calculate()
c.write(atoms, name=name, data=results)
del c[id]
|
robwarm/gpaw-symm
|
gpaw/test/big/gw/gw.py
|
Python
|
gpl-3.0
| 1,426
|
[
"ASE",
"GPAW"
] |
42cb3734f38130c081f94c2558ae39566ec0cafb3268c41f588958b46b1c9c5e
|
# Copyright 2008-2010 by Peter Cock. All rights reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.SeqIO support for the "ace" file format.
You are expected to use this module via the Bio.SeqIO functions.
See also the Bio.Sequencing.Ace module which offers more than just accessing
the contig consensus sequences in an ACE file as SeqRecord objects.
"""
from __future__ import print_function
import sys
# Add path to Bio
sys.path.append('../..')
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import generic_nucleotide, generic_dna, generic_rna, Gapped
from Bio.Sequencing import Ace
__docformat__ = "restructuredtext en"
def AceIterator(handle):
"""Returns SeqRecord objects from an ACE file.
This uses the Bio.Sequencing.Ace module to do the hard work. Note that
by iterating over the file in a single pass, we are forced to ignore any
WA, CT, RT or WR footer tags.
Ace files include the base quality for each position, which are taken
to be PHRED style scores. Just as if you had read in a FASTQ or QUAL file
using PHRED scores using Bio.SeqIO, these are stored in the SeqRecord's
letter_annotations dictionary under the "phred_quality" key.
>>> from Bio import SeqIO
>>> with open("Ace/consed_sample.ace", "rU") as handle:
... for record in SeqIO.parse(handle, "ace"):
... print("%s %s... %i" % (record.id, record.seq[:10], len(record)))
... print(max(record.letter_annotations["phred_quality"]))
Contig1 agccccgggc... 1475
90
However, ACE files do not include a base quality for any gaps in the
consensus sequence, and these are represented in Biopython with a quality
of zero. Using zero is perhaps misleading as there may be very strong
evidence to support the gap in the consensus. Previous versions of
Biopython therefore used None instead, but this complicated usage, and
prevented output of the gapped sequence as FASTQ format.
>>> from Bio import SeqIO
>>> with open("Ace/contig1.ace", "rU") as handle:
... for record in SeqIO.parse(handle, "ace"):
... print("%s ...%s..." % (record.id, record.seq[85:95]))
... print(record.letter_annotations["phred_quality"][85:95])
... print(max(record.letter_annotations["phred_quality"]))
Contig1 ...AGAGG-ATGC...
[57, 57, 54, 57, 57, 0, 57, 72, 72, 72]
90
Contig2 ...GAATTACTAT...
[68, 68, 68, 68, 68, 68, 68, 68, 68, 68]
90
"""
for ace_contig in Ace.parse(handle):
# Convert the ACE contig record into a SeqRecord...
consensus_seq_str = ace_contig.sequence
        # Assume it's DNA unless there is a U in it,
if "U" in consensus_seq_str:
if "T" in consensus_seq_str:
# Very odd! Error?
alpha = generic_nucleotide
else:
alpha = generic_rna
else:
alpha = generic_dna
if "*" in consensus_seq_str:
# For consistency with most other file formats, map
# any * gaps into - gaps.
assert "-" not in consensus_seq_str
consensus_seq = Seq(consensus_seq_str.replace("*", "-"),
Gapped(alpha, gap_char="-"))
else:
consensus_seq = Seq(consensus_seq_str, alpha)
# TODO? - Base segments (BS lines) which indicates which read
# phrap has chosen to be the consensus at a particular position.
# Perhaps as SeqFeature objects?
# TODO - Supporting reads (RD lines, plus perhaps QA and DS lines)
# Perhaps as SeqFeature objects?
seq_record = SeqRecord(consensus_seq,
id=ace_contig.name,
name=ace_contig.name)
        # Consensus base quality (BQ lines). Note that any gaps (originally
        # as * characters) in the consensus do not get a quality entry, so
        # we assign a quality of zero (this may be misleading, as there can be
        # excellent support for having a gap here; see the docstring above).
quals = []
i = 0
for base in consensus_seq:
if base == "-":
quals.append(0)
else:
quals.append(ace_contig.quality[i])
i += 1
assert i == len(ace_contig.quality)
seq_record.letter_annotations["phred_quality"] = quals
yield seq_record
# All done
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/SeqIO/AceIO.py
|
Python
|
gpl-2.0
| 4,689
|
[
"Biopython"
] |
bfb20e567975cc7955b3a6894f7a8b975312d72d7af78a2e5aae2a6daffa18da
|
import json
from coalib.bearlib import deprecate_settings
from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.NpmRequirement import NpmRequirement
from coala_utils.param_conversion import negate
def bool_or_str(value):
try:
return bool(value)
except ValueError:
return str(value)
def bool_or_int(value):
try:
return bool(value)
except ValueError:
return int(value)
@linter(executable='jshint',
output_format='regex',
output_regex=r'.+?: line (?P<line>\d+), col (?P<column>\d+), '
r'(?P<message>.+) \((?P<severity>[EWI])\d+\)')
class JSHintBear:
"""
    Detects errors and potential problems in JavaScript code and enforces
    appropriate coding conventions. For example, problems like syntax errors,
bugs due to implicit type conversion, leaking variables and much more
can be detected.
For more information on the analysis visit <http://jshint.com/>
"""
LANGUAGES = {'JavaScript'}
REQUIREMENTS = {NpmRequirement('jshint', '2.9.5')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Formatting', 'Syntax', 'Complexity', 'Unused Code'}
@staticmethod
@deprecate_settings(es_version='use_es6_syntax',
javascript_strictness=(
'allow_global_strict',
lambda x: 'global' if x else True),
cyclomatic_complexity='maxcomplexity',
allow_unused_variables=('prohibit_unused', negate),
max_parameters='maxparams',
allow_missing_semicolon='allow_missing_semicol',
allow_this_statements='allow_this_stmt',
allow_with_statements='allow_with_stmt',
allow_bitwise_operators=('prohibit_bitwise', negate),
max_statements='maxstatements',
max_depth='maxdepth',
allow_comma_operator=('prohibit_comma', negate),
allow_non_breaking_whitespace=(
'prohibit_non_breaking_whitespace', negate),
allow_prototype_overwrite=(
'prohibit_prototype_overwrite', negate),
allow_type_coercion=('prohibit_type_coercion', negate),
allow_future_identifiers=('future_hostile', negate),
allow_typeof=('prohibit_typeof', negate),
allow_var_statement=(
'prohibit_variable_statements', negate),
allow_grouping_operator=('prohibit_groups', negate),
allow_variable_shadowing='shadow',
use_mozilla_extension='using_mozilla',
allow_constructor_functions=('prohibit_new', negate),
allow_argument_caller_and_callee=(
'prohibit_arg', negate),
allow_iterator_property=('iterator', negate),
allow_filter_in_forin='force_filter_forin')
def generate_config(filename, file,
allow_bitwise_operators: bool = False,
allow_prototype_overwrite: bool = False,
force_braces: bool = True,
allow_type_coercion: bool = False,
allow_future_identifiers: bool = True,
allow_typeof: bool = True,
allow_filter_in_forin: bool = True,
allow_funcscope: bool = False,
allow_iterator_property: bool = True,
allow_argument_caller_and_callee: bool = False,
allow_comma_operator: bool = True,
allow_non_breaking_whitespace: bool = False,
allow_constructor_functions: bool = True,
allow_grouping_operator: bool = True,
allow_var_statement: bool = True,
allow_missing_semicolon: bool = False,
allow_debugger: bool = False,
allow_assignment_comparisions: bool = False,
allow_eval: bool = False,
allow_increment: bool = False,
allow_proto: bool = False,
allow_scripturls: bool = False,
allow_singleton: bool = False,
allow_this_statements: bool = False,
allow_with_statements: bool = False,
use_mozilla_extension: bool = False,
javascript_strictness: bool_or_str = True,
allow_noyield: bool = False,
allow_eqnull: bool = False,
allow_last_semicolon: bool = False,
allow_func_in_loop: bool = False,
allow_expr_in_assignments: bool = False,
use_es3_array: bool = False,
environment_mootools: bool = False,
environment_couch: bool = False,
environment_jasmine: bool = False,
environment_jquery: bool = False,
environment_node: bool = False,
environment_qunit: bool = False,
environment_rhino: bool = False,
environment_shelljs: bool = False,
environment_prototypejs: bool = False,
environment_yui: bool = False,
environment_mocha: bool = True,
environment_module: bool = False,
environment_wsh: bool = False,
environment_worker: bool = False,
environment_nonstandard: bool = False,
environment_browser: bool = True,
environment_browserify: bool = False,
environment_devel: bool = True,
environment_dojo: bool = False,
environment_typed: bool = False,
environment_phantom: bool = False,
max_statements: bool_or_int = False,
max_depth: bool_or_int = False,
max_parameters: bool_or_int = False,
cyclomatic_complexity: bool_or_int = False,
allow_variable_shadowing: bool_or_str = False,
allow_unused_variables: bool_or_str = False,
allow_latedef: bool_or_str = False,
enforce_trailing_comma: bool = False,
es_version: bool_or_int = 5,
jshint_config: str = '',
):
"""
:param allow_bitwise_operators:
Allows the use of bitwise operators.
:param allow_prototype_overwrite:
This options allows overwriting prototypes of native objects such
as ``Array``.
:param force_braces:
This option requires you to always put curly braces around blocks
in loops and conditionals.
:param allow_type_coercion:
This options allows the use of ``==`` and ``!=``.
:param allow_future_identifiers:
This option allows the use of identifiers which are defined in
future versions of JavaScript.
:param allow_typeof:
This option enables warnings about invalid ``typeof`` operator
values.
:param allow_filter_in_forin:
This option requires all ``for in`` loops to filter object's items.
:param allow_iterator_property:
This option suppresses warnings about the ``__iterator__``
property.
:param allow_funcscope:
This option suppresses warnings about declaring variables inside of
control structures while accessing them later from outside.
:param allow_argument_caller_and_callee:
This option allows the use of ``arguments.caller`` and
``arguments.callee``.
:param allow_comma_operator:
This option allows the use of the comma operator.
:param allow_non_breaking_whitespace:
Allows "non-breaking whitespace characters".
:param allow_constructor_functions:
Allows the use of constructor functions.
:param allow_grouping_operator:
This option allows the use of the grouping operator when it is
not strictly required.
:param allow_var_statement:
Allows the use of the ``var`` statement while declaring a variable.
            ``let`` or ``const`` should be used when it is set to ``False``.
:param allow_missing_semicolon:
This option suppresses warnings about missing semicolons.
:param allow_debugger:
This option suppresses warnings about the ``debugger`` statements.
:param allow_assignment_comparisions:
This option suppresses warnings about the use of assignments in
cases where comparisons are expected.
:param allow_eval:
This options suppresses warnings about the use of ``eval``
function.
:param allow_increment:
This option suppresses warnings about the use of unary increment
and decrement operators.
:param allow_proto:
This option suppresses warnings about the ``__proto__`` property.
:param allow_scripturls:
This option suppresses warnings about the use of script-targeted
URLs.
:param allow_singleton:
This option suppresses warnings about constructions like
``new function () { ... }`` and ``new Object;`` sometimes used to
produce singletons.
:param allow_this_statements:
This option suppresses warnings about possible strict violations
when the code is running in strict mode and ``this`` is used in a
non-constructor function.
:param allow_with_statements:
This option suppresses warnings about the use of the ``with``
statement.
:param use_mozilla_extension:
This options tells JSHint that your code uses Mozilla JavaScript
extensions.
:param javascript_strictness:
Determines what sort of strictness to use in the JavaScript code.
The possible options are:
- "global" - there must be a ``"use strict";`` at global level
- "implied" - lint the code as if there is a ``"use strict";``
- "False" - disable warnings about strict mode
- "True" - there must be a ``"use strict";`` at function level
:param allow_noyield:
This option suppresses warnings about generator functions with no
``yield`` statement in them.
:param allow_eqnull:
This option suppresses warnings about ``== null`` comparisons.
:param allow_last_semicolon:
This option suppresses warnings about missing semicolons for the
last statement.
:param allow_func_in_loop:
This option suppresses warnings about functions inside of loops.
:param allow_expr_in_assignments:
This option suppresses warnings about the use of expressions where
normally assignments or function calls are expected.
:param use_es3_array:
            This option tells JSHintBear that ES3 array elision elements, or
            empty elements, are used.
:param environment_mootools:
This option defines globals exposed by the Mootools.
:param environment_couch:
This option defines globals exposed by CouchDB.
:param environment_jasmine:
This option defines globals exposed by Jasmine.
:param environment_jquery:
This option defines globals exposed by Jquery.
:param environment_node:
This option defines globals exposed by Node.
:param environment_qunit:
This option defines globals exposed by Qunit.
:param environment_rhino:
This option defines globals exposed when the code is running inside
rhino runtime environment.
:param environment_shelljs:
This option defines globals exposed by the ShellJS.
:param environment_prototypejs:
This option defines globals exposed by the Prototype.
:param environment_yui:
This option defines globals exposed by the YUI JavaScript
Framework.
:param environment_mocha:
This option defines globals exposed by the "BDD" and "TDD" UIs of
the Mocha unit testing framework.
:param environment_module:
This option informs JSHintBear that the input code describes an
ECMAScript 6 module.
:param environment_wsh:
This option defines globals available when the code is running as a
script for the Windows Script Host.
:param environment_worker:
This option defines globals available when the code is running
inside of a Web Worker.
:param environment_nonstandard:
            This option defines non-standard but widely adopted globals such
as ``escape`` and ``unescape``.
:param environment_browser:
This option defines globals exposed by modern browsers.
:param environment_browserify:
This option defines globals available when using the Browserify.
:param environment_devel:
This option defines globals that are usually used for debugging:
``console``, ``alert``, etc.
:param environment_dojo:
This option defines globals exposed by the Dojo Toolkit.
:param environment_typed:
This option defines globals for typed array constructors.
:param environment_phantom:
            This option defines globals available when your code is running
inside of the PhantomJS runtime environment.
:param max_statements:
Maximum number of statements allowed per function.
:param max_depth:
This option lets you control how nested do you want your blocks to
be.
:param max_parameters:
Maximum number of parameters allowed per function.
:param cyclomatic_complexity:
Maximum cyclomatic complexity in the code.
:param allow_variable_shadowing:
This option suppresses warnings about variable shadowing i.e.
declaring a variable that had been already declared somewhere in
the outer scope.
- "inner" - check for variables defined in the same scope only
- "outer" - check for variables defined in outer scopes as well
- False - same as inner
- True - allow variable shadowing
:param allow_unused_variables:
            Allows variables to be defined but never used. This can be set
            to "vars" to only check for variables, not function parameters,
            or "strict" to check all variables and parameters.
:param allow_latedef:
This option allows the use of a variable before it was defined.
Setting this option to "nofunc" will allow function declarations to
be ignored.
:param enforce_trailing_comma:
This option warns when a comma is not placed after the last element
in an array or object literal.
:param es_version:
This option is used to specify the ECMAScript version to which the
            code must adhere.
"""
# Assume that when es_version is bool, it is intended for the
# deprecated use_es6_version
if es_version is True:
es_version = 6
elif es_version is False:
es_version = 5
if not jshint_config:
options = {'bitwise': not allow_bitwise_operators,
'freeze': not allow_prototype_overwrite,
'curly': force_braces,
'eqeqeq': not allow_type_coercion,
'futurehostile': not allow_future_identifiers,
'notypeof': not allow_typeof,
'forin': allow_filter_in_forin,
'funcscope': allow_funcscope,
'iterator': not allow_iterator_property,
'noarg': not allow_argument_caller_and_callee,
'nocomma': not allow_comma_operator,
'nonbsp': not allow_non_breaking_whitespace,
'nonew': not allow_constructor_functions,
'undef': True,
'singleGroups': not allow_grouping_operator,
'varstmt': not allow_var_statement,
'asi': allow_missing_semicolon,
'debug': allow_debugger,
'boss': allow_assignment_comparisions,
'evil': allow_eval,
'strict': javascript_strictness,
'plusplus': allow_increment,
'proto': allow_proto,
'scripturl': allow_scripturls,
'supernew': allow_singleton,
'validthis': allow_this_statements,
'withstmt': allow_with_statements,
'moz': use_mozilla_extension,
'noyield': allow_noyield,
'eqnull': allow_eqnull,
'lastsemic': allow_last_semicolon,
'loopfunc': allow_func_in_loop,
'expr': allow_expr_in_assignments,
'elision': use_es3_array,
'mootools': environment_mootools,
'couch': environment_couch,
'jasmine': environment_jasmine,
'jquery': environment_jquery,
'node': environment_node,
'qunit': environment_qunit,
'rhino': environment_rhino,
'shelljs': environment_shelljs,
'prototypejs': environment_prototypejs,
'yui': environment_yui,
'mocha': environment_mocha,
'module': environment_module,
'wsh': environment_wsh,
'worker': environment_worker,
'nonstandard': environment_nonstandard,
'browser': environment_browser,
'browserify': environment_browserify,
'devel': environment_devel,
'dojo': environment_dojo,
'typed': environment_typed,
'phantom': environment_phantom,
'maxerr': 99999,
'maxcomplexity': cyclomatic_complexity,
'maxdepth': max_depth,
'maxparams': max_parameters,
'maxstatements': max_statements,
'shadow': allow_variable_shadowing,
'unused': not allow_unused_variables,
'latedef': allow_latedef,
'trailingcomma': enforce_trailing_comma,
'esversion': es_version}
return json.dumps(options)
else:
return None
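    # Illustrative note (schematic, not executed by the bear): with the
    # defaults above and no ``jshint_config`` given, the returned JSON maps
    # jshint option names to values, e.g. '"curly": true', '"undef": true'
    # and '"esversion": 5'. coala writes this string to a temporary config
    # file whose path is handed to create_arguments() below as
    # ``config_file`` and passed to jshint via ``--config``.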
@staticmethod
def create_arguments(filename, file, config_file,
jshint_config: str = '',
):
"""
:param jshint_config:
            The location of the jshintrc config file. If this option is
            present, all the above options are ignored and the given
            .jshintrc file is used as the configuration file instead.
"""
args = ('--verbose', filename, '--config')
if jshint_config:
args += (jshint_config,)
else:
args += (config_file,)
return args
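# A minimal sketch (the sample line is hypothetical, not produced by running
# this bear) of how the output_regex declared on JSHintBear splits a
# ``jshint --verbose`` line into the named groups coala's linter expects.
def _example_output_regex_match():
    import re
    sample = 'foo.js: line 3, col 12, Missing semicolon. (W033)'
    pattern = (r'.+?: line (?P<line>\d+), col (?P<column>\d+), '
               r'(?P<message>.+) \((?P<severity>[EWI])\d+\)')
    groups = re.match(pattern, sample).groupdict()
    assert groups == {'line': '3', 'column': '12',
                      'message': 'Missing semicolon.', 'severity': 'W'}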
|
coala-analyzer/coala-bears
|
bears/js/JSHintBear.py
|
Python
|
agpl-3.0
| 20,717
|
[
"VisIt"
] |
6c31cb6a2dce33a89c61af9083140fbd85478b94158984a21202e0d2ad6e223b
|
import numpy as np
from compliance_checker.base import check_has, Result
from compliance_checker.defined_base import DefinedNCBaseCheck
from netCDF4 import Dataset
from compliance_checker import DTExportFormat
#from docutils.math.math2html import LimitsProcessor
##
## UR-TODO - simple copy from ROMS, needs adjusting to SHOC
##
##
class DefinedSHOCBaseCheck(DefinedNCBaseCheck):
###############################################################################
#
# HIGHLY RECOMMENDED
#
###############################################################################
supported_ds = [Dataset]
@classmethod
def beliefs(cls):
'''
Not applicable for Defined
'''
return {}
@classmethod
def make_result(cls, level, score, out_of, name, messages, the_method):
return Result(level, (score, out_of), name, messages,None,"shoc",the_method)
def setup(self, ds):
pass
def limits(self,dsp, ftype = "std"):
from netCDF4 import num2date
ds = dsp.dataset
times = list()
corners = list()
if ftype == "std":
if 'y_grid' in ds.variables and 'x_grid' in ds.variables:
lons = ds.variables['x_grid'][:]
lats = ds.variables['y_grid'][:]
else:
raise RuntimeError('Cannot find x_grid/y_grid variables in %s' % ds.filepath)
bounds = [float(np.amin(lons)), float(np.amax(lons)), float(np.amin(lats)), float(np.amax(lats))]
xshape = ds.variables['x_grid'].shape
yshape = ds.variables['y_grid'].shape
if 't' in ds.variables and len(ds.variables['t']) > 0:
tt = ds.variables['t']
times.append(str(len(tt)))
times.append(DTExportFormat.format(num2date(tt[0],tt.units)))
times.append(DTExportFormat.format(num2date(tt[len(tt)-1],tt.units)))
else:
if 'latitude' in ds.variables and 'longitude' in ds.variables:
lons = ds.variables['longitude'][:]
lats = ds.variables['latitude'][:]
xshape = ds.variables['longitude'].shape
yshape = ds.variables['latitude'].shape
elif 'lat' in ds.variables and 'lon' in ds.variables:
lons = ds.variables['lon'][:]
lats = ds.variables['lat'][:]
xshape = ds.variables['lon'].shape
yshape = ds.variables['lat'].shape
else:
raise RuntimeError('Cannot find latitude/longitude variables in %s' % ds.filepath)
bounds = [float(np.amin(lons)), float(np.amax(lons)), float(np.amin(lats)), float(np.amax(lats))]
if 'time' in ds.variables and len(ds.variables['time']) > 0:
tv = ds.variables['time']
times.append(str(len(tv)))
times.append(DTExportFormat.format(num2date(tv[0],tv.units)))
times.append(DTExportFormat.format(num2date(tv[len(tv)-1],tv.units)))
if len(xshape) > 1:
import math
ni = xshape[len(xshape) -1]
nj = xshape[len(xshape) -2]
# from the horizontal -> cartesian
widthX = lons[0,ni-1] - lons[0,0]
heightX = lats[0,ni-1] - lats[0,0]
rotation = DefinedNCBaseCheck.calc_rotation(self,widthX,heightX)
# now extract the actual width and height
widthY = lons[nj-1,0] - lons[0,0]
heightY = lats[nj-1,0] - lats[0,0]
height=math.sqrt((widthY*widthY)+(heightY*heightY))
width=math.sqrt((widthX*widthX)+(heightX*heightX))
origin = [lons[0,0],lats[0,0]]
corners.append(origin)
corners.append((lons[nj-1,0],lats[nj-1,0]))
corners.append((lons[nj-1,ni-1],lats[nj-1,ni-1]))
corners.append((lons[0,ni-1],lats[0,ni-1]))
else:
ni = xshape[0]
nj = yshape[0]
width = lons[len(lons)-1] - lons[0]
height = lats[len(lats)-1] - lats[0]
origin = [lons[0],lats[0]]
rotation = 0.
corners.append(origin)
corners.append((lons[0],lats[nj-1]))
corners.append((lons[ni-1],lats[nj-1]))
corners.append((lons[ni-1],lats[0]))
ninj = [ ni, nj ]
vals = dict()
vals['bounds'] = bounds
vals['ni_nj'] = ninj
vals['time'] = times
vals['width'] = width
vals['height'] = height
vals['rotation'] = rotation
vals['origin'] = origin
vals['corners'] = corners
return vals
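    # Worked example (hypothetical 2x2 curvilinear grid, not SHOC output) for
    # the width/height computation above: the i-direction extent is taken
    # along the first row, the j-direction extent along the first column, and
    # each length is the Euclidean norm of the coordinate deltas. For
    #     lons = [[0.0, 3.0], [0.0, 3.0]] and lats = [[0.0, 0.0], [4.0, 4.0]]
    # widthX = 3.0, heightX = 0.0 gives width = 3.0; widthY = 0.0,
    # heightY = 4.0 gives height = 4.0; the origin is (0.0, 0.0).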
@check_has(DefinedNCBaseCheck.HIGH)
def check_high(self, ds):
return ['title', 'summary', 'keywords']
###############################################################################
#
# RECOMMENDED
#
###############################################################################
@check_has(DefinedNCBaseCheck.MEDIUM)
def check_recommended(self, ds):
return [
'history',
'comment',
'date_created',
'creator_name',
'creator_url',
'creator_email',
'institution',
'license'
]
###############################################################################
#
# SUGGESTED
#
###############################################################################
@check_has(DefinedNCBaseCheck.LOW)
def check_suggested(self, ds):
return [
'date_modified',
'date_issued']
def do_check_2D(self, ds, ftype = "std"):
'''
Verifies the data set has the required variables for the 2D grid
what about these
'''
if ftype == "std":
# we could go overboard and test for units and dimensions on the variables as well ....
# not really necessary here
required_variables = [
'x_grid',
'y_grid',
'x_left',
'y_left',
'x_back',
'y_back',
'x_centre',
'y_centre'
]
# they must exist in above
required_dimensions = [
'j_grid',
'i_grid',
'j_centre',
'i_centre',
'j_left',
'i_left',
'j_back',
'i_back'
]
matching_dimension = [
('j_grid','i_grid'),
('j_centre','i_centre'),
('j_left','i_left'),
('j_back','i_back')
]
else:
required_variables = [
'longitude',
'latitude',
'time'
]
# they must exist in above
required_dimensions = [
'j',
'i'
]
matching_dimension = []
# TODO-UR check that the dimensions have the correct size relationship?
level = DefinedNCBaseCheck.HIGH
out_of = len(required_variables) + len(required_dimensions) + len(matching_dimension)
score = 0
messages = []
for variable in required_variables:
test = variable in ds.variables
score += int(test)
if not test:
messages.append("%s is a required variable" % variable)
for dim in required_dimensions:
test = dim in ds.dimensions
score += int(test)
if not test:
messages.append("%s is a required dimension" % dim)
for dimtuple in matching_dimension:
testdim1 = ds.dimensions[dimtuple[0]]
testdim2 = ds.dimensions[dimtuple[1]]
test = int(len(testdim1) == len(testdim2))
score += test
if not test:
messages.append("%s are required to be of same size!" % str(dimtuple))
return self.make_result(level, score, out_of, 'Required Variables and Dimensions', messages,'check_2D')
def do_check_3D(self, ds, ftype = "std"):
'''
Verifies the dataset has the required variables for the 3D grid
'''
if ftype == "std":
# we could go overboard and test for units and dimensions on the variables as well ....
# not really necessary here
required_dimensions = [
'record',
'j_centre',
'i_centre'
]
required_variables = [
't',
'botz'
]
else: # fix me for cf
required_dimensions = [
'j',
'i',
'k'
]
required_variables = [
'botz',
'zc',
'longitude',
'latitude',
'time'
]
level = DefinedNCBaseCheck.HIGH
out_of = len(required_variables) + len(required_dimensions)
score = 0
messages = []
for variable in required_variables:
test = variable in ds.variables
score += int(test)
if not test:
messages.append("%s is a required variable" % variable)
for dim in required_dimensions:
test = dim in ds.dimensions
score += int(test)
if not test:
messages.append("%s is a required variable" % dim)
return self.make_result(level, score, out_of, 'Required Variables and Dimensions', messages,'check_3D')
def do_check_bathy(self, ds, ftype = "std"):
'''
Verifies the dataset has the required variables for bathy
'''
# we could go overboard and test for units and dimensions on the variables as well ....
# not really necessary here
if ftype == "std":
# we could go overboard and test for units and dimensions on the variables as well ....
# not really necessary here
required_dimensions = [
'j_centre',
'i_centre'
]
required_variables = [
'botz'
]
else: # fix me for cf
required_dimensions = [
'j',
'i'
]
required_variables = [
'botz',
'longitude',
'latitude'
]
level = DefinedNCBaseCheck.HIGH
out_of = len(required_variables) + len(required_dimensions)
score = 0
messages = []
for variable in required_variables:
test = variable in ds.variables
score += int(test)
if not test:
messages.append("%s is a required variable" % variable)
for dim in required_dimensions:
test = dim in ds.dimensions
score += int(test)
if not test:
messages.append("%s is a required variable" % dim)
return self.make_result(level, score, out_of, 'Required Variables and Dimensions', messages,'check_bathy')
def do_check_mask(self, ds):
'''
        Verifies the dataset has the required variables for the mask
'''
        # we could go overboard and test for units and dimensions on the variables as well ....
# not really necessary here
required_variables = []
required_dimensions = []
level = DefinedNCBaseCheck.HIGH
out_of = len(required_variables) + len(required_dimensions)
score = 0
messages = []
for variable in required_variables:
test = variable in ds.variables
score += int(test)
if not test:
messages.append("%s is a required variable" % variable)
for dim in required_dimensions:
test = dim in ds.dimensions
score += int(test)
if not test:
messages.append("%s is a required variable" % dim)
return self.make_result(level, score, out_of, 'Required Variables and Dimensions', messages,'check_mask')
def check(self,dsp):
from wicken.netcdf_dogma import NetCDFDogma
if not isinstance(dsp.dogma,NetCDFDogma):
raise RuntimeError("Expecting Netcdf dogma, found: "+str(dsp.dogma))
scores = []
ds = dsp.dataset
ftype = "std"
if str("cf") in self.options :
ftype = "cf"
scores.append(self.do_check_2D(ds,ftype))
if str("3D").lower() in self.options:
scores.append(self.do_check_3D(ds,ftype))
if str("bathy").lower() in self.options:
scores.append(self.do_check_bathy(ds,ftype))
return scores
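    # Usage sketch (option strings are those tested above): an options set of
    # ['3d', 'bathy'] runs the 2D check plus the 3D and bathymetry checks,
    # while adding 'cf' switches every check from the native SHOC variable
    # names to the CF-style names (longitude/latitude/time).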
'''
dimensions:
record = UNLIMITED ; // (1 currently)
k_grid = 48 ;
j_grid = 57 ;
i_grid = 33 ;
k_centre = 47 ;
j_centre = 56 ;
i_centre = 32 ;
j_left = 56 ;
i_left = 33 ;
j_back = 57 ;
i_back = 32 ;
variables:
double z_grid(k_grid) ;
z_grid:units = "metre" ;
z_grid:long_name = "Z coordinate at grid layer faces" ;
z_grid:coordinate_type = "Z" ;
double z_centre(k_centre) ;
z_centre:units = "metre" ;
z_centre:long_name = "Z coordinate at grid layer centre" ;
z_centre:coordinate_type = "Z" ;
double x_grid(j_grid, i_grid) ;
x_grid:long_name = "Longitude at grid corners" ;
x_grid:coordinate_type = "longitude" ;
x_grid:units = "degrees_east" ;
x_grid:projection = "geographic" ;
double y_grid(j_grid, i_grid) ;
y_grid:long_name = "Latitude at grid corners" ;
y_grid:coordinate_type = "latitude" ;
y_grid:projection = "geographic" ;
double x_centre(j_centre, i_centre) ;
x_centre:long_name = "Longitude at cell centre" ;
x_centre:coordinate_type = "longitude" ;
x_centre:units = "degrees_east" ;
x_centre:projection = "geographic" ;
double y_centre(j_centre, i_centre) ;
y_centre:long_name = "Latitude at cell centre" ;
y_centre:coordinate_type = "latitude" ;
y_centre:units = "degrees_north" ;
y_centre:projection = "geographic" ;
double x_left(j_left, i_left) ;
x_left:long_name = "Longitude at centre of left face" ;
x_left:coordinate_type = "longitude" ;
x_left:units = "degrees_east" ;
x_left:projection = "geographic" ;
double y_left(j_left, i_left) ;
y_left:long_name = "Latitude at centre of left face" ;
y_left:coordinate_type = "latitude" ;
y_left:units = "degrees_north" ;
y_left:projection = "geographic" ;
double x_back(j_back, i_back) ;
x_back:long_name = "Longitude at centre of back face" ;
x_back:coordinate_type = "longitude" ;
x_back:units = "degrees_east" ;
x_back:projection = "geographic" ;
double y_back(j_back, i_back) ;
y_back:long_name = "Latitude at centre of back face" ;
y_back:coordinate_type = "latitude" ;
y_back:units = "degrees_north" ;
y_back:projection = "geographic" ;
double botz(j_centre, i_centre) ;
botz:units = "metre" ;
botz:long_name = "Z coordinate at sea-bed at cell centre" ;
double h1au1(j_left, i_left) ;
h1au1:units = "metre" ;
h1au1:long_name = "Cell width at centre of left face" ;
h1au1:coordinates = "x_left, y_left" ;
double h1au2(j_back, i_back) ;
h1au2:units = "metre" ;
h1au2:long_name = "Cell width at centre of back face" ;
h1au2:coordinates = "x_back, y_back" ;
double h1acell(j_centre, i_centre) ;
h1acell:units = "metre" ;
h1acell:long_name = "Cell width at cell centre" ;
h1acell:coordinates = "x_centre, y_centre" ;
double h1agrid(j_grid, i_grid) ;
h1agrid:units = "metre" ;
h1agrid:long_name = "Cell width at grid corner" ;
h1agrid:coordinates = "x_grid, y_grid" ;
double h2au1(j_left, i_left) ;
h2au1:units = "metre" ;
h2au1:long_name = "Cell height at centre of left face" ;
h2au1:coordinates = "x_left, y_left" ;
double h2au2(j_back, i_back) ;
h2au2:units = "metre" ;
h2au2:long_name = "Cell height at centre of back face" ;
h2au2:coordinates = "x_back, y_back" ;
double h2acell(j_centre, i_centre) ;
h2acell:units = "metre" ;
h2acell:coordinates = "x_centre, y_centre" ;
double h2agrid(j_grid, i_grid) ;
h2agrid:units = "metre" ;
h2agrid:long_name = "Cell height at grid corner" ;
h2agrid:coordinates = "x_grid, y_grid" ;
double thetau1(j_left, i_left) ;
thetau1:units = "radian" ;
thetau1:long_name = "Cell rotation at centre of left face" ;
thetau1:coordinates = "x_left, y_left" ;
double thetau2(j_back, i_back) ;
thetau2:units = "radian" ;
thetau2:long_name = "Cell rotation at centre of back face" ;
thetau2:coordinates = "x_back, y_back" ;
double coriolis(j_centre, i_centre) ;
coriolis:units = " " ;
coriolis:long_name = "Coriolis parameter" ;
coriolis:coordinates = "x_centre, y_centre" ;
short crci(i_centre) ;
short clci(i_centre) ;
short crfi(i_grid) ;
short clfi(i_grid) ;
short frci(i_centre) ;
short flci(i_centre) ;
short frfi(i_grid) ;
short flfi(i_grid) ;
short cfcj(j_centre) ;
short cbcj(j_centre) ;
short cffj(j_grid) ;
short cbfj(j_grid) ;
short ffcj(j_centre) ;
short fbcj(j_centre) ;
short fffj(j_grid) ;
short fbfj(j_grid) ;
double t(record) ;
t:units = "days since 2014-08-11 00:00:00 +10:00" ;
t:long_name = "Time" ;
t:coordinate_type = "time" ;
double u1av(record, j_left, i_left) ;
u1av:units = "metre second-1" ;
u1av:long_name = "I component of depth averaged current at left face" ;
u1av:coordinates = "t, x_left, y_left" ;
double u2av(record, j_back, i_back) ;
u2av:units = "metre second-1" ;
u2av:long_name = "J component of depth averaged current at back face" ;
u2av:coordinates = "t, x_back, y_back" ;
double wtop(record, j_centre, i_centre) ;
wtop:units = "metre second-1" ;
wtop:long_name = "Vertical velocity at surface" ;
wtop:coordinates = "t, x_centre, y_centre" ;
double topz(record, j_centre, i_centre) ;
topz:units = "metre" ;
topz:long_name = "Z coordinate for surface cell" ;
topz:coordinates = "t, x_centre, y_centre" ;
double eta(record, j_centre, i_centre) ;
eta:units = "metre" ;
eta:long_name = "Surface Elevation" ;
eta:coordinates = "t, x_centre, y_centre" ;
double eta_mean(record, j_centre, i_centre) ;
eta_mean:tracer2D = "true" ;
alerts_cumulative:_FillValueWC = 0. ;
alerts_cumulative:valid_range_wc = 0., 1.e+36 ;
double U1VH0(record, j_centre, i_centre) ;
U1VH0:tracer2D = "true" ;
U1VH0:coordinates = "t, x_centre, y_centre" ;
U1VH0:long_name = "Initial e1 horizontal viscosity" ;
U1VH0:units = "" ;
U1VH0:type = 522 ;
U1VH0:diagn = 0 ;
U1VH0:_FillValueWC = 0. ;
U1VH0:valid_range_wc = 0., 1.e+36 ;
double U2VH0(record, j_centre, i_centre) ;
U2VH0:tracer2D = "true" ;
U2VH0:coordinates = "t, x_centre, y_centre" ;
U2VH0:long_name = "Initial e2 horizontal viscosity" ;
U2VH0:units = "" ;
U2VH0:type = 522 ;
U2VH0:diagn = 0 ;
U2VH0:_FillValueWC = 0. ;
U2VH0:valid_range_wc = 0., 1.e+36 ;
double sonic_depth(record, j_centre, i_centre) ;
sonic_depth:tracer2D = "true" ;
sonic_depth:coordinates = "t, x_centre, y_centre" ;
sonic_depth:long_name = "Sonic depth" ;
sonic_depth:units = "m" ;
sonic_depth:type = 522 ;
sonic_depth:diagn = 0 ;
sonic_depth:_FillValueWC = 0. ;
sonic_depth:valid_range_wc = -10000., 100. ;
double tau_be1(record, j_centre, i_centre) ;
tau_be1:tracer2D = "true" ;
tau_be1:coordinates = "t, x_centre, y_centre" ;
tau_be1:long_name = "Bottom stress in e1 direction" ;
tau_be1:units = "Nm-2" ;
tau_be1:type = 522 ;
tau_be1:diagn = 0 ;
tau_be1:_FillValueWC = 0. ;
tau_be1:valid_range_wc = -10000., 10000. ;
double tau_be2(record, j_centre, i_centre) ;
tau_be2:tracer2D = "true" ;
tau_be2:coordinates = "t, x_centre, y_centre" ;
tau_be2:long_name = "Bottom stress in e2 direction" ;
tau_be2:units = "Nm-2" ;
tau_be2:type = 522 ;
tau_be2:diagn = 0 ;
tau_be2:_FillValueWC = 0. ;
tau_be2:valid_range_wc = -10000., 10000. ;
double tau_bm(record, j_centre, i_centre) ;
tau_bm:tracer2D = "true" ;
double swr_bot_absorb(record, j_centre, i_centre) ;
swr_bot_absorb:tracer2D = "true" ;
swr_bot_absorb:coordinates = "t, x_centre, y_centre" ;
swr_bot_absorb:long_name = "SWR bottom absorption" ;
swr_bot_absorb:units = "" ;
swr_bot_absorb:type = 522 ;
swr_bot_absorb:diagn = 0 ;
swr_bot_absorb:_FillValueWC = 1. ;
swr_bot_absorb:valid_range_wc = 0., 1. ;
double swr_attenuation(record, j_centre, i_centre) ;
swr_attenuation:tracer2D = "true" ;
swr_attenuation:coordinates = "t, x_centre, y_centre" ;
swr_attenuation:long_name = "SWR attenuation" ;
swr_attenuation:units = "m-1" ;
swr_attenuation:type = 1034 ;
swr_attenuation:diagn = 0 ;
swr_attenuation:_FillValueWC = 0.073 ;
swr_attenuation:valid_range_wc = 0., 10. ;
double swr_transmission(record, j_centre, i_centre) ;
swr_transmission:tracer2D = "true" ;
swr_transmission:coordinates = "t, x_centre, y_centre" ;
swr_transmission:long_name = "SWR transmission" ;
swr_transmission:units = "" ;
swr_transmission:type = 1034 ;
swr_transmission:diagn = 0 ;
swr_transmission:_FillValueWC = 0.26 ;
swr_transmission:valid_range_wc = 0., 1. ;
double wind1(record, j_left, i_left) ;
wind1:units = "Nm-2" ;
wind1:long_name = "I component of wind stress at left face" ;
wind1:coordinates = "t, x_left, y_left" ;
double wind2(record, j_back, i_back) ;
wind2:units = "Nm-2" ;
wind2:long_name = "J component of wind stress at back face" ;
wind2:coordinates = "t, x_back, y_back" ;
double patm(record, j_centre, i_centre) ;
patm:units = "Pa" ;
patm:long_name = "Atmospheric pressure" ;
patm:coordinates = "t, x_centre, y_centre" ;
double u1(record, k_centre, j_left, i_left) ;
u1:units = "metre second-1" ;
u1:long_name = "I component of current at left face" ;
u1:coordinates = "t, x_left, y_left, z_centre" ;
double u2(record, k_centre, j_back, i_back) ;
u2:units = "metre second-1" ;
u2:long_name = "J component of current at back face" ;
u2:coordinates = "t, x_back, y_back, z_centre" ;
double w(record, k_centre, j_centre, i_centre) ;
w:units = "metre second-1" ;
w:long_name = "K component of current at cell centre and Z grid" ;
w:coordinates = "t, x_centre, y_centre, z_centre" ;
double salt(record, k_centre, j_centre, i_centre) ;
salt:tracer = "true" ;
salt:advect = 1 ;
salt:diffuse = 1 ;
salt:decay = "0.0" ;
double temp(record, k_centre, j_centre, i_centre) ;
temp:tracer = "true" ;
temp:coordinates = "t, x_centre, y_centre, z_centre" ;
temp:long_name = "Temperature" ;
temp:units = "degrees C" ;
temp:type = 4 ;
temp:diagn = 0 ;
temp:_FillValueWC = 20. ;
temp:valid_range_wc = -4., 40. ;
temp:_FillValueSED = 0. ;
temp:valid_range_sed = 0., 0. ;
temp:inwc = 1 ;
temp:insed = 0 ;
temp:dissol = 1 ;
temp:partic = 0 ;
temp:advect = 1 ;
temp:diffuse = 1 ;
temp:decay = "0.0" ;
double smagorinsky(record, k_centre, j_centre, i_centre) ;
smagorinsky:tracer = "true" ;
flag(record, k_centre, j_grid, i_grid) ;
flag:long_name = "SHOC masking flags" ;
flag:coordinates = "t, x_centre, y_centre, z_centre" ;
double dens(record, k_centre, j_centre, i_centre) ;
dens:units = "kg metre-3" ;
dens:long_name = "Density" ;
dens:coordinates = "t, x_centre, y_centre, z_centre" ;
double dens_0(record, k_centre, j_centre, i_centre) ;
dens_0:units = "kg metre-3" ;
dens_0:long_name = "Potential density" ;
dens_0:coordinates = "t, x_centre, y_centre, z_centre" ;
dens_0:_FillValue = 1025. ;
double Kz(record, k_centre, j_centre, i_centre) ;
Kz:units = "m2 s-1" ;
Kz:long_name = "Kz" ;
Kz:coordinates = "t, x_centre, y_centre, z_centre" ;
Kz:_FillValue = 0. ;
double Vz(record, k_centre, j_centre, i_centre) ;
Vz:units = "m2 s-1" ;
Vz:long_name = "Vz" ;
Vz:coordinates = "t, x_centre, y_centre, z_centre" ;
Vz:_FillValue = 0. ;
double u1bot(record, j_left, i_left) ;
u1bot:units = "metre second-1" ;
u1bot:long_name = "I component of bottom current deviation at left face" ;
u1bot:coordinates = "t, x_left, y_left" ;
double u2bot
'''
|
webtrike/compliance-checker
|
compliance_checker/shoc.py
|
Python
|
apache-2.0
| 28,288
|
[
"NetCDF"
] |
7fe05b86a99a2faaefb37798d43601014f4745a0e7b8d3bf00ceba45159c4990
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
"""
Main program for running MooseDocs. The moosedocs.py script that exists within the
documentation directory for applications calls this in a similar fashion to
MOOSE run_tests.
"""
import os
import argparse
import logging
import mooseutils
from .commands import build, verify, check, generate, syntax, init
from .common import log
def command_line_options():
"""
    The main command line parser, this creates the main parser and calls the
    command_line_options method for each command.
"""
desc = "MooseDocs: A utility to build MOOSE documentation from a single source."
parser = argparse.ArgumentParser(description=desc)
subparser = parser.add_subparsers(dest='command', help='Available commands.')
subparser.required = True
# Common arguments
parent = argparse.ArgumentParser(add_help=False)
levels = ['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG']
parent.add_argument('--level', '-l',
choices=levels,
default='INFO',
help="Set the python logging level (default: %(default)s).")
build.command_line_options(subparser, parent)
check.command_line_options(subparser, parent)
verify.command_line_options(subparser, parent)
generate.command_line_options(subparser, parent)
syntax.command_line_options(subparser, parent)
init.command_line_options(subparser, parent)
return parser.parse_args()
def run():
"""
Parse the command line options and run the correct command.
"""
options = command_line_options()
log.init_logging(getattr(logging, options.level))
if options.command == 'build':
errno = build.main(options)
elif options.command == 'check':
errno = check.main(options)
elif options.command == 'verify':
errno = verify.main(options)
elif options.command == 'generate':
errno = generate.main(options)
elif options.command == 'syntax':
errno = syntax.main(options)
elif options.command == 'init':
errno = init.main(options)
else:
errno = 1
handler = logging.getLogger('MooseDocs').handlers[0]
critical = handler.getCount(logging.CRITICAL)
errors = handler.getCount(logging.ERROR)
warnings = handler.getCount(logging.WARNING)
print('CRITICAL:{} ERROR:{} WARNING:{}'.format(critical, errors, warnings))
if critical or errors or (errno != 0):
return 1
return 0
if __name__ == '__main__':
run()
|
harterj/moose
|
python/MooseDocs/main.py
|
Python
|
lgpl-2.1
| 2,810
|
[
"MOOSE"
] |
e8e53b26d5d458752f4e8a14f85cbc7ff670c82d0934788909d337b41977a3b3
|
'''
synbiochem (c) University of Manchester 2015
synbiochem is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
from setuptools import find_packages, setup
setup(name='synbiochem-py',
version='0.6.35',
description='synbiochem-py: Core python modules for SYNBIOCHEM',
long_description='synbiochem-py: Core python modules for SYNBIOCHEM',
url='https://github.com/synbiochem/synbiochem-py',
author='Neil Swainston',
author_email='neil.swainston@liverpool.ac.uk',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7'
],
keywords='synbio synthetic biology',
packages=find_packages(),
include_package_data=True,
test_suite='synbiochem.utils.test',
install_requires=['biopython',
'numpy',
'pandas',
'requests==2.12.4',
'pyopenssl'])
|
synbiochem/synbiochem-py
|
setup.py
|
Python
|
mit
| 1,269
|
[
"Biopython",
"VisIt"
] |
442a8f159d2431068527a9e6843b2dc6a8884d9cfce6c28c5edd176bccc4f456
|
# Copyright 2011 Rackspace
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import fixtures
import mock
import mox
import netaddr
from oslo.config import cfg
from oslo import messaging
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import ipv6
from nova.network import floating_ips
from nova.network import linux_net
from nova.network import manager as network_manager
from nova.network import model as net_model
from nova import objects
from nova.objects import quotas as quotas_obj
from nova.objects import virtual_interface as vif_obj
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import quota
from nova import test
from nova.tests import fake_instance
from nova.tests import fake_ldap
from nova.tests import fake_network
from nova.tests import matchers
from nova.tests.objects import test_fixed_ip
from nova.tests.objects import test_floating_ip
from nova.tests.objects import test_network
from nova.tests.objects import test_service
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
HOST = "testhost"
FAKEUUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
fake_inst = fake_instance.fake_db_instance
networks = [{'id': 0,
'uuid': FAKEUUID,
'label': 'test0',
'injected': False,
'multi_host': False,
'cidr': '192.168.0.0/24',
'cidr_v6': '2001:db8::/64',
'gateway_v6': '2001:db8::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa0',
'bridge_interface': 'fake_fa0',
'gateway': '192.168.0.1',
'dhcp_server': '192.168.0.1',
'broadcast': '192.168.0.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.0.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'},
{'id': 1,
'uuid': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'label': 'test1',
'injected': False,
'multi_host': False,
'cidr': '192.168.1.0/24',
'cidr_v6': '2001:db9::/64',
'gateway_v6': '2001:db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.1.1',
'dhcp_server': '192.168.1.1',
'broadcast': '192.168.1.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.1.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'}]
fixed_ips = [{'id': 0,
'network_id': 0,
'address': '192.168.0.100',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []},
{'id': 0,
'network_id': 1,
'address': '192.168.1.100',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []},
{'id': 0,
'network_id': 1,
'address': '2001:db9:0:1::10',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []}]
flavor = {'id': 0,
'rxtx_cap': 3}
floating_ip_fields = {'id': 0,
'address': '192.168.10.100',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': 0,
'project_id': None,
'auto_assigned': False}
vifs = [{'id': 0,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:00',
'uuid': '00000000-0000-0000-0000-0000000000000000',
'network_id': 0,
'instance_uuid': 0},
{'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:01',
'uuid': '00000000-0000-0000-0000-0000000000000001',
'network_id': 1,
'instance_uuid': 0},
{'id': 2,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:02',
'uuid': '00000000-0000-0000-0000-0000000000000002',
'network_id': 2,
'instance_uuid': 0}]
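# Note (descriptive only): the fixtures above are linked by id -- each entry
# in ``fixed_ips`` points at ``networks`` via 'network_id' and at ``vifs``
# via 'virtual_interface_id', which is the wiring the manager tests below
# rely on when allocating, validating and associating addresses.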
class FlatNetworkTestCase(test.TestCase):
def setUp(self):
super(FlatNetworkTestCase, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.flags(use_local=True, group='conductor')
self.network = network_manager.FlatManager(host=HOST)
self.network.instance_dns_domain = ''
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
def test_get_instance_nw_info(self):
fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info
nw_info = fake_get_instance_nw_info(self.stubs, 0, 2)
self.assertFalse(nw_info)
nw_info = fake_get_instance_nw_info(self.stubs, 1, 2)
for i, vif in enumerate(nw_info):
nid = i + 1
check = {'bridge': 'fake_br%d' % nid,
'cidr': '192.168.%s.0/24' % nid,
'cidr_v6': '2001:db8:0:%x::/64' % nid,
'id': '00000000-0000-0000-0000-00000000000000%02d' % nid,
'multi_host': False,
'injected': False,
'bridge_interface': None,
'vlan': None,
'broadcast': '192.168.%d.255' % nid,
'dhcp_server': '192.168.1.1',
'dns': ['192.168.%d.3' % nid, '192.168.%d.4' % nid],
'gateway': '192.168.%d.1' % nid,
'gateway_v6': '2001:db8:0:1::1',
'label': 'test%d' % nid,
'mac': 'DE:AD:BE:EF:00:%02x' % nid,
'rxtx_cap': 30,
'vif_type': net_model.VIF_TYPE_BRIDGE,
'vif_devname': None,
'vif_uuid':
'00000000-0000-0000-0000-00000000000000%02d' % nid,
'ovs_interfaceid': None,
'qbh_params': None,
'qbg_params': None,
'should_create_vlan': False,
'should_create_bridge': False,
'ip': '192.168.%d.%03d' % (nid, nid + 99),
'ip_v6': '2001:db8:0:1::%x' % nid,
'netmask': '255.255.255.0',
'netmask_v6': 64,
'physical_network': None,
}
network = vif['network']
net_v4 = vif['network']['subnets'][0]
net_v6 = vif['network']['subnets'][1]
vif_dict = dict(bridge=network['bridge'],
cidr=net_v4['cidr'],
cidr_v6=net_v6['cidr'],
id=vif['id'],
multi_host=network.get_meta('multi_host', False),
injected=network.get_meta('injected', False),
bridge_interface=
network.get_meta('bridge_interface'),
vlan=network.get_meta('vlan'),
broadcast=str(net_v4.as_netaddr().broadcast),
dhcp_server=network.get_meta('dhcp_server',
net_v4['gateway']['address']),
dns=[ip['address'] for ip in net_v4['dns']],
gateway=net_v4['gateway']['address'],
gateway_v6=net_v6['gateway']['address'],
label=network['label'],
mac=vif['address'],
rxtx_cap=vif.get_meta('rxtx_cap'),
vif_type=vif['type'],
vif_devname=vif.get('devname'),
vif_uuid=vif['id'],
ovs_interfaceid=vif.get('ovs_interfaceid'),
qbh_params=vif.get('qbh_params'),
qbg_params=vif.get('qbg_params'),
should_create_vlan=
network.get_meta('should_create_vlan', False),
should_create_bridge=
network.get_meta('should_create_bridge',
False),
ip=net_v4['ips'][i]['address'],
ip_v6=net_v6['ips'][i]['address'],
netmask=str(net_v4.as_netaddr().netmask),
netmask_v6=net_v6.as_netaddr()._prefixlen,
physical_network=
network.get_meta('physical_network', None))
self.assertThat(vif_dict, matchers.DictMatches(check))
def test_validate_networks(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[1])
ip['network'] = dict(test_network.fake_network,
**networks[1])
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(ip)
ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[0])
ip['network'] = dict(test_network.fake_network,
**networks[0])
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(ip)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_valid_fixed_ipv6(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'2001:db9:0:1::10')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **networks[1])])
ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[2])
ip['network'] = dict(test_network.fake_network,
**networks[1])
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(ip)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_reserved(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
nets = self.network.create_networks(context_admin, 'fake',
'192.168.0.0/24', False, 1,
256, None, None, None, None, None)
self.assertEqual(1, len(nets))
network = nets[0]
self.assertEqual(3, db.network_count_reserved_ips(context_admin,
network['id']))
def test_validate_networks_none_requested_networks(self):
self.network.validate_networks(self.context, None)
def test_validate_networks_empty_requested_networks(self):
requested_networks = []
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_invalid_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100.1'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100.1')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks, self.context,
requested_networks)
def test_validate_networks_empty_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
''),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks,
self.context, requested_networks)
def test_validate_networks_none_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
None),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
None)]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
@mock.patch('nova.objects.quotas.Quotas.reserve')
def test_add_fixed_ip_instance_using_id_without_vpn(self, reserve):
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
lambda *a, **kw: None)
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'network_update')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.101')
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
instance_uuid=mox.IgnoreArg(),
host=None).AndReturn(fixed)
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
db.instance_get_by_uuid(self.context,
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(inst)
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg(),
project_only=mox.IgnoreArg()
).AndReturn(dict(test_network.fake_network,
**networks[0]))
db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
networks[0]['id'])
exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
inst)
reserve.assert_called_once_with(self.context, fixed_ips=1,
project_id=exp_project,
user_id=exp_user)
@mock.patch('nova.objects.quotas.Quotas.reserve')
def test_add_fixed_ip_instance_using_uuid_without_vpn(self, reserve):
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
lambda *a, **kw: None)
self.mox.StubOutWithMock(db, 'network_get_by_uuid')
self.mox.StubOutWithMock(db, 'network_update')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.101')
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
instance_uuid=mox.IgnoreArg(),
host=None).AndReturn(fixed)
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
db.instance_get_by_uuid(self.context,
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(inst)
db.network_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()
).AndReturn(dict(test_network.fake_network,
**networks[0]))
db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
networks[0]['uuid'])
exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
inst)
reserve.assert_called_once_with(self.context, fixed_ips=1,
project_id=exp_project,
user_id=exp_user)
def test_mini_dns_driver(self):
zone1 = "example.org"
zone2 = "example.com"
driver = self.network.instance_dns_manager
driver.create_entry("hostone", "10.0.0.1", "A", zone1)
driver.create_entry("hosttwo", "10.0.0.2", "A", zone1)
driver.create_entry("hostthree", "10.0.0.3", "A", zone1)
driver.create_entry("hostfour", "10.0.0.4", "A", zone1)
driver.create_entry("hostfive", "10.0.0.5", "A", zone2)
driver.delete_entry("hostone", zone1)
driver.modify_address("hostfour", "10.0.0.1", zone1)
driver.modify_address("hostthree", "10.0.0.1", zone1)
names = driver.get_entries_by_address("10.0.0.1", zone1)
self.assertEqual(len(names), 2)
self.assertIn('hostthree', names)
self.assertIn('hostfour', names)
names = driver.get_entries_by_address("10.0.0.5", zone2)
self.assertEqual(len(names), 1)
self.assertIn('hostfive', names)
addresses = driver.get_entries_by_name("hosttwo", zone1)
self.assertEqual(len(addresses), 1)
self.assertIn('10.0.0.2', addresses)
self.assertRaises(exception.InvalidInput,
driver.create_entry,
"hostname",
"10.10.10.10",
"invalidtype",
zone1)
def test_mini_dns_driver_with_mixed_case(self):
zone1 = "example.org"
driver = self.network.instance_dns_manager
driver.create_entry("HostTen", "10.0.0.10", "A", zone1)
addresses = driver.get_entries_by_address("10.0.0.10", zone1)
self.assertEqual(len(addresses), 1)
for n in addresses:
driver.delete_entry(n, zone1)
addresses = driver.get_entries_by_address("10.0.0.10", zone1)
self.assertEqual(len(addresses), 0)
@mock.patch('nova.objects.quotas.Quotas.reserve')
def test_instance_dns(self, reserve):
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
lambda *a, **kw: None)
fixedip = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.101')
self.mox.StubOutWithMock(db, 'network_get_by_uuid')
self.mox.StubOutWithMock(db, 'network_update')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
instance_uuid=mox.IgnoreArg(),
host=None
).AndReturn(fixedip)
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
db.instance_get_by_uuid(self.context,
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(inst)
db.network_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()
).AndReturn(dict(test_network.fake_network,
**networks[0]))
db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
networks[0]['uuid'])
instance_manager = self.network.instance_dns_manager
addresses = instance_manager.get_entries_by_name(HOST,
self.network.instance_dns_domain)
self.assertEqual(len(addresses), 1)
self.assertEqual(addresses[0], fixedip['address'])
addresses = instance_manager.get_entries_by_name(FAKEUUID,
self.network.instance_dns_domain)
self.assertEqual(len(addresses), 1)
self.assertEqual(addresses[0], fixedip['address'])
exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
inst)
reserve.assert_called_once_with(self.context, fixed_ips=1,
project_id=exp_project,
user_id=exp_user)
def test_allocate_floating_ip(self):
self.assertIsNone(self.network.allocate_floating_ip(self.context,
1, None))
def test_deallocate_floating_ip(self):
self.assertIsNone(self.network.deallocate_floating_ip(self.context,
1, None))
def test_associate_floating_ip(self):
self.assertIsNone(self.network.associate_floating_ip(self.context,
None, None))
def test_disassociate_floating_ip(self):
self.assertIsNone(self.network.disassociate_floating_ip(self.context,
None, None))
def test_get_networks_by_uuids_ordering(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
res = self.network._get_networks_by_uuids(self.context,
requested_networks)
self.assertEqual(res[0]['id'], 1)
self.assertEqual(res[1]['id'], 0)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.quotas.Quotas.reserve')
@mock.patch('nova.objects.quotas.ids_from_instance')
def test_allocate_calculates_quota_auth(self, util_method, reserve,
get_by_uuid):
inst = objects.Instance()
inst['uuid'] = 'nosuch'
get_by_uuid.return_value = inst
reserve.side_effect = exception.OverQuota(overs='testing')
util_method.return_value = ('foo', 'bar')
self.assertRaises(exception.FixedIpLimitExceeded,
self.network.allocate_fixed_ip,
self.context, 123, {'uuid': 'nosuch'})
util_method.assert_called_once_with(self.context, inst)
@mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address')
@mock.patch('nova.objects.quotas.Quotas.reserve')
@mock.patch('nova.objects.quotas.ids_from_instance')
def test_deallocate_calculates_quota_auth(self, util_method, reserve,
get_by_address):
inst = objects.Instance(uuid='fake-uuid')
fip = objects.FixedIP(instance_uuid='fake-uuid',
virtual_interface_id=1)
get_by_address.return_value = fip
util_method.return_value = ('foo', 'bar')
# This will fail right after the reserve call when it tries
# to look up the fake instance we created above
self.assertRaises(exception.InstanceNotFound,
self.network.deallocate_fixed_ip,
self.context, '1.2.3.4', instance=inst)
util_method.assert_called_once_with(self.context, inst)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
mock_get):
mock_associate.side_effect = test.TestingException
instance = objects.Instance(context=self.context)
instance.create()
mock_get.return_value = instance
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
address=netaddr.IPAddress('1.2.3.4'))
mock_associate.assert_called_once_with(self.context,
'1.2.3.4',
instance.uuid,
1)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.virtual_interface.VirtualInterface'
'.get_by_instance_and_network')
@mock.patch('nova.objects.fixed_ip.FixedIP.disassociate')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
@mock.patch('nova.objects.fixed_ip.FixedIP.save')
def test_allocate_fixed_ip_cleanup(self,
mock_fixedip_save,
mock_fixedip_associate,
mock_fixedip_disassociate,
mock_vif_get,
mock_instance_get):
address = netaddr.IPAddress('1.2.3.4')
fip = objects.FixedIP(instance_uuid='fake-uuid',
address=address,
virtual_interface_id=1)
mock_fixedip_associate.return_value = fip
instance = objects.Instance(context=self.context)
instance.create()
mock_instance_get.return_value = instance
mock_vif_get.return_value = vif_obj.VirtualInterface(
instance_uuid='fake-uuid', id=1)
with contextlib.nested(
mock.patch.object(self.network, '_setup_network_on_host'),
mock.patch.object(self.network, 'instance_dns_manager'),
mock.patch.object(self.network,
'_do_trigger_security_group_members_refresh_for_instance')
) as (mock_setup_network, mock_dns_manager, mock_ignored):
mock_setup_network.side_effect = test.TestingException
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
address=address)
mock_dns_manager.delete_entry.assert_has_calls([
mock.call(instance.display_name, ''),
mock.call(instance.uuid, '')
])
mock_fixedip_disassociate.assert_called_once_with(self.context)
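# NOTE(illustrative): test_allocate_fixed_ip_cleanup above relies on the
# Python 2-only contextlib.nested() helper to stack three patches.  A rough
# equivalent without nested(), sketched against a hypothetical manager
# object "mgr", is simply to nest the context managers directly:
#
#     with mock.patch.object(mgr, '_setup_network_on_host') as mock_setup:
#         with mock.patch.object(mgr, 'instance_dns_manager') as mock_dns:
#             mock_setup.side_effect = test.TestingException
#             # ... exercise allocate_fixed_ip and assert as above ...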
class FlatDHCPNetworkTestCase(test.TestCase):
def setUp(self):
super(FlatDHCPNetworkTestCase, self).setUp()
self.useFixture(test.SampleNetworks())
self.flags(use_local=True, group='conductor')
self.network = network_manager.FlatDHCPManager(host=HOST)
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
@mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
@mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
@mock.patch('nova.network.linux_net.iptables_manager._apply')
def test_init_host_iptables_defer_apply(self, iptable_apply,
floating_get_by_host,
fixed_get_by_id):
def get_by_id(context, fixed_ip_id, **kwargs):
net = objects.Network(bridge='testbridge',
cidr='192.168.1.0/24')
if fixed_ip_id == 1:
return objects.FixedIP(address='192.168.1.4',
network=net)
elif fixed_ip_id == 2:
return objects.FixedIP(address='192.168.1.5',
network=net)
def fake_apply():
fake_apply.count += 1
fake_apply.count = 0
ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
float1._context = ctxt
float2._context = ctxt
iptable_apply.side_effect = fake_apply
floating_get_by_host.return_value = [float1, float2]
fixed_get_by_id.side_effect = get_by_id
self.network.init_host()
self.assertEqual(1, fake_apply.count)
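# NOTE(illustrative): the init_host test above counts invocations by hanging
# a counter attribute off the stub function itself.  This standalone helper
# (not part of the original suite) shows the same pattern in isolation:
def _example_counting_stub():
    def fake_apply():
        fake_apply.count += 1
    fake_apply.count = 0
    fake_apply()
    fake_apply()
    return fake_apply.count  # returns 2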
class VlanNetworkTestCase(test.TestCase):
def setUp(self):
super(VlanNetworkTestCase, self).setUp()
self.useFixture(test.SampleNetworks())
self.flags(use_local=True, group='conductor')
self.network = network_manager.VlanManager(host=HOST)
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
def test_quota_driver_type(self):
self.assertEqual(objects.QuotasNoOp,
self.network.quotas_cls)
def test_vpn_allocate_fixed_ip(self):
self.mox.StubOutWithMock(db, 'fixed_ip_associate')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.1')
db.fixed_ip_associate(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
network_id=mox.IgnoreArg(),
reserved=True).AndReturn(fixed)
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
uuid=FAKEUUID))
self.mox.ReplayAll()
network = objects.Network._from_db_object(
self.context, objects.Network(),
dict(test_network.fake_network, **networks[0]))
network.vpn_private_address = '192.168.0.2'
self.network.allocate_fixed_ip(self.context, FAKEUUID, network,
vpn=True)
def test_vpn_allocate_fixed_ip_no_network_id(self):
network = dict(networks[0])
network['vpn_private_address'] = '192.168.0.2'
network['id'] = None
instance = db.instance_create(self.context, {})
self.assertRaises(exception.FixedIpNotFoundForNetwork,
self.network.allocate_fixed_ip,
self.context_admin,
instance['uuid'],
network,
vpn=True)
def test_allocate_fixed_ip(self):
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
lambda *a, **kw: None)
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.1')
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
instance_uuid=mox.IgnoreArg(),
host=None).AndReturn(fixed)
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
uuid=FAKEUUID))
self.mox.ReplayAll()
network = objects.Network._from_db_object(
self.context, objects.Network(),
dict(test_network.fake_network, **networks[0]))
network.vpn_private_address = '192.168.0.2'
self.network.allocate_fixed_ip(self.context, FAKEUUID, network)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
mock_get):
mock_associate.side_effect = test.TestingException
instance = objects.Instance(context=self.context)
instance.create()
mock_get.return_value = instance
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
address=netaddr.IPAddress('1.2.3.4'))
mock_associate.assert_called_once_with(self.context,
'1.2.3.4',
instance.uuid,
1)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
def test_allocate_fixed_ip_passes_string_address_vpn(self, mock_associate,
mock_get):
mock_associate.side_effect = test.TestingException
instance = objects.Instance(context=self.context)
instance.create()
mock_get.return_value = instance
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch',
'vpn_private_address': netaddr.IPAddress('1.2.3.4')
}, vpn=1)
mock_associate.assert_called_once_with(self.context,
'1.2.3.4',
instance.uuid,
1, reserved=True)
def test_create_networks_too_big(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=4094, vlan_start=1)
def test_create_networks_too_many(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=100, vlan_start=1,
cidr='192.168.0.1/24', network_size=100)
def test_duplicate_vlan_raises(self):
# VLAN 100 is already used and we force the network to be created
# in that vlan (vlan=100).
self.assertRaises(exception.DuplicateVlan,
self.network.create_networks,
self.context_admin, label="fake", num_networks=1,
vlan=100, cidr='192.168.0.1/24', network_size=100)
def test_vlan_start(self):
        # VLAN 100 and 101 are used, so this network should be created in 102
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=1,
vlan_start=100, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(networks[0]["vlan"], 102)
def test_vlan_start_multiple(self):
        # VLAN 100 and 101 are used, so these networks should be created in 102
# and 103
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=2,
vlan_start=100, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(networks[0]["vlan"], 102)
self.assertEqual(networks[1]["vlan"], 103)
def test_vlan_start_used(self):
# VLAN 100 and 101 are used, but vlan_start=99.
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=1,
vlan_start=99, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(networks[0]["vlan"], 102)
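    # NOTE(illustrative): the three vlan_start tests above all expect the
    # manager to skip the VLAN tags that SampleNetworks already consumed
    # (100 and 101).  The expectations are consistent with bumping the
    # starting tag above the highest one in use, roughly:
    #
    #     used = {100, 101}
    #     vlan_start = max(vlan_start, max(used) + 1)   # 99 or 100 -> 102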
@mock.patch('nova.db.network_get')
def test_validate_networks(self, net_get):
def network_get(_context, network_id, project_only='allow_none'):
return dict(test_network.fake_network, **networks[network_id])
net_get.side_effect = network_get
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
self.mox.StubOutWithMock(db, "fixed_ip_get_by_address")
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
db_fixed1 = dict(test_fixed_ip.fake_fixed_ip,
network_id=networks[1]['id'],
network=dict(test_network.fake_network,
**networks[1]),
instance_uuid=None)
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(db_fixed1)
db_fixed2 = dict(test_fixed_ip.fake_fixed_ip,
network_id=networks[0]['id'],
network=dict(test_network.fake_network,
**networks[0]),
instance_uuid=None)
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(db_fixed2)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_none_requested_networks(self):
self.network.validate_networks(self.context, None)
def test_validate_networks_empty_requested_networks(self):
requested_networks = []
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_invalid_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100.1'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100.1')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks, self.context,
requested_networks)
def test_validate_networks_empty_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', ''),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks,
self.context, requested_networks)
def test_validate_networks_none_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', None),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_floating_ip_owned_by_project(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
# raises because floating_ip project_id is None
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=None)
self.assertRaises(exception.Forbidden,
self.network._floating_ip_owned_by_project,
ctxt,
floating_ip)
# raises because floating_ip project_id is not equal to ctxt project_id
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=ctxt.project_id + '1')
self.assertRaises(exception.Forbidden,
self.network._floating_ip_owned_by_project,
ctxt,
floating_ip)
# does not raise (floating ip is owned by ctxt project)
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=ctxt.project_id)
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
ctxt = context.RequestContext(None, None,
is_admin=True)
# does not raise (ctxt is admin)
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=None)
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
# does not raise (ctxt is admin)
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id='testproject')
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
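    # NOTE(illustrative): the ownership rule exercised above boils down to
    # "admin contexts may touch any floating IP, everyone else only IPs
    # whose project_id matches their own".  A rough sketch of that check:
    #
    #     def owned_by_project(ctxt, floating_ip):
    #         return (ctxt.is_admin or
    #                 floating_ip['project_id'] == ctxt.project_id)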
def test_allocate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake_allocate_address(*args, **kwargs):
return {'address': '10.0.0.1', 'project_id': ctxt.project_id}
self.stubs.Set(self.network.db, 'floating_ip_allocate_address',
fake_allocate_address)
self.network.allocate_floating_ip(ctxt, ctxt.project_id)
@mock.patch('nova.quota.QUOTAS.reserve')
@mock.patch('nova.quota.QUOTAS.commit')
def test_deallocate_floating_ip(self, mock_commit, mock_reserve):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip)
def fake2(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1', fixed_ip_id=1)
def fake3(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1', fixed_ip_id=None,
project_id=ctxt.project_id)
self.stubs.Set(self.network.db, 'floating_ip_deallocate', fake1)
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# this time should raise because floating ip is associated to fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.assertRaises(exception.FloatingIpAssociated,
self.network.deallocate_floating_ip,
ctxt,
mox.IgnoreArg())
mock_reserve.return_value = 'reserve'
# this time should not raise
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
self.network.deallocate_floating_ip(ctxt, ctxt.project_id)
mock_commit.assert_called_once_with(ctxt, 'reserve',
project_id='testproject')
@mock.patch('nova.db.fixed_ip_get')
def test_associate_floating_ip(self, fixed_get):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
network=test_network.fake_network)
# floating ip that's already associated
def fake2(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=1)
# floating ip that isn't associated
def fake3(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=None)
# fixed ip with remote host
def fake4(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
pool='nova',
instance_uuid=FAKEUUID,
interface='eth0',
network_id=123)
def fake4_network(*args, **kwargs):
return dict(test_network.fake_network,
multi_host=False, host='jibberjabber')
# fixed ip with local host
def fake5(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
pool='nova',
instance_uuid=FAKEUUID,
interface='eth0',
network_id=1234)
def fake5_network(*args, **kwargs):
return dict(test_network.fake_network,
multi_host=False, host='testhost')
def fake6(ctxt, method, **kwargs):
self.local = False
def fake7(*args, **kwargs):
self.local = True
def fake8(*args, **kwargs):
raise processutils.ProcessExecutionError('',
'Cannot find device "em0"\n')
def fake9(*args, **kwargs):
raise test.TestingException()
# raises because interface doesn't exist
self.stubs.Set(self.network.db,
'floating_ip_fixed_ip_associate',
fake1)
self.stubs.Set(self.network.db, 'floating_ip_disassociate', fake1)
self.stubs.Set(self.network.driver, 'ensure_floating_forward', fake8)
self.assertRaises(exception.NoFloatingIpInterface,
self.network._associate_floating_ip,
ctxt,
'1.2.3.4',
'1.2.3.5',
mox.IgnoreArg(),
mox.IgnoreArg())
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# raises because floating_ip is already associated to a fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.stubs.Set(self.network, 'disassociate_floating_ip', fake9)
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address='1.2.3.4',
instance_uuid='fake_uuid',
network=test_network.fake_network)
# doesn't raise because we exit early if the address is the same
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), '1.2.3.4')
# raises because we call disassociate which is mocked
self.assertRaises(test.TestingException,
self.network.associate_floating_ip,
ctxt,
mox.IgnoreArg(),
'new')
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
# does not raise and makes call remotely
self.local = True
self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake4)
self.stubs.Set(self.network.db, 'network_get', fake4_network)
self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
lambda **kw: self.network.network_rpcapi.client)
self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
mox.IgnoreArg())
self.assertFalse(self.local)
# does not raise and makes call locally
self.local = False
self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake5)
self.stubs.Set(self.network.db, 'network_get', fake5_network)
self.stubs.Set(self.network, '_associate_floating_ip', fake7)
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
mox.IgnoreArg())
self.assertTrue(self.local)
def test_add_floating_ip_nat_before_bind(self):
# Tried to verify order with documented mox record/verify
# functionality, but it doesn't seem to work since I can't make it
# fail. I'm using stubs and a flag for now, but if this mox feature
# can be made to work, it would be a better way to test this.
#
# self.mox.StubOutWithMock(self.network.driver,
# 'ensure_floating_forward')
# self.mox.StubOutWithMock(self.network.driver, 'bind_floating_ip')
#
# self.network.driver.ensure_floating_forward(mox.IgnoreArg(),
# mox.IgnoreArg(),
# mox.IgnoreArg(),
# mox.IgnoreArg())
# self.network.driver.bind_floating_ip(mox.IgnoreArg(),
# mox.IgnoreArg())
# self.mox.ReplayAll()
nat_called = [False]
def fake_nat(*args, **kwargs):
nat_called[0] = True
def fake_bind(*args, **kwargs):
self.assertTrue(nat_called[0])
self.stubs.Set(self.network.driver,
'ensure_floating_forward',
fake_nat)
self.stubs.Set(self.network.driver, 'bind_floating_ip', fake_bind)
self.network.l3driver.add_floating_ip('fakefloat',
'fakefixed',
'fakeiface',
'fakenet')
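        # NOTE(illustrative): with mock (rather than mox), the ordering could
        # be asserted by attaching both stubs to one parent mock and checking
        # parent.mock_calls.  A hypothetical sketch, not wired into this test:
        #
        #     parent = mock.Mock()
        #     parent.attach_mock(mock.Mock(), 'ensure_floating_forward')
        #     parent.attach_mock(mock.Mock(), 'bind_floating_ip')
        #     # ... patch the driver with parent's children and run ...
        #     names = [name for name, args, kwargs in parent.mock_calls]
        #     assert (names.index('ensure_floating_forward') <
        #             names.index('bind_floating_ip'))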
@mock.patch('nova.db.floating_ip_get_all_by_host')
@mock.patch('nova.db.fixed_ip_get')
def _test_floating_ip_init_host(self, fixed_get, floating_get,
public_interface, expected_arg):
floating_get.return_value = [
dict(test_floating_ip.fake_floating_ip,
interface='foo',
address='1.2.3.4'),
dict(test_floating_ip.fake_floating_ip,
interface='fakeiface',
address='1.2.3.5',
fixed_ip_id=1),
dict(test_floating_ip.fake_floating_ip,
interface='bar',
address='1.2.3.6',
fixed_ip_id=2),
]
def fixed_ip_get(_context, fixed_ip_id, get_network):
if fixed_ip_id == 1:
return dict(test_fixed_ip.fake_fixed_ip,
address='1.2.3.4',
network=test_network.fake_network)
raise exception.FixedIpNotFound(id=fixed_ip_id)
fixed_get.side_effect = fixed_ip_get
self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip')
self.flags(public_interface=public_interface)
self.network.l3driver.add_floating_ip(netaddr.IPAddress('1.2.3.5'),
netaddr.IPAddress('1.2.3.4'),
expected_arg,
mox.IsA(objects.Network))
self.mox.ReplayAll()
self.network.init_host_floating_ips()
self.mox.UnsetStubs()
self.mox.VerifyAll()
def test_floating_ip_init_host_without_public_interface(self):
self._test_floating_ip_init_host(public_interface=False,
expected_arg='fakeiface')
def test_floating_ip_init_host_with_public_interface(self):
self._test_floating_ip_init_host(public_interface='fooiface',
expected_arg='fooiface')
def test_disassociate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
pass
# floating ip that isn't associated
def fake2(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=None)
# floating ip that is associated
def fake3(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=1,
project_id=ctxt.project_id)
# fixed ip with remote host
def fake4(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
pool='nova',
instance_uuid=FAKEUUID,
interface='eth0',
network_id=123)
def fake4_network(*args, **kwargs):
return dict(test_network.fake_network,
multi_host=False,
host='jibberjabber')
# fixed ip with local host
def fake5(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
pool='nova',
instance_uuid=FAKEUUID,
interface='eth0',
network_id=1234)
def fake5_network(*args, **kwargs):
return dict(test_network.fake_network,
multi_host=False, host='testhost')
def fake6(ctxt, method, **kwargs):
self.local = False
def fake7(*args, **kwargs):
self.local = True
def fake8(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=1,
auto_assigned=True,
project_id=ctxt.project_id)
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# raises because floating_ip is not associated to a fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.assertRaises(exception.FloatingIpNotAssociated,
self.network.disassociate_floating_ip,
ctxt,
mox.IgnoreArg())
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
# does not raise and makes call remotely
self.local = True
self.stubs.Set(self.network.db, 'fixed_ip_get', fake4)
self.stubs.Set(self.network.db, 'network_get', fake4_network)
self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
lambda **kw: self.network.network_rpcapi.client)
self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
self.assertFalse(self.local)
# does not raise and makes call locally
self.local = False
self.stubs.Set(self.network.db, 'fixed_ip_get', fake5)
self.stubs.Set(self.network.db, 'network_get', fake5_network)
self.stubs.Set(self.network, '_disassociate_floating_ip', fake7)
self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
self.assertTrue(self.local)
# raises because auto_assigned floating IP cannot be disassociated
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake8)
self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP,
self.network.disassociate_floating_ip,
ctxt,
mox.IgnoreArg())
def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
lambda *a, **kw: None)
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.101')
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
instance_uuid=mox.IgnoreArg(),
host=None).AndReturn(fixed)
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg(),
project_only=mox.IgnoreArg()
).AndReturn(dict(test_network.fake_network,
**networks[0]))
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
uuid=FAKEUUID))
self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
networks[0]['id'])
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
def test_ip_association_and_allocation_of_other_project(self, net_get,
                                                            fixed_get):
        """Makes sure that we cannot deallocate or disassociate
        a public ip of another project.
"""
net_get.return_value = dict(test_network.fake_network,
**networks[1])
context1 = context.RequestContext('user', 'project1')
context2 = context.RequestContext('user', 'project2')
float_ip = db.floating_ip_create(context1.elevated(),
{'address': '1.2.3.4',
'project_id': context1.project_id})
float_addr = float_ip['address']
instance = db.instance_create(context1,
{'project_id': 'project1'})
fix_addr = db.fixed_ip_associate_pool(context1.elevated(),
1, instance['uuid']).address
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address=fix_addr,
instance_uuid=instance.uuid,
network=dict(test_network.fake_network,
**networks[1]))
# Associate the IP with non-admin user context
self.assertRaises(exception.Forbidden,
self.network.associate_floating_ip,
context2,
float_addr,
fix_addr)
# Deallocate address from other project
self.assertRaises(exception.Forbidden,
self.network.deallocate_floating_ip,
context2,
float_addr)
# Now Associates the address to the actual project
self.network.associate_floating_ip(context1, float_addr, fix_addr)
# Now try dis-associating from other project
self.assertRaises(exception.Forbidden,
self.network.disassociate_floating_ip,
context2,
float_addr)
# Clean up the ip addresses
self.network.disassociate_floating_ip(context1, float_addr)
self.network.deallocate_floating_ip(context1, float_addr)
self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
db.floating_ip_destroy(context1.elevated(), float_addr)
db.fixed_ip_disassociate(context1.elevated(), fix_addr)
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def test_deallocate_fixed(self, fixed_update, net_get, fixed_get):
"""Verify that release is called properly.
Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't return
"""
net_get.return_value = dict(test_network.fake_network,
**networks[1])
def vif_get(_context, _vif_id):
return vifs[0]
self.stubs.Set(db, 'virtual_interface_get', vif_get)
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address=fix_addr.address,
instance_uuid=instance.uuid,
allocated=True,
virtual_interface_id=3,
network=dict(test_network.fake_network,
**networks[1]))
self.flags(force_dhcp_release=True)
self.mox.StubOutWithMock(linux_net, 'release_dhcp')
linux_net.release_dhcp(networks[1]['bridge'], fix_addr.address,
'DE:AD:BE:EF:00:00')
self.mox.ReplayAll()
self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
fixed_update.assert_called_once_with(context1, fix_addr.address,
{'allocated': False})
def test_deallocate_fixed_deleted(self):
# Verify doesn't deallocate deleted fixed_ip from deleted network.
def teardown_network_on_host(_context, network):
if network['id'] == 0:
raise test.TestingException()
self.stubs.Set(self.network, '_teardown_network_on_host',
teardown_network_on_host)
context1 = context.RequestContext('user', 'project1')
elevated = context1.elevated()
instance = db.instance_create(context1,
{'project_id': 'project1'})
network = db.network_create_safe(elevated, networks[0])
_fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
fix_addr = _fix_addr.address
db.fixed_ip_update(elevated, fix_addr, {'deleted': 1})
elevated.read_deleted = 'yes'
delfixed = db.fixed_ip_get_by_address(elevated, fix_addr)
values = {'address': fix_addr,
'network_id': network.id,
'instance_uuid': delfixed['instance_uuid']}
db.fixed_ip_create(elevated, values)
elevated.read_deleted = 'no'
elevated.read_deleted = 'yes'
deallocate = self.network.deallocate_fixed_ip
self.assertRaises(test.TestingException, deallocate, context1,
fix_addr, 'fake')
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def test_deallocate_fixed_no_vif(self, fixed_update, net_get, fixed_get):
"""Verify that deallocate doesn't raise when no vif is returned.
Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't return
"""
net_get.return_value = dict(test_network.fake_network,
**networks[1])
def vif_get(_context, _vif_id):
return None
self.stubs.Set(db, 'virtual_interface_get', vif_get)
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address=fix_addr.address,
allocated=True,
virtual_interface_id=3,
instance_uuid=instance.uuid,
network=dict(test_network.fake_network,
**networks[1]))
self.flags(force_dhcp_release=True)
fixed_update.return_value = fixed_get.return_value
self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
fixed_update.assert_called_once_with(context1, fix_addr.address,
{'allocated': False})
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def test_fixed_ip_cleanup_fail(self, fixed_update, net_get, fixed_get):
# Verify IP is not deallocated if the security group refresh fails.
net_get.return_value = dict(test_network.fake_network,
**networks[1])
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = objects.FixedIP.associate_pool(elevated, 1,
instance['uuid'])
def fake_refresh(instance_uuid):
raise test.TestingException()
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
fake_refresh)
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address=fix_addr.address,
allocated=True,
virtual_interface_id=3,
instance_uuid=instance.uuid,
network=dict(test_network.fake_network,
**networks[1]))
self.assertRaises(test.TestingException,
self.network.deallocate_fixed_ip,
context1, str(fix_addr.address), 'fake')
self.assertFalse(fixed_update.called)
def test_get_networks_by_uuids_ordering(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
res = self.network._get_networks_by_uuids(self.context,
requested_networks)
self.assertEqual(res[0]['id'], 1)
self.assertEqual(res[1]['id'], 0)
@mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
@mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
@mock.patch('nova.network.linux_net.iptables_manager._apply')
def test_init_host_iptables_defer_apply(self, iptable_apply,
floating_get_by_host,
fixed_get_by_id):
def get_by_id(context, fixed_ip_id, **kwargs):
net = objects.Network(bridge='testbridge',
cidr='192.168.1.0/24')
if fixed_ip_id == 1:
return objects.FixedIP(address='192.168.1.4',
network=net)
elif fixed_ip_id == 2:
return objects.FixedIP(address='192.168.1.5',
network=net)
def fake_apply():
fake_apply.count += 1
fake_apply.count = 0
ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
float1._context = ctxt
float2._context = ctxt
iptable_apply.side_effect = fake_apply
floating_get_by_host.return_value = [float1, float2]
fixed_get_by_id.side_effect = get_by_id
self.network.init_host()
self.assertEqual(1, fake_apply.count)
class _TestDomainObject(object):
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
self.__setattr__(k, v)
class FakeNetwork(object):
def __init__(self, **kwargs):
self.vlan = None
for k, v in kwargs.iteritems():
self.__setattr__(k, v)
def __getitem__(self, item):
return getattr(self, item)
class CommonNetworkTestCase(test.TestCase):
def setUp(self):
super(CommonNetworkTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.flags(ipv6_backend='rfc2462')
self.flags(use_local=True, group='conductor')
ipv6.reset_backend()
def test_validate_instance_zone_for_dns_domain(self):
domain = 'example.com'
az = 'test_az'
domains = {
domain: _TestDomainObject(
domain=domain,
availability_zone=az)}
def dnsdomain_get(context, instance_domain):
return domains.get(instance_domain)
self.stubs.Set(db, 'dnsdomain_get', dnsdomain_get)
fake_instance = {'uuid': FAKEUUID,
'availability_zone': az}
manager = network_manager.NetworkManager()
res = manager._validate_instance_zone_for_dns_domain(self.context,
fake_instance)
self.assertTrue(res)
def fake_create_fixed_ips(self, context, network_id, fixed_cidr=None,
extra_reserved=None):
return None
def test_get_instance_nw_info_client_exceptions(self):
manager = network_manager.NetworkManager()
self.mox.StubOutWithMock(manager.db,
'virtual_interface_get_by_instance')
manager.db.virtual_interface_get_by_instance(
self.context, FAKEUUID,
use_slave=False).AndRaise(exception.InstanceNotFound(
instance_id=FAKEUUID))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
manager.get_instance_nw_info,
self.context, FAKEUUID, 'fake_rxtx_factor', HOST)
@mock.patch('nova.db.instance_get')
@mock.patch('nova.db.fixed_ip_get_by_instance')
def test_deallocate_for_instance_passes_host_info(self, fixed_get,
instance_get):
manager = fake_network.FakeNetworkManager()
db = manager.db
instance_get.return_value = fake_inst(uuid='ignoreduuid')
db.virtual_interface_delete_by_instance = lambda _x, _y: None
ctx = context.RequestContext('igonre', 'igonre')
fixed_get.return_value = [dict(test_fixed_ip.fake_fixed_ip,
address='1.2.3.4',
network_id=123)]
manager.deallocate_for_instance(
ctx, instance=objects.Instance._from_db_object(self.context,
objects.Instance(), instance_get.return_value))
self.assertEqual([
(ctx, '1.2.3.4', 'fake-host')
], manager.deallocate_fixed_ip_calls)
@mock.patch('nova.db.fixed_ip_get_by_instance')
@mock.patch('nova.db.fixed_ip_disassociate')
def test_remove_fixed_ip_from_instance(self, disassociate, get):
manager = fake_network.FakeNetworkManager()
get.return_value = [
dict(test_fixed_ip.fake_fixed_ip, **x)
for x in manager.db.fixed_ip_get_by_instance(None,
FAKEUUID)]
manager.remove_fixed_ip_from_instance(self.context, FAKEUUID,
HOST,
'10.0.0.1')
self.assertEqual(manager.deallocate_called, '10.0.0.1')
disassociate.assert_called_once_with(self.context, '10.0.0.1')
@mock.patch('nova.db.fixed_ip_get_by_instance')
def test_remove_fixed_ip_from_instance_bad_input(self, get):
manager = fake_network.FakeNetworkManager()
get.return_value = []
self.assertRaises(exception.FixedIpNotFoundForSpecificInstance,
manager.remove_fixed_ip_from_instance,
self.context, 99, HOST, 'bad input')
def test_validate_cidrs(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/24',
False, 1, 256, None, None, None,
None, None)
self.assertEqual(1, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/24', cidrs)
def test_validate_cidrs_split_exact_in_half(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/24',
False, 2, 128, None, None, None,
None, None)
self.assertEqual(2, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/25', cidrs)
self.assertIn('192.168.0.128/25', cidrs)
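    # NOTE(illustrative): the split expectations in these cidr tests line up
    # with netaddr's own subnet arithmetic, e.g.:
    #
    #     >>> import netaddr
    #     >>> [str(n) for n in netaddr.IPNetwork('192.168.0.0/24').subnet(25)]
    #     ['192.168.0.0/25', '192.168.0.128/25']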
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_cidr_in_use_middle_of_range(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.2.0/24')]
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/16',
False, 4, 256, None, None, None,
None, None)
self.assertEqual(4, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
self.assertIn(exp_cidr, cidrs)
self.assertNotIn('192.168.2.0/24', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_smaller_subnet_in_use(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.2.9/25')]
# CidrConflict: requested cidr (192.168.2.0/24) conflicts with
# existing smaller cidr
args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
1, 256, None, None, None, None, None)
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_smaller_cidr_in_use(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.2.0/25')]
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/16',
False, 4, 256, None, None, None, None,
None)
self.assertEqual(4, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
self.assertIn(exp_cidr, cidrs)
self.assertNotIn('192.168.2.0/24', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_smaller_cidr_in_use2(self, get_all):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
get_all.return_value = [dict(test_network.fake_network, id=1,
cidr='192.168.2.9/29')]
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.2.0/24',
False, 3, 32, None, None, None, None,
None)
self.assertEqual(3, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
for exp_cidr in exp_cidrs:
self.assertIn(exp_cidr, cidrs)
self.assertNotIn('192.168.2.0/27', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_all_in_use(self, get_all):
manager = fake_network.FakeNetworkManager()
in_use = [dict(test_network.fake_network, **values) for values in
[{'id': 1, 'cidr': '192.168.2.9/29'},
{'id': 2, 'cidr': '192.168.2.64/26'},
{'id': 3, 'cidr': '192.168.2.128/26'}]]
get_all.return_value = in_use
args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
3, 64, None, None, None, None, None)
        # CidrConflict: not enough subnets available to satisfy the
        #               requested num_networks - some subnets in the
        #               requested range are already in use
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_validate_cidrs_one_in_use(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None,
None, None, None)
# ValueError: network_size * num_networks exceeds cidr size
self.assertRaises(ValueError, manager.create_networks, *args)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_already_used(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
cidr='192.168.0.0/24')]
# CidrConflict: cidr already in use
args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
1, 256, None, None, None, None, None)
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_validate_cidrs_too_many(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None,
None, None, None)
# ValueError: Not enough subnets avail to satisfy requested
# num_networks
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_split_partial(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/16',
False, 2, 256, None, None, None, None,
None)
returned_cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/24', returned_cidrs)
self.assertIn('192.168.1.0/24', returned_cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_conflict_existing_supernet(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.0.0/8')]
args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
1, 256, None, None, None, None, None)
# CidrConflict: requested cidr (192.168.0.0/24) conflicts
# with existing supernet
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_create_networks(self):
cidr = '192.168.0.0/24'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [self.context.elevated(), 'foo', cidr, None, 1, 256,
'fd00::/48', None, None, None, None, None]
self.assertTrue(manager.create_networks(*args))
@mock.patch('nova.db.network_get_all')
def test_create_networks_cidr_already_used(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.0.0/24')]
args = [self.context.elevated(), 'foo', '192.168.0.0/24', None, 1, 256,
'fd00::/48', None, None, None, None, None]
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_create_networks_many(self):
cidr = '192.168.0.0/16'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [self.context.elevated(), 'foo', cidr, None, 10, 256,
'fd00::/48', None, None, None, None, None]
self.assertTrue(manager.create_networks(*args))
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ips_by_virtual_interface')
def test_get_instance_uuids_by_ip_regex(self, fixed_get, network_get):
manager = fake_network.FakeNetworkManager(self.stubs)
fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
network_get.return_value = dict(test_network.fake_network,
**manager.db.network_get(None, 1))
        # Greedy get everything
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '.*'})
self.assertEqual(len(res), len(_vifs))
# Doesn't exist
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '10.0.0.1'})
self.assertFalse(res)
# Get instance 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '172.16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 2
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '173.16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
# Get instance 0 and 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '172.16.0.*'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 1 and 2
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '17..16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
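    # NOTE(illustrative): the 'ip' filter above is treated as a regular
    # expression matched against each fixed address, which is why the
    # pattern '17..16.0.2' selects both 172.16.0.2 and 173.16.0.2:
    #
    #     >>> import re
    #     >>> [ip for ip in ('172.16.0.2', '173.16.0.2', '10.0.0.1')
    #     ...  if re.match('17..16.0.2', ip)]
    #     ['172.16.0.2', '173.16.0.2']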
@mock.patch('nova.db.network_get')
def test_get_instance_uuids_by_ipv6_regex(self, network_get):
manager = fake_network.FakeNetworkManager(self.stubs)
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
def _network_get(context, network_id, **args):
return dict(test_network.fake_network,
**manager.db.network_get(context, network_id))
network_get.side_effect = _network_get
        # Greedy get everything
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*'})
self.assertEqual(len(res), len(_vifs))
# Doesn't exist
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*1034.*'})
self.assertFalse(res)
# Get instance 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '2001:.*2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 2
ip6 = '2001:db8:69:1f:dead:beff:feff:ef03'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': ip6})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
# Get instance 0 and 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*ef0[1,2]'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 1 and 2
ip6 = '2001:db8:69:1.:dead:beff:feff:ef0.'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': ip6})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ips_by_virtual_interface')
def test_get_instance_uuids_by_ip(self, fixed_get, network_get):
manager = fake_network.FakeNetworkManager(self.stubs)
fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
network_get.return_value = dict(test_network.fake_network,
**manager.db.network_get(None, 1))
# No regex for you!
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': '.*'})
self.assertFalse(res)
# Doesn't exist
ip = '10.0.0.1'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertFalse(res)
# Get instance 1
ip = '172.16.0.2'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 2
ip = '173.16.0.2'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
@mock.patch('nova.db.network_get_by_uuid')
def test_get_network(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.return_value = dict(test_network.fake_network, **networks[0])
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
network = manager.get_network(fake_context, uuid)
self.assertEqual(network['uuid'], uuid)
@mock.patch('nova.db.network_get_by_uuid')
def test_get_network_not_found(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.side_effect = exception.NetworkNotFoundForUUID(uuid='foo')
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.get_network, fake_context, uuid)
@mock.patch('nova.db.network_get_all')
def test_get_all_networks(self, get_all):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get_all.return_value = [dict(test_network.fake_network, **net)
for net in networks]
output = manager.get_all_networks(fake_context)
self.assertEqual(len(networks), 2)
self.assertEqual(output[0]['uuid'],
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
self.assertEqual(output[1]['uuid'],
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')
@mock.patch('nova.db.network_get_by_uuid')
@mock.patch('nova.db.network_disassociate')
def test_disassociate_network(self, disassociate, get):
manager = fake_network.FakeNetworkManager()
disassociate.return_value = True
fake_context = context.RequestContext('user', 'project')
get.return_value = dict(test_network.fake_network,
**networks[0])
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
manager.disassociate_network(fake_context, uuid)
@mock.patch('nova.db.network_get_by_uuid')
def test_disassociate_network_not_found(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.side_effect = exception.NetworkNotFoundForUUID(uuid='fake')
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.disassociate_network, fake_context, uuid)
def _test_init_host_dynamic_fixed_range(self, net_manager):
self.flags(fake_network=True,
routing_source_ip='172.16.0.1',
metadata_host='172.16.0.1',
public_interface='eth1',
dmz_cidr=['10.0.3.0/24'])
binary_name = linux_net.get_binary_name()
# Stub out calls we don't want to really run, mock the db
self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None)
self.stubs.Set(floating_ips.FloatingIP, 'init_host_floating_ips',
lambda *args: None)
self.stubs.Set(net_manager.l3driver, 'initialize_gateway',
lambda *args: None)
self.mox.StubOutWithMock(db, 'network_get_all_by_host')
fake_networks = [dict(test_network.fake_network, **n)
for n in networks]
db.network_get_all_by_host(mox.IgnoreArg(),
mox.IgnoreArg()
).MultipleTimes().AndReturn(fake_networks)
self.mox.ReplayAll()
net_manager.init_host()
# Get the iptables rules that got created
current_lines = []
new_lines = linux_net.iptables_manager._modify_rules(current_lines,
linux_net.iptables_manager.ipv4['nat'],
table_name='nat')
expected_lines = ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
'-j SNAT --to-source %s -o %s'
% (binary_name, networks[0]['cidr'],
CONF.routing_source_ip,
CONF.public_interface),
'[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
% (binary_name, networks[0]['cidr'],
CONF.metadata_host),
'[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
% (binary_name, networks[0]['cidr'],
CONF.dmz_cidr[0]),
'[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' % (binary_name,
networks[0]['cidr'],
networks[0]['cidr']),
'[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
'-j SNAT --to-source %s -o %s'
% (binary_name, networks[1]['cidr'],
CONF.routing_source_ip,
CONF.public_interface),
'[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
% (binary_name, networks[1]['cidr'],
CONF.metadata_host),
'[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
% (binary_name, networks[1]['cidr'],
CONF.dmz_cidr[0]),
'[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' % (binary_name,
networks[1]['cidr'],
networks[1]['cidr'])]
# Compare the expected rules against the actual ones
for line in expected_lines:
self.assertIn(line, new_lines)
# Add an additional network and ensure the rules get configured
new_network = {'id': 2,
'uuid': 'cccccccc-cccc-cccc-cccc-cccccccc',
'label': 'test2',
'injected': False,
'multi_host': False,
'cidr': '192.168.2.0/24',
'cidr_v6': '2001:dba::/64',
'gateway_v6': '2001:dba::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.2.1',
'dhcp_server': '192.168.2.1',
'broadcast': '192.168.2.255',
'dns1': '192.168.2.1',
'dns2': '192.168.2.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.2.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'}
new_network_obj = objects.Network._from_db_object(
self.context, objects.Network(),
dict(test_network.fake_network, **new_network))
ctxt = context.get_admin_context()
net_manager._setup_network_on_host(ctxt, new_network_obj)
# Get the new iptables rules that got created from adding a new network
current_lines = []
new_lines = linux_net.iptables_manager._modify_rules(current_lines,
linux_net.iptables_manager.ipv4['nat'],
table_name='nat')
# Add the new expected rules to the old ones
expected_lines += ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
'-j SNAT --to-source %s -o %s'
% (binary_name, new_network['cidr'],
CONF.routing_source_ip,
CONF.public_interface),
'[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
% (binary_name, new_network['cidr'],
CONF.metadata_host),
'[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
% (binary_name, new_network['cidr'],
CONF.dmz_cidr[0]),
'[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack '
'! --ctstate DNAT -j ACCEPT' % (binary_name,
new_network['cidr'],
new_network['cidr'])]
# Compare the expected rules (with new network) against the actual ones
for line in expected_lines:
self.assertIn(line, new_lines)
def test_flatdhcpmanager_dynamic_fixed_range(self):
"""Test FlatDHCPManager NAT rules for fixed_range."""
# Set the network manager
self.network = network_manager.FlatDHCPManager(host=HOST)
self.network.db = db
# Test new behavior:
# CONF.fixed_range is not set, defaults to None
# Determine networks to NAT based on lookup
self._test_init_host_dynamic_fixed_range(self.network)
def test_vlanmanager_dynamic_fixed_range(self):
"""Test VlanManager NAT rules for fixed_range."""
# Set the network manager
self.network = network_manager.VlanManager(host=HOST)
self.network.db = db
# Test new behavior:
# CONF.fixed_range is not set, defaults to None
# Determine networks to NAT based on lookup
self._test_init_host_dynamic_fixed_range(self.network)
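# NOTE(illustrative): expected_lines in _test_init_host_dynamic_fixed_range
# are plain string templates rendered once per network.  This standalone
# helper (not part of the original suite; every default value below is
# hypothetical) shows the same rendering for a single SNAT rule:
def _example_snat_rule(binary_name='nova-network', cidr='192.168.0.0/24',
                       source_ip='172.16.0.1', public_interface='eth1'):
    return ('[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
            '-j SNAT --to-source %s -o %s'
            % (binary_name, cidr, source_ip, public_interface))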
class TestRPCFixedManager(network_manager.RPCAllocateFixedIP,
network_manager.NetworkManager):
"""Dummy manager that implements RPCAllocateFixedIP."""
class RPCAllocateTestCase(test.TestCase):
"""Tests nova.network.manager.RPCAllocateFixedIP."""
def setUp(self):
super(RPCAllocateTestCase, self).setUp()
self.flags(use_local=True, group='conductor')
self.rpc_fixed = TestRPCFixedManager()
self.context = context.RequestContext('fake', 'fake')
def test_rpc_allocate(self):
"""Test to verify bug 855030 doesn't resurface.
        Makes sure _rpc_allocate_fixed_ip returns a value so the call
returns properly and the greenpool completes.
"""
address = '10.10.10.10'
def fake_allocate(*args, **kwargs):
return address
def fake_network_get(*args, **kwargs):
return test_network.fake_network
self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate)
self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get)
rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context,
'fake_instance',
'fake_network')
self.assertEqual(rval, address)
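# NOTE(illustrative): TestRPCFixedManager above and TestFloatingIPManager
# below exist only so a mixin (RPCAllocateFixedIP / FloatingIP) is combined
# with NetworkManager into one concrete class.  A standalone sketch of that
# mixin pattern with stand-in classes:
def _example_mixin():
    class Base(object):
        def base_method(self):
            return 'base'
    class Mixin(object):
        def mixin_method(self):
            return 'mixin'
    class Combined(Mixin, Base):
        """Dummy class combining both behaviours."""
    combined = Combined()
    return combined.base_method(), combined.mixin_method()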
class TestFloatingIPManager(floating_ips.FloatingIP,
network_manager.NetworkManager):
"""Dummy manager that implements FloatingIP."""
class AllocateTestCase(test.TestCase):
def setUp(self):
super(AllocateTestCase, self).setUp()
dns = 'nova.network.noop_dns_driver.NoopDNSDriver'
self.flags(instance_dns_manager=dns)
self.useFixture(test.SampleNetworks())
self.conductor = self.start_service(
'conductor', manager=CONF.conductor.manager)
self.compute = self.start_service('compute')
self.network = self.start_service('network')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
self.user_context = context.RequestContext('testuser',
'testproject')
def test_allocate_for_instance(self):
address = "10.10.10.10"
self.flags(auto_assign_floating_ip=True)
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
inst = objects.Instance()
inst.host = self.compute.host
inst.display_name = HOST
inst.instance_type_id = 1
inst.uuid = FAKEUUID
inst.create(self.context)
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.user_context.project_id
nw_info = self.network.allocate_for_instance(self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=project_id, macs=None)
self.assertEqual(1, len(nw_info))
fixed_ip = nw_info.fixed_ips()[0]['address']
self.assertTrue(utils.is_valid_ipv4(fixed_ip))
self.network.deallocate_for_instance(self.context,
instance=inst)
def test_allocate_for_instance_illegal_network(self):
networks = db.network_get_all(self.context)
requested_networks = []
for network in networks:
# set all networks to other projects
db.network_update(self.context, network['id'],
{'host': self.network.host,
'project_id': 'otherid'})
requested_networks.append((network['uuid'], None))
# set the first network to our project
db.network_update(self.context, networks[0]['id'],
{'project_id': self.user_context.project_id})
inst = objects.Instance()
inst.host = self.compute.host
inst.display_name = HOST
inst.instance_type_id = 1
inst.uuid = FAKEUUID
inst.create(self.context)
self.assertRaises(exception.NetworkNotFoundForProject,
self.network.allocate_for_instance, self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=self.context.project_id, macs=None,
requested_networks=requested_networks)
def test_allocate_for_instance_with_mac(self):
available_macs = set(['ca:fe:de:ad:be:ef'])
inst = db.instance_create(self.context, {'host': self.compute.host,
'display_name': HOST,
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.context.project_id
nw_info = self.network.allocate_for_instance(self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=project_id, macs=available_macs)
assigned_macs = [vif['address'] for vif in nw_info]
self.assertEqual(1, len(assigned_macs))
self.assertEqual(available_macs.pop(), assigned_macs[0])
self.network.deallocate_for_instance(self.context,
instance_id=inst['id'],
host=self.network.host,
project_id=project_id)
def test_allocate_for_instance_not_enough_macs(self):
available_macs = set()
inst = db.instance_create(self.context, {'host': self.compute.host,
'display_name': HOST,
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.context.project_id
self.assertRaises(exception.VirtualInterfaceCreateException,
self.network.allocate_for_instance,
self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=project_id, macs=available_macs)
class FloatingIPTestCase(test.TestCase):
"""Tests nova.network.manager.FloatingIP."""
def setUp(self):
super(FloatingIPTestCase, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.flags(use_local=True, group='conductor')
self.network = TestFloatingIPManager()
self.network.db = db
self.project_id = 'testproject'
self.context = context.RequestContext('testuser', self.project_id,
is_admin=False)
@mock.patch('nova.db.fixed_ip_get')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.instance_get_by_uuid')
@mock.patch('nova.db.service_get_by_host_and_topic')
@mock.patch('nova.db.floating_ip_get_by_address')
def test_disassociate_floating_ip_multi_host_calls(self, floating_get,
service_get,
inst_get, net_get,
fixed_get):
floating_ip = dict(test_floating_ip.fake_floating_ip,
fixed_ip_id=12)
fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
network_id=None,
instance_uuid='instance-uuid')
network = dict(test_network.fake_network,
multi_host=True)
instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.stubs.Set(self.network,
'_floating_ip_owned_by_project',
lambda _x, _y: True)
floating_get.return_value = floating_ip
fixed_get.return_value = fixed_ip
net_get.return_value = network
inst_get.return_value = instance
service_get.return_value = test_service.fake_service
self.stubs.Set(self.network.servicegroup_api,
'service_is_up',
lambda _x: True)
self.mox.StubOutWithMock(
self.network.network_rpcapi, '_disassociate_floating_ip')
self.network.network_rpcapi._disassociate_floating_ip(
ctxt, 'fl_ip', mox.IgnoreArg(), 'some-other-host', 'instance-uuid')
self.mox.ReplayAll()
self.network.disassociate_floating_ip(ctxt, 'fl_ip', True)
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.instance_get_by_uuid')
@mock.patch('nova.db.floating_ip_get_by_address')
def test_associate_floating_ip_multi_host_calls(self, floating_get,
inst_get, net_get,
fixed_get):
floating_ip = dict(test_floating_ip.fake_floating_ip,
fixed_ip_id=None)
fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
network_id=None,
instance_uuid='instance-uuid')
network = dict(test_network.fake_network,
multi_host=True)
instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.stubs.Set(self.network,
'_floating_ip_owned_by_project',
lambda _x, _y: True)
floating_get.return_value = floating_ip
fixed_get.return_value = fixed_ip
net_get.return_value = network
inst_get.return_value = instance
self.mox.StubOutWithMock(
self.network.network_rpcapi, '_associate_floating_ip')
self.network.network_rpcapi._associate_floating_ip(
ctxt, 'fl_ip', 'fix_ip', mox.IgnoreArg(), 'some-other-host',
'instance-uuid')
self.mox.ReplayAll()
self.network.associate_floating_ip(ctxt, 'fl_ip', 'fix_ip', True)
def test_double_deallocation(self):
instance_ref = db.instance_create(self.context,
{"project_id": self.project_id})
# Run it twice to make it fault if it does not handle
# instances without fixed networks
# If either call fails, it does not handle instances with no addresses
self.network.deallocate_for_instance(self.context,
instance_id=instance_ref['id'])
self.network.deallocate_for_instance(self.context,
instance_id=instance_ref['id'])
def test_deallocate_floating_ip_quota_rollback(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1', fixed_ip_id=None,
project_id=ctxt.project_id)
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake)
self.mox.StubOutWithMock(db, 'floating_ip_deallocate')
self.mox.StubOutWithMock(self.network,
'_floating_ip_owned_by_project')
self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
quota.QUOTAS.reserve(self.context,
floating_ips=-1,
project_id='testproject').AndReturn('fake-rsv')
self.network._floating_ip_owned_by_project(self.context,
mox.IgnoreArg())
db.floating_ip_deallocate(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(None)
quota.QUOTAS.rollback(self.context, 'fake-rsv',
project_id='testproject')
self.mox.ReplayAll()
self.network.deallocate_floating_ip(self.context, '10.0.0.1')
def test_deallocation_deleted_instance(self):
self.stubs.Set(self.network, '_teardown_network_on_host',
lambda *args, **kwargs: None)
instance = objects.Instance()
instance.project_id = self.project_id
instance.deleted = True
instance.create(self.context)
network = db.network_create_safe(self.context.elevated(), {
'project_id': self.project_id,
'host': CONF.host,
'label': 'foo'})
fixed = db.fixed_ip_create(self.context, {'allocated': True,
'instance_uuid': instance.uuid, 'address': '10.1.1.1',
'network_id': network['id']})
db.floating_ip_create(self.context, {
'address': '10.10.10.10', 'instance_uuid': instance.uuid,
'fixed_ip_id': fixed['id'],
'project_id': self.project_id})
self.network.deallocate_for_instance(self.context, instance=instance)
def test_deallocation_duplicate_floating_ip(self):
self.stubs.Set(self.network, '_teardown_network_on_host',
lambda *args, **kwargs: None)
instance = objects.Instance()
instance.project_id = self.project_id
instance.create(self.context)
network = db.network_create_safe(self.context.elevated(), {
'project_id': self.project_id,
'host': CONF.host,
'label': 'foo'})
fixed = db.fixed_ip_create(self.context, {'allocated': True,
'instance_uuid': instance.uuid, 'address': '10.1.1.1',
'network_id': network['id']})
db.floating_ip_create(self.context, {
'address': '10.10.10.10',
'deleted': True})
db.floating_ip_create(self.context, {
'address': '10.10.10.10', 'instance_uuid': instance.uuid,
'fixed_ip_id': fixed['id'],
'project_id': self.project_id})
self.network.deallocate_for_instance(self.context, instance=instance)
@mock.patch('nova.db.fixed_ip_get')
@mock.patch('nova.db.floating_ip_get_by_address')
@mock.patch('nova.db.floating_ip_update')
def test_migrate_instance_start(self, floating_update, floating_get,
fixed_get):
called = {'count': 0}
def fake_floating_ip_get_by_address(context, address):
return dict(test_floating_ip.fake_floating_ip,
address=address,
fixed_ip_id=0)
def fake_is_stale_floating_ip_address(context, floating_ip):
return str(floating_ip.address) == '172.24.4.23'
floating_get.side_effect = fake_floating_ip_get_by_address
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
instance_uuid='fake_uuid',
address='10.0.0.2',
network=test_network.fake_network)
floating_update.return_value = fake_floating_ip_get_by_address(
None, '1.2.3.4')
def fake_remove_floating_ip(floating_addr, fixed_addr, interface,
network):
called['count'] += 1
def fake_clean_conntrack(fixed_ip):
if not str(fixed_ip) == "10.0.0.2":
raise exception.FixedIpInvalid(address=fixed_ip)
self.stubs.Set(self.network, '_is_stale_floating_ip_address',
fake_is_stale_floating_ip_address)
self.stubs.Set(self.network.l3driver, 'remove_floating_ip',
fake_remove_floating_ip)
self.stubs.Set(self.network.driver, 'clean_conntrack',
fake_clean_conntrack)
self.mox.ReplayAll()
addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
self.network.migrate_instance_start(self.context,
instance_uuid=FAKEUUID,
floating_addresses=addresses,
rxtx_factor=3,
project_id=self.project_id,
source='fake_source',
dest='fake_dest')
self.assertEqual(called['count'], 2)
@mock.patch('nova.db.fixed_ip_get')
@mock.patch('nova.db.floating_ip_update')
def test_migrate_instance_finish(self, floating_update, fixed_get):
called = {'count': 0}
def fake_floating_ip_get_by_address(context, address):
return dict(test_floating_ip.fake_floating_ip,
address=address,
fixed_ip_id=0)
def fake_is_stale_floating_ip_address(context, floating_ip):
return str(floating_ip.address) == '172.24.4.23'
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
instance_uuid='fake_uuid',
address='10.0.0.2',
network=test_network.fake_network)
floating_update.return_value = fake_floating_ip_get_by_address(
None, '1.2.3.4')
def fake_add_floating_ip(floating_addr, fixed_addr, interface,
network):
called['count'] += 1
self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
fake_floating_ip_get_by_address)
self.stubs.Set(self.network, '_is_stale_floating_ip_address',
fake_is_stale_floating_ip_address)
self.stubs.Set(self.network.l3driver, 'add_floating_ip',
fake_add_floating_ip)
self.mox.ReplayAll()
addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
self.network.migrate_instance_finish(self.context,
instance_uuid=FAKEUUID,
floating_addresses=addresses,
host='fake_dest',
rxtx_factor=3,
project_id=self.project_id,
source='fake_source')
self.assertEqual(called['count'], 2)
def test_floating_dns_create_conflict(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.assertRaises(exception.FloatingIpDNSExists,
self.network.add_dns_entry, self.context,
address1, name1, "A", zone)
def test_floating_create_and_get(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertFalse(entries)
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.network.add_dns_entry(self.context, address1, name2, "A", zone)
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertEqual(len(entries), 2)
self.assertEqual(entries[0], name1)
self.assertEqual(entries[1], name2)
entries = self.network.get_dns_entries_by_name(self.context,
name1, zone)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], address1)
def test_floating_dns_delete(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.network.add_dns_entry(self.context, address1, name2, "A", zone)
self.network.delete_dns_entry(self.context, name1, zone)
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], name2)
self.assertRaises(exception.NotFound,
self.network.delete_dns_entry, self.context,
name1, zone)
def test_floating_dns_domains_public(self):
zone1 = "testzone"
domain1 = "example.org"
domain2 = "example.com"
address1 = '10.10.10.10'
entryname = 'testentry'
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.AdminRequired,
self.network.create_public_dns_domain, self.context,
domain1, zone1)
self.network.create_public_dns_domain(context_admin, domain1,
'testproject')
self.network.create_public_dns_domain(context_admin, domain2,
'fakeproject')
domains = self.network.get_dns_domains(self.context)
self.assertEqual(len(domains), 2)
self.assertEqual(domains[0]['domain'], domain1)
self.assertEqual(domains[1]['domain'], domain2)
self.assertEqual(domains[0]['project'], 'testproject')
self.assertEqual(domains[1]['project'], 'fakeproject')
self.network.add_dns_entry(self.context, address1, entryname,
'A', domain1)
entries = self.network.get_dns_entries_by_name(self.context,
entryname, domain1)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], address1)
self.assertRaises(exception.AdminRequired,
self.network.delete_dns_domain, self.context,
domain1)
self.network.delete_dns_domain(context_admin, domain1)
self.network.delete_dns_domain(context_admin, domain2)
# Verify that deleting the domain deleted the associated entry
entries = self.network.get_dns_entries_by_name(self.context,
entryname, domain1)
self.assertFalse(entries)
def test_delete_all_by_ip(self):
domain1 = "example.org"
domain2 = "example.com"
address = "10.10.10.10"
name1 = "foo"
name2 = "bar"
def fake_domains(context):
return [{'domain': 'example.org', 'scope': 'public'},
{'domain': 'example.com', 'scope': 'public'},
{'domain': 'test.example.org', 'scope': 'public'}]
self.stubs.Set(self.network, 'get_dns_domains', fake_domains)
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.network.create_public_dns_domain(context_admin, domain1,
'testproject')
self.network.create_public_dns_domain(context_admin, domain2,
'fakeproject')
domains = self.network.get_dns_domains(self.context)
for domain in domains:
self.network.add_dns_entry(self.context, address,
name1, "A", domain['domain'])
self.network.add_dns_entry(self.context, address,
name2, "A", domain['domain'])
entries = self.network.get_dns_entries_by_address(self.context,
address,
domain['domain'])
self.assertEqual(len(entries), 2)
self.network._delete_all_entries_for_ip(self.context, address)
for domain in domains:
entries = self.network.get_dns_entries_by_address(self.context,
address,
domain['domain'])
self.assertFalse(entries)
self.network.delete_dns_domain(context_admin, domain1)
self.network.delete_dns_domain(context_admin, domain2)
def test_mac_conflicts(self):
# Make sure MAC collisions are retried.
self.flags(create_unique_mac_address_attempts=3)
ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
macs = ['bb:bb:bb:bb:bb:bb', 'aa:aa:aa:aa:aa:aa']
# Create a VIF with aa:aa:aa:aa:aa:aa
crash_test_dummy_vif = {
'address': macs[1],
'instance_uuid': 'fake_uuid',
'network_id': 123,
'uuid': 'fake_uuid',
}
self.network.db.virtual_interface_create(ctxt, crash_test_dummy_vif)
# Hand out a collision first, then a legit MAC
def fake_gen_mac():
return macs.pop()
self.stubs.Set(utils, 'generate_mac_address', fake_gen_mac)
# SQLite doesn't seem to honor the uniqueness constraint on the
# address column, so fake the collision-avoidance here
def fake_vif_save(vif):
if vif.address == crash_test_dummy_vif['address']:
raise db_exc.DBError("If you're smart, you'll retry!")
# NOTE(russellb) The VirtualInterface object requires an ID to be
# set, and we expect it to get set automatically when we do the
# save.
vif.id = 1
self.stubs.Set(models.VirtualInterface, 'save', fake_vif_save)
# Attempt to add another and make sure that both MACs are consumed
# by the retry loop
self.network._add_virtual_interface(ctxt, 'fake_uuid', 123)
self.assertEqual(macs, [])
def test_deallocate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
self.network.deallocate_floating_ip,
self.context, '1.2.3.4')
def test_associate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
self.network.associate_floating_ip,
self.context, '1.2.3.4', '10.0.0.1')
def test_disassociate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
self.network.disassociate_floating_ip,
self.context, '1.2.3.4')
def test_get_floating_ip_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get')
self.network.db.floating_ip_get(self.context, 'fake-id').AndRaise(
exception.FloatingIpNotFound(id='fake'))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
self.network.get_floating_ip,
self.context, 'fake-id')
def _test_associate_floating_ip_failure(self, stdout, expected_exception):
def _fake_catchall(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
network=test_network.fake_network)
def _fake_add_floating_ip(*args, **kwargs):
raise processutils.ProcessExecutionError(stdout)
self.stubs.Set(self.network.db, 'floating_ip_fixed_ip_associate',
_fake_catchall)
self.stubs.Set(self.network.db, 'floating_ip_disassociate',
_fake_catchall)
self.stubs.Set(self.network.l3driver, 'add_floating_ip',
_fake_add_floating_ip)
self.assertRaises(expected_exception,
self.network._associate_floating_ip, self.context,
'1.2.3.4', '1.2.3.5', '', '')
def test_associate_floating_ip_failure(self):
self._test_associate_floating_ip_failure(None,
processutils.ProcessExecutionError)
def test_associate_floating_ip_failure_interface_not_found(self):
self._test_associate_floating_ip_failure('Cannot find device',
exception.NoFloatingIpInterface)
@mock.patch('nova.objects.FloatingIP.get_by_address')
def test_get_floating_ip_by_address(self, mock_get):
mock_get.return_value = mock.sentinel.floating
self.assertEqual(mock.sentinel.floating,
self.network.get_floating_ip_by_address(
self.context,
mock.sentinel.address))
mock_get.assert_called_once_with(self.context, mock.sentinel.address)
@mock.patch('nova.objects.FloatingIPList.get_by_project')
def test_get_floating_ips_by_project(self, mock_get):
mock_get.return_value = mock.sentinel.floatings
self.assertEqual(mock.sentinel.floatings,
self.network.get_floating_ips_by_project(
self.context))
mock_get.assert_called_once_with(self.context, self.context.project_id)
@mock.patch('nova.objects.FloatingIPList.get_by_fixed_address')
def test_get_floating_ips_by_fixed_address(self, mock_get):
mock_get.return_value = [objects.FloatingIP(address='1.2.3.4'),
objects.FloatingIP(address='5.6.7.8')]
self.assertEqual(['1.2.3.4', '5.6.7.8'],
self.network.get_floating_ips_by_fixed_address(
self.context, mock.sentinel.address))
mock_get.assert_called_once_with(self.context, mock.sentinel.address)
class InstanceDNSTestCase(test.TestCase):
"""Tests nova.network.manager instance DNS."""
def setUp(self):
super(InstanceDNSTestCase, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.flags(use_local=True, group='conductor')
self.network = TestFloatingIPManager()
self.network.db = db
self.project_id = 'testproject'
self.context = context.RequestContext('testuser', self.project_id,
is_admin=False)
def test_dns_domains_private(self):
zone1 = 'testzone'
domain1 = 'example.org'
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.AdminRequired,
self.network.create_private_dns_domain, self.context,
domain1, zone1)
self.network.create_private_dns_domain(context_admin, domain1, zone1)
domains = self.network.get_dns_domains(self.context)
self.assertEqual(len(domains), 1)
self.assertEqual(domains[0]['domain'], domain1)
self.assertEqual(domains[0]['availability_zone'], zone1)
self.assertRaises(exception.AdminRequired,
self.network.delete_dns_domain, self.context,
domain1)
self.network.delete_dns_domain(context_admin, domain1)
domain1 = "example.org"
domain2 = "example.com"
class LdapDNSTestCase(test.TestCase):
"""Tests nova.network.ldapdns.LdapDNS."""
def setUp(self):
super(LdapDNSTestCase, self).setUp()
self.useFixture(test.ReplaceModule('ldap', fake_ldap))
dns_class = 'nova.network.ldapdns.LdapDNS'
self.driver = importutils.import_object(dns_class)
attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
'domain', 'dcobject', 'top'],
'associateddomain': ['root'],
'dc': ['root']}
self.driver.lobj.add_s("ou=hosts,dc=example,dc=org", attrs.items())
self.driver.create_domain(domain1)
self.driver.create_domain(domain2)
def tearDown(self):
self.driver.delete_domain(domain1)
self.driver.delete_domain(domain2)
super(LdapDNSTestCase, self).tearDown()
def test_ldap_dns_domains(self):
domains = self.driver.get_domains()
self.assertEqual(len(domains), 2)
self.assertIn(domain1, domains)
self.assertIn(domain2, domains)
def test_ldap_dns_create_conflict(self):
address1 = "10.10.10.11"
name1 = "foo"
self.driver.create_entry(name1, address1, "A", domain1)
self.assertRaises(exception.FloatingIpDNSExists,
self.driver.create_entry,
name1, address1, "A", domain1)
def test_ldap_dns_create_and_get(self):
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertFalse(entries)
self.driver.create_entry(name1, address1, "A", domain1)
self.driver.create_entry(name2, address1, "A", domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertEqual(len(entries), 2)
self.assertEqual(entries[0], name1)
self.assertEqual(entries[1], name2)
entries = self.driver.get_entries_by_name(name1, domain1)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], address1)
def test_ldap_dns_delete(self):
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.driver.create_entry(name1, address1, "A", domain1)
self.driver.create_entry(name2, address1, "A", domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertEqual(len(entries), 2)
self.driver.delete_entry(name1, domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
LOG.debug("entries: %s" % entries)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], name2)
self.assertRaises(exception.NotFound,
self.driver.delete_entry,
name1, domain1)
| viggates/nova | nova/tests/network/test_manager.py | Python | apache-2.0 | 144,833 | ["FEFF"] | 0edf8440b061ae72d1485f7d225c5602450bbfd49a478a1385e64673e90017e5 |
import vtkAll as vtk
from vtkNumpy import addNumpyToVtk
from shallowCopy import shallowCopy
import numpy as np
class DebugData(object):
def __init__(self):
self.append = vtk.vtkAppendPolyData()
def write(self, filename):
writer = vtk.vtkXMLPolyDataWriter()
writer.SetInputConnection(self.append.GetOutputPort())
writer.SetFileName(filename)
writer.Update()
def addPolyData(self, polyData, color=[1,1,1], extraLabels=None):
'''
Add a vtkPolyData to the debug data. A color can be provided.
If the extraLabels argument is used, it should be a list of tuples,
each tuple is (labelName, labelValue) where labelName is a string and
labelValue is an int or float. An array with labelName will be filled
with labelValue and added to the poly data.
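A minimal usage sketch (non-authoritative; `polyData` is assumed to be an
existing vtkPolyData instance and the label name is illustrative):
    d = DebugData()
    d.addPolyData(polyData, color=[1, 0, 0],
                  extraLabels=[('segmentId', 3)])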
'''
polyData = shallowCopy(polyData)
if color is not None:
colorArray = np.empty((polyData.GetNumberOfPoints(), 3), dtype=np.uint8)
colorArray[:,:] = np.array(color)*255
addNumpyToVtk(polyData, colorArray, 'RGB255')
if extraLabels is not None:
for labelName, labelValue in extraLabels:
extraArray = np.empty((polyData.GetNumberOfPoints(), 1), dtype=type(labelValue))
extraArray[:] = labelValue
addNumpyToVtk(polyData, extraArray, labelName)
self.append.AddInput(polyData)
def addLine(self, p1, p2, radius=0.0, color=[1,1,1]):
line = vtk.vtkLineSource()
line.SetPoint1(p1)
line.SetPoint2(p2)
line.Update()
if radius == 0.0:
self.addPolyData(line.GetOutput(), color)
else:
tube = vtk.vtkTubeFilter()
tube.SetRadius(radius)
tube.SetNumberOfSides(24)
tube.CappingOn()
tube.SetInputConnection(line.GetOutputPort())
tube.Update()
self.addPolyData(tube.GetOutput(), color)
def addFrame(self, frame, scale, tubeRadius=0.0):
origin = np.array([0.0, 0.0, 0.0])
axes = [[scale, 0.0, 0.0], [0.0, scale, 0.0], [0.0, 0.0, scale]]
colors = [[1,0,0], [0,1,0], [0,0,1]]
frame.TransformPoint(origin, origin)
for axis, color in zip(axes, colors):
frame.TransformVector(axis, axis)
self.addLine(origin, origin+axis, radius=tubeRadius, color=color)
def addCircle(self, origin, normal, radius, color=[1,1,1]):
self.addCone(origin, normal, radius, height=0, color=color, fill=False)
def addCone(self, origin, normal, radius, height, color=[1,1,1], fill=True):
cone = vtk.vtkConeSource()
cone.SetRadius(radius)
cone.SetCenter(origin)
cone.SetDirection(normal)
cone.SetHeight(height)
cone.SetResolution(32)
if fill:
cone.Update()
self.addPolyData(cone.GetOutput(), color)
else:
edges = vtk.vtkExtractEdges()
edges.AddInputConnection(cone.GetOutputPort())
edges.Update()
self.addPolyData(edges.GetOutput(), color)
def addArrow(self, start, end, headRadius=0.05, tubeRadius=0.01, color=[1,1,1], startHead=False, endHead=True):
normal = np.array(end) - np.array(start)
normal = normal / np.linalg.norm(normal)
if startHead:
start = np.array(start) + headRadius * normal
if endHead:
end = np.array(end) - headRadius * normal
self.addLine(start, end, radius=tubeRadius, color=color)
if startHead:
self.addCone(origin=start, normal=-normal, radius=headRadius,
height=headRadius, color=color, fill=True)
if endHead:
self.addCone(origin=end, normal=normal, radius=headRadius,
height=headRadius, color=color, fill=True)
def addSphere(self, center, radius=0.05, color=[1,1,1], resolution=24):
sphere = vtk.vtkSphereSource()
sphere.SetCenter(center)
sphere.SetThetaResolution(resolution)
sphere.SetPhiResolution(resolution)
sphere.SetRadius(radius)
sphere.Update()
self.addPolyData(sphere.GetOutput(), color)
def addCube(self, dimensions, center, color=[1,1,1], subdivisions=0):
bmin = np.array(center) - np.array(dimensions)/2.0
bmax = np.array(center) + np.array(dimensions)/2.0
cube = vtk.vtkTessellatedBoxSource()
cube.SetBounds(bmin[0], bmax[0], bmin[1], bmax[1], bmin[2], bmax[2])
cube.SetLevel(subdivisions)
cube.QuadsOn()
cube.Update()
self.addPolyData(cube.GetOutput(), color)
def addCylinder(self, center, axis, length, radius, color=[1,1,1]):
axis = np.asarray(axis) / np.linalg.norm(axis)
center = np.array(center)
self.addLine(center - 0.5*length*axis, center + 0.5*length*axis, radius=radius, color=color)
def addCapsule(self, center, axis, length, radius, color=[1,1,1]):
axis = np.asarray(axis) / np.linalg.norm(axis)
center = np.array(center)
self.addCylinder(center=center, axis=axis, radius=radius, length=length)
self.addSphere(center=center-0.5*length*axis, radius=radius)
self.addSphere(center=center+0.5*length*axis, radius=radius)
def addTorus(self, radius, thickness, resolution=30):
q = vtk.vtkSuperquadricSource()
q.SetToroidal(1)
q.SetSize(radius)
q.SetThetaResolution(resolution)
# thickness doesn't seem to correspond to Euclidean units: 0 is none, 1 is full; 0.1 is a good value
q.SetThickness(thickness)
q.Update()
# rotate the torus so that the hole axis (internally y) is aligned with z, which we use for valves
transform = vtk.vtkTransform()
transform.RotateWXYZ(90,1,0,0)
transformFilter=vtk.vtkTransformPolyDataFilter()
transformFilter.SetTransform(transform)
transformFilter.SetInputConnection(q.GetOutputPort())
transformFilter.Update()
self.addPolyData(transformFilter.GetOutput())
def addEllipsoid(self, center, radii, color=[1,1,1], alpha=1.0, resolution=24):
"""
Add an ellipsoid centered at [center] with x, y, and z principal axis radii given by
radii = [x_scale, y_scale, z_scale]
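A brief illustrative call (values are arbitrary):
    d.addEllipsoid(center=[0, 0, 0], radii=[1.0, 2.0, 0.5])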
"""
sphere = vtk.vtkSphereSource()
sphere.SetCenter([0,0,0])
sphere.SetThetaResolution(resolution)
sphere.SetPhiResolution(resolution)
sphere.SetRadius(1.0)
sphere.Update()
transform = vtk.vtkTransform()
transform.Translate(center)
transform.Scale(radii)
transformFilter = vtk.vtkTransformPolyDataFilter()
transformFilter.SetTransform(transform)
transformFilter.SetInputConnection(sphere.GetOutputPort())
transformFilter.Update()
self.addPolyData(transformFilter.GetOutput())
def getPolyData(self):
if self.append.GetNumberOfInputConnections(0):
self.append.Update()
return shallowCopy(self.append.GetOutput())
| mitdrc/director | src/python/director/debugVis.py | Python | bsd-3-clause | 7,129 | ["VTK"] | d1561a85e947f35a62674cf31c7fd6edb5e7952c0cc7eccedf2161ae1f3c6ccd |
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API defaults to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
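As a non-authoritative sketch of the same paginated request from Python (this
assumes the third-party `requests` package and the response headers described
above):
```
import requests

# Hypothetical illustration of the documented pagination parameters/headers.
resp = requests.get(
    'https://api.vericred.com/networks',
    headers={'Vericred-Api-Key': 'YOUR_KEY'},
    params={'page': 2, 'per_page': 5},
)
total_records = resp.headers['Total']    # total number of matching records
pagination_links = resp.headers['Link']  # RFC-5988 navigation links
```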
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s),
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size, since many of the `Provider`s will share a `State`.
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
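For instance, a minimal matching sketch over the payload shown above (the
`payload` variable is assumed to be the parsed JSON response; field names are
as in that example):
```
# 'payload' is the parsed JSON response (assumed for illustration)
states_by_id = {s['id']: s for s in payload['states']}
for provider in payload['providers']:
    provider_state = states_by_id[provider['state_id']]
```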
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import vericred_client
from vericred_client.rest import ApiException
from vericred_client.models.request_plan_find_drug_package import RequestPlanFindDrugPackage
class TestRequestPlanFindDrugPackage(unittest.TestCase):
""" RequestPlanFindDrugPackage unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testRequestPlanFindDrugPackage(self):
"""
Test RequestPlanFindDrugPackage
"""
model = vericred_client.models.request_plan_find_drug_package.RequestPlanFindDrugPackage()
if __name__ == '__main__':
unittest.main()
| vericred/vericred-python | test/test_request_plan_find_drug_package.py | Python | apache-2.0 | 10,123 | ["VisIt"] | 0c2345c120c9d70e1e4f9553247a3d26389ddc0101befa5765fac685907f508a |
# -*- coding: utf-8 -*-
#
# hl_api_connections.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Functions for connection handling
"""
from .hl_api_helper import *
from .hl_api_nodes import Create
from .hl_api_info import GetStatus
from .hl_api_simulation import GetKernelStatus, SetKernelStatus
import numpy
@check_stack
@deprecated(alt_func_name='GetConnections')
def FindConnections(source, target=None, synapse_model=None, synapse_type=None):
"""
Return an array of identifiers for connections that match the
given parameters. Only source is mandatory and must be a list of
one or more nodes. If target and/or synapse_model is/are given,
they must be single values, lists of length one or the same length
as source. Use GetStatus()/SetStatus() to inspect/modify the found
connections.
Note: FindConnections() is deprecated and will be removed in the future.
Use GetConnections() instead.
Note: synapse_type is alias for synapse_model for backward compatibility
"""
if synapse_model is not None and synapse_type is not None:
raise kernel.NESTError("'synapse_type' is alias for 'synapse_model' and cannot be used together with 'synapse_model'.")
if synapse_type is not None:
synapse_model = synapse_type
if target is None and synapse_model is None:
params = [{"source": s} for s in source]
elif target is None and synapse_model is not None:
synapse_model = broadcast(synapse_model, len(source), (uni_str,), "synapse_model")
params = [{"source": s, "synapse_model": syn}
for s, syn in zip(source, synapse_model)]
elif target is not None and synapse_model is None:
target = broadcast(target, len(source), (int,), "target")
params = [{"source": s, "target": t} for s, t in zip(source, target)]
else: # target is not None and synapse_model is not None
target = broadcast(target, len(source), (int,), "target")
synapse_model = broadcast(synapse_model, len(source), (uni_str,), "synapse_model")
params = [{"source": s, "target": t, "synapse_model": syn}
for s, t, syn in zip(source, target, synapse_model)]
sps(params)
sr("{FindConnections} Map Flatten")
result = ({
'source': int(src),
'target_thread': int(tt),
'synapse_modelid': int(sm),
'port': int(prt)
} for src, _, tt, sm, prt in spp())
return tuple(result)
@check_stack
def GetConnections(source=None, target=None, synapse_model=None):
"""
Return an array of connection identifiers.
Parameters:
source - list of source GIDs
target - list of target GIDs
synapse_model - string with the synapse model
If GetConnections is called without parameters, all connections
in the network are returned.
If a list of source neurons is given, only connections from these
pre-synaptic neurons are returned.
If a list of target neurons is given, only connections to these
post-synaptic neurons are returned.
If a synapse model is given, only connections with this synapse
type are returned.
Any combination of source, target and synapse_model parameters
is permitted.
Each connection id is a 5-tuple or, if available, a NumPy
array with the following five entries:
source-gid, target-gid, target-thread, synapse-id, port
Note: Only connections with targets on the MPI process executing
the command are returned.
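Example (an illustrative sketch; the source GIDs are assumed to exist):
    conns = GetConnections(source=[1, 2], synapse_model='static_synapse')
    GetStatus(conns)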
"""
params = {}
if source is not None:
if not is_coercible_to_sli_array(source):
raise TypeError("source must be a list of GIDs")
params['source'] = source
if target is not None:
if not is_coercible_to_sli_array(target):
raise TypeError("target must be a list of GIDs")
params['target'] = target
if synapse_model is not None:
params['synapse_model'] = kernel.SLILiteral(synapse_model)
sps(params)
sr("GetConnections")
return spp()
@check_stack
@deprecated(alt_func_name='Connect')
def OneToOneConnect(pre, post, params=None, delay=None, model="static_synapse"):
"""
Make one-to-one connections of type model between the nodes in
pre and the nodes in post. pre and post have to be lists of the
same length. If params is given (as dictionary or list of
dictionaries), they are used as parameters for the connections. If
params is given as a single float or as list of floats, it is used
as weight(s), in which case delay also has to be given as float or
as list of floats.
"""
if len(pre) != len(post):
raise kernel.NESTError("pre and post have to be the same length")
# pre post Connect
if params is None and delay is None:
for s,d in zip(pre, post):
sps(s)
sps(d)
sr('/%s Connect' % model)
# pre post params Connect
elif params is not None and delay is None:
params = broadcast(params, len(pre), (dict,), "params")
if len(params) != len(pre):
raise kernel.NESTError("params must be a dict, or list of dicts of length 1 or len(pre).")
for s,d,p in zip(pre, post, params) :
sps(s)
sps(d)
sps(p)
sr('/%s Connect' % model)
# pre post w d Connect
elif params is not None and delay is not None:
params = broadcast(params, len(pre), (float,), "params")
if len(params) != len(pre):
raise kernel.NESTError("params must be a float, or list of floats of length 1 or len(pre) and will be used as weight(s).")
delay = broadcast(delay, len(pre), (float,), "delay")
if len(delay) != len(pre):
raise kernel.NESTError("delay must be a float, or list of floats of length 1 or len(pre).")
for s,d,w,dl in zip(pre, post, params, delay) :
sps(s)
sps(d)
sps(w)
sps(dl)
sr('/%s Connect' % model)
else:
raise kernel.NESTError("Both 'params' and 'delay' have to be given.")
@check_stack
@deprecated(alt_func_name='Connect')
def ConvergentConnect(pre, post, weight=None, delay=None, model="static_synapse"):
"""
Connect all neurons in pre to each neuron in post. pre and post
have to be lists. If weight is given (as a single float or as list
of floats), delay also has to be given as float or as list of
floats.
"""
if weight is None and delay is None:
for d in post :
sps(pre)
sps(d)
sr('/%s ConvergentConnect' % model)
elif weight is not None and delay is not None:
weight = broadcast(weight, len(pre), (float,), "weight")
if len(weight) != len(pre):
raise kernel.NESTError("weight must be a float, or sequence of floats of length 1 or len(pre)")
delay = broadcast(delay, len(pre), (float,), "delay")
if len(delay) != len(pre):
raise kernel.NESTError("delay must be a float, or sequence of floats of length 1 or len(pre)")
for d in post:
sps(pre)
sps(d)
sps(weight)
sps(delay)
sr('/%s ConvergentConnect' % model)
else:
raise kernel.NESTError("Both 'weight' and 'delay' have to be given.")
@check_stack
@deprecated(alt_func_name='Connect')
def RandomConvergentConnect(pre, post, n, weight=None, delay=None, model="static_synapse", options=None):
"""
Connect n randomly selected neurons from pre to each neuron in
post. pre and post have to be lists. If weight is given (as a
single float or as list of floats), delay also has to be given as
float or as list of floats. options is a dictionary specifying
options to the RandomConvergentConnect function: allow_autapses,
allow_multapses.
"""
if not isinstance(n, int):
raise TypeError("number of neurons n should be an integer")
# store current options, set desired options
old_options = None
error = False
if options is not None:
old_options = sli_func('GetOptions', '/RandomConvergentConnect',
litconv=True)
del old_options['DefaultOptions'] # in the way when restoring
sli_func('SetOptions', '/RandomConvergentConnect', options,
litconv=True)
if weight is None and delay is None:
sli_func(
'/m Set /n Set /pre Set { pre exch n m RandomConvergentConnect } forall',
post, pre, n, '/'+model, litconv=True)
elif weight is not None and delay is not None:
weight = broadcast(weight, n, (float,), "weight")
if len(weight) != n:
raise kernel.NESTError("weight must be a float, or sequence of floats of length 1 or n")
delay = broadcast(delay, n, (float,), "delay")
if len(delay) != n:
raise kernel.NESTError("delay must be a float, or sequence of floats of length 1 or n")
sli_func(
'/m Set /d Set /w Set /n Set /pre Set { pre exch n w d m RandomConvergentConnect } forall',
post, pre, n, weight, delay, '/'+model, litconv=True)
else:
error = True
# restore old options
if old_options is not None:
sli_func('SetOptions', '/RandomConvergentConnect', old_options,
litconv=True)
if error:
raise kernel.NESTError("Both 'weight' and 'delay' have to be given.")
@check_stack
@deprecated(alt_func_name='Connect')
def DivergentConnect(pre, post, weight=None, delay=None, model="static_synapse"):
"""
Connect each neuron in pre to all neurons in post. pre and post
have to be lists. If weight is given (as a single float or as list
of floats), delay also has to be given as float or as list of
floats.
"""
if weight is None and delay is None:
for s in pre :
sps(s)
sps(post)
sr('/%s DivergentConnect' % model)
elif weight is not None and delay is not None:
weight = broadcast(weight, len(post), (float,), "weight")
if len(weight) != len(post):
raise kernel.NESTError("weight must be a float, or sequence of floats of length 1 or len(post)")
delay = broadcast(delay, len(post), (float,), "delay")
if len(delay) != len(post):
raise kernel.NESTError("delay must be a float, or sequence of floats of length 1 or len(post)")
cmd='/%s DivergentConnect' % model
for s in pre :
sps(s)
sps(post)
sps(weight)
sps(delay)
sr(cmd)
else:
raise kernel.NESTError("Both 'weight' and 'delay' have to be given.")
@check_stack
def Connect(pre, post, conn_spec=None, syn_spec=None, model=None):
"""
Connect pre nodes to post nodes.
Nodes in pre and post are connected using the specified connectivity
(all-to-all by default) and synapse type (static_synapse by default).
Details depend on the connectivity rule.
Note:
Connect does not iterate over subnets, it only connects explicitly
specified nodes.
pre - presynaptic nodes, given as list of GIDs
post - postsynaptic nodes, given as list of GIDs
conn_spec - string, or dictionary specifying connectivity rule, see below
syn_spec - string, or dictionary specifying synapse model, see below
Connectivity specification (conn_spec):
Connectivity is specified either as a string containing the name of a
connectivity rule (default: 'all_to_all') or as a dictionary specifying
the rule and any mandatory rule-specific parameters (e.g. 'indegree').
In addition, switches setting permission for establishing self-connections
('autapses', default: True) and multiple connections between a pair of nodes
('multapses', default: True) can be contained in the dictionary.
Available rules and their associated parameters are:
- 'all_to_all' (default)
- 'one_to_one'
- 'fixed_indegree', 'indegree'
- 'fixed_outdegree', 'outdegree'
- 'fixed_total_number', 'N'
- 'pairwise_bernoulli', 'p'
Example choices for the conn_spec are:
- 'one_to_one'
- {'rule': 'fixed_indegree', 'indegree': 2500, 'autapses': False}
- {'rule': 'pairwise_bernoulli', 'p': 0.1}
Synapse specification (syn_spec):
The synapse model and its properties can be given either as a string identifying
a specific synapse model (default: 'static_synapse') or as a dictionary
specifying the synapse model and its parameters.
Available keys in the synapse specification dictionary are 'model', 'weight',
'delay', 'receptor_type' and any parameters specific to the selected synapse model.
All parameters are optional and if not specified, the default values of the synapse
model will be used. The key 'model' identifies the synapse model, this can be one
of NEST's built-in synapse models or a user-defined model created via CopyModel().
If 'model' is not specified the default model 'static_synapse' will be used.
All other parameters can be scalars, arrays or distributions.
In the case of scalar parameters, all keys must be doubles except for 'receptor_type' which must be
initialised with an integer.
Parameter arrays are only available for the rules 'one_to_one' and 'all_to_all'. For 'one_to_one' the
array has to be a one-dimensional NumPy array with length len(pre). For 'all_to_all' the array has
to be a two-dimensional NumPy array with shape (len(post), len(pre)), therefore the rows describe the
target and the columns the source neurons.
Any distributed parameter must be initialised with a further dictionary specifying the distribution
type ('distribution', e.g. 'normal') and any distribution-specific parameters (e.g. 'mu' and 'sigma').
Available distributions are given in the rdevdict, the most common ones (and their
associated parameters) are:
- 'normal' with 'mu', 'sigma'
- 'normal_clipped' with 'mu', 'sigma', 'low', 'high'
- 'lognormal' with 'mu', 'sigma'
- 'lognormal_clipped' with 'mu', 'sigma', 'low', 'high'
- 'uniform' with 'low', 'high'
- 'uniform_int' with 'low', 'high'
To see all available distributions, run:
nest.slirun('rdevdict info')
To get information on a particular distribution, e.g. 'binomial', run:
nest.help('rdevdict::binomial')
Example choices for the syn_spec are:
- 'stdp_synapse'
- {'weight': 2.4, 'receptor_type': 1}
- {'model': 'stdp_synapse',
'weight': 2.5,
'delay': {'distribution': 'uniform', 'low': 0.8, 'high': 2.5},
'alpha': {'distribution': 'normal_clipped', 'low': 0.5, 'mu': 5.0, 'sigma': 1.0}
}
Note: model is alias for syn_spec for backward compatibility.
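Putting it together, a minimal sketch (node creation and the model names are
illustrative only):
    pre = Create('iaf_neuron', 10)
    post = Create('iaf_neuron', 10)
    Connect(pre, post,
            conn_spec={'rule': 'fixed_indegree', 'indegree': 5},
            syn_spec={'model': 'static_synapse', 'weight': 2.0, 'delay': 1.5})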
"""
if model is not None:
deprecation_text = "".join(["The argument 'model' is there for backward compatibility with the old ",
"Connect function and will be removed in a future version of NEST. Please change the name ",
"of the keyword argument from 'model' to 'syn_spec'. For details, see the ",
"documentation at:\nhttp://www.nest-simulator.org/connection_management"])
show_deprecation_warning("BackwardCompatibilityConnect",
text=deprecation_text)
if model is not None and syn_spec is not None:
raise kernel.NESTError("'model' is an alias for 'syn_spec' and cannot be used together with 'syn_spec'.")
sps(pre)
sps(post)
# default rule
rule = 'all_to_all'
if conn_spec is not None:
sps(conn_spec)
if is_string(conn_spec):
rule = conn_spec
sr("cvlit")
elif isinstance(conn_spec, dict):
rule = conn_spec['rule']
else:
raise kernel.NESTError("conn_spec needs to be a string or dictionary.")
else:
sr('/Connect /conn_spec GetOption')
if model is not None:
syn_spec = model
if syn_spec is not None:
if is_string(syn_spec):
sps(syn_spec)
sr("cvlit")
elif isinstance(syn_spec, dict):
for key,value in syn_spec.items():
# if value is a list, it is converted to a numpy array
if isinstance(value, (list, tuple)):
value = numpy.asarray(value)
if isinstance(value, (numpy.ndarray, numpy.generic)):
if len(value.shape) == 1:
if rule == 'one_to_one':
if value.shape[0] != len(pre):
raise kernel.NESTError("'" + key + "' has to be an array of dimension " + str(len(pre)) + ", a scalar or a dictionary.")
else:
syn_spec[key] = value
else:
raise kernel.NESTError("'" + key + "' has the wrong type. One-dimensional parameter arrays can only be used in conjunction with rule 'one_to_one'.")
elif len(value.shape) == 2:
if rule == 'all_to_all':
if value.shape[0] != len(post) or value.shape[1] != len(pre):
raise kernel.NESTError("'" + key + "' has to be an array of dimension " + str(len(post)) + "x" + str(len(pre)) + " (n_target x n_sources), a scalar or a dictionary.")
else:
syn_spec[key] = value.flatten()
else:
raise kernel.NESTError("'" + key + "' has the wrong type. Two-dimensional parameter arrays can only be used in conjunction with rule 'all_to_all'.")
sps(syn_spec)
else:
raise kernel.NESTError("syn_spec needs to be a string or dictionary.")
sr('Connect')
@check_stack
def DataConnect(pre, params=None, model="static_synapse"):
"""
Connect neurons from lists of connection data.
Variant 1.
pre: [gid_1, ... gid_n]
params: [ {param1}, ..., {param_n} ]
model= 'synapse_model'
Variant 2:
pre = [ {synapse_state1}, ..., {synapse_state_n}]
params=None
model=None
Variant 1 of DataConnect connects each neuron in pre to the targets given in params, using synapse type model.
The dictionaries in params must contain at least the following keys:
'target'
'weight'
'delay'
each resolving to a list or numpy.ndarray of values. Depending on the synapse model, other parameters can be given
in the same format. All arrays in params must have the same length as 'target'.
Variant 2 of DataConnect will connect neurons according to a list of synapse status dictionaries,
as obtained from GetStatus.
Note: During connection, status dictionary misses will not raise errors, even if
the kernel property 'dict_miss_is_error' is True.
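A short Variant 1 sketch (GIDs and parameter values are illustrative):
    DataConnect([1],
                [{'target': [2, 3],
                  'weight': [1.0, 1.5],
                  'delay': [1.0, 1.0]}])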
"""
if not is_coercible_to_sli_array(pre):
raise TypeError("pre must be a list of nodes or connection dictionaries")
if params is not None:
if not is_coercible_to_sli_array(params):
raise TypeError("params must be a list of dictionaries")
cmd = '({0}) DataConnect_i_D_s '.format(model)
for s, p in zip(pre, params):
sps(s)
sps(p)
sr(cmd)
else:
# Call the variant where all connections are given explicitly
# Disable dict checking, because most models can't re-use their own status dict
dict_miss = GetKernelStatus('dict_miss_is_error')
SetKernelStatus({'dict_miss_is_error': False})
sps(pre)
sr('DataConnect_a')
SetKernelStatus({'dict_miss_is_error': dict_miss})
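# Illustrative sketch of DataConnect variant 1 (hedged: the GIDs and parameter
# values are invented for the example; only the dictionary layout follows the
# docstring above):
#
#     pre = [1, 2]
#     params = [{'target': [3.0, 4.0], 'weight': [1.0, 1.0], 'delay': [1.0, 1.0]},
#               {'target': [4.0], 'weight': [2.0], 'delay': [1.0]}]
#     nest.DataConnect(pre, params, model='static_synapse')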
@check_stack
@deprecated(alt_func_name='Connect')
def RandomDivergentConnect(pre, post, n, weight=None, delay=None, model="static_synapse", options=None):
"""
Connect each neuron in pre to n randomly selected neurons from
post. pre and post have to be lists. If weight is given (as a
single float or as list of floats), delay also has to be given as
float or as list of floats. options is a dictionary specifying
options to the RandomDivergentConnect function: allow_autapses,
allow_multapses.
"""
if not isinstance(n, int):
raise TypeError("number of neurons n should be an integer")
# store current options, set desired options
old_options = None
error = False
if options is not None:
old_options = sli_func('GetOptions', '/RandomDivergentConnect',
litconv=True)
del old_options['DefaultOptions'] # in the way when restoring
sli_func('SetOptions', '/RandomDivergentConnect', options,
litconv=True)
if weight is None and delay is None:
sli_func(
'/m Set /n Set /post Set { n post m RandomDivergentConnect } forall',
pre, post, n, '/'+model, litconv=True)
elif weight is not None and delay is not None:
weight = broadcast(weight, n, (float,), "weight")
if len(weight) != n:
raise kernel.NESTError("weight must be a float, or sequence of floats of length 1 or n")
delay = broadcast(delay, n, (float,), "delay")
if len(delay) != n:
raise kernel.NESTError("delay must be a float, or sequence of floats of length 1 or n")
sli_func(
'/m Set /d Set /w Set /n Set /post Set { n post w d m RandomDivergentConnect } forall',
pre, post, n, weight, delay, '/'+model, litconv=True)
else:
error = True
# restore old options
if old_options is not None:
sli_func('SetOptions', '/RandomDivergentConnect', old_options,
litconv=True)
if error:
raise kernel.NESTError("Both 'weight' and 'delay' have to be given.")
def _is_subnet_instance(gids):
"Returns true if all gids point to subnet or derived type."
try:
GetChildren(gids)
return True
except kernel.NESTError:
return False
@check_stack
def CGConnect(pre, post, cg, parameter_map=None, model="static_synapse"):
"""
Connect neurons from pre to neurons from post using connectivity
specified by the connection generator cg. pre and post are either
both lists containing 1 subnet, or lists of gids. parameter_map is
a dictionary mapping names of values such as weight and delay to
value set positions. This function is only available if NEST was
compiled with support for libneurosim.
"""
sr("statusdict/have_libneurosim ::")
if not spp():
raise kernel.NESTError("NEST was not compiled with support for libneurosim: CGConnect is not available.")
if parameter_map is None:
parameter_map = {}
if _is_subnet_instance(pre[:1]):
if not _is_subnet_instance(post[:1]):
raise kernel.NESTError("if pre is a subnet, post also has to be a subnet")
if len(pre) > 1 or len(post) > 1:
raise kernel.NESTError("the length of pre and post has to be 1 if subnets are given")
sli_func('CGConnect', cg, pre[0], post[0], parameter_map, '/'+model, litconv=True)
else:
sli_func('CGConnect', cg, pre, post, parameter_map, '/'+model, litconv=True)
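# Illustrative sketch of the parameter_map argument (hedged: the value-set
# positions 0 and 1 are assumptions for the example; cg would come from
# CGParse below or another connection-generator source):
#
#     CGConnect(pre, post, cg,
#               parameter_map={'weight': 0, 'delay': 1},
#               model='static_synapse')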
@check_stack
def CGParse(xml_filename):
"""
Parse an XML file and return the corresponding connection
generator cg. The library to provide the parsing can be selected
by CGSelectImplementation().
"""
sr("statusdict/have_libneurosim ::")
if not spp():
raise kernel.NESTError("NEST was not compiled with support for libneurosim: CGParse is not available.")
sps(xml_filename)
sr("CGParse")
return spp()
@check_stack
def CGSelectImplementation(tag, library):
"""
Select a library to provide a parser for XML files and associate
an XML tag with the library. XML files can be read by CGParse().
"""
sr("statusdict/have_libneurosim ::")
if not spp():
raise kernel.NESTError("NEST was not compiled with support for libneurosim: CGSelectImplementation is not available.")
sps(tag)
sps(library)
sr("CGSelectImplementation")
|
zifeo/nest-simulator
|
pynest/nest/lib/hl_api_connections.py
|
Python
|
gpl-2.0
| 25,062
|
[
"NEURON"
] |
d7d7174bb2a3504d2aef1a989b6a942246c3ea4e7a0e233906eb5fee855449e9
|
import os
import subprocess
import sys
import numpy
import argparse
import pysam
import vcf
import pybedtools
import logging
from collections import defaultdict, OrderedDict
from utils import makedirs
def uint(value):
if not value.isdigit(): raise argparse.ArgumentTypeError("%s is not digit-only" % value)
ret = int(value)
if ret < 0: raise argparse.ArgumentTypeError("%s is negative" % value)
return ret
def gen_restricted_reference(reference, regions_bed, out_reference, use_short_contigs_names=False):
logger = logging.getLogger(gen_restricted_reference.__name__)
reference_handle = pysam.Fastafile(reference)
regions_bedtool = pybedtools.BedTool(regions_bed)
with open(out_reference, "w") as out_fasta:
for region_index, region in enumerate(regions_bedtool, start=1):
sequence = reference_handle.fetch(reference=str(region.chrom), start=region.start, end=region.end)
region_name = str(region_index) if use_short_contigs_names else ("%s_%d_%d" % (str(region.chrom), region.start, region.end) )
if region_index == 1:
out_fasta.write(">{}\n{}".format(region_name, sequence))
else: out_fasta.write("\n>{}\n{}".format(region_name, sequence))
pysam.faidx(out_reference)
logger.info("Lifted over the reference to {}".format(out_reference))
reference_handle.close()
return out_reference
def gen_restricted_vcf(in_vcf, regions_bed, out_vcf, restricted_reference, targeted_samples, flank=0, use_short_contig_names=False):
logger = logging.getLogger(gen_restricted_vcf.__name__)
if not in_vcf:
return None
if not os.path.isfile(in_vcf):
logger.error("%s not found" % in_vcf)
return None
reference_handle = pysam.Fastafile(restricted_reference)
contigs = list(zip(reference_handle.references, reference_handle.lengths))
reference_handle.close()
logger.warning("Setting CN to be String type (not standard VCF spec)...")
vcf.parser.RESERVED_FORMAT = {
'GT': 'String', 'DP': 'Integer', 'FT': 'String', 'GL': 'Float',
'GLE': 'String', 'PL': 'Integer', 'GP': 'Float', 'GQ': 'Integer',
'HQ': 'Integer', 'PS': 'Integer', 'PQ': 'Integer', 'EC': 'Integer',
'MQ': 'Integer',
# Keys used for structural variants
'CN': 'String', 'CNQ': 'Float', 'CNL': 'Float', 'NQ': 'Integer',
'HAP': 'Integer', 'AHAP': 'Integer'
}
# get the base name and use it in the output
vcf_template_reader = vcf.Reader(open(in_vcf, "r"))
vcf_template_reader.metadata["reference"] = restricted_reference
vcf_template_reader.contigs = OrderedDict([(contig_name, vcf.parser._Contig(contig_name, contig_length)) for (contig_name, contig_length) in contigs])
new_samples = []
if targeted_samples:
for k,v in sorted(vcf_template_reader._sample_indexes.iteritems()):
if k in targeted_samples:
new_samples.append(k)
vcf_template_reader.samples = new_samples
vcf_writer = vcf.Writer(open(out_vcf, "w"), vcf_template_reader)
if targeted_samples:
vcf_template_reader = vcf.Reader(open(in_vcf, "r"))
#tabix_vcf = pysam.TabixFile(invcf, parser=pysam.asVCF())
info_warned = False
regions_bedtool = pybedtools.BedTool(regions_bed)
logger.warning("only process fully-contained variants")
logger.warning("right now we only deal with SVLEN, which is agnostic of region start")
logger.warning("ignore END in INFO field for now")
for region_index, region in enumerate(regions_bedtool, start=1):
records = None
try: records = vcf_template_reader.fetch(chrom=str(region.chrom), start=region.start, end=region.end)
except ValueError: logger.info("No records found in %s from %s" % (str(region).strip(), in_vcf))
if records is None: continue
for record in records:
if record.POS <= region.start + flank or record.POS + len(record.REF) + flank - 1 >= region.end: continue
if 'SVTYPE' in record.INFO and record.INFO['SVTYPE'] in ['DEL','INV','DUP'] and record.POS + max(map(abs, record.INFO['SVLEN'])) >= region.end + flank: continue
record.CHROM = str(region_index) if use_short_contig_names else ("%s_%d_%d" % (str(region.chrom), region.start, region.end))
# record.POS seems to be zero-based, at least in the infinite wisdom of my version of pysam
record.POS = record.POS - region.start
if not new_samples:
vcf_writer.write_record(record)
continue
else:
snames = []
sindexes = {}
for s in new_samples:
for i in xrange(len(record.samples)):
if s == record.samples[i].sample:
sindexes[s] = i
snames.append(record.samples[i])
vcfrecord = vcf.model._Record(record.CHROM, record.POS, record.ID, record.REF, record.ALT, record.QUAL, record.FILTER, record.INFO, record.FORMAT, sindexes, snames)
vcf_writer.write_record(vcfrecord)
vcf_writer.close()
pysam.tabix_index(out_vcf, force=True, preset='vcf')
logger.info("Lifted over the VCF %s to %s" % (in_vcf, out_vcf))
return "{}.gz".format(out_vcf)
def gen_restricted_ref_and_vcfs(reference, invcfs, regions, samples, outdir, flank=0, short_contig_names=False):
restricted_fasta = reference
outvcfs = invcfs
if regions:
makedirs([outdir])
restricted_fasta = os.path.join(outdir, "ref.fa")
gen_restricted_reference(reference, regions, restricted_fasta, short_contig_names)
if outvcfs:
outvcfs = map(lambda x: os.path.join(outdir, os.path.splitext(os.path.basename(x))[0]) if x else None, invcfs)
generated_vcfs = []
for invcf, outvcf in zip(invcfs, outvcfs):
generated_vcfs.append(gen_restricted_vcf(invcf, regions, outvcf, restricted_fasta, samples, flank, short_contig_names))
outvcfs = generated_vcfs
return (restricted_fasta, outvcfs)
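# Illustrative programmatic use (hedged: file names are placeholders; the CLI in
# main() below is the supported entry point):
#
#     fasta, vcfs = gen_restricted_ref_and_vcfs(
#         reference="hs37d5.fa", invcfs=["calls.vcf.gz"], regions="targets.bed",
#         samples=[], outdir="restricted", flank=0, short_contig_names=False)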
def main():
logger = logging.getLogger(main.__name__)
parser = argparse.ArgumentParser(description="Generate restricted FASTAs and VCFs given a BED file. The contigs are the sequences for each genomic region in the BED file and the name of the contigs reflects that. The VCFs use the coordinates on the new contigs.", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--reference", help="Reference FASTA", required=True)
parser.add_argument("--regions", help="Regions BED", required=True)
parser.add_argument("--vcfs", nargs="+", required=True, default=[])
parser.add_argument("--outdir", required=True)
parser.add_argument("--flank", type=uint, default=0, help="Ignore variants this close to the edges of a region")
parser.add_argument("--short_contig_names", action="store_true", help="Generate short contig names instead of the chr_start_end naming")
parser.add_argument("--samples", nargs="+", default=[], help="Select specific samples. Select all samples if leave empty")
args = parser.parse_args()
gen_restricted_ref_and_vcfs(args.reference, args.vcfs, args.regions, args.samples, args.outdir, args.flank, args.short_contig_names)
if __name__ == "__main__":
FORMAT = '%(levelname)s %(asctime)-15s %(name)-20s %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
main()
|
bioinform/varsim
|
generate_small_test_ref.py
|
Python
|
bsd-2-clause
| 7,472
|
[
"pysam"
] |
ef3f48193e260f31bb14d77f1b59740b1d7f3231ea4e43a98e080ea3c423a79c
|
# -*- coding: utf-8 -*-
# MD-Tracks is a trajectory analysis toolkit for molecular dynamics
# and monte carlo simulations.
# Copyright (C) 2007 - 2012 Toon Verstraelen <Toon.Verstraelen@UGent.be>, Center
# for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all rights
# reserved unless otherwise stated.
#
# This file is part of MD-Tracks.
#
# MD-Tracks is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# In addition to the regulations of the GNU General Public License,
# publications and communications based in parts on this program or on
# parts of this program are required to cite the following article:
#
# "MD-TRACKS: A productive solution for the advanced analysis of Molecular
# Dynamics and Monte Carlo simulations", Toon Verstraelen, Marc Van Houteghem,
# Veronique Van Speybroeck and Michel Waroquier, Journal of Chemical Information
# and Modeling, 48 (12), 2414-2424, 2008
# DOI:10.1021/ci800233y
#
# MD-Tracks is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
from tracks.core import MultiTracksReader, MultiTracksWriter
from molmod.io import XYZReader, ATRJReader, DLPolyHistoryReader, \
DLPolyOutputReader, LAMMPSDumpReader, GroReader, XYZWriter, \
CPMDTrajectoryReader
from molmod.units import angstrom, femtosecond, deg, amu, picosecond, bar
import os, numpy, itertools
__all__ = [
"xyz_to_tracks", "cp2k_ener_to_tracks", "cpmd_ener_to_tracks",
"cp2k_cell_to_tracks", "cp2k_stress_to_tracks", "cpmd_traj_to_tracks",
"tracks_to_xyz", "atrj_to_tracks", "dlpoly_history_to_tracks",
"dlpoly_output_to_tracks",
]
def iter_real_lines(f):
for line in f:
line = line[:line.find("#")]
if len(line.strip()) > 0:
yield line
def xyz_to_tracks(filename, middle_word, destination, sub=slice(None), file_unit=angstrom, atom_indexes=None, clear=True):
"""Convert an xyz file into separate tracks."""
xyz_reader = XYZReader(filename, sub, file_unit=file_unit)
filenames = []
if atom_indexes is None:
atom_indexes = range(len(xyz_reader.numbers))
else:
atom_indexes = list(atom_indexes)
for index in atom_indexes:
for cor in ["x", "y", "z"]:
filenames.append(os.path.join(destination, "atom.%s.%07i.%s" % (middle_word, index, cor)))
shape = (len(atom_indexes),3)
dtype = numpy.dtype([("cor", float, shape)])
mtw = MultiTracksWriter(filenames, dtype, clear=clear)
for title, coordinates in xyz_reader:
mtw.dump_row((coordinates[atom_indexes],))
mtw.finish()
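# Illustrative call (hedged: the paths and middle word are placeholders):
#
#     xyz_to_tracks("md.pos.xyz", "pos", "tracks",
#                   sub=slice(0, None, 10), atom_indexes=[0, 1, 2])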
def cp2k_ener_to_tracks(filename, destination, sub=slice(None), clear=True):
"""Convert a cp2k energy file into separate tracks."""
names = ["step", "time", "kinetic_energy", "temperature", "potential_energy", "conserved_quantity"]
filenames = list(os.path.join(destination, name) for name in names)
dtypes = [int, float, float, float, float, float]
dtype = numpy.dtype([ (name, t, 1) for name, t in zip(names, dtypes) ])
mtw = MultiTracksWriter(filenames, dtype, clear=clear)
f = file(filename)
for line in itertools.islice(iter_real_lines(f), sub.start, sub.stop, sub.step):
row = [float(word) for word in line.split()[:6]]
row[1] = row[1]*femtosecond
mtw.dump_row(tuple(row))
f.close()
mtw.finish()
def cpmd_ener_to_tracks(filename, destination, sub=slice(None), clear=True):
"""Convert a cp2k energy file into separate tracks."""
names = ["step", "fict_kinectic_energy", "temperature", "potential_energy", "classical_energy", "hamiltonian_energy", "ms_displacement"]
filenames = list(os.path.join(destination, name) for name in names)
dtypes = [int, float, float, float, float, float, float]
dtype = numpy.dtype([ (name, t, 1) for name, t in zip(names, dtypes) ])
mtw = MultiTracksWriter(filenames, dtype, clear=clear)
f = file(filename)
for line in itertools.islice(f, sub.start, sub.stop, sub.step):
row = tuple(float(word) for word in line.split()[:7])
mtw.dump_row(row)
f.close()
mtw.finish()
def cp2k_cell_to_tracks(filename, destination, sub=slice(None), clear=True):
names = ["step", "time", "cell.a.x", "cell.a.y", "cell.a.z", "cell.b.x", "cell.b.y", "cell.b.z", "cell.c.x", "cell.c.y", "cell.c.z", "volume", "cell.a", "cell.b", "cell.c", "cell.alpha", "cell.beta", "cell.gamma"]
filenames = list(os.path.join(destination, name) for name in names)
dtype = numpy.dtype([("step", int),("time", float),("cell", float, (3,3)),("volume", float),("norms", float, 3),("angles", float, 3)])
mtw = MultiTracksWriter(filenames, dtype, clear=clear)
f = file(filename)
for line in itertools.islice(iter_real_lines(f), sub.start, sub.stop, sub.step):
values = [float(word) for word in line.split()[:12]]
row = [int(values[0]),values[1]*femtosecond]
cell = numpy.array(values[2:11]).reshape(3,3).transpose()*angstrom
row.append(cell)
row.append(values[11] * angstrom**3)
norms = numpy.sqrt((cell**2).sum(axis=0))
row.append(norms)
alpha = numpy.arccos(numpy.clip(numpy.dot(cell[:,1],cell[:,2])/norms[1]/norms[2], -1,1))
beta = numpy.arccos(numpy.clip(numpy.dot(cell[:,2],cell[:,0])/norms[2]/norms[0], -1,1))
gamma = numpy.arccos(numpy.clip(numpy.dot(cell[:,0],cell[:,1])/norms[0]/norms[1], -1,1))
row.append(numpy.array([alpha,beta,gamma]))
mtw.dump_row(tuple(row))
f.close()
mtw.finish()
def cp2k_stress_to_tracks(filename, destination, sub=slice(None), clear=True):
names = ["step", "time", "stress.xx", "stress.xy", "stress.xz", "stress.yx", "stress.yy", "stress.yz", "stress.zx", "stress.zy", "stress.zz", "pressure"]
filenames = list(os.path.join(destination, name) for name in names)
dtype = numpy.dtype([("step", int),("time", float),("stress", float, (3,3)),("pressure", float)])
mtw = MultiTracksWriter(filenames, dtype, clear=clear)
f = file(filename)
for line in itertools.islice(iter_real_lines(f), sub.start, sub.stop, sub.step):
values = [float(word) for word in line.split()[:11]]
row = [int(values[0]),values[1]*femtosecond]
cell = numpy.array(values[2:11]).reshape(3,3).transpose()*bar
row.append(cell)
row.append((cell[0,0]+cell[1,1]+cell[2,2])/3)
mtw.dump_row(tuple(row))
f.close()
mtw.finish()
def cpmd_traj_to_tracks(filename, num_atoms, destination, sub=slice(None), atom_indexes=None, clear=True):
"""Convert a cpmd trajectory file into separate tracks.
num_atoms must be the number of atoms in the system.
"""
if atom_indexes is None:
atom_indexes = range(num_atoms)
else:
atom_indexes = list(atom_indexes)
names = []
for index in atom_indexes:
names.append("atom.pos.%07i.x" % index)
names.append("atom.pos.%07i.y" % index)
names.append("atom.pos.%07i.z" % index)
for index in atom_indexes:
names.append("atom.vel.%07i.x" % index)
names.append("atom.vel.%07i.y" % index)
names.append("atom.vel.%07i.z" % index)
filenames = list(os.path.join(destination, name) for name in names)
shape = (len(atom_indexes), 3)
dtype = numpy.dtype([("pos", float, shape), ("vel", float, shape)])
mtw = MultiTracksWriter(filenames, dtype, clear=clear)
ctr = CPMDTrajectoryReader(filename, sub)
for pos, vel in ctr:
mtw.dump_row((pos,vel))
mtw.finish()
def tracks_to_xyz(prefix, destination, symbols, sub=slice(None), file_unit=angstrom, atom_indexes=None, unit_cell_iter=None, groups=None):
"""Converts a set of tracks into an xyz file."""
if atom_indexes is None:
atom_indexes = range(len(symbols))
else:
atom_indexes = list(atom_indexes)
if groups is not None:
# reduce the groups to the selected atoms and use the index of the
# reduced set.
reverse_indexes = dict((atom_index, counter) for counter, atom_index in enumerate(atom_indexes))
new_groups = []
for group in groups:
new_group = []
for atom_index in group:
new_index = reverse_indexes.get(atom_index)
if new_index is not None:
new_group.append(new_index)
if len(new_group) > 0:
new_groups.append(new_group)
groups = new_groups
symbols = [symbols[index] for index in atom_indexes]
filenames = []
for index in atom_indexes:
for c in 'xyz':
filenames.append("%s.%07i.%s" % (prefix, index, c))
f = file(destination, 'w')
xyz_writer = XYZWriter(f, symbols, file_unit=file_unit)
dtype = numpy.dtype([("cor", float, (len(atom_indexes), 3))])
mtr = MultiTracksReader(filenames, dtype, sub=sub)
for row in mtr:
coordinates = row["cor"]
if unit_cell_iter is not None:
try:
uc = unit_cell_iter.next()
except StopIteration:
raise ValueError("Not enough frames in the unit cell tracks.")
if groups is None:
coordinates -= numpy.dot(uc.matrix, numpy.floor(numpy.dot(uc.reciprocal, coordinates.transpose()))).transpose()
else:
for group in groups:
center = coordinates[group].mean(axis=0)
coordinates[group] -= numpy.dot(uc.matrix, numpy.floor(numpy.dot(uc.reciprocal, center)))
xyz_writer.dump("None", coordinates)
f.close()
def atrj_to_tracks(filename, destination, sub=slice(None), atom_indexes=None, clear=True):
atrj_reader = ATRJReader(filename, sub)
if atom_indexes is None:
atom_indexes = range(atrj_reader.num_atoms)
else:
atom_indexes = list(atom_indexes)
filenames = []
fields = []
for index in atom_indexes:
for cor in ["x", "y", "z"]:
filenames.append(os.path.join(destination, "atom.pos.%07i.%s" % (index, cor)))
fields.append( ("cor", float, (len(atom_indexes),3)) )
filenames.append(os.path.join(destination, "time"))
fields.append( ("time", float, 1) )
filenames.append(os.path.join(destination, "step"))
fields.append( ("step", int, 1) )
filenames.append(os.path.join(destination, "total_energy"))
fields.append( ("tote", float, 1) )
dtype = numpy.dtype(fields)
mtw = MultiTracksWriter(filenames, dtype, clear=clear)
for frame in atrj_reader:
mtw.dump_row((
frame.coordinates[atom_indexes],
frame.time, frame.step, frame.total_energy
))
mtw.finish()
def dlpoly_history_to_tracks(
filename, destination, sub=slice(None), atom_indexes=None, clear=True,
pos_unit=angstrom, vel_unit=angstrom/picosecond, frc_unit=amu*angstrom/picosecond**2, time_unit=picosecond,
mass_unit=amu
):
hist_reader = DLPolyHistoryReader(filename, sub, pos_unit, vel_unit, frc_unit, time_unit, mass_unit)
if atom_indexes is None:
atom_indexes = range(hist_reader.num_atoms)
else:
atom_indexes = list(atom_indexes)
filenames = []
fields = []
filenames.append(os.path.join(destination, "step"))
fields.append( ("step", int, 1) )
filenames.append(os.path.join(destination, "time"))
fields.append( ("time", float, 1) )
for vec in "abc":
for cor in "xyz":
filenames.append(os.path.join(destination, "cell.%s.%s" % (vec, cor)))
fields.append( ("cell", float, (3,3)) )
for vec in "abc":
filenames.append(os.path.join(destination, "cell.%s" % (vec)))
fields.append( ("norms", float, 3) )
for angle in "alpha", "beta", "gamma":
filenames.append(os.path.join(destination, "cell.%s" % (angle)))
fields.append( ("angles", float, 3) )
for index in atom_indexes:
for cor in "xyz":
filenames.append(os.path.join(destination, "atom.pos.%07i.%s" % (index, cor)))
fields.append( ("pos", float, (len(atom_indexes),3)) )
if hist_reader.keytrj > 0:
for index in atom_indexes:
for cor in "xyz":
filenames.append(os.path.join(destination, "atom.vel.%07i.%s" % (index, cor)))
fields.append( ("vel", float, (len(atom_indexes),3)) )
if hist_reader.keytrj > 1:
for index in atom_indexes:
for cor in "xyz":
filenames.append(os.path.join(destination, "atom.frc.%07i.%s" % (index, cor)))
fields.append( ("frc", float, (len(atom_indexes),3)) )
dtype = numpy.dtype(fields)
mtw = MultiTracksWriter(filenames, dtype, clear=clear)
for frame in hist_reader:
cell = frame["cell"]
norms = numpy.sqrt((cell**2).sum(axis=0))
frame["norms"] = norms
alpha = numpy.arccos(numpy.clip(numpy.dot(cell[:,1],cell[:,2])/norms[1]/norms[2], -1,1))
beta = numpy.arccos(numpy.clip(numpy.dot(cell[:,2],cell[:,0])/norms[2]/norms[0], -1,1))
gamma = numpy.arccos(numpy.clip(numpy.dot(cell[:,0],cell[:,1])/norms[0]/norms[1], -1,1))
frame["angles"] = [alpha, beta, gamma]
frame["pos"] = frame["pos"][atom_indexes]
if hist_reader.keytrj > 0:
frame["vel"] = frame["vel"][atom_indexes]
if hist_reader.keytrj > 1:  # forces are only present when keytrj > 1, matching the field setup above
frame["frc"] = frame["frc"][atom_indexes]
mtw.dump_row(tuple(frame[name] for name, type, shape in fields))
mtw.finish()
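# Illustrative call (hedged: the HISTORY path and destination are placeholders;
# the units default to the DL_POLY conventions declared in the signature):
#
#     dlpoly_history_to_tracks("HISTORY", "tracks",
#                              sub=slice(None), atom_indexes=None)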
def dlpoly_output_to_tracks(
filename, destination, sub=slice(None), clear=True, skip_equi_period=True,
pos_unit=angstrom, time_unit=picosecond, angle_unit=deg, e_unit=amu/(angstrom/picosecond)**2
):
output_reader = DLPolyOutputReader(filename, sub, skip_equi_period, pos_unit, time_unit, angle_unit, e_unit)
filenames = [
"step", "conserved_quantity", "temperature", "potential_energy",
"vanderwaals_energy", "coulomb_energy", "bond_energy", "bending_energy",
"torsion_energy", "tethering_energy", "time", "enthalpy",
"rotational_temperature", "virial", "vanderwaals_virial",
"coulomb_virial", "bond_viral", "bending_virial", "constraint_virial",
"tethering_virial", "cputime", "volume", "shell_temperature",
"shell_energy", "shell_virial", "cell.alpha", "cell.beta", "cell.gamma",
"pmf_virial", "pressure",
]
filenames = [os.path.join(destination, filename) for filename in filenames]
fields = [("step", int)] + [("foo%i" % i, float) for i in xrange(29)]
dtype = numpy.dtype(fields)
mtw = MultiTracksWriter(filenames, dtype, clear=clear)
for row in output_reader:
mtw.dump_row(tuple(row))
mtw.finish()
def lammps_dump_to_tracks(filename, destination, meta, sub=slice(None), clear=True):
units = []
for unit, name, isvector in meta:
if isvector:
units.extend([unit, unit, unit])
else:
units.append(unit)
dump_reader = LAMMPSDumpReader(filename, units, sub)
num_atoms = dump_reader.num_atoms
filenames = [os.path.join(destination, "step")]
fields = [("step", int)]
for unit, name, isvector in meta:
if isvector:
for i in xrange(num_atoms):
filenames.append(os.path.join(destination, "atom.%s.%07i.x" % (name, i)))
fields.append(("atom.%s.x" % name, float, num_atoms))
for i in xrange(num_atoms):
filenames.append(os.path.join(destination, "atom.%s.%07i.y" % (name, i)))
fields.append(("atom.%s.y" % name, float, num_atoms))
for i in xrange(num_atoms):
filenames.append(os.path.join(destination, "atom.%s.%07i.z" % (name, i)))
fields.append(("atom.%s.z" % name, float, num_atoms))
else:
for i in xrange(num_atoms):
filenames.append(os.path.join(destination, "atom.%s.%07i" % (name, i)))
fields.append(("atom.%s" % name, float, num_atoms))
dtype = numpy.dtype(fields)
mtw = MultiTracksWriter(filenames, dtype, clear=clear)
for frame in dump_reader:
mtw.dump_row(tuple(frame))
mtw.finish()
def gro_to_tracks(filename, destination, sub=slice(None), clear=True):
gro_reader = GroReader(filename, sub)
num_atoms = gro_reader.num_atoms
names = ["time"]
fields = [("time", numpy.float32)]
names.extend(sum((
["atom.pos.%07i.x" % index, "atom.pos.%07i.y" % index,
"atom.pos.%07i.z" % index]
for index in xrange(num_atoms)
), []))
fields.append(("pos", numpy.float32, (num_atoms,3)))
names.extend(sum((
["atom.vel.%07i.x" % index, "atom.vel.%07i.y" % index,
"atom.vel.%07i.z" % index]
for index in xrange(num_atoms)
), []))
fields.append(("vel", numpy.float32, (num_atoms,3)))
names.extend([
"cell.a.x", "cell.b.x", "cell.c.x",
"cell.a.y", "cell.b.y", "cell.c.y",
"cell.a.z", "cell.b.z", "cell.c.z",
])
fields.append(("cell", numpy.float32, (3,3)))
dtype = numpy.dtype(fields)
filenames = [os.path.join(destination, name) for name in names]
mtw = MultiTracksWriter(filenames, dtype, clear=clear)
for time, pos, vel, cell in gro_reader:
mtw.dump_row((time, pos, vel, cell))
mtw.finish()
|
molmod/md-tracks
|
tracks/convert.py
|
Python
|
gpl-3.0
| 17,763
|
[
"CP2K",
"CPMD"
] |
24acb3106b169ae019d11af33d5bf0e53ee6d05ca468bb0f1e8e774798095d7e
|
import unittest
import numpy as np
import cpnest.model
class GaussianModel(cpnest.model.Model):
"""
A simple 2 dimensional gaussian
"""
def __init__(self):
pass
names=['x','y']
bounds=[[-10,10],[-10,10]]
analytic_log_Z=0.0 - np.log(bounds[0][1]-bounds[0][0]) - np.log(bounds[1][1]-bounds[1][0])
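# The likelihood below is a normalised 2-D Gaussian, so the evidence is simply the
# reciprocal of the uniform prior volume: log Z = -log(20) - log(20) ~= -5.99.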
def log_likelihood(self,p):
return -0.5*(p['x']**2 + p['y']**2) - np.log(2.0*np.pi)
def log_prior(self,p):
return super(GaussianModel,self).log_prior(p)
def force(self, p):
f = np.zeros(1, dtype = {'names':p.names, 'formats':['f8' for _ in p.names]})
return f
class GaussianTestCase(unittest.TestCase):
"""
Test the gaussian model
"""
def setUp(self):
gaussmodel = GaussianModel()
self.work=cpnest.CPNest(gaussmodel,verbose=2,nthreads=1,nlive=1000,maxmcmc=5000,poolsize=1000)
print('Sampling 2D gaussian with analytic evidence {0}'.format(gaussmodel.analytic_log_Z))
def test_run(self):
self.work.run()
# 2 sigma tolerance
tolerance = 2.0*np.sqrt(self.work.NS.state.info/self.work.NS.Nlive)
self.assertTrue(np.abs(self.work.NS.logZ - GaussianModel.analytic_log_Z)<tolerance, 'Incorrect evidence for normalised distribution: {0} +/- {2} instead of {1}'.format(self.work.NS.logZ ,GaussianModel.analytic_log_Z, tolerance))
def test_all():
unittest.main(verbosity=2)
if __name__=='__main__':
unittest.main(verbosity=2)
|
johnveitch/cpnest
|
tests/test_2d.py
|
Python
|
mit
| 1,488
|
[
"Gaussian"
] |
39c1cde73fba021203d29bf60f553e229efbee779e55054aea7529cb2cf3a689
|
'''apport package hook for foomatic-db-engine
(c) 2009 Canonical Ltd.
Author: Brian Murray <brian@ubuntu.com>
'''
from apport.hookutils import *
def add_info(report):
attach_hardware(report)
attach_printing(report)
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/apport/package-hooks/source_foomatic-db-engine.py
|
Python
|
gpl-3.0
| 226
|
[
"Brian"
] |
3d3f84ed7de5d7f126cf87a5ed883fb16dc3e162fca301ddb24938f260261951
|
"""
Accounting agent to consume perfSONAR network metrics received via a message queue.
"""
from datetime import datetime
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.AccountingSystem.Client.Types.Network import Network
from DIRAC.ConfigurationSystem.Client.Config import gConfig
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.Resources.MessageQueue.MQCommunication import createConsumer
__RCSID__ = "$Id$"
class NetworkAgent (AgentModule):
"""
AccountingSystem agent to processes messages containing perfSONAR network metrics.
Results are stored in the accounting database.
"""
BUFFER_TIMEOUT = 3600 # default number of seconds after which network accounting
# objects are removed from the temporary buffer
def initialize(self):
self.log = gLogger.getSubLogger('NetworkAgent')
# API initialization is required to get an up-to-date configuration from the CS
self.csAPI = CSAPI()
self.csAPI.initialize()
# temporary buffer for network accounting objects + some parameters
self.buffer = {} # { {addTime: datetime.now(), object: Network() }, ... }
self.bufferTimeout = self.am_getOption('BufferTimeout', NetworkAgent.BUFFER_TIMEOUT)
# internal list of message queue consumers
self.consumers = []
# host-to-dirac name dictionary
self.nameDictionary = {}
# statistics
self.messagesCount = 0 # number of received messages
self.messagesCountOld = 0 # previous number of received messages (used to check connection status)
self.skippedMessagesCount = 0 # number of skipped messages (errors, unsupported metrics, etc.)
self.PLRMetricCount = 0 # number of received packet-loss-rate metrics
self.OWDMetricCount = 0 # number of received one-way-delay metrics
self.skippedMetricCount = 0 # number of skipped metrics (errors, invalid data, etc.)
self.insertedCount = 0 # number of properly inserted accounting objects
self.removedCount = 0 # number of removed accounting objects (missing data)
return S_OK()
def finalize(self):
'''
Gracefully close all consumer connections and commit last records to the DB.
'''
for consumer in self.consumers:
consumer.close()
self.commitData()
return S_OK()
def execute(self):
'''
During each cycle update the internal host-to-dirac name dictionary,
check the consumers status (restart them if necessary),
commit data stored in the buffer and show statistics.
'''
self.updateNameDictionary()
self.checkConsumers()
self.commitData()
self.showStatistics()
return S_OK()
def updateNameDictionary(self):
'''
Update the internal host-to-dirac name dictionary.
'''
result = gConfig.getConfigurationTree('/Resources/Sites', 'Network/', '/Enabled')
if not result['OK']:
self.log.error("getConfigurationTree() failed with message: %s" % result['Message'])
return S_ERROR('Unable to fetch perfSONAR endpoints from CS.')
tmpDict = {}
for path, value in result['Value'].iteritems():
if value == 'True':
elements = path.split('/')
diracName = elements[4]
hostName = elements[6]
tmpDict[hostName] = diracName
self.nameDictionary = tmpDict
def checkConsumers(self):
'''
Check whether consumers exist and work properly.
(Re)create consumers if needed.
'''
# recreate consumers if there are any problems
if not self.consumers or self.messagesCount == self.messagesCountOld:
for consumer in self.consumers:
consumer.close()
for uri in self.am_getOption('MessageQueueURI', '').replace(" ", "").split(','):
result = createConsumer(uri, self.processMessage)
if not result['OK']:
self.log.error('Failed to create a consumer from URI: %s' % uri)
continue
else:
self.log.info('Successfully created a consumer from URI: %s' % uri)
self.consumers.append(result['Value'])
if self.consumers:
return S_OK('Successfully created at least one consumer')
return S_ERROR('Failed to create at least one consumer')
# if everything is OK just update the counter
else:
self.messagesCountOld = self.messagesCount
def processMessage(self, headers, body):
'''
Process a message containing perfSONAR data and store the result in the Accounting DB.
Supports packet-loss-rate and one-way-delay metrics sent in raw data streams.
Function is designed to be an MQConsumer callback function.
'''
self.messagesCount += 1
metadata = {
'SourceIP': body['meta']['source'],
'SourceHostName': body['meta']['input_source'],
'DestinationIP': body['meta']['destination'],
'DestinationHostName': body['meta']['input_destination'],
}
try:
metadata['Source'] = self.nameDictionary[body['meta']['input_source']]
metadata['Destination'] = self.nameDictionary[body['meta']['input_destination']]
except KeyError as error:
# messages with unsupported source or destination host name can be safely skipped
self.skippedMessagesCount += 1
self.log.debug('Host "%s" does not exist in the host-to-dirac name dictionary (message skipped)' % error)
return S_OK()
metadataKey = ''
for value in metadata.values():
metadataKey += value
timestamps = sorted(body['datapoints'])
for timestamp in timestamps:
try:
date = datetime.utcfromtimestamp(float(timestamp))
# create a key that allows to join packet-loss-rate and one-way-delay
# metrics in one network accounting record
networkAccountingObjectKey = "%s%s" % (metadataKey, str(date))
# use existing or create a new temporary accounting
# object to store the data in DB
if networkAccountingObjectKey in self.buffer:
net = self.buffer[networkAccountingObjectKey]['object']
timeDifference = datetime.now() - self.buffer[networkAccountingObjectKey]['addTime']
if timeDifference.total_seconds() > 60:
self.log.warn('Object was taken from buffer after %s' % (timeDifference))
else:
net = Network()
net.setStartTime(date)
net.setEndTime(date)
net.setValuesFromDict(metadata)
# get data stored in metric
metricData = body['datapoints'][timestamp]
# look for supported event types
if headers['event-type'] == 'packet-loss-rate':
self.PLRMetricCount += 1
if metricData < 0 or metricData > 1:
raise Exception('Invalid PLR metric (%s)' % (metricData))
net.setValueByKey('PacketLossRate', metricData * 100)
elif headers['event-type'] == 'histogram-owdelay':
self.OWDMetricCount += 1
# calculate statistics from histogram
OWDMin = 999999
OWDMax = 0
total = 0
count = 0
for value, items in metricData.iteritems():
floatValue = float(value)
total += floatValue * items
count += items
OWDMin = min(OWDMin, floatValue)
OWDMax = max(OWDMax, floatValue)
OWDAvg = float(total) / count
# skip metrics with invalid data
if OWDAvg < 0 or OWDMin < 0 or OWDMax < 0:
raise Exception('Invalid OWD metric (%s, %s, %s)' %
(OWDMin, OWDAvg, OWDMax))
else:
# approximate jitter value
net.setValueByKey('Jitter', OWDMax - OWDMin)
net.setValueByKey('OneWayDelay', OWDAvg)
else:
self.skippedMetricCount += 1
continue
self.buffer[networkAccountingObjectKey] = {'addTime': datetime.now(),
'object': net}
# suppress all exceptions to protect the listener thread
except Exception as e:
self.skippedMetricCount += 1
self.log.warn('Metric skipped because of an exception: %s' % e)
return S_OK()
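# Shape of a message this callback expects, reconstructed from the accesses above
# (hedged: the field values are invented; only the keys are implied by the code):
#
#     headers = {'event-type': 'packet-loss-rate'}
#     body = {'meta': {'source': '192.0.2.1', 'input_source': 'ps01.example.org',
#                      'destination': '192.0.2.2', 'input_destination': 'ps02.example.org'},
#             'datapoints': {'1481104380': 0.0}}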
def commitData(self):
'''
Iterates through all object in the temporary buffer and commit objects to DB
if both packet-loss-rate and one-way-delay values are set.
Objects in the buffer older than self.bufferTimeout seconds which still have
missing data are removed permanently (a warning is issued).
'''
now = datetime.now()
removed = False
for key, value in self.buffer.items():
result = value['object'].checkValues()
if not result['OK']:
if (now - value['addTime']).total_seconds() > self.bufferTimeout:
del self.buffer[key]
self.removedCount += 1
removed = True
else:
value['object'].delayedCommit()
del self.buffer[key]
self.insertedCount += 1
if removed:
self.log.warn('Network accounting object(s) have been removed because of missing data')
return S_OK()
def showStatistics(self):
''' Display different statistics as info messages in the log file.
'''
self.log.info("\tReceived messages: %s" % self.messagesCount)
self.log.info("\tSkipped messages: %s" % self.skippedMessagesCount)
self.log.info("\tPacket-Loss-Rate metrics: %s" % self.PLRMetricCount)
self.log.info("\tOne-Way-Delay metrics: %s" % self.OWDMetricCount)
self.log.info("\tSkipped metrics: %s" % self.skippedMetricCount)
self.log.info("")
self.log.info("\tObjects in the buffer: %s" % len(self.buffer))
self.log.info("\tObjects inserted to DB: %s" % self.insertedCount)
self.log.info("\tPermanently removed objects: %s" % self.removedCount)
return S_OK()
|
fstagni/DIRAC
|
AccountingSystem/Agent/NetworkAgent.py
|
Python
|
gpl-3.0
| 9,794
|
[
"DIRAC"
] |
73847ff85e8d9d44994e634766302d87997d14b5de383b11668ab1f52facda5e
|
# Copyright 2012 Brian Waldon
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Self-validating model for arbitrary objects"""
import copy
import warnings
import jsonpatch
import jsonschema
from . import exceptions
class Model(dict):
def __init__(self, *args, **kwargs):
# we overload setattr so set this manually
d = dict(*args, **kwargs)
try:
self.validate(d)
except exceptions.ValidationError as exc:
raise ValueError(str(exc))
else:
dict.__init__(self, d)
self.__dict__["changes"] = {}
self.__dict__["__original__"] = copy.deepcopy(d)
def __setitem__(self, key, value):
mutation = dict(self.items())
mutation[key] = value
try:
self.validate(mutation)
except exceptions.ValidationError as exc:
msg = "Unable to set '%s' to %r. Reason: %s" % (key, value, str(exc))
raise exceptions.InvalidOperation(msg)
dict.__setitem__(self, key, value)
self.__dict__["changes"][key] = value
def __delitem__(self, key):
mutation = dict(self.items())
del mutation[key]
try:
self.validate(mutation)
except exceptions.ValidationError as exc:
msg = "Unable to delete attribute '%s'. Reason: %s" % (key, str(exc))
raise exceptions.InvalidOperation(msg)
dict.__delitem__(self, key)
def __getattr__(self, key):
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError(key)
def __setattr__(self, key, value):
self.__setitem__(key, value)
def __delattr__(self, key):
self.__delitem__(key)
# BEGIN dict compatibility methods
def clear(self):
raise exceptions.InvalidOperation()
def pop(self, key, default=None):
raise exceptions.InvalidOperation()
def popitem(self):
raise exceptions.InvalidOperation()
def copy(self):
return copy.deepcopy(dict(self))
def __copy__(self):
return self.copy()
def __deepcopy__(self, memo):
return copy.deepcopy(dict(self), memo)
def update(self, other):
mutation = dict(self.items())
mutation.update(other)
try:
self.validate(mutation)
except exceptions.ValidationError as exc:
raise exceptions.InvalidOperation(str(exc))
dict.update(self, other)
def items(self):
return copy.deepcopy(dict(self)).items()
def values(self):
return copy.deepcopy(dict(self)).values()
# END dict compatibility methods
@property
def patch(self):
"""Return a jsonpatch object representing the delta"""
original = self.__dict__["__original__"]
return jsonpatch.make_patch(original, dict(self)).to_string()
@property
def changes(self):
"""Dumber version of 'patch' method"""
deprecation_msg = "Model.changes will be removed in warlock v2"
warnings.warn(deprecation_msg, DeprecationWarning, stacklevel=2)
return copy.deepcopy(self.__dict__["changes"])
def validate(self, obj):
"""Apply a JSON schema to an object"""
try:
self.validator_instance.validate(obj)
except jsonschema.ValidationError as exc:
raise exceptions.ValidationError(str(exc))
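# Illustrative use (hedged: warlock's model_factory lives outside this module and
# is assumed here; the schema and field names are invented):
#
#     import warlock
#     Widget = warlock.model_factory({
#         'name': 'Widget',
#         'properties': {'name': {'type': 'string'}, 'size': {'type': 'integer'}}})
#     w = Widget(name='bolt', size=3)
#     w.size = 4        # re-validated on assignment
#     print(w.patch)    # JSON Patch string describing the change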
|
bcwaldon/warlock
|
warlock/model.py
|
Python
|
apache-2.0
| 3,900
|
[
"Brian"
] |
efd3aae27edd64400cc8ca5db540606450a72e0639f350feb2849bb67c6a5d22
|
#
# Copyright (C) 2000 greg Landrum
#
""" unit tests for the Neural network trainer implementation
this basically works out **all** of the network code
"""
import unittest
from rdkit.ML.Neural.ActFuncs import Sigmoid, TanH
from rdkit.ML.Neural.NetNode import NetNode
from rdkit.ML.Neural.Network import Network
class TestCaseActFuncs(unittest.TestCase):
def test_Sigmoid(self):
f = Sigmoid()
self.assertAlmostEqual(f(0), 0.5)
self.assertAlmostEqual(f(0), f.Eval(0))
self.assertAlmostEqual(f.Deriv(0), 0.25)
self.assertAlmostEqual(f(1), 1.0 - f(-1))
self.assertAlmostEqual(f(2), 1.0 - f(-2))
self.assertAlmostEqual(f.Deriv(1), f.Deriv(-1))
self.assertAlmostEqual(f.Deriv(2), f.Deriv(-2))
self.assertLess(f(1), f(2))
self.assertLess(f.Deriv(2), f.Deriv(1))
self.assertAlmostEqual(f.Deriv(1), f.DerivFromVal(f(1)))
def test_TanH(self):
f = TanH()
self.assertAlmostEqual(f(0), 0.0)
self.assertAlmostEqual(f(0), f.Eval(0))
self.assertAlmostEqual(f.Deriv(0), 1.0)
self.assertAlmostEqual(f(1), -f(-1))
self.assertAlmostEqual(f(2), -f(-2))
self.assertAlmostEqual(f.Deriv(1), f.Deriv(-1))
self.assertAlmostEqual(f.Deriv(2), f.Deriv(-2))
self.assertLess(f(1), f(2))
self.assertLess(f.Deriv(2), f.Deriv(1))
self.assertAlmostEqual(f.Deriv(1), f.DerivFromVal(f(1)))
class TestCaseNetNode(unittest.TestCase):
def test_NetNode(self):
# A node without input always returns 1
nodeList = [None] * 2
node = NetNode(0, nodeList)
nodeList[0] = node
valVect = [None] * 2
self.assertEqual(node.Eval(valVect), 1)
self.assertEqual(valVect, [1, None])
node = NetNode(1, nodeList, inputNodes=[0], weights=[0.1])
self.assertRaises(AssertionError, node.SetWeights, [0, 1])
self.assertRaises(AssertionError, node.SetInputs, [0, 1])
class TestCaseNetwork(unittest.TestCase):
def test_Network(self):
nodeCounts = [2, 2, 1, 2]
net = Network(nodeCounts)
self.assertEqual(net.GetNumNodes(), 7)
self.assertEqual(len(net.GetAllNodes()), 7)
self.assertEqual(net.GetInputNodeList(), [0, 1])
self.assertEqual(net.GetHiddenLayerNodeList(0), [2, 3])
self.assertEqual(net.GetHiddenLayerNodeList(1), [4])
self.assertEqual(net.GetOutputNodeList(), [5, 6])
# We get a representation of the network
s = str(net)
self.assertIn('Network', s)
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
ptosco/rdkit
|
rdkit/ML/Neural/UnitTestOther.py
|
Python
|
bsd-3-clause
| 2,461
|
[
"RDKit"
] |
1fe712156a7ecd4d82a3bda2d10936eaa08ba6007c8ccd28023d1b0d791a2c48
|
#!/usr/bin/env python
from setuptools import setup, find_packages
import os
# Utility function to read the README file. Used for the long_description.
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='netcdf_extended_utils',
version='0.1',
description='Extended netcdf utils to modify NetCDF files',
long_description=read('README.rst'),
license='GPL v3',
author='Kristian Sebastian',
author_email='data.centre@socib.es',
url='',
packages=find_packages(),
package_data={'netcdf_extended_utils':['configuration/*']},
install_requires=[
'numpy>=1.8.1',
'netCDF4>=1.0.8'
]
)
|
socib/netcdf_extended_utils
|
setup.py
|
Python
|
gpl-3.0
| 689
|
[
"NetCDF"
] |
4a86a94e157ea6f95f57421acafb4ae5e9aeda628cbce00e827ff2c621d726ee
|
"""
Base implementation of the Page Object pattern.
See https://code.google.com/p/selenium/wiki/PageObjects
"""
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import defaultdict, namedtuple
from functools import wraps
from contextlib import contextmanager
import json
import logging
import os
import socket
from textwrap import dedent
import urlparse
import requests
from selenium.common.exceptions import WebDriverException
from .query import BrowserQuery
from .promise import Promise, EmptyPromise, BrokenPromise
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
AXS_FILE = os.path.join(os.path.split(CUR_DIR)[0], 'bok_choy/vendor/google/axs_testing.js')
AuditResults = namedtuple('AuditResults', 'errors, warnings')
class WrongPageError(Exception):
"""
The page object reports that we're on the wrong page!
"""
pass
class PageLoadError(Exception):
"""
An error occurred while loading the page.
"""
pass
class AccessibilityError(Exception):
"""
The page violates one or more accessibility rules.
"""
pass
def unguarded(method):
"""
Mark a PageObject method as unguarded.
Unguarded methods don't verify that the PageObject is
on the current browser page before they execute
Args:
method (callable): The method to decorate.
Returns:
Decorated method
"""
method._unguarded = True # pylint: disable=protected-access
return method
def pre_verify(method):
"""
Decorator that calls self._verify_page() before executing the decorated method
Args:
method (callable): The method to decorate.
Returns:
Decorated method
"""
@wraps(method)
def wrapper(self, *args, **kwargs): # pylint: disable=missing-docstring
self._verify_page() # pylint: disable=protected-access
return method(self, *args, **kwargs)
return wrapper
class _PageObjectMetaclass(ABCMeta):
"""
Decorates any callable attributes of the class
so that they call self._verify_page() before executing.
Excludes any methods marked as unguarded with the @unguarded
decorator, any methods starting with _, or in the list ALWAYS_UNGUARDED.
"""
ALWAYS_UNGUARDED = ['url', 'is_browser_on_page']
def __new__(mcs, cls_name, cls_bases, cls_attrs):
for name, attr in cls_attrs.items():
# Skip methods marked as unguarded
if getattr(attr, '_unguarded', False) or name in mcs.ALWAYS_UNGUARDED:
continue
# Skip private methods
if name.startswith('_'):
continue
# Skip class attributes that are classes themselves
if isinstance(attr, type):
continue
is_property = isinstance(attr, property)
# Skip non-callable attributes
if not (callable(attr) or is_property):
continue
if is_property:
# For properties, wrap each of the sub-methods separately
property_methods = defaultdict(None)
for fn_name in ('fdel', 'fset', 'fget'):
prop_fn = getattr(cls_attrs[name], fn_name, None)
if prop_fn is not None:
# Check for unguarded properties
if getattr(prop_fn, '_unguarded', False):
property_methods[fn_name] = prop_fn
else:
property_methods[fn_name] = pre_verify(prop_fn)
cls_attrs[name] = property(**property_methods)
else:
cls_attrs[name] = pre_verify(attr)
return super(_PageObjectMetaclass, mcs).__new__(mcs, cls_name, cls_bases, cls_attrs)
class PageObject(object):
"""
Encapsulates user interactions with a specific part
of a web application.
The most important thing is this:
Page objects encapsulate Selenium.
If you find yourself writing CSS selectors in tests,
manipulating forms, or otherwise interacting directly
with the web UI, stop!
Instead, put these in a :class:`PageObject` subclass :)
PageObjects do their best to verify that they are only
used when the browser is on a page containing the object.
To do this, they will call :meth:`is_browser_on_page` before executing
any of their methods, and raise a :class:`WrongPageError` if the
browser isn't on the correct page.
Generally, this is the right behavior. However, at times it
will be useful to not verify the page before executing a method.
In those cases, the method can be marked with the :func:`unguarded`
decorator. Additionally, private methods (those beginning with `_`)
are always unguarded.
Class or instance properties are never guarded. However, methods
marked with the :func:`property` decorator are candidates for being guarded.
To make them unguarded, you must mark the getter, setter, and deleter
as :func:`unguarded` separately, and those decorators must be applied before
the :func:`property` decorator.
Correct::
@property
@unguarded
def foo(self):
return self._foo
Incorrect::
@unguarded
@property
def foo(self):
return self._foo
"""
__metaclass__ = _PageObjectMetaclass
def __init__(self, browser):
"""
Initialize the page object to use the specified browser instance.
Args:
browser (selenium.webdriver): The Selenium-controlled browser.
Returns:
PageObject
"""
self.browser = browser
flag = os.environ.get('VERIFY_ACCESSIBILITY', 'False')
self.verify_accessibility = flag.lower() == 'true'
@abstractmethod
def is_browser_on_page(self):
"""
Check that we are on the right page in the browser.
The specific check will vary from page to page,
but usually this amounts to checking the:
1) browser URL
2) page title
3) page headings
Returns:
A `bool` indicating whether the browser is on the correct page.
"""
return False
@abstractproperty
def url(self):
"""
Return the URL of the page. This may be dynamic,
determined by configuration options passed to the
page object's constructor.
Some pages may not be directly accessible:
perhaps the page object represents a "navigation"
component that occurs on multiple pages.
If this is the case, subclasses can return `None`
to indicate that you can't directly visit the page object.
"""
return None
@unguarded
def warning(self, msg):
"""
Subclasses call this to indicate that something unexpected
occurred while interacting with the page.
Page objects themselves should never make assertions or
raise exceptions, but they can issue warnings to make
tests easier to debug.
Args:
msg (str): The message to log as a warning.
Returns:
None
"""
log = logging.getLogger(self.__class__.__name__)
log.warning(msg)
@unguarded
def visit(self):
"""
Open the page containing this page object in the browser.
Some page objects may not provide a URL, in which case
a `NotImplementedError` will be raised.
Raises:
PageLoadError: The page did not load successfully.
NotImplementedError: The page object does not provide a URL to visit.
Returns:
PageObject
"""
if self.url is None:
raise NotImplementedError("Page {} does not provide a URL to visit.".format(self))
# Validate the URL
if not self.validate_url(self.url):
raise PageLoadError("Invalid URL: '{}'".format(self.url))
# Visit the URL
try:
self.browser.get(self.url)
except (WebDriverException, socket.gaierror):
raise PageLoadError("Could not load page '{!r}' at URL '{}'".format(
self, self.url
))
# Give the browser enough time to get to the page, then return the page object
# so that the caller can chain the call with an action:
# Example: FooPage.visit().do_something()
#
# A BrokenPromise will be raised if the page object's is_browser_on_page method
# does not return True before timing out.
try:
return self.wait_for_page()
except BrokenPromise:
raise PageLoadError("Timed out waiting to load page '{!r}' at URL '{}'".format(
self, self.url
))
@classmethod
@unguarded
def validate_url(cls, url):
"""
Return a boolean indicating whether the URL has a protocol and hostname.
If a port is specified, ensure it is an integer.
Arguments:
url (str): The URL to check.
Returns:
Boolean indicating whether the URL has a protocol and hostname.
"""
result = urlparse.urlsplit(url)
# Check that we have a protocol and hostname
if not result.scheme or not result.netloc:
return False
# Check that the port is an integer
try:
if result.port is not None:
int(result.port)
elif result.netloc.endswith(':'):
# Valid URLs do not end with colons.
return False
except ValueError:
return False
else:
return True
def _verify_page(self):
"""
Ask the page object if we're on the right page;
if not, raise a `WrongPageError`.
"""
if not self.is_browser_on_page():
msg = "Not on the correct page to use '{!r}' at URL '{}'".format(
self, self.url
)
raise WrongPageError(msg)
@unguarded
def wait_for_page(self, timeout=30):
"""
Block until the page loads, then returns the page.
Useful for ensuring that we navigate successfully to a particular page.
Keyword Args:
timeout (int): The number of seconds to wait for the page before timing out with an exception.
Raises:
BrokenPromise: The timeout is exceeded without the page loading successfully.
"""
result = Promise(
lambda: (self.is_browser_on_page(), self), "loaded page {!r}".format(self),
timeout=timeout
).fulfill()
if self.verify_accessibility:
self._check_for_accessibility_errors()
return result
@unguarded
def q(self, **kwargs): # pylint: disable=invalid-name
"""
Construct a query on the browser.
Example usages:
.. code:: python
self.q(css="div.foo").first.click()
self.q(xpath="/foo/bar").text
Keyword Args:
css: A CSS selector.
xpath: An XPath selector.
Returns:
BrowserQuery
"""
return BrowserQuery(self.browser, **kwargs)
@contextmanager
def handle_alert(self, confirm=True):
"""
Context manager that ensures alerts are dismissed.
Example usage:
.. code:: python
with self.handle_alert():
self.q(css='input.submit-button').first.click()
Keyword Args:
confirm (bool): Whether to confirm or cancel the alert.
Returns:
None
"""
# Before executing the `with` block, stub the confirm/alert functions
script = dedent("""
window.confirm = function() {{ return {0}; }};
window.alert = function() {{ return; }};
""".format("true" if confirm else "false")).strip()
self.browser.execute_script(script)
# Execute the `with` block
yield
@unguarded
def wait_for_ajax(self):
"""
Wait for all ajax requests to finish.
Example usage:
.. code:: python
self.q(css='input#email').fill("foo")
self.wait_for_ajax()
Returns:
None
"""
def _is_ajax_finished():
"""
Check if all the ajax calls on the current page have completed.
"""
return self.browser.execute_script("return jQuery.active") == 0
EmptyPromise(_is_ajax_finished, "Finished waiting for ajax requests.").fulfill()
@unguarded
def wait_for(self, promise_check_func, description, result=False, timeout=60):
"""
Calls the method provided as an argument until the Promise is satisfied or a BrokenPromise is raised.
Arguments:
promise_check_func (callable):
* If `result` is False Then
Function that accepts no arguments and returns a boolean indicating whether the promise is fulfilled
* If `result` is True Then
Function that accepts no arguments and returns a `(is_satisfied, result)` tuple,
where `is_satisfied` is a boolean indicating whether the promise was satisfied, and `result`
is a value to return from the fulfilled `Promise`
description (str): Description of the Promise, used in log messages
result (bool): Indicates whether the fulfilled value should be returned
timeout (float): Maximum number of seconds to wait for the Promise to be satisfied before timing out
Raises:
BrokenPromise: the `Promise` was not satisfied
"""
if result:
return Promise(promise_check_func, description, timeout=timeout).fulfill()
else:
return EmptyPromise(promise_check_func, description, timeout=timeout).fulfill()
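# Illustrative calls (hedged: the CSS selectors are invented):
#
#     self.wait_for(lambda: self.q(css='.spinner').invisible, 'spinner gone')
#     texts = self.wait_for(
#         lambda: (self.q(css='.item').present, self.q(css='.item').text),
#         'items rendered', result=True)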
@unguarded
def wait_for_element_presence(self, element_selector, description, timeout=60):
"""
Waits for element specified by `element_selector` to be present in DOM.
Example usage:
.. code:: python
self.wait_for_element_presence('.submit', 'Submit Button is Present')
Arguments:
element_selector (str): css selector of the element.
description (str): Description of the Promise, used in log messages.
timeout (float): Maximum number of seconds to wait for the Promise to be satisfied before timing out
"""
self.wait_for(lambda: self.q(css=element_selector).present, description=description, timeout=timeout)
@unguarded
def wait_for_element_absence(self, element_selector, description, timeout=60):
"""
Waits for element specified by `element_selector` until it disappears from DOM.
Example usage:
.. code:: python
self.wait_for_element_absence('.submit', 'Submit Button is not Present')
Arguments:
element_selector (str): css selector of the element.
description (str): Description of the Promise, used in log messages.
timeout (float): Maximum number of seconds to wait for the Promise to be satisfied before timing out
"""
self.wait_for(lambda: not self.q(css=element_selector).present, description=description, timeout=timeout)
@unguarded
def wait_for_element_visibility(self, element_selector, description, timeout=60):
"""
Waits for element specified by `element_selector` until it is displayed on web page.
Example usage:
.. code:: python
self.wait_for_element_visibility('.submit', 'Submit Button is Visible')
Arguments:
element_selector (str): css selector of the element.
description (str): Description of the Promise, used in log messages.
timeout (float): Maximum number of seconds to wait for the Promise to be satisfied before timing out
"""
self.wait_for(lambda: self.q(css=element_selector).visible, description=description, timeout=timeout)
@unguarded
def wait_for_element_invisibility(self, element_selector, description, timeout=60):
"""
Waits for element specified by `element_selector` until it disappears from the web page.
Example usage:
.. code:: python
self.wait_for_element_invisibility('.submit', 'Submit Button Disappeared')
Arguments:
element_selector (str): css selector of the element.
description (str): Description of the Promise, used in log messages.
timeout (float): Maximum number of seconds to wait for the Promise to be satisfied before timing out
"""
self.wait_for(lambda: self.q(css=element_selector).invisible, description=description, timeout=timeout)
def axs_audit_rules_to_run(self):
"""
List of rules to check for accessibility errors on the page.
See https://github.com/GoogleChrome/accessibility-developer-tools/tree/master/src/audits
E.g. return ['badAriaAttributeValue']
An empty list means to check for all available rules.
None means that no audit should be done for this page.
"""
return []
def axs_audit_rules_to_ignore(self):
"""
List of rules to ignore for accessibility errors on the page.
See https://github.com/GoogleChrome/accessibility-developer-tools/tree/master/src/audits
E.g. return ['badAriaAttributeValue']
An empty list means to run rules as defined by axs_audit_rules_to_run.
Otherwise, if rules are listed here, they will be ignored even if
they are specified in axs_audit_rules_to_run.
"""
return []
def axs_scope(self):
"""
The "start point" for the audit: the element which contains the portion of
the page which should be audited.
E.g. return 'document.querySelector("div#foo")'
Defaults to using the document as the scope.
"""
return 'null'
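# Illustrative sketch (not part of the original class): a hypothetical page
# object that limits the audit to a '#main' container and skips one rule.
#
#   class MyPage(PageObject):
#       url = 'http://localhost:8000/main'
#
#       def is_browser_on_page(self):
#           return self.q(css='#main').present
#
#       def axs_audit_rules_to_ignore(self):
#           return ['badAriaAttributeValue']
#
#       def axs_scope(self):
#           return 'document.querySelector("#main")'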
def do_axs_audit(self):
"""
Use Google's Accessibility Developer Tools to audit the
page for accessibility problems.
See https://github.com/GoogleChrome/accessibility-developer-tools
Since this needs to inject JavaScript into the browser page, the only
known way to do this is to use PhantomJS as your browser.
Raises:
NotImplementedError if you are not using PhantomJS
RuntimeError if there was a problem with the injected JS or getting the report
Returns:
A list (one for each browser session) of namedtuples with 'errors' and 'warnings'
fields whose values are the errors and warnings returned from the audit.
None if the page object has no rules defined to check.
"""
if self.browser.name != 'phantomjs':
msg = 'Accessibility auditing is only supported with PhantomJS as the browser.'
raise NotImplementedError(msg)
if not os.path.isfile(AXS_FILE):
msg = 'Could not find the accessibility tools JS file: {}'.format(AXS_FILE)
raise RuntimeError(msg)
rules = self.axs_audit_rules_to_run()
if rules is None:
msg = 'No accessibility rules were specified to check for this page: {}'.format(self)
self.warning(msg)
return None
# The ghostdriver URL will be something like this: 'http://localhost:33225/wd/hub'
ghostdriver_url = self.browser.service.service_url
# Get the session_id from ghostdriver so that we can inject JS into the page.
resp = requests.get('{}/sessions'.format(ghostdriver_url))
sessions = resp.json()
# report is the list that is returned, with one item for each browser session
report = []
for session in sessions.get('value'):
session_id = session.get('id')
# First make sure you can successfully inject the JS on the page
script = dedent("""
return this.injectJs("{file}");
""".format(file=AXS_FILE))
payload = {"script": script, "args": []}
resp = requests.post('{}/session/{}/phantom/execute'.format(
ghostdriver_url, session_id), data=json.dumps(payload))
result = resp.json().get('value')
if result is False:
msg = '{msg} \nScript:{script} \nResponse:{response}'.format(
msg='Failure injecting the Accessibility Audit JS on the page.',
script=script,
response=resp.text)
raise RuntimeError(msg)
# This line will only be included in the script if rules to check on this page
# are specified, as the default behavior of the js is to run all rules.
if len(rules) > 0:
rules_config = "auditConfig.auditRulesToRun = {rules};".format(
rules=rules)
else:
rules_config = ""
ignored_rules = self.axs_audit_rules_to_ignore()
if ignored_rules:
rules_config += (
"\nauditConfig.auditRulesToIgnore = {rules};".format(
rules=ignored_rules
)
)
script = dedent("""
return this.evaluate(function() {{
var auditConfig = new axs.AuditConfiguration();
{rules_config}
auditConfig.scope = {scope};
var run_results = axs.Audit.run(auditConfig);
var audit_results = axs.Audit.auditResults(run_results)
return audit_results;
}});
""".format(rules_config=rules_config, scope=self.axs_scope()))
payload = {"script": script, "args": []}
resp = requests.post('{}/session/{}/phantom/execute'.format(
ghostdriver_url, session_id), data=json.dumps(payload))
result = resp.json().get('value')
if result is None:
msg = '{} {} \nScript:{} \nResponse:{}'.format(
'No results were returned by the audit report.',
'Perhaps there was a problem with the rules or scope defined for this page.',
script,
resp.text)
raise RuntimeError(msg)
# audit_results is report of accessibility errors for that session
audit_results = AuditResults(errors=result.get('errors_'), warnings=result.get('warnings_'))
report.append(audit_results)
return report
def _check_for_accessibility_errors(self):
"""
Parse the results of an axs_audit and raise a single exception
if there are violations.
Note that an exception is raised only for errors, not for warnings.
Returns:
None
Raises:
AccessibilityError
"""
errors = []
audit = self.do_axs_audit()
for session_result in audit:
if session_result:
if len(session_result.errors) > 0:
errors.extend(session_result.errors)
num_errors = len(errors)
if num_errors > 0:
msg = "URL '{}' has {} errors: {}".format(self.url, num_errors, ", ".join(errors))
raise AccessibilityError(msg)
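# Illustrative usage sketch (not part of the original module): a typical
# accessibility check in a test, assuming a PhantomJS-backed PageObject
# subclass instance `page`:
#
#   page.visit()
#   report = page.do_axs_audit()
#   for session_result in report:
#       assert len(session_result.errors) == 0, session_result.errors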
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/bok_choy/page_object.py
|
Python
|
agpl-3.0
| 23,649
|
[
"VisIt"
] |
1a9e87ed0c96621dcedc7fee8fa85e26b31785b85ea9415d1cce12eb8295c46d
|
#!/usr/bin/env python
import argparse
import collections
import datetime
import itertools
import logging
import os
import sys
import HTSeq as hts
def assess_bundle(bundle, features):
"""
Takes a bundle of (potentially multiply mapped) paired-end read pairs and counts
how many features overlap each aligned pair.
"""
counts = collections.Counter()
for (p1,p2) in bundle:
if not p1 or not p2: # ie, one of the mate pairs is missing
counts[ "_unmapped" ] += 1
continue
elif not (p1.aligned and p2.aligned): # mate pairs present, but unaligned
counts[ "_unmapped" ] += 1
continue
# collect all genes that map to this alignment
gene_ids = set()
for iv, val in features[ p1.iv ].steps():
gene_ids |= val
for iv, val in features[ p2.iv ].steps():
gene_ids |= val
# evaluate:
if len(gene_ids) == 1:
counts[ list(gene_ids)[0] ] += 1
elif len(gene_ids) == 0: # TODO: test that mate pairs are matching features!
counts[ "_no_feature" ] += 1
else:
counts[ "_ambiguous" ] += 1
return counts
def ungapped_se_counter(sam_reader, feature_array):
"""
classic alignment counter for ungapped single end reads
"""
counts = collections.Counter( )
for almnt in sam_reader:
if not almnt.aligned:
counts[ "_unmapped" ] += 1
continue
gene_ids = set()
for iv, val in feature_array[ almnt.iv ].steps():
gene_ids |= val
if len(gene_ids) == 1:
gene_id = list(gene_ids)[0]
counts[ gene_id ] += 1
elif len(gene_ids) == 0:
counts[ "_no_feature" ] += 1
else:
counts[ "_ambiguous" ] += 1
return counts
def ungapped_pe_counter(sam_reader, feature_array):
counts = collections.Counter( )
pair_iterator = hts.pair_SAM_alignments( sam_reader, bundle=True )
# bundle puts all multiply-mapped pairs together.
t0 = datetime.datetime.now()
for ic, bundle in enumerate(pair_iterator):
# report progress (to prove that it is still alive):
if ic % 1000000 == 0:
t1 = datetime.datetime.now()
print "\r%d read bundles counted in %s\r" % (ic, t1-t0)
sys.stdout.flush()
if bundle == []: # first bundle for some reason is always an empty list
continue
bcounts = assess_bundle(bundle, feature_array)
"""
To evaluate the multiply mapped bundles, each pair in a bundle must still ALWAYS
and ONLY map to a single feature. Thus, every aligned pair has come from the same
feature (gene), and this bundle counts as evidence of one read for this gene.
If any of the read pairs maps to a different gene, or no gene, or multiple genes,
then the bundle is considered ambiguous.
If all pairs in a bundle map as _no_feature, _unmapped or _ambiguous, then the
bundle counts as one count towards this feature type. (ie, it is passed on to
the final counter to increment by 1).
"""
if len(bcounts) > 1: # ie, is a multiply mapped feature with multiple gene mappings
counts[ "_ambiguous" ] += 1
continue
elif len(bcounts) == 0: # uh oh! There is an error somewhere.
print "#" * 40
print "Error! bundle was not assigned any status"
print "Contents of bundle:"
print bundle
continue
else:
counts[ bcounts.keys()[0] ] += 1
return counts
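# Illustrative sketch (not part of the original script) of how the bundle
# decision above collapses multi-mapped read pairs into a single count;
# the gene IDs are made up:
#
#   bcounts = collections.Counter({'geneA': 3})
#   # every pair hit only geneA -> counts['geneA'] += 1
#   bcounts = collections.Counter({'geneA': 2, '_no_feature': 1})
#   # len(bcounts) > 1 -> counts['_ambiguous'] += 1
#   bcounts = collections.Counter({'_unmapped': 3})
#   # single key -> counts['_unmapped'] += 1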
parser = argparse.ArgumentParser(description=
"""An HTSeq script to count paired-end reads that multiply map, for making
inferences about gene counts in a transcriptome assembly""")
### input options ###
parser.add_argument("-o", "--output", type=str, default='htseq_transcriptome',
help="""specify the filename base to save results to [default is
'htseq_transcriptome_counts'""")
parser.add_argument("-d", "--directory", type=str, default=os.getcwd(),
help="""specify the directory to save results to [default is current
working directory""")
# data file options:
parser.add_argument('alignment_file', nargs=1, type=str,
help="""The <alignment_file> contains the aligned reads in the SAM
format. (Note that the SAMtools contain Perl scripts to convert most
alignment formats to SAM.)""")
parser.add_argument('gtf_file', nargs=1, type=str,
help="""The <gtf_file> contains the features in the GFF format.""")
parser.add_argument("-f", "--format", type=str, choices=['sam', 'bam'],
help="""Format of the input data. Possible values are sam
(for text SAM files) or bam (for binary BAM files). Default is sam""")
parser.add_argument("-r", "--order", type=str,
help="""
For paired-end data, the alignments have to be sorted either by
read name or by alignment position. If your data is not sorted, use
samtools sort to sort it. Use this option,
with name or pos for <order> to indicate how the input data has been
sorted. The default is name.
If name is indicated, htseq-count expects all the alignments for the
reads of a given read pair to appear in adjacent records in the input
data. For pos, this is not expected; rather, read alignments whose
mate alignment have not yet been seen are kept in a buffer in memory
until the mate is found. While, strictly speaking, the latter will
also work with unsorted data, sorting ensures that most alignment
mates appear close to each other in the data and hence the buffer
is much less likely to overflow.""")
parser.add_argument("-s", "--stranded", choices=['yes', 'no', 'reverse'], type=str,
default="yes",
help="""
whether the data is from a strand-specific assay (default: yes)
For stranded=no, a read is considered overlapping with a feature
regardless of whether it is mapped to the same or the opposite strand
as the feature. For stranded=yes and single-end reads, the read has to
be mapped to the same strand as the feature. For paired-end reads,
the first read has to be on the same strand and the second read on
the opposite strand. For stranded=reverse, these rules are reversed.
""")
parser.add_argument("-t", "--type", type=str, default='exon',
help="""feature type (3rd column in GFF file) to be used, all features
of other type are ignored (default, suitable for RNA-Seq analysis
using an Ensembl GTF file: exon)""")
parser.add_argument("-i", "--idattr", type=str, default='gene_id',
help="""GFF attribute to be used as feature ID. Several GFF lines with
the same feature ID will be considered as parts of the same feature.
The feature ID is used to identify the counts in the output table.
The default, suitable for RNA-Seq analysis using an Ensembl GTF file,
is gene_id.""")
parser.add_argument("-y", "--read_type", choices=['single_end', 'paired_end'], type=str,
default="paired_end",
help="""
whether the data is from a single-end read library, or a paired-end
library. Default is paired-end.
""")
args = parser.parse_args()
# define some test files:
samfile = '/home/antqueen/booster/PRO_Odontomachus/trinity_denovo_normalized_camponotus/Star/Cplan_Q2_16Aligned.out.sam'
gtffile = '/home/antqueen/genomics/experiments/analyses/PRO20160405_camponotus/trinity_denovo_normalized_camponotus/Transdecoder_ss/merge_genesets/Cpla_td_gff.Apr21_11.15.families.gtf'
# create gtf iterator
print "\nReading gtf file %s..." % (args.gtf_file[0]),
gtf = hts.GFF_Reader(args.gtf_file[0])
print " done."
# create genomic array and populate with exon features (transcripts and genes)
print "Populating genomic array with GTF features...",
sys.stdout.flush()
if args.stranded == 'no':
feature_array = hts.GenomicArrayOfSets( "auto", stranded=False)
else:
# 'yes' and 'reverse' both require strand-aware feature lookup
feature_array = hts.GenomicArrayOfSets( "auto", stranded=True)
for feature in gtf:
if feature.type == args.type:
feature_array[ feature.iv ] += feature.name
print "done.\n\n"
# create Reader class for samfile:
if args.format == 'sam':
alnmt_file = hts.SAM_Reader(args.alignment_file[0])
else:
alnmt_file = hts.BAM_Reader(args.alignment_file[0])
# count reads:
print "Counting reads..."
if args.read_type == 'single_end':
counts = ungapped_se_counter(alnmt_file, feature_array)
print "\nSample output for ungapped SE counts:"
countlist = sorted(counts.items())
for g, c in countlist[-10:]:
print "%-10s %d" % (g, c)
else:
counts = ungapped_pe_counter(alnmt_file, feature_array)
print "\nSample output for ungapped PE counts:"
countlist = sorted(counts.items())
for g, c in countlist[-10:]:
print "%-10s %d" % (g, c)
# output counts to file:
print "Saving gene counts to file...",
sys.stdout.flush()
outfile = args.output + "_counts.txt"
handle = open(outfile, 'w')
for g, c in countlist:
handle.write("%-20s %d\n" % (g, c))
handle.close()
print "done."
print "All done."
|
oxpeter/htseq-transcriptome
|
htseq-trancriptome.py
|
Python
|
gpl-3.0
| 9,943
|
[
"HTSeq"
] |
f35f952cbc49476fde1f22f459b9000ebf6855babc4b2329ec7fde695d748991
|
""" Test for various numpy_interface modules. Main goal is to test
parallel algorithms in vtk.numpy_interface.algorithms."""
from __future__ import print_function
import sys
try:
import numpy
except ImportError:
print("Numpy (http://numpy.scipy.org) not found.", end=' ')
print("This test requires numpy!")
sys.exit(0)
import vtk
from vtk.test import Testing
import vtk.numpy_interface.dataset_adapter as dsa
import vtk.numpy_interface.algorithms as algs
from mpi4py import MPI
c = vtk.vtkMPIController()
#c.SetGlobalController(None)
rank = c.GetLocalProcessId()
size = c.GetNumberOfProcesses()
def PRINT(text, values):
if values is dsa.NoneArray:
values = numpy.array(0, dtype=numpy.float64)
else:
values = numpy.array(numpy.sum(values)).astype(numpy.float64)
res = numpy.array(values)
MPI.COMM_WORLD.Allreduce([values, MPI.DOUBLE], [res, MPI.DOUBLE], MPI.SUM)
assert numpy.abs(res) < 1E-5
if rank == 0:
print(text, res)
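# Note (illustrative, not part of the original test): PRINT sums the per-rank
# scalar difference with an MPI Allreduce and asserts the result is ~0, so a
# call such as
#
#   PRINT("rtData min:", algs.min(rtData) - numpy.min(rtData2))
#
# passes only when the distributed minimum agrees with the serial minimum on
# every rank.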
def testArrays(rtData, rtData2, grad, grad2, total_npts):
" Test various parallel algorithms."
if rank == 0:
print('-----------------------')
PRINT( "SUM ones:", algs.sum(rtData / rtData) - total_npts )
PRINT( "SUM sin:", (algs.sum(algs.sin(rtData) + 1) - numpy.sum(numpy.sin(rtData2) + 1)) / numpy.sum(numpy.sin(rtData2) + 1) )
PRINT( "rtData min:", algs.min(rtData) - numpy.min(rtData2) )
PRINT( "rtData max:", algs.max(rtData) - numpy.max(rtData2) )
PRINT( "rtData sum:", (algs.sum(rtData) - numpy.sum(rtData2)) / (2*numpy.sum(rtData2)) )
PRINT( "rtData mean:", (algs.mean(rtData) - numpy.mean(rtData2)) / (2*numpy.mean(rtData2)) )
PRINT( "rtData var:", (algs.var(rtData) - numpy.var(rtData2)) / numpy.var(rtData2) )
PRINT( "rtData std:", (algs.std(rtData) - numpy.std(rtData2)) / numpy.std(rtData2) )
PRINT( "grad min:", algs.min(grad) - numpy.min(grad2) )
PRINT( "grad max:", algs.max(grad) - numpy.max(grad2) )
PRINT( "grad min 0:", algs.min(grad, 0) - numpy.min(grad2, 0) )
PRINT( "grad max 0:", algs.max(grad, 0) - numpy.max(grad2, 0) )
PRINT( "grad min 1:", algs.sum(algs.min(grad, 1)) - numpy.sum(numpy.min(grad2, 1)) )
PRINT( "grad max 1:", algs.sum(algs.max(grad, 1)) - numpy.sum(numpy.max(grad2, 1)) )
PRINT( "grad sum 1:", algs.sum(algs.sum(grad, 1)) - numpy.sum(numpy.sum(grad2, 1)) )
PRINT( "grad var:", (algs.var(grad) - numpy.var(grad2)) / numpy.var(grad2) )
PRINT( "grad var 0:", (algs.var(grad, 0) - numpy.var(grad2, 0)) / numpy.var(grad2, 0) )
w = vtk.vtkRTAnalyticSource()
# Update with ghost level because gradient needs it
# to be piece independent
w.UpdatePiece(rank, size, 1)
print(w.GetOutput())
print(w.GetOutputInformation(0))
# The parallel arrays that we care about
ds = dsa.WrapDataObject(w.GetOutput())
rtData = ds.PointData['RTData']
grad = algs.gradient(rtData)
ds.PointData.append(grad, 'gradient')
# Crop any ghost points out
org_ext = w.GetOutput().GetExtent()
ext = list(org_ext)
wext = w.GetOutputInformation(0).Get(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT())
for i in range(3):
if ext[2*i] != wext[2*i]:
ext[2*i] = ext[2*i] + 2
if ext[2*i+1] != wext[2*i+1]:
ext[2*i+1] = ext[2*i+1] - 1
if ext != list(org_ext):
w.GetOutput().Crop(ext)
# Cropped arrays
rtData = ds.PointData['RTData']
grad = ds.PointData['gradient']
# The whole dataset so that we can compare
# against parallel algorithms.
w2 = vtk.vtkRTAnalyticSource()
w2.Update()
ds2 = dsa.WrapDataObject(w2.GetOutput())
rtData2 = ds2.PointData['RTData']
grad2 = algs.gradient(rtData2)
npts = numpy.array(numpy.int32(ds.GetNumberOfPoints()))
total_npts = numpy.array(npts)
MPI.COMM_WORLD.Allreduce([npts, MPI.INT], [total_npts, MPI.INT], MPI.SUM)
# Test simple distributed data.
testArrays(rtData, rtData2, grad, grad2, total_npts)
# Check that we can disable parallelism by using a dummy controller
# even when a global controller is set
assert algs.sum(rtData / rtData, controller=vtk.vtkDummyController()) != total_npts
# Test where arrays are NoneArray on one of the ranks.
if size > 1:
if rank == 0:
rtData3 = rtData2
grad3 = grad2
else:
rtData3 = dsa.NoneArray
grad3 = dsa.NoneArray
testArrays(rtData3, rtData2, grad3, grad2, total_npts)
# Test composite arrays
rtData3 = dsa.VTKCompositeDataArray([rtData, dsa.NoneArray])
grad3 = dsa.VTKCompositeDataArray([dsa.NoneArray, grad])
testArrays(rtData3, rtData2, grad3, grad2, total_npts)
# Test where arrays are NoneArray on one of the ranks
# and composite on others.
if size > 1:
if rank == 1:
rtData3 = dsa.VTKCompositeDataArray([rtData2])
grad3 = dsa.VTKCompositeDataArray([grad2])
else:
rtData3 = dsa.NoneArray
grad3 = dsa.NoneArray
testArrays(rtData3, rtData2, grad3, grad2, total_npts)
# Test composite arrays with multiple blocks.
# Split the local image to 2.
datasets = []
for i in range(2):
image = vtk.vtkImageData()
image.ShallowCopy(w.GetOutput())
t = vtk.vtkExtentTranslator()
wext = image.GetExtent()
t.SetWholeExtent(wext)
t.SetPiece(i)
t.SetNumberOfPieces(2)
t.PieceToExtent()
ext = list(t.GetExtent())
# Crop any ghost points out
for i in range(3):
if ext[2*i] != wext[2*i]:
ext[2*i] = ext[2*i] + 1
if ext != list(org_ext):
image.Crop(ext)
datasets.append(dsa.WrapDataObject(image))
rtData3 = dsa.VTKCompositeDataArray([datasets[0].PointData['RTData'], datasets[1].PointData['RTData']])
grad3 = dsa.VTKCompositeDataArray([datasets[0].PointData['gradient'], datasets[1].PointData['gradient']])
testArrays(rtData3, rtData2, grad3, grad2, total_npts)
# Test min/max per block
NUM_BLOCKS = 10
w = vtk.vtkRTAnalyticSource()
w.SetWholeExtent(0, 10, 0, 10, 0, 10)
w.Update()
c = vtk.vtkMultiBlockDataSet()
c.SetNumberOfBlocks(size*NUM_BLOCKS)
if rank == 0:
start = 0
end = NUM_BLOCKS
else:
start = rank*NUM_BLOCKS - 3
end = start + NUM_BLOCKS
for i in range(start, end):
a = vtk.vtkImageData()
a.ShallowCopy(w.GetOutput())
c.SetBlock(i, a)
if rank == 0:
c.SetBlock(NUM_BLOCKS - 1, vtk.vtkPolyData())
cdata = dsa.WrapDataObject(c)
rtdata = cdata.PointData['RTData']
rtdata = algs.abs(rtdata)
g = algs.gradient(rtdata)
g2 = algs.gradient(g)
res = True
dummy = vtk.vtkDummyController()
for axis in [None, 0]:
for array in [rtdata, g, g2]:
if rank == 0:
array2 = array/2
min = algs.min_per_block(array2, axis=axis)
res &= numpy.all(min.Arrays[NUM_BLOCKS - 1] == numpy.min(array, axis=axis))
all_min = algs.min(min, controller=dummy)
all_min_true = numpy.min([algs.min(array, controller=dummy), algs.min(array2, controller=dummy)])
res &= all_min == all_min_true
max = algs.max_per_block(array2, axis=axis)
res &= numpy.all(max.Arrays[NUM_BLOCKS - 1] == numpy.max(array, axis=axis))
all_max = algs.max(max, controller=dummy)
all_max_true = numpy.max([algs.max(array, controller=dummy), algs.max(array2, controller=dummy)])
res &= all_max == all_max_true
sum = algs.sum_per_block(array2, axis=axis)
sum_true = numpy.sum(array2.Arrays[0]) * (NUM_BLOCKS-1)
sum_true += numpy.sum(array.Arrays[0]) * 3
res &= numpy.sum(algs.sum(sum, controller=dummy) - algs.sum(sum_true, controller=dummy)) == 0
mean = algs.mean_per_block(array2, axis=axis)
res &= numpy.sum(mean.Arrays[0] - numpy.mean(array2.Arrays[0], axis=axis)) < 1E-6
if len(array.Arrays[0].shape) == 1:
stk = numpy.hstack
else:
stk = numpy.vstack
res &= numpy.sum(mean.Arrays[NUM_BLOCKS-2] - numpy.mean(stk((array.Arrays[0], array2.Arrays[0])), axis=axis)) < 1E-4
elif rank == 2:
min = algs.min_per_block(dsa.NoneArray, axis=axis)
max = algs.max_per_block(dsa.NoneArray, axis=axis)
sum = algs.sum_per_block(dsa.NoneArray, axis=axis)
mean = algs.mean_per_block(dsa.NoneArray, axis=axis)
else:
min = algs.min_per_block(array, axis=axis)
max = algs.max_per_block(array, axis=axis)
sum = algs.sum_per_block(array, axis=axis)
mean = algs.mean_per_block(array, axis=axis)
if array is g and axis == 0:
ug = algs.unstructured_from_composite_arrays(mean, [(mean, 'mean')])
if mean is dsa.NoneArray:
res &= ug.GetNumberOfPoints() == 0
else:
_array = ug.GetPointData().GetArray('mean')
ntuples = _array.GetNumberOfTuples()
for i in range(ntuples):
if rank == 1:
idx = i+3
else:
idx = i
res &= _array.GetTuple(i) == tuple(mean.Arrays[idx])
res &= algs.min_per_block(dsa.NoneArray) is dsa.NoneArray
if rank == 0:
min = algs.min_per_block(rtdata.Arrays[0]/2)
elif rank == 2:
min = algs.min_per_block(dsa.NoneArray)
res &= min is dsa.NoneArray
else:
min = algs.min_per_block(rtdata.Arrays[0])
if rank == 0:
min = algs.min(rtdata.Arrays[0])
res &= min == numpy.min(rtdata.Arrays[0])
else:
min = algs.min(dsa.NoneArray)
res &= min is dsa.NoneArray
res &= algs.min(dsa.NoneArray) is dsa.NoneArray
if rank == 0:
res &= numpy.all(algs.min(g2, axis=0) == numpy.min(g2.Arrays[0], axis=0))
else:
res &= algs.min(dsa.NoneArray, axis=0) is dsa.NoneArray
res = numpy.array(res, dtype=numpy.bool)
all_res = numpy.array(res)
mpitype = algs._lookup_mpi_type(numpy.bool)
MPI.COMM_WORLD.Allreduce([res, mpitype], [all_res, mpitype], MPI.LAND)
assert all_res
|
SimVascular/VTK
|
Parallel/MPI4Py/Testing/Python/TestParallelNumpy.py
|
Python
|
bsd-3-clause
| 9,846
|
[
"VTK"
] |
6ea097deffc2449c82971a03348fff73a88d19fcb774ad907060dcf9e923c4fb
|
# -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
"""Walks the abstract syntax tree and call visitor functions for every
node found. The visitor functions may return values which will be
forwarded by the `visit` method.
By default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
"""
def get_visitor(self, node):
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node, *args, **kwargs):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
def generic_visit(self, node, *args, **kwargs):
"""Called if no explicit visitor function exists for a node."""
for node in node.iter_child_nodes():
self.visit(node, *args, **kwargs)
class NodeTransformer(NodeVisitor):
"""Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None`, the node is removed
from its previous location; otherwise it is replaced with the return
value. The return value may be the original node, in which case no
replacement takes place.
"""
def generic_visit(self, node, *args, **kwargs):
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def visit_list(self, node, *args, **kwargs):
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
rv = [rv]
return rv
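# Illustrative sketch (not part of the original module): a visitor that
# collects the names of all `Name` nodes in a parsed template. The template
# source below is only an example.
#
#   from jinja2 import Environment
#
#   class NameCollector(NodeVisitor):
#       def __init__(self):
#           self.names = []
#
#       def visit_Name(self, node):
#           self.names.append(node.name)
#           self.generic_visit(node)
#
#   ast = Environment().parse('{{ foo + bar }}')
#   collector = NameCollector()
#   collector.visit(ast)
#   # collector.names == ['foo', 'bar']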
|
wildchildyn/autism-website
|
yanni_env/lib/python3.6/site-packages/jinja2/visitor.py
|
Python
|
gpl-3.0
| 3,316
|
[
"VisIt"
] |
243d47d5c00d036f49727b457cde5f3f2a90c41e1b238c02d3407365af971e4b
|
import numpy as np
from assignment2.cs231n.layers import *
from assignment2.cs231n.fast_layers import *
from assignment2.cs231n.layer_utils import *
from assignment2.cs231n.layer_utils import conv_relu_pool_forward,\
affine_relu_forward
from assignment2.cs231n.layers import affine_forward, softmax_loss,\
affine_backward
class ThreeLayerConvNet(object):
"""
A three-layer convolutional network with the following architecture:
conv - relu - 2x2 max pool - affine - relu - affine - softmax
The network operates on minibatches of data that have shape (N, C, H, W)
consisting of N images, each with height H and width W and with C input
channels.
"""
def __init__(self, input_dim=(3, 32, 32), num_filters=32, filter_size=7,
hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,
dtype=np.float32):
"""
Initialize a new network.
Inputs:
- input_dim: Tuple (C, H, W) giving size of input data
- num_filters: Number of filters to use in the convolutional layer
- filter_size: Size of filters to use in the convolutional layer
- hidden_dim: Number of units to use in the fully-connected hidden layer
- num_classes: Number of scores to produce from the final affine layer.
- weight_scale: Scalar giving standard deviation for random initialization
of weights.
- reg: Scalar giving L2 regularization strength
- dtype: numpy datatype to use for computation.
"""
self.params = {}
self.reg = reg
self.dtype = dtype
############################################################################
# TODO: Initialize weights and biases for the three-layer convolutional #
# network. Weights should be initialized from a Gaussian with standard #
# deviation equal to weight_scale; biases should be initialized to zero. #
# All weights and biases should be stored in the dictionary self.params. #
# Store weights and biases for the convolutional layer using the keys 'W1' #
# and 'b1'; use keys 'W2' and 'b2' for the weights and biases of the #
# hidden affine layer, and keys 'W3' and 'b3' for the weights and biases #
# of the output affine layer. #
############################################################################
# self.conv_param = {'stride': 1, 'pad': 3}
# stride = self.conv_param['stride']
# pad = self.conv_param['pad']
C, H, W = input_dim
self.input_dim = input_dim
self.params['W1'] = weight_scale * np.random.randn(num_filters, C, filter_size, filter_size)
self.params['b1'] = np.zeros(num_filters)
self.params['W2'] = weight_scale * np.random.randn(num_filters*(H/2) *(W/2), hidden_dim)
self.params['b2'] = np.zeros(hidden_dim)
self.params['W3'] = weight_scale * np.random.randn(hidden_dim, num_classes)
self.params['b3'] = np.zeros(num_classes)
############################################################################
# END OF YOUR CODE #
############################################################################
for k, v in self.params.iteritems():
self.params[k] = v.astype(dtype)
def loss(self, X, y=None):
"""
Evaluate loss and gradient for the three-layer convolutional network.
Input / output: Same API as TwoLayerNet in fc_net.py.
"""
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
W3, b3 = self.params['W3'], self.params['b3']
# pass conv_param to the forward pass for the convolutional layer
filter_size = W1.shape[2]
conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}
# pass pool_param to the forward pass for the max-pooling layer
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
scores = None
############################################################################
# TODO: Implement the forward pass for the three-layer convolutional net, #
# computing the class scores for X and storing them in the scores #
# variable. #
############################################################################
#conv layer
scores, conv_cache = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)
#hidden layer
scores, hidden_cache = affine_relu_forward(scores, W2, b2)
#output layer
scores, output_cache = affine_forward(scores, W3, b3)
############################################################################
# END OF YOUR CODE #
############################################################################
if y is None:
return scores
loss, grads = 0, {}
############################################################################
# TODO: Implement the backward pass for the three-layer convolutional net, #
# storing the loss and gradients in the loss and grads variables. Compute #
# data loss using softmax, and make sure that grads[k] holds the gradients #
# for self.params[k]. Don't forget to add L2 regularization! #
############################################################################
loss, dout = softmax_loss(scores, y)
#output layer
dout, grads['W3'], grads['b3'] = affine_backward(dout, output_cache)
#hidden layer
dout, grads['W2'], grads['b2'] = affine_relu_backward(dout, hidden_cache)
#conv layer
dout, grads['W1'], grads['b1'] = conv_relu_pool_backward(dout, conv_cache)
loss += 0.5 * self.reg * (np.square(W1).sum() + np.square(W2).sum() + np.square(W3).sum())
grads['W1'] += self.reg * self.params['W1']
grads['W2'] += self.reg * self.params['W2']
grads['W3'] += self.reg * self.params['W3']
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
pass
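# Illustrative usage sketch (not part of the original assignment file); the
# random minibatch below is made up:
#
#   model = ThreeLayerConvNet(input_dim=(3, 32, 32), num_filters=8,
#                             filter_size=3, hidden_dim=50, reg=1e-3)
#   X = np.random.randn(4, 3, 32, 32)
#   y = np.random.randint(10, size=4)
#   scores = model.loss(X)            # forward pass only
#   loss, grads = model.loss(X, y)    # forward and backward pass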
|
machinelearningnanodegree/stanford-cs231
|
solutions/levin/assignment2/cs231n/classifiers/cnn.py
|
Python
|
mit
| 7,062
|
[
"Gaussian"
] |
e624af09a518fbc2c99bb71b97f784dd3f3f277716ba998f618cd1fc16fa67c0
|
import unicodedata
import numpy as np
from .. import coding
from ..core.variable import Variable
# Special characters that are permitted in netCDF names except in the
# 0th position of the string
_specialchars = '_.@+- !"#$%&\\()*,:;<=>?[]^`{|}~'
# The following are reserved names in CDL and may not be used as names of
# variables, dimension, attributes
_reserved_names = {
"byte",
"char",
"short",
"ushort",
"int",
"uint",
"int64",
"uint64",
"float" "real",
"double",
"bool",
"string",
}
# These data-types aren't supported by netCDF3, so they are automatically
# coerced instead as indicated by the "coerce_nc3_dtype" function
_nc3_dtype_coercions = {"int64": "int32", "bool": "int8"}
# encode all strings as UTF-8
STRING_ENCODING = "utf-8"
def coerce_nc3_dtype(arr):
"""Coerce an array to a data type that can be stored in a netCDF-3 file
This function performs the following dtype conversions:
int64 -> int32
bool -> int8
Data is checked for equality, or equivalence (non-NaN values) with
`np.allclose` with the default keyword arguments.
"""
dtype = str(arr.dtype)
if dtype in _nc3_dtype_coercions:
new_dtype = _nc3_dtype_coercions[dtype]
# TODO: raise a warning whenever casting the data-type instead?
cast_arr = arr.astype(new_dtype)
if not (cast_arr == arr).all():
raise ValueError(
f"could not safely cast array from dtype {dtype} to {new_dtype}"
)
arr = cast_arr
return arr
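# Illustrative behaviour (not part of the original module):
#
#   coerce_nc3_dtype(np.array([1, 2], dtype="int64")).dtype   # int32
#   coerce_nc3_dtype(np.array([True, False])).dtype           # int8
#   coerce_nc3_dtype(np.array([2 ** 40], dtype="int64"))      # raises ValueError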
def encode_nc3_attr_value(value):
if isinstance(value, bytes):
pass
elif isinstance(value, str):
value = value.encode(STRING_ENCODING)
else:
value = coerce_nc3_dtype(np.atleast_1d(value))
if value.ndim > 1:
raise ValueError("netCDF attributes must be 1-dimensional")
return value
def encode_nc3_attrs(attrs):
return {k: encode_nc3_attr_value(v) for k, v in attrs.items()}
def encode_nc3_variable(var):
for coder in [
coding.strings.EncodedStringCoder(allows_unicode=False),
coding.strings.CharacterArrayCoder(),
]:
var = coder.encode(var)
data = coerce_nc3_dtype(var.data)
attrs = encode_nc3_attrs(var.attrs)
return Variable(var.dims, data, attrs, var.encoding)
def _isalnumMUTF8(c):
"""Return True if the given UTF-8 encoded character is alphanumeric
or multibyte.
Input is not checked!
"""
return c.isalnum() or (len(c.encode("utf-8")) > 1)
def is_valid_nc3_name(s):
"""Test whether an object can be validly converted to a netCDF-3
dimension, variable or attribute name
Earlier versions of the netCDF C-library reference implementation
enforced a more restricted set of characters in creating new names,
but permitted reading names containing arbitrary bytes. This
specification extends the permitted characters in names to include
multi-byte UTF-8 encoded Unicode and additional printing characters
from the US-ASCII alphabet. The first character of a name must be
alphanumeric, a multi-byte UTF-8 character, or '_' (reserved for
special names with meaning to implementations, such as the
"_FillValue" attribute). Subsequent characters may also include
printing special characters, except for '/' which is not allowed in
names. Names that have trailing space characters are also not
permitted.
"""
if not isinstance(s, str):
return False
num_bytes = len(s.encode("utf-8"))
return (
(unicodedata.normalize("NFC", s) == s)
and (s not in _reserved_names)
and (num_bytes >= 0)
and ("/" not in s)
and (s[-1] != " ")
and (_isalnumMUTF8(s[0]) or (s[0] == "_"))
and all(_isalnumMUTF8(c) or c in _specialchars for c in s)
)
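# Illustrative checks (not part of the original module):
#
#   is_valid_nc3_name("temperature")   # True
#   is_valid_nc3_name("_FillValue")    # True  (leading underscore is allowed)
#   is_valid_nc3_name("bad/name")      # False ('/' is not permitted)
#   is_valid_nc3_name("trailing ")     # False (trailing space)
#   is_valid_nc3_name("float")         # False (reserved CDL name)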
|
shoyer/xarray
|
xarray/backends/netcdf3.py
|
Python
|
apache-2.0
| 3,931
|
[
"NetCDF"
] |
2a41559ef0ccd144f567d910ed03215e555abcb085af227a5c6bb11f0fc6b42c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#******************************************************************************
#*
#* Copyright (C) 2015 Kiran Karra <kiran.karra@gmail.com>
#*
#* This program is free software: you can redistribute it and/or modify
#* it under the terms of the GNU General Public License as published by
#* the Free Software Foundation, either version 3 of the License, or
#* (at your option) any later version.
#*
#* This program is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#* GNU General Public License for more details.
#*
#* You should have received a copy of the GNU General Public License
#* along with this program. If not, see <http://www.gnu.org/licenses/>.
#******************************************************************************
import math
import numpy as np
import multivariate_stats
from invcopulastat import invcopulastat
from scipy.stats import kendalltau
from numpy.linalg import eig
"""
copulafit.py contains routines which provide use various techniques, as
specified by the user to fit data to a family of copula (i.e. find the
dependency parameter).
"""
def copulafit(family, X, algorithm):
"""
Attempts to determine the dependency parameter of the copula family
type specified, using the algorithm that is specified for the data
given by the matrix X
Inputs:
family -- the copula family to fit to, must be:
'Gaussian'
't'
'Clayton'
'Gumbel'
'Frank'
X -- the data to determine the copula dependency parameter for. Must be
a numpy array of shape = M x N, where M is the number of samples
and N is the dimensionality of the data
algorithm -- must be one of the following strings:
'MLE' - Maximum Likelihood method
'AMLE' - Approximate Maximum Likelihood method
'PKTE' - Uses the pairwise Kendall's tau estimator's relationship to the
copula family's dependency parameter (only applicable
to Clayton, Gumbel, or Frank copulas currently)
Outputs:
the dependency parameter for the copula
"""
algorithm_lc = algorithm.lower()
family_lc = family.lower()
dep_param_est = None
if(algorithm_lc=='mle'):
raise ValueError('MLE method not yet supported!')
elif(algorithm_lc=='amle'):
raise ValueError('Approximate MLE method not yet supported!')
elif(algorithm_lc=='pkte'):
if(family_lc=='gaussian'):
dep_param_est = _gaussian_PKTE(X)
elif(family_lc=='t'):
dep_param_est = _t_PKTE(X)
elif(family_lc=='clayton'):
dep_param_est = _clayton_PKTE(X)
elif(family_lc=='gumbel'):
dep_param_est = _gumbel_PKTE(X)
elif(family_lc=='frank'):
dep_param_est = _frank_PKTE(X)
else:
raise ValueError('Unsupported Algorithm or options!')
return dep_param_est
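# Illustrative usage sketch (not part of the original module); X below is
# made-up bivariate data:
#
#   X = np.random.randn(1000, 2)
#   alpha_hat = copulafit('Clayton', X, 'PKTE')   # scalar dependency parameter
#   sigma_hat = copulafit('Gaussian', X, 'PKTE')  # 2x2 correlation matrix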
def _gaussian_PKTE(X):
# the algorithm for this comes from the paper:
# "Gaussian Copula Precision Estimation with Missing Values"
# by Huahua Wang, Faridel Fazayeli, Soumyadeep Chatterjee, Arindam Banerjee
N = X.shape[1]
sigma_hat = np.ones((N,N))
for dim1 in range(0,N-1):
for dim2 in range(dim1+1,N):
rho = np.sin(math.pi/2 * kendalltau(X[:,dim1],X[:,dim2]))
# correlation matrix is symmetric
sigma_hat[dim1][dim2] = rho
sigma_hat[dim2][dim1] = rho
# ensure that sigma_hat is positive semidefinite
sigma_hat = _nearPD(sigma_hat)
return sigma_hat
# TODO: T copula stuff
def _t_PKTE(X):
# first estimate correlation matrix
sigma_hat = _gaussian_PKTE(X)
# TODO: use MLE to estimate degrees of freedom
nu = 1
return (sigma_hat, nu)
def _clayton_PKTE(X):
# calculate empirical kendall's tau
ktau = multivariate_stats.kendalls_tau(X)
# inverse to find dependency parameter
alpha_hat = invcopulastat('Clayton', 'kendall', ktau)
return alpha_hat
def _gumbel_PKTE(X):
# calculate empirical kendall's tau
ktau = multivariate_stats.kendalls_tau(X)
# inverse to find dependency parameter
alpha_hat = invcopulastat('Gumbel', 'kendall', ktau)
return alpha_hat
def _frank_PKTE(X):
# calculate empirical kendall's tau
ktau = multivariate_stats.kendalls_tau(X)
# inverse to find dependency parameter
alpha_hat = invcopulastat('Frank', 'kendall', ktau)
return alpha_hat
def _getAplus(A):
eigval, eigvec = eig(A)
Q = np.matrix(eigvec)
xdiag = np.matrix(np.diag(np.maximum(eigval, 0)))
return Q*xdiag*Q.T
def _getPs(A, W=None):
W05 = np.matrix(W**.5)
return W05.I * _getAplus(W05 * A * W05) * W05.I
def _getPu(A, W=None):
Aret = np.array(A.copy())
Aret[W > 0] = np.array(W)[W > 0]
return np.matrix(Aret)
def _nearPD(A, nit=10):
n = A.shape[0]
W = np.identity(n)
# W is the matrix used for the norm (assumed to be Identity matrix here)
# the algorithm should work for any diagonal W
deltaS = 0
Yk = A.copy()
for k in range(nit):
Rk = Yk - deltaS
Xk = _getPs(Rk, W=W)
deltaS = Xk - Rk
Yk = _getPu(Xk, W=W)
return Yk
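# Illustrative check (not part of the original module): projecting a slightly
# indefinite correlation-like matrix back onto the positive semidefinite cone.
#
#   A = np.matrix([[1.0, 0.9, 0.9],
#                  [0.9, 1.0, -0.9],
#                  [0.9, -0.9, 1.0]])
#   A_pd = _nearPD(A)
#   assert np.all(np.linalg.eigvals(A_pd) >= -1e-8)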
|
kkarrancsu/copula-bayesian-networks
|
copulafit.py
|
Python
|
gpl-3.0
| 5,485
|
[
"Gaussian"
] |
2a0c320c26ddbf154152ec3ecdc0df0491cffc0b5e1d58489a45e3be4704b2e0
|
from . import moe
|
bung87/moto-moe
|
moto/__init__.py
|
Python
|
mit
| 17
|
[
"MOE"
] |
51ce869efa9791369aea30bb25630ba50715aee3f9ac3ab88ea51220fbf41a24
|
#!/usr/bin/env python
""" Virana mapping tool for aligning short read data to human-microbial
reference genomes. Part of the Virana package.
(c) 2013, Sven-Eric Schelhorn, MPI for Informatics.
"""
#from __future__ import print_function
import numpy
import pysam
import sys
import tempfile
import subprocess
import shutil
import os
import os.path
import logging
import bz2
import math
import string
from collections import defaultdict, Counter
from subprocess import PIPE
import time
from zlib import compress
try:
from plumbum import cli
except ImportError:
message = 'This script requires the plumbum python package\n'
sys.stderr.write(message)
sys.exit(1)
# try:
# import HTSeq
# except ImportError:
# message = 'This script requires the HTSeq python package\n'
# sys.stderr.write(message)
# sys.exit(1)
KHMER_AVAILABLE = True
try:
import khmer
except ImportError:
KHMER_AVAILABLE = False
# import line_profiler
NON_ID = ''.join(c for c in map(chr, range(256)) if not c.isalnum())
NON_ID = NON_ID.replace('_', '').replace('-', '')
logging.basicConfig(level=logging.INFO, format='%(message)s')
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
class CLI(cli.Application):
"""RNA-Seq and DNA-Seq short read analysis by mapping to known reference sequences."""
PROGNAME = "vmap"
VERSION = "1.0.0"
DESCRIPTION = \
"""DESCRIPTION: virana vmap - short read mapping for clinical metagenomics.
The virana mapping utility ('vmap') is a wrapper around indexing and mapping
fascilities of three short read alignments tools, STAR, BWA-MEM, and SMALT
(the latter is not well supported yet).
vmap is able to generate reference indexes for each of these mappers from a
single FASTA file and then map short reads in FASTQ format against these indexes.
During mappings, alignments are output as SAM-files, unsorted BAM-files,
taxonomic bins (by taxonomic families), and a special virana format that
summarizes reads that align to specific taxonomic reference databases such as
viruses and additionaly capture multimapping information. In addition, some mappers
also supoort output of unmapped reads in FASTQ format for later assembly or
special treatment of chimeric alignments and their output in SAM files.
https://github.com/schelhorn/virana
Schelhorn S-E, Fischer M, Tolosi L, Altmueller J, Nuernberg P, et al. (2013)
Sensitive Detection of Viral Transcripts in Human Tumor Transcriptomes.
PLoS Comput Biol 9(10): e1003228. doi:10.1371/journal.pcbi.1003228"""
USAGE = """USAGE: The program has four modes that can be accessed by
[vmap | python vmap.py] [rnaindex | dnaindex | rnamap | dnamap]
"""
def main(self, *args):
if args:
print self.DESCRIPTION
print
print self.USAGE
print("ERROR: Unknown command %r" % (args[0]))
return 1
if not self.nested_command:
print self.DESCRIPTION
print
print self.USAGE
print("ERROR : No command given")
return 1
class SAMHits:
""" Converts SAM output of mappers into bzipped HIT files. """
def __init__(self, output_file, sample_id, refseq_filter=None, min_mapping_score=None,\
min_alignment_score=None, max_mismatches=None,\
max_relative_mismatches=None, min_continiously_matching=None,\
filter_complexity=False, debug=False):
self.output_file = bz2.BZ2File(output_file, 'wb', buffering=100 * 1024 * 1024)
self.sample_id = sample_id.translate(None, NON_ID)
self.refseq_filter = refseq_filter
self.max_mismatches = max_mismatches
self.max_relative_mismatches = max_relative_mismatches
self.current_group = []
self.min_mapping_score = min_mapping_score
self.min_alignment_score = min_alignment_score
self.min_continiously_matching = min_continiously_matching
self.filter_complexity = filter_complexity
self._record_cache = []
self.stored_records = 0
self.max_records = 10000000
self.debug = debug
self.filtered = [0, 0, 0, 0, 0, 0, 0]
def count(self, parsed_line):
if parsed_line is None:
return
read_key, read_name, flag, ref_name, ref_position, mapping_score,\
cigar, mate_ref_name, mate_ref_position, insert_size, seq, qual,\
is_end1, is_end2, number_mismatches, alignment_score,\
number_hits, is_reverse, is_primary, is_mapped, is_mate_mapped,\
is_paired, number_matches, read_end_pos, max_match = parsed_line
if not is_mapped:
return
if self.min_continiously_matching and self.min_continiously_matching > max_match:
self.filtered[0] += 1
return
if self.max_mismatches\
and int(number_mismatches) > self.max_mismatches:
self.filtered[1] += 1
return
if self.max_relative_mismatches\
and int(number_mismatches) / float(len(seq))\
> self.max_relative_mismatches:
self.filtered[2] += 1
return
if self.min_mapping_score\
and self.min_mapping_score > mapping_score:
self.filtered[3] += 1
return
if self.min_alignment_score\
and self.min_alignment_score > alignment_score:
self.filtered[4] += 1
return
pair_id = ''
if is_end1:
pair_id = '/1'
elif is_end2:
pair_id = '/2'
read_name = self.sample_id + ';' + read_name + pair_id
# Initialize new current group
if len(self.current_group) == 0:
self.current_group = [read_name, seq, []]
# Write old current group to file
if read_name != self.current_group[0]:
self._write_group()
self.current_group = [read_name, seq, []]
try:
refseq_group, family, organism, identifier = ref_name.split(';')[:4]
except ValueError:
logging.error('Warning: read mapped to malformed reference sequence %s, skipping\n' % ref_name)
return
start = int(ref_position) + 1
self.current_group[2].append([refseq_group, family, organism, identifier, str(start), str(read_end_pos)])
def _write_group(self, empty_cache=False):
if self.current_group:
passed_refseq = True
passed_complexity = True
if self.refseq_filter:
passed_refseq = False
for refseq_group, family, organism, identifier, start, end in self.current_group[2]:
if refseq_group in self.refseq_filter:
passed_refseq = True
break
if not passed_refseq:
self.filtered[6] += 1
if passed_refseq and self.filter_complexity:
sequence = self.current_group[1]
avg_compression = float(len(compress(sequence)))/len(sequence)
if avg_compression < 0.5:
passed_complexity = False
self.filtered[5] += 1
if passed_refseq and passed_complexity:
description = []
for identifier in self.current_group[2]:
description.append(';'.join(identifier))
description = '|'.join(description)
sequence = self.current_group[1]
identifier = 'Read;' + self.current_group[0]
self._record_cache.append('>%s %s\n%s\n' % (identifier, description, sequence))
self.stored_records += 1
if empty_cache or self.stored_records > self.max_records:
logging.debug('Writing hit records to file')
self.output_file.writelines(self._record_cache)
self.stored_records = 0
self._record_cache = []
def write(self):
self._write_group(empty_cache=True)
self.output_file.close()
def get_filter_counts(self):
return 'Segments filtered from hit output:\n%10i due to continuously matching,\n%10i due to max mismatches,\n%10i due to max relative mismatches,\n%10i due to min mapping score,\n%10i due to min alignment score,\n%10i due to complexity,\n%10i due to reference groups' % tuple(self.filtered)
class SAMParser:
def __init__(self, fifo_path):
logging.debug('Parsing mapper output from pysam fifo %s' % fifo_path)
self.samfile = pysam.Samfile(fifo_path, "r")
logging.debug('Extracting header from pysam fifo...')
self.header = self.samfile.header
logging.debug('Finished extracting header of length %i' % len(self.header))
# def parse_htseq_lines(self, stream):
# logging.debug('Parsing mapper output from lines')
# for line in iter(stream.readline, ''):
# if line[0] == '@':
# continue
# alignment = HTSeq._HTSeq.SAM_Alignment.from_SAM_line(line)
# yield line, self.parse_htseq(alignment)
# def parse_htseq_reader(self, stream):
# logging.debug('Parsing mapper output from reader')
# iterator = HTSeq.SAM_Reader(stream)
# for alignment in iterator:
# yield alignment.get_sam_line(), self.parse_htseq(alignment)
def parse(self):
getrname = self.samfile.getrname
logging.debug('Parsing SAM file alignments...')
for i, alignment in enumerate(self.samfile):
if i % 100000 == 0:
logging.debug('Processed %i SAM alignments' % i)
read_name = alignment.qname
seq = alignment.seq
qual = alignment.qual
flag = alignment.flag
cigar = None
is_paired = alignment.is_paired
is_mapped = not alignment.is_unmapped
is_mate_mapped = not alignment.mate_is_unmapped
is_reverse = alignment.is_reverse
is_end1 = alignment.is_read1
is_end2 = alignment.is_read2
is_primary = not alignment.is_secondary
read_key = (read_name, is_end1)
ref_name = None
ref_position = None
mapping_score = 0
mate_ref_name = None
mate_ref_position = None
insert_size = None
alignment_score = 0
read_end_pos = None
if is_mate_mapped:
mate_ref_name = getrname(alignment.rnext)
mate_ref_position = alignment.pnext
number_hits = 0
alignment_score = 0
number_mismatches = 0
number_matches = 0
max_match = 0
if is_mapped:
ref_name = getrname(alignment.tid)
ref_position = alignment.pos
read_end_pos = alignment.aend
mapping_score = alignment.mapq
cigar = alignment.cigar
if is_mate_mapped:
insert_size = alignment.tlen
for operation, count in alignment.cigar:
if operation == 0:
number_matches += count
max_match = max(max_match, count)
for tag, value in alignment.tags:
tag = tag.upper()
if tag == 'NH':
number_hits = value
elif tag == 'AS':
alignment_score = value
elif tag == 'NM':
number_mismatches = value
parsed = read_key, read_name, flag, ref_name, ref_position, mapping_score,\
cigar, mate_ref_name, mate_ref_position, insert_size, seq, qual,\
is_end1, is_end2, number_mismatches, alignment_score,\
number_hits, is_reverse, is_primary, is_mapped, is_mate_mapped,\
is_paired, number_matches, read_end_pos, max_match
yield alignment, parsed
# def parse_htseq(self, alignment):
# read_name = alignment.read.name
# seq = alignment.read.seq
# qual = alignment.read.qual
# flag = alignment.flag
# cigar = None
# is_paired = (flag & 1)
# is_mapped = not (flag & 4)
# is_mate_mapped = alignment.mate_aligned is not None #not (flag & 8)
# is_reverse = (flag & 16)
# is_end1 = (flag & 64)
# is_end2 = (flag & 128)
# is_primary = not (flag & 256)
# read_key = (read_name, is_end1)
# ref_name = None
# ref_position = None
# mapping_score = 0
# mate_ref_name = None
# mate_ref_position = None
# insert_size = None
# alignment_score = 0
# read_end_pos = None
# if is_mate_mapped and alignment.mate_start:
# mate_ref_name = alignment.mate_start.chrom
# mate_ref_position = alignment.mate_start.start
# number_hits = 0
# alignment_score = 0
# number_mismatches = 0
# number_matches = 0
# max_match = 0
# if is_mapped:
# ref_name = alignment.iv.chrom
# ref_position = alignment.iv.start
# read_end_pos = alignment.iv.end
# mapping_score = alignment.aQual
# cigar = alignment.cigar
# if is_mate_mapped:
# insert_size = alignment.inferred_insert_size
# for c in cigar:
# if c.type == 'M':
# number_matches += c.size
# max_match = max(max_match, c.size)
# for tag, value in alignment.optional_fields:
# if tag == 'NM':
# number_hits = value
# elif tag == 'AS':
# alignment_score = value
# elif tag == 'NH':
# number_mismatches = value
# return read_key, read_name, flag, ref_name, ref_position, mapping_score,\
# cigar, mate_ref_name, mate_ref_position, insert_size, seq, qual,\
# is_end1, is_end2, number_mismatches, alignment_score,\
# number_hits, is_reverse, is_primary, is_mapped, is_mate_mapped,\
# is_paired, number_matches, read_end_pos, max_match
class SAMQuality:
def __init__(self, file_path):
self.file_path = file_path
self.stored = defaultdict(Counter)
self.all_references = defaultdict(int)
self.primary_references = defaultdict(int)
self.complement = string.maketrans('ATCGN', 'TAGCN')
if KHMER_AVAILABLE:
self.ktable = khmer.new_ktable(10)
def _get_complement(self, sequence):
return sequence.translate(self.complement)[::-1]
def _get_summary(self, counter):
""""Returns five numbers (sum, extrema, mean, and std)
for a max_frequency counter """
maximum = 0
minimum = sys.maxint
thesum = 0
allcount = 0
mode = [0, None]
items = 0.0
mean = 0.0
m2 = 0.0
variance = 0.0
for item in counter:
count = counter[item]
if count > mode[0]:
mode = [count, item]
allcount += count
maximum = max(maximum, item)
minimum = min(minimum, item)
thesum += (count * item)
x = 1
while x <= count:
items += 1
delta = item - mean
mean = mean + delta / items
m2 = m2 + delta * (item - mean)
variance = m2 / items
x += 1
std = math.sqrt(variance)
return allcount, thesum, minimum, maximum, mode[1], mean, std
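# Illustrative sketch (not part of the original class):
#
#   counter = Counter({10: 2, 20: 1})
#   allcount, total, mn, mx, mode, mean, std = self._get_summary(counter)
#   # -> (3, 40, 10, 20, 10, 13.33..., 4.71...)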
def _to_unit(self, item, is_percentage=False):
""" Convert a numeric to a string with metric units """
if is_percentage:
return ('%-.3f' % (item * 100)) + '%'
converted = None
try:
item = float(item)
if item > 10**12:
converted = str(round(item / 10**12,3))+'T'
elif item > 10**9:
converted = str(round(item / 10**9,3))+'G'
elif item > 10**6:
converted = str(round(item / 10**6,3))+'M'
elif item > 10**3:
converted = str(round(item / 10**3,3))+'K'
else:
converted = str(round(item,3))
except:
converted = str(item)
return converted
def _str_metrics(self, data):
str_metrics = []
for (item, metric) in sorted(data.keys()):
counter = data[(item, metric)]
if not hasattr(counter.iterkeys().next(), 'real'):
for element, count in counter.most_common():
str_metrics.append(self._str_metric(item, metric, element, count, no_units=True))
else:
summary = self._get_summary(counter)
str_metrics.append(self._str_metric(item, metric, *summary))
return str_metrics
def _str_metric(self, item, metric, count, thesum='', minimum='',\
maximum='', mode='', mean='', std='', no_units=False):
counters = [count, thesum, minimum, maximum, mode, mean, std]
counters = map(str, counters)
if no_units:
items = [item, metric] + counters
else:
units = map(self._to_unit, counters)
items = [item, metric] + units
return '%-15s\t%-60s\t%12s\t%12s\t%12s\t%12s\t%12s\t%12s\t%12s\n' \
% tuple(items)
def _count_read(self, metric, data, sample):
item = 'read'
(insert_size, alignment_score, mapping_score, length,\
q20_length, avg_phred_quality, number_hits, is_reverse) = data
self.stored[(item, metric + ' mappings')][number_hits] += sample
self.stored[(item, metric + ' insert')][insert_size] += sample
def _count_segment(self, metric, data, sample):
item = 'segment'
(insert_size, alignment_score, mapping_score, length,\
q20_length, avg_phred_quality, number_hits, is_reverse) = data
self.stored[(item, metric + ' algq')][alignment_score] += sample
self.stored[(item, metric + ' mapq')][mapping_score] += sample
self.stored[(item, metric + ' length')][length] += sample
self.stored[(item, metric + ' q20length')][q20_length] += sample
self.stored[(item, metric + ' meanbasequal')][avg_phred_quality] += sample
self.stored[(item, metric + ' reverse')][is_reverse] += sample
def count(self, parsed_line):
if parsed_line is None:
return
#print_metric('Item' , 'Metric', 'Count', 'Sum', 'Min', 'Max', 'Mode', 'Mean', 'STD')
read_key, read_name, flag, ref_name, ref_position, mapping_score,\
cigar, mate_ref_name, mate_ref_position, insert_size, seq, qual,\
is_end1, is_end2, number_mismatches, alignment_score,\
number_hits, is_reverse, is_primary, is_mapped, is_mate_mapped,\
is_paired, number_matches, read_end_pos, max_match = parsed_line
qual = numpy.array(qual)
phred_quality = qual - 33
avg_phred_quality = numpy.mean(phred_quality)
length = len(seq)
mate_reference_id = mate_ref_name
reference_id = ref_name
reference = reference_id is not None and reference_id != '*'
insert_size = insert_size and abs(insert_size) or insert_size
is_segment1 = not is_paired or (is_paired and is_end1)
is_reverse = is_reverse
is_unique = is_primary and number_hits == 1
is_translocation = is_paired and is_mapped and is_mate_mapped\
and (mate_reference_id != '=' and reference_id != mate_reference_id)
is_part_of_doublemap = is_paired and is_mapped and is_mate_mapped
is_part_of_halfmap = is_paired and (is_mapped != is_mate_mapped)
is_part_of_nomap = is_paired and not is_mapped and not is_mate_mapped
# Count length until first low quality base call
q20_length = 0
for q in phred_quality:
if q < 20:
break
q20_length += 1
# Count kmers
if KHMER_AVAILABLE:
if not is_reverse:
self.ktable.consume(seq)
else:
self.ktable.consume(self._get_complement(seq))
if reference:
self.all_references[reference_id] += 1
if is_primary:
self.primary_references[reference_id] += 1
data = (insert_size, alignment_score, mapping_score, length,\
q20_length, avg_phred_quality, number_hits, is_reverse)
sample = 1
self._count_segment('sequenced', data, sample)
if is_mapped:
self._count_segment('sequenced mapped multi', data, sample)
if is_primary:
self._count_segment('sequenced mapped primary', data, sample)
if number_hits and is_unique:
self._count_segment('sequenced mapped primary unique', data, sample)
if is_segment1:
self._count_read('sequenced mapped multi', data, sample)
if is_primary:
self._count_read('sequenced mapped primary', data, sample)
if is_paired:
self._count_segment('sequenced paired', data, sample)
if is_part_of_doublemap:
self._count_segment('sequenced paired doublemap', data, sample)
if is_primary:
self._count_segment('sequenced paired doublemap primary', data, sample)
if is_segment1:
self._count_read('sequenced paired doublemap multi', data, sample)
if is_primary:
self._count_read('sequenced paired doublemap primary', data, sample)
if number_hits and is_unique:
self._count_read('sequenced paired doublemap primary unique', data, sample)
if is_translocation:
self._count_read('sequenced paired doublemap primary unique translocation', data, sample)
elif is_part_of_halfmap:
self._count_segment('sequenced paired halfmap', data, sample)
# The mapped segment
if is_mapped:
self._count_segment('sequenced paired halfmap mapped', data, sample)
if is_primary:
self._count_read('sequenced paired halfmap mapped primary', data, sample)
if number_hits and is_unique:
self._count_read('sequenced paired halfmap mapped primary unique', data, sample)
elif not is_primary:
self._count_read('sequenced unpaired mapped multi', data, sample)
# The unmapped segment
elif not is_mapped:
self._count_segment('sequenced paired halfmap unmapped', data, sample)
elif is_part_of_nomap:
self._count_segment('sequenced paired nomap', data, sample)
if is_segment1:
self._count_read('sequenced paired nomap', data, sample)
elif not is_paired:
self._count_segment('sequenced unpaired', data, sample)
if is_mapped:
self._count_segment('sequenced unpaired mapped', data, sample)
if is_primary:
self._count_read('sequenced unpaired mapped primary', data, sample)
if number_hits and is_unique:
self._count_read('sequenced unpaired mapped primary unique', data, sample)
elif not is_primary:
self._count_read('sequenced unpaired mapped multi', data, sample)
elif not is_mapped:
self._count_segment('sequenced unpaired unmapped', data, sample)
if is_segment1:
self._count_read('sequenced unpaired unmapped', data, sample)
def write(self):
with open(self.file_path, 'w') as output_file:
all_references = sorted([(count, reference) for reference, count\
in self.all_references.iteritems()], reverse=True)
for j, (count, reference) in enumerate(all_references[:30]):
self.stored[('segment', 'multireference_' + str(j+1))][reference] = count
primary_references = sorted([(count, reference) for reference, count\
in self.primary_references.iteritems()], reverse=True)
for j, (count, reference) in enumerate(primary_references[:30]):
self.stored[('segment', 'primaryreference_' + str(j+1))][reference] = count
# Extract top-ranking kmers
if KHMER_AVAILABLE:
kmer_frequencies = []
for i in range(0, self.ktable.n_entries()):
n = self.ktable.get(i)
if n > 0:
kmer_frequencies.append((n, self.ktable.reverse_hash(i)))
kmer_frequencies = sorted(kmer_frequencies, reverse=True)
for j, (frequency, kmer) in enumerate(kmer_frequencies[:10]):
self.stored[('segment', 'kmer_' + str(j+1))][kmer] = frequency
output_file.writelines(self._str_metrics(self.stored))
class SAMTaxonomy:
""" Provides taxonomic summary information from a SAM file stream. """
def __init__(self, file_path, sample_id):
self.file_path = file_path
self.sample_id = sample_id
self.count_primaries = Counter()
self.detailed_information = {}
self._last_read = (None, None)
self._last_read_human_prim = 0
self._last_read_human_sec = 0
self._last_organisms = set()
self.format = '%40s\t%15s\t%20s\t%-40s\t' + '%6s\t' * 24 + '\n'
self.header = ('Sample','Group', 'Family', 'Organism',\
'AlP', 'AlS', 'AlHP', 'AlHS',\
'MaBP', 'MaBS', 'MaHP', 'MaHS',\
'GIs', 'MapP', 'MapS', 'AlgP', 'AlgS', \
'MisP', 'MisS', 'MatP', 'MatS',\
'MaxP', 'MaxS', 'AlLP', 'AlLS',\
'ReLP', 'ReLS',\
'Span')
def count(self, parsed_line):
if parsed_line is None:
return
read_key, read_name, flag, ref_name, ref_position, mapping_score,\
cigar, mate_ref_name, mate_ref_position, insert_size, seq, qual,\
is_end1, is_end2, number_mismatches, alignment_score,\
number_hits, is_reverse, is_primary, is_mapped, is_mate_mapped,\
is_paired, number_matches, read_end_pos, max_match = parsed_line
if not is_mapped:
return
# Determine mapping information
refseq_group, family, organism, gi = ref_name.split(';')[:4]
# Determine mapping information of mate
both_mates_same_ref = 0
mate_human_ref = 0
if is_paired and is_mate_mapped and ref_name == mate_ref_name:
both_mates_same_ref = 1
if is_paired and is_mate_mapped and 'Homo_sapiens' in mate_ref_name:
mate_human_ref = 1
# Count read as a primary hit
if is_primary:
self.count_primaries[organism] += 1
if organism not in self.detailed_information:
initial = [refseq_group,
family,
set(), # 02 GIs
[[0, 0], [0, 0]], # 03 Mapping score
[[0, 0], [0, 0]], # 04 Alignment score
[[0, 0], [0, 0]], # 05 Mismatches
[[0, 0], [0, 0]], # 06 Overall length of matched region
[[0, 0], [0, 0]], # 07 Longest continuously matched region
[[0, 0], [0, 0]], # 08 Length of aligned part of read
[[0, 0], [0, 0]], # 09 (Potentially hard skipped) read length
[[0, 0], [0, 0]], # 10 Quality
[0, 0], # 11 Nr. alignments to organism
[0, 0], # 12 Nr. alignments also to human
[0, 0], # 13 nr. both mates to same ref
[0, 0], # 14 nr. other mate to human
[0, 0]] # 15 Reference positions (max, min)
self.detailed_information[organism] = initial
entry = self.detailed_information[organism]
entry[2].add(gi)
if is_primary:
index = 0
else:
index = 1
entry[3][index][0] += mapping_score
entry[3][index][1] += 1
entry[4][index][0] += alignment_score
entry[4][index][1] += 1
entry[5][index][0] += number_mismatches
entry[5][index][1] += 1
entry[6][index][0] += number_matches
entry[6][index][1] += 1
entry[7][index][0] += max_match
entry[7][index][1] += 1
entry[8][index][0] += (read_end_pos - ref_position)
entry[8][index][1] += 1
entry[9][index][0] += len(seq)
entry[9][index][1] += 1
entry[11][index] += 1
entry[13][index] += both_mates_same_ref
entry[14][index] += mate_human_ref
if organism != 'Homo_sapiens':
if entry[15][0] == 0:
entry[15][0] = ref_position
if ref_position < entry[15][0]:
entry[15][0] = ref_position
if ref_position > entry[15][1]:
entry[15][1] = ref_position
# Cache information that is aggregated across all alignments of a read
if self._last_read == (None, None):
self._last_read = read_key
if self._last_read != read_key:
for last_organism in self._last_organisms:
self.detailed_information[last_organism][12][0]\
+= self._last_read_human_prim
self.detailed_information[last_organism][12][1]\
+= self._last_read_human_sec
self._last_read = read_key
self._last_organisms = set()
self._last_read_human_prim = 0
self._last_read_human_sec = 0
self._last_organisms.add(organism)
if organism == 'Homo_sapiens':
if is_primary:
self._last_read_human_prim += 1
else:
self._last_read_human_sec += 1
def to_unit(self, count):
if count > 10**9:
count = str(round(count / float(10**9), 1)) + 'G'
elif count > 10**6:
count = str(round(count / float(10**6), 1)) + 'M'
elif count > 10**3:
count = str(round(count / float(10**3), 1)) + 'k'
else:
count = str(int(round(count, 0)))
return count
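# Illustrative examples (hypothetical values, not part of the original code):
#   to_unit(999)       -> '999'
#   to_unit(1234)      -> '1.2k'   (1234 / 1e3, rounded to one decimal)
#   to_unit(5300000)   -> '5.3M'
#   to_unit(2*10**9+1) -> '2.0G'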
def to_avg(self, item):
primary = self.to_unit(item[0][0] / float(item[0][1] + 1))
secondary = self.to_unit(item[1][0] / float(item[1][1] + 1))
return primary, secondary
def get_summary(self, top=None):
lines = []
lines.append(self.format % self.header)
if top:
top_organisms = self.count_primaries.most_common(top)
else:
top_organisms = self.count_primaries.most_common()
for organism, count in top_organisms:
entry = self.detailed_information[organism]
nr_gis = self.to_unit(len(entry[2]))
map_score = self.to_avg(entry[3])
alg_score = self.to_avg(entry[4])
mismatch = self.to_avg(entry[5])
allmatch = self.to_avg(entry[6])
maxmatch = self.to_avg(entry[7])
alglength = self.to_avg(entry[8])
readlength = self.to_avg(entry[9])
algs = self.to_unit(entry[11][0]), self.to_unit(entry[11][1])
algs_hum = self.to_unit(entry[12][0]), self.to_unit(entry[12][1])
both_mates = self.to_unit(entry[13][0]), self.to_unit(entry[13][1])
hum_mates = self.to_unit(entry[14][0]), self.to_unit(entry[14][1])
ref_span = self.to_unit(entry[15][1] - entry[15][0])
data = self.sample_id, entry[0][:20], entry[1][:20], organism[:40],\
algs[0], algs[1], \
algs_hum[0], algs_hum[1], both_mates[0], both_mates[1], \
hum_mates[0], hum_mates[1], \
nr_gis, map_score[0], map_score[1], alg_score[0], alg_score[1], \
mismatch[0], mismatch[1], allmatch[0], allmatch[1], \
maxmatch[0], maxmatch[1], alglength[0], alglength[1], \
readlength[0], readlength[1], \
ref_span
lines.append(self.format % data)
return lines
def write(self):
with open(self.file_path, 'w') as output_file:
output_file.writelines(self.get_summary())
class Index(cli.Application):
def setup_logging(self):
if self.debug:
logging.getLogger().setLevel(logging.DEBUG)
return self.debug
def validate_paths(self):
# Validate path to binary
if not which(self.path):
sys.stderr.write(
'Indexer %s does not exist or is not executable' % self.path)
sys.exit(1)
# Check if genome directory is existing
if not os.path.exists(self.reference_file):
sys.stderr.write(
'Reference file %s does not exist, exiting' % self.reference_file)
sys.exit(1)
# Check if output directory is existing
if not os.path.exists(self.index_dir):
logging.debug(
'Making output directory for index at %s' % self.index_dir)
os.makedirs(self.index_dir)
def get_command_line(self, temp_path):
# # Make named pipe to extract genomes
# pipe_path = os.path.abspath(os.path.join(self.genome_dir, 'pipe.fa'))
# if os.path.exists(pipe_path):
# os.unlink(pipe_path)
# os.mkfifo(pipe_path)
pass
def run_index_process(self):
# Run index generation process
command_line = self.get_command_line()
process = subprocess.Popen(' '.join(command_line), shell=True, stdout=PIPE, stderr=PIPE)
# Block until streams are closed by the process
stdout, stderr = process.communicate()
if stderr:
sys.stderr.write(stderr)
if self.debug and stdout:
print stdout
def main(self, *args):
self.setup_logging()
self.validate_paths()
self.run_index_process()
@CLI.subcommand("rnaindex")
class RNAIndex(Index):
""" Creates a STAR index from a FASTA genome reference """
reference_file = cli.SwitchAttr(
['-r', '--reference_file'], str, mandatory=True,
help="Sets the reference genome FASTA file.")
index_dir = cli.SwitchAttr(['-i', '--index_dir'], str, mandatory=True,
help="Sets the index output directory." +
" Directory will be generated if not existing." +
" Directory will be filled with several index files.")
threads = cli.SwitchAttr(
['-t', '--threads'], cli.Range(1, 512), mandatory=False,
help="Sets the number of threads to use",
default=1)
max_ram = cli.SwitchAttr(
['-m'], cli.Range(1, 400000000000), mandatory=False,
help="Sets the maximum amount of memory (RAM) to use (in bytes)",
default=400000000000)
path = cli.SwitchAttr(['-p', '--path'], str, mandatory=False,
help="Path to STAR executable",
default='STAR')
sparse = cli.Flag(
["-s", "--sparse"], help="If given, a sparse index that requires less " +
" RAM in the mapping phase will be constructed")
debug = cli.Flag(["-d", "--debug"], help="Enable debug output")
def get_command_line(self):
# Make star command line
cline = [self.path] + ['--runMode', 'genomeGenerate',
'--genomeDir', self.index_dir,
'--limitGenomeGenerateRAM', str(self.max_ram),
'--runThreadN', str(self.threads),
'--genomeFastaFiles'] + [self.reference_file]
# Add parameters for sparse (memory-saving) index generation
if self.sparse:
cline += ['--genomeSAsparseD', '2',
'--genomeChrBinNbits', '12',
'--genomeSAindexNbases', '13']
else:
cline += ['--genomeSAsparseD', '1',
'--genomeChrBinNbits', '18',
'--genomeSAindexNbases', '15']
if self.debug:
print ' '.join(cline)
return cline
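# Sketch of the resulting indexer invocation (placeholder paths, not part of
# the original code). With the defaults above and --sparse unset,
# get_command_line() roughly corresponds to:
#
#   STAR --runMode genomeGenerate --genomeDir <index_dir> \
#        --limitGenomeGenerateRAM 400000000000 --runThreadN 1 \
#        --genomeFastaFiles <reference.fa> \
#        --genomeSAsparseD 1 --genomeChrBinNbits 18 --genomeSAindexNbases 13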
@CLI.subcommand("dnaindex")
class DNAIndex(Index):
""" Creates a BWA index from a FASTA reference file """
reference_file = cli.SwitchAttr(['-r', '--reference_file'], str, mandatory=True,
help="Sets the input reference FASTA file.")
index_dir = cli.SwitchAttr(['-i', '--index_dir'], str, mandatory=True,
help="Sets the index output directory." +
" Directory will be generated if not existing." +
" Directory will be filled with several index files.")
path = cli.SwitchAttr(['-p', '--path'], str, mandatory=False,
help="Path to BWA executable",
default='bwa')
debug = cli.Flag(["-d", "--debug"], help="Enable debug output")
def get_command_line(self):
# Make BWA index command line
cline = [self.path] + ['index', '-a', 'bwtsw', '-p', os.path.join(self.index_dir, 'index'), self.reference_file]
return cline
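# Sketch of the resulting command (placeholder paths, not part of the
# original code). With the defaults above this builds
#
#   bwa index -a bwtsw -p <index_dir>/index <reference.fa>
#
# i.e. a bwtsw-algorithm index written under <index_dir> with prefix 'index'.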
@CLI.subcommand("varindex")
class VARIndex(Index):
""" Creates a SMALT index from a FASTA reference file """
reference_file = cli.SwitchAttr(['-r', '--reference_file'], str, mandatory=True,
help="Sets the input reference FASTA file.")
index_dir = cli.SwitchAttr(['-i', '--index_dir'], str, mandatory=True,
help="Sets the index output directory." +
" Directory will be generated if not existing." +
" Directory will be filled with several index files.")
path = cli.SwitchAttr(['-p', '--path'], str, mandatory=False,
help="Path to SMALT executable",
default='smalt_x86_64')
debug = cli.Flag(["-d", "--debug"], help="Enable debug output")
def get_command_line(self):
# Make SMALT index command line
cline = [self.path] + ['index', '-k', '20', '-s', '2', os.path.join(self.index_dir, 'index'), self.reference_file]
return cline
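# Sketch of the resulting command (placeholder paths, not part of the
# original code):
#
#   smalt_x86_64 index -k 20 -s 2 <index_dir>/index <reference.fa>
#
# i.e. a SMALT hash index with word length 20 and step size 2.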
class Mapper(cli.Application):
def setup_logging(self):
if self.debug:
logging.getLogger().setLevel(logging.DEBUG)
return self.debug
def validate_paths(self):
# Validate path to binary
if not which(self.mapper_path):
sys.stderr.write(
'Mapper %s does not exist or is not executable' % self.mapper_path)
sys.exit(1)
#mapper_path = self.mapper_path and self.mapper_path or mapper_executable
#samtools_path = self.samtools_path and self.samtools_path or 'samtools'
# Check if genome directory is existing
if not os.path.exists(self.index_dir):
message = 'Index directory %s does not exist, exiting' % self.index_dir
sys.stderr.write(message)
sys.exit(1)
# Check for number of read input files
if len(self.reads) not in (1, 2):
message = 'Invalid number of FASTQ files; supply either one (single end) or two (paired end)\n'
sys.stderr.write(message)
sys.exit(1)
# Make temporary directories
if self.temp_path:
temp_path = tempfile.mkdtemp(dir=self.temp_path)
else:
temp_path = tempfile.mkdtemp()
# Try if we can make the relevant output files
outputs = ['unmapped1', 'unmapped2', 'taxonomy', 'qual', 'hits', 'sam', 'bam', 'chimeric_mappings']
for output in outputs:
try:
attribute = getattr(self, output)
except AttributeError:
continue
if attribute is None or attribute == '':
continue
try:
with file(attribute, 'a'):
os.utime(attribute, None)
except IOError:
sys.stderr.write('Could not write output file %s\n' % attribute)
sys.exit(1)
return temp_path
def get_command_line(self, temp_path):
pass
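# Note on the streaming design (comment added for clarity, not part of the
# original code): run_mapper_process() below can stream the mapper's SAM
# output through a named pipe (FIFO) created in the temporary directory.
# The mapper process writes SAM lines into the FIFO while SAMParser, opened
# on the same path in setup_outputs(), reads and parses them concurrently,
# so the full SAM stream never has to be materialized on disk.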
def run_mapper_process(self, temp_path, to_fifo=False):
command_line = ' '.join(self.get_command_line(temp_path))
if self.debug:
logging.debug('Running mapper command line: %s' % command_line)
if to_fifo:
logging.debug('Preparing fifo')
fifo_path = os.path.join(temp_path, "namedpipe")
os.mkfifo(fifo_path)
logging.debug('Opening fifo for writing')
# target = os.open(fifo_path, os.O_WRONLY | os.O_NONBLOCK | os.O_CREAT)
target = open(fifo_path, 'w+')
logging.debug('Opened fifo %s' % fifo_path)
else:
fifo_path = None
target = PIPE
logging.debug('Starting mapper process...')
process = subprocess.Popen(command_line, shell=True, stdout=target, stderr=sys.stderr)
logging.debug('Executed mapper with: %s' % command_line)
return process, fifo_path
def setup_outputs(self, fifo_path):
logging.debug('Opening input SAM parser and fifo')
parser = SAMParser(fifo_path)
taxonomy = None
if self.taxonomy:
if os.path.dirname(self.taxonomy) and not os.path.exists(os.path.dirname(self.taxonomy)):
logging.debug('Making directories for output file %s' % self.taxonomy)
os.makedirs(os.path.dirname(self.taxonomy))
logging.debug('Outputting taxonomy to %s', self.taxonomy)
taxonomy = SAMTaxonomy(self.taxonomy, self.sample_id)
quality = None
if self.qual:
if os.path.dirname(self.qual) and not os.path.exists(os.path.dirname(self.qual)):
logging.debug('Making directories for output file %s' % self.qual)
os.makedirs(os.path.dirname(self.qual))
quality = SAMQuality(self.qual)
hits = None
if self.hits:
if os.path.dirname(self.hits) and not os.path.exists(os.path.dirname(self.hits)):
logging.debug('Making directories for output file %s' % self.hits)
os.makedirs(os.path.dirname(self.hits))
logging.debug('Outputting virana hits to %s', self.hits)
hits = SAMHits(self.hits, self.sample_id, set(self.hit_filter),
self.min_mapping_score,
self.min_alignment_score,
self.max_mismatches,
self.max_relative_mismatches,
self.min_continiously_matching,
self.filter_complexity, self.debug)
sam_file = None
if self.sam:
if os.path.dirname(self.sam) and not os.path.exists(os.path.dirname(self.sam)):
logging.debug('Making directories for output file %s' % self.sam)
os.makedirs(os.path.dirname(self.sam))
logging.debug('Outputting SAM file to %s', self.sam)
sam_file = pysam.Samfile(self.sam, 'w', header=parser.header)
bam_file = None
if self.bam:
if os.path.dirname(self.bam) and not os.path.exists(os.path.dirname(self.bam)):
logging.debug('Making directories for output file %s' % self.bam)
os.makedirs(os.path.dirname(self.bam))
logging.debug('Outputting BAM file to %s', self.bam)
bam_file = pysam.Samfile(self.bam, 'wb', header=parser.header)
return parser, taxonomy, quality, hits, sam_file, bam_file
def post_process(self, bam_file, sam_file, hits, taxonomy, quality, temp_path, fifo_path):
logging.info('Mapping completed, writing outputs')
if bam_file:
bam_file.close()
if sam_file:
sam_file.close()
if hits:
hits.write()
if taxonomy:
taxonomy.write()
if quality:
quality.write()
if fifo_path:
os.unlink(fifo_path)
shutil.rmtree(temp_path)
def main(self, *args):
debug = self.setup_logging()
temp_path = self.validate_paths()
logging.debug('Running mapper')
mapper_process, fifo_path = self.run_mapper_process(temp_path, to_fifo=True)
logging.debug('Setting outputs')
parser, taxonomy, quality, hits, sam_file, bam_file = self.setup_outputs(fifo_path)
last_time = None
start_time = None
alignments_all = 0
alignments_last = 0
logging.debug('Starting to parse with pysam...')
for alignment, parsed_line in parser.parse():
if debug:
alignments_all += 1
alignments_last += 1
now = time.time()
if not start_time:
start_time = now
last_time = now
time_diff = (now - last_time)
if time_diff > 60:
overal_time_diff = (now - start_time) / 3600.0
last_time_diff = (now - last_time) / 3600.0
logging.debug('Runtime %5.1fh\nAnalyzed %5iM alignments;\nanalyzing at average rates of\n%5iM alignments/h (based on overall throughput)\n%5iM alignments/h (based on last minute)' % (overal_time_diff, alignments_all / 10**6, (alignments_all / overal_time_diff) / 10**6, (alignments_last / last_time_diff) / 10**6))
if hits:
logging.debug(hits.get_filter_counts())
if taxonomy:
logging.debug(''.join(taxonomy.get_summary(10)))
last_time = now
alignments_last = 0
if sam_file:
sam_file.write(alignment)
if bam_file:
bam_file.write(alignment)
if taxonomy:
taxonomy.count(parsed_line)
if quality:
quality.count(parsed_line)
if hits:
hits.count(parsed_line)
self.post_process(bam_file, sam_file, hits, taxonomy, quality, temp_path, fifo_path)
@CLI.subcommand("rnamap")
class RNAmap(Mapper):
""" Map input reads against a STAR index """
index_dir = cli.SwitchAttr(['-i', '--index_dir'], str, mandatory=True,
help="Sets the index output directory")
threads = cli.SwitchAttr(
['-t', '--threads'], cli.Range(1, 512), mandatory=False,
help="Sets the number of threads to use",
default=1)
taxonomy = cli.SwitchAttr(
['-x', '--taxonomy'], str, mandatory=False,
help="Output path for the taxonomy file; setting this option will also enable regular taxonomy output to stdout during mapping",
default='')
mapper_path = cli.SwitchAttr(['--star_path'], str, mandatory=False,
help="Path to STAR executable",
default='STAR')
samtools_path = cli.SwitchAttr(['--samtools_path'], str, mandatory=False,
help="Path to samtools executable",
default='')
temp_path = cli.SwitchAttr(['--temporary_path'], str, mandatory=False,
help="Path to temporary directory in which to generate temp files. All temp files with be automatically deleted after execution is complete.",
default='')
min_mapping_score = cli.SwitchAttr(['--min_mapping_score'], cli.Range(1, 255), mandatory=False,
help="Mimimum mapping score for saved hits (only applied to -v/--virana_hits)",
default=None)
min_alignment_score = cli.SwitchAttr(['--min_alignment_score'], cli.Range(1, 255), mandatory=False,
help="Mimimum alignment score for saved hits (only applied to -v/--virana_hits)",
default=None)
max_mismatches = cli.SwitchAttr(['--max_mismatches'], cli.Range(0, 10000000), mandatory=False,
help="Maximum number of mismatches for saved hits (only applied to -v/--virana_hits)",
default=None)
max_relative_mismatches = cli.SwitchAttr(['--max_relative_mismatches'], float, mandatory=False,
help="Maximum number of mismatches relative to read length for saved hits (only applied to -v/--virana_hits)",
default=None)
min_continiously_matching = cli.SwitchAttr(['--min_continiously_matching'], cli.Range(0, 10000000), mandatory=False,
help="Minimum number of continious matches for saved hits (only applied to -v/--virana_hits)",
default=None)
filter_complexity = cli.Flag(['--filter_complexity'],
help="Discard low-complexity reads (only applied to -v/--virana_hits). Adds some extra processing load to the mapping and may discard important information. Applies to all output files, including quality files (!)",
default=False)
bam = cli.SwitchAttr(['-b', '--bam'], str, mandatory=False,
help="Path to unsorted, unindexed output BAM file",
default='')
sam = cli.SwitchAttr(['-s', '--sam'], str, mandatory=False,
help="Path to output SAM file",
default='')
qual = cli.SwitchAttr(['-q', '--qual'], str, mandatory=False,
help="Path to output quality file",
default='')
hits = cli.SwitchAttr(['-v', '--virana_hits'], str, mandatory=False,
help="Path to bzip2-compressed tab-delimited output hit file",
default='')
sample_id = cli.SwitchAttr(['--sample_id'], str, mandatory=False,
help="Alphanumeric string ([0-9a-zA-Z_-]*) used to designate sample information within the hit and taxonomy files",
default='no_sample_id')
unmapped1 = cli.SwitchAttr(['--unmapped_end_1'], str, mandatory=False,
help="Output path to uncompressed fastq file containing unmapped reads, first ends only for paired ends.",
default='')
unmapped2 = cli.SwitchAttr(['--unmapped_end_2'], str, mandatory=False,
help="Output path to uncompressed fastq file containing unmapped reads, second ends only for paired ends.",
default='')
splice_junctions = cli.SwitchAttr(['--splice_junctions'], str, mandatory=False,
help="Input path to splice junction file (currently not implemented)",
default='')
chimeric_mappings = cli.SwitchAttr(['--chimeric_mappings'], str, mandatory=False,
help="Ouput path to SAM file containing chimeric mappings",
default='')
hit_filter = cli.SwitchAttr(
['-f', '--virana_hit_filter'], str, list=True, mandatory=False,
help="Only generate hit groups that include at last one read mapping to a reference of this reference group.",
default=[])
debug = cli.Flag(["-d", "--debug"], help="Enable debug information")
reads = cli.SwitchAttr(
['-r', '--reads'], str, list=True, mandatory=True,
help="Sets the input reads. Add this parameter twice for paired end reads.")
zipped = cli.Flag(["-z", "--zipped"], help="Input reads are zipped (e.g., using gzip")
bzipped = cli.Flag(["--bzipped"], help="Input reads are bzipped (e.g., using bzip2")
sensitive = cli.Flag(
["--sensitive"], help="If given, mapping will process slower and more sensitive")
def get_command_line(self, temp_path):
first_ends = []
second_ends = []
single_ends = []
if len(self.reads) == 2:
first, second = self.reads
first_ends.append(first)
second_ends.append(second)
elif len(self.reads) == 1:
single_ends.append(self.reads[0])
if single_ends and not first_ends and not second_ends:
reads = [','.join(single_ends)]
elif first_ends and second_ends:
reads = [','.join(first_ends), ','.join(second_ends)]
# Option: Use AllBestScore instead of OneBestScore
command_line = [self.mapper_path] + ['--runMode', 'alignReads',
'--genomeDir', self.index_dir,
'--runThreadN', str(self.threads),
'--readMatesLengthsIn', 'NotEqual',
'--outFileNamePrefix', os.path.join(
temp_path, 'out'),
'--outSAMmode', 'Full',
'--outSAMstrandField', 'None',
'--outSAMattributes', 'Standard',
'--outSAMunmapped', 'Within',
'--outStd', 'SAM',
'--outFilterMultimapNmax', '1000',
'--outSAMprimaryFlag', 'OneBestScore',
'--limitOutSAMoneReadBytes', '1000000',
'--limitOutSJcollapsed', '2000000',
'--genomeLoad', 'NoSharedMemory'] # LoadAndRemove
if self.unmapped1 or self.unmapped2:
command_line += ['--outReadsUnmapped', 'Fastx']
else:
command_line += ['--outReadsUnmapped', 'None']
if self.zipped:
command_line += ['--readFilesCommand', 'zcat']
elif self.bzipped:
command_line += ['--readFilesCommand', 'bzip2 -cd']
if self.sensitive:
command_line += ['--outFilterMultimapScoreRange', '10',
'--outFilterMismatchNmax', '60',
'--outFilterMismatchNoverLmax', '0.3',
'--outFilterScoreMin', '0',
'--outFilterScoreMinOverLread', '0.3',
'--outFilterMatchNmin', '0',
'--outFilterMatchNminOverLread', '0.66',
'--seedSearchStartLmax', '12',
'--winAnchorMultimapNmax', '50']
if self.splice_junctions:
command_line += ['--sjdbFileChrStartEnd', self.splice_junctions, '--sjdbOverhang', '75']
command_line += ['--readFilesIn'] + reads
return command_line
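# Sketch of the resulting mapper invocation for gzipped paired-end input
# (placeholder paths, not part of the original code); with default options,
# get_command_line() roughly corresponds to:
#
#   STAR --runMode alignReads --genomeDir <index_dir> --runThreadN 1 \
#        --readMatesLengthsIn NotEqual --outFileNamePrefix <temp>/out \
#        --outSAMmode Full --outSAMunmapped Within --outStd SAM \
#        --outFilterMultimapNmax 1000 --outSAMprimaryFlag OneBestScore \
#        --genomeLoad NoSharedMemory --outReadsUnmapped None \
#        --readFilesCommand zcat --readFilesIn <end1.fastq.gz> <end2.fastq.gz>
#
# The SAM stream on stdout is consumed through the FIFO set up by the
# Mapper base class.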
def post_process(self, bam_file, sam_file, hits, taxonomy, quality, temp_path, fifo_path):
try:
if self.unmapped1:
shutil.move(os.path.join(temp_path, 'out' + 'Unmapped.out.mate1'),\
self.unmapped1)
except IOError:
pass
try:
if self.unmapped2:
shutil.move(os.path.join(temp_path, 'out' + 'Unmapped.out.mate2'),\
self.unmapped2)
except IOError:
pass
try:
if self.chimeric_mappings:
shutil.move(os.path.join(temp_path, 'out' + 'Chimeric.out.sam'),\
self.chimeric_mappings)
except IOError:
pass
super(RNAmap, self).post_process(bam_file, sam_file, hits, taxonomy, quality, temp_path, fifo_path)
@CLI.subcommand("dnamap")
class DNAmap(Mapper):
""" Map input reads with BWA-MEM against a BWA index """
index_dir = cli.SwitchAttr(['-i', '--index_dir'], str, mandatory=True,
help="Sets the index output directory")
threads = cli.SwitchAttr(
['-t', '--threads'], cli.Range(1, 512), mandatory=False,
help="Sets the number of threads to use",
default=1)
taxonomy = cli.SwitchAttr(
['-x', '--taxonomy'], str, mandatory=False,
help="Output path for the taxonomy file; setting this option will also enable regular taxonomy output to stdout during mapping",
default='')
samtools_path = cli.SwitchAttr(['--samtools_path'], str, mandatory=False,
help="Path to samtools executable",
default='samtools')
temp_path = cli.SwitchAttr(['--temporary_path'], str, mandatory=False,
help="Path to temporary directory in which to generate temp files. All temp files with be automatically deleted after execution is complete.",
default='')
min_mapping_score = cli.SwitchAttr(['--min_mapping_score'], cli.Range(1, 255), mandatory=False,
help="Mimimum mapping score for saved hits (only applied to -v/--virana_hits)",
default=None)
min_alignment_score = cli.SwitchAttr(['--min_alignment_score'], cli.Range(1, 255), mandatory=False,
help="Mimimum alignment score for saved hits (only applied to -v/--virana_hits)",
default=None)
max_mismatches = cli.SwitchAttr(['--max_mismatches'], cli.Range(0, 10000000), mandatory=False,
help="Maximum number of mismatches for saved hits (only applied to -v/--virana_hits)",
default=None)
max_relative_mismatches = cli.SwitchAttr(['--max_relative_mismatches'], float, mandatory=False,
help="Maximum number of mismatches relative to read length for saved hits (only applied to -v/--virana_hits)",
default=None)
min_continiously_matching = cli.SwitchAttr(['--min_continiously_matching'], cli.Range(0, 10000000), mandatory=False,
help="Minimum number of continious matches for saved hits (only applied to -v/--virana_hits)",
default=None)
filter_complexity = cli.Flag(['--filter_complexity'],
help="Discard low-complexity reads (only applied to -v/--virana_hits). Adds some extra processing load to the mapping and may discard important information. Applies to all output files, including quality files (!)",
default=False)
sample_id = cli.SwitchAttr(['--sample_id'], str, mandatory=False,
help="Alphanumeric string ([0-9a-zA-Z_-]*) used to designate sample information within the hit and taxonomy files",
default='no_sample_id')
bam = cli.SwitchAttr(['-b', '--bam'], str, mandatory=False,
help="Path to unsorted, unindexed output BAM file",
default='')
sam = cli.SwitchAttr(['-s', '--sam'], str, mandatory=False,
help="Path to output SAM file",
default='')
qual = cli.SwitchAttr(['-q', '--qual'], str, mandatory=False,
help="Path to output quality file",
default='')
hits = cli.SwitchAttr(['-v', '--virana_hits'], str, mandatory=False,
help="Path to bzip2-compressed tab-delimited output virana hit file",
default='')
interleaved = cli.Flag(['--interleaved'],
help="Inputs FASTQ is an interleaved paired end file. ",
default=False)
zipped = cli.Flag(['-z', '--zipped'],
help="Inputs FASTQ are zipped (or gz-compressed)",
default=False)
bzipped = cli.Flag(['--bzipped'],
help="Inputs FASTQ are bzipped (or bz2-compressed)",
default=False)
fqz = cli.SwitchAttr(['--fqz'], str, mandatory=False,
help="Reads are fqz compressed and this argument specifies the path to fqzcomp's 'fqz_comp' binary",
default=False)
bam_input = cli.Flag(['--bam_input'],
help="Input reads are contained in a single BAM file.",
default=False)
hit_filter = cli.SwitchAttr(
['-f', '--virana_hit_filter'], str, list=True, mandatory=False,
help="Only generate hit groups that include at last one read mapping to a reference of this reference group.",
default=[])
debug = cli.Flag(["-d", "--debug"], help="Enable debug information")
zipped = cli.Flag(["-z", "--zipped"], help="Input reads are zipped")
sensitive = cli.Flag(
["--sensitive"], help="If given, mapping will process slower and more sensitive")
mapper_path = cli.SwitchAttr(['--bwa_path'], str, mandatory=False,
help="Path to BWA executable",
default='bwa')
reads = cli.SwitchAttr(
['-r', '--reads'], str, list=True, mandatory=True,
help="Sets the input reads. Add this parameter twice for paired end reads.")
def get_command_line(self, temp_path):
command_line = [self.mapper_path] + ['mem', '-t', str(self.threads), '-c', '2000', '-m' ,'50']
if self.interleaved:
command_line += ['-p']
command_line += [os.path.join(self.index_dir, 'index')]
reads = []
if self.bam_input:
reads = ["'<%s bam2fq %s'" % (self.samtools_path, read) for read in self.reads]
elif self.zipped:
reads = ["'<gzip -cd %s'" % read for read in self.reads]
elif self.bzipped:
reads = ["'<bzip2 -cd %s'" % read for read in self.reads]
elif self.fqz:
reads = ["'<%s -d %s'" % (self.fqz, read) for read in self.reads]
else:
reads = self.reads
command_line += reads
return command_line
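# Sketch of the resulting mapper invocation for gzipped paired-end reads
# (placeholder paths, not part of the original code):
#
#   bwa mem -t 1 -c 2000 -m 50 <index_dir>/index \
#       '<gzip -cd end1.fastq.gz' '<gzip -cd end2.fastq.gz'
#
# The '<command ...' form is taken verbatim from the code above; because the
# command line is joined and executed with shell=True in run_mapper_process,
# the single quotes keep each decompression expression as a single argument.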
@CLI.subcommand("varmap")
class VARmap(Mapper):
""" Map input reads with SMALT against a SMALT index """
index_dir = cli.SwitchAttr(['-i', '--index_dir'], str, mandatory=True,
help="Sets the index output directory")
threads = cli.SwitchAttr(
['-t', '--threads'], cli.Range(1, 512), mandatory=False,
help="Sets the number of threads to use",
default=1)
taxonomy = cli.SwitchAttr(
['-x', '--taxonomy'], str, mandatory=False,
help="Output path for the taxonomy file; setting this option will also enable regular taxonomy output to stdout during mapping",
default='')
samtools_path = cli.SwitchAttr(['--samtools_path'], str, mandatory=False,
help="Path to samtools executable",
default='')
temp_path = cli.SwitchAttr(['--temporary_path'], str, mandatory=False,
help="Path to temporary directory in which to generate temp files. All temp files with be automatically deleted after execution is complete.",
default='')
min_mapping_score = cli.SwitchAttr(['--min_mapping_score'], cli.Range(1, 255), mandatory=False,
help="Mimimum mapping score for saved hits (only applied to -v/--virana_hits)",
default=None)
min_alignment_score = cli.SwitchAttr(['--min_alignment_score'], cli.Range(1, 255), mandatory=False,
help="Mimimum alignment score for saved hits (only applied to -v/--virana_hits)",
default=None)
max_mismatches = cli.SwitchAttr(['--max_mismatches'], cli.Range(0, 10000000), mandatory=False,
help="Maximum number of mismatches for saved hits (only applied to -v/--virana_hits)",
default=None)
max_relative_mismatches = cli.SwitchAttr(['--max_relative_mismatches'], float, mandatory=False,
help="Maximum number of mismatches relative to read length for saved hits (only applied to -v/--virana_hits)",
default=None)
min_continiously_matching = cli.SwitchAttr(['--min_continiously_matching'], cli.Range(0, 10000000), mandatory=False,
help="Minimum number of continious matches for saved hits (only applied to -v/--virana_hits)",
default=None)
filter_complexity = cli.Flag(['--filter_complexity'],
help="Discard low-complexity reads (only applied to -v/--virana_hits). Adds some extra processing load to the mapping and may discard important information. Applies to all output files, including quality files (!)",
default=False)
sample_id = cli.SwitchAttr(['--sample_id'], str, mandatory=False,
help="Alphanumeric string ([0-9a-zA-Z_-]*) used to designate sample information within the hit and taxonomy files",
default='no_sample_id')
bam_input = cli.Flag(['--bam_input'],
help="Input is a bam file",
default=False)
bam = cli.SwitchAttr(['-b', '--bam'], str, mandatory=False,
help="Path to unsorted, unindexed output BAM file",
default='')
sam = cli.SwitchAttr(['-s', '--sam'], str, mandatory=False,
help="Path to output SAM file",
default='')
qual = cli.SwitchAttr(['-q', '--qual'], str, mandatory=False,
help="Path to output quality file",
default='')
hits = cli.SwitchAttr(['-v', '--virana_hits'], str, mandatory=False,
help="Path to bzip2-compressed tab-delimited output virana hit file",
default='')
hit_filter = cli.SwitchAttr(
['-f', '--virana_hit_filter'], str, list=True, mandatory=False,
help="Only generate hit groups that include at last one read mapping to a reference of this reference group.",
default=[])
debug = cli.Flag(["-d", "--debug"], help="Enable debug information")
zipped = cli.Flag(["-z", "--zipped"], help="Input reads are zipped")
sensitive = cli.Flag(
["--sensitive"], help="If given, mapping will process slower and more sensitive")
mapper_path = cli.SwitchAttr(['--smalt_path'], str, mandatory=False,
help="Path to SMALT executable",
default='smalt_x86_64')
debug = cli.Flag(["-d", "--debug"], help="Enable debug information")
reads = cli.SwitchAttr(
['-r', '--reads'], str, list=True, mandatory=True,
help="Sets the input reads. Add this parameter twice for paired end reads.")
def get_command_line(self, temp_path):
command_line = [self.mapper_path] + ['map', '-f', 'sam', '-l', '-n', str(self.threads), '-O', '-p']
if self.sensitive:
command_line += ['-x']
command_line += self.reads
return command_line
if __name__ == "__main__":
CLI.run()
|
schelhorn/virana
|
virana/vmap.py
|
Python
|
apache-2.0
| 69,616
|
[
"BWA",
"HTSeq",
"pysam"
] |
627b742ce237a25d25b819173dbe32ed41c9d53bb2e2a00fea4c56bd96e2373c
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# vncsession - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Front end to VNC session"""
import cgi
import cgitb
cgitb.enable()
from shared.functionality.vncsession import main
from shared.cgiscriptstub import run_cgi_script
run_cgi_script(main)
|
heromod/migrid
|
mig/cgi-bin/vncsession.py
|
Python
|
gpl-2.0
| 1,140
|
[
"Brian"
] |
b27f057c3615585c3fcca88a36ca56b4eeed22fb072f042dab8130b225265278
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
""" Standard parameter types for use as attributes in Incar """
__docformat__ = "restructuredtext en"
__all__ = ["SpecialVaspParam", "ExtraElectron", "Algo", "Precision", "Ediff",
"Ediffg", "Encut", "EncutGW", "FFTGrid", "Restart", "UParams", "IniWave",
"Magmom", 'Npar', 'Boolean', 'Integer', 'Choices', 'PrecFock', 'NonScf',
"System", 'PartialRestart', 'Relaxation', 'Smearing', 'Lsorbit']
from ...vasp import logger
from quantities import eV
class SpecialVaspParam(object):
""" Base type for special vasp parameters.
Special vasp parameters do something more than just print to the incar.
What *more* means depends upon the parameter.
"""
def __init__(self, value):
super(SpecialVaspParam, self).__init__()
self.value = value
""" Value derived classes will do something with. """
def __repr__(self): return "{0.__class__.__name__}({1!r})".format(self, self.value)
class Magmom(SpecialVaspParam):
""" Sets the initial magnetic moments on each atom.
There are three types of usage:
- do nothing if the instance's value is None or False, or if not a
single atom has a 'magmom' attribute
- print a string preceded by "MAGMOM = " if the instance's value is a string.
- print the actual MAGMOM string from the magnetic moments attributes
``magmom`` in the structure's atoms if anything but a string, None,
or False.
If the calculation is **not** spin-polarized, then the magnetic moment
tag is not set.
.. seealso:: `MAGMOM <http://cms.mpi.univie.ac.at/vasp/guide/node100.html>`_
"""
def __init__(self, value=True):
super(Magmom, self).__init__(value)
def incar_string(self, **kwargs):
from ...crystal import specieset
if self.value is None or self.value == False:
return None
if kwargs["vasp"].ispin == 1:
return None
if isinstance(self.value, str):
return "MAGMOM = {0}".format(self.value)
structure = kwargs['structure']
if all(not hasattr(u, 'magmom') for u in structure):
return None
result = ""
for specie in specieset(structure):
moments = [getattr(u, 'magmom', 0e0) for u in structure if u.type == specie]
tupled = [[1, moments[0]]]
for m in moments[1:]:
# Change precision from 1.e-12 to 1.e-1, per Vladan, 2013-10-09
#if abs(m - tupled[-1][1]) < 1e-12: tupled[-1][0] += 1
if abs(m - tupled[-1][1]) < 1e-1:
tupled[-1][0] += 1
else:
tupled.append([1, m])
for i, m in tupled:
# Change format from .2f to .1f, per Vladan, 2013-10-09
if i == 1:
result += "{0:.1f} ".format(m)
else:
result += "{0}*{1:.1f} ".format(i, m)
return 'MAGMOM = {0}'.format(result.rstrip())
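# Illustrative example (hypothetical values, not part of the original code):
# for a spin-polarized run on a structure whose atoms of one specie carry
# magmom attributes [0.0, 0.0, 1.0, 1.0, 1.0], the run-length encoding above
# yields
#
#   MAGMOM = 2*0.0 3*1.0
#
# Moments that differ by less than 0.1 are collapsed into the same "n*m" group.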
class System(SpecialVaspParam):
""" System title to use for calculation.
Adds system name to OUTCAR. If value is the python object ``True``, the
structure is checked for a ``name`` attribute. If it is False or None,
SYSTEM is not added to the incar. In all other case, tries to convert the
result to a string and use that.
The call is protected by a try statement.
.. seealso:: `SYSTEM <http://cms.mpi.univie.ac.at/vasp/guide/node94.html>`_
"""
def __init__(self, value): super(System, self).__init__(value)
def incar_string(self, **kwargs):
if self.value is None or self.value is False:
return None
try:
if self.value is True:
if not hasattr(kwargs["structure"], "name"):
return None
name = kwargs["structure"].name.rstrip().lstrip()
if len(name) == 0:
return None
return "SYSTEM = {0}".format(name)
return "SYSTEM = {0}".format(self.value)
except:
return None
class Npar(SpecialVaspParam):
""" Parallelization over bands.
Npar defines how many nodes work on one band.
It can be set to a particular number:
>>> vasp.npar = 2
Or it can be deduced automatically. Different schemes are available:
- power of two: npar is set to the largest power of 2 which divides the
number of processors.
>>> vasp.npar = "power of two"
If the number of processors is not a power of two, prints nothing.
- square root: npar is set to the square root of the number of processors.
>>> vasp.npar = "sqrt"
.. seealso:: `NPAR <http://cms.mpi.univie.ac.at/vasp/guide/node138.html>`_
"""
def __init__(self, value): super(Npar, self).__init__(value)
def incar_string(self, **kwargs):
from math import log, sqrt
if self.value is None:
return None
if not isinstance(self.value, str):
if self.value < 1:
return None
return "NPAR = {0}".format(self.value)
comm = kwargs.get('comm', None)
n = getattr(comm, 'n', getattr(comm, 'size', -1))
if n == 1:
return None
if self.value == "power of two":
m = int(log(n) / log(2))
for i in range(m, 0, -1):
if n % 2**i == 0:
return "NPAR = {0}".format(i)
return None
if self.value == "sqrt":
return "NPAR = {0}".format(int(sqrt(n) + 0.001))
raise ValueError("Unknown request npar = {0}".format(self.value))
class ExtraElectron(SpecialVaspParam):
""" Sets number of electrons relative to neutral system.
Gets the number of electrons in the (neutral) system. Then adds value to
it and computes with the resulting number of electrons.
>>> vasp.extraelectron = 0 # charge neutral system
>>> vasp.extraelectron = 1 # charge -1 (1 extra electron)
>>> vasp.extraelectron = -1 # charge +1 (1 extra hole)
:param integer value:
Number of electrons to add to charge neutral system. Defaults to 0.
.. seealso:: `NELECT <http://cms.mpi.univie.ac.at/vasp/vasp/NELECT.html>`_
"""
def __init__(self, value=0): super(ExtraElectron, self).__init__(value)
def nelectrons(self, vasp, structure):
""" Total number of electrons in the system """
from math import fsum
# constructs dictionary of valence charges
valence = {}
for key, value in vasp.species.items():
valence[key] = value.valence
# sums up charge.
return fsum(valence[atom.type] for atom in structure)
def incar_string(self, **kwargs):
# gets number of electrons.
charge_neutral = self.nelectrons(kwargs['vasp'], kwargs['structure'])
# then prints incar string.
if self.value == 0:
return "# NELECT = {0} Charge neutral system".format(charge_neutral)
elif self.value > 0:
return "NELECT = {0} # negatively charged system ({1})"\
.format(charge_neutral + self.value, -self.value)
else:
return "NELECT = {0} # positively charged system (+{1})"\
.format(charge_neutral + self.value, -self.value)
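# Illustrative example (hypothetical numbers, not part of the original code):
# if the neutral structure contains 8 valence electrons and
# vasp.extraelectron = 1, incar_string() returns
#
#   NELECT = 9 # negatively charged system (-1)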
class Algo(SpecialVaspParam):
""" Electronic minimization.
Defines the kind of algorithm vasp will run.
- very fast
- fast, f (default)
- normal, n
- all, a
- damped, d
- Diag
- conjugate, c (vasp 5)
- subrot (vasp 5)
- eigenval (vasp 5)
- Nothing (vasp 5)
- Exact (vasp 5)
- chi
- gw
- gw0
- scgw
- scgw0
If :py:data:`is_vasp_4 <pylada.is_vasp_4>` is an existing configuration
variable of :py:mod:`pylada` the parameters marked as vasp 5 will fail.
.. warning:: The string None is not allowed, as it would lead to
confusion with the python object None. Please use "Nothing" instead.
The python object None will simply not print the ALGO keyword to the
INCAR file.
.. note:: By special request, "fast" is the default algorithm.
.. seealso:: `ALGO <http://cms.mpi.univie.ac.at/vasp/vasp/ALGO_tag.html>`_
"""
def __init__(self, value="fast"): super(Algo, self).__init__(value)
@property
def value(self): return self._value
@value.setter
def value(self, value):
if value is None:
self._value = None
return None
from pylada import is_vasp_4
if not hasattr(value, 'lower'):
raise TypeError("ALGO cannot be set with {0}.".format(value))
lower = value.lower().rstrip().lstrip()
lower = lower.replace('_', '')
lower = lower.replace('-', '')
if is_vasp_4 \
and (lower[0] in ['c', 's', 'e']
or lower in ["nothing", "subrot", "exact",
"gw", "gw0", "chi", "scgw",
"scgw0"]):
raise ValueError("algo value ({0}) is not valid with VASP 4.6.".format(value))
if lower == "diag":
value = "Diag"
elif lower == "nothing":
value = "Nothing"
elif lower == "chi":
value = "chi"
elif lower == "gw":
value = "GW"
elif lower == "gw0":
value = "GW0"
elif lower == "scgw":
value = "scGW"
elif lower == "scgw0":
value = "scGW0"
elif lower[0] == 'v':
value = "Very_Fast" if is_vasp_4 else 'VeryFast'
elif lower[0] == 'f':
value = "Fast"
elif lower[0] == 'n':
value = "Normal"
elif lower[0] == 'd':
value = "Damped"
elif lower[0] == 'a':
value = "All"
elif lower[0] == 'c':
value = "Conjugate"
elif lower[0] == 's':
value = "Subrot"
elif lower[0] == 'e':
value = "Eigenval"
else:
self._value = None
raise ValueError("algo value ({0!r}) is invalid.".format(value))
self._value = value
def incar_string(self, **kwargs):
if self.value is None:
return None
return "ALGO = {0}".format(self.value)
class Ediff(SpecialVaspParam):
""" Sets the convergence criteria (per atom) for electronic minimization.
- value > 0e0: the tolerance is multiplied by the number of atoms in the
system. This makes tolerance consistent from one system to the next.
- value < 0e0: tolerance is given as absolute value, without multiplying
by size of system.
.. seealso:: `EDIFF <http://cms.mpi.univie.ac.at/vasp/guide/node105.html>`_
"""
def __init__(self, value):
""" Creates *per atom* tolerance. """
super(Ediff, self).__init__(value)
print("vasp/incar/_params: Ediff.const: value: %s" % (value,))
def incar_string(self, **kwargs):
if self.value is None:
return
if self.value < 0:
return "EDIFF = {0} ".format(-self.value)
res = "EDIFF = {0} ".format(self.value * float(len(kwargs["structure"])))
print("vasp/incar/_params: Ediff.incar_string: res: %s" % (res,))
return res
def __repr__(self):
return "{0.__class__.__name__}({0.value!r})".format(self)
class Ediffg(SpecialVaspParam):
""" Sets the convergence criteria (per atom) for ionic minimization.
- value > 0e0: the tolerance is multiplied by the number of atoms in the
system. This makes tolerance consistent from one system to the next.
- value < 0e0: tolerance is given as is (negative), and applies to forces.
.. seealso:: `EDIFFG <http://cms.mpi.univie.ac.at/vasp/guide/node107.html>`_
"""
def __init__(self, value):
""" Creates *per atom* tolerance. """
super(Ediffg, self).__init__(value)
def incar_string(self, **kwargs):
if self.value is None:
return
if self.value < 0:
return "EDIFFG = {0} ".format(self.value)
return "EDIFFG = {0} ".format(self.value * float(len(kwargs["structure"])))
def __repr__(self):
return "{0.__class__.__name__}({0.value!r})".format(self)
class Encut(SpecialVaspParam):
""" Defines cutoff factor for calculation.
There are three ways to set this parameter:
- if value is floating point and 0 < value <= 3: then the cutoff is
``value * ENMAX``, where ENMAX is the maximum recommended cutoff for
the species in the system.
- if value > 3 eV, then prints encut is exactly value (in eV). Any energy
unit is acceptable.
- if value < 0 eV or None, does not print anything to INCAR.
.. seealso:: `ENCUT <http://cms.mpi.univie.ac.at/vasp/vasp/ENCUT_tag.html>`_
"""
KEY = "ENCUT"
""" Corresponding VASP key. """
units = eV
""" Units with which to sign cutoff. """
def __init__(self, value): super(Encut, self).__init__(value)
@property
def value(self):
""" Returns value signed by a physical unit. """
if self._value is None:
return None
if self._value <= 1e-12:
return None
return self._value if self._value <= 3.0 else self._value * self.units
@value.setter
def value(self, value):
""" Sets value taking unit into account. """
if hasattr(value, 'rescale'):
value = value.rescale(self.units).magnitude
self._value = value
def incar_string(self, **kwargs):
from ...crystal import specieset
value = self._value
if value is None:
return None
elif value < 1e-12:
return None
elif value >= 1e-12 and value <= 3.0:
types = specieset(kwargs["structure"])
encut = max(kwargs["vasp"].species[type].enmax for type in types)
if hasattr(encut, 'rescale'):
encut = float(encut.rescale(eV))
return "{0} = {1} ".format(self.KEY, encut * value)
return "{0} = {1}".format(self.KEY, value)
class EncutGW(Encut):
""" Defines cutoff factor for GW calculation.
There are three ways to set this parameter:
- if value is floating point and 0 < value <= 3: then the cutoff is
``value * ENMAX``, where ENMAX is the maximum recommended cutoff for
the species in the system.
- if value > 3 eV, then prints encut is exactly value (in eV). Any energy
unit is acceptable.
- if value < 0 eV or None, does not print anything to INCAR.
.. seealso:: `ENCUTGW
<http://cms.mpi.univie.ac.at/vasp/vasp/ENCUTGW_energy_cutoff_response_function.html>`_
"""
KEY = "ENCUTGW"
def __init__(self, value): super(EncutGW, self).__init__(value)
class FFTGrid(SpecialVaspParam):
""" FFT mesh of the wavefunctions.
This must be a sequence of three integers.
.. seealso:: `NGX, NGY, NGZ
<http://cms.mpi.univie.ac.at/vasp/guide/node93.html>`_
"""
def __init__(self, value): super(FFTGrid, self).__init__(value)
@property
def value(self): return self._value
@value.setter
def value(self, value):
from numpy import array
if value is None:
self._value = None
return
if len(list(value)) != 3:
raise TypeError("FFTGrid expects three numbers.")
self._value = array(value)
def incar_string(self, **kwargs):
if self.value is None:
return None
return "NGX = {0[0]}\nNGY = {0[1]}\nNGZ = {0[2]}".format(self.value)
class PartialRestart(SpecialVaspParam):
""" Restart from previous run.
It is either an vasp extraction object of some kind, or None. In the
latter case, the calculation starts from scratch. However, if an
extraction object exists *and* the calculation it refers to was
successful, then it will check whether WAVECAR and CHGCAR exist and set
ISTART_ and ICHARG_ accordingly. It also checks whether
:py:attr:`nonscf <incar.Incar.nonscf>` is True or False, and sets
ICHARG_ accordingly. The CONTCAR file is *never* copied from the
previous run. For an alternate behavior, see :py:class:`Restart`.
.. seealso:: ICHARG_, ISTART_, :py:class:`Restart`
"""
def __init__(self, value): super(PartialRestart, self).__init__(value)
def incar_string(self, **kwargs):
from os.path import join, exists, getsize
from shutil import copy
from ...misc import copyfile
from .. import files
if self.value is None or self.value.success == False:
if kwargs['vasp'].nonscf:
kwargs['vasp'].icharg = 12
logger.warning('vasp/incar/_params: PartialRestart: no luck')
else:
logger.warning('vasp/incar/_params: PartialRestart: self.val.dir: %s' %
self.value.directory)
ewave = exists(join(self.value.directory, files.WAVECAR))
if ewave:
ewave = getsize(join(self.value.directory, files.WAVECAR)) > 0
if ewave:
copy(join(self.value.directory, files.WAVECAR), ".")
kwargs['vasp'].istart = 1
else:
kwargs['vasp'].istart = 0
echarg = exists(join(self.value.directory, files.CHGCAR))
if echarg:
echarg = getsize(join(self.value.directory, files.CHGCAR)) > 0
if echarg:
copy(join(self.value.directory, files.CHGCAR), ".")
kwargs['vasp'].icharg = 1
else:
kwargs['vasp'].icharg = 0 if kwargs['vasp'].istart == 1 else 2
if getattr(kwargs["vasp"], 'nonscf', False):
kwargs['vasp'].icharg += 10
copyfile(join(self.value.directory, files.EIGENVALUES), nothrow='same exists',
nocopyempty=True)
copyfile(join(self.value.directory, files.WAVEDER), files.WAVEDER,
nothrow='same exists', symlink=getattr(kwargs["vasp"], 'symlink', False),
nocopyempty=True)
copyfile(join(self.value.directory, files.TMPCAR), files.TMPCAR,
nothrow='same exists', symlink=getattr(kwargs["vasp"], 'symlink', False),
nocopyempty=True)
if kwargs['vasp'].lsorbit == True:
kwargs['vasp'].nbands = 2 * self.value.nbands
return None
class Restart(PartialRestart):
""" Return from previous run from which to restart.
Restart from a previous run, as described in :py:class:`PartialRestart`.
However, unlike :py:class:`PartialRestart`, the CONTCAR is copied from
the previous run, if it exists and is not empty.
.. seealso:: ICHARG_, ISTART_
"""
def __init__(self, value): super(Restart, self).__init__(value)
def incar_string(self, **kwargs):
from os.path import join
from os import getcwd
from ...misc import copyfile
from .. import files
result = super(Restart, self).incar_string(**kwargs)
if self.value is not None and self.value.success:
copyfile(join(self.value.directory, files.CONTCAR), files.POSCAR,
nothrow='same exists', symlink=getattr(kwargs["vasp"], 'symlink', False),
nocopyempty=True)
logger.warning('vasp/incar/_params: Restart CONTCAR: self.val.dir: %s' %
self.value.directory)
logger.debug('vasp/incar/_params: Restart: getcwd(): %s' % getcwd())
logger.debug('vasp/incar/_params: Restart: result: %s' % result)
return result
class NonScf(SpecialVaspParam):
""" Whether to perform a self-consistent or non-self-consistent run.
Accepts only True or False(default). This parameter works with
:py:class:`Restart` to determine the value to give :py:attr:`icharg
<pylada.vasp.incar.Incar.icharg>`
"""
def __init__(self, value): super(NonScf, self).__init__(value)
@property
def value(self): return self._value
@value.setter
def value(self, value):
if isinstance(value, str):
if len(value) == 0:
value = False
elif value.lower() == "true"[:min(len(value), len("true"))]:
value = True
elif value.lower() == "false"[:min(len(value), len("false"))]:
value = False
else:
raise RuntimeError("Uknown value for nonscf: {0}").format(value)
self._value = value == True
def incar_string(self, **kwargs): return None
def __repr__(self): return "{0.__class__.__name__}({0.value!r})".format(self)
class UParams(SpecialVaspParam):
""" Sets U, nlep, and enlep parameters.
The U, nlep, and enlep parameters of the atomic species are set at the
same time as the pseudo-potentials. This object merely sets up the incar
with right input.
However, it does accept one parameter, which can be "off", "on", "occ" or
"all" wich defines the level of verbosity of VASP (with respect to the
parameters).
.. seealso:: `LDAU, LDAUTYPE, LDAUL, LDAUPRINT
<http://cms.mpi.univie.ac.at/vasp/vasp/On_site_Coulomb_interaction_L_S_DA_U.html>`_
"""
def __init__(self, value):
import re
if value is None:
value = 0
elif hasattr(value, "lower"):
value = value.lower()
if value == "off":
value = 0
elif value == "on":
value = 1
elif None != re.match(r"\s*occ(upancy)?\s*", value):
value = 1
elif None != re.match(r"\s*(all|pot(ential)?)\s*", value):
value = 2
super(UParams, self).__init__(value)
def incar_string(self, **kwargs):
from ...crystal import specieset
from ... import error
types = specieset(kwargs['structure'])
species = kwargs['vasp'].species
# existence and sanity check
has_U, which_type = False, None
for type in types:
specie = species[type]
if len(specie.U) == 0:
continue
if len(specie.U) > 4:
raise error.ValueError("More than 4 channels for U/NLEP parameters")
has_U = True
# checks consistency.
which_type = specie.U[0]["type"]
for l in specie.U[1:]:
if which_type != l["type"]:
raise error.ValueError("LDA+U/NLEP types are not consistent across species.")
if not has_U:
return "# no LDA+U/NLEP parameters"
# Prints LDA + U parameters
result = "LDAU = .TRUE.\nLDAUPRINT = {0}\nLDAUTYPE = {1}\n".format(self.value, which_type)
for i in range(max(len(species[type].U) for type in types)):
line = "LDUL{0}=".format(i + 1),\
"LDUU{0}=".format(i + 1),\
"LDUJ{0}=".format(i + 1),\
"LDUO{0}=".format(i + 1)
for type in types:
specie = species[type]
a = -1, 0e0, 0e0, 1
if len(specie.U) <= i:
pass
else:
if specie.U[i]["func"] == "U":
a = [specie.U[i]["l"], specie.U[i]["U"], specie.U[i]["J"], 1]
elif specie.U[i]["func"] == "nlep":
a = [specie.U[i]["l"], specie.U[i]["U0"], 0e0, 2]
elif specie.U[i]["func"] == "enlep":
a = [specie.U[i]["l"], specie.U[i]["U0"], specie.U[i]["U1"], 3]
else:
raise RuntimeError("Debug Error.")
if hasattr(a[1], "rescale"):
a[1] = a[1].rescale("eV")
if hasattr(a[2], "rescale"):
a[2] = a[2].rescale("eV")
line = "{0[0]} {1[0]}". format(line, a),\
"{0[1]} {1[1]:18.10e}". format(line, a),\
"{0[2]} {1[2]:18.10e}".format(line, a),\
"{0[3]} {1[3]}". format(line, a)
result += "\n{0}\n{1}\n{2}\n{3}\n".format(*line)
return result
def __repr__(self):
return "{0.__class__.__name__}({1!r})".format(self, ["off", "on", "all"][self.value])
class Boolean(SpecialVaspParam):
""" Any boolean vasp parameters.
    Python is very liberal in how it converts any object to a boolean, e.g. an
    empty dictionary is false while a non-empty dictionary is true. In order
    to keep this behavior, the value given to this parameter is kept as is for
    as long as possible, and converted only when writing the incar. The only
    difference with the python behavior is that strings (which normally
    evaluate to true or false depending on whether or not they are empty)
    must be "True" or "False", or variations thereof; 'on' and 'off'
    evaluate to True and False, respectively. The empty string will
    evaluate to the VASP default (i.e. equivalent to using None).
"""
def __init__(self, key, value):
super(Boolean, self).__init__(value)
self.key = key
""" VASP key corresponding to this input. """
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if value is None:
self._value = None
return
elif isinstance(value, str):
      value = value.lstrip().rstrip().lower()
      if len(value) == 0:
        self._value = None
        return
elif value == "on":
value = True
elif value == "off":
value = False
elif value == "true"[:min(len(value), len("true"))]:
value = True
elif value == "false"[:min(len(value), len("false"))]:
value = False
else:
raise TypeError("Cannot interpret string {0} as a boolean.".format(value))
self._value = value == True
def incar_string(self, **kwargs):
    value = self._value
    if isinstance(value, str):
      if len(value) == 0:
        value = None
      elif value.lower() == "true"[:len(value)]:
        value = True
      else:
        value = False
    if value is None:
      return None
    return "{0} = {1}".format(self.key.upper(), ".TRUE." if bool(value) else ".FALSE.")
def __repr__(self):
""" Representation of this object. """
return "{0.__class__.__name__}({1!r}, {2!r})".format(self, self.key, self.value)
class Integer(SpecialVaspParam):
""" Any integer vasp parameters.
The value is always of type integer. Other types are converted to an
integer where possible, and will throw TypeError otherwise.
"""
def __init__(self, key, value):
super(Integer, self).__init__(value)
self.key = key
""" VASP key corresponding to this input. """
@property
def value(self): return self._value
@value.setter
def value(self, value):
if value is None:
self._value = None
return
try:
self._value = int(value)
except:
raise TypeError("Could not evaluate {0} as an integer.".format(value))
def incar_string(self, **kwargs):
if self.value is None:
return None
return "{0} = {1}".format(self.key.upper(), self.value)
def __repr__(self):
""" Representation of this object. """
return "{0.__class__.__name__}({1}, {2})".format(self, repr(self.key), repr(self.value))
class Choices(SpecialVaspParam):
""" Vasp parameters with a limited set of choices.
Initializes the Choices-type vasp parameters.
:param key:
      Name of the VASP parameter, e.g. "precfock". It need not be in
      uppercase; in fact, lower case is preferred as more pythonic.
:param choices:
Dictionary where key is an allowed VASP input for this parameter.
To each key is associated a list (or set), with allowable forms
which will translate to the key in the incar. A modified copy of
this dictionary is owned by the instance being initialized. All
keys and items should be meaningfully convertible to strings.
:param default:
Option from ``choices`` to use as default.
.. note:: The keys are case-sensitive. The values are not.
"""
def __init__(self, key, choices, default=None):
self.key = key
""" VASP key corresponding to this input. """
self.choices = {}
""" Allowable set of choices. """
for key, items in choices.items():
self.choices[key] = [u.lower() if hasattr(u, 'lower') else u for u in items]
self.choices[key].append(key.lower() if hasattr(key, 'lower') else key)
super(Choices, self).__init__(default)
@property
def value(self): return self._value
@value.setter
def value(self, value):
if value is None:
self._value = None
return
if hasattr(value, 'lower'):
value = value.lower()
for key, items in self.choices.items():
if value in items:
self._value = key
return
raise ValueError(
"{0} is not an acceptable choice for {1.key}: {1.choices}.".format(value, self))
def incar_string(self, **kwargs):
if self.value is None:
return None
return "{0} = {1}".format(self.key.upper(), self.value)
def __repr__(self):
""" Representation of this object. """
return "{0.__class__.__name__}({1}, {2}, {3})"\
.format(self, repr(self.key), repr(self.choices), repr(self.value))
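# Illustrative sketch, not part of the original module; the key and the choice
# dictionary below are hypothetical and only exercise the case-insensitive
# lookup implemented above.
def _demo_choices():
  algo = Choices('algo', {'Normal': ['normal'], 'Fast': ['fast', 'f']})
  algo.value = 'F'
  assert algo.value == 'Fast'
  assert algo.incar_string() == 'ALGO = Fast'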
class PrecFock(Choices):
""" Sets up FFT grid in hartree-fock related routines.
Allowable options are:
- low
- medium
- fast
- normal
- accurate
.. note:: The values are not case-sensitive.
.. seealso:: `PRECFOCK <http://cms.mpi.univie.ac.at/vasp/vasp/PRECFOCK_FFT_grid_in_HF_related_routines.html>`_
"""
def __init__(self, value=None):
choices = {'Low': ['low'], 'Medium': ['medium'], 'Fast': ['fast'],
'Normal': ['normal'], 'Accurate': ['accurate']}
super(PrecFock, self).__init__("PRECFOCK", choices, value)
def __repr__(self):
return "{0.__class__.__name__}({0.value!r})".format(self)
class Precision(Choices):
""" Sets accuracy of calculation.
- accurate (default)
- low
      - medium
      - normal
      - high
      - single
.. seealso:: `PREC <http://cms.mpi.univie.ac.at/vasp/vasp/PREC_tag.html>`_
"""
def __init__(self, value='accurate'):
choices = {'Accurate': ['accurate'], 'Low': ['low'], 'Normal': ['normal'],
'Medium': ['medium'], 'High': ['high'], 'Single': ['single']}
super(Precision, self).__init__('PREC', choices, value)
def __repr__(self):
return "{0.__class__.__name__}({0.value!r})".format(self)
class IniWave(Choices):
""" Specifies how to setup initial wavefunctions.
- 0, jellium
- 1, random
.. seealso:: `INIWAV <http://cms.mpi.univie.ac.at/vasp/guide/node103.html>`_
"""
def __init__(self, value=None):
choices = {0: ['jellium'], 1: ['random']}
super(IniWave, self).__init__('INIWAV', choices, value)
def __repr__(self):
return "{0.__class__.__name__}({1!r})".format(self, self.choices[self.value][0])
class Relaxation(SpecialVaspParam):
""" Sets type of relaxation.
Defaults to None, eg use VASP defaults for ISIF_, NSW_, IBRION_, POTIM_.
It can be set to a single value, or to a tuple of up to four elements:
>>> vasp.relaxation = "static"
>>> vasp.relaxation = "static", 20
- first argument can be "static", or a combination of "ionic",
"cellshape", and "volume". The combination must be allowed by
ISIF_. It can also be an integer, in which case ISIF_ is set
directly.
- second (optional) argument is NSW_
- third (optional) argument is IBRION_
- fourth (optional) argument is POTIM_
  .. warning:: When the first parameter is one of "cellshape", "volume",
    "ionic" and yet the second (NSW_) is None, 0 or not present, then NSW_
    is set to 50. The assumption is that when you ask to relax, you do
    indeed mean to relax.
  .. warning:: When the first parameter is one of "cellshape", "volume",
    "ionic" and IBRION_ is not specified or None, then IBRION_ is set to 2.
    In contrast, VASP defaults IBRION_ to zero, which is rarely what is
    wanted in that situation.
.. _ISIF: http://cms.mpi.univie.ac.at/vasp/guide/node112.html
.. _NSW: http://cms.mpi.univie.ac.at/vasp/guide/node108.html
.. _IBRION: http://cms.mpi.univie.ac.at/vasp/guide/node110.html
.. _POTIM: http://cms.mpi.univie.ac.at/vasp/vasp/POTIM_tag.html
"""
def __init__(self, isif=None, nsw=None, ibrion=None, potim=None):
super(Relaxation, self).__init__((isif, nsw, ibrion, potim))
@property
def value(self):
if self.isif is None and self.ibrion is None and self.potim is None and self.nsw is None:
return None
result = [None, self.nsw, self.ibrion, self.potim]
if self.ibrion == -1 or self.nsw == 0:
result[0] = 'static'
result[2] = None
elif self.isif is not None and self.ibrion != -1:
if result[0] is None:
result[0] = ''
if self.isif < 5:
result[0] += ' ionic'
if self.isif > 2 and self.isif < 7:
result[0] += ' cellshape'
if self.isif in [3, 6, 7]:
result[0] += ' volume'
result[0] = result[0].lstrip()
if self.nsw == 50 and self.ibrion is None and self.potim is None:
result[1] = None
for i in range(4):
if result[-1] is None:
result = result[:-1]
if len(result) == 1:
return result[0]
if len(result) == 3 and result[0] != 'static' and result[2] == 2:
return tuple(result[:2]) if result[1] != 50 else result[0]
return tuple(result)
@value.setter
def value(self, args):
import re
if 'nsw' not in self.__dict__:
self.nsw = None
if 'isif' not in self.__dict__:
self.isif = None
if 'ibrion' not in self.__dict__:
self.ibrion = None
if 'potim' not in self.__dict__:
self.potim = None
if args == None:
self.isif = None
return
isif, nsw, ibrion, potim = None, None, None, None
if hasattr(args, 'lower'):
dof = args.lower().rstrip().lstrip()
elif hasattr(args, '__len__') and hasattr(args, '__getitem__'):
if len(args) > 0:
if hasattr(args[0], 'lower'):
dof = args[0].lower().rstrip().lstrip()
elif args[0] is None:
isif, dof = None, None
else:
isif, dof = int(args[0]), None
if len(args) > 1 and args[1] is not None:
nsw = int(args[1])
if len(args) > 2 and args[2] is not None:
ibrion = int(args[2])
if len(args) > 3 and args[3] is not None:
potim = float(args[3])
else:
isif, dof = int(args), None
if dof is not None:
if dof == 'all':
dof = 'ionic cellshape volume'
ionic = re.search("ion(ic|s)?", dof) is not None
cellshape = re.search(r"cell(\s+|-|_)?(?:shape)?", dof) is not None
volume = re.search("volume", dof) is not None
# static calculation.
if (not ionic) and (not cellshape) and (not volume):
if dof != 'static':
raise RuntimeError("Unkown value for relaxation: {0}.".format(dof))
isif = 2
ibrion = -1
else: # Some kind of relaxations.
# ionic calculation.
if ionic and (not cellshape) and (not volume):
isif = 2
elif ionic and cellshape and (not volume):
isif = 4
elif ionic and cellshape and volume:
isif = 3
elif (not ionic) and cellshape and volume:
isif = 6
elif (not ionic) and cellshape and (not volume):
isif = 5
elif (not ionic) and (not cellshape) and volume:
isif = 7
elif ionic and (not cellshape) and volume:
raise RuntimeError("VASP does not allow relaxation of atomic position "
"and volume at constant cell-shape.\n")
if nsw == 0:
raise ValueError("Cannot set nsw < 1 and perform strain relaxations.")
elif nsw is None:
nsw = 50
if isif is None and dof is not None:
raise ValueError("Unexpected argument to relaxation: {0}.".format(dof))
if nsw is not None:
self.nsw = nsw
if isif is not None:
self.isif = isif
if ibrion is not None:
self.ibrion = ibrion
if potim is not None:
self.potim = potim
if self.ibrion is None and self.nsw is not None and self.nsw > 1 and self.potim is None:
self.ibrion = 2
def incar_string(self, **kwargs):
if self.value is None:
return None
result = "ISIF = {0}\n".format(self.isif) if self.isif is not None else ''
if self.nsw != None and self.ibrion != -1 and self.nsw != 0:
result += "NSW = {0}\n".format(self.nsw)
if self.potim != None and self.ibrion != -1 and self.nsw != 0:
result += "POTIM = {0}\n".format(self.potim)
if self.ibrion != None:
result += "IBRION = {0}\n".format(self.ibrion)
vasp = kwargs['vasp']
structure = kwargs['structure']
if self.ibrion != -1 and vasp.ediffg is not None:
if vasp.ediffg < vasp.ediff and vasp.ediffg > 0 and vasp.ediff > 0:
raise RuntimeError(
"Using ediffg (positive) smaller than ediff does not make sense.")
if vasp.ediffg > 0 and vasp.ediff < 0 and abs(vasp.ediff) > vasp.ediffg * float(len(structure)):
raise RuntimeError(
"Using ediffg (positive) smaller than ediff does not make sense.")
if result[-1] == '\n':
result = result[:-1]
return result
def __repr__(self):
value = self.value
if value is None:
return "{0.__class__.__name__}(None)".format(self)
if isinstance(value, str):
return "{0.__class__.__name__}({1!r})".format(self, value)
return "{0.__class__.__name__}({1})".format(self, repr(self.value)[1:-1])
class Smearing(SpecialVaspParam):
""" Value of the smearing used in the calculation.
It can be specified as a string:
>>> vasp.smearing = "type", x
Where type is any of "fermi", "gaussian", "mp N", "tetra",
"metal", or "insulator", and x is the energy scale.
- fermi: use a Fermi-Dirac broadening.
- gaussian: uses a gaussian smearing.
- mp N: is for Methfessel-Paxton, where N is an integer indicating the
      order of the MP method.
- tetra: tetrahedron method without Bloechl correction.
- bloechl: means tetrahedron method with Bloechl correction.
- metal: equivalent to "mp 1 x"
- insulator: is equivalent to "bloechl".
- dynamic: corresponds to ISMEAR=-3.
.. seealso:: `ISMEAR, SIGMA <http://cms.mpi.univie.ac.at/vasp/guide/node124.html>`_
"""
def __init__(self, type=None, sigma=None):
super(Smearing, self).__init__((type, sigma))
@property
def value(self):
if self.ismear is None and self.sigma is None:
return None
ismear = {-1: 'fermi', 0: 'gaussian', 1: 'metal', -5: 'insulator', -3: 'dynamic',
-4: 'tetra', 2: 'mp 2', 3: 'mp 3', None: None}[self.ismear]
if self.sigma is None:
return ismear
sigma = self.sigma
if not hasattr(sigma, 'rescale'):
sigma *= eV
return ismear, sigma
@value.setter
def value(self, args):
if args is None:
self.ismear, self.sigma = None, None
return
if isinstance(args, str):
ismear, sigma = args, None
elif len(args) == 1:
ismear, sigma = args[0], None
elif len(args) == 2:
ismear, sigma = args
else:
raise ValueError("Incorrect input to smearing: {0}.".format(args))
if hasattr(ismear, 'lower'):
ismear = ismear.rstrip().lstrip().replace(' ', '').lower()
ismear = {'fermi': -1, 'gaussian': 0, 'metal': 1, 'bloechl': -5, 'dynamic': -3,
'tetra': -4, 'insulator': -5, 'mp1': 1, 'mp2': 2, 'mp3': 3, None: None}[ismear]
elif ismear is not None:
ismear = int(ismear)
if ismear < -5 or ismear > 3:
raise RuntimeError("Unknown value for ismear: {0}.\n".format(ismear))
self.ismear = ismear
if sigma is not None:
self.sigma = sigma
if not hasattr(self.sigma, 'rescale'):
self.sigma *= eV
else:
self.sigma = self.sigma.rescale(eV)
if len(self.sigma.shape) > 0 and self.ismear is not None and self.ismear != -3:
raise RuntimeError('Cannot use more than one smearing '
'parameter with ismear={0}.'.format(self.ismear))
elif len(args) == 2:
self.sigma = None
def incar_string(self, **kwargs):
result = ''
if self.ismear is not None:
result = 'ISMEAR = {0}\n'.format(self.ismear)
if self.sigma is not None:
sigma = self.sigma.rescale(eV).magnitude if hasattr(self.sigma, 'rescale')\
else self.sigma
if len(sigma.shape) == 0:
result += 'SIGMA = {0}'.format(sigma)
else:
if self.ismear is None:
result += 'ISMEAR = -3\n'
result += 'SIGMA ='
for u in sigma:
result += ' {0}'.format(u)
if len(result) == 0:
return None
return result[:-1] if result[-1] == '\n' else result
def __repr__(self):
value = self.value
if value is None:
return "{0.__class__.__name__}(None)".format(self)
if isinstance(value, str):
return "{0.__class__.__name__}({1!r})".format(self, value)
return "{0.__class__.__name__}({1})".format(self, repr(self.value)[1:-1])
class Lsorbit(Boolean):
""" Run calculation with spin-orbit coupling.
Accepts None, True, or False.
If True, then sets :py:attr:`~pylada.vasp.incar.Incar.nonscf` to True and
:py:attr:`~pylada.vasp.incar.Incar.ispin` to 2.
"""
def __init__(self, value=None):
super(Lsorbit, self).__init__('LSORBIT', value)
def incar_string(self, **kwargs):
if self.value == True:
kwargs['vasp'].nonscf = True
kwargs['vasp'].ispin = 2
return super(Lsorbit, self).incar_string(**kwargs)
|
pylada/pylada-light
|
src/pylada/vasp/incar/_params.py
|
Python
|
gpl-3.0
| 45,787
|
[
"CRYSTAL",
"DIRAC",
"Gaussian",
"VASP"
] |
0db37bf58836df832e551df7d3106108a846cfe5518dbe1d13a64dc6c199c79c
|
#!/usr/bin/python3
#
# Copyright (c) 2012 Mikkel Schubert <MikkelSch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import logging
import os
from collections import defaultdict
import paleomix
import paleomix.common.logging
import paleomix.node
from paleomix.common.fileutils import swap_ext
from paleomix.common.layout import Layout
from paleomix.common.yaml import YAMLError
from paleomix.nodes.adapterremoval import PE_AdapterRemovalNode, SE_AdapterRemovalNode
from paleomix.nodes.bowtie2 import Bowtie2IndexNode, Bowtie2Node
from paleomix.nodes.bwa import (
BWAAlgorithmNode,
BWABacktrack,
BWAIndexNode,
BWAMem2IndexNode,
BWASampe,
BWASamse,
)
from paleomix.nodes.commands import FilterCollapsedBAMNode
from paleomix.nodes.mapdamage import (
MapDamageModelNode,
MapDamagePlotNode,
MapDamageRescaleNode,
)
from paleomix.nodes.samtools import (
BAMIndexNode,
BAMMergeNode,
FastaIndexNode,
MarkDupNode,
)
from paleomix.nodes.validation import ValidateFASTAFilesNode, ValidateFASTQFilesNode
from paleomix.pipeline import Pypeline
from paleomix.pipelines.bam.makefile import MakefileError, read_makefiles
LAYOUT = {
"{sample}.cache": {
"{genome}.validated": "final_validated",
"{genome}": {
"{library}": {
"{lane}": {
"{shortname}": {
"{read_type}.bam": "initial_bam",
"{read_type}.stats": "initial_stats",
}
}
},
# Libraries processed using `samtools markdup` or `paleomix rmdup_collapsed`
"{library}.rmdup.{method}.bam": "deduplicated_bam",
"{library}.rmdup.{method}.statistics": "deduplicated_stats",
# Libraries where quality scores have been rescaled using mapDamage
"{library}.rescaled.bam": "rescaled_bam",
},
"reads": {
"{library}": {
"{lane}": {
"{shortname}": {
"reads": "reads_prefix",
"pretrimmed.json": "reads_statistics",
},
},
},
},
},
"{sample}.{genome}.bam": "final_bam",
"{sample}.{genome}.bam.bai": "final_bai",
"{sample}.{genome}.mapDamage": {
"{library}": "mapdamage_folder",
},
}
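# Illustrative sketch, not part of the original pipeline: the nested keys above
# are field patterns, so binding the fields and resolving a name such as
# "final_bam" is expected to yield a path like "<root>/<sample>.<genome>.bam".
# The exact semantics are those of paleomix.common.layout.Layout as used below;
# the values here are hypothetical.
def _demo_layout():
    layout = Layout({"{root}": LAYOUT}, root="/results")
    layout = layout.update(sample="S1", genome="hg38")
    return layout["final_bam"]  # expected: "/results/S1.hg38.bam"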
########################################################################################
def index_genomes(log, makefiles):
tasks = {}
any_errors = False
for makefile in makefiles:
genomes = makefile["Genomes"]
if not genomes:
log.error("No genomes specified in %r", makefile["Filename"])
any_errors = True
for genome in genomes.values():
path = genome["Path"]
            # Multiple genomes may use the same path, e.g. when specified in different makefiles
abspath = os.path.abspath(path)
if abspath not in tasks:
                # Basic validation is performed since downstream tools may produce
# inconsistent/unexpected results when run on malformed files, such
# as `samtools faidx` ignoring any sequence with a previously used name.
validation = ValidateFASTAFilesNode(
input_file=path,
output_file=path + ".validated",
)
# Indexing of FASTA file using 'samtools faidx'
fai_indexing = FastaIndexNode(path, dependencies=[validation])
# Indexing of FASTA file using 'bwa index'
bwa_indexing = BWAIndexNode(path, dependencies=[validation])
# Indexing of FASTA file using 'bwa-mem2 index'
bwa_mem2_indexing = BWAMem2IndexNode(path, dependencies=[validation])
# Indexing of FASTA file using 'bowtie2-build'
bowtie2_indexing = Bowtie2IndexNode(path, dependencies=[validation])
tasks[abspath] = {
"BWA": [fai_indexing, bwa_indexing],
"BWA-MEM2": [fai_indexing, bwa_mem2_indexing],
"Bowtie2": [fai_indexing, bowtie2_indexing],
}
genome["Tasks"] = tasks[abspath]
return not any_errors
########################################################################################
def process_fastq_reads(args, layout, record):
lane_type = record["Type"]
if lane_type == "Untrimmed":
trimmed_reads = _process_untrimmed_reads(layout, record, args)
else:
trimmed_reads = _process_pretrimmed_reads(layout, record)
records = []
for read_type, files, task in trimmed_reads:
# TODO: Warn if lane is completely excluded
if not record["Options"]["ExcludeReads"][read_type]:
records.append(
{
"Task": task,
"Path": files,
"Type": read_type,
"Shortname": record["Shortname"],
"Options": record["Options"],
}
)
return records
def _process_pretrimmed_reads(layout, record):
read_type = record["Type"]
input_files = [filename for filename in record["Path"] if filename is not None]
yield read_type, record["Path"], ValidateFASTQFilesNode(
input_files=input_files,
output_file=layout.get(
"reads_statistics",
shortname=record["Shortname"],
),
offset=record["Options"]["QualityOffset"],
collapsed=("Collapsed" in read_type),
)
def _process_untrimmed_reads(layout, record, args):
quality_offset = record["Options"]["QualityOffset"]
options = dict(record["Options"]["AdapterRemoval"])
if quality_offset != 33:
options["--qualitybase"] = quality_offset
        # Quality scores of trimmed reads are normalized to Phred+33
options["--qualitybase-output"] = 33
output_prefix = layout.get(
"reads_prefix",
shortname=record["Shortname"],
)
file_1, file_2 = record["Path"]
if file_2 is None:
task = SE_AdapterRemovalNode(
input_file=file_1,
output_prefix=output_prefix,
threads=args.adapterremoval_max_threads,
options=options,
)
yield ("Single", (task.out_truncated, None), task)
else:
task = PE_AdapterRemovalNode(
input_file_1=file_1,
input_file_2=file_2,
output_prefix=output_prefix,
threads=args.adapterremoval_max_threads,
options=options,
)
yield ("Singleton", (task.out_singleton, None), task)
out_paired = (task.out_paired.format(Pair=1), task.out_paired.format(Pair=2))
yield ("Paired", out_paired, task)
if task.out_merged:
yield ("Collapsed", (task.out_merged, None), task)
if task.out_merged_truncated:
yield ("CollapsedTruncated", (task.out_merged_truncated, None), task)
########################################################################################
def map_fastq_reads(args, layout, genome, record):
layout = layout.update(
shortname=record["Shortname"],
read_type=record["Type"].lower(),
)
options = record["Options"]
aligner = options["Aligners"]["Program"]
output_file = layout["initial_bam"]
# Common mapping parameters
parameters = {
"input_file_1": record["Path"][0],
"input_file_2": record["Path"][1],
"output_file": output_file,
"reference": genome["Path"],
"mapping_options": record["Options"]["Aligners"][aligner],
"cleanup_options": _cleanup_options(record, layout),
"dependencies": [record["Task"]],
}
if aligner == "BWA":
algorithm = options["Aligners"][aligner]["Algorithm"].lower()
parameters["threads"] = args.bwa_max_threads
if algorithm == "backtrack":
mapping_task_func = _build_bwa_backtrack_task
elif algorithm in ("mem", "bwasw", "mem2"):
parameters["algorithm"] = algorithm
mapping_task_func = BWAAlgorithmNode
# BWA-MEM2 uses a new index format
if algorithm == "mem2":
aligner = "BWA-MEM2"
else:
raise NotImplementedError(f"BWA {algorithm} not implemented!")
elif aligner == "Bowtie2":
parameters["threads"] = args.bowtie2_max_threads
mapping_task_func = Bowtie2Node
else:
raise NotImplementedError(f"Aligner {aligner!r} not supported!")
# Dependencies for genome indexing for the selected aligner
parameters["dependencies"].extend(genome["Tasks"][aligner])
return {
"Type": record["Type"],
"Path": output_file,
"Options": record["Options"],
"Task": mapping_task_func(**parameters),
}
def _build_bwa_backtrack_se_task(
*,
input_file,
output_file,
reference,
threads,
dependencies,
mapping_options,
cleanup_options,
):
output_file_sai = swap_ext(output_file, ".sai")
sai_task = BWABacktrack(
input_file=input_file,
output_file=output_file_sai,
threads=threads,
reference=reference,
mapping_options=mapping_options,
dependencies=dependencies,
)
return BWASamse(
input_file_fq=input_file,
input_file_sai=output_file_sai,
output_file=output_file,
reference=reference,
threads=max(2, threads // 2),
cleanup_options=cleanup_options,
dependencies=sai_task,
)
def _build_bwa_backtrack_pe_task(
*,
input_file_1,
input_file_2,
output_file,
reference,
threads,
dependencies,
mapping_options,
cleanup_options,
):
backtrack_options = {
"threads": threads,
"reference": reference,
"mapping_options": mapping_options,
"dependencies": dependencies,
}
output_sai_1 = swap_ext(output_file, "%i.sai" % (1,))
output_sai_2 = swap_ext(output_file, "%i.sai" % (2,))
task_sai_1 = BWABacktrack(input_file_1, output_sai_1, **backtrack_options)
task_sai_2 = BWABacktrack(input_file_2, output_sai_2, **backtrack_options)
return BWASampe(
input_file_sai_1=output_sai_1,
input_file_sai_2=output_sai_2,
input_file_fq_1=input_file_1,
input_file_fq_2=input_file_2,
output_file=output_file,
reference=reference,
threads=max(2, threads // 2),
cleanup_options=cleanup_options,
dependencies=(task_sai_1, task_sai_2),
)
def _build_bwa_backtrack_task(input_file_1, input_file_2, mapping_options, **kwargs):
if not mapping_options["UseSeed"]:
mapping_options = dict(mapping_options)
mapping_options["-l"] = 2 ** 16 - 1
if input_file_2 is None:
return _build_bwa_backtrack_se_task(
input_file=input_file_1,
mapping_options=mapping_options,
**kwargs,
)
else:
return _build_bwa_backtrack_pe_task(
input_file_1=input_file_1,
input_file_2=input_file_2,
mapping_options=mapping_options,
**kwargs,
)
def _cleanup_options(record, layout):
aligner = record["Options"]["Aligners"]["Program"]
aligner_options = record["Options"]["Aligners"][aligner]
platform = record["Options"]["Platform"].upper()
sample = layout.get_field("sample")
library = layout.get_field("library")
barcode = layout.get_field("lane")
options = {
# Add mate-score tag required by `samtools markdup`
"--add-mate-score": None,
"--rg-id": library,
"--rg": [
f"SM:{sample}",
f"LB:{library}",
f"PU:{barcode}",
f"PL:{platform}",
],
"-q": aligner_options["MinQuality"],
}
if aligner_options["FilterUnmappedReads"]:
options["-F"] = "0x4"
return options
########################################################################################
def filter_pcr_duplicates(args, layout, records):
# The PCRDuplicates feature cannot be set below the library level,
# checking the first record is sufficient
pcr_filtering = records[0]["Options"]["Features"]["PCRDuplicates"]
if not pcr_filtering:
return records
options = None
tasks_by_read_type = defaultdict(dict)
for record in records:
filepath = record["Path"]
options = record["Options"]
if record["Type"] in ("Collapsed", "CollapsedTruncated"):
tasks_by_read_type["Merged"][filepath] = record["Task"]
elif record["Type"] in ("Single", "Paired", "Singleton"):
tasks_by_read_type["Unmerged"][filepath] = record["Task"]
else:
raise NotImplementedError(record)
return _filter_pcr_duplicates_by_type(
args=args,
layout=layout,
options=options,
tasks_by_read_type=tasks_by_read_type,
strategy=pcr_filtering,
)
def _filter_pcr_duplicates_by_type(args, layout, options, tasks_by_read_type, strategy):
keep_duplicates = isinstance(strategy, str) and strategy.lower() == "mark"
markdup_options = {"--threads": args.samtools_max_threads}
if not keep_duplicates:
markdup_options["-r"] = None
records = []
for key, filenames_and_tasks in tasks_by_read_type.items():
layout = layout.update(method=key.lower())
out_bam = layout["deduplicated_bam"]
out_statistics = layout["deduplicated_stats"]
if key == "Merged":
task = FilterCollapsedBAMNode(
input_bams=list(filenames_and_tasks),
output_bam=out_bam,
keep_dupes=keep_duplicates,
dependencies=filenames_and_tasks.values(),
)
elif key == "Unmerged":
task = MarkDupNode(
in_bams=list(filenames_and_tasks),
out_bam=out_bam,
out_stats=out_statistics,
options=markdup_options,
dependencies=filenames_and_tasks.values(),
)
else:
raise RuntimeError("unexpected read type {!r}".format(key))
records.append(
{
"Type": key,
"Path": out_bam,
"Task": task,
"Options": options,
}
)
return records
########################################################################################
def run_mapdamage(layout: Layout, genome, records):
options = records[0]["Options"]
run_type = options["Features"]["mapDamage"]
extra_task = None
if run_type in ("rescale", "model", "plot", True):
# Basic run of mapDamage, only generates plots / tables
extra_task = MapDamagePlotNode(
reference=genome["Path"],
input_files=[record["Path"] for record in records],
output_directory=layout["mapdamage_folder"],
title="mapDamage plot for library %r" % (layout.get_field("library"),),
options=options["mapDamage"],
dependencies=[record["Task"] for record in records],
)
if run_type in ("rescale", "model"):
# Builds model of post-mortem DNA damage
assert extra_task is not None
extra_task = MapDamageModelNode(
reference=genome["Path"],
directory=layout["mapdamage_folder"],
options=options,
dependencies=(extra_task,),
)
if run_type in ("rescale",):
# Rescales BAM quality scores using model built above
assert extra_task is not None
task = MapDamageRescaleNode(
reference=genome["Path"],
input_files=[record["Path"] for record in records],
output_file=layout["rescaled_bam"],
directory=layout["mapdamage_folder"],
options=options["mapDamage"],
dependencies=(extra_task,),
)
extra_task = None
records = [
{
"Type": "Rescaled",
"Path": layout["rescaled_bam"],
"Task": task,
"Options": options,
}
]
return records, extra_task
########################################################################################
def merge_libraries(args, layout, genome, records):
# FIXME: Do a file-copy if there is only one input file
task = BAMMergeNode(
in_files=[record["Path"] for record in records],
out_file=layout["final_bam"],
options={
"--threads": args.samtools_max_threads,
},
dependencies=[record["Task"] for record in records],
)
task = BAMIndexNode(
infile=layout["final_bam"],
index_format=genome["IndexFormat"],
options={
"-@": args.samtools_max_threads,
},
dependencies=[task],
)
return {
"Genome": genome,
"Path": layout["final_bam"],
"Task": task,
"Options": records[0]["Options"],
}
########################################################################################
def build_pipeline_trimming(args, makefile):
for sample, libraries in makefile["Samples"].items():
for library, lanes in libraries.items():
for barcode, records in lanes.items():
for record in records:
layout = args.layout.update(
sample=sample,
library=library,
lane=barcode,
)
for trimmed_reads in process_fastq_reads(args, layout, record):
yield trimmed_reads["Task"]
def build_pipeline_full(args, makefile):
layout = args.layout
for sample, library_records in makefile["Samples"].items():
# Trimmed reads are instantiated as needed, ensuring that sorting tasks by ID
# results in trimming (IO heavy) and mapping (CPU heavy) tasks being interleaved
trimmed_reads_cache = {}
for genome in makefile["Genomes"].values():
libraries = []
for library, lane_records in library_records.items():
lanes = []
for barcode, records in lane_records.items():
layout = args.layout.update(
sample=sample,
genome=genome["Name"],
library=library,
lane=barcode,
)
for record in records:
key = id(record)
trimmed_reads = trimmed_reads_cache.get(key)
if trimmed_reads is None:
# Trim untrimmed reads and validate already trimmed reads
trimmed_reads = process_fastq_reads(args, layout, record)
trimmed_reads_cache[key] = trimmed_reads
for record in trimmed_reads:
# Map trimmed reads to the target genome
lanes.append(map_fastq_reads(args, layout, genome, record))
# Exclusion of read types may result in the elimination of libraries
if lanes:
# Optionally filter PCR duplicates
library = filter_pcr_duplicates(args, layout, lanes)
# Optionally run mapDamage on library
library, extra_task = run_mapdamage(layout, genome, library)
if extra_task is not None:
yield extra_task
libraries.extend(library)
# Exclusion of read types may lead to the elimination of all libraries
if libraries:
record = merge_libraries(args, layout, genome, libraries)
yield record["Task"]
# TODO: Per sample statistics
def run(config, pipeline_variant):
paleomix.common.logging.initialize(
log_level=config.log_level,
log_file=config.log_file,
auto_log_file=os.path.join(config.temp_root, "bam_pipeline"),
)
logger = logging.getLogger(__name__)
if pipeline_variant not in ("bam", "trim"):
logger.critical("Unexpected BAM pipeline variant %r", pipeline_variant)
return 1
if not os.path.exists(config.temp_root):
try:
os.makedirs(config.temp_root)
except OSError as error:
logger.error("Could not create temp root: %s", error)
return 1
if not os.access(config.temp_root, os.R_OK | os.W_OK | os.X_OK):
logger.error("Insufficient permissions for temp root: %r", config.temp_root)
return 1
try:
makefiles = read_makefiles(config.makefiles, pipeline_variant)
except (MakefileError, YAMLError, IOError) as error:
logger.error("Error reading makefiles: %s", error)
return 1
pipeline_func = build_pipeline_trimming
if pipeline_variant != "trim":
# Genomes are processed first so that these tasks are started before reads are
# trimmed. This allows mapping to be started as early as possible.
if not index_genomes(logger, makefiles):
return 1
pipeline_func = build_pipeline_full
config.layout = Layout({"{root}": LAYOUT}, root=config.destination)
nodes = []
for makefile in makefiles:
logger.info("Building BAM pipeline for %r", makefile["Filename"])
try:
nodes.extend(pipeline_func(config, makefile))
except paleomix.node.NodeError as error:
logger.error(
"Error while building pipeline for %r:\n%s", makefile["Filename"], error
)
return 1
pipeline = Pypeline(
nodes=nodes,
temp_root=config.temp_root,
max_threads=config.max_threads,
)
return pipeline.run(config.pipeline_mode)
|
MikkelSchubert/paleomix
|
paleomix/pipelines/bam/pipeline.py
|
Python
|
mit
| 23,115
|
[
"BWA"
] |
077d7ca0f79975e57e34ee812c1cc58dff7f0af7a1b2666c7a84f37f63a3eb13
|
# * Copyright 2020 Google LLC
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from google.cloud import vision
from google.cloud.vision import types
from pyvisionproductsearch import ProductSearch, ProductCategories
import io
import os
CLOTHING_OBJECT_LABELS = [
"Outerwear",
"Jacket",
"Jeans",
"Shorts",
"Shirt",
"Coat",
"Suit",
"Swimwear",
"Dress",
"Miniskirt",
"Pants",
"Footwear",
"Skirt",
"Belt",
"Underpants",
"Shoe",
"Sandal",
"Handbag",
"Suitcase",
"Satchel",
"Sunglasses",
"Top",
"Bracelet",
"Scarf",
"Earrings",
"Boot",
"Hat",
"High heels",
"Cowboy hat",
"Backpack",
"Necklace",
"Tiara",
"Bowtie",
"Straw hat",
"Fedora",
"Glasses",
"Briefcase",
"Tie",
"Sun hat",
"Glove",
"Sombrero",
"Helmet",
"Crown",
"Sock",
"Goggles"
]
def detectLabels(file_path=None, image_uri=None):
if bool(file_path) == bool(image_uri):
raise Exception(
"Must provide one of either a file path or an image uri")
client = vision.ImageAnnotatorClient()
if file_path:
with io.open(file_path, 'rb') as image_file:
content = image_file.read()
image = vision.types.Image(content=content)
else:
image_source = vision.types.ImageSource(image_uri=image_uri)
image = vision.types.Image(source=image_source)
# Performs label detection on the image file
response = client.label_detection(image=image)
return response.label_annotations
def detectObjects(file_path=None, image_uri=None):
if bool(file_path) == bool(image_uri):
raise Exception(
"Must provide one of either a file path or an image uri")
client = vision.ImageAnnotatorClient()
if file_path:
with io.open(file_path, 'rb') as image_file:
content = image_file.read()
image = vision.types.Image(content=content)
else:
image_source = vision.types.ImageSource(image_uri=image_uri)
image = vision.types.Image(source=image_source)
    # Performs object localization on the image file
return client.object_localization(
image=image).localized_object_annotations
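# Illustrative usage sketch, not part of the original module; "photo.jpg" is a
# placeholder path, and the calls assume Google Cloud Vision credentials are
# configured in the environment.
if __name__ == "__main__":
    for label in detectLabels(file_path="photo.jpg"):
        print(label.description, label.score)
    for obj in detectObjects(file_path="photo.jpg"):
        if obj.name in CLOTHING_OBJECT_LABELS:
            print(obj.name, obj.score)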
|
google/making_with_ml
|
instafashion/scripts/utils.py
|
Python
|
apache-2.0
| 2,814
|
[
"Bowtie"
] |
05fe67058a5494393c15b6f8b9a8925a59428a830990da68acfa5fe488461bbc
|
import numpy as np
import math
unit = 2.2
height = 1.6
from chemfiles import Trajectory, UnitCell, Atom, Topology, Frame, Selection
def coordinate_si(ring, column, state):
x = unit * column
y = - unit * ring
if state == 0 or state == 7:
return 0, 0, 0, 0, 0, 0, 0
if state == 1:
return x, y, x, y, x, y + 1, height
if state == 4:
return x, y, x, y, x, y - 1, height
if state == 2:
return x, y, x, y, x + 0.866, y + 0.5, height
if state == 3:
return x, y, x, y, x + 0.866, y - 0.5, height
if state == 5:
return x, y, x, y, x - 0.866, y - 0.5, height
if state == 6:
return x, y, x, y, x - 0.866, y + 0.5, height
return 0, 0, 0, 0, 0, 0, height
def coordinate_up(ring, column, state):
x = unit * column
y = - unit * ring
if state == 7:
return 0, 0, 0, 0, 0, 0, 0
if state == 8:
return x, y, x, y + 1, x, y, height + 1
if state == 9:
return x, y, x + 0.866, y - 0.5, x, y, height + 1
if state == 10:
return x, y, x - 0.866, y - 0.5, x, y, height + 1
if state == 11:
return x, y, x, y + 1, x + 0.866, y - 0.5, height
if state == 12:
return x, y, x - 0.866, y - 0.5, x + 0.866, y - 0.5, height
if state == 13:
return x, y, x - 0.866, y - 0.5, x, y + 1, height
def coordinate_down(ring, column, state):
x = unit * column
y = - unit * ring
if state == 7:
return 0, 0, 0, 0, 0, 0, 0
if state == 8:
return x, y, x, y - 1, x, y, height + 1
if state == 9:
return x, y, x + 0.866, y + 0.5, x, y, height + 1
if state == 10:
return x, y, x - 0.866, y + 0.5, x, y, height + 1
if state == 11:
return x, y, x, y - 1, x + 0.866, y + 0.5, height
if state == 12:
return x, y, x - 0.866, y + 0.5, x + 0.866, y + 0.5, height
if state == 13:
return x, y, x - 0.866, y + 0.5, x, y - 1, height
with Trajectory('conf.xyz','w') as output:
with open('conf.dat', 'r') as conf:
ring = 0
skip = 0
for line in conf:
if line.startswith("Step:") and skip % 100 == 0:
frame = Frame()
ring = 0
elif line.startswith("####"):
if skip % 100 == 0:
output.write(frame)
skip += 1
elif skip % 100 == 0:
row = map(int,line.split())
for column, state in enumerate(row):
if state < 7:
x1, y1, x2, y2, x3, y3, z3 = coordinate_si(ring, column, state)
                    if state > 6 and ring % 4 == 1 and column % 2 == 0:  # up (green)
x1, y1, x2, y2, x3, y3, z3 = coordinate_up(ring, column, state)
                    if state > 6 and ring % 4 == 1 and column % 2 == 1:  # down (red)
x1, y1, x2, y2, x3, y3, z3 = coordinate_down(ring, column, state)
                    if state > 6 and ring % 4 == 3 and column % 2 == 1:  # up (green)
x1, y1, x2, y2, x3, y3, z3 = coordinate_up(ring, column, state)
                    if state > 6 and ring % 4 == 3 and column % 2 == 0:  # down (red)
x1, y1, x2, y2, x3, y3, z3 = coordinate_down(ring, column, state)
if state < 7 and state > 0:
atom1 = Atom("Si", "Si")
atom2 = Atom("O", "O")
atom3 = Atom("H", "H")
frame.add_atom(atom1, [x1, y1, 0])
frame.add_atom(atom2, [x2, y2, height])
frame.add_atom(atom3, [x3, y3, height])
if state > 7:
atom1 = Atom("Ow", "O")
atom2 = Atom("Hw", "H")
atom3 = Atom("Hw", "H")
frame.add_atom(atom1, [x1, y1, height])
frame.add_atom(atom2, [x2, y2, height])
frame.add_atom(atom3, [x3, y3, z3])
ring += 1
|
lscalfi/imogolite
|
5-IsingModel/film.py
|
Python
|
mit
| 4,077
|
[
"Chemfiles"
] |
76bf9d32074de93e86a5bf40d24cf1276cf0ee87915052482d7a98ee1e185c52
|
'''
This script reads snow depth geotiffs and preprocesses them
and stores results to a dict.
Dict structure is src_name (str)
snow with NaN mask (numpy array)
rasterio metadata
Gaussian blurred snow (numpy array)
Normalized (0 to 1) snow (numpy array)
binary drift mask (numpy masked array)
binary scour mask (numpy masked array)
normal drift snow (numpy array)
real drift snow (numpy array)
normal scour snow (numpy array)
real scour snow (numpy array)
'''
import os
import rasterio
from sklearn.preprocessing import *
import cv2
import numpy as np
import matplotlib.pyplot as plt
%matplotlib qt
# Initialize empty dictionary
src_dict = {}
# Can use glob to pull all snow rasters in a certain folder
test = ['/home/cparr/water_tracks/sub_wt_2012.tif', '/home/cparr/water_tracks/sub_wt_2013.tif']
def preprocess_snow( tif_list ):
for f in tif_list:
src_name = os.path.basename( f ).replace( '.tif', '_src' )
src = rasterio.open( f )
meta = src.meta
real_snow = np.ma.masked_values( src.read(1), src.nodata )
blur_snow = cv2.GaussianBlur( real_snow, (3,3), 0 )
norm_snow = maxabs_scale( blur_snow, axis=1, copy=True )
drifted_mask = np.ma.MaskedArray( norm_snow > 0.5 )
scoured_mask = np.ma.MaskedArray( norm_snow <= 0.5 )
drifted_norm_snow = drifted_mask*norm_snow
drifted_real_snow = drifted_mask*real_snow
scoured_norm_snow = scoured_mask*norm_snow
scoured_real_snow = scoured_mask*real_snow
        src_dict[src_name] = (real_snow, meta, blur_snow, norm_snow,
                              drifted_mask, scoured_mask,
                              drifted_norm_snow, drifted_real_snow,
                              scoured_norm_snow, scoured_real_snow)
#return src_dict
preprocess_snow( test )
i = 1
for k,v in src_dict.iteritems():
plt.subplot(1,2,i)
plt.imshow(v[3])
i += 1
|
charparr/tundra-snow
|
snow_preprocess.py
|
Python
|
mit
| 2,083
|
[
"Gaussian"
] |
0a46f58da57740df202d8860acea620cef00fc20eb0a225fa140249792f1c528
|
""" DIRAC Workload Management System Client class encapsulates all the
methods necessary to communicate with the Workload Management System
"""
import os
import StringIO
import time
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd
from DIRAC.Core.Utilities import File
from DIRAC.WorkloadManagementSystem.Client.SandboxStoreClient import SandboxStoreClient
from DIRAC.WorkloadManagementSystem.Utilities.ParametricJob import getParameterVectorLength
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Utilities.DErrno import EWMSJDL, EWMSSUBM
__RCSID__ = "$Id$"
class WMSClient(object):
""" Class exposing the following jobs methods:
submit
kill
delete
reschedule
reset
"""
def __init__(self, jobManagerClient=None, sbRPCClient=None, sbTransferClient=None,
useCertificates=False, timeout=600, delegatedDN=None, delegatedGroup=None):
""" WMS Client constructor
Here we also initialize the needed clients and connections
"""
self.useCertificates = useCertificates
self.delegatedDN = delegatedDN
self.delegatedGroup = delegatedGroup
self.timeout = timeout
self._jobManager = jobManagerClient
self.operationsHelper = Operations()
self.sandboxClient = None
if sbRPCClient and sbTransferClient:
self.sandboxClient = SandboxStoreClient(rpcClient=sbRPCClient,
transferClient=sbTransferClient,
useCertificates=useCertificates)
@property
def jobManager(self):
if not self._jobManager:
self._jobManager = RPCClient('WorkloadManagement/JobManager',
useCertificates=self.useCertificates,
delegatedDN=self.delegatedDN,
delegatedGroup=self.delegatedGroup,
timeout=self.timeout)
return self._jobManager
###############################################################################
def __getInputSandboxEntries(self, classAdJob):
if classAdJob.lookupAttribute("InputSandbox"):
inputSandbox = classAdJob.get_expression("InputSandbox")
inputSandbox = inputSandbox.replace('","', "\n")
inputSandbox = inputSandbox.replace('{', "")
inputSandbox = inputSandbox.replace('}', "")
inputSandbox = inputSandbox.replace('"', "")
inputSandbox = inputSandbox.replace(',', "")
inputSandbox = inputSandbox.split()
else:
inputSandbox = []
return inputSandbox
def __uploadInputSandbox(self, classAdJob, jobDescriptionObject=None):
"""Checks the validity of the job Input Sandbox.
The function returns the list of Input Sandbox files.
The total volume of the input sandbox is evaluated
"""
inputSandbox = self.__getInputSandboxEntries(classAdJob)
realFiles = []
badFiles = []
diskFiles = []
for isFile in inputSandbox:
if not isFile.startswith(('lfn:', 'LFN:', 'SB:', '%s', '%(')):
realFiles.append(isFile)
stringIOFiles = []
stringIOFilesSize = 0
if jobDescriptionObject is not None:
if isinstance(jobDescriptionObject, StringIO.StringIO):
stringIOFiles = [jobDescriptionObject]
stringIOFilesSize = len(jobDescriptionObject.buf)
gLogger.debug("Size of the stringIOFiles: " + str(stringIOFilesSize))
else:
return S_ERROR(EWMSJDL, "jobDescriptionObject is not a StringIO object")
# Check real files
for isFile in realFiles:
if not os.path.exists(isFile): # we are passing in real files, we expect them to be on disk
badFiles.append(isFile)
gLogger.warn("inputSandbox file/directory " + isFile + " not found. Keep looking for the others")
continue
diskFiles.append(isFile)
diskFilesSize = File.getGlobbedTotalSize(diskFiles)
gLogger.debug("Size of the diskFiles: " + str(diskFilesSize))
totalSize = diskFilesSize + stringIOFilesSize
gLogger.verbose("Total size of the inputSandbox: " + str(totalSize))
okFiles = stringIOFiles + diskFiles
if badFiles:
result = S_ERROR(EWMSJDL, 'Input Sandbox is not valid')
result['BadFile'] = badFiles
result['TotalSize'] = totalSize
return result
if okFiles:
if not self.sandboxClient:
self.sandboxClient = SandboxStoreClient(useCertificates=self.useCertificates,
delegatedDN=self.delegatedDN,
delegatedGroup=self.delegatedGroup)
result = self.sandboxClient.uploadFilesAsSandbox(okFiles)
if not result['OK']:
return result
inputSandbox.append(result['Value'])
classAdJob.insertAttributeVectorString("InputSandbox", inputSandbox)
return S_OK()
def submitJob(self, jdl, jobDescriptionObject=None):
""" Submit one job specified by its JDL to WMS.
        The JDL may actually be the description of a parametric job,
resulting in multiple DIRAC jobs submitted to the DIRAC WMS
"""
if os.path.exists(jdl):
with open(jdl, "r") as fic:
jdlString = fic.read()
else:
# If file JDL does not exist, assume that the JDL is passed as a string
jdlString = jdl
jdlString = jdlString.strip()
    # Strip comments from the jdl string
newJdlList = []
for line in jdlString.split('\n'):
if not line.strip().startswith('#'):
newJdlList.append(line)
jdlString = '\n'.join(newJdlList)
# Check the validity of the input JDL
if jdlString.find("[") != 0:
jdlString = "[%s]" % jdlString
classAdJob = ClassAd(jdlString)
if not classAdJob.isOK():
return S_ERROR(EWMSJDL, 'Invalid job JDL')
# Check the size and the contents of the input sandbox
result = self.__uploadInputSandbox(classAdJob, jobDescriptionObject)
if not result['OK']:
return result
# Submit the job now and get the new job ID
result = getParameterVectorLength(classAdJob)
if not result['OK']:
return result
nJobs = result['Value']
parametricJob = nJobs > 0
result = self.jobManager.submitJob(classAdJob.asJDL())
if parametricJob:
gLogger.debug('Applying transactional job submission')
# The server applies transactional bulk submission, we should confirm the jobs
if result['OK']:
jobIDList = result['Value']
if len(jobIDList) == nJobs:
# Confirm the submitted jobs
confirmed = False
for _attempt in xrange(3):
result = self.jobManager.confirmBulkSubmission(jobIDList)
if result['OK']:
confirmed = True
break
time.sleep(1)
if not confirmed:
# The bulk submission failed, try to delete the created jobs
resultDelete = self.jobManager.deleteJob(jobIDList)
error = "Job submission failed to confirm bulk transaction"
if not resultDelete['OK']:
error += "; removal of created jobs failed"
return S_ERROR(EWMSSUBM, error)
else:
return S_ERROR(EWMSSUBM, "The number of submitted jobs does not match job description")
if result.get('requireProxyUpload'):
gLogger.warn("Need to upload the proxy")
return result
def killJob(self, jobID):
""" Kill running job.
jobID can be an integer representing a single DIRAC job ID or a list of IDs
"""
return self.jobManager.killJob(jobID)
def deleteJob(self, jobID):
""" Delete job(s) from the WMS Job database.
jobID can be an integer representing a single DIRAC job ID or a list of IDs
"""
return self.jobManager.deleteJob(jobID)
def rescheduleJob(self, jobID):
""" Reschedule job(s) in WMS Job database.
jobID can be an integer representing a single DIRAC job ID or a list of IDs
"""
return self.jobManager.rescheduleJob(jobID)
def resetJob(self, jobID):
""" Reset job(s) in WMS Job database.
jobID can be an integer representing a single DIRAC job ID or a list of IDs
"""
return self.jobManager.resetJob(jobID)
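# Illustrative usage sketch, not part of the original module. The inline JDL is
# a minimal, hypothetical example; a valid proxy and DIRAC configuration are
# assumed to be in place.
if __name__ == "__main__":
  wmsClient = WMSClient()
  result = wmsClient.submitJob('Executable = "/bin/echo"; Arguments = "Hello";')
  if result['OK']:
    print 'Submitted job(s):', result['Value']
  else:
    print 'Submission failed:', result.get('Message')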
|
fstagni/DIRAC
|
WorkloadManagementSystem/Client/WMSClient.py
|
Python
|
gpl-3.0
| 8,338
|
[
"DIRAC"
] |
0c69ecbe71545212254978e76459e241edf6222fcc6a75021701332054223cac
|
# coding: utf-8
# Copyright 2017 CERN. This software is distributed under the
# terms of the GNU General Public Licence version 3 (GPL Version 3),
# copied verbatim in the file LICENCE.md.
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
'''
**Unit-tests for the SparseSlices class.**
:Authors: **Markus Schwarz**
'''
# General imports
# -----------------
import unittest
import numpy as np
# BLonD imports
# --------------
from blond.beam.beam import Beam, Proton
from blond.input_parameters.ring import Ring
from blond.beam.sparse_slices import SparseSlices
from blond.beam.profile import Profile, CutOptions
from blond.input_parameters.rf_parameters import RFStation
from blond.beam.distributions import bigaussian
class testProfileClass(unittest.TestCase):
# Run before every test
def setUp(self):
"""
        Generate two Gaussian bunches and slice them with a uniform Profile,
        to be compared against the SparseSlices results in the individual tests.
"""
np.random.seed(1984)
intensity_pb = 1.0e11
sigma = 0.2e-9 # Gauss sigma, [s]
n_macroparticles_pb = int(1e4)
n_bunches = 2
# --- Ring and RF ----------------------------------------------
intensity = n_bunches * intensity_pb # total intensity SPS
n_turns = 1
# Ring parameters SPS
circumference = 6911.5038 # Machine circumference [m]
sync_momentum = 25.92e9 # SPS momentum at injection [eV/c]
gamma_transition = 17.95142852 # Q20 Transition gamma
momentum_compaction = 1./gamma_transition**2 # Momentum compaction array
ring = Ring(circumference, momentum_compaction, sync_momentum, Proton(),
n_turns=n_turns)
# RF parameters SPS
harmonic_number = 4620 # harmonic number
voltage = 3.5e6 # [V]
phi_offsets = 0
self.rf_station = RFStation(ring, harmonic_number, voltage, phi_offsets, n_rf=1)
t_rf = self.rf_station.t_rf[0,0]
bunch_spacing = 5 # RF buckets
n_macroparticles = n_bunches * n_macroparticles_pb
self.beam = Beam(ring, n_macroparticles, intensity)
for bunch in range(n_bunches):
bunchBeam = Beam(ring, n_macroparticles_pb, intensity_pb)
bigaussian(ring, self.rf_station, bunchBeam, sigma, reinsertion=True, seed=1984+bunch)
self.beam.dt[bunch*n_macroparticles_pb : (bunch+1)*n_macroparticles_pb] \
= bunchBeam.dt + bunch*bunch_spacing * t_rf
self.beam.dE[bunch*n_macroparticles_pb : (bunch+1)*n_macroparticles_pb] = bunchBeam.dE
self.filling_pattern = np.zeros(bunch_spacing * (n_bunches-1) + 1)
self.filling_pattern[::bunch_spacing] = 1
# uniform profile
profile_margin = 0 * t_rf
t_batch_begin = 0 * t_rf
t_batch_end = (bunch_spacing * (n_bunches-1) + 1) * t_rf
self.n_slices_rf = 32 # number of slices per RF-bucket
cut_left = t_batch_begin - profile_margin
cut_right = t_batch_end + profile_margin
        # number of rf-buckets of the beam
        # + rf-buckets before the beam + rf-buckets after the beam
n_slices = self.n_slices_rf * (bunch_spacing * (n_bunches-1) + 1
+ int(np.round((t_batch_begin - cut_left)/t_rf))
+ int(np.round((cut_right - t_batch_end)/t_rf)))
self.uniform_profile = Profile(self.beam,
CutOptions=CutOptions(cut_left=cut_left, n_slices=n_slices,
cut_right=cut_right))
self.uniform_profile.track()
def test_WrongTrackingFunction(self):
with self.assertRaises(RuntimeError):
SparseSlices(self.rf_station, self.beam, self.n_slices_rf, self.filling_pattern,
tracker='something horribly wrong')
nonuniform_profile = SparseSlices(self.rf_station, self.beam, self.n_slices_rf,
self.filling_pattern)
self.assertEqual(nonuniform_profile.bin_centers_array.shape, (2, self.n_slices_rf),
msg='Wrong shape of bin_centers_array!')
def test_onebyone(self):
rtol = 1e-6 # relative tolerance
atol = 0 # absolute tolerance
nonuniform_profile = SparseSlices(self.rf_station, self.beam, self.n_slices_rf,
self.filling_pattern, tracker='onebyone',
direct_slicing=True)
for bunch in range(2):
indexes = (self.uniform_profile.bin_centers>nonuniform_profile.cut_left_array[bunch])\
* (self.uniform_profile.bin_centers<nonuniform_profile.cut_right_array[bunch])
np.testing.assert_allclose(self.uniform_profile.bin_centers[indexes],
nonuniform_profile.bin_centers_array[bunch],
rtol=rtol, atol=atol,
err_msg=f'Bins for bunch {bunch} do not agree '
+ 'for tracker="onebyone"')
np.testing.assert_allclose(self.uniform_profile.n_macroparticles[indexes],
nonuniform_profile.n_macroparticles_array[bunch],
rtol=rtol, atol=atol,
err_msg=f'Profiles for bunch {bunch} do not agree '
+ 'for tracker="onebyone"')
def test_Ctracker(self):
rtol = 1e-6 # relative tolerance
atol = 0 # absolute tolerance
nonuniform_profile = SparseSlices(self.rf_station, self.beam, self.n_slices_rf,
self.filling_pattern, tracker='C',
direct_slicing=True)
for bunch in range(2):
indexes = (self.uniform_profile.bin_centers>nonuniform_profile.cut_left_array[bunch])\
* (self.uniform_profile.bin_centers<nonuniform_profile.cut_right_array[bunch])
np.testing.assert_allclose(self.uniform_profile.bin_centers[indexes],
nonuniform_profile.bin_centers_array[bunch],
rtol=rtol, atol=atol,
err_msg=f'Bins for bunch {bunch} do not agree '
+ 'for tracker="C"')
np.testing.assert_allclose(self.uniform_profile.n_macroparticles[indexes],
nonuniform_profile.n_macroparticles_array[bunch],
rtol=rtol, atol=atol,
err_msg=f'Profiles for bunch {bunch} do not agree '
+ 'for tracker="C"')
if __name__ == '__main__':
unittest.main()
|
blond-admin/BLonD
|
unittests/beam_profile/test_sparse_profile.py
|
Python
|
gpl-3.0
| 7,221
|
[
"Gaussian"
] |
771e441418f5d5623945ac6ff17b55370b148a4c7accb9f88b7240ad627dd592
|
"""
Creates a table with efficiency metrics (running time, peak memory
footprint) from SLURM output generated from sbatch_align.sh.
"""
from __future__ import print_function
import glob
import sys
from collections import defaultdict
nslurm, nsam = 0, 0
sam_names = defaultdict(int)
tab_wrapped = defaultdict(lambda: defaultdict(int))
to_slurm_wrapped = {}
for fn in glob.glob('slurm-*.out'):
with open(fn) as fh:
ln = fh.readline()
if 'is up to date' in ln:
continue # skip trivial job target was already up to date
if ln.split()[0] != 'python':
continue
nsam += 1
name, t_tandal, t_tandpa, wrappeak, childpeak, t_overall, t_inp = None, 0, 0, 0, 0, 0, 0
while True:
ln = fh.readline()
if len(ln) == 0:
break
ln = ln.rstrip()
if '--vanilla-out' in ln:
assert name is None
name = ln.split()[1]
if 'INFO:Overall' in ln:
t_overall = float(ln.split()[-1])
if 'INFO:Aligning input reads' in ln:
t_inp = float(ln.split()[-1])
if 'INFO:Aligning tandem reads' in ln and 'paired' not in ln:
t_tandal = float(ln.split()[-1])
if 'INFO:Parsing tandem alignments' in ln:
t_tandpa = float(ln.split()[-1])
if 'INFO:Peak memory usage (RSS) of Python wrapper' in ln:
wrappeak = ln.split()[-1]
assert wrappeak[-2:] == 'GB'
wrappeak = float(wrappeak[:-2]) * 1024 * 1024 * 1024
if 'INFO:Peak memory usage (RSS) of children' in ln:
childpeak = ln.split()[-1]
assert childpeak[-2:] == 'GB'
childpeak = float(childpeak[:-2]) * 1024 * 1024 * 1024
sam_names[name] += 1
tab_wrapped[name]['wrappeak'] = wrappeak
tab_wrapped[name]['childpeak'] = childpeak
tab_wrapped[name]['t_overall'] = t_overall
tab_wrapped[name]['t_inp'] = t_inp
tab_wrapped[name]['t_tandal'] = t_tandal
tab_wrapped[name]['t_tandpa'] = t_tandpa
to_slurm_wrapped[name] = fn
nslurm += 1
print('# slurm files: %d' % nslurm, file=sys.stderr)
print('# sam files: %d' % nsam, file=sys.stderr)
for k, v in sorted(sam_names.items()):
print(' %s: %d' % (k, v), file=sys.stderr)
aln_map = {'bt2': 'Bowtie 2', 'bwa': 'BWA-MEM', 'snap': 'SNAP'}
print('data,aligner,paired,align_time,overall_time,pct_increase_a_to_o,peak_wrapper,peak_children,pct_increase_peak')
for k in sorted(sam_names.keys()):
wrappeak = tab_wrapped[k]['wrappeak']
childpeak = tab_wrapped[k]['childpeak']
wrappct = 0 if childpeak == 0 else (wrappeak * 100.0 / childpeak)
wrappct = ('+' if wrappct >= 0 else '') + ('%0.3f' % wrappct)
t_o, t_a = tab_wrapped[k]['t_overall'], tab_wrapped[k]['t_inp']
t_pct = 0 if t_a == 0 else ((t_o - t_a) * 100.0/t_a)
t_pct = ('+' if t_pct >= 0 else '') + ('%0.3f' % t_pct)
srr, aligner, paired, _ = k.split('.')
srr = srr[:-2] # chop off trailing _1
aligner = aln_map[aligner]
paired = 'T' if paired == 'pair' else 'F'
print('%s,%s,%s,%.0f,%.0f,%s,%.0f,%.0f,%s' % (srr, aligner, paired, t_a, t_o, t_pct, wrappeak, childpeak, wrappct))
|
BenLangmead/qtip-experiments
|
experiments/real_data/perf_tabulate.py
|
Python
|
mit
| 3,305
|
[
"BWA",
"Bowtie"
] |
f8a7abbf4a6a6e38d6225ab92d8e92d3232745b591bb77c3c0ac4f2212bfee0b
|
import gzip
import os
import shutil
import socket
import urllib2
from contextlib import closing
import requests
from ftplib import FTP
import settings
from models.conan import CONAN_PIPELINES
from models.soft_mapper import SoftHeaderMapper, SoftTableMapper
from utils.common import execute_command
from utils.conan.conan import submit_conan_task
from utils.email.sender import send_email
__author__ = 'Ahmed G. Ali'
def get_platform_url(geo_acc):
platform_number = geo_acc.replace('GPL', '')
ftp_dir = ''
if int(platform_number) < 1000:
ftp_dir = 'GPLnnn'
else:
char_dif = len(platform_number) - len('nnn')
ftp_dir = 'GPL' + platform_number[:char_dif] + 'nnn'
url = settings.GEO_PLATFORM_URL.format(dir_name=ftp_dir, accession=geo_acc, arch=geo_acc+'_family.soft.gz')
return url
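# Illustrative examples of the FTP directory naming implemented above
# (hypothetical accessions; settings.GEO_PLATFORM_URL is assumed to be a
# format string with dir_name/accession/arch placeholders):
#   GPL570   -> ftp_dir 'GPLnnn'
#   GPL13534 -> ftp_dir 'GPL13nnn'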
def download_soft_file(geo_acc, by='platform'):
# adf_tmp_dir =os.path.join(settings.TEMP_FOLDER, geo_accession.replace('GPL', 'A-GEOD-'))
adf_tmp_dir = os.path.join(settings.ADF_LOAD_DIR, geo_acc.replace('GPL', 'A-GEOD-'))
if not os.path.exists(adf_tmp_dir):
os.mkdir(adf_tmp_dir)
file_name = geo_acc + '_family.soft.gz'
host = settings.GEO_SOFT_URL % by + geo_acc
url = get_platform_url(geo_acc)
print url
# link = FTP(host=settings.GEO_SOFT_URL % by + geo_acc + , timeout=5)
# r = requests.get(url, stream=True)
# with open(os.path.join(adf_tmp_dir, file_name), 'wb') as f:
# for chunk in r.iter_content(chunk_size=1024):
# if chunk: # filter out keep-alive new chunks
# f.write(r.content)
# with closing(urllib2.urlopen(settings.GEO_SOFT_URL % by + geo_acc + '/' + file_name)) as r:
# with open(os.path.join(adf_tmp_dir, file_name), 'wb') as f:
# shutil.copyfileobj(r, f)
# local_filename = os.path.join(adf_tmp_dir, file_name)
# print host
# with closing(FTP()) as ftp:
# try:
# ftp.connect('ftp.ncbi.nih.gov',port=21, timeout= 30 * 60) # 30 mins timeout
# # print ftp.getwelcome()
# ftp.login('anonymous', '')
# ftp.set_pasv(True)
# ftp.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
# ftp.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 75)
# ftp.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 60)
# with open(local_filename, 'w+b') as f:
# res = ftp.retrbinary('RETR %s' % url.split('ftp.ncbi.nih.gov/')[1], f.write)
#
# if not res.startswith('226 Transfer complete'):
# # logging.error('Downloaded of file {0} is not compile.'.format(orig_filename))
# os.remove(local_filename)
# return None
#
# # os.rename(local_filename, self.storage + filename + file_ext)
# # ftp.rename(orig_filename, orig_filename + '.copied')
#
# # return filename + file_ext
#
# except:
# raise
# # logging.exception('Error during download from FTP')
command = """wget -m %s -O %s """ % (url, os.path.join(adf_tmp_dir, file_name))
print command
print execute_command(command)
return os.path.join(adf_tmp_dir, file_name)
def parse_soft_file(soft_file):
table = []
start_header = False
start_table = False
header = []
with gzip.open(soft_file, 'rb') as infile:
for line in infile:
if '^PLATFORM =' in line:
start_header = True
if line.startswith('!platform_table_begin'):
start_header = False
start_table = True
continue
if '!platform_table_end' in line:
break
if start_header:
header.append(line)
# print line
if start_table:
table.append(line)
return header, table
def generate_adf(geo_accession, header, table):
header_obj = SoftHeaderMapper(header)
header_obj.generate_header()
# print 'header generated'
table_obj = SoftTableMapper(table)
# 'HEADER'
# print table_obj.ae_header
# for r in table_obj.rows:
# print r.print_soft()
adf_file = os.path.join(settings.ADF_LOAD_DIR, geo_accession.replace('GPL', 'A-GEOD-'),
geo_accession + '.adf.txt')
f = open(adf_file, 'w')
f.write(header_obj.header_txt + '\n[main]\n' + table_obj.table)
f.close()
comments_file = os.path.join(settings.ADF_LOAD_DIR, geo_accession.replace('GPL', 'A-GEOD-'),
geo_accession.replace('GPL', 'A-GEOD-') + '_comments.txt')
f = open(comments_file, 'w')
f.write(table_obj.comments)
f.close()
# print table_obj.ae_header
def import_geo_platform(geo_acc):
try:
soft_file = download_soft_file(geo_acc)
header, table = parse_soft_file(soft_file)
generate_adf(geo_acc, header, table)
adf_file = os.path.join(settings.ADF_LOAD_DIR, geo_acc.replace('GPL', 'A-GEOD-'),
geo_acc + '.adf.txt')
print execute_command('magetab_insert_array.pl -f %s -a %s -c' % (adf_file, geo_acc.replace('GPL', 'A-GEOD-')))
# shutil.copyfile(os.path.join(settings.ADF_LOAD_DIR, geo_acc.replace('GPL', 'A-GEOD-')), )
out, err = execute_command('reset_array.pl -a A-GEOD-%s -c' % geo_acc.replace('GPL', ''))
if 'error' in out.lower() or 'error' in err.lower():
msg = """Dear Curators,
While trying to execute reset_array.pl for %s we had the following output:
%s
%s""" % (geo_acc, out, err)
send_email(from_email='AE Automation<ae-automation@ebi.ac.uk>',
to_emails=['miamexpress@ebi.ac.uk', 'ahmed@ebi.ac.uk'],
subject='GEO Array Error ' + geo_acc.replace('GPL', 'A-GEOD-'),
body=msg)
return
submit_conan_task(accession=geo_acc.replace('GPL', 'A-GEOD-'), pipeline_name=CONAN_PIPELINES.load_adf)
except Exception, e:
msg = """The following error occurred while importing: %s
%s""" % (geo_acc, str(e))
send_email(from_email='AE Automation<ae-automation@ebi.ac.uk>',
to_emails=['ahmed@ebi.ac.uk'],
                   subject='Platform import error',
body=msg)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Downloads and converts a GEO Platform into MAGE-TAB')
parser.add_argument('accession', metavar='GPLxxxx', type=str,
help='''The accession number for the GEO Platform''')
args = parser.parse_args()
geo_accession = args.accession
import_geo_platform(geo_accession)
|
arrayexpress/ae_auto
|
automation/geo/import_adf.py
|
Python
|
apache-2.0
| 6,795
|
[
"ADF"
] |
523e6c32d2ef5554eef531a0e3ba05948fa31d385c07d021c2e22d53c168942d
|
# Author: Travis Oliphant
# 2003
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, size, polyval, polyint, log10
def sawtooth(t,width=1):
"""Returns a periodic sawtooth waveform with period 2*pi
which rises from -1 to 1 on the interval 0 to width*2*pi
and drops from 1 to -1 on the interval width*2*pi to 2*pi
width must be in the interval [0,1]
"""
t,w = asarray(t), asarray(width)
w = asarray(w + (t-t))
t = asarray(t + (w-w))
    if t.dtype.char in 'fFdD':
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape,ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y,mask1,nan)
# take t modulo 2*pi
tmod = mod(t,2*pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1-mask1) & (tmod < w*2*pi)
tsub = extract(mask2,tmod)
wsub = extract(mask2,w)
place(y,mask2,tsub / (pi*wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1-mask1) & (1-mask2)
tsub = extract(mask3,tmod)
wsub = extract(mask3,w)
place(y,mask3, (pi*(wsub+1)-tsub)/(pi*(1-wsub)))
return y
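# Example (illustrative, not part of the original module): one period of the
# default width=1 sawtooth sampled at a few points (`array` would need to be
# imported from numpy; `pi` is already imported above).
#   >>> sawtooth(array([0.0, pi/2, pi, 3*pi/2]))
#   array([-1. , -0.5,  0. ,  0.5])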
def square(t,duty=0.5):
"""Returns a periodic square-wave waveform with period 2*pi
which is +1 from 0 to 2*pi*duty and -1 from 2*pi*duty to 2*pi
duty must be in the interval [0,1]
"""
t,w = asarray(t), asarray(duty)
w = asarray(w + (t-t))
t = asarray(t + (w-w))
    if t.dtype.char in 'fFdD':
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape,ytype)
    # duty must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y,mask1,nan)
# take t modulo 2*pi
tmod = mod(t,2*pi)
# on the interval 0 to duty*2*pi function is
# 1
mask2 = (1-mask1) & (tmod < w*2*pi)
tsub = extract(mask2,tmod)
wsub = extract(mask2,w)
place(y,mask2,1)
    # on the interval duty*2*pi to 2*pi the function is -1
mask3 = (1-mask1) & (1-mask2)
tsub = extract(mask3,tmod)
wsub = extract(mask3,w)
place(y,mask3,-1)
return y
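# Example (illustrative): a 50% duty-cycle square wave over one period,
# sampled at the same points as above.
#   >>> square(array([0.0, pi/2, pi, 3*pi/2]))
#   array([ 1.,  1., -1., -1.])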
def gausspulse(t,fc=1000,bw=0.5,bwr=-6,tpr=-60,retquad=0,retenv=0):
    """Return a gaussian modulated sinusoid: exp(-a t^2) exp(1j*2*pi*fc*t)
If retquad is non-zero, then return the real and imaginary parts
(inphase and quadrature)
If retenv is non-zero, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Inputs:
t -- Input array.
fc -- Center frequency (Hz).
bw -- Fractional bandwidth in frequency domain of pulse (Hz).
bwr -- Reference level at which fractional bandwidth is calculated (dB).
tpr -- If t is 'cutoff', then the function returns the cutoff time for when the
pulse amplitude falls below tpr (in dB).
retquad -- Return the quadrature (imaginary) as well as the real part of the signal
    retenv -- Return the envelope of the signal.
"""
if fc < 0:
raise ValueError, "Center frequency (fc=%.2f) must be >=0." % fc
if bw <= 0:
raise ValueError, "Fractional bandwidth (bw=%.2f) must be > 0." % bw
if bwr >= 0:
raise ValueError, "Reference level for bandwidth (bwr=%.2f) must " \
"be < 0 dB" % bwr
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
    ref = pow(10.0, bwr / 20.0)  # float arithmetic so an integer bwr is not truncated
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
# pi^2/a * fc^2 * bw^2 /4=-log(ref)
a = -(pi*fc*bw)**2 / (4*log(ref))
if t == 'cutoff': # compute cut_off point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError, "Reference level for time cutoff must be < 0 dB"
        tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref)/a)
yenv = exp(-a*t*t)
yI = yenv * cos(2*pi*fc*t)
yQ = yenv * sin(2*pi*fc*t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
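# Example (illustrative): find the cutoff time of the default pulse, then
# evaluate the in-phase component on a symmetric grid around t=0
# (`linspace` would need to be imported from numpy).
#   tc = gausspulse('cutoff', fc=1000, bw=0.5, bwr=-6, tpr=-60)
#   t = linspace(-tc, tc, 201)
#   yI = gausspulse(t, fc=1000, bw=0.5)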
def chirp(t, f0=0, t1=1, f1=100, method='linear', phi=0, qshape=None):
"""Frequency-swept cosine generator.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
f0 : float or ndarray, optional
Frequency (in Hz) of the waveform at time 0. If `f0` is an
ndarray, it specifies the frequency change as a polynomial in
`t` (see Notes below).
t1 : float, optional
Time at which `f1` is specified.
f1 : float, optional
Frequency (in Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic'}, optional
Kind of frequency sweep.
phi : float
Phase offset, in degrees.
qshape : {'convex', 'concave'}
If method is 'quadratic', `qshape` specifies its shape.
Notes
-----
If `f0` is an array, it forms the coefficients of a polynomial in
    `t` (see `numpy.polyval`). The polynomial determines the waveform
frequency change in time. In this case, the values of `f1`, `t1`,
`method`, and `qshape` are ignored.
"""
# Convert to radians.
phi *= pi / 180
if size(f0) > 1:
# We were given a polynomial.
return cos(2*pi*polyval(polyint(f0),t)+phi)
if method in ['linear','lin','li']:
beta = (f1-f0)/t1
phase_angle = 2*pi * (f0*t + 0.5*beta*t*t)
elif method in ['quadratic','quad','q']:
if qshape == 'concave':
mxf = max(f0,f1)
mnf = min(f0,f1)
f1,f0 = mxf, mnf
elif qshape == 'convex':
mxf = max(f0,f1)
mnf = min(f0,f1)
f1,f0 = mnf, mxf
else:
raise ValueError("qshape must be either 'concave' or 'convex' but "
"a value of %r was given." % qshape)
beta = (f1-f0)/t1/t1
phase_angle = 2*pi * (f0*t + beta*t*t*t/3)
elif method in ['logarithmic','log','lo']:
if f1 <= f0:
raise ValueError(
"For a logarithmic sweep, f1=%f must be larger than f0=%f."
% (f1, f0))
beta = log10(f1-f0)/t1
phase_angle = 2*pi * (f0*t + (pow(10,beta*t)-1)/(beta*log(10)))
else:
raise ValueError("method must be 'linear', 'quadratic', or "
"'logarithmic' but a value of %r was given." % method)
return cos(phase_angle + phi)
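# Example (illustrative): a linear sweep from 10 Hz to 50 Hz over one second
# (`linspace` would need to be imported from numpy).
#   t = linspace(0, 1, 501)
#   y = chirp(t, f0=10, t1=1, f1=50, method='linear')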
|
huard/scipy-work
|
scipy/signal/waveforms.py
|
Python
|
bsd-3-clause
| 6,637
|
[
"Gaussian"
] |
34df022500bbfa229a24adb00c917330d560bb20d8ac542a0b9f22d5ae0583b4
|
#!/usr/bin/env python
"""
Implementation for get_environment CLI.
"""
from pymatgen.analysis.chemenv.utils.scripts_utils import compute_environments, welcome, thankyou
from pymatgen.analysis.chemenv.utils.chemenv_config import ChemEnvConfig
from argparse import ArgumentParser
import logging
__author__ = 'waroquiers'
def main():
"""
Main function.
"""
m_description = 'Welcome to the Chemical Environment Package.'
parser = ArgumentParser(description=m_description)
setup_help = 'Used to setup the configuration of the package '
setup_help += '(MaterialsProject access, ICSD database access, package options, ...)'
parser.add_argument('-s', '--setup', help=setup_help, action='store_true')
parser.add_argument('-m', '--message-level', help='Message level (DEBUG, INFO, WARNING, ERROR or CRITICAL - '
'default : WARNING)',
default='WARNING')
args = parser.parse_args()
if args.setup:
chemenv_config = ChemEnvConfig.auto_load()
chemenv_config.setup()
print('\n Setup completed')
else:
chemenv_config = ChemEnvConfig.auto_load()
welcome(chemenv_config)
logging.basicConfig(format='%(levelname)s:%(module)s:%(funcName)s:%(message)s', level=args.message_level)
compute_environments(chemenv_config)
thankyou()
if __name__ == '__main__':
main()
|
gVallverdu/pymatgen
|
pymatgen/cli/get_environment.py
|
Python
|
mit
| 1,425
|
[
"pymatgen"
] |
5208728c91c384a5fa02a793648987e3cc0c93e0f42772ef6b76a0f942d83611
|
# -*- coding: utf-8 -*-
"""Miscellaneous convenience functions.
"""
import ast
import functools
import operator
import os
import shutil
import subprocess
import collections
import itertools
from numbers import Number
import traceback
from warnings import warn
from functools import (partial, wraps)
import xarray
import pandas
import numpy as np
__all__ = [
'deprecated',
'extract_block_diag',
'safe_eval',
'unique',
'path_append',
'path_prepend',
'path_remove',
'get_time_dimensions',
'get_time_coordinates',
'concat_each_time_coordinate',
'undo_xarray_floatification',
'image2mpeg',
'stack_xarray_repdim',
'split_units',
'reraise_with_stack',
'get_xarray_groups',
'get_xarray_group',
'add_xarray_groups',
'to_array',
]
def deprecated(func=None, new_name=None, message=None):
"""Decorator which can be used to mark functions as deprecated.
Examples:
Calling ``foo()`` will raise a ``DeprecationWarning``.
>>> @deprecated
... def deprecated_function():
... pass
Display message with additional information:
>>> @deprecated(message='Additional information message.')
... def deprecated_function():
... pass
"""
# Return partial when no arguments are passed.
# This allows a plain call of the decorator.
if func is None:
return partial(deprecated, new_name=new_name, message=message)
# Build warning message (with optional information).
msg = f'\nCall to deprecated function `{func.__name__}`.'
if new_name is not None:
msg += f' Use `{new_name}` instead.'
if message is not None:
msg += f'\n{message}'
# Wrapper that prints the warning before calling the deprecated function.
@wraps(func)
def wrapper(*args, **kwargs):
warn(msg, category=DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
if wrapper.__doc__ is None:
wrapper.__doc__ = ''
# Lines added to the docstring need to be indented with four spaces as this
# is technically the case for all lines except the first one.
# If this is not done, the Sphinx build will produce wrong results as the
# relative indentation of the added lines and the original docstring does
# not match.
wrapper.__doc__ += (
'\n\n .. warning::\n Function is deprecated'
' and will be removed in a future version.'
)
if new_name is not None:
wrapper.__doc__ += f' Use :func:`{new_name}` instead.'
if message is not None:
wrapper.__doc__ += f'\n\n {message}'
return wrapper
def extract_block_diag(M, n):
"""Extract diagonal blocks from square Matrix.
Args:
M (np.array): Square matrix.
n (int): Number of blocks to extract.
Example:
>>> foo = np.array([[ 1., 1., 0., 0.],
... [ 1., 1., 0., 0.],
... [ 0., 0., 2., 2.],
... [ 0., 0., 2., 2.]])
>>> extract_block_diag(foo, 2)
[array([[ 1., 1.],
[ 1., 1.]]), array([[ 2., 2.],
[ 2., 2.]])]
"""
return [np.split(m, n, axis=1)[i] for i, m in enumerate(np.split(M, n))]
# This code, or a previous version thereof, was posted by user 'J. F.
# Sebastian' on http://stackoverflow.com/a/9558001/974555 on 2012-03-04
# and is dual-licensed under CC-BY-SA 3.0 and MIT, as confirmed at
# https://stackoverflow.com/questions/2371436/evaluating-a-mathematical-expression-in-a-string/9558001?noredirect=1#comment76927447_9558001
# on 2017-07-07
operators = {ast.Add: operator.add,
ast.Sub: operator.sub,
ast.Mult: operator.mul,
ast.Div: operator.truediv,
ast.Pow: operator.pow,
ast.BitXor: operator.xor,
ast.USub: operator.neg}
def safe_eval(expr):
"""Safely evaluate string that may contain basic arithmetic
"""
return _safe_eval_node(ast.parse(expr, mode="eval").body)
def _safe_eval_node(node):
if isinstance(node, ast.Num): # <number>
return node.n
elif isinstance(node, ast.BinOp): # <left> <operator> <right>
return operators[type(node.op)](
_safe_eval_node(node.left), _safe_eval_node(node.right))
elif isinstance(node, ast.UnaryOp): # <operator> <operand> e.g., -1
return operators[type(node.op)](_safe_eval_node(node.operand))
else:
raise TypeError(node)
# End of snippet derived from http://stackoverflow.com/a/9558001/974555
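# Example (illustrative, not part of the original module): only basic
# arithmetic is evaluated; any other expression raises TypeError.
#   >>> safe_eval("2**8 + 6/3")
#   258.0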
def unique(seq):
"""Remove duplicates from list whilst keeping the original order
Notes:
If you do not care about keeping the order, use this code:
        >>> list(set([0, 5, 1, 2, 0, 3, 1,]))
[0, 1, 2, 3, 5]
This code is taken from https://stackoverflow.com/a/480227.
Args:
seq: A sequence (list, etc.) of elements.
Returns:
A list with unique items with original order.
Examples:
        >>> unique([0, 5, 1, 2, 0, 3, 1,])
        [0, 5, 1, 2, 3]
"""
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
def path_append(dirname, path='PATH'):
"""Append a directory to environment path variable.
Append entries to colon-separated variables (e.g. the system path).
If the entry is already in the list, it is moved to the end.
A path variable is set, if not existing at function call.
Parameters:
dirname (str): Directory to add to the path.
path (str): Name of the path variable to append to.
Defaults to the system path 'PATH'.
"""
if path in os.environ:
dir_list = os.environ[path].split(os.pathsep)
if dirname in dir_list:
dir_list.remove(dirname)
dir_list.append(dirname)
os.environ[path] = os.pathsep.join(dir_list)
else:
os.environ[path] = dirname
def path_prepend(dirname, path='PATH'):
"""Prepend a directory to environment path variable.
    Prepend entries to colon-separated variables (e.g. the system path).
    If the entry is already in the list, it is moved to the front.
A path variable is set, if not existing at function call.
Parameters:
dirname (str): Directory to add to the path.
        path (str): Name of the path variable to prepend to.
Defaults to the system path 'PATH'.
"""
if path in os.environ:
dir_list = os.environ[path].split(os.pathsep)
if dirname in dir_list:
dir_list.remove(dirname)
dir_list.insert(0, dirname)
os.environ[path] = os.pathsep.join(dir_list)
else:
os.environ[path] = dirname
def path_remove(dirname, path='PATH'):
"""Remove a directory from environment path variable.
Remove entries from colon-separated variables (e.g. the system path).
If the path variable is not set, nothing is done.
Parameters:
        dirname (str): Directory to remove from the path.
        path (str): Name of the path variable to remove the directory from.
Defaults to the system path 'PATH'.
"""
if path in os.environ:
dir_list = os.environ[path].split(os.pathsep)
dir_list.remove(dirname)
os.environ[path] = os.pathsep.join(dir_list)
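# Example (illustrative, using a hypothetical variable name):
#   path_append('/opt/tools/bin', path='MY_TOOL_PATH')
#   path_prepend('/home/user/bin', path='MY_TOOL_PATH')
#   os.environ['MY_TOOL_PATH']   # '/home/user/bin:/opt/tools/bin' on POSIX
#   path_remove('/opt/tools/bin', path='MY_TOOL_PATH')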
def get_time_dimensions(ds):
"""From a xarray dataset or dataarray, get dimensions corresponding to time coordinates
"""
return {k for (k, v) in ds.coords.items() if k in ds.dims and v.dtype.kind == "M"}
def get_time_coordinates(ds):
"""From a xarray dataset or dataarray, get coordinates with at least 1 time dimension
"""
time_dims = get_time_dimensions(ds)
return {k for (k, v) in ds.coords.items() if set(v.dims)&time_dims}
# Any commits made to this module between 2015-05-01 and 2017-03-01
# by Gerrit Holl are developed for the EC project “Fidelity and
# Uncertainty in Climate Data Records from Earth Observations (FIDUCEO)”.
# Grant agreement: 638822. This specifically applies to the function
# concat_each_time_coordinate.
#
# All those contributions are dual-licensed under the MIT license for use
# in typhon, and the GNU General Public License version 3.
def concat_each_time_coordinate(*datasets):
"""Concatenate xarray datasets along each time coordinate
    Given two or more xarray datasets, separately concatenate data
variables with different time coordinates. For example, one might
have dimensions 'scanline' and 'calibration_cycle' that are each along
time coordinates. Data variables may have dimension either scanline
    or calibration_cycle or neither, but not both. Both correspond to
    coordinates with a datetime index. Ordinary xarray.concat along either
dimension will broadcast the other one in a way similar to repmat,
thus exploding memory usage (test case for one FCDR HIRS granule: 89
MB to 81 GB). Instead, here, for each data variable, we will
concatenate only along at most one time coordinate.
Arguments:
*datasets: xarray.Dataset objects to be concatenated
"""
time_coords = get_time_coordinates(datasets[0])
time_dims = get_time_dimensions(datasets[0])
# ensure each data-variable has zero or one of those time coordinates
# as dimensions
for ds in datasets:
if not all([len(set(v.dims) & time_coords) <= 1
for (k, v) in ds.data_vars.items()]):
raise ValueError("Found vars with multiple time coords")
new_sizes = {k: sum(g.dims[k] for g in datasets)
if k in time_coords
else datasets[0].dims[k]
for k in datasets[0].dims.keys()}
# note data vars per time coordinate
time_vars = {k: (set(v.dims)&time_coords).pop() for (k, v) in datasets[0].variables.items() if set(v.dims)&time_coords}
time_vars_per_time_dim = {k: {vn for (vn, dn) in time_vars.items() if dn==k} for k in time_coords}
untimed_vars = datasets[0].data_vars.keys() - time_vars.keys()
# allocate new
new = xarray.Dataset(
{k: (v.dims,
np.zeros(shape=[new_sizes[d] for d in v.dims],
dtype=v.dtype))
for (k, v) in datasets[0].data_vars.items()})
# coordinates cannot be set in the same way so need to be allocated
# separately
new_coords = {k: xarray.DataArray(
np.zeros(shape=[new_sizes[d] for d in v.dims],
dtype=datasets[0][k].dtype),
dims=v.dims)
for (k, v) in datasets[0].coords.items()}
# copy over untimed vars
for v in untimed_vars:
new[v].values[...] = datasets[0][v].values
# and untimed coords
for c in datasets[0].coords.keys() - time_coords:
new_coords[c][...] = datasets[0].coords[c]
# keep track of progress per time dimension
n_per_dim = dict.fromkeys(time_coords, 0)
# copy timed vars dataset by dataset
for ds in datasets:
for (v, timedim) in time_vars.items():
ncur = n_per_dim[timedim]
nnew_cur = ds.dims[timedim]
if nnew_cur == 0:
# nothing to fill, but prevent
# https://github.com/pydata/xarray/issues/1329
continue
slc = {dim: slice(ncur, ncur+nnew_cur)
if dim==timedim else slice(None)
for dim in ds[v].dims}
if v in time_coords: # time coordinate
new_coords[v][slc] = ds[v]
else:
new[v].loc[slc] = ds[v]
for timedim in time_dims:
n_per_dim[timedim] += ds.dims[timedim]
# copy attributes
new.attrs.update(**datasets[0].attrs)
for k in new.variables.keys():
new[k].attrs.update(**datasets[0][k].attrs)
new[k].encoding.update(**datasets[0][k].encoding)
return new.assign_coords(**new_coords)
def undo_xarray_floatification(ds, fields=None):
"""convert floats back to ints in xarray dataset where appropriate
When xarray opens a NetCDF file with the default decode_cf=True,
any integer values that have a _FillValue set are converted to float,
such that any _FillValue-set values can be set to nan. Some datasets
may have such _FillValue set even though they are never used.
In this case, it may be desirable to convert those values back to
the original dtype (which is preserved in the .encoding attribute),
for example, when those integers are intended to be used as indices.
This function takes an xarray Dataset, checks all the variables which
originally have an integer dtype and a fillvalue set, and converts
those back to int. Optionally only a subset of those is converted.
Use this function only when those fill values are not used. Behaviour
when fill values are actually used is undefined.
Parameters:
ds (xarray.Dataset): xarray dataset to be converted. Will be
copied.
fields (Collection or None): Describes what fields shall be
converted. If not given or None (default), convert all fields
that were originally ints but converted to float due to having a
_FillValue set. Even when given, only fields meeting those
criteria will be converted.
Returns:
The same dataset but with changes as described above.
"""
to_correct = {k for (k, v) in ds.data_vars.items()
if v.encoding.get("dtype", np.dtype("O")).kind[0] in "ui" and
not v.dtype.kind in "uiMm"} # don't convert datetime/deltas
if fields is not None:
to_correct &= fields
ds2 = ds.copy()
for k in to_correct:
ds2[k] = ds[k].astype(ds[k].encoding["dtype"])
ds2[k].encoding.update(ds[k].encoding)
return ds2
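# Example (illustrative): if ds["index"] was read back as float only because a
# _FillValue was set, convert it back to its original integer dtype.
#   ds_fixed = undo_xarray_floatification(ds, fields={"index"})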
def image2mpeg(glob, outfile, framerate=12, resolution='1920x1080'):
"""Combine image files to a video using ``ffmpeg``.
Notes:
The function is tested for ``ffmpeg`` versions 2.8.6 and 3.2.2.
Parameters:
glob (str): Glob pattern for input files.
outfile (str): Path to output file.
The file fileformat is determined by the extension.
framerate (int or str): Number of frames per second.
resolution (str or tuple): Video resolution given in width and height
(``"WxH"`` or ``(W, H)``).
Raises:
Exception: The function raises an exception if the
underlying ``ffmpeg`` process returns a non-zero exit code.
Example:
>>> image2mpeg('foo_*.png', 'foo.mp4')
"""
if not shutil.which('ffmpeg'):
raise Exception('``ffmpeg`` not found.')
# If video resolution is given as tuple, convert it into string format
# to directly pass it to ffmpeg later.
if isinstance(resolution, tuple) and len(resolution) == 2:
resolution = '{width}x{height}'.format(width=resolution[0],
height=resolution[1])
p = subprocess.run(
['ffmpeg',
'-framerate', str(framerate),
'-pattern_type', 'glob', '-i', glob,
'-s:v', resolution,
'-c:v', 'libx264',
'-profile:v', 'high',
'-crf', '20',
'-pix_fmt', 'yuv420p',
'-y', outfile
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True
)
# If the subprocess fails, raise exception including error message.
if p.returncode != 0:
raise Exception(p.stderr)
def stack_xarray_repdim(da, **dims):
"""Like xarrays stack, but with partial support for repeated dimensions
The xarray.DataArray.stack method fails when any dimension occurs
multiple times, as repeated dimensions are not currently very well
supported in xarray (2018-03-26). This method provides a workaround
so that stack can be used for an array where some dimensions are
repeated, as long as the repeated dimensions are themselves not
stacked.
Parameters:
da (DataArray): DataArray to operate on.
**dims: Dimensions to stack. As for xarray.DataArray.stack.
"""
# make view of da without repeated dimensions
cnt = collections.Counter(da.dims)
D = {k: itertools.count() for k in cnt.keys()}
tmpdims = []
dimmap = {}
for dim in da.dims:
if cnt[dim] == 1:
tmpdims.append(dim)
else:
newdim = "{:s}{:d}".format(dim, next(D[dim]))
tmpdims.append(newdim)
dimmap[newdim] = dim
da2 = xarray.DataArray(da.values, dims=tmpdims)
da2_stacked = da2.stack(**dims)
# put back repeated dimensions with new coordinates
da3 = xarray.DataArray(da2_stacked.values,
dims=[dimmap.get(d, d) for d in da2_stacked.dims])
da3 = da3.assign_coords(
**{k: pandas.MultiIndex.from_product(
[da.coords[kk] for kk in dims[k]], names=dims[k])
if k in dims else da.coords[k] for k in np.unique(da3.dims)})
return da3
def split_units(value):
"""Splits a string into float number and potential unit
References
Taken from https://stackoverflow.com/a/30087094
Args:
value: String with number and unit.
Returns
A tuple of a float and unit string.
Examples:
>>> split_units("2GB")
(2.0, 'GB')
>>> split_units("17 ft")
(17.0, 'ft')
>>> split_units(" 3.4e-27 frobnitzem ")
(3.4e-27, 'frobnitzem')
>>> split_units("9001")
(9001.0, '')
>>> split_units("spam sandwhiches")
(0, 'spam sandwhiches')
>>> split_units("")
(0, '')
"""
units = ""
number = 0
while value:
try:
number = float(value)
break
except ValueError:
units = value[-1:] + units
value = value[:-1]
return number, units.strip()
def reraise_with_stack(func):
"""Make functions include the whole stack in raised exceptions
Notes:
This is a decorator function.
When using the concurrent.futures module, the original traceback message
gets lost, which makes it difficult to debug. This decorator solves the
problem.
References:
Taken from https://stackoverflow.com/a/29357032.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
traceback_str = traceback.format_exc()
raise Exception(
"Error occurred. Original traceback is\n%s\n" % traceback_str
)
return wrapped
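# Example (illustrative): decorate a worker function before submitting it to a
# concurrent.futures pool so the original traceback survives in the raised error.
#   @reraise_with_stack
#   def worker(x):
#       return 1 / x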
def get_xarray_groups(dataset, only_names=False):
"""Get pseudo groups from xarray dataset object (only direct under root)
xarray.Dataset objects does not allow the use of groups, but you can
emulate them by using */* in the variable's name.
Args:
dataset: A xarray.Dataset object
only_names: Return only the names of the groups.
Returns:
A set of group names if `only_names` is True. Otherwise, a dictionary
with the group names and all its variables as xarray.Datasets objects.
"""
groups = {
var_name.split("/", 1)[0]
for var_name in dataset.variables
if "/" in var_name
}
if only_names:
return groups
return {
group: get_xarray_group(dataset, group)
for group in groups
}
def get_xarray_group(dataset, group):
"""Get pseudo group from xarray.Dataset
Args:
dataset: A xarray.Dataset object with pseudo groups.
group: The name of the group (can also be a subgroup).
Returns:
A xarray.Dataset with the pseudo group.
"""
if not group.endswith("/"):
group += "/"
group_vars = [
var
for var in dataset.variables
if var.startswith(group)
]
if not group_vars:
raise KeyError(f"The group {group} was not found!")
return dataset[group_vars]
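# Example (illustrative): variables named "meta/lat" and "meta/lon" form a
# pseudo group called "meta".
#   ds = xarray.Dataset({"meta/lat": ("x", np.zeros(3)),
#                        "meta/lon": ("x", np.zeros(3)),
#                        "temp": ("x", np.zeros(3))})
#   get_xarray_groups(ds, only_names=True)   # {'meta'}
#   get_xarray_group(ds, "meta")             # Dataset with the two "meta/" variables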
def add_xarray_groups(ds, **kwargs):
"""Add a xarray.Dataset as a subgroup to another xarray.Dataset
Args:
ds: The root xarray.Dataset.
**kwargs: Keyword arguments: the key is the name of the group and the
value must be a xarray.Dataset.
Returns:
`ds` with the added subgroups
"""
datasets = [ds]
for group_name, group in kwargs.items():
group = group.rename(
{
var_name: "/".join([group_name, var_name])
for var_name in group.variables
},
)
# Add the group name also to the dimensions:
group = group.rename({
dim: "/".join([group_name, dim])
for dim in group.dims
if dim not in group.coords
})
datasets.append(group)
return xarray.merge(datasets)
def to_array(item):
"""Convert item to numpy array
Args:
item: Can be a number, list, tuple or numpy array.
Returns:
`item` converted to a numpy array.
"""
if isinstance(item, np.ndarray):
return item
if isinstance(item, Number):
return np.array([item])
else:
return np.array(item)
|
atmtools/typhon
|
typhon/utils/common.py
|
Python
|
mit
| 21,289
|
[
"NetCDF"
] |
f0f0ca42413bb645370bb347d3e1d1392748db2dcfc2c371db090107bc864e49
|
"""Generate html report from MNE database
"""
# Authors: Alex Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Mainak Jas <mainak@neuro.hut.fi>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
import os
import os.path as op
import fnmatch
import re
import codecs
import time
from glob import glob
import warnings
import base64
from datetime import datetime as dt
import numpy as np
from . import read_evokeds, read_events, pick_types, read_cov
from .io import Raw, read_info
from .utils import _TempDir, logger, verbose, get_subjects_dir
from .viz import plot_events, plot_trans, plot_cov
from .viz._3d import _plot_mri_contours
from .forward import read_forward_solution
from .epochs import read_epochs
from .minimum_norm import read_inverse_operator
from .parallel import parallel_func, check_n_jobs
from .externals.tempita import HTMLTemplate, Template
from .externals.six import BytesIO
from .externals.six import moves
VALID_EXTENSIONS = ['raw.fif', 'raw.fif.gz', 'sss.fif', 'sss.fif.gz',
'-eve.fif', '-eve.fif.gz', '-cov.fif', '-cov.fif.gz',
'-trans.fif', '-trans.fif.gz', '-fwd.fif', '-fwd.fif.gz',
'-epo.fif', '-epo.fif.gz', '-inv.fif', '-inv.fif.gz',
'-ave.fif', '-ave.fif.gz', 'T1.mgz']
SECTION_ORDER = ['raw', 'events', 'epochs', 'evoked', 'covariance', 'trans',
'mri', 'forward', 'inverse']
###############################################################################
# PLOTTING FUNCTIONS
def _fig_to_img(function=None, fig=None, image_format='png',
scale=None, **kwargs):
"""Wrapper function to plot figure and create a binary image"""
import matplotlib.pyplot as plt
if function is not None:
plt.close('all')
fig = function(**kwargs)
output = BytesIO()
if scale is not None:
_scale_mpl_figure(fig, scale)
fig.savefig(output, format=image_format, bbox_inches='tight',
dpi=fig.get_dpi())
plt.close(fig)
output = output.getvalue()
return (output if image_format == 'svg' else
base64.b64encode(output).decode('ascii'))
def _scale_mpl_figure(fig, scale):
"""Magic scaling helper
Keeps font-size and artist sizes constant
0.5 : current font - 4pt
2.0 : current font + 4pt
XXX it's unclear why this works, but good to go for most cases
"""
fig.set_size_inches(fig.get_size_inches() * scale)
fig.set_dpi(fig.get_dpi() * scale)
import matplotlib as mpl
if scale >= 1:
sfactor = scale ** 2
elif scale < 1:
sfactor = -((1. / scale) ** 2)
for text in fig.findobj(mpl.text.Text):
fs = text.get_fontsize()
new_size = fs + sfactor
if new_size <= 0:
raise ValueError('could not rescale matplotlib fonts, consider '
'increasing "scale"')
text.set_fontsize(new_size)
fig.canvas.draw()
def _figs_to_mrislices(sl, n_jobs, **kwargs):
import matplotlib.pyplot as plt
plt.close('all')
use_jobs = min(n_jobs, max(1, len(sl)))
parallel, p_fun, _ = parallel_func(_plot_mri_contours, use_jobs)
outs = parallel(p_fun(slices=s, **kwargs)
for s in np.array_split(sl, use_jobs))
for o in outs[1:]:
outs[0] += o
return outs[0]
def _iterate_trans_views(function, **kwargs):
"""Auxiliary function to iterate over views in trans fig.
"""
from scipy.misc import imread
import matplotlib.pyplot as plt
import mayavi
fig = function(**kwargs)
assert isinstance(fig, mayavi.core.scene.Scene)
views = [(90, 90), (0, 90), (0, -90)]
fig2, axes = plt.subplots(1, len(views))
for view, ax in zip(views, axes):
mayavi.mlab.view(view[0], view[1])
# XXX: save_bmp / save_png / ...
tempdir = _TempDir()
temp_fname = op.join(tempdir, 'test.png')
if fig.scene is not None:
fig.scene.save_png(temp_fname)
im = imread(temp_fname)
else: # Testing mode
im = np.zeros((2, 2, 3))
ax.imshow(im)
ax.axis('off')
mayavi.mlab.close(fig)
img = _fig_to_img(fig=fig2)
return img
###############################################################################
# TOC FUNCTIONS
def _is_bad_fname(fname):
"""Auxiliary function for identifying bad file naming patterns
and highlighting them in red in the TOC.
"""
if fname.endswith('(whitened)'):
fname = fname[:-11]
if not fname.endswith(tuple(VALID_EXTENSIONS + ['bem', 'custom'])):
return 'red'
else:
return ''
def _get_toc_property(fname):
"""Auxiliary function to assign class names to TOC
list elements to allow toggling with buttons.
"""
if fname.endswith(('-eve.fif', '-eve.fif.gz')):
div_klass = 'events'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-ave.fif', '-ave.fif.gz')):
div_klass = 'evoked'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-cov.fif', '-cov.fif.gz')):
div_klass = 'covariance'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('raw.fif', 'raw.fif.gz',
'sss.fif', 'sss.fif.gz')):
div_klass = 'raw'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-trans.fif', '-trans.fif.gz')):
div_klass = 'trans'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-fwd.fif', '-fwd.fif.gz')):
div_klass = 'forward'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-inv.fif', '-inv.fif.gz')):
div_klass = 'inverse'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-epo.fif', '-epo.fif.gz')):
div_klass = 'epochs'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('.nii', '.nii.gz', '.mgh', '.mgz')):
div_klass = 'mri'
tooltip = 'MRI'
text = 'MRI'
elif fname.endswith(('bem')):
div_klass = 'mri'
tooltip = 'MRI'
text = 'MRI'
elif fname.endswith('(whitened)'):
div_klass = 'evoked'
tooltip = fname
text = op.basename(fname[:-11]) + '(whitened)'
else:
div_klass = fname.split('-#-')[1]
tooltip = fname.split('-#-')[0]
text = fname.split('-#-')[0]
return div_klass, tooltip, text
def _iterate_files(report, fnames, info, cov, baseline, sfreq, on_error):
"""Auxiliary function to parallel process in batch mode.
"""
htmls, report_fnames, report_sectionlabels = [], [], []
def _update_html(html, report_fname, report_sectionlabel):
"""Update the lists above."""
htmls.append(html)
report_fnames.append(report_fname)
report_sectionlabels.append(report_sectionlabel)
for fname in fnames:
logger.info("Rendering : %s"
% op.join('...' + report.data_path[-20:],
fname))
try:
if fname.endswith(('raw.fif', 'raw.fif.gz',
'sss.fif', 'sss.fif.gz')):
html = report._render_raw(fname)
report_fname = fname
report_sectionlabel = 'raw'
elif fname.endswith(('-fwd.fif', '-fwd.fif.gz')):
html = report._render_forward(fname)
report_fname = fname
report_sectionlabel = 'forward'
elif fname.endswith(('-inv.fif', '-inv.fif.gz')):
html = report._render_inverse(fname)
report_fname = fname
report_sectionlabel = 'inverse'
elif fname.endswith(('-ave.fif', '-ave.fif.gz')):
if cov is not None:
html = report._render_whitened_evoked(fname, cov, baseline)
report_fname = fname + ' (whitened)'
report_sectionlabel = 'evoked'
_update_html(html, report_fname, report_sectionlabel)
html = report._render_evoked(fname, baseline)
report_fname = fname
report_sectionlabel = 'evoked'
elif fname.endswith(('-eve.fif', '-eve.fif.gz')):
html = report._render_eve(fname, sfreq)
report_fname = fname
report_sectionlabel = 'events'
elif fname.endswith(('-epo.fif', '-epo.fif.gz')):
html = report._render_epochs(fname)
report_fname = fname
report_sectionlabel = 'epochs'
elif (fname.endswith(('-cov.fif', '-cov.fif.gz')) and
report.info_fname is not None):
html = report._render_cov(fname, info)
report_fname = fname
report_sectionlabel = 'covariance'
elif (fname.endswith(('-trans.fif', '-trans.fif.gz')) and
report.info_fname is not None and report.subjects_dir
is not None and report.subject is not None):
html = report._render_trans(fname, report.data_path, info,
report.subject,
report.subjects_dir)
report_fname = fname
report_sectionlabel = 'trans'
else:
html = None
report_fname = None
report_sectionlabel = None
except Exception as e:
if on_error == 'warn':
logger.warning('Failed to process file %s:\n"%s"' % (fname, e))
elif on_error == 'raise':
raise
html = None
report_fname = None
report_sectionlabel = None
_update_html(html, report_fname, report_sectionlabel)
return htmls, report_fnames, report_sectionlabels
###############################################################################
# IMAGE FUNCTIONS
def _build_image(data, cmap='gray'):
"""Build an image encoded in base64.
"""
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
figsize = data.shape[::-1]
if figsize[0] == 1:
figsize = tuple(figsize[1:])
data = data[:, :, 0]
fig = Figure(figsize=figsize, dpi=1.0, frameon=False)
FigureCanvas(fig)
cmap = getattr(plt.cm, cmap, plt.cm.gray)
fig.figimage(data, cmap=cmap)
output = BytesIO()
fig.savefig(output, dpi=1.0, format='png')
return base64.b64encode(output.getvalue()).decode('ascii')
def _iterate_sagittal_slices(array, limits=None):
"""Iterate sagittal slice.
"""
shape = array.shape[0]
for ind in range(shape):
if limits and ind not in limits:
continue
yield ind, array[ind, :, :]
def _iterate_axial_slices(array, limits=None):
"""Iterate axial slice.
"""
shape = array.shape[1]
for ind in range(shape):
if limits and ind not in limits:
continue
yield ind, array[:, ind, :]
def _iterate_coronal_slices(array, limits=None):
"""Iterate coronal slice.
"""
shape = array.shape[2]
for ind in range(shape):
if limits and ind not in limits:
continue
yield ind, np.flipud(np.rot90(array[:, :, ind]))
def _iterate_mri_slices(name, ind, global_id, slides_klass, data, cmap,
image_format='png'):
"""Auxiliary function for parallel processing of mri slices.
"""
img_klass = 'slideimg-%s' % name
caption = u'Slice %s %s' % (name, ind)
slice_id = '%s-%s-%s' % (name, global_id, ind)
div_klass = 'span12 %s' % slides_klass
img = _build_image(data, cmap=cmap)
first = True if ind == 0 else False
html = _build_html_image(img, slice_id, div_klass,
img_klass, caption, first)
return ind, html
###############################################################################
# HTML functions
def _build_html_image(img, id, div_klass, img_klass, caption=None, show=True):
"""Build a html image from a slice array.
"""
html = []
add_style = u'' if show else u'style="display: none"'
html.append(u'<li class="%s" id="%s" %s>' % (div_klass, id, add_style))
html.append(u'<div class="thumbnail">')
html.append(u'<img class="%s" alt="" style="width:90%%;" '
'src="data:image/png;base64,%s">'
% (img_klass, img))
html.append(u'</div>')
if caption:
html.append(u'<h4>%s</h4>' % caption)
html.append(u'</li>')
return u'\n'.join(html)
slider_template = HTMLTemplate(u"""
<script>$("#{{slider_id}}").slider({
range: "min",
/*orientation: "vertical",*/
min: {{minvalue}},
max: {{maxvalue}},
step: {{step}},
value: {{startvalue}},
create: function(event, ui) {
$(".{{klass}}").hide();
$("#{{klass}}-{{startvalue}}").show();},
stop: function(event, ui) {
var list_value = $("#{{slider_id}}").slider("value");
$(".{{klass}}").hide();
$("#{{klass}}-"+list_value).show();}
})</script>
""")
def _build_html_slider(slices_range, slides_klass, slider_id):
"""Build an html slider for a given slices range and a slices klass.
"""
startvalue = slices_range[len(slices_range) // 2]
return slider_template.substitute(slider_id=slider_id,
klass=slides_klass,
step=slices_range[1] - slices_range[0],
minvalue=slices_range[0],
maxvalue=slices_range[-1],
startvalue=startvalue)
###############################################################################
# HTML scan renderer
header_template = Template(u"""
<!DOCTYPE html>
<html lang="fr">
<head>
{{include}}
<script type="text/javascript">
var toggle_state = false;
$(document).on('keydown', function (event) {
if (event.which == 84){
if (!toggle_state)
$('.has_toggle').trigger('click');
else if (toggle_state)
$('.has_toggle').trigger('click');
toggle_state = !toggle_state;
}
});
function togglebutton(class_name){
$(class_name).toggle();
if ($(class_name + '-btn').hasClass('active'))
$(class_name + '-btn').removeClass('active');
else
$(class_name + '-btn').addClass('active');
}
/* Scroll down on click to #id so that caption is not hidden
by navbar */
var shiftWindow = function() { scrollBy(0, -60) };
if (location.hash) shiftWindow();
window.addEventListener("hashchange", shiftWindow);
</script>
<style type="text/css">
body {
line-height: 1.5em;
font-family: arial, sans-serif;
}
h1 {
font-size: 30px;
text-align: center;
}
h4 {
text-align: center;
}
@link-color: @brand-primary;
@link-hover-color: darken(@link-color, 15%);
a{
color: @link-color;
&:hover {
color: @link-hover-color;
text-decoration: underline;
}
}
li{
list-style-type:none;
}
#wrapper {
text-align: left;
margin: 5em auto;
width: 700px;
}
#container{
position: relative;
}
#content{
margin-left: 22%;
margin-top: 60px;
width: 75%;
}
#toc {
margin-top: navbar-height;
position: fixed;
width: 20%;
height: 90%;
overflow: auto;
}
#toc li {
overflow: hidden;
padding-bottom: 2px;
margin-left: 20px;
}
#toc span {
float: left;
padding: 0 2px 3px 0;
}
div.footer {
background-color: #C0C0C0;
color: #000000;
padding: 3px 8px 3px 0;
clear: both;
font-size: 0.8em;
text-align: right;
}
</style>
</head>
<body>
<nav class="navbar navbar-inverse navbar-fixed-top" role="navigation">
<div class="container-fluid">
<div class="navbar-header navbar-left">
<ul class="nav nav-pills"><li class="active">
<a class="navbar-btn" data-toggle="collapse"
data-target="#viewnavbar" href="javascript:void(0)">
></a></li></ul>
</div>
<h3 class="navbar-text" style="color:white">{{title}}</h3>
<ul class="nav nav-pills navbar-right" style="margin-top: 7px;"
id="viewnavbar">
{{for section in sections}}
<li class="active {{sectionvars[section]}}-btn">
<a href="javascript:void(0)"
onclick="togglebutton('.{{sectionvars[section]}}')"
class="has_toggle">
{{section if section != 'mri' else 'MRI'}}
</a>
</li>
{{endfor}}
</ul>
</div>
</nav>
""")
footer_template = HTMLTemplate(u"""
</div></body>
<div class="footer">
© Copyright 2012-2013, MNE Developers.
Created on {{date}}.
Powered by <a href="http://martinos.org/mne">MNE</a>.
</div>
</html>
""")
html_template = Template(u"""
<li class="{{div_klass}}" id="{{id}}">
<h4>{{caption}}</h4>
<div class="thumbnail">{{html}}</div>
</li>
""")
image_template = Template(u"""
{{default interactive = False}}
{{default width = 50}}
{{default id = False}}
{{default image_format = 'png'}}
{{default scale = None}}
{{default comment = None}}
<li class="{{div_klass}}" {{if id}}id="{{id}}"{{endif}}
{{if not show}}style="display: none"{{endif}}>
{{if caption}}
<h4>{{caption}}</h4>
{{endif}}
<div class="thumbnail">
{{if not interactive}}
{{if image_format == 'png'}}
{{if scale is not None}}
<img alt="" style="width:{{width}}%;"
src="data:image/png;base64,{{img}}">
{{else}}
<img alt=""
src="data:image/png;base64,{{img}}">
{{endif}}
{{elif image_format == 'svg'}}
<div style="text-align:center;">
{{img}}
</div>
{{endif}}
{{if comment is not None}}
<br><br>
<div style="text-align:center;">
<style>
p.test {word-wrap: break-word;}
</style>
<p class="test">
{{comment}}
</p>
</div>
{{endif}}
{{else}}
<center>{{interactive}}</center>
{{endif}}
</div>
</li>
""")
repr_template = Template(u"""
<li class="{{div_klass}}" id="{{id}}">
<h4>{{caption}}</h4><hr>
{{repr}}
<hr></li>
""")
raw_template = Template(u"""
<li class="{{div_klass}}" id="{{id}}">
<h4>{{caption}}</h4>
<table class="table table-hover">
<tr>
<th>Measurement date</th>
{{if meas_date is not None}}
<td>{{meas_date}}</td>
{{else}}<td>Unknown</td>{{endif}}
</tr>
<tr>
<th>Experimenter</th>
{{if info['experimenter'] is not None}}
<td>{{info['experimenter']}}</td>
{{else}}<td>Unknown</td>{{endif}}
</tr>
<tr>
<th>Digitized points</th>
{{if info['dig'] is not None}}
<td>{{len(info['dig'])}} points</td>
{{else}}
<td>Not available</td>
{{endif}}
</tr>
<tr>
<th>Good channels</th>
<td>{{n_mag}} magnetometer, {{n_grad}} gradiometer,
and {{n_eeg}} EEG channels</td>
</tr>
<tr>
<th>Bad channels</th>
{{if info['bads'] is not None}}
<td>{{', '.join(info['bads'])}}</td>
{{else}}<td>None</td>{{endif}}
</tr>
<tr>
<th>EOG channels</th>
<td>{{eog}}</td>
</tr>
<tr>
<th>ECG channels</th>
<td>{{ecg}}</td>
<tr>
<th>Measurement time range</th>
<td>{{u'%0.2f' % tmin}} to {{u'%0.2f' % tmax}} sec.</td>
</tr>
<tr>
<th>Sampling frequency</th>
<td>{{u'%0.2f' % info['sfreq']}} Hz</td>
</tr>
<tr>
<th>Highpass</th>
<td>{{u'%0.2f' % info['highpass']}} Hz</td>
</tr>
<tr>
<th>Lowpass</th>
<td>{{u'%0.2f' % info['lowpass']}} Hz</td>
</tr>
</table>
</li>
""")
toc_list = Template(u"""
<li class="{{div_klass}}">
{{if id}}
<a href="javascript:void(0)" onclick="window.location.hash={{id}};">
{{endif}}
<span title="{{tooltip}}" style="color:{{color}}"> {{text}}</span>
{{if id}}</a>{{endif}}
</li>
""")
def _check_scale(scale):
"""Helper to ensure valid scale value is passed"""
if np.isscalar(scale) and scale <= 0:
raise ValueError('scale must be positive, not %s' % scale)
class Report(object):
"""Object for rendering HTML
Parameters
----------
info_fname : str
Name of the file containing the info dictionary.
subjects_dir : str | None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
subject : str | None
Subject name.
title : str
Title of the report.
cov_fname : str
Name of the file containing the noise covariance.
baseline : None or tuple of length 2 (default (None, 0))
The time interval to apply baseline correction for evokeds.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
The baseline (a, b) includes both endpoints, i.e. all
timepoints t such that a <= t <= b.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Notes
-----
To toggle the show/hide state of all sections in the html report, press 't'
.. versionadded:: 0.8.0
"""
def __init__(self, info_fname=None, subjects_dir=None,
subject=None, title=None, cov_fname=None, baseline=None,
verbose=None):
self.info_fname = info_fname
self.cov_fname = cov_fname
self.baseline = baseline
self.subjects_dir = get_subjects_dir(subjects_dir, raise_error=False)
self.subject = subject
self.title = title
self.verbose = verbose
self.initial_id = 0
self.html = []
self.fnames = [] # List of file names rendered
self.sections = [] # List of sections
self._sectionlabels = [] # Section labels
self._sectionvars = {} # Section variable names in js
# boolean to specify if sections should be ordered in natural
# order of processing (raw -> events ... -> inverse)
self._sort_sections = False
self._init_render() # Initialize the renderer
def _get_id(self):
"""Get id of plot.
"""
self.initial_id += 1
return self.initial_id
def _validate_input(self, items, captions, section, comments=None):
"""Validate input.
"""
if not isinstance(items, (list, tuple)):
items = [items]
if not isinstance(captions, (list, tuple)):
captions = [captions]
if not isinstance(comments, (list, tuple)):
if comments is None:
comments = [comments] * len(captions)
else:
comments = [comments]
if len(comments) != len(items):
raise ValueError('Comments and report items must have the same '
'length or comments should be None.')
elif len(captions) != len(items):
raise ValueError('Captions and report items must have the same '
'length.')
# Book-keeping of section names
if section not in self.sections:
self.sections.append(section)
self._sectionvars[section] = _clean_varnames(section)
return items, captions, comments
def _add_figs_to_section(self, figs, captions, section='custom',
image_format='png', scale=None, comments=None):
"""Auxiliary method for `add_section` and `add_figs_to_section`.
"""
from scipy.misc import imread
import matplotlib.pyplot as plt
mayavi = None
try:
# on some version mayavi.core won't be exposed unless ...
from mayavi import mlab # noqa, mlab imported
import mayavi
except: # on some systems importing Mayavi raises SystemExit (!)
warnings.warn('Could not import mayavi. Trying to render '
'`mayavi.core.scene.Scene` figure instances'
' will throw an error.')
figs, captions, comments = self._validate_input(figs, captions,
section, comments)
_check_scale(scale)
for fig, caption, comment in zip(figs, captions, comments):
caption = 'custom plot' if caption == '' else caption
sectionvar = self._sectionvars[section]
global_id = self._get_id()
div_klass = self._sectionvars[section]
img_klass = self._sectionvars[section]
if mayavi is not None and isinstance(fig, mayavi.core.scene.Scene):
tempdir = _TempDir()
temp_fname = op.join(tempdir, 'test')
if fig.scene is not None:
fig.scene.save_png(temp_fname)
img = imread(temp_fname)
else: # Testing mode
img = np.zeros((2, 2, 3))
mayavi.mlab.close(fig)
fig = plt.figure()
plt.imshow(img)
plt.axis('off')
img = _fig_to_img(fig=fig, scale=scale,
image_format=image_format)
html = image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
show=True,
image_format=image_format,
comment=comment)
self.fnames.append('%s-#-%s-#-custom' % (caption, sectionvar))
self._sectionlabels.append(sectionvar)
self.html.append(html)
def add_figs_to_section(self, figs, captions, section='custom',
scale=None, image_format='png', comments=None):
"""Append custom user-defined figures.
Parameters
----------
figs : list of figures.
Each figure in the list can be an instance of
matplotlib.pyplot.Figure, mayavi.core.scene.Scene,
or np.ndarray (images read in using scipy.imread).
captions : list of str
A list of captions to the figures.
section : str
Name of the section. If section already exists, the figures
will be appended to the end of the section
scale : float | None | callable
Scale the images maintaining the aspect ratio.
If None, no scaling is applied. If float, scale will determine
the relative scaling (might not work for scale <= 1 depending on
font sizes). If function, should take a figure object as input
parameter. Defaults to None.
image_format : {'png', 'svg'}
The image format to be used for the report. Defaults to 'png'.
comments : None | str | list of str
A string of text or a list of strings of text to be appended after
the figure.
"""
return self._add_figs_to_section(figs=figs, captions=captions,
section=section, scale=scale,
image_format=image_format,
comments=comments)
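    # Example (illustrative, assumes a matplotlib figure `fig` already exists):
    #   report = Report(title='demo')
    #   report.add_figs_to_section(fig, captions='My plot', section='custom')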
def add_images_to_section(self, fnames, captions, scale=None,
section='custom', comments=None):
"""Append custom user-defined images.
Parameters
----------
fnames : str | list of str
A filename or a list of filenames from which images are read.
captions : str | list of str
A caption or a list of captions to the images.
scale : float | None
Scale the images maintaining the aspect ratio.
Defaults to None. If None, no scaling will be applied.
section : str
Name of the section. If section already exists, the images
will be appended to the end of the section.
comments : None | str | list of str
A string of text or a list of strings of text to be appended after
the image.
"""
# Note: using scipy.misc is equivalent because scipy internally
# imports PIL anyway. It's not possible to redirect image output
# to binary string using scipy.misc.
from PIL import Image
fnames, captions, comments = self._validate_input(fnames, captions,
section, comments)
_check_scale(scale)
for fname, caption, comment in zip(fnames, captions, comments):
caption = 'custom plot' if caption == '' else caption
sectionvar = self._sectionvars[section]
global_id = self._get_id()
div_klass = self._sectionvars[section]
img_klass = self._sectionvars[section]
# Convert image to binary string.
im = Image.open(fname)
output = BytesIO()
im.save(output, format='png')
img = base64.b64encode(output.getvalue()).decode('ascii')
html = image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
width=scale,
comment=comment,
show=True)
self.fnames.append('%s-#-%s-#-custom' % (caption, sectionvar))
self._sectionlabels.append(sectionvar)
self.html.append(html)
def add_htmls_to_section(self, htmls, captions, section='custom'):
"""Append htmls to the report.
Parameters
----------
htmls : str | list of str
An html str or a list of html str.
captions : str | list of str
A caption or a list of captions to the htmls.
section : str
Name of the section. If section already exists, the images
will be appended to the end of the section.
Notes
-----
.. versionadded:: 0.9.0
"""
htmls, captions, _ = self._validate_input(htmls, captions, section)
for html, caption in zip(htmls, captions):
caption = 'custom plot' if caption == '' else caption
sectionvar = self._sectionvars[section]
global_id = self._get_id()
div_klass = self._sectionvars[section]
self.fnames.append('%s-#-%s-#-custom' % (caption, sectionvar))
self._sectionlabels.append(sectionvar)
self.html.append(
html_template.substitute(div_klass=div_klass, id=global_id,
caption=caption, html=html))
def add_bem_to_section(self, subject, caption='BEM', section='bem',
decim=2, n_jobs=1, subjects_dir=None):
"""Renders a bem slider html str.
Parameters
----------
subject : str
Subject name.
caption : str
A caption for the bem.
section : str
Name of the section. If section already exists, the bem
will be appended to the end of the section.
decim : int
Use this decimation factor for generating MRI/BEM images
(since it can be time consuming).
n_jobs : int
Number of jobs to run in parallel.
subjects_dir : str | None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
Notes
-----
.. versionadded:: 0.9.0
"""
caption = 'custom plot' if caption == '' else caption
html = self._render_bem(subject=subject, subjects_dir=subjects_dir,
decim=decim, n_jobs=n_jobs, section=section,
caption=caption)
html, caption, _ = self._validate_input(html, caption, section)
sectionvar = self._sectionvars[section]
self.fnames.append('%s-#-%s-#-custom' % (caption[0], sectionvar))
self._sectionlabels.append(sectionvar)
self.html.extend(html)
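    # A minimal usage sketch (hypothetical report instance and file names) for
    # the add_* helpers documented above; argument names follow the docstrings
    # in this class:
    #
    #     report.add_images_to_section(fnames=['dipole_fit.png'],
    #                                  captions=['Dipole fit'],
    #                                  section='custom')
    #     report.add_htmls_to_section('<b>Analysis complete</b>',
    #                                 captions='Summary', section='custom')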
###########################################################################
# HTML rendering
def _render_one_axis(self, slices_iter, name, global_id, cmap,
n_elements, n_jobs):
"""Render one axis of the array.
"""
global_id = global_id or name
html = []
slices, slices_range = [], []
html.append(u'<div class="col-xs-6 col-md-4">')
slides_klass = '%s-%s' % (name, global_id)
use_jobs = min(n_jobs, max(1, n_elements))
parallel, p_fun, _ = parallel_func(_iterate_mri_slices, use_jobs)
r = parallel(p_fun(name, ind, global_id, slides_klass, data, cmap)
for ind, data in slices_iter)
slices_range, slices = zip(*r)
# Render the slider
slider_id = 'select-%s-%s' % (name, global_id)
html.append(u'<div id="%s"></div>' % slider_id)
html.append(u'<ul class="thumbnails">')
# Render the slices
html.append(u'\n'.join(slices))
html.append(u'</ul>')
html.append(_build_html_slider(slices_range, slides_klass, slider_id))
html.append(u'</div>')
return '\n'.join(html)
###########################################################################
# global rendering functions
@verbose
def _init_render(self, verbose=None):
"""Initialize the renderer.
"""
inc_fnames = ['jquery-1.10.2.min.js', 'jquery-ui.min.js',
'bootstrap.min.js', 'jquery-ui.min.css',
'bootstrap.min.css']
include = list()
for inc_fname in inc_fnames:
logger.info('Embedding : %s' % inc_fname)
f = open(op.join(op.dirname(__file__), 'html', inc_fname),
'r')
if inc_fname.endswith('.js'):
include.append(u'<script type="text/javascript">' +
f.read() + u'</script>')
elif inc_fname.endswith('.css'):
include.append(u'<style type="text/css">' +
f.read() + u'</style>')
f.close()
self.include = ''.join(include)
@verbose
def parse_folder(self, data_path, pattern='*.fif', n_jobs=1, mri_decim=2,
sort_sections=True, on_error='warn', verbose=None):
"""Renders all the files in the folder.
Parameters
----------
data_path : str
Path to the folder containing data whose HTML report will be
created.
pattern : str | list of str
Filename pattern(s) to include in the report.
Example: [\*raw.fif, \*ave.fif] will include Raw as well as Evoked
files.
n_jobs : int
Number of jobs to run in parallel.
mri_decim : int
Use this decimation factor for generating MRI/BEM images
(since it can be time consuming).
sort_sections : bool
If True, sort sections in the order: raw -> events -> epochs
-> evoked -> covariance -> trans -> mri -> forward -> inverse.
on_error : str
What to do if a file cannot be rendered. Can be 'ignore',
'warn' (default), or 'raise'.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
"""
valid_errors = ['ignore', 'warn', 'raise']
if on_error not in valid_errors:
raise ValueError('on_error must be one of %s, not %s'
% (valid_errors, on_error))
self._sort = sort_sections
n_jobs = check_n_jobs(n_jobs)
self.data_path = data_path
if self.title is None:
self.title = 'MNE Report for ...%s' % self.data_path[-20:]
if not isinstance(pattern, (list, tuple)):
pattern = [pattern]
# iterate through the possible patterns
fnames = list()
for p in pattern:
fnames.extend(_recursive_search(self.data_path, p))
if self.info_fname is not None:
info = read_info(self.info_fname)
sfreq = info['sfreq']
else:
            warnings.warn('`info_fname` not provided. Cannot render '
                          '-cov.fif(.gz) and -trans.fif(.gz) files.')
info, sfreq = None, None
cov = None
if self.cov_fname is not None:
cov = read_cov(self.cov_fname)
baseline = self.baseline
# render plots in parallel; check that n_jobs <= # of files
logger.info('Iterating over %s potential files (this may take some '
'time)' % len(fnames))
use_jobs = min(n_jobs, max(1, len(fnames)))
parallel, p_fun, _ = parallel_func(_iterate_files, use_jobs)
r = parallel(p_fun(self, fname, info, cov, baseline, sfreq, on_error)
for fname in np.array_split(fnames, use_jobs))
htmls, report_fnames, report_sectionlabels = zip(*r)
# combine results from n_jobs discarding plots not rendered
self.html = [html for html in sum(htmls, []) if html is not None]
self.fnames = [fname for fname in sum(report_fnames, []) if
fname is not None]
self._sectionlabels = [slabel for slabel in
sum(report_sectionlabels, [])
if slabel is not None]
# find unique section labels
self.sections = sorted(set(self._sectionlabels))
self._sectionvars = dict(zip(self.sections, self.sections))
# render mri
if self.subjects_dir is not None and self.subject is not None:
logger.info('Rendering BEM')
self.html.append(self._render_bem(self.subject, self.subjects_dir,
mri_decim, n_jobs))
self.fnames.append('bem')
self._sectionlabels.append('mri')
else:
warnings.warn('`subjects_dir` and `subject` not provided.'
' Cannot render MRI and -trans.fif(.gz) files.')
def save(self, fname=None, open_browser=True, overwrite=False):
"""Save html report and open it in browser.
Parameters
----------
fname : str
File name of the report.
open_browser : bool
Open html browser after saving if True.
overwrite : bool
If True, overwrite report if it already exists.
"""
if fname is None:
if not hasattr(self, 'data_path'):
self.data_path = op.dirname(__file__)
warnings.warn('`data_path` not provided. Using %s instead'
% self.data_path)
fname = op.realpath(op.join(self.data_path, 'report.html'))
else:
fname = op.realpath(fname)
self._render_toc()
html = footer_template.substitute(date=time.strftime("%B %d, %Y"))
self.html.append(html)
if not overwrite and op.isfile(fname):
msg = ('Report already exists at location %s. '
'Overwrite it (y/[n])? '
% fname)
answer = moves.input(msg)
if answer.lower() == 'y':
overwrite = True
if overwrite or not op.isfile(fname):
logger.info('Saving report to location %s' % fname)
fobj = codecs.open(fname, 'w', 'utf-8')
fobj.write(_fix_global_ids(u''.join(self.html)))
fobj.close()
# remove header, TOC and footer to allow more saves
self.html.pop(0)
self.html.pop(0)
self.html.pop()
if open_browser:
import webbrowser
webbrowser.open_new_tab('file://' + fname)
return fname
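    # A minimal workflow sketch (hypothetical paths; assumes a Report instance
    # constructed as documented elsewhere in this module):
    #
    #     report.parse_folder(data_path='/path/to/subject', pattern='*.fif')
    #     report.save('report.html', open_browser=False, overwrite=True)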
@verbose
def _render_toc(self, verbose=None):
"""Render the Table of Contents.
"""
logger.info('Rendering : Table of Contents')
html_toc = u'<div id="container">'
html_toc += u'<div id="toc"><center><h4>CONTENTS</h4></center>'
global_id = 1
# Reorder self.sections to reflect natural ordering
if self._sort_sections:
sections = list(set(self.sections) & set(SECTION_ORDER))
custom = [section for section in self.sections if section
not in SECTION_ORDER]
order = [sections.index(section) for section in SECTION_ORDER if
section in sections]
self.sections = np.array(sections)[order].tolist() + custom
# Sort by section
html, fnames, sectionlabels = [], [], []
for section in self.sections:
logger.info('%s' % section)
for sectionlabel, this_html, fname in (zip(self._sectionlabels,
self.html, self.fnames)):
if self._sectionvars[section] == sectionlabel:
html.append(this_html)
fnames.append(fname)
sectionlabels.append(sectionlabel)
logger.info('\t... %s' % fname[-20:])
color = _is_bad_fname(fname)
div_klass, tooltip, text = _get_toc_property(fname)
# loop through conditions for evoked
if fname.endswith(('-ave.fif', '-ave.fif.gz',
'(whitened)')):
text = os.path.basename(fname)
if fname.endswith('(whitened)'):
fname = fname[:-11]
# XXX: remove redundant read_evokeds
evokeds = read_evokeds(fname, verbose=False)
html_toc += toc_list.substitute(
div_klass=div_klass, id=None, tooltip=fname,
color='#428bca', text=text)
html_toc += u'<li class="evoked"><ul>'
for ev in evokeds:
html_toc += toc_list.substitute(
div_klass=div_klass, id=global_id,
tooltip=fname, color=color, text=ev.comment)
global_id += 1
html_toc += u'</ul></li>'
elif fname.endswith(tuple(VALID_EXTENSIONS +
['bem', 'custom'])):
html_toc += toc_list.substitute(div_klass=div_klass,
id=global_id,
tooltip=tooltip,
color=color,
text=text)
global_id += 1
html_toc += u'\n</ul></div>'
html_toc += u'<div id="content">'
# The sorted html (according to section)
self.html = html
self.fnames = fnames
self._sectionlabels = sectionlabels
html_header = header_template.substitute(title=self.title,
include=self.include,
sections=self.sections,
sectionvars=self._sectionvars)
self.html.insert(0, html_header) # Insert header at position 0
self.html.insert(1, html_toc) # insert TOC
def _render_array(self, array, global_id=None, cmap='gray',
limits=None, n_jobs=1):
"""Render mri without bem contours.
"""
html = []
html.append(u'<div class="row">')
# Axial
limits = limits or {}
axial_limit = limits.get('axial')
axial_slices_gen = _iterate_axial_slices(array, axial_limit)
html.append(
self._render_one_axis(axial_slices_gen, 'axial',
global_id, cmap, array.shape[1], n_jobs))
# Sagittal
sagittal_limit = limits.get('sagittal')
sagittal_slices_gen = _iterate_sagittal_slices(array, sagittal_limit)
html.append(
self._render_one_axis(sagittal_slices_gen, 'sagittal',
global_id, cmap, array.shape[1], n_jobs))
html.append(u'</div>')
html.append(u'<div class="row">')
# Coronal
coronal_limit = limits.get('coronal')
coronal_slices_gen = _iterate_coronal_slices(array, coronal_limit)
html.append(
self._render_one_axis(coronal_slices_gen, 'coronal',
global_id, cmap, array.shape[1], n_jobs))
# Close section
html.append(u'</div>')
return '\n'.join(html)
def _render_one_bem_axis(self, mri_fname, surf_fnames, global_id,
shape, orientation='coronal', decim=2, n_jobs=1):
"""Render one axis of bem contours.
"""
orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
orientation_axis = orientation_name2axis[orientation]
n_slices = shape[orientation_axis]
orig_size = np.roll(shape, orientation_axis)[[1, 2]]
name = orientation
html = []
html.append(u'<div class="col-xs-6 col-md-4">')
slides_klass = '%s-%s' % (name, global_id)
sl = np.arange(0, n_slices, decim)
kwargs = dict(mri_fname=mri_fname, surf_fnames=surf_fnames, show=False,
orientation=orientation, img_output=orig_size)
imgs = _figs_to_mrislices(sl, n_jobs, **kwargs)
slices = []
img_klass = 'slideimg-%s' % name
div_klass = 'span12 %s' % slides_klass
for ii, img in enumerate(imgs):
slice_id = '%s-%s-%s' % (name, global_id, sl[ii])
caption = u'Slice %s %s' % (name, sl[ii])
            first = (ii == 0)
slices.append(_build_html_image(img, slice_id, div_klass,
img_klass, caption, first))
# Render the slider
slider_id = 'select-%s-%s' % (name, global_id)
html.append(u'<div id="%s"></div>' % slider_id)
html.append(u'<ul class="thumbnails">')
# Render the slices
html.append(u'\n'.join(slices))
html.append(u'</ul>')
html.append(_build_html_slider(sl, slides_klass, slider_id))
html.append(u'</div>')
return '\n'.join(html)
def _render_image(self, image, cmap='gray', n_jobs=1):
"""Render one slice of mri without bem.
"""
import nibabel as nib
global_id = self._get_id()
if 'mri' not in self.sections:
self.sections.append('mri')
self._sectionvars['mri'] = 'mri'
nim = nib.load(image)
data = nim.get_data()
shape = data.shape
limits = {'sagittal': range(0, shape[0], 2),
'axial': range(0, shape[1], 2),
'coronal': range(0, shape[2], 2)}
name = op.basename(image)
html = u'<li class="mri" id="%d">\n' % global_id
html += u'<h2>%s</h2>\n' % name
html += self._render_array(data, global_id=global_id,
cmap=cmap, limits=limits,
n_jobs=n_jobs)
html += u'</li>\n'
return html
def _render_raw(self, raw_fname):
"""Render raw.
"""
global_id = self._get_id()
div_klass = 'raw'
caption = u'Raw : %s' % raw_fname
raw = Raw(raw_fname)
n_eeg = len(pick_types(raw.info, meg=False, eeg=True))
n_grad = len(pick_types(raw.info, meg='grad'))
n_mag = len(pick_types(raw.info, meg='mag'))
pick_eog = pick_types(raw.info, meg=False, eog=True)
if len(pick_eog) > 0:
eog = ', '.join(np.array(raw.info['ch_names'])[pick_eog])
else:
eog = 'Not available'
pick_ecg = pick_types(raw.info, meg=False, ecg=True)
if len(pick_ecg) > 0:
ecg = ', '.join(np.array(raw.info['ch_names'])[pick_ecg])
else:
ecg = 'Not available'
meas_date = raw.info['meas_date']
if meas_date is not None:
meas_date = dt.fromtimestamp(meas_date[0]).strftime("%B %d, %Y")
tmin = raw.first_samp / raw.info['sfreq']
tmax = raw.last_samp / raw.info['sfreq']
html = raw_template.substitute(div_klass=div_klass,
id=global_id,
caption=caption,
info=raw.info,
meas_date=meas_date,
n_eeg=n_eeg, n_grad=n_grad,
n_mag=n_mag, eog=eog,
ecg=ecg, tmin=tmin, tmax=tmax)
return html
def _render_forward(self, fwd_fname):
"""Render forward.
"""
div_klass = 'forward'
caption = u'Forward: %s' % fwd_fname
fwd = read_forward_solution(fwd_fname)
repr_fwd = re.sub('>', '', re.sub('<', '', repr(fwd)))
global_id = self._get_id()
html = repr_template.substitute(div_klass=div_klass,
id=global_id,
caption=caption,
repr=repr_fwd)
return html
def _render_inverse(self, inv_fname):
"""Render inverse.
"""
div_klass = 'inverse'
caption = u'Inverse: %s' % inv_fname
inv = read_inverse_operator(inv_fname)
repr_inv = re.sub('>', '', re.sub('<', '', repr(inv)))
global_id = self._get_id()
html = repr_template.substitute(div_klass=div_klass,
id=global_id,
caption=caption,
repr=repr_inv)
return html
def _render_evoked(self, evoked_fname, baseline=None, figsize=None):
"""Render evoked.
"""
evokeds = read_evokeds(evoked_fname, baseline=baseline, verbose=False)
html = []
for ev in evokeds:
global_id = self._get_id()
kwargs = dict(show=False)
img = _fig_to_img(ev.plot, **kwargs)
caption = u'Evoked : %s (%s)' % (evoked_fname, ev.comment)
div_klass = 'evoked'
img_klass = 'evoked'
show = True
html.append(image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
show=show))
has_types = []
if len(pick_types(ev.info, meg=False, eeg=True)) > 0:
has_types.append('eeg')
if len(pick_types(ev.info, meg='grad', eeg=False)) > 0:
has_types.append('grad')
if len(pick_types(ev.info, meg='mag', eeg=False)) > 0:
has_types.append('mag')
for ch_type in has_types:
kwargs.update(ch_type=ch_type)
img = _fig_to_img(ev.plot_topomap, **kwargs)
caption = u'Topomap (ch_type = %s)' % ch_type
html.append(image_template.substitute(img=img,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
show=show))
return '\n'.join(html)
def _render_eve(self, eve_fname, sfreq=None):
"""Render events.
"""
global_id = self._get_id()
events = read_events(eve_fname)
kwargs = dict(events=events, sfreq=sfreq, show=False)
img = _fig_to_img(plot_events, **kwargs)
caption = 'Events : ' + eve_fname
div_klass = 'events'
img_klass = 'events'
show = True
html = image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
show=show)
return html
def _render_epochs(self, epo_fname):
"""Render epochs.
"""
global_id = self._get_id()
epochs = read_epochs(epo_fname)
kwargs = dict(subject=self.subject, show=False)
img = _fig_to_img(epochs.plot_drop_log, **kwargs)
caption = 'Epochs : ' + epo_fname
div_klass = 'epochs'
img_klass = 'epochs'
show = True
html = image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
show=show)
return html
def _render_cov(self, cov_fname, info_fname):
"""Render cov.
"""
global_id = self._get_id()
cov = read_cov(cov_fname)
fig, _ = plot_cov(cov, info_fname, show=False)
img = _fig_to_img(fig=fig)
caption = 'Covariance : %s (n_samples: %s)' % (cov_fname, cov.nfree)
div_klass = 'covariance'
img_klass = 'covariance'
show = True
html = image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
show=show)
return html
def _render_whitened_evoked(self, evoked_fname, noise_cov, baseline):
"""Show whitened evoked.
"""
global_id = self._get_id()
evokeds = read_evokeds(evoked_fname, verbose=False)
html = []
for ev in evokeds:
ev = read_evokeds(evoked_fname, ev.comment, baseline=baseline,
verbose=False)
global_id = self._get_id()
kwargs = dict(noise_cov=noise_cov, show=False)
img = _fig_to_img(ev.plot_white, **kwargs)
caption = u'Whitened evoked : %s (%s)' % (evoked_fname, ev.comment)
div_klass = 'evoked'
img_klass = 'evoked'
show = True
html.append(image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
show=show))
return '\n'.join(html)
def _render_trans(self, trans, path, info, subject,
subjects_dir, image_format='png'):
"""Render trans.
"""
kwargs = dict(info=info, trans=trans, subject=subject,
subjects_dir=subjects_dir)
try:
img = _iterate_trans_views(function=plot_trans, **kwargs)
except IOError:
img = _iterate_trans_views(function=plot_trans, source='head',
**kwargs)
if img is not None:
global_id = self._get_id()
caption = 'Trans : ' + trans
div_klass = 'trans'
img_klass = 'trans'
show = True
html = image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
width=75,
show=show)
return html
def _render_bem(self, subject, subjects_dir, decim, n_jobs,
section='mri', caption='BEM'):
"""Render mri+bem.
"""
import nibabel as nib
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
# Get the MRI filename
mri_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
if not op.isfile(mri_fname):
warnings.warn('MRI file "%s" does not exist' % mri_fname)
# Get the BEM surface filenames
bem_path = op.join(subjects_dir, subject, 'bem')
if not op.isdir(bem_path):
warnings.warn('Subject bem directory "%s" does not exist' %
bem_path)
return self._render_image(mri_fname, cmap='gray', n_jobs=n_jobs)
surf_fnames = []
for surf_name in ['*inner_skull', '*outer_skull', '*outer_skin']:
surf_fname = glob(op.join(bem_path, surf_name + '.surf'))
if len(surf_fname) > 0:
surf_fname = surf_fname[0]
else:
warnings.warn('No surface found for %s.' % surf_name)
return self._render_image(mri_fname, cmap='gray')
surf_fnames.append(surf_fname)
# XXX : find a better way to get max range of slices
nim = nib.load(mri_fname)
data = nim.get_data()
shape = data.shape
del data # free up memory
html = []
global_id = self._get_id()
if section == 'mri' and 'mri' not in self.sections:
self.sections.append('mri')
self._sectionvars['mri'] = 'mri'
name = caption
html += u'<li class="mri" id="%d">\n' % global_id
html += u'<h2>%s</h2>\n' % name
html += u'<div class="row">'
html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
shape, 'axial', decim, n_jobs)
html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
shape, 'sagittal', decim, n_jobs)
html += u'</div><div class="row">'
html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
shape, 'coronal', decim, n_jobs)
html += u'</div>'
html += u'</li>\n'
return ''.join(html)
def _clean_varnames(s):
# Remove invalid characters
s = re.sub('[^0-9a-zA-Z_]', '', s)
# add report_ at the beginning so that the javascript class names
# are valid ones
return 'report_' + s
def _recursive_search(path, pattern):
"""Auxiliary function for recursive_search of the directory.
"""
filtered_files = list()
for dirpath, dirnames, files in os.walk(path):
for f in fnmatch.filter(files, pattern):
# only the following file types are supported
# this ensures equitable distribution of jobs
if f.endswith(tuple(VALID_EXTENSIONS)):
filtered_files.append(op.realpath(op.join(dirpath, f)))
return filtered_files
def _fix_global_ids(html):
"""Auxiliary function for fixing the global_ids after reordering in
_render_toc().
"""
html = re.sub('id="\d+"', 'id="###"', html)
global_id = 1
while len(re.findall('id="###"', html)) > 0:
html = re.sub('id="###"', 'id="%s"' % global_id, html, count=1)
global_id += 1
return html
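# Illustrative note (not part of the original module): _fix_global_ids simply
# renumbers the id="..." attributes sequentially after the TOC reordering,
# e.g.
#
#     _fix_global_ids(u'<li id="7"></li><li id="3"></li>')
#     # -> u'<li id="1"></li><li id="2"></li>'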
|
matthew-tucker/mne-python
|
mne/report.py
|
Python
|
bsd-3-clause
| 61,163
|
[
"Mayavi"
] |
91f476b36792ff17da0c6bf0b8d41746fcdf650137a48a4b97e41365e0bf8792
|
# Copyright (c) 2014-2016 Genome Research Ltd.
#
# This file is part of IVA.
#
# IVA is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import sys
import os
import filecmp
import pysam
from iva import assembly
modules_dir = os.path.dirname(os.path.abspath(assembly.__file__))
data_dir = os.path.join(modules_dir, 'tests', 'data')
class TestAssembly(unittest.TestCase):
def test_init_and_write(self):
'''test assembly initialise and write to file'''
a = assembly.Assembly(contigs_file=os.path.join(data_dir, 'assembly_test.fa'))
tmp_out = 'tmp.assembly_test.out.fa'
a.write_contigs_to_file(tmp_out)
self.assertTrue(filecmp.cmp(tmp_out, os.path.join(data_dir, 'assembly_test.fa')))
os.unlink(tmp_out)
def test_map_reads(self):
'''test _map_reads'''
a = assembly.Assembly(contigs_file=os.path.join(data_dir, 'assembly_test.fa'))
reads_prefix = os.path.join(data_dir, 'assembly_test.to_map')
out_prefix = 'tmp.assembly_test.out'
a._map_reads(reads_prefix + '_1.fastq', reads_prefix + '_2.fastq', out_prefix)
        # Different smalt versions output slightly different BAMs. Some columns
        # should never change, so check just those ones.
def get_sam_columns(bamfile):
sams = []
sam_reader = pysam.Samfile(bamfile, "rb")
for sam in sam_reader.fetch(until_eof=True):
if sam.is_unmapped:
refname = None
else:
refname = sam_reader.getrname(sam.tid)
sams.append((sam.qname, sam.flag, refname, sam.pos, sam.cigar, sam.seq))
return sams
expected = get_sam_columns(os.path.join(data_dir, 'assembly_test.mapped.bam'))
got = get_sam_columns(out_prefix + '.bam')
self.assertListEqual(expected, got)
os.unlink(out_prefix + '.bam')
def test_extend_contigs_with_bam(self):
'''test _extend_contigs_with_bam'''
a = assembly.Assembly(contigs_file=os.path.join(data_dir, 'mapping_test.ref.trimmed.fa'), ext_min_cov=1, ext_min_ratio=1, ext_bases=10, max_insert=200)
bam = os.path.join(data_dir, 'mapping_test.smalt.out.bam')
out_prefix = 'tmp'
a._extend_contigs_with_bam(bam, out_prefix, output_all_useful_reads=False)
tmp_contigs = 'tmp.new_contigs.fa'
tmp_reads_1 = out_prefix + '_1.fa'
tmp_reads_2 = out_prefix + '_2.fa'
a.write_contigs_to_file(tmp_contigs)
self.assertTrue(filecmp.cmp(os.path.join(data_dir, 'mapping_test.ref.fa'), tmp_contigs))
self.assertTrue(filecmp.cmp(os.path.join(data_dir, 'assembly_test.extend_kept_reads_1.fa'), tmp_reads_1))
self.assertTrue(filecmp.cmp(os.path.join(data_dir, 'assembly_test.extend_kept_reads_2.fa'), tmp_reads_2))
os.unlink(tmp_contigs)
os.unlink(tmp_reads_1)
os.unlink(tmp_reads_2)
def test_contig_worth_extending(self):
'''Test _contig_worth_extending'''
a = assembly.Assembly(contigs_file=os.path.join(data_dir, 'assembly_test.fa'))
self.assertTrue(a._contig_worth_extending('A'))
self.assertTrue(a._contig_worth_extending('B'))
self.assertTrue(a._contig_worth_extending('C'))
a.contig_lengths['A'].append([100, 0, 0])
self.assertTrue(a._contig_worth_extending('A'))
a.contig_lengths['A'].append([100, 0, 0])
self.assertFalse(a._contig_worth_extending('A'))
a.contig_lengths['A'].append([100, 0, 0])
self.assertFalse(a._contig_worth_extending('A'))
a.contig_lengths['A'][-1] = [101, 1, 0]
self.assertTrue(a._contig_worth_extending('A'))
def test_worth_extending(self):
'''Test worth_extending'''
a = assembly.Assembly(contigs_file=os.path.join(data_dir, 'assembly_test.fa'))
self.assertTrue(a._worth_extending())
for x in ['A', 'B', 'C']:
a.contig_lengths[x].append([100, 0, 0])
self.assertTrue(a._worth_extending())
for x in ['A', 'B', 'C']:
a.contig_lengths[x].append([100, 0, 0])
self.assertFalse(a._worth_extending())
for x in ['A', 'B']:
a.contig_lengths[x].append([100, 0, 0])
self.assertFalse(a._worth_extending())
a.contig_lengths['C'].append([100, 0, 0])
self.assertFalse(a._worth_extending())
def test_read_pair_extend(self):
'''Test read_pair_extend'''
ref = os.path.join(data_dir, 'assembly_test_read_pair_extend.ref.fasta')
to_extend = os.path.join(data_dir, 'assembly_test_read_pair_extend.to_extend.fasta')
reads_prefix = os.path.join(data_dir, 'assembly_test_read_pair_extend.ref.reads')
a = assembly.Assembly(contigs_file=to_extend, strand_bias=0, verbose=4)
a.read_pair_extend(reads_prefix, 'tmp.extend')
self.assertTrue(len(a.contigs['1']) > 900)
def test_get_ref_length(self):
'''Test _get_ref_length'''
a = assembly.Assembly(contigs_file=os.path.join(data_dir, 'assembly_test.fa'))
sam_reader = pysam.Samfile(os.path.join(data_dir, 'assembly_test.mapped.bam'), "rb")
expected_lengths = [100] * 6 + [None, None]
i = 0
for sam in sam_reader.fetch(until_eof=True):
self.assertEqual(expected_lengths[i], a._get_ref_length(sam_reader, sam))
i += 1
def test_get_ref_length_sam_pair(self):
'''Test _get_ref_length_sam_pair'''
previous_sam = None
a = assembly.Assembly(contigs_file=os.path.join(data_dir, 'assembly_test.fa'))
sam_reader = pysam.Samfile(os.path.join(data_dir, 'assembly_test.mapped.bam'), "rb")
expected_lengths = [100] * 3 + [None]
i = 0
for current_sam in sam_reader.fetch(until_eof=True):
if previous_sam is None:
previous_sam = current_sam
continue
self.assertEqual(expected_lengths[i], a._get_ref_length_sam_pair(sam_reader, current_sam, previous_sam))
i += 1
previous_sam = None
def test_get_unmapped_pairs(self):
'''Test _get_unmapped_pairs'''
a = assembly.Assembly(contigs_file=os.path.join(data_dir, 'assembly_test.fa'))
reads1 = os.path.join(data_dir, 'assembly_test.to_map_1.fastq')
reads2 = os.path.join(data_dir, 'assembly_test.to_map_2.fastq')
a._get_unmapped_pairs(reads1, reads2, 'tmp')
self.assertTrue(filecmp.cmp('tmp_1.fa', os.path.join(data_dir, 'assembly_test_get_unmapped_pairs_1.fa'), shallow=False))
self.assertTrue(filecmp.cmp('tmp_2.fa', os.path.join(data_dir, 'assembly_test_get_unmapped_pairs_2.fa'), shallow=False))
os.unlink('tmp_1.fa')
os.unlink('tmp_2.fa')
def test_add_new_seed_contig(self):
'''Test add_new_seed_contig'''
a = assembly.Assembly(contigs_file=os.path.join(data_dir, 'assembly_test.fa'), verbose=3, min_clip=1, seed_min_cov=1, seed_min_kmer_count=1, seed_start_length=10, seed_overlap_length=5, seed_stop_length=20)
reads1 = os.path.join(data_dir, 'assembly_test_add_new_seed_contig.reads_1.fa')
reads2 = os.path.join(data_dir, 'assembly_test_add_new_seed_contig.reads_2.fa')
new_contig_name = a.add_new_seed_contig(reads1, reads2)
self.assertEqual('seeded.00001', new_contig_name)
self.assertTrue(new_contig_name in a.contigs)
def test_good_intervals_from_strand_coverage(self):
'''Test good_intervals_from_strand_coverage'''
fwd_cov = [0, 1, 1, 2, 5, 10, 100, 10, 10, 6, 0, 10, 10, 10, 5, 10]
rev_cov = [0, 5, 5, 5, 5, 20, 10, 10, 10, 100, 9, 10, 10, 10, 5, 0]
expected = [(3,5), (7,8), (11,14)]
a = assembly.Assembly(strand_bias=0.2)
got = a._good_intervals_from_strand_coverage(fwd_cov, rev_cov)
self.assertListEqual(expected, got)
def test_get_contig_order_by_orfs(self):
'''Test get_contig_order_by_orfs'''
a = assembly.Assembly(contigs_file=os.path.join(data_dir, 'assembly_test_order_by_orfs.fa'))
got = a._get_contig_order_by_orfs(min_length=240)
expected = [('1', True), ('3', False), ('4', False), ('6', False), ('2', False), ('5', False)]
self.assertListEqual(expected, got)
def test_trim_contig_for_strand_bias(self):
'''Test _trim_contig_for_strand_bias'''
# TODO
pass
def test_subcontigs_from_strand_bias(self):
'''Test _subcontigs_from_strand_bias'''
# TODO
pass
def test_trim_strand_biased_ends(self):
'''Test _trim_strand_biased_ends'''
# TODO
pass
def test_trim_contigs(self):
'''Test trim_contigs'''
# TODO
pass
def test_remove_contained_contigs(self):
'''Test _remove_contained_contigs'''
# TODO
pass
def test_coords_to_new_contig(self):
'''Test _coords_to_new_contig'''
# TODO
pass
def test_merge_overlapping_contigs(self):
'''Test _merge_overlapping_contigs'''
# TODO
pass
def test_contig_names_size_order(self):
'''Test _contig_names_size_order'''
# TODO
pass
def test_contig_contained_in_nucmer_hits(self):
'''Test _contig_contained_in_nucmer_hits'''
# TODO
pass
def test_remove_contig_from_nucmer_hits(self):
'''Test _remove_contig_from_nucmer_hits'''
# TODO
pass
def test_remove_contig(self):
'''Test _remove_contig'''
# TODO
pass
|
sanger-pathogens/iva
|
iva/tests/assembly_test.py
|
Python
|
gpl-3.0
| 10,150
|
[
"pysam"
] |
b7ca9c199d7b932f0e8aefeba5e13924dcd8b912799cfc5311356815bf0fc405
|
# Transformer/IO/StructureIO.py
# -------
# Imports
# -------
import os;
from Transformer.IO import _AIMS, _VASP;
# ---------
# Constants
# ---------
# Tuples of (formatCode, defaultExtension, readSupport, writeSupport).
SupportedFileFormats = [
('aims', '.geometry.in', True, True),
('vasp', '.vasp', True, True)
];
# ----------------
# Public Functions
# ----------------
def GetFileTypeFromExtension(filePath):
fileFormat = _TryGetFileFormat(filePath);
for supportedFileFormat, defaultExtension, _, _ in SupportedFileFormats:
if fileFormat == supportedFileFormat:
return (fileFormat, defaultExtension);
def ReadStructure(filePathOrObj, fileFormat = None, atomicSymbolLookupTable = None):
# _GetCheckFileFormat() attempts to automatically determine a file format if one is not supplied, and also checks the format is supported for reading.
fileFormat = _GetCheckFileFormat(filePathOrObj, fileFormat, mode = 'r');
# File-like object to read from.
fileObj = None;
# Variable to keep track of whether fileObj was opened within this routine.
fileObjOpened = False;
# If filePathOrObj is a string, assume it specifies a file path and set fileObj to an open file for reading.
# If not, assume filePathOrObj implements a file-like interface and use as is.
if isinstance(filePathOrObj, str):
fileObj = open(filePathOrObj, 'r');
fileObjOpened = True;
else:
fileObj = filePathOrObj;
try:
# Dispatch to different reader functions depending on the selected file format.
if fileFormat == 'aims':
return _AIMS.ReadGeometryInFile(
fileObj, atomicSymbolLookupTable = atomicSymbolLookupTable
);
elif fileFormat == 'vasp':
return _VASP.ReadPOSCARFile(
fileObj, atomicSymbolLookupTable = atomicSymbolLookupTable
);
else:
# Catch-all, just in case.
raise NotImplementedError("Error: An import routine for the file format '{0}' has not yet been implemented.".format(fileFormat));
finally:
# If we opened a file, make sure it gets closed.
if fileObjOpened:
fileObj.close();
def WriteStructure(structure, filePathOrObj, fileFormat = None, atomicSymbolLookupTable = None):
# Determine and/or check the file format is supported for writing.
fileFormat = _GetCheckFileFormat(filePathOrObj, fileFormat, mode = 'w');
# If required, set up a file-like object to write to.
fileObj = None;
fileObjOpened = False;
if isinstance(filePathOrObj, str):
fileObj = open(filePathOrObj, 'w');
fileObjOpened = True;
else:
fileObj = filePathOrObj;
# Dispatch to the appropriate writer function.
try:
if fileFormat == 'aims':
_AIMS.WriteGeometryInFile(
structure, fileObj, atomicSymbolLookupTable = atomicSymbolLookupTable
);
elif fileFormat == 'vasp':
_VASP.WritePOSCARFile(
structure, fileObj, atomicSymbolLookupTable = atomicSymbolLookupTable
);
else:
raise NotImplementedError("Error: An export routine for the file format '{0}' has not yet been implemented.".format(fileFormat));
finally:
if fileObjOpened:
fileObj.close();
# -----------------
# Utility Functions
# -----------------
def _GetCheckFileFormat(filePathOrObj, fileFormat, mode):
    if fileFormat is None:
# If filePathOrObj is a string, assume it specifies a file path and match the ending against the default extensions of the formats listed in SupportedFileFormats.
if isinstance(filePathOrObj, str):
_, tail = os.path.split(filePathOrObj);
tail = tail.lower();
for supportedFileFormat, defaultExtension, _, _ in SupportedFileFormats:
if tail.endswith(defaultExtension):
fileFormat = supportedFileFormat;
# If we were unable to determine a file format automatically, throw an error.
        if fileFormat is None:
raise Exception("Error: A file format could not be automatically determined.");
else:
fileFormat = fileFormat.lower();
# Check the file format is supported for the desired mode.
if mode == 'r':
# Check for reading support.
for supportedFileFormat, _, readSupport, _ in SupportedFileFormats:
if fileFormat == supportedFileFormat and not readSupport:
raise Exception("Error: File format '{0}' is not supported for reading.".format(fileFormat));
elif mode == 'w':
# Check for writing support.
for supportedFileFormat, _, _, writeSupport in SupportedFileFormats:
if fileFormat == supportedFileFormat and not writeSupport:
raise Exception("Error: File format '{0}' is not supported for writing.".format(fileFormat));
else:
# Catch all - should only be for debugging purposes.
raise Exception("Error: Unknown mode '{0}'.".format(mode))
# Return the file format.
return fileFormat;
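# Hypothetical usage sketch (file names are placeholders): when no fileFormat
# argument is given, the format is inferred from the default extensions listed
# in SupportedFileFormats.
#
#     structure = ReadStructure("MgO.geometry.in");   # detected as 'aims'
#     WriteStructure(structure, "MgO.vasp");          # detected as 'vasp'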
|
JMSkelton/Transformer
|
Transformer/IO/StructureIO.py
|
Python
|
gpl-3.0
| 5,218
|
[
"VASP"
] |
769e6a58eb08bd4246440fe5e04d5bb8917735d1ba0dd0377b4a10248bbee4d1
|
"""
Unit tests for the atom renderer
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
import numpy as np
import vtk
from vtk.util import numpy_support
from .. import atomRenderer
from ... import utils
from six.moves import range
################################################################################
# required unless ColouringOptions is rewritten to have a non GUI dependent settings object
class DummyColouringOpts(object):
def __init__(self):
self.colourBy = "Species"
self.heightAxis = 1
self.minVal = 0.0
self.maxVal = 1.0
self.solidColourRGB = (1.0, 0.0, 0.0)
self.scalarBarText = "Height in Y (A)"
################################################################################
class TestAtomRenderer(unittest.TestCase):
"""
Test the atom renderer
"""
def setUp(self):
"""
Called before each test
"""
# arrays
points = np.asarray([[1.2,1.2,1.6], [0,0,0], [8,8,8], [5.4,8,1], [4,1,0]], dtype=np.float64)
scalars = np.asarray([0,0,1,0,1], dtype=np.float64)
radii = np.asarray([1.2, 1.2, 0.8, 1.5, 1.1], dtype=np.float64)
# convert to vtk arrays
self.atomPoints = utils.NumpyVTKData(points)
self.radiusArray = utils.NumpyVTKData(radii, name="radius")
self.scalarsArray = utils.NumpyVTKData(scalars, name="colours")
# lut
self.nspecies = 2
self.lut = vtk.vtkLookupTable()
self.lut.SetNumberOfColors(self.nspecies)
self.lut.SetNumberOfTableValues(self.nspecies)
self.lut.SetTableRange(0, self.nspecies - 1)
self.lut.SetRange(0, self.nspecies - 1)
for i in range(self.nspecies):
self.lut.SetTableValue(i, 1, 0, 0, 1.0)
def tearDown(self):
"""
Called after each test
"""
# remove refs
self.atomPoints = None
self.radiusArray = None
self.scalarsArray = None
self.lut = None
def test_atomRenderer(self):
"""
Atom renderer
"""
# the renderer
renderer = atomRenderer.AtomRenderer()
# some settings
colouringOptions = DummyColouringOpts()
atomScaleFactor = 1
resolution = 10
# render atoms
renderer.render(self.atomPoints, self.scalarsArray, self.radiusArray, self.nspecies, colouringOptions,
atomScaleFactor, self.lut, resolution)
# check result is correct type
self.assertIsInstance(renderer.getActor(), utils.ActorObject)
|
chrisdjscott/Atoman
|
atoman/rendering/renderers/tests/test_atomRenderer.py
|
Python
|
mit
| 2,682
|
[
"VTK"
] |
f3e05f17d505ccc81316b4ce56ab640a3776958c386e7e8801bb7694f2403c7c
|
# -*- coding: utf-8 -*-
# Copyright 2013 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import unittest
import pytest
from .fake_webapp import EXAMPLE_APP
from .base import WebDriverTests, get_browser
class ChromeBrowserTest(WebDriverTests, unittest.TestCase):
@pytest.fixture(autouse=True, scope='class')
def setup_browser(self, request):
request.cls.browser = get_browser('chrome', fullscreen=False)
request.addfinalizer(request.cls.browser.quit)
@pytest.fixture(autouse=True)
def visit_example_app(self, request):
self.browser.driver.set_window_size(1024, 768)
self.browser.visit(EXAMPLE_APP)
class ChromeBrowserFullscreenTest(WebDriverTests, unittest.TestCase):
@pytest.fixture(autouse=True, scope='class')
def setup_browser(self, request):
request.cls.browser = get_browser('chrome', fullscreen=True)
request.addfinalizer(request.cls.browser.quit)
@pytest.fixture(autouse=True)
def visit_example_app(self):
self.browser.visit(EXAMPLE_APP)
|
cobrateam/splinter
|
tests/test_webdriver_chrome.py
|
Python
|
bsd-3-clause
| 1,136
|
[
"VisIt"
] |
4113c9aa72b785ad409ff042bac6329668ea1caf5bc8b627a83aacc6b7cb61d6
|
import os
import os.path
import shutil
from easybuild.framework.easyblock import EasyBlock
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.run import run_cmd
class EB_LAMMPS(EasyBlock):
"""
Support for building LAMMPS
- modify Makefiles for libs
- build with Make.py and install
"""
def __init__(self, *args, **kwargs):
"""Initialisation of custom class variables for LAMMPS."""
super(EB_LAMMPS, self).__init__(*args, **kwargs)
self.pkgs = []
self.lib_opts = []
self.opts = []
self.lib_custom_links = {}
self.actions = []
self.rename = 'auto'
self.flags = []
self.makelammps = None
@staticmethod
def extra_options():
extra_vars = {
            'pkgs': [' ', "List of packages to add or remove from LAMMPS", CUSTOM],
            'lib_opts': [' ', "Arguments to be used with library switches in Make.py", CUSTOM],
            'opts': [' ', "Other option switches to be used with Make.py", CUSTOM],
            'lib_custom_links': [{}, "Custom linking options for USER Libs", CUSTOM],
            'actions': [' ', "Arguments to be used with the action switch of Make.py", CUSTOM],
            'rename': ['auto', "Renames the LAMMPS binary to lmp_<rename>", CUSTOM],
            'flags': [' ', "Arguments to be used with the flag switch for Make.py", CUSTOM],
}
return ConfigureMake.extra_options(extra_vars)
def configure_step(self):
"""
Create Make.Lammps file for packages to build from
"""
self.pkgs = self.cfg['pkgs']
self.lib_opts = self.cfg['lib_opts']
        self.opts = self.cfg['opts']
self.lib_custom_links = self.cfg['lib_custom_links']
self.actions = self.cfg['actions']
self.rename = self.cfg['rename']
self.flags = self.cfg['flags']
for lib in self.lib_custom_links:
try:
self.makelammps = os.path.join(self.builddir,self.name + "-" + self.version,"lib",lib,"Makefile.lammps.eb")
txt = '\n'.join([
"# Settings that the LAMMPS build will import when this package library is used",
" ",
"user-%s_SYSINC = %s" % (lib, self.lib_custom_links[lib]['SYSINC']),
"user-%s_SYSLIB = %s" % (lib, self.lib_custom_links[lib]['SYSLIB']),
"user-%s_SYSPATH = %s "% (lib, self.lib_custom_links[lib]['SYSPATH']),
])
f=file(self.makelammps,"w")
f.write(txt)
f.close
except OSError, err:
raise EasyBuildError("Failed to create Makefile.lammps.eb for user-lib-%(lib)s: %s", err)
try:
if os.path.exists(os.path.join(self.builddir,self.name+"-"+self.version,"lib",lib,"Makefile.lammps")):
os.remove(os.path.join(self.builddir,self.name+"-"+self.version,"lib",lib,"Makefile.lammps"))
shutil.copy2(os.path.join(self.builddir,self.name+"-"+self.version,"lib",lib,"Makefile.lammps.eb"),os.path.join(self.builddir,self.name+"-"+self.version,"lib",lib,"Makefile.lammps"))
except OSError, err:
raise EasyBuildError("Failed to copy Makefile.lammps.eb to Makefile.lammps: %s", err)
def build_step(self):
"""
Build with Make.py script and specified options
"""
try:
os.chdir(self.builddir+"/"+self.name+"-"+self.version+"/src/")
except OSError, err:
raise EasyBuildError("Failed to change the path to the build dir: %s", err)
"""
Clean build area
"""
cmd = "make clean-all"
run_cmd(cmd, log_all=True, simple=True, log_output=True)
cmd ="./Make.py -j 16"
#Add options
for opt in self.opts:
cmd += " "+opt
#Add flags
for flag in self.flags:
cmd += " "+flag
#Add library options
for libopts in self.lib_opts:
cmd += " "+libopts
#Add/Remove Packages
cmd += " -p"
for pkg in self.pkgs:
cmd += " "+pkg
#Rename binary
cmd += " -o "+self.rename
#Add actions
cmd += " -a"
for action in self.actions:
cmd +=" "+action
#Build with Makefile.auto
cmd += " file exe"
run_cmd(cmd, log_all=True, simple=True, log_output=True)
def install_step(self):
"""
Install by copying files to install dir
"""
srcdir = os.path.join(self.builddir,self.name+"-"+self.version,"src")
destdir = self.installdir
srcfile = os.path.join(srcdir, "lmp_"+self.rename)
try:
if os.path.exists(destdir):
shutil.copy(srcfile, destdir)
else:
os.makedirs(destdir)
shutil.copy(srcfile, destdir)
except OSError, err:
raise EasyBuildError("Copying %s to installation dir %s failed: %s", srcfile, destdir, err)
def sanity_check_step(self):
"""
Custom sanity check for LAMMPS
"""
custom_paths = {
'files': ["lmp_"+self.rename],
'dirs': []
}
super(EB_LAMMPS, self).sanity_check_step(custom_paths)
def make_module_req_guess(self):
"""Custom extra module file entries for LAMMPS ."""
guesses = super(EB_LAMMPS, self).make_module_req_guess()
guesses.update({"PATH":'/ '})
return guesses
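# Hypothetical easyconfig fragment (all values are placeholders, not tested
# settings) illustrating the custom parameters declared in extra_options():
#
#     pkgs = ['molecule', 'rigid', 'no-kim']
#     rename = 'eb'
#     actions = ['lib-all']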
|
qldhpc/eb_local
|
eb_blocks/l/lammps.py
|
Python
|
apache-2.0
| 5,482
|
[
"LAMMPS"
] |
41528372b34353fd35a1c9898df204f6548255f0373b7ec5cffae3bf4a0e87cc
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
from unittest.case import TestCase
from commoncode.functional import flatten
class TestFunctional(TestCase):
def test_flatten(self):
expected = [7, 6, 5, 4, 'a', 3, 3, 2, 1]
test = flatten([7, (6, [5, [4, ["a"], 3]], 3), 2, 1])
self.assertEqual(expected, test)
def test_flatten_generator(self):
def gen():
for _ in range(2):
yield range(5)
expected = [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
test = flatten(gen())
self.assertEqual(expected, test)
def test_flatten_empties(self):
expected = ['a']
test = flatten([[], (), ['a']])
self.assertEqual(expected, test)
|
retrography/scancode-toolkit
|
tests/commoncode/test_functional.py
|
Python
|
apache-2.0
| 2,092
|
[
"VisIt"
] |
710870084a2a79e8c5298c25ed0fe990ff1f6ffc265da4592312b51def38aacd
|
import vtk
import time
import numpy as np
from director import transformUtils
from director.timercallback import TimerCallback
from director import propertyset
from collections import OrderedDict
class OrbitController(TimerCallback):
def __init__(self, view):
TimerCallback.__init__(self)
self.view = view
self.orbitTime = 20.0
def tick(self):
speed = 360.0 / self.orbitTime
degrees = self.elapsed * speed
self.view.camera().Azimuth(degrees)
self.view.render()
class CameraInterpolator(object):
def __init__(self, view):
self.view = view
self.reset()
def getViewCameraCopy(self):
camera = vtk.vtkCamera()
camera.DeepCopy(self.view.camera())
return camera
def reset(self):
self.interp = vtk.vtkCameraInterpolator()
def addCameraAtTime(self, camera, t):
self.interp.AddCamera(t, camera)
def addViewCameraAtTime(self, t):
self.addCameraAtTime(self.getViewCameraCopy(), t)
def setViewCameraAtTime(self, t):
self.interp.InterpolateCamera(t, self.view.camera())
self.view.render()
class Flyer(TimerCallback):
def __init__(self, view):
TimerCallback.__init__(self)
self.view = view
self.flyTime = 0.5
self.startTime = 0.0
self.maintainViewDirection = False
self.positionZoom = 0.7
def getCameraCopy(self):
camera = vtk.vtkCamera()
camera.DeepCopy(self.view.camera())
return camera
def zoomTo(self, newFocalPoint, newPosition=None):
self.interp = vtk.vtkCameraInterpolator()
self.interp.AddCamera(0.0, self.getCameraCopy())
c = self.getCameraCopy()
newFocalPoint = np.array(newFocalPoint)
oldFocalPoint = np.array(c.GetFocalPoint())
oldPosition = np.array(c.GetPosition())
if newPosition is None:
if self.maintainViewDirection:
newPosition = oldPosition + (newFocalPoint - oldFocalPoint)
else:
newPosition = oldPosition
newPosition += self.positionZoom*(newFocalPoint - newPosition)
#newPosition = newFocalPoint - self.positionZoom*(newFocalPoint - newPosition)
c.SetFocalPoint(newFocalPoint)
c.SetPosition(newPosition)
c.SetViewUp([0.0, 0.0, 1.0])
self.interp.AddCamera(1.0, c)
self.startTime = time.time()
self.start()
def tick(self):
elapsed = time.time() - self.startTime
t = (elapsed / float(self.flyTime)) if self.flyTime > 0 else 1.0
self.interp.InterpolateCamera(t, self.view.camera())
self.view.render()
if t >= 1.0:
return False
class CameraTracker(object):
def __init__(self, view, targetFrame):
self.view = view
self.targetFrame = targetFrame
self.camera = view.camera()
self.actions = []
self.properties = propertyset.PropertySet()
self.properties.connectPropertyChanged(self.onPropertyChanged)
self.setup()
def getTargetPose(self):
return transformUtils.poseFromTransform(self.targetFrame.transform)
def getTargetQuaternion(self):
return self.getTargetPose()[1]
def getTargetPosition(self):
return np.array(self.targetFrame.transform.GetPosition())
def getCameraTransform(self):
c = self.camera
return transformUtils.getLookAtTransform(c.GetFocalPoint(), c.GetPosition(), c.GetViewUp())
def getCameraToTargetTransform(self, targetFrame):
targetToWorld = transformUtils.copyFrame(targetFrame)
cameraToWorld = self.getCameraTransform()
cameraToTarget = transformUtils.concatenateTransforms([cameraToWorld, targetToWorld.GetLinearInverse()])
focalDistance = np.linalg.norm(np.array(self.camera.GetFocalPoint()) - np.array(self.camera.GetPosition()))
return cameraToTarget, focalDistance
def setCameraFocalPointToTarget(self):
self.camera.SetFocalPoint(self.getTargetPosition())
self.view.render()
    def getProperties(self):
return self.properties
def setup(self):
pass
def reset(self):
pass
def update(self):
pass
def onAction(self, actionName):
pass
def getMinimumUpdateRate(self):
return 0
def onPropertyChanged(self, propertySet, propertyName):
pass
class PositionTracker(CameraTracker):
def setup(self):
self.actions = ['Re-center']
def onAction(self, actionName):
if actionName == 'Re-center':
self.setCameraFocalPointToTarget()
def reset(self):
self.lastTargetPosition = self.getTargetPosition()
self.lastTargetQuaternion = self.getTargetQuaternion()
def update(self):
newTargetPosition = self.getTargetPosition()
delta = newTargetPosition - self.lastTargetPosition
followAxes = [True, True, True]
for i in xrange(3):
if not followAxes[i]:
delta[i] = 0.0
self.lastTargetPosition = newTargetPosition
c = self.camera
oldFocalPoint = np.array(c.GetFocalPoint())
oldPosition = np.array(c.GetPosition())
c.SetFocalPoint(oldFocalPoint + delta)
c.SetPosition(oldPosition + delta)
self.view.render()
class LookAtTracker(CameraTracker):
def update(self):
self.setCameraFocalPointToTarget()
def reset(self):
pass
class OrbitTracker(PositionTracker):
def setup(self):
super(OrbitTracker, self).setup()
self.properties.addProperty('Orbit Time (s)', 20, attributes=propertyset.PropertyAttributes(minimum=1, maximum=100, singleStep=1))
def update(self):
super(OrbitTracker, self).update()
orbitTime = self.properties.getProperty('Orbit Time (s)')
speed = 360.0 / orbitTime
degrees = self.dt * speed
self.view.camera().Azimuth(degrees)
self.view.render()
def getMinimumUpdateRate(self):
return 60
class PositionOrientationTracker(CameraTracker):
def storeTargetPose(self):
self.lastTargetPosition = self.getTargetPosition()
self.lastTargetQuaternion = self.getTargetQuaternion()
def reset(self):
self.storeTargetPose()
targetToWorld = transformUtils.copyFrame(self.targetFrame.transform)
cameraToWorld = self.getCameraTransform()
cameraToTarget = transformUtils.concatenateTransforms([cameraToWorld, targetToWorld.GetLinearInverse()])
self.boomTransform = cameraToTarget
self.focalDistance = np.linalg.norm(np.array(self.camera.GetFocalPoint()) - np.array(self.camera.GetPosition()))
def update(self):
previousTargetFrame = transformUtils.transformFromPose(self.lastTargetPosition, self.lastTargetQuaternion)
self.storeTargetPose()
cameraToTarget, focalDistance = self.getCameraToTargetTransform(previousTargetFrame)
targetToWorld = self.targetFrame.transform
#cameraToTarget = self.boomTransform
cameraToWorld = transformUtils.concatenateTransforms([cameraToTarget, targetToWorld])
c = self.camera
focalPoint = cameraToWorld.TransformPoint([self.focalDistance, 0, 0])
focalPoint = targetToWorld.GetPosition()
#print 'focal distance:', self.focalDistance
#print 'cameraToTarget pos:', cameraToTarget.GetPosition()
#print 'cameraToWorld pos:', cameraToWorld.GetPosition()
#print 'targetToWorld pos:', targetToWorld.GetPosition()
#print 'focal pos:', focalPoint
c.SetPosition(cameraToWorld.GetPosition())
c.SetFocalPoint(focalPoint)
self.view.render()
class SmoothFollowTracker(CameraTracker):
def getMinimumUpdateRate(self):
return 30
def setup(self):
self.properties.addProperty('Smooth Time (s)', 0.5, attributes=propertyset.PropertyAttributes(decimals=1, minimum=0.1, maximum=5, singleStep=0.1))
self.properties.addProperty('Distance (m)', 15, attributes=propertyset.PropertyAttributes(decimals=1, minimum=0.5, maximum=1000.0, singleStep=1))
self.properties.addProperty('Elevation (deg)', 10, attributes=propertyset.PropertyAttributes(minimum=-90, maximum=90, singleStep=2))
self.properties.addProperty('Azimuth (deg)', 0, attributes=propertyset.PropertyAttributes(minimum=-180, maximum=180, singleStep=10))
def reset(self):
self.currentVelocity = np.array([0.0, 0.0, 0.0])
def update(self):
if not self.targetFrame:
return
r = self.properties.getProperty('Distance (m)')
theta = np.radians(90 - self.properties.getProperty('Elevation (deg)'))
phi = np.radians(180 - self.properties.getProperty('Azimuth (deg)'))
x = r * np.cos(phi) * np.sin(theta)
y = r * np.sin(phi) * np.sin(theta)
z = r * np.cos(theta)
c = self.camera
targetToWorld = self.targetFrame.transform
currentPosition = np.array(c.GetPosition())
desiredPosition = np.array(targetToWorld.TransformPoint([x, y, z]))
smoothTime = self.properties.getProperty('Smooth Time (s)')
newPosition, self.currentVelocity = smoothDamp(currentPosition, desiredPosition, self.currentVelocity, smoothTime, maxSpeed=100, deltaTime=self.dt)
trackerToWorld = transformUtils.getLookAtTransform(targetToWorld.GetPosition(), newPosition)
c.SetFocalPoint(targetToWorld.GetPosition())
c.SetPosition(trackerToWorld.GetPosition())
self.view.render()
class TargetFrameConverter(object):
def __init__(self):
self.targetFrame = None
def getTargetFrame(self):
return self.targetFrame
@classmethod
def canConvert(cls, obj):
return False
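# Illustrative sketch (not part of the original module): a concrete converter
# only needs to populate self.targetFrame with a frame object exposing the
# transform and connect/disconnectFrameModified interface used by the trackers.
#
#     class FrameObjectConverter(TargetFrameConverter):
#         def __init__(self, frameObj):
#             TargetFrameConverter.__init__(self)
#             self.targetFrame = frameObj
#
#         @classmethod
#         def canConvert(cls, obj):
#             return hasattr(obj, 'transform')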
class CameraTrackerManager(object):
def __init__(self):
self.target = None
self.targetFrame = None
self.trackerClass = None
self.camera = None
self.view = None
self.timer = TimerCallback()
self.timer.callback = self.updateTimer
self.addTrackers()
self.initTracker()
def updateTimer(self):
tNow = time.time()
dt = tNow - self.tLast
if dt < self.timer.elapsed/2.0:
return
self.update()
def setView(self, view):
self.view = view
self.camera = view.camera()
def setTarget(self, target):
'''
target should be an instance of TargetFrameConverter or
any object that provides a method getTargetFrame().
'''
if target == self.target:
return
self.disableActiveTracker()
if not target:
return
self.target = target
self.targetFrame = target.getTargetFrame()
self.callbackId = self.targetFrame.connectFrameModified(self.onTargetFrameModified)
self.initTracker()
def disableActiveTracker(self):
if self.targetFrame:
self.targetFrame.disconnectFrameModified(self.callbackId)
self.target = None
self.targetFrame = None
self.initTracker()
def update(self):
tNow = time.time()
dt = tNow - self.tLast
self.tLast = tNow
if self.activeTracker:
self.activeTracker.dt = dt
self.activeTracker.update()
def reset(self):
self.tLast = time.time()
if self.activeTracker:
self.activeTracker.reset()
def getModeActions(self):
if self.activeTracker:
return self.activeTracker.actions
return []
def onModeAction(self, actionName):
if self.activeTracker:
self.activeTracker.onAction(actionName)
def getModeProperties(self):
if self.activeTracker:
return self.activeTracker.properties
return None
def onTargetFrameModified(self, frame):
self.update()
def initTracker(self):
self.timer.stop()
self.activeTracker = self.trackerClass(self.view, self.targetFrame) if (self.trackerClass and self.targetFrame) else None
self.reset()
self.update()
if self.activeTracker:
minimumUpdateRate = self.activeTracker.getMinimumUpdateRate()
if minimumUpdateRate > 0:
self.timer.targetFps = minimumUpdateRate
self.timer.start()
def addTrackers(self):
self.trackers = OrderedDict([
['Off', None],
['Position', PositionTracker],
['Position & Orientation', PositionOrientationTracker],
['Smooth Follow', SmoothFollowTracker],
['Look At', LookAtTracker],
['Orbit', OrbitTracker],
])
def setTrackerMode(self, modeName):
assert modeName in self.trackers
self.trackerClass = self.trackers[modeName]
self.initTracker()
def smoothDamp(current, target, currentVelocity, smoothTime, maxSpeed, deltaTime):
'''
Based on Unity3D SmoothDamp
See: http://answers.unity3d.com/answers/310645/view.html
'''
smoothTime = max(0.0001, smoothTime)
    num = 2.0 / smoothTime
    num2 = num * deltaTime
num3 = 1.0 / (1.0 + num2 + 0.48 * num2 * num2 + 0.235 * num2 * num2 * num2)
num4 = current - target
num5 = target
num6 = maxSpeed * smoothTime
num4 = np.clip(num4, -num6, num6)
target = current - num4
num7 = (currentVelocity + num * num4) * deltaTime
currentVelocity = (currentVelocity - num * num7) * num3
num8 = target + (num4 + num7) * num3
for i in xrange(len(current)):
        if (num5[i] - current[i] > 0.0) == (num8[i] > num5[i]):
num8[i] = num5[i]
currentVelocity[i] = (num8[i] - num5[i]) / deltaTime
return num8, currentVelocity
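# Illustrative sketch (not in the original module): smoothDamp moves a value a
# fraction of the way toward its target on each call, critically damped, so a
# camera position converges without oscillating.
#
#     pos = np.array([0.0, 0.0, 0.0])
#     vel = np.array([0.0, 0.0, 0.0])
#     target = np.array([1.0, 0.0, 0.0])
#     for _ in range(10):
#         pos, vel = smoothDamp(pos, target, vel, smoothTime=0.5,
#                               maxSpeed=100, deltaTime=1.0 / 60.0)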
class RobotModelFollower(object):
def __init__(self, view, robotModel, jointController):
self.view = view
self.robotModel = robotModel
self.jointController = jointController
self.followAxes = [True, True, True]
self.callbackId = None
def start(self):
self.callbackId = self.robotModel.connectModelChanged(self.onModelChanged)
self.lastTrackPosition = np.array(self.jointController.q[:3])
def stop(self):
self.robotModel.disconnectModelChanged(self.callbackId)
def getCameraCopy(self):
camera = vtk.vtkCamera()
camera.DeepCopy(self.view.camera())
return camera
def onModelChanged(self, model):
newTrackPosition = np.array(self.jointController.q[:3])
delta = newTrackPosition - self.lastTrackPosition
for i in xrange(3):
if not self.followAxes[i]:
delta[i] = 0.0
self.lastTrackPosition = newTrackPosition
c = self.view.camera()
oldFocalPoint = np.array(c.GetFocalPoint())
oldPosition = np.array(c.GetPosition())
c.SetFocalPoint(oldFocalPoint + delta)
c.SetPosition(oldPosition + delta)
self.view.render()
|
manuelli/director
|
src/python/director/cameracontrol.py
|
Python
|
bsd-3-clause
| 15,095
|
[
"VTK"
] |
7f0e3b2334dde1d46f1ee7d5e898f7d85de1f1082145fb9529925af0733575cb
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from qiime2.plugin import (Plugin, Str, Properties, Choices, Int, Bool, Range,
Float, Set, Visualization, Metadata, MetadataColumn,
Categorical, Numeric, Citations)
import q2_diversity
from q2_diversity import _alpha as alpha
from q2_diversity import _beta as beta
from q2_types.feature_table import (FeatureTable, Frequency, RelativeFrequency,
PresenceAbsence)
from q2_types.distance_matrix import DistanceMatrix
from q2_types.sample_data import AlphaDiversity, SampleData
from q2_types.tree import Phylogeny, Rooted
from q2_types.ordination import PCoAResults, ProcrustesStatistics
citations = Citations.load('citations.bib', package='q2_diversity')
n_jobs_description = (
'The number of concurrent jobs to use in performing this calculation. '
'May not exceed the number of available physical cores. If n_jobs = '
'\'auto\', one job will be launched for each identified CPU core on the '
'host.'
)
threads_description = (
'The number of CPU threads to use in performing this calculation. '
'May not exceed the number of available physical cores. If threads = '
'\'auto\', one thread will be created for each identified CPU core on the '
'host.'
)
n_jobs_or_threads_description = (
'The number of concurrent jobs or CPU threads to use in performing this '
'calculation. Individual methods will create jobs/threads as implemented '
'in q2-diversity-lib dependencies. May not exceed the number of available '
'physical cores. If n_jobs_or_threads = \'auto\', one thread/job will be '
'created for each identified CPU core on the host.'
)
plugin = Plugin(
name='diversity',
version=q2_diversity.__version__,
website='https://github.com/qiime2/q2-diversity',
package='q2_diversity',
description=('This QIIME 2 plugin supports metrics for calculating '
'and exploring community alpha and beta diversity through '
'statistics and visualizations in the context of sample '
'metadata.'),
short_description='Plugin for exploring community diversity.',
)
plugin.pipelines.register_function(
function=q2_diversity.beta_phylogenetic,
inputs={'table':
FeatureTable[Frequency | RelativeFrequency | PresenceAbsence],
'phylogeny': Phylogeny[Rooted]},
parameters={'metric': Str % Choices(beta.METRICS['PHYLO']['IMPL'] |
beta.METRICS['PHYLO']['UNIMPL']),
'threads': Int % Range(1, None) | Str % Choices(['auto']),
'variance_adjusted': Bool,
'alpha': Float % Range(0, 1, inclusive_end=True),
'bypass_tips': Bool},
outputs=[('distance_matrix', DistanceMatrix)],
input_descriptions={
'table': ('The feature table containing the samples over which beta '
'diversity should be computed.'),
'phylogeny': ('Phylogenetic tree containing tip identifiers that '
'correspond to the feature identifiers in the table. '
'This tree can contain tip ids that are not present in '
'the table, but all feature ids in the table must be '
'present in this tree.')
},
parameter_descriptions={
'metric': 'The beta diversity metric to be computed.',
'threads': threads_description,
'variance_adjusted': ('Perform variance adjustment based on Chang et '
'al. BMC Bioinformatics 2011. Weights distances '
'based on the proportion of the relative '
'abundance represented between the samples at a'
' given node under evaluation.'),
'alpha': ('This parameter is only used when the choice of metric is '
'generalized_unifrac. The value of alpha controls importance'
' of sample proportions. 1.0 is weighted normalized UniFrac.'
' 0.0 is close to unweighted UniFrac, but only if the sample'
' proportions are dichotomized.'),
'bypass_tips': ('In a bifurcating tree, the tips make up about 50% of '
'the nodes in a tree. By ignoring them, specificity '
'can be traded for reduced compute time. This has the'
' effect of collapsing the phylogeny, and is analogous'
' (in concept) to moving from 99% to 97% OTUs')
},
output_descriptions={'distance_matrix': 'The resulting distance matrix.'},
name='Beta diversity (phylogenetic)',
description=("Computes a user-specified phylogenetic beta diversity metric"
" for all pairs of samples in a feature table.")
)
plugin.pipelines.register_function(
function=q2_diversity.beta,
inputs={'table':
FeatureTable[Frequency | RelativeFrequency | PresenceAbsence]},
parameters={'metric': Str % Choices(beta.METRICS['NONPHYLO']['IMPL'] |
beta.METRICS['NONPHYLO']['UNIMPL']),
'pseudocount': Int % Range(1, None),
'n_jobs': Int % Range(1, None) | Str % Choices(['auto'])},
outputs=[('distance_matrix', DistanceMatrix)],
input_descriptions={
'table': ('The feature table containing the samples over which beta '
'diversity should be computed.')
},
parameter_descriptions={
'metric': 'The beta diversity metric to be computed.',
'pseudocount': ('A pseudocount to handle zeros for compositional '
'metrics. This is ignored for other metrics.'),
'n_jobs': n_jobs_description
},
output_descriptions={'distance_matrix': 'The resulting distance matrix.'},
name='Beta diversity',
description=("Computes a user-specified beta diversity metric for all "
"pairs of samples in a feature table.")
)
plugin.pipelines.register_function(
function=q2_diversity.alpha_phylogenetic,
inputs={'table':
FeatureTable[Frequency | RelativeFrequency | PresenceAbsence],
'phylogeny': Phylogeny[Rooted]},
parameters={'metric': Str % Choices(alpha.METRICS['PHYLO']['IMPL'] |
alpha.METRICS['PHYLO']['UNIMPL'])},
outputs=[('alpha_diversity',
SampleData[AlphaDiversity])],
input_descriptions={
'table': ('The feature table containing the samples for which alpha '
'diversity should be computed.'),
'phylogeny': ('Phylogenetic tree containing tip identifiers that '
'correspond to the feature identifiers in the table. '
'This tree can contain tip ids that are not present in '
'the table, but all feature ids in the table must be '
'present in this tree.')
},
parameter_descriptions={
'metric': 'The alpha diversity metric to be computed.'
},
output_descriptions={
'alpha_diversity': 'Vector containing per-sample alpha diversities.'
},
name='Alpha diversity (phylogenetic)',
description=('Computes a user-specified phylogenetic alpha diversity '
'metric for all samples in a feature table.'),
)
plugin.pipelines.register_function(
function=q2_diversity.alpha,
inputs={'table':
FeatureTable[Frequency | RelativeFrequency | PresenceAbsence]},
parameters={'metric': Str % Choices(alpha.METRICS['NONPHYLO']['IMPL'] |
alpha.METRICS['NONPHYLO']['UNIMPL'])},
outputs=[('alpha_diversity', SampleData[AlphaDiversity])],
input_descriptions={
'table': ('The feature table containing the samples for which alpha '
'diversity should be computed.')
},
parameter_descriptions={
'metric': 'The alpha diversity metric to be computed. Information '
'about specific metrics is available at '
'https://data.qiime2.org/a_diversity_metrics'
},
output_descriptions={
'alpha_diversity': 'Vector containing per-sample alpha diversities.'
},
name='Alpha diversity',
description=('Computes a user-specified alpha diversity metric for all '
'samples in a feature table.')
)
plugin.methods.register_function(
function=q2_diversity.pcoa,
inputs={'distance_matrix': DistanceMatrix},
parameters={
'number_of_dimensions': Int % Range(1, None)
},
outputs=[('pcoa', PCoAResults)],
input_descriptions={
'distance_matrix': ('The distance matrix on which PCoA should be '
'computed.')
},
parameter_descriptions={
'number_of_dimensions': "Dimensions to reduce the distance matrix to. "
"This number determines how many "
"eigenvectors and eigenvalues are returned,"
"and influences the choice of algorithm used "
"to compute them. "
"By default, uses the default "
"eigendecomposition method, SciPy's eigh, "
"which computes all eigenvectors "
"and eigenvalues in an exact manner. For very "
"large matrices, this is expected to be slow. "
"If a value is specified for this parameter, "
"then the fast, heuristic "
"eigendecomposition algorithm fsvd "
"is used, which only computes and returns the "
"number of dimensions specified, but suffers "
"some degree of accuracy loss, the magnitude "
"of which varies across different datasets."
},
output_descriptions={'pcoa': 'The resulting PCoA matrix.'},
name='Principal Coordinate Analysis',
description=("Apply principal coordinate analysis."),
citations=[citations['legendrelegendre'],
citations['halko2010']]
)
plugin.methods.register_function(
function=q2_diversity.pcoa_biplot,
inputs={'pcoa': PCoAResults,
'features': FeatureTable[RelativeFrequency]},
parameters={},
outputs=[('biplot', PCoAResults % Properties('biplot'))],
input_descriptions={
'pcoa': 'The PCoA where the features will be projected onto.',
'features': 'Variables to project onto the PCoA matrix'
},
parameter_descriptions={},
output_descriptions={'biplot': 'The resulting PCoA matrix.'},
name='Principal Coordinate Analysis Biplot',
description="Project features into a principal coordinates matrix. The "
"features used should be the features used to compute the "
"distance matrix. It is recommended that these variables be"
" normalized in cases of dimensionally heterogeneous physical"
" variables.",
citations=[citations['legendrelegendre']]
)
plugin.methods.register_function(
function=q2_diversity.tsne,
inputs={'distance_matrix': DistanceMatrix},
parameters={
'number_of_dimensions': Int % Range(2, None),
'perplexity': Float % Range(1, None),
'early_exaggeration': Float % Range(0, None),
'learning_rate': Float % Range(10.0, None),
'n_iter': Int % Range(1, None),
'random_state': Int
},
outputs=[('tsne', PCoAResults)],
input_descriptions={
'distance_matrix': ('The distance matrix on which t-SNE should be '
'computed.')
},
parameter_descriptions={
'number_of_dimensions': "Dimensions to reduce the distance matrix to.",
'perplexity': "Provide the balance between local and global "
"structure. Low values concentrate on local "
"structure. Large values sacrifice local "
"details for a broader global embedding. "
"The default value is 25 to achieve better "
"results for small microbiome datasets.",
'early_exaggeration': "Affects the tightness of the shown clusters. "
"Larger values increase the distance between "
"natural clusters in the embedded space.",
'learning_rate': "Controls how much the weights are adjusted "
"at each update.",
'random_state': "Seed used by random number generator."
},
output_descriptions={'tsne': 'The resulting t-SNE matrix.'},
name='t-distributed stochastic neighbor embedding',
description=("Apply t-distributed stochastic neighbor embedding."),
)
plugin.methods.register_function(
function=q2_diversity.umap,
inputs={'distance_matrix': DistanceMatrix},
parameters={
'number_of_dimensions': Int % Range(2, None),
'n_neighbors': Int % Range(1, None),
'min_dist': Float % Range(0, None),
'random_state': Int
},
outputs=[('umap', PCoAResults)],
input_descriptions={
'distance_matrix': ('The distance matrix on which UMAP should be '
'computed.')
},
parameter_descriptions={
'number_of_dimensions': "Dimensions to reduce the distance matrix to.",
'n_neighbors': "Provide the balance between local and global "
"structure. Low values prioritize the "
"preservation of local structures. Large "
"values sacrifice local details for a "
"broader global embedding.",
'min_dist': "Controls the cluster size. Low values cause "
"clumpier clusters. Higher values preserve a "
"broad topological structure. To get "
"less overlapping data points the "
"default value is set to 0.4. For more "
"details visit: "
"https://umap-learn.readthedocs.io/en/latest/"
"parameters.html",
'random_state': "Seed used by random number generator."
},
output_descriptions={'umap': 'The resulting UMAP matrix.'},
name='Uniform Manifold Approximation and Projection',
description=("Apply Uniform Manifold Approximation and Projection."),
)
plugin.methods.register_function(
function=q2_diversity.procrustes_analysis,
inputs={'reference': PCoAResults, 'other': PCoAResults},
parameters={
'dimensions': Int % Range(1, None),
'permutations': Int % Range(1, None) | Str % Choices('disable')
},
outputs=[
('transformed_reference', PCoAResults),
('transformed_other', PCoAResults),
('disparity_results', ProcrustesStatistics)
],
input_descriptions={
'reference': ('The ordination matrix to which the data is fitted.'),
'other': ("The ordination matrix that's fitted to the reference "
"ordination."),
},
parameter_descriptions={
'dimensions': ('The number of dimensions to use when fitting the two '
'matrices'),
'permutations': 'The number of permutations to be run when computing '
'p-values. Supplying a value of `disable` will disable'
' permutation testing and p-values will not be '
'calculated (this results in *much* quicker execution '
'time if p-values are not desired).',
},
output_descriptions={
'transformed_reference': 'A normalized version of the "reference" '
'ordination matrix.',
'transformed_other': 'A normalized and fitted version of the "other" '
'ordination matrix.',
'disparity_results': 'The sum of the squares of the pointwise '
'differences between the two input datasets & '
'its p value.'},
name='Procrustes Analysis',
description='Fit two ordination matrices with Procrustes analysis'
)
plugin.pipelines.register_function(
function=q2_diversity.core_metrics_phylogenetic,
inputs={
'table': FeatureTable[Frequency],
'phylogeny': Phylogeny[Rooted]
},
parameters={
'sampling_depth': Int % Range(1, None),
'metadata': Metadata,
'n_jobs_or_threads': Int % Range(1, None) | Str % Choices(['auto']),
},
outputs=[
('rarefied_table', FeatureTable[Frequency]),
('faith_pd_vector', SampleData[AlphaDiversity]),
('observed_features_vector', SampleData[AlphaDiversity]),
('shannon_vector', SampleData[AlphaDiversity]),
('evenness_vector', SampleData[AlphaDiversity]),
('unweighted_unifrac_distance_matrix', DistanceMatrix),
('weighted_unifrac_distance_matrix', DistanceMatrix),
('jaccard_distance_matrix', DistanceMatrix),
('bray_curtis_distance_matrix', DistanceMatrix),
('unweighted_unifrac_pcoa_results', PCoAResults),
('weighted_unifrac_pcoa_results', PCoAResults),
('jaccard_pcoa_results', PCoAResults),
('bray_curtis_pcoa_results', PCoAResults),
('unweighted_unifrac_emperor', Visualization),
('weighted_unifrac_emperor', Visualization),
('jaccard_emperor', Visualization),
('bray_curtis_emperor', Visualization),
],
input_descriptions={
'table': 'The feature table containing the samples over which '
'diversity metrics should be computed.',
'phylogeny': 'Phylogenetic tree containing tip identifiers that '
'correspond to the feature identifiers in the table. '
'This tree can contain tip ids that are not present in '
'the table, but all feature ids in the table must be '
'present in this tree.'
},
parameter_descriptions={
'sampling_depth': 'The total frequency that each sample should be '
'rarefied to prior to computing diversity metrics.',
'metadata': 'The sample metadata to use in the emperor plots.',
'n_jobs_or_threads': '[beta/beta-phylogenetic methods only] - %s'
% n_jobs_or_threads_description
},
output_descriptions={
'rarefied_table': 'The resulting rarefied feature table.',
'faith_pd_vector': 'Vector of Faith PD values by sample.',
'observed_features_vector': 'Vector of Observed Features values by '
'sample.',
'shannon_vector': 'Vector of Shannon diversity values by sample.',
'evenness_vector': 'Vector of Pielou\'s evenness values by sample.',
'unweighted_unifrac_distance_matrix':
'Matrix of unweighted UniFrac distances between pairs of samples.',
'weighted_unifrac_distance_matrix':
'Matrix of weighted UniFrac distances between pairs of samples.',
'jaccard_distance_matrix':
'Matrix of Jaccard distances between pairs of samples.',
'bray_curtis_distance_matrix':
'Matrix of Bray-Curtis distances between pairs of samples.',
'unweighted_unifrac_pcoa_results':
'PCoA matrix computed from unweighted UniFrac distances between '
'samples.',
'weighted_unifrac_pcoa_results':
'PCoA matrix computed from weighted UniFrac distances between '
'samples.',
'jaccard_pcoa_results':
'PCoA matrix computed from Jaccard distances between '
'samples.',
'bray_curtis_pcoa_results':
'PCoA matrix computed from Bray-Curtis distances between '
'samples.',
'unweighted_unifrac_emperor':
'Emperor plot of the PCoA matrix computed from unweighted'
' UniFrac.',
'weighted_unifrac_emperor':
'Emperor plot of the PCoA matrix computed from weighted UniFrac.',
'jaccard_emperor':
'Emperor plot of the PCoA matrix computed from Jaccard.',
'bray_curtis_emperor':
'Emperor plot of the PCoA matrix computed from Bray-Curtis.',
},
name='Core diversity metrics (phylogenetic and non-phylogenetic)',
description="Applies a collection of diversity metrics (both "
"phylogenetic and non-phylogenetic) to a feature table."
)
plugin.pipelines.register_function(
function=q2_diversity.core_metrics,
inputs={
'table': FeatureTable[Frequency],
},
parameters={
'sampling_depth': Int % Range(1, None),
'metadata': Metadata,
'with_replacement': Bool,
'n_jobs': Int % Range(1, None) | Str % Choices(['auto']),
},
outputs=[
('rarefied_table', FeatureTable[Frequency]),
('observed_features_vector', SampleData[AlphaDiversity]),
('shannon_vector', SampleData[AlphaDiversity]),
('evenness_vector', SampleData[AlphaDiversity]),
('jaccard_distance_matrix', DistanceMatrix),
('bray_curtis_distance_matrix', DistanceMatrix),
('jaccard_pcoa_results', PCoAResults),
('bray_curtis_pcoa_results', PCoAResults),
('jaccard_emperor', Visualization),
('bray_curtis_emperor', Visualization),
],
input_descriptions={
'table': 'The feature table containing the samples over which '
'diversity metrics should be computed.',
},
parameter_descriptions={
'sampling_depth': 'The total frequency that each sample should be '
'rarefied to prior to computing diversity metrics.',
'metadata': 'The sample metadata to use in the emperor plots.',
'with_replacement': 'Rarefy with replacement by sampling from the '
'multinomial distribution instead of rarefying '
'without replacement.',
'n_jobs': '[beta methods only] - %s' % n_jobs_description
},
output_descriptions={
'rarefied_table': 'The resulting rarefied feature table.',
'observed_features_vector': 'Vector of Observed Features values by '
'sample.',
'shannon_vector': 'Vector of Shannon diversity values by sample.',
'evenness_vector': 'Vector of Pielou\'s evenness values by sample.',
'jaccard_distance_matrix':
'Matrix of Jaccard distances between pairs of samples.',
'bray_curtis_distance_matrix':
'Matrix of Bray-Curtis distances between pairs of samples.',
'jaccard_pcoa_results':
'PCoA matrix computed from Jaccard distances between samples.',
'bray_curtis_pcoa_results':
'PCoA matrix computed from Bray-Curtis distances between samples.',
'jaccard_emperor':
'Emperor plot of the PCoA matrix computed from Jaccard.',
'bray_curtis_emperor':
'Emperor plot of the PCoA matrix computed from Bray-Curtis.',
},
name='Core diversity metrics (non-phylogenetic)',
description=("Applies a collection of diversity metrics "
"(non-phylogenetic) to a feature table.")
)
plugin.pipelines.register_function(
function=q2_diversity.beta_correlation,
inputs={'distance_matrix': DistanceMatrix},
parameters={
'metadata': MetadataColumn[Numeric],
'method': Str % Choices(['spearman', 'pearson']),
'permutations': Int % Range(0, None),
'intersect_ids': Bool,
'label1': Str,
'label2': Str
},
outputs=[('metadata_distance_matrix', DistanceMatrix),
('mantel_scatter_visualization', Visualization)],
input_descriptions={
'distance_matrix': 'Matrix of distances between pairs of samples.'},
parameter_descriptions={
'metadata': 'Numeric metadata column from which to compute pairwise '
'Euclidean distances',
'method': 'The correlation test to be applied in the Mantel test.',
'permutations': 'The number of permutations to be run when computing '
'p-values. Supplying a value of zero will disable '
'permutation testing and p-values will not be '
'calculated (this results in *much* quicker execution '
'time if p-values are not desired).',
'intersect_ids': 'If supplied, IDs that are not found in both '
'distance matrices will be discarded before applying '
'the Mantel test. Default behavior is to error on '
'any mismatched IDs.',
'label1': 'Label for `distance_matrix` in the output visualization.',
'label2': 'Label for `metadata_distance_matrix` in the output '
'visualization.'
},
output_descriptions={
'metadata_distance_matrix': 'The Distance Matrix produced from the '
'metadata column and used in the mantel '
'test',
'mantel_scatter_visualization': 'Scatter plot rendering of the Mantel '
'test results'},
name='Beta diversity correlation',
description=('Create a distance matrix from a numeric metadata column and '
'apply a two-sided Mantel test to identify correlation '
'between two distance matrices. Actions used internally: '
'`distance-matrix` from q2-metadata and `mantel` from '
'q2-diversity.')
)
plugin.methods.register_function(
function=q2_diversity.filter_distance_matrix,
inputs={
'distance_matrix': DistanceMatrix
},
parameters={
'metadata': Metadata,
'where': Str,
'exclude_ids': Bool
},
outputs=[
('filtered_distance_matrix', DistanceMatrix)
],
name="Filter samples from a distance matrix.",
description="Filter samples from a distance matrix, retaining only the "
"samples matching search criteria specified by "
"`metadata` and `where` parameters (or retaining only the "
"samples not matching that criteria, if `exclude_ids` is "
"True). See the filtering tutorial on "
"https://docs.qiime2.org for additional details.",
input_descriptions={
'distance_matrix': 'Distance matrix to filter by sample.'
},
parameter_descriptions={
'metadata': 'Sample metadata used with `where` parameter when '
'selecting samples to retain, or with `exclude_ids` '
'when selecting samples to discard.',
'where': 'SQLite WHERE clause specifying sample metadata criteria '
'that must be met to be included in the filtered distance '
'matrix. If not provided, all samples in `metadata` that are '
'also in the input distance matrix will be retained.',
'exclude_ids': 'If `True`, the samples selected by `metadata` or '
'`where` parameters will be excluded from the filtered '
'distance matrix instead of being retained.'
},
output_descriptions={
'filtered_distance_matrix': 'Distance matrix filtered to include '
'samples matching search criteria'
}
)
plugin.visualizers.register_function(
function=q2_diversity.alpha_group_significance,
inputs={'alpha_diversity': SampleData[AlphaDiversity]},
parameters={'metadata': Metadata},
input_descriptions={
'alpha_diversity': 'Vector of alpha diversity values by sample.'
},
parameter_descriptions={
'metadata': 'The sample metadata.'
},
name='Alpha diversity comparisons',
description=("Visually and statistically compare groups of alpha diversity"
" values."),
citations=[citations['kruskal1952use']]
)
plugin.visualizers.register_function(
function=q2_diversity.bioenv,
inputs={'distance_matrix': DistanceMatrix},
parameters={'metadata': Metadata},
input_descriptions={
'distance_matrix': 'Matrix of distances between pairs of samples.'
},
parameter_descriptions={
'metadata': 'The sample metadata.'
},
name='bioenv',
description=("Find the subsets of variables in metadata whose Euclidean "
"distances are maximally rank-correlated with distance "
"matrix. All numeric variables in metadata will be "
"considered, and samples which are missing data will be "
"dropped. The output visualization will indicate how many "
"samples were dropped due to missing data, if any were "
"dropped."),
citations=[citations['clarke1993method']]
)
beta_group_significance_methods = \
list(q2_diversity._beta._visualizer._beta_group_significance_fns)
plugin.visualizers.register_function(
function=q2_diversity.beta_group_significance,
inputs={'distance_matrix': DistanceMatrix},
parameters={'method': Str % Choices(beta_group_significance_methods),
'permutations': Int,
'metadata': MetadataColumn[Categorical],
'pairwise': Bool},
input_descriptions={
'distance_matrix': 'Matrix of distances between pairs of samples.'
},
parameter_descriptions={
'method': 'The group significance test to be applied.',
'permutations': ('The number of permutations to be run when computing '
'p-values.'),
'metadata': 'Categorical sample metadata column.',
'pairwise': ('Perform pairwise tests between all pairs of groups '
'in addition to the test across all groups. '
'This can be very slow if there are a lot of groups '
'in the metadata column.')
},
name='Beta diversity group significance',
description=('Determine whether groups of samples are significantly '
'different from one another using a permutation-based '
'statistical test.'),
citations=[citations['anderson2001new']]
)
plugin.visualizers.register_function(
function=q2_diversity.mantel,
inputs={'dm1': DistanceMatrix,
'dm2': DistanceMatrix},
parameters={'permutations': Int % Range(0, None),
'method': Str % Choices(['spearman', 'pearson']),
'intersect_ids': Bool,
'label1': Str,
'label2': Str},
name='Apply the Mantel test to two distance matrices',
description='Apply a two-sided Mantel test to identify correlation '
'between two distance matrices.\n\nNote: the directionality '
'of the comparison has no bearing on the results. Thus, '
'comparing distance matrix X to distance matrix Y is '
'equivalent to comparing Y to X.\n\nNote: the order of '
'samples within the two distance matrices does not need to be '
'the same; the distance matrices will be reordered before '
'applying the Mantel test.\n\nSee the scikit-bio docs for '
'more details about the Mantel test:\n\n'
'http://scikit-bio.org/docs/latest/generated/'
'skbio.stats.distance.mantel',
input_descriptions={
'dm1': 'Matrix of distances between pairs of samples.',
'dm2': 'Matrix of distances between pairs of samples.'
},
parameter_descriptions={
'method': 'The correlation test to be applied in the Mantel test.',
'permutations': 'The number of permutations to be run when computing '
'p-values. Supplying a value of zero will disable '
'permutation testing and p-values will not be '
'calculated (this results in *much* quicker execution '
'time if p-values are not desired).',
'intersect_ids': 'If supplied, IDs that are not found in both '
'distance matrices will be discarded before applying '
'the Mantel test. Default behavior is to error on '
'any mismatched IDs.',
'label1': 'Label for `dm1` in the output visualization.',
'label2': 'Label for `dm2` in the output visualization.'
},
citations=[
citations['mantel1967detection'],
citations['pearson1895note'],
citations['spearman1904proof']]
)
alpha_correlation_methods = \
list(q2_diversity._alpha._visualizer._alpha_correlation_fns)
plugin.visualizers.register_function(
function=q2_diversity.alpha_correlation,
inputs={'alpha_diversity': SampleData[AlphaDiversity]},
parameters={'method': Str % Choices(alpha_correlation_methods),
'metadata': Metadata,
'intersect_ids': Bool},
input_descriptions={
'alpha_diversity': 'Vector of alpha diversity values by sample.'
},
parameter_descriptions={
'method': 'The correlation test to be applied.',
'metadata': 'The sample metadata.',
'intersect_ids': 'If supplied, IDs that are not found in both '
'the alpha diversity vector and metadata will '
'be discarded before calculating '
'the correlation. Default behavior is to error on '
'any mismatched IDs.'
},
name='Alpha diversity correlation',
description=('Determine whether numeric sample metadata columns are '
'correlated with alpha diversity.'),
citations=[citations['pearson1895note'], citations['spearman1904proof']]
)
_metric_set = Set[Str % Choices((alpha.METRICS['PHYLO']['IMPL'] |
alpha.METRICS['PHYLO']['UNIMPL'] |
alpha.METRICS['NONPHYLO']['IMPL'] |
alpha.METRICS['NONPHYLO']['UNIMPL']) -
alpha.alpha_rarefaction_unsupported_metrics)]
plugin.visualizers.register_function(
function=q2_diversity.alpha_rarefaction,
inputs={'table': FeatureTable[Frequency],
'phylogeny': Phylogeny[Rooted]},
parameters={'metrics': _metric_set,
'metadata': Metadata,
'min_depth': Int % Range(1, None),
'max_depth': Int % Range(1, None),
'steps': Int % Range(2, None),
'iterations': Int % Range(1, None)},
input_descriptions={
'table': 'Feature table to compute rarefaction curves from.',
'phylogeny': 'Optional phylogeny for phylogenetic metrics.',
},
parameter_descriptions={
'metrics': ('The metrics to be measured. By default computes '
'observed_features, shannon, and if phylogeny is '
'provided, faith_pd.'),
'metadata': 'The sample metadata.',
'min_depth': 'The minimum rarefaction depth.',
'max_depth': ('The maximum rarefaction depth. '
'Must be greater than min_depth.'),
'steps': ('The number of rarefaction depths to include '
'between min_depth and max_depth.'),
'iterations': ('The number of rarefied feature tables to '
'compute at each step.'),
},
name='Alpha rarefaction curves',
description=('Generate interactive alpha rarefaction curves by computing '
'rarefactions between `min_depth` and `max_depth`. The '
'number of intermediate depths to compute is controlled by '
'the `steps` parameter, with n `iterations` being computed '
'at each rarefaction depth. If sample metadata is provided, '
'samples may be grouped based on distinct values within a '
'metadata column.'),
)
_beta_rarefaction_color_schemes = [
'BrBG', 'BrBG_r', 'PRGn', 'PRGn_r', 'PiYG', 'PiYG_r',
'PuOr', 'PuOr_r', 'RdBu', 'RdBu_r', 'RdGy', 'RdGy_r',
'RdYlBu', 'RdYlBu_r', 'RdYlGn', 'RdYlGn_r']
plugin.visualizers.register_function(
function=q2_diversity._beta.beta_rarefaction,
inputs={
'table': FeatureTable[Frequency],
'phylogeny': Phylogeny[Rooted]},
parameters={
'metric': Str % Choices(beta.METRICS['NONPHYLO']['IMPL'] |
beta.METRICS['NONPHYLO']['UNIMPL'] |
beta.METRICS['PHYLO']['IMPL'] |
beta.METRICS['PHYLO']['UNIMPL']),
'clustering_method': Str % Choices({'nj', 'upgma'}),
'metadata': Metadata,
'sampling_depth': Int % Range(1, None),
# Need at least two iterations to do a comparison.
'iterations': Int % Range(2, None),
'correlation_method': Str % Choices({'spearman', 'pearson'}),
'color_scheme': Str % Choices(_beta_rarefaction_color_schemes)
},
input_descriptions={
'table': 'Feature table upon which to perform beta diversity '
'rarefaction analyses.',
'phylogeny': 'Phylogenetic tree containing tip identifiers that '
'correspond to the feature identifiers in the table. '
'This tree can contain tip ids that are not present in '
'the table, but all feature ids in the table must be '
'present in this tree. [required for phylogenetic '
'metrics]'
},
parameter_descriptions={
'metric': 'The beta diversity metric to be computed.',
'sampling_depth': 'The total frequency that each sample should be '
'rarefied to prior to computing the diversity '
'metric.',
'clustering_method': 'Samples can be clustered with neighbor joining '
'or UPGMA. An arbitrary rarefaction trial will '
'be used for the tree, and the remaining trials '
'are used to calculate the support of the '
'internal nodes of that tree.',
'metadata': 'The sample metadata used for the Emperor jackknifed PCoA '
'plot.',
'iterations': 'Number of times to rarefy the feature table at a given '
'sampling depth.',
'correlation_method': 'The Mantel correlation test to be applied when '
'computing correlation between beta diversity '
'distance matrices.',
'color_scheme': 'The matplotlib color scheme to generate the heatmap '
'with.',
},
name='Beta diversity rarefaction',
description='Repeatedly rarefy a feature table to compare beta diversity '
'results within a given rarefaction depth.\n\n'
'For a given beta diversity metric, this visualizer will '
'provide: an Emperor jackknifed PCoA plot, samples clustered '
'by UPGMA or neighbor joining with support calculation, and '
'a heatmap showing the correlation between rarefaction trials '
'of that beta diversity metric.',
citations=[
citations['mantel1967detection'],
citations['pearson1895note'],
citations['spearman1904proof']]
)
plugin.visualizers.register_function(
function=q2_diversity.adonis,
inputs={'distance_matrix': DistanceMatrix},
parameters={'metadata': Metadata,
'formula': Str,
'permutations': Int % Range(1, None),
'n_jobs': Int % Range(1, None)},
input_descriptions={
'distance_matrix': 'Matrix of distances between pairs of samples.'
},
parameter_descriptions={
'metadata': 'Sample metadata containing formula terms.',
'formula': 'Model formula containing only independent terms contained '
'in the sample metadata. These can be continuous variables '
'or factors, and they can have interactions as in a '
'typical R formula. E.g., the formula "treatment+block" '
'would test whether the input distance matrix partitions '
'based on "treatment" and "block" sample metadata. The '
'formula "treatment*block" would test both of those '
'effects as well as their interaction. Enclose formulae in '
'quotes to avoid unpleasant surprises.',
'permutations': 'The number of permutations to be run when computing '
'p-values.',
'n_jobs': 'Number of parallel processes to run.'
},
name='adonis PERMANOVA test for beta group significance',
description=('Determine whether groups of samples are significantly '
'different from one another using the ADONIS permutation-'
'based statistical test in vegan-R. The function partitions '
'sums of squares of a multivariate data set, and is directly '
'analogous to MANOVA (multivariate analysis of variance). '
'This action differs from beta_group_significance in that it '
'accepts R formulae to perform multi-way ADONIS tests; '
'beta_group_signficance only performs one-way tests. For '
'more details, consult the reference manual available '
'on the CRAN vegan page: '
'https://CRAN.R-project.org/package=vegan'),
citations=[citations['anderson2001new'], citations['Oksanen2018']]
)
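# Hedged illustration (added; not part of the original module): a q2cli invocation of
# the adonis visualizer registered above, showing a multi-way R-style formula. File
# names and the formula are assumptions.
#
#   qiime diversity adonis \
#     --i-distance-matrix unweighted_unifrac_distance_matrix.qza \
#     --m-metadata-file sample-metadata.tsv \
#     --p-formula "treatment*block" \
#     --o-visualization adonis.qzv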
| qiime2/q2-diversity | q2_diversity/plugin_setup.py | Python | bsd-3-clause | 42,797 | ["VisIt", "scikit-bio"] | c6092ee73d2cb534f077eae4a8324ef9ab7b40346fcf214e967fbbeae804ae3e |
"""
End-to-end test for cohorted courseware. This uses both Studio and LMS.
"""
from bok_choy.page_object import XSS_INJECTION
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.common.utils import add_enrollment_course_modes, enroll_user_track
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.instructor_dashboard import InstructorDashboardPage
from common.test.acceptance.pages.studio.settings_group_configurations import GroupConfigurationsPage
from common.test.acceptance.pages.studio.xblock_editor import XBlockVisibilityEditorView
from common.test.acceptance.tests.discussion.helpers import CohortTestMixin
from common.test.acceptance.tests.lms.test_lms_user_preview import verify_expected_problem_visibility
from .studio.base_studio_test import ContainerBase
AUDIT_TRACK = "Audit"
VERIFIED_TRACK = "Verified"
class EndToEndCohortedCoursewareTest(ContainerBase, CohortTestMixin):
"""
End-to-end of cohorted courseware.
"""
shard = 5
def setUp(self, is_staff=True):
super(EndToEndCohortedCoursewareTest, self).setUp(is_staff=is_staff)
self.staff_user = self.user
self.content_group_a = "Content Group A" + XSS_INJECTION
self.content_group_b = "Content Group B" + XSS_INJECTION
# Creates the Course modes needed to test enrollment tracks
add_enrollment_course_modes(self.browser, self.course_id, ["audit", "verified"])
# Create a student who will be in "Cohort A"
self.cohort_a_student_username = "cohort_a_student"
self.cohort_a_student_email = "cohort_a_student@example.com"
AutoAuthPage(
self.browser, username=self.cohort_a_student_username, email=self.cohort_a_student_email, no_login=True
).visit()
# Create a student who will be in "Cohort B"
self.cohort_b_student_username = "cohort_b_student"
self.cohort_b_student_email = "cohort_b_student@example.com"
AutoAuthPage(
self.browser, username=self.cohort_b_student_username, email=self.cohort_b_student_email, no_login=True
).visit()
# Create a Verified Student
self.cohort_verified_student_username = "cohort_verified_student"
self.cohort_verified_student_email = "cohort_verified_student@example.com"
AutoAuthPage(
self.browser,
username=self.cohort_verified_student_username,
email=self.cohort_verified_student_email,
no_login=True
).visit()
# Create audit student
self.cohort_audit_student_username = "cohort_audit_student"
self.cohort_audit_student_email = "cohort_audit_student@example.com"
AutoAuthPage(
self.browser,
username=self.cohort_audit_student_username,
email=self.cohort_audit_student_email,
no_login=True
).visit()
# Create a student who will end up in the default cohort group
self.cohort_default_student_username = "cohort_default_student"
self.cohort_default_student_email = "cohort_default_student@example.com"
AutoAuthPage(
self.browser, username=self.cohort_default_student_username,
email=self.cohort_default_student_email, no_login=True
).visit()
# Start logged in as the staff user.
AutoAuthPage(
self.browser, username=self.staff_user["username"], email=self.staff_user["email"]
).visit()
def populate_course_fixture(self, course_fixture):
"""
Populate the children of the test course fixture.
"""
self.group_a_problem = 'GROUP A CONTENT'
self.group_b_problem = 'GROUP B CONTENT'
self.group_verified_problem = 'GROUP VERIFIED CONTENT'
self.group_audit_problem = 'GROUP AUDIT CONTENT'
self.group_a_and_b_problem = 'GROUP A AND B CONTENT'
self.visible_to_all_problem = 'VISIBLE TO ALL CONTENT'
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('problem', self.group_a_problem, data='<problem></problem>'),
XBlockFixtureDesc('problem', self.group_b_problem, data='<problem></problem>'),
XBlockFixtureDesc('problem', self.group_verified_problem, data='<problem></problem>'),
XBlockFixtureDesc('problem', self.group_audit_problem, data='<problem></problem>'),
XBlockFixtureDesc('problem', self.group_a_and_b_problem, data='<problem></problem>'),
XBlockFixtureDesc('problem', self.visible_to_all_problem, data='<problem></problem>')
)
)
)
)
def create_content_groups(self):
"""
Creates two content groups in Studio Group Configurations Settings.
"""
group_configurations_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
group_configurations_page.visit()
group_configurations_page.create_first_content_group()
config = group_configurations_page.content_groups[0]
config.name = self.content_group_a
config.save()
group_configurations_page.add_content_group()
config = group_configurations_page.content_groups[1]
config.name = self.content_group_b
config.save()
def link_problems_to_content_groups_and_publish(self):
"""
Updates 5 of the 6 existing problems to limit their visibility by content group.
Publishes the modified units.
"""
container_page = self.go_to_unit_page()
enrollment_group = 'enrollment_track_group'
def set_visibility(problem_index, groups, group_partition='content_group'):
problem = container_page.xblocks[problem_index]
problem.edit_visibility()
visibility_dialog = XBlockVisibilityEditorView(self.browser, problem.locator)
partition_name = (visibility_dialog.ENROLLMENT_TRACK_PARTITION
if group_partition == enrollment_group
else visibility_dialog.CONTENT_GROUP_PARTITION)
visibility_dialog.select_groups_in_partition_scheme(partition_name, groups)
set_visibility(1, [self.content_group_a])
set_visibility(2, [self.content_group_b])
set_visibility(3, [VERIFIED_TRACK], enrollment_group)
set_visibility(4, [AUDIT_TRACK], enrollment_group)
set_visibility(5, [self.content_group_a, self.content_group_b])
container_page.publish()
def create_cohorts_and_assign_students(self):
"""
Adds 2 manual cohorts, linked to content groups, to the course.
Each cohort is assigned one student.
"""
instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
instructor_dashboard_page.visit()
cohort_management_page = instructor_dashboard_page.select_cohort_management()
def add_cohort_with_student(cohort_name, content_group, student):
cohort_management_page.add_cohort(cohort_name, content_group=content_group)
cohort_management_page.add_students_to_selected_cohort([student])
add_cohort_with_student("Cohort A", self.content_group_a, self.cohort_a_student_username)
add_cohort_with_student("Cohort B", self.content_group_b, self.cohort_b_student_username)
def view_cohorted_content_as_different_users(self):
"""
View content as staff, student in Cohort A, student in Cohort B, Verified Student, Audit student,
and student in Default Cohort.
"""
courseware_page = CoursewarePage(self.browser, self.course_id)
def login_and_verify_visible_problems(username, email, expected_problems, track=None):
AutoAuthPage(
self.browser, username=username, email=email, course_id=self.course_id
).visit()
if track is not None:
enroll_user_track(self.browser, self.course_id, track)
courseware_page.visit()
verify_expected_problem_visibility(self, courseware_page, expected_problems)
login_and_verify_visible_problems(
self.staff_user["username"], self.staff_user["email"],
[self.group_a_problem,
self.group_b_problem,
self.group_verified_problem,
self.group_audit_problem,
self.group_a_and_b_problem,
self.visible_to_all_problem
],
)
login_and_verify_visible_problems(
self.cohort_a_student_username, self.cohort_a_student_email,
[self.group_a_problem, self.group_audit_problem, self.group_a_and_b_problem, self.visible_to_all_problem]
)
login_and_verify_visible_problems(
self.cohort_b_student_username, self.cohort_b_student_email,
[self.group_b_problem, self.group_audit_problem, self.group_a_and_b_problem, self.visible_to_all_problem]
)
login_and_verify_visible_problems(
self.cohort_verified_student_username, self.cohort_verified_student_email,
[self.group_verified_problem, self.visible_to_all_problem],
'verified'
)
login_and_verify_visible_problems(
self.cohort_audit_student_username, self.cohort_audit_student_email,
[self.group_audit_problem, self.visible_to_all_problem],
'audit'
)
login_and_verify_visible_problems(
self.cohort_default_student_username, self.cohort_default_student_email,
[self.group_audit_problem, self.visible_to_all_problem],
)
def test_cohorted_courseware(self):
"""
Scenario: Can create content that is only visible to students in particular cohorts
Given that I have a course with 6 problems, 1 staff member, and 6 students
When I enable cohorts in the course
And I add the Course Modes for Verified and Audit
And I create two content groups, Content Group A, and Content Group B, in the course
And I link one problem to Content Group A
And I link one problem to Content Group B
And I link one problem to the Verified Group
And I link one problem to the Audit Group
And I link one problem to both Content Group A and Content Group B
And one problem remains unlinked to any Content Group
And I create two manual cohorts, Cohort A and Cohort B,
linked to Content Group A and Content Group B, respectively
And I assign one student to each manual cohort
And I assign one student to each enrollment track
And one student remains in the default cohort
Then the staff member can see all 6 problems
And the student in Cohort A can see all the problems linked to A
And the student in Cohort B can see all the problems linked to B
And the student in Verified can see the problems linked to Verified and those not linked to a Group
And the student in Audit can see the problems linked to Audit and those not linked to a Group
And the student in the default cohort can only see the problem that is unlinked to any Content Group
"""
self.enable_cohorting(self.course_fixture)
self.create_content_groups()
self.link_problems_to_content_groups_and_publish()
self.create_cohorts_and_assign_students()
self.view_cohorted_content_as_different_users()
| cpennington/edx-platform | common/test/acceptance/tests/test_cohorted_courseware.py | Python | agpl-3.0 | 12,088 | ["VisIt"] | 6d635771aaf6b846eb86a7b08e26e2fadd99f09404e5d61947b912cba4cb08e0 |
import numpy as np # type: ignore
from scipy.ndimage.filters import gaussian_filter1d # type: ignore
from pymatgen import Structure # type: ignore
from typing import List, Optional, TypeVar, Type
"""
This module provides classes for calculating radial distribution functions
and Van Hove correlation functions.
"""
RDF = TypeVar('RDF', bound='RadialDistributionFunction')
class RadialDistributionFunction(object):
"""
Class for computing radial distribution functions.
Attributes:
nbins (int): Number of bins.
range ((float, float)): Minimum and maximum values of r.
intervals (np.array(float)): r values of the bin edges.
dr (float): bin width.
r (float): mid-points of each bin.
rdf (np.array(float)): RDF values.
coordination_number (np.array(float)): Volume integral of the RDF.
"""
def __init__(self,
structures: List[Structure],
indices_i: List[int],
indices_j: Optional[List[int]] = None,
nbins: int = 500,
r_min: float = 0.0,
r_max: float = 10.0,
weights: Optional[List[float]] = None) -> None:
"""
Initialise a RadialDistributionFunction instance.
Args:
structures (list(pymatgen.Structure)): List of pymatgen Structure objects.
indices_i (list(int)): List of indices for species i.
indices_j (:obj:`list(int)`, optional): List of indices for species j. Optional,
default is `None`.
nbins (:obj:`int`, optional): Number of bins used for the RDF. Optional, default is 500.
r_min (:obj:`float`, optional): Minimum r value. Optional, default is 0.0.
r_max (:obj:`float`, optional): Maximum r value. Optional, default is 10.0.
weights (:obj:`list(float)`, optional): List of weights for each structure.
Optional, default is `None`.
Returns:
None
"""
if weights:
if len(weights) != len(structures):
raise ValueError('List of structure weights needs to be the same length'
' as the list of structures.')
else:
weights = [1.0] * len(structures)
self.self_reference = (not indices_j) or (indices_j == indices_i)
if not indices_j:
indices_j = indices_i
self.indices_i = indices_i
self.indices_j = indices_j
self.nbins = nbins
self.range = (r_min, r_max)
self.intervals = np.linspace(r_min, r_max, nbins + 1)
self.dr = (r_max - r_min) / nbins
self.r = self.intervals[:-1] + self.dr / 2.0
ff = shell_volumes(self.intervals)
self.coordination_number = np.zeros(nbins)
self.rdf = np.zeros((nbins), dtype=np.double)
for structure, weight in zip(structures, weights):
hist = np.histogram(self.__dr_ij(structure),
bins=nbins,
range=(r_min, r_max),
density=False)[0]
rho = float(len(self.indices_i)) / structure.lattice.volume
self.rdf += hist * weight / rho
self.coordination_number += np.cumsum(hist)
self.rdf = self.rdf / ff / sum(weights) / float(len(indices_j))
self.coordination_number = self.coordination_number / \
sum(weights) / float(len(self.indices_j))
def smeared_rdf(self,
sigma: float = 0.1) -> np.ndarray:
"""
Smear the RDF with a Gaussian kernel.
Args:
sigma (:obj:`float`, optional): Standard deviation for Gaussian kernel.
Optional, default is 0.1.
Returns:
(np.array): Smeared RDF data.
"""
sigma_n_bins = sigma / self.dr
return gaussian_filter1d(self.rdf, sigma=sigma_n_bins)
@classmethod
def from_species_strings(cls: Type[RDF],
structures: List[Structure],
species_i: str,
species_j: Optional[str] = None,
**kwargs) -> RDF:
"""
Initialise a RadialDistributionFunction instance by specifying species strings.
Args:
structures (list(pymatgen.Structure)): List of pymatgen Structure objects.
species_i (str): String for species i, e.g. ``"Na"``.
species_j (:obj:`str`, optional): String for species j, e.g. ``"Cl"``. Optional,
default is `None`.
**kwargs: Variable length keyword argument list.
See :func:`vasppy.rdf.RadialDistributionFunction`
for the full list of accepted arguments.
Returns:
(RadialDistributionFunction)
"""
indices_i: List[int]
indices_j: Optional[List[int]]
indices_i = [i for i, site in
enumerate(structures[0]) if site.species_string == species_i]
if species_j:
indices_j = [j for j, site in
enumerate(structures[0]) if site.species_string == species_j]
else:
indices_j = None
return cls(structures=structures,
indices_i=indices_i,
indices_j=indices_j,
**kwargs)
def __dr_ij(self,
structure: Structure) -> np.ndarray:
"""
Calculate all i-j interatomic distances for a single pymatgen Structure.
Args:
structure (:obj:`pymatgen.Structure`): A pymatgen Structure.
Returns:
np.array: 1D numpy array of length N_i x N_j of distances.
"""
lattice = structure.lattice
i_frac_coords = structure.frac_coords[self.indices_i]
j_frac_coords = structure.frac_coords[self.indices_j]
dr_ij = lattice.get_all_distances(i_frac_coords, j_frac_coords)
# Mask dr_ij 2D array to remove i==j dr=0 terms
mask = np.ones(dr_ij.shape, dtype=bool)
if self.self_reference:
np.fill_diagonal(mask, 0)
return np.ndarray.flatten(dr_ij[mask])
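# Hedged usage sketch (added for illustration; not part of the original module).
# Shows how a Na-Cl partial RDF might be built from a list of pymatgen Structures and
# smeared for plotting. The species strings, bin settings, and smearing width are
# assumptions.
def _example_na_cl_rdf(structures):
    rdf = RadialDistributionFunction.from_species_strings(
        structures=structures, species_i='Na', species_j='Cl',
        nbins=500, r_max=8.0)
    return rdf.r, rdf.smeared_rdf(sigma=0.1)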
VHA = TypeVar('VHA', bound='VanHoveAnalysis')
class VanHoveAnalysis(object):
"""
Class for computing Van Hove correlation functions.
Attributes:
nbins (int): Number of bins.
range ((float, float)): Minimum and maximum values of r.
intervals (np.array(float)): r values of the bin edges.
dr (float): bin width.
r (float): mid-points of each bin.
gsrt (np.array(float)): Self part of the Van Hove correlation function.
gdrt (np.array(float)): Distinct part of the Van Hove correlation function.
"""
def __init__(self,
structures: List[Structure],
indices: List[int],
d_steps: int,
nbins: int = 500,
r_min: float = 0.0,
r_max: float = 10.0):
"""
Initialise a VanHoveCorrelationFunction instance.
Args:
structures (list(pymatgen.Structure)): List of pymatgen Structure objects.
indices (list(int)): List of indices for species to consider.
d_steps (int): number of steps between structures at dt=0 and dt=t.
nbins (:obj:`int`, optional): Number of bins used for the RDF. Optional, default is 500.
r_min (:obj:`float`, optional): Minimum r value. Optional, default is 0.0.
r_max (:obj:`float`, optional): Maximum r value. Optional, default is 10.0.
Returns:
None
"""
self.nbins = nbins
self.range = (r_min, r_max)
self.intervals = np.linspace(r_min, r_max, nbins + 1)
self.dr = (r_max - r_min) / nbins
self.r = self.intervals[:-1] + self.dr / 2.0
self.gdrt = np.zeros((nbins), dtype=np.double)
self.gsrt = np.zeros((nbins), dtype=np.double)
lattice = structures[0].lattice
ff = shell_volumes(self.intervals)
rho = len(indices) / lattice.volume
for struc_i, struc_j in zip(structures[:len(structures) - d_steps], structures[d_steps:]):
i_frac_coords = struc_i.frac_coords[indices]
j_frac_coords = struc_j.frac_coords[indices]
dr_ij = lattice.get_all_distances(i_frac_coords, j_frac_coords)
mask = np.ones(dr_ij.shape, dtype=bool)
np.fill_diagonal(mask, 0)
distinct_dr_ij = np.ndarray.flatten(dr_ij[mask])
hist = np.histogram(distinct_dr_ij, bins=nbins,
range=(0.0, r_max), density=False)[0]
self.gdrt += hist / rho
self_dr_ij = np.ndarray.flatten(dr_ij[np.invert(mask)])
hist = np.histogram(self_dr_ij, bins=nbins,
range=(0.0, r_max), density=False)[0]
self.gsrt += hist / rho
self.gdrt = self.gdrt / ff / \
(len(structures) - d_steps) / float(len(indices))
self.gsrt = self.gsrt / \
(len(structures) - d_steps) / float(len(indices))
def self(self,
sigma: Optional[float] = None) -> np.ndarray:
if sigma:
return self.smeared_gsrt(sigma=sigma)
else:
return self.gsrt
def distinct(self,
sigma: Optional[float] = None) -> np.ndarray:
if sigma:
return self.smeared_gdrt(sigma=sigma)
else:
return self.gdrt
def smeared_gsrt(self,
sigma: float = 0.1) -> np.ndarray:
"""
Smear the self part of the Van Hove correlation function with a Gaussian kernel.
Args:
sigma (:obj:`float`, optional): Standard deviation for Gaussian kernel. Optional, default is 0.1.
Returns:
(np.array): Smeared data.
"""
sigma_n_bins = sigma / self.dr
return gaussian_filter1d(self.gsrt, sigma=sigma_n_bins)
def smeared_gdrt(self,
sigma: float = 0.1) -> np.ndarray:
"""
Smear the distinct part of the Van Hove correlation function with a Gaussian kernel.
Args:
sigma (:obj:`float`, optional): Standard deviation for Gaussian kernel. Optional, default is 0.1.
Returns:
(np.array): Smeared data.
"""
sigma_n_bins = sigma / self.dr
return gaussian_filter1d(self.gdrt, sigma=sigma_n_bins)
def shell_volumes(intervals: np.ndarray) -> np.ndarray:
"""Volumes of concentric spherical shells.
Args:
intervals (np.array): N radial boundaries used to define the set of N-1 shells.
Returns:
np.array: Volumes of each shell.
"""
return 4.0 / 3.0 * np.pi * (intervals[1:]**3 - intervals[:-1]**3)
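# Hedged usage sketch (added for illustration; not part of the original module).
# Shows how the self and distinct parts of the Van Hove correlation function might be
# computed for one mobile species over a fixed lag of d_steps frames. The species
# selection, lag, and smearing width are assumptions.
def _example_van_hove(structures):
    li_indices = [i for i, site in enumerate(structures[0])
                  if site.species_string == 'Li']  # assumed mobile species
    vh = VanHoveAnalysis(structures=structures, indices=li_indices,
                         d_steps=50, nbins=500, r_max=10.0)
    return vh.r, vh.self(sigma=0.1), vh.distinct(sigma=0.1)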
| bjmorgan/vasppy | vasppy/rdf.py | Python | mit | 10,969 | ["Gaussian", "pymatgen"] | 76e19bb92ac4a3c9a800b30da9fba825a0ab90e23ddde3e5ad457bbb8384c93a |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This package contains classes to parse input files from the exciting
code package.
"""
from .inputs import ExcitingInput
| materialsproject/pymatgen | pymatgen/io/exciting/__init__.py | Python | mit | 220 | ["exciting", "pymatgen"] | c5179c38fe73a9302b5a1490c441560b56686d1311f5c20b765f38bceb29413f |
import pysam, sys, os, random
from jobTree.src.bioio import fastaRead, fastqRead, \
cigarReadFromString,PairwiseAlignment, fastaWrite, fastqWrite, logger, absSymPath, reverseComplementChar
def pathToBaseNanoporeDir():
"""Returns path to base directory "marginAlign"
"""
import marginAlign
i = absSymPath(__file__)
return os.path.split(os.path.split(os.path.split(i)[0])[0])[0]
def getFirstNonClippedPositionInRead(alignedSegment, readSeq):
"""Gets the coordinate of the first non-clipped position in the read relative to the
complete read sequence (including any hard clipped bases).
If the alignment is on the reverse strand the coordinate is negative, e.g. the reverse strand coordinate of
the 2nd position of the read sequence is -1 (0 based).
"""
if alignedSegment.cigar[0][0] == 5: #Translate the read position to the original
#coordinates by removing hard clipping
readOffset = alignedSegment.cigar[0][1]
else:
readOffset = 0
if alignedSegment.is_reverse: #SEQ is reverse complemented
readOffset = -(len(readSeq) - 1 - readOffset)
readOffset += alignedSegment.query_alignment_start #This removes any soft-clipping
return readOffset
def getLastNonClippedPositionInRead(alignedSegment, readSeq):
"""As getFirstNonClippedPositionInRead, but returns the last
non-clipped position in the read, relative to the complete read sequence.
"""
return getFirstNonClippedPositionInRead(alignedSegment, readSeq) + \
alignedSegment.query_alignment_end - alignedSegment.query_alignment_start -1
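# Hedged worked example (added for illustration; not part of the original module),
# assuming a forward-strand read of 100 bases (hard clips included) with CIGAR
# 5H 3S 90M 2S, so query_alignment_start == 3 and query_alignment_end == 93:
#   getFirstNonClippedPositionInRead -> 5 (hard clip) + 3 (soft clip) = 8
#   getLastNonClippedPositionInRead  -> 8 + (93 - 3) - 1 = 97
# i.e. the aligned portion covers indices 8..97 of the complete read sequence. On the
# reverse strand the same positions come out as negative coordinates, as described in
# the docstrings above.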
def getExonerateCigarFormatString(alignedSegment, sam):
"""Gets a complete exonerate like cigar-string describing the sam line,
with the cigar described with respect to the alignedSegment.query_sequence string,
which includes softclip bases, but not hard-clipped bases.
"""
for op, length in alignedSegment.cigar:
assert op in (0, 1, 2, 4, 5)
translation = { 0:"M", 1:"I", 2:"D" }
cigarString = " ".join([ "%s %i" % (translation[op], length) for op, length in
alignedSegment.cigar if op in translation ]) #Ignore soft clips
completeCigarString = "cigar: %s %i %i + %s %i %i + 1 %s" % (
alignedSegment.query_name, alignedSegment.qstart, alignedSegment.query_alignment_end,
sam.getrname(alignedSegment.reference_id), alignedSegment.reference_start, alignedSegment.reference_end, cigarString)
##Assertions
pA = cigarReadFromString(completeCigarString) #This checks it's an okay cigar
assert sum([ op.length for op in pA.operationList if op.type == \
PairwiseAlignment.PAIRWISE_MATCH ]) == \
len([ readPos for readPos, refPos in alignedSegment.aligned_pairs if \
readPos != None and refPos != None ])
##End assertions
return completeCigarString
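# Hedged usage sketch (added for illustration; not part of the original module).
# Shows how getExonerateCigarFormatString might be used to dump exonerate-style cigar
# lines for every aligned read in a SAM file. The file names are assumptions.
def _dumpExonerateCigars(samInputFile, cigarOutputFile):
    sam = pysam.Samfile(samInputFile, "r")
    with open(cigarOutputFile, 'w') as fileHandle:
        for alignedSegment in samIterator(sam):
            fileHandle.write(getExonerateCigarFormatString(alignedSegment, sam) + "\n")
    sam.close()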
def samToBamFile(samInputFile, bamOutputFile):
"""Converts a sam file to a bam file (sorted)
"""
samfile = pysam.Samfile(samInputFile, "r" )
bamfile = pysam.Samfile(bamOutputFile, "wb", template=samfile)
for line in samfile:
bamfile.write(line)
samfile.close()
bamfile.close()
def getFastaDictionary(fastaFile):
"""Returns a dictionary of the first words of fasta headers to their corresponding
fasta sequence
"""
namesAndSequences = map(lambda x : (x[0].split()[0], x[1]), fastaRead(open(fastaFile, 'r')))
names = map(lambda x : x[0], namesAndSequences)
assert len(names) == len(set(names)) #Check all the names are unique
return dict(namesAndSequences) #Hash of names to sequences
def makeFastaSequenceNamesUnique(inputFastaFile, outputFastaFile):
"""Makes a fasta file with unique names
"""
names = set()
fileHandle = open(outputFastaFile, 'w')
for name, seq in fastaRead(open(inputFastaFile, 'r')):
while name in names:
logger.critical("Got a duplicate fasta sequence name: %s" % name)
name += "i"
names.add(name)
fastaWrite(fileHandle, name, seq)
fileHandle.close()
return outputFastaFile
def makeFastqSequenceNamesUnique(inputFastqFile, outputFastqFile):
"""Makes a fastq file with unique names
"""
names = set()
fileHandle = open(outputFastqFile, 'w')
for name, seq, quals in fastqRead(open(inputFastqFile, 'r')):
name = name.split()[0] #Get rid of any white space
while name in names:
logger.critical("Got a duplicate fastq sequence name: %s" % name)
name += "i"
names.add(name)
fastqWrite(fileHandle, name, seq, quals)
fileHandle.close()
return outputFastqFile
def samIterator(sam):
"""Creates an iterator over the aligned reads in a sam file, filtering out
any reads that have no reference alignment.
"""
for aR in sam:
if aR.reference_id != -1:
yield aR
def combineSamFiles(baseSamFile, extraSamFiles, outputSamFile):
"""Combines the lines from multiple sam files into one sam file
"""
sam = pysam.Samfile(baseSamFile, "r" )
outputSam = pysam.Samfile(outputSamFile, "wh", template=sam)
sam.close()
for samFile in [ baseSamFile ] + extraSamFiles:
sam = pysam.Samfile(samFile, "r" )
for line in sam:
outputSam.write(line)
sam.close()
outputSam.close()
def paralleliseSamProcessingTargetFn(target, samFile,
referenceFastaFile, outputFile,
childTargetFn, followOnTargetFn, options):
"""Parallelise a computation over the alignments in a SAM file.
"""
#Load reference sequences
refSequences = getFastaDictionary(referenceFastaFile) #Hash of names to sequences
tempOutputFiles = []
childCount, totalSeqLength = 0, sys.maxint
tempExonerateFile, tempQueryFile = None, None
tempExonerateFileHandle, tempQueryFileHandle = None, None
refName = None
#Read through the SAM file
sam = pysam.Samfile(samFile, "r" )
def makeChild():
#Add a child target to do the processing of a subset of the lines.
if tempExonerateFile != None:
tempExonerateFileHandle.close()
tempQueryFileHandle.close()
#Temporary cigar file to store the realignment
tempOutputFiles.append(os.path.join(target.getGlobalTempDir(),
"tempOutput_%i.txt" % childCount))
target.addChildTargetFn(childTargetFn,
args=(tempExonerateFile, refName,
refSequences[refName],
tempQueryFile, tempOutputFiles[-1], options))
    for aR in samIterator(sam):
#Iterate on the sam lines realigning them in parallel
if totalSeqLength > options.maxAlignmentLengthPerJob or \
refName != sam.getrname(aR.reference_id):
makeChild()
tempExonerateFile = os.path.join(target.getGlobalTempDir(),
"tempExonerateCigar_%s.cig" % childCount)
tempExonerateFileHandle = open(tempExonerateFile, 'w')
tempQueryFile = os.path.join(target.getGlobalTempDir(),
"tempQueryCigar_%s.fa" % childCount)
tempQueryFileHandle = open(tempQueryFile, 'w')
childCount += 1
totalSeqLength = 0
tempExonerateFileHandle.write(getExonerateCigarFormatString(aR, sam) + "\n")
fastaWrite(tempQueryFileHandle, aR.query_name, aR.query_sequence) #This is the query sequence, including soft clipped bases, but excluding hard clip bases
totalSeqLength += len(aR.query_sequence)
refName = sam.getrname(aR.reference_id)
makeChild()
target.setFollowOnTargetFn(followOnTargetFn, args=(samFile, referenceFastaFile, \
outputFile, tempOutputFiles, options))
#Finish up
sam.close()
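#Sketch of the call signatures paralleliseSamProcessingTargetFn expects, inferred from the
#args tuples above (jobTree-style frameworks pass the target as the first argument). The
#bodies are placeholders, not the original implementations.
def exampleChildTargetFn(target, exonerateCigarFile, referenceSequenceName,
                         referenceSequence, querySequenceFile, outputFile, options):
    pass #Process the cigars/query sequences for one reference and write results to outputFile
def exampleFollowOnTargetFn(target, samFile, referenceFastaFile, outputFile,
                            tempOutputFiles, options):
    pass #Merge the per-child tempOutputFiles into outputFile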
###The following code is used by the tests/plots
def getFastqDictionary(fastqFile):
"""Returns a dictionary of the first words of fastq headers to their corresponding
fastq sequence
"""
namesAndSequences = map(lambda x : (x[0].split()[0], x[1]), fastqRead(open(fastqFile, 'r')))
names = map(lambda x : x[0], namesAndSequences)
assert len(names) == len(set(names)) #Check all the names are unique
return dict(namesAndSequences) #Hash of names to sequences
class AlignedPair:
"""Represents an aligned pair of positions using absolute reference/read coordinates.
Originally coded when I was figuring out pySam, hence is full of assertions and uses
global coordinates.
"""
def __init__(self, refPos, refSeq, readPos, isReversed, readSeq, pPair):
assert refPos >= 0 and refPos < len(refSeq)
self.refPos = refPos
self.refSeq = refSeq
assert readPos >= 0 and readPos < len(readSeq)
self.readPos = readPos
self.isReversed = isReversed
self.readSeq = readSeq
self.pPair = pPair #Pointer to the previous aligned pair
self.bases = set([ 'A', 'C', 'G', 'T' ])
def isMatch(self):
return self.getRefBase().upper() == self.getReadBase().upper() and \
self.getRefBase().upper() in self.bases
def isMismatch(self):
return self.getRefBase().upper() != self.getReadBase().upper() and \
self.getRefBase().upper() in self.bases and self.getReadBase().upper() in self.bases
def getRefBase(self):
return self.refSeq[self.refPos]
def getReadBase(self):
if self.isReversed:
return reverseComplementChar(self.readSeq[self.readPos])
return self.readSeq[self.readPos]
def getSignedReadPos(self):
if self.isReversed:
return -self.readPos
return self.readPos
def getPrecedingReadInsertionLength(self, globalAlignment=False):
#If global alignment flag is true any unaligned prefix/suffix insertion at the beginning
#and end of the read sequence is interpreted as an insertion, rather than being ignored.
if self.pPair == None:
if globalAlignment:
if self.isReversed:
assert len(self.readSeq) - self.readPos - 1 >= 0
return len(self.readSeq) - self.readPos - 1
return self.readPos
return 0
return self._indelLength(self.readPos, self.pPair.readPos)
def getPrecedingReadDeletionLength(self, globalAlignment=False):
if self.pPair == None:
if globalAlignment:
return self.refPos
return 0
return self._indelLength(self.refPos, self.pPair.refPos)
@staticmethod
def _indelLength(pos, pPos):
length = abs(pPos - pos) - 1
assert length >= 0
return length
@staticmethod
def iterator(alignedSegment, refSeq, readSeq):
"""Generates aligned pairs from a pysam.AlignedSegment object.
"""
readOffset = getFirstNonClippedPositionInRead(alignedSegment, readSeq)
pPair = None
assert len(alignedSegment.query_sequence) <= len(readSeq)
for readPos, refPos in alignedSegment.aligned_pairs: #Iterate over the block
if readPos != None and refPos != None:
assert refPos >= alignedSegment.reference_start and refPos < alignedSegment.reference_end
                if refPos >= len(refSeq): #This is masking an (apparently minor?) one-off
                    #error in the BWA sam files?
logger.critical("Detected an aligned reference position out of \
bounds! Reference length: %s, reference coordinate: %s" % \
(len(refSeq), refPos))
continue
aP = AlignedPair(refPos, refSeq, abs(readOffset + readPos),
alignedSegment.is_reverse, readSeq, pPair)
if aP.getReadBase().upper() != alignedSegment.query_alignment_sequence[readPos].upper():
logger.critical("Detected a discrepancy between the absolute read \
sequence and the aligned read sequence. Bases: %s %s, \
read-position: %s, is reversed: %s, absolute read offset: %s, \
length absolute read sequence %s, length aligned read sequence %s, \
length aligned read sequence plus soft clipping %s, read name: %s, \
cigar string %s" % (aP.getReadBase().upper(),
alignedSegment.query_alignment_sequence[readPos].upper(), readPos,
alignedSegment.is_reverse, readOffset, len(readSeq),
len(alignedSegment.query_alignment_sequence), len(alignedSegment.query_sequence),
alignedSegment.query_name, alignedSegment.cigarstring))
pPair = aP
yield aP
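#Illustrative sketch (not part of the original module): counting matches and mismatches for
#a single mapped read with AlignedPair.iterator. The arguments are assumed to come from the
#same SAM file and its associated reference/read sequence dictionaries.
def exampleCountMatchesAndMismatches(alignedSegment, refSeq, readSeq):
    matches, mismatches = 0, 0
    for aP in AlignedPair.iterator(alignedSegment, refSeq, readSeq):
        if aP.isMatch():
            matches += 1
        elif aP.isMismatch():
            mismatches += 1
    return matches, mismatches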
class ReadAlignmentStats:
"""Calculates stats of a given read alignment.
    Global alignment means the entire reference and read sequences are considered part of the
    alignment, so unaligned prefixes and suffixes are counted as (trailing) indels.
"""
def __init__(self, readSeq, refSeq, alignedRead, globalAlignment=False):
self.matches = 0
self.mismatches = 0
self.ns = 0
self.totalReadInsertionLength = 0
self.totalReadInsertions = 0
self.totalReadDeletionLength = 0
self.totalReadDeletions = 0
self.readSeq = readSeq
self.refSeq = refSeq
self.globalAlignment = globalAlignment
#Now process the read alignment
totalReadInsertionLength, totalReadDeletionLength = 0, 0
aP = None
for aP in AlignedPair.iterator(alignedRead, self.refSeq, self.readSeq):
if aP.isMatch():
self.matches += 1
elif aP.isMismatch():
self.mismatches += 1
else:
self.ns += 1
if aP.getPrecedingReadInsertionLength(self.globalAlignment) > 0:
self.totalReadInsertions += 1
totalReadInsertionLength += aP.getPrecedingReadInsertionLength(self.globalAlignment)
if aP.getPrecedingReadDeletionLength(self.globalAlignment) > 0:
self.totalReadDeletions += 1
totalReadDeletionLength += aP.getPrecedingReadDeletionLength(self.globalAlignment)
if self.globalAlignment and aP != None: #If global alignment account for any trailing indels
assert len(self.refSeq) - aP.refPos - 1 >= 0
if len(self.refSeq) - aP.refPos - 1 > 0:
self.totalReadDeletions += 1
                totalReadDeletionLength += len(self.refSeq) - aP.refPos - 1 #Accumulated locally, added to self below
if alignedRead.is_reverse:
                assert aP.readPos >= 0
if aP.readPos > 0:
self.totalReadInsertions += 1
totalReadInsertionLength += aP.readPos
else:
assert len(self.readSeq) - aP.readPos - 1 >= 0
if len(self.readSeq) - aP.readPos - 1 > 0:
self.totalReadInsertions += 1
totalReadInsertionLength += len(self.readSeq) - aP.readPos - 1
assert totalReadInsertionLength <= len(self.readSeq)
assert totalReadDeletionLength <= len(self.refSeq)
self.totalReadInsertionLength += totalReadInsertionLength
self.totalReadDeletionLength += totalReadDeletionLength
@staticmethod
def formatRatio(numerator, denominator):
if denominator == 0:
return float("nan")
return float(numerator)/denominator
def readCoverage(self):
return self.formatRatio(self.matches + self.mismatches, self.matches + self.mismatches + self.totalReadInsertionLength)
def referenceCoverage(self):
return self.formatRatio(self.matches + self.mismatches, self.matches + self.mismatches + self.totalReadDeletionLength)
def readIdentity(self):
return self.formatRatio(self.matches, self.matches + self.mismatches + self.totalReadInsertionLength)
def alignmentIdentity(self):
return self.formatRatio(self.matches, self.matches + self.mismatches + self.totalReadInsertionLength + self.totalReadDeletionLength)
def mismatchesPerAlignedBase(self):
return self.formatRatio(self.mismatches, self.matches + self.mismatches)
def deletionsPerReadBase(self):
return self.formatRatio(self.totalReadDeletions, self.matches + self.mismatches)
def insertionsPerReadBase(self):
return self.formatRatio(self.totalReadInsertions, self.matches + self.mismatches)
def readLength(self):
return len(self.readSeq)
@staticmethod
def getReadAlignmentStats(samFile, readFastqFile, referenceFastaFile, globalAlignment=True):
"""Gets a list of ReadAlignmentStats objects, one for each alignment in the same file
"""
refSequences = getFastaDictionary(referenceFastaFile) #Hash of names to sequences
readSequences = getFastqDictionary(readFastqFile) #Hash of names to sequences
sam = pysam.Samfile(samFile, "r")
readsToReadCoverages = {}
        readAlignmentStats = map(lambda aR : ReadAlignmentStats(readSequences[aR.query_name], \
                                                                refSequences[sam.getrname(aR.reference_id)], aR, globalAlignment), samIterator(sam))
sam.close()
return readAlignmentStats
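#Usage sketch (illustrative): summarising per-read identity and coverage for an alignment.
#The file names are hypothetical.
def exampleSummariseAlignment(samFile="alignment.sam", readFastqFile="reads.fastq",
                              referenceFastaFile="reference.fasta"):
    readAlignmentStats = ReadAlignmentStats.getReadAlignmentStats(samFile, readFastqFile,
                                                                  referenceFastaFile, globalAlignment=True)
    return [ (rAS.readIdentity(), rAS.readCoverage(), rAS.readLength()) for rAS in readAlignmentStats ]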
##Functions used in the creation of mutations - used in the testing of margin-caller
def mutateSequence(sequence, snpRate): #Does not preserve softmasking
"""Returns sequence with snpRate proportion of sites mutated and a list of those mutations
"""
mutations = []
mutatedSequence = list(sequence)
for i in xrange(len(sequence)):
if random.random() < snpRate:
base = sequence[i]
altBase = random.choice(list(set(("A", 'C', 'G', 'T')) - set(base.upper())))
altBase = altBase if base.upper() == base else altBase.lower()
mutations.append((i, base, altBase))
mutatedSequence[i] = altBase
return "".join(mutatedSequence), mutations
def mutateSequences(sequences, snpRate):
"""As mutateSequence, but for collection of sequences. Sequences is a dictionary
of sequences of names to sequences. Return value is a dictionary of names to mutated
    sequences and a list of those mutations, represented as tuples of (sequenceName, position, refBase, altBase).
"""
mutatedSequences = {}; allMutations = [] #List of refSequenceName, position, altBase
for name in sequences.keys():
mutatedSequence, mutations = mutateSequence(sequences[name], snpRate)
mutatedSequences[name] = mutatedSequence
allMutations += map(lambda x : (name, x[0], x[1], x[2]), mutations)
return mutatedSequences, allMutations
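#Sketch (illustrative): introduce SNPs into a reference fasta at a 1% rate and write the
#mutated sequences back out. File names are hypothetical; getFastaDictionary and fastaWrite
#are the module's own helpers.
def exampleMutateReference(inputFastaFile="reference.fasta", outputFastaFile="mutated.fasta", snpRate=0.01):
    mutatedSequences, allMutations = mutateSequences(getFastaDictionary(inputFastaFile), snpRate)
    fileHandle = open(outputFastaFile, 'w')
    for name in mutatedSequences:
        fastaWrite(fileHandle, name, mutatedSequences[name])
    fileHandle.close()
    return allMutations #Tuples of (sequenceName, position, refBase, altBase)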
|
benedictpaten/marginAlign
|
src/margin/utils.py
|
Python
|
mit
| 19,174
|
[
"BWA",
"pysam"
] |
b195f47e86f56f73a6b25fd95709d8379d37461c2824bf0b61fa8be17bfa6d64
|
''' FreeDiskSpaceCommand
The Command gets the free space that is left in a Storage Element
Note: there are, still, many references to "space tokens",
for example ResourceManagementClient().selectSpaceTokenOccupancyCache(token=elementName)
    This is for historical reasons, and should be fixed one day.
For the moment, when you see "token" or "space token" here, just read "StorageElement".
'''
__RCSID__ = '$Id$'
import sys
import errno
from datetime import datetime
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.File import convertSizeUnits
from DIRAC.ResourceStatusSystem.Command.Command import Command
from DIRAC.ResourceStatusSystem.Utilities import CSHelpers
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
class FreeDiskSpaceCommand(Command):
'''
Uses diskSpace method to get the free space
'''
def __init__(self, args=None, clients=None):
super(FreeDiskSpaceCommand, self).__init__(args, clients=clients)
self.rmClient = ResourceManagementClient()
def _prepareCommand(self):
'''
FreeDiskSpaceCommand requires one argument:
- name : <str>
'''
if 'name' not in self.args:
return S_ERROR('"name" not found in self.args')
elementName = self.args['name']
# We keep TB as default as this is what was used (and will still be used)
# in the policy for "space tokens" ("real", "data" SEs)
unit = self.args.get('unit', 'TB')
return S_OK((elementName, unit))
def doNew(self, masterParams=None):
"""
Gets the parameters to run, either from the master method or from its
own arguments.
Gets the total and the free disk space of a storage element
and inserts the results in the SpaceTokenOccupancyCache table
of ResourceManagementDB database.
The result is also returned to the caller, not only inserted.
What is inserted in the DB will normally be in MB,
what is returned will be in the specified unit.
"""
if masterParams is not None:
elementName, unit = masterParams
else:
params = self._prepareCommand()
if not params['OK']:
return params
elementName, unit = params['Value']
endpointResult = CSHelpers.getStorageElementEndpoint(elementName)
if not endpointResult['OK']:
return endpointResult
se = StorageElement(elementName)
occupancyResult = se.getOccupancy(unit=unit)
if not occupancyResult['OK']:
return occupancyResult
occupancy = occupancyResult['Value']
free = occupancy['Free']
total = occupancy['Total']
results = {'Endpoint': endpointResult['Value'],
'Free': free,
'Total': total,
'ElementName': elementName}
result = self._storeCommand(results)
if not result['OK']:
return result
return S_OK({'Free': free, 'Total': total})
def _storeCommand(self, results):
""" Here purely for extensibility
"""
return self.rmClient.addOrModifySpaceTokenOccupancyCache(endpoint=results['Endpoint'],
lastCheckTime=datetime.utcnow(),
free=results['Free'],
total=results['Total'],
token=results['ElementName'])
def doCache(self):
"""
This is a method that gets the element's details from the spaceTokenOccupancyCache DB table.
    It will return a dictionary with the results, converted to the requested unit.
"""
params = self._prepareCommand()
if not params['OK']:
return params
elementName, unit = params['Value']
result = self.rmClient.selectSpaceTokenOccupancyCache(token=elementName)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR(errno.ENODATA, "No occupancy recorded")
# results are normally in 'MB'
free = result['Value'][0][3]
total = result['Value'][0][4]
free = convertSizeUnits(free, 'MB', unit)
total = convertSizeUnits(total, 'MB', unit)
if free == -sys.maxsize or total == -sys.maxsize:
return S_ERROR("No valid unit specified")
return S_OK({'Free': free, 'Total': total})
def doMaster(self):
"""
This method calls the doNew method for each storage element
that exists in the CS.
"""
    elements = CSHelpers.getStorageElements()
    if not elements['OK']:
      return elements
for name in elements['Value']:
      # query in MB, which is the unit in which the values are stored in the DB (doCache converts on read)
diskSpace = self.doNew((name, 'MB'))
if not diskSpace['OK']:
gLogger.warn("Unable to calculate free/total disk space", "name: %s" % name)
gLogger.warn(diskSpace['Message'])
continue
return S_OK()
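# Illustrative sketch (not part of DIRAC): querying the cached occupancy of a single,
# hypothetical storage element in GB. Assumes a properly initialised DIRAC environment.
if __name__ == '__main__':
  command = FreeDiskSpaceCommand({'name': 'SOME-SE-disk', 'unit': 'GB'})
  result = command.doCache()
  if result['OK']:
    gLogger.notice("Free/Total (GB): %s / %s" % (result['Value']['Free'], result['Value']['Total']))
  else:
    gLogger.error(result['Message'])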
|
arrabito/DIRAC
|
ResourceStatusSystem/Command/FreeDiskSpaceCommand.py
|
Python
|
gpl-3.0
| 4,872
|
[
"DIRAC"
] |
564742fcc77fa4633119c00c885751edcc33b32c15b167788625a216edb1ab45
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.